feat: waku api send (#3669)

* Introduce api/send
Added supporting events and requests.
Reworked delivery_monitor into a full-featured delivery_service that
- supports relay publish and lightpush depending on configuration, with fallback between the two
- uses the store API to confirm message delivery, where available and configured
- emits message delivery events accordingly
(a usage sketch of the new API follows the change list below)

Prepared for use in api_example.

* Fix edge mode config and add a test
* Fix some import issues; Waku start and stop no longer throw exceptions but return a proper result
* Utilize sync RequestBroker, adapt to non-async broker usage and gcsafe where appropriate, remove leftovers
* Add api_example app to example2
* Adapt after merge from master
* Adapt code to use the broker context
* Fix brokerCtx settings for all used brokers, cover locked node init
* Various fixes for test failures. Added an initial subscribe API and auto-subscribe for the send API
* More tests added
* Fix multiple propagated-event emits, fix failing send test case
* Fix rebase
* Fix PushMessageHandlers in tests
* Adapt libwaku to API changes
* Fix relay test by adapting to publish returning an error on NoPeersToPublish
* Address all remaining review findings. Remove leftovers. Fix logging and typos
* Fix rln relay broker, missing brokerCtx
* Fix failing REST relay test, as publish now fails when no peers are available
* Ignore anvil test state file
* Make test_wakunode_rln_relay broker-context aware to fix it
* Fix waku rln tests by making them broker-context aware
* Fix typo in test_app.nim
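
A minimal usage sketch of the new API, condensed from the examples/api_example app added in this PR (the config, event, and send names below are taken from that example; read it as an illustration, not reference docs):

import chronos, results
import waku

proc demo(node: Waku) {.async.} =
  # Delivery outcome is reported via events, keyed by the RequestId returned by send
  let propagatedListener = MessagePropagatedEvent.listen(
    proc(event: MessagePropagatedEvent) {.async: (raises: []).} =
      echo "propagated: ", event.requestId, " hash: ", event.messageHash
  ).valueOr:
    echo "Failed to listen to propagated event: ", error
    return
  defer:
    MessagePropagatedEvent.dropListener(propagatedListener)

  # send() schedules delivery (relay publish, with lightpush as fallback) and
  # returns immediately; the request ID ties later delivery events to this message
  let envelope = MessageEnvelope.init(
    contentTopic = "example/content/topic", payload = "Hello Waku!"
  )
  let requestId = (await node.send(envelope)).valueOr:
    echo "Failed to send message: ", error
    return
  echo "Send scheduled with request ID: ", requestId
  await sleepAsync(30.seconds) # leave time for the delivery events to arrive

when isMainModule:
  # Stand-alone setup as in api_example: no entry nodes, test cluster 42
  let config = NodeConfig.init(
    protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 42)
  )
  var node = (waitFor createNode(config)).valueOr:
    echo "Failed to create node: ", error
    quit(QuitFailure)
  (waitFor startWaku(addr node)).isOkOr:
    echo "Failed to start node: ", error
    quit(QuitFailure)
  waitFor demo(node)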
NagyZoltanPeter 2026-01-30 01:06:00 +01:00 committed by GitHub
parent 538b279b94
commit 1fd25355e0
69 changed files with 2331 additions and 928 deletions

.gitignore

@@ -89,3 +89,5 @@ AGENTS.md
nimble.develop
nimble.paths
nimbledeps
**/anvil_state/state-deployed-contracts-mint-and-approved.json


@@ -272,6 +272,10 @@ lightpushwithmix: | build deps librln
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim lightpushwithmix $(NIM_PARAMS) waku.nims
api_example: | build deps librln
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim api_example $(NIM_PARAMS) waku.nims
build/%: | build deps librln
echo -e $(BUILD_MSG) "build/$*" && \
$(ENV_SCRIPT) nim buildone $(NIM_PARAMS) waku.nims $*


@@ -130,7 +130,8 @@ when isMainModule:
info "Setting up shutdown hooks"
proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
await waku.stop()
(await waku.stop()).isOkOr:
error "Waku shutdown failed", error = error
quit(QuitSuccess)
# Handle Ctrl-C SIGINT
@@ -160,7 +161,8 @@ when isMainModule:
# Not available in -d:release mode
writeStackTrace()
waitFor waku.stop()
(waitFor waku.stop()).isOkOr:
error "Waku shutdown failed", error = error
quit(QuitFailure)
c_signal(ansi_c.SIGSEGV, handleSigsegv)


@@ -62,7 +62,8 @@ when isMainModule:
info "Setting up shutdown hooks"
proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
await waku.stop()
(await waku.stop()).isOkOr:
error "Waku shutdown failed", error = error
quit(QuitSuccess)
# Handle Ctrl-C SIGINT
@@ -92,7 +93,8 @@ when isMainModule:
# Not available in -d:release mode
writeStackTrace()
waitFor waku.stop()
(waitFor waku.stop()).isOkOr:
error "Waku shutdown failed", error = error
quit(QuitFailure)
c_signal(ansi_c.SIGSEGV, handleSigsegv)


@@ -0,0 +1,89 @@
import std/options
import chronos, results, confutils, confutils/defs
import waku
type CliArgs = object
ethRpcEndpoint* {.
defaultValue: "", desc: "ETH RPC Endpoint, if passed, RLN is enabled"
.}: string
proc periodicSender(w: Waku): Future[void] {.async.} =
let sentListener = MessageSentEvent.listen(
proc(event: MessageSentEvent) {.async: (raises: []).} =
echo "Message sent with request ID: ",
event.requestId, " hash: ", event.messageHash
).valueOr:
echo "Failed to listen to message sent event: ", error
return
let errorListener = MessageErrorEvent.listen(
proc(event: MessageErrorEvent) {.async: (raises: []).} =
echo "Message failed to send with request ID: ",
event.requestId, " error: ", event.error
).valueOr:
echo "Failed to listen to message error event: ", error
return
let propagatedListener = MessagePropagatedEvent.listen(
proc(event: MessagePropagatedEvent) {.async: (raises: []).} =
echo "Message propagated with request ID: ",
event.requestId, " hash: ", event.messageHash
).valueOr:
echo "Failed to listen to message propagated event: ", error
return
defer:
MessageSentEvent.dropListener(sentListener)
MessageErrorEvent.dropListener(errorListener)
MessagePropagatedEvent.dropListener(propagatedListener)
## Periodically sends a Waku message every 30 seconds
var counter = 0
while true:
let envelope = MessageEnvelope.init(
contentTopic = "example/content/topic",
payload = "Hello Waku! Message number: " & $counter,
)
let sendRequestId = (await w.send(envelope)).valueOr:
echo "Failed to send message: ", error
quit(QuitFailure)
echo "Sending message with request ID: ", sendRequestId, " counter: ", counter
counter += 1
await sleepAsync(30.seconds)
when isMainModule:
let args = CliArgs.load()
echo "Starting Waku node..."
let config =
if (args.ethRpcEndpoint == ""):
# Create a basic configuration for the Waku node
# No RLN as we don't have an ETH RPC Endpoint
NodeConfig.init(
protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 42)
)
else:
# Connect to TWN, use ETH RPC Endpoint for RLN
NodeConfig.init(mode = WakuMode.Core, ethRpcEndpoints = @[args.ethRpcEndpoint])
# Create the node using the library API's createNode function
let node = (waitFor createNode(config)).valueOr:
echo "Failed to create node: ", error
quit(QuitFailure)
echo("Waku node created successfully!")
# Start the node
(waitFor startWaku(addr node)).isOkOr:
echo "Failed to start node: ", error
quit(QuitFailure)
echo "Node started successfully!"
asyncSpawn periodicSender(node)
runForever()


@@ -1,40 +0,0 @@
import std/options
import chronos, results, confutils, confutils/defs
import waku
type CliArgs = object
ethRpcEndpoint* {.
defaultValue: "", desc: "ETH RPC Endpoint, if passed, RLN is enabled"
.}: string
when isMainModule:
let args = CliArgs.load()
echo "Starting Waku node..."
let config =
if (args.ethRpcEndpoint == ""):
# Create a basic configuration for the Waku node
# No RLN as we don't have an ETH RPC Endpoint
NodeConfig.init(
protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 42)
)
else:
# Connect to TWN, use ETH RPC Endpoint for RLN
NodeConfig.init(ethRpcEndpoints = @[args.ethRpcEndpoint])
# Create the node using the library API's createNode function
let node = (waitFor createNode(config)).valueOr:
echo "Failed to create node: ", error
quit(QuitFailure)
echo("Waku node created successfully!")
# Start the node
(waitFor startWaku(addr node)).isOkOr:
echo "Failed to start node: ", error
quit(QuitFailure)
echo "Node started successfully!"
runForever()


@@ -79,9 +79,7 @@ proc waku_start(
proc waku_stop(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
try:
await ctx.myLib[].stop()
except Exception as exc:
error "STOP_NODE failed", error = exc.msg
return err("failed to stop: " & exc.msg)
(await ctx.myLib[].stop()).isOkOr:
error "STOP_NODE failed", error = error
return err("failed to stop: " & $error)
return ok("")

tests/api/test_api_send.nim (new file)

@@ -0,0 +1,431 @@
{.used.}
import std/strutils
import chronos, testutils/unittests, stew/byteutils, libp2p/[switch, peerinfo]
import ../testlib/[common, wakucore, wakunode, testasync]
import ../waku_archive/archive_utils
import
waku, waku/[waku_node, waku_core, waku_relay/protocol, common/broker/broker_context]
import waku/api/api_conf, waku/factory/waku_conf
type SendEventOutcome {.pure.} = enum
Sent
Propagated
Error
type SendEventListenerManager = ref object
brokerCtx: BrokerContext
sentListener: MessageSentEventListener
errorListener: MessageErrorEventListener
propagatedListener: MessagePropagatedEventListener
sentFuture: Future[void]
errorFuture: Future[void]
propagatedFuture: Future[void]
sentCount: int
errorCount: int
propagatedCount: int
sentRequestIds: seq[RequestId]
errorRequestIds: seq[RequestId]
propagatedRequestIds: seq[RequestId]
proc newSendEventListenerManager(brokerCtx: BrokerContext): SendEventListenerManager =
let manager = SendEventListenerManager(brokerCtx: brokerCtx)
manager.sentFuture = newFuture[void]("sentEvent")
manager.errorFuture = newFuture[void]("errorEvent")
manager.propagatedFuture = newFuture[void]("propagatedEvent")
manager.sentListener = MessageSentEvent.listen(
brokerCtx,
proc(event: MessageSentEvent) {.async: (raises: []).} =
inc manager.sentCount
manager.sentRequestIds.add(event.requestId)
echo "SENT EVENT TRIGGERED (#",
manager.sentCount, "): requestId=", event.requestId
if not manager.sentFuture.finished():
manager.sentFuture.complete()
,
).valueOr:
raiseAssert error
manager.errorListener = MessageErrorEvent.listen(
brokerCtx,
proc(event: MessageErrorEvent) {.async: (raises: []).} =
inc manager.errorCount
manager.errorRequestIds.add(event.requestId)
echo "ERROR EVENT TRIGGERED (#", manager.errorCount, "): ", event.error
if not manager.errorFuture.finished():
manager.errorFuture.fail(
newException(CatchableError, "Error event triggered: " & event.error)
)
,
).valueOr:
raiseAssert error
manager.propagatedListener = MessagePropagatedEvent.listen(
brokerCtx,
proc(event: MessagePropagatedEvent) {.async: (raises: []).} =
inc manager.propagatedCount
manager.propagatedRequestIds.add(event.requestId)
echo "PROPAGATED EVENT TRIGGERED (#",
manager.propagatedCount, "): requestId=", event.requestId
if not manager.propagatedFuture.finished():
manager.propagatedFuture.complete()
,
).valueOr:
raiseAssert error
return manager
proc teardown(manager: SendEventListenerManager) =
MessageSentEvent.dropListener(manager.brokerCtx, manager.sentListener)
MessageErrorEvent.dropListener(manager.brokerCtx, manager.errorListener)
MessagePropagatedEvent.dropListener(manager.brokerCtx, manager.propagatedListener)
proc waitForEvents(
manager: SendEventListenerManager, timeout: Duration
): Future[bool] {.async.} =
return await allFutures(
manager.sentFuture, manager.propagatedFuture, manager.errorFuture
)
.withTimeout(timeout)
proc outcomes(manager: SendEventListenerManager): set[SendEventOutcome] =
if manager.sentFuture.completed():
result.incl(SendEventOutcome.Sent)
if manager.propagatedFuture.completed():
result.incl(SendEventOutcome.Propagated)
if manager.errorFuture.failed():
result.incl(SendEventOutcome.Error)
proc validate(manager: SendEventListenerManager, expected: set[SendEventOutcome]) =
echo "EVENT COUNTS: sent=",
manager.sentCount, ", propagated=", manager.propagatedCount, ", error=",
manager.errorCount
check manager.outcomes() == expected
proc validate(
manager: SendEventListenerManager,
expected: set[SendEventOutcome],
expectedRequestId: RequestId,
) =
manager.validate(expected)
for requestId in manager.sentRequestIds:
check requestId == expectedRequestId
for requestId in manager.propagatedRequestIds:
check requestId == expectedRequestId
for requestId in manager.errorRequestIds:
check requestId == expectedRequestId
proc createApiNodeConf(mode: WakuMode = WakuMode.Core): NodeConfig =
result = NodeConfig.init(
mode = mode,
protocolsConfig = ProtocolsConfig.init(
entryNodes = @[],
clusterId = 1,
autoShardingConfig = AutoShardingConfig(numShardsInCluster: 1),
),
p2pReliability = true,
)
suite "Waku API - Send":
var
relayNode1 {.threadvar.}: WakuNode
relayNode1PeerInfo {.threadvar.}: RemotePeerInfo
relayNode1PeerId {.threadvar.}: PeerId
relayNode2 {.threadvar.}: WakuNode
relayNode2PeerInfo {.threadvar.}: RemotePeerInfo
relayNode2PeerId {.threadvar.}: PeerId
lightpushNode {.threadvar.}: WakuNode
lightpushNodePeerInfo {.threadvar.}: RemotePeerInfo
lightpushNodePeerId {.threadvar.}: PeerId
storeNode {.threadvar.}: WakuNode
storeNodePeerInfo {.threadvar.}: RemotePeerInfo
storeNodePeerId {.threadvar.}: PeerId
asyncSetup:
lockNewGlobalBrokerContext:
relayNode1 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
relayNode1.mountMetadata(1, @[0'u16]).isOkOr:
raiseAssert "Failed to mount metadata: " & error
(await relayNode1.mountRelay()).isOkOr:
raiseAssert "Failed to mount relay"
await relayNode1.mountLibp2pPing()
await relayNode1.start()
lockNewGlobalBrokerContext:
relayNode2 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
relayNode2.mountMetadata(1, @[0'u16]).isOkOr:
raiseAssert "Failed to mount metadata: " & error
(await relayNode2.mountRelay()).isOkOr:
raiseAssert "Failed to mount relay"
await relayNode2.mountLibp2pPing()
await relayNode2.start()
lockNewGlobalBrokerContext:
lightpushNode =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
lightpushNode.mountMetadata(1, @[0'u16]).isOkOr:
raiseAssert "Failed to mount metadata: " & error
(await lightpushNode.mountRelay()).isOkOr:
raiseAssert "Failed to mount relay"
(await lightpushNode.mountLightPush()).isOkOr:
raiseAssert "Failed to mount lightpush"
await lightpushNode.mountLibp2pPing()
await lightpushNode.start()
lockNewGlobalBrokerContext:
storeNode =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
storeNode.mountMetadata(1, @[0'u16]).isOkOr:
raiseAssert "Failed to mount metadata: " & error
(await storeNode.mountRelay()).isOkOr:
raiseAssert "Failed to mount relay"
# Mount archive so store can persist messages
let archiveDriver = newSqliteArchiveDriver()
storeNode.mountArchive(archiveDriver).isOkOr:
raiseAssert "Failed to mount archive: " & error
await storeNode.mountStore()
await storeNode.mountLibp2pPing()
await storeNode.start()
relayNode1PeerInfo = relayNode1.peerInfo.toRemotePeerInfo()
relayNode1PeerId = relayNode1.peerInfo.peerId
relayNode2PeerInfo = relayNode2.peerInfo.toRemotePeerInfo()
relayNode2PeerId = relayNode2.peerInfo.peerId
lightpushNodePeerInfo = lightpushNode.peerInfo.toRemotePeerInfo()
lightpushNodePeerId = lightpushNode.peerInfo.peerId
storeNodePeerInfo = storeNode.peerInfo.toRemotePeerInfo()
storeNodePeerId = storeNode.peerInfo.peerId
# Subscribe all relay nodes to the default shard topic
const testPubsubTopic = PubsubTopic("/waku/2/rs/1/0")
proc dummyHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
discard
relayNode1.subscribe((kind: PubsubSub, topic: testPubsubTopic), dummyHandler).isOkOr:
raiseAssert "Failed to subscribe relayNode1: " & error
relayNode2.subscribe((kind: PubsubSub, topic: testPubsubTopic), dummyHandler).isOkOr:
raiseAssert "Failed to subscribe relayNode2: " & error
lightpushNode.subscribe((kind: PubsubSub, topic: testPubsubTopic), dummyHandler).isOkOr:
raiseAssert "Failed to subscribe lightpushNode: " & error
storeNode.subscribe((kind: PubsubSub, topic: testPubsubTopic), dummyHandler).isOkOr:
raiseAssert "Failed to subscribe storeNode: " & error
# Connect the nodes together
await relayNode1.connectToNodes(@[relayNode2PeerInfo, storeNodePeerInfo])
await lightpushNode.connectToNodes(@[relayNode2PeerInfo])
asyncTeardown:
await allFutures(
relayNode1.stop(), relayNode2.stop(), lightpushNode.stop(), storeNode.stop()
)
asyncTest "Check API availability (unhealthy node)":
var node: Waku
lockNewGlobalBrokerContext:
node = (await createNode(createApiNodeConf())).valueOr:
raiseAssert error
(await startWaku(addr node)).isOkOr:
raiseAssert "Failed to start Waku node: " & error
# node is not connected!
let envelope = MessageEnvelope.init(
ContentTopic("/waku/2/default-content/proto"), "test payload"
)
let sendResult = await node.send(envelope)
check sendResult.isErr() # Depending on implementation, it might say "not healthy"
check sendResult.error().contains("not healthy")
(await node.stop()).isOkOr:
raiseAssert "Failed to stop node: " & error
asyncTest "Send fully validated":
var node: Waku
lockNewGlobalBrokerContext:
node = (await createNode(createApiNodeConf())).valueOr:
raiseAssert error
(await startWaku(addr node)).isOkOr:
raiseAssert "Failed to start Waku node: " & error
await node.node.connectToNodes(
@[relayNode1PeerInfo, lightpushNodePeerInfo, storeNodePeerInfo]
)
let eventManager = newSendEventListenerManager(node.brokerCtx)
defer:
eventManager.teardown()
let envelope = MessageEnvelope.init(
ContentTopic("/waku/2/default-content/proto"), "test payload"
)
let requestId = (await node.send(envelope)).valueOr:
raiseAssert error
# Wait for events with timeout
const eventTimeout = 10.seconds
discard await eventManager.waitForEvents(eventTimeout)
eventManager.validate(
{SendEventOutcome.Sent, SendEventOutcome.Propagated}, requestId
)
(await node.stop()).isOkOr:
raiseAssert "Failed to stop node: " & error
asyncTest "Send only propagates":
var node: Waku
lockNewGlobalBrokerContext:
node = (await createNode(createApiNodeConf())).valueOr:
raiseAssert error
(await startWaku(addr node)).isOkOr:
raiseAssert "Failed to start Waku node: " & error
await node.node.connectToNodes(@[relayNode1PeerInfo])
let eventManager = newSendEventListenerManager(node.brokerCtx)
defer:
eventManager.teardown()
let envelope = MessageEnvelope.init(
ContentTopic("/waku/2/default-content/proto"), "test payload"
)
let requestId = (await node.send(envelope)).valueOr:
raiseAssert error
# Wait for events with timeout
const eventTimeout = 10.seconds
discard await eventManager.waitForEvents(eventTimeout)
eventManager.validate({SendEventOutcome.Propagated}, requestId)
(await node.stop()).isOkOr:
raiseAssert "Failed to stop node: " & error
asyncTest "Send only propagates fallback to lightpush":
var node: Waku
lockNewGlobalBrokerContext:
node = (await createNode(createApiNodeConf())).valueOr:
raiseAssert error
(await startWaku(addr node)).isOkOr:
raiseAssert "Failed to start Waku node: " & error
await node.node.connectToNodes(@[lightpushNodePeerInfo])
let eventManager = newSendEventListenerManager(node.brokerCtx)
defer:
eventManager.teardown()
let envelope = MessageEnvelope.init(
ContentTopic("/waku/2/default-content/proto"), "test payload"
)
let requestId = (await node.send(envelope)).valueOr:
raiseAssert error
# Wait for events with timeout
const eventTimeout = 10.seconds
discard await eventManager.waitForEvents(eventTimeout)
eventManager.validate({SendEventOutcome.Propagated}, requestId)
(await node.stop()).isOkOr:
raiseAssert "Failed to stop node: " & error
asyncTest "Send fully validates fallback to lightpush":
var node: Waku
lockNewGlobalBrokerContext:
node = (await createNode(createApiNodeConf())).valueOr:
raiseAssert error
(await startWaku(addr node)).isOkOr:
raiseAssert "Failed to start Waku node: " & error
await node.node.connectToNodes(@[lightpushNodePeerInfo, storeNodePeerInfo])
let eventManager = newSendEventListenerManager(node.brokerCtx)
defer:
eventManager.teardown()
let envelope = MessageEnvelope.init(
ContentTopic("/waku/2/default-content/proto"), "test payload"
)
let requestId = (await node.send(envelope)).valueOr:
raiseAssert error
# Wait for events with timeout
const eventTimeout = 10.seconds
discard await eventManager.waitForEvents(eventTimeout)
eventManager.validate(
{SendEventOutcome.Propagated, SendEventOutcome.Sent}, requestId
)
(await node.stop()).isOkOr:
raiseAssert "Failed to stop node: " & error
asyncTest "Send fails with event":
var fakeLightpushNode: WakuNode
lockNewGlobalBrokerContext:
fakeLightpushNode =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
fakeLightpushNode.mountMetadata(1, @[0'u16]).isOkOr:
raiseAssert "Failed to mount metadata: " & error
(await fakeLightpushNode.mountRelay()).isOkOr:
raiseAssert "Failed to mount relay"
(await fakeLightpushNode.mountLightPush()).isOkOr:
raiseAssert "Failed to mount lightpush"
await fakeLightpushNode.mountLibp2pPing()
await fakeLightpushNode.start()
let fakeLightpushNodePeerInfo = fakeLightpushNode.peerInfo.toRemotePeerInfo()
proc dummyHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
discard
fakeLightpushNode.subscribe(
(kind: PubsubSub, topic: PubsubTopic("/waku/2/rs/1/0")), dummyHandler
).isOkOr:
raiseAssert "Failed to subscribe fakeLightpushNode: " & error
var node: Waku
lockNewGlobalBrokerContext:
node = (await createNode(createApiNodeConf(WakuMode.Edge))).valueOr:
raiseAssert error
(await startWaku(addr node)).isOkOr:
raiseAssert "Failed to start Waku node: " & error
await node.node.connectToNodes(@[fakeLightpushNodePeerInfo])
let eventManager = newSendEventListenerManager(node.brokerCtx)
defer:
eventManager.teardown()
let envelope = MessageEnvelope.init(
ContentTopic("/waku/2/default-content/proto"), "test payload"
)
let requestId = (await node.send(envelope)).valueOr:
raiseAssert error
echo "Sent message with requestId=", requestId
# Wait for events with timeout
const eventTimeout = 62.seconds
discard await eventManager.waitForEvents(eventTimeout)
eventManager.validate({SendEventOutcome.Error}, requestId)
(await node.stop()).isOkOr:
raiseAssert "Failed to stop node: " & error


@@ -21,6 +21,27 @@ suite "LibWaku Conf - toWakuConf":
wakuConf.shardingConf.numShardsInCluster == 8
wakuConf.staticNodes.len == 0
test "Edge mode configuration":
## Given
let protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 1)
let nodeConfig = NodeConfig.init(mode = Edge, protocolsConfig = protocolsConfig)
## When
let wakuConfRes = toWakuConf(nodeConfig)
## Then
require wakuConfRes.isOk()
let wakuConf = wakuConfRes.get()
require wakuConf.validate().isOk()
check:
wakuConf.relay == false
wakuConf.lightPush == false
wakuConf.filterServiceConf.isSome() == false
wakuConf.storeServiceConf.isSome() == false
wakuConf.peerExchangeService == true
wakuConf.clusterId == 1
test "Core mode configuration":
## Given
let protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 1)


@@ -25,9 +25,6 @@ import
suite "Waku Legacy Lightpush - End To End":
var
handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)]
handler {.threadvar.}: PushMessageHandler
server {.threadvar.}: WakuNode
client {.threadvar.}: WakuNode
@@ -37,13 +34,6 @@ suite "Waku Legacy Lightpush - End To End":
message {.threadvar.}: WakuMessage
asyncSetup:
handlerFuture = newPushHandlerFuture()
handler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult[void]] {.async.} =
handlerFuture.complete((pubsubTopic, message))
return ok()
let
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
@@ -108,9 +98,6 @@ suite "Waku Legacy Lightpush - End To End":
suite "RLN Proofs as a Lightpush Service":
var
handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)]
handler {.threadvar.}: PushMessageHandler
server {.threadvar.}: WakuNode
client {.threadvar.}: WakuNode
anvilProc {.threadvar.}: Process
@@ -122,13 +109,6 @@ suite "RLN Proofs as a Lightpush Service":
message {.threadvar.}: WakuMessage
asyncSetup:
handlerFuture = newPushHandlerFuture()
handler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult[void]] {.async.} =
handlerFuture.complete((pubsubTopic, message))
return ok()
let
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()


@@ -37,13 +37,6 @@ suite "Waku Lightpush - End To End":
message {.threadvar.}: WakuMessage
asyncSetup:
handlerFuture = newPushHandlerFuture()
handler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult] {.async.} =
handlerFuture.complete((pubsubTopic, message))
return ok(PublishedToOnePeer)
let
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
@@ -108,9 +101,6 @@ suite "Waku Lightpush - End To End":
suite "RLN Proofs as a Lightpush Service":
var
handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)]
handler {.threadvar.}: PushMessageHandler
server {.threadvar.}: WakuNode
client {.threadvar.}: WakuNode
anvilProc {.threadvar.}: Process
@@ -122,13 +112,6 @@ suite "RLN Proofs as a Lightpush Service":
message {.threadvar.}: WakuMessage
asyncSetup:
handlerFuture = newPushHandlerFuture()
handler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult] {.async.} =
handlerFuture.complete((pubsubTopic, message))
return ok(PublishedToOnePeer)
let
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()


@@ -38,7 +38,7 @@ suite "Waku Lightpush Client":
asyncSetup:
handlerFuture = newPushHandlerFuture()
handler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult] {.async.} =
let msgLen = message.encode().buffer.len
if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024:
@@ -287,7 +287,7 @@ suite "Waku Lightpush Client":
handlerError = "handler-error"
handlerFuture2 = newFuture[void]()
handler2 = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult] {.async.} =
handlerFuture2.complete()
return lighpushErrorResult(LightPushErrorCode.PAYLOAD_TOO_LARGE, handlerError)


@@ -19,7 +19,7 @@ suite "Rate limited push service":
## Given
var handlerFuture = newFuture[(string, WakuMessage)]()
let handler: PushMessageHandler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult] {.async.} =
handlerFuture.complete((pubsubTopic, message))
return lightpushSuccessResult(1) # succeed to publish to 1 peer.
@@ -84,7 +84,7 @@ suite "Rate limited push service":
# CI can be slow enough that sequential requests accidentally refill tokens.
# Instead we issue a small burst and assert we observe at least one rejection.
let handler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult] {.async.} =
return lightpushSuccessResult(1)


@@ -35,7 +35,7 @@ suite "Waku Legacy Lightpush Client":
asyncSetup:
handlerFuture = newPushHandlerFuture()
handler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult[void]] {.async.} =
let msgLen = message.encode().buffer.len
if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024:
@@ -282,7 +282,7 @@ suite "Waku Legacy Lightpush Client":
handlerError = "handler-error"
handlerFuture2 = newFuture[void]()
handler2 = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult[void]] {.async.} =
handlerFuture2.complete()
return err(handlerError)


@@ -25,7 +25,7 @@ suite "Rate limited push service":
## Given
var handlerFuture = newFuture[(string, WakuMessage)]()
let handler: PushMessageHandler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult[void]] {.async.} =
handlerFuture.complete((pubsubTopic, message))
return ok()
@@ -87,7 +87,7 @@ suite "Rate limited push service":
## Given
let handler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult[void]] {.async.} =
return ok()


@@ -1,7 +1,7 @@
{.used.}
import
std/[os, sequtils, sysrand, math],
std/[os, strutils, sequtils, sysrand, math],
stew/byteutils,
testutils/unittests,
chronos,
@@ -450,7 +450,8 @@ suite "WakuNode - Relay":
await sleepAsync(500.millis)
let res = await node2.publish(some($shard), message)
assert res.isOk(), $res.error
check res.isErr()
check contains($res.error, "NoPeersToPublish")
await sleepAsync(500.millis)


@@ -15,6 +15,7 @@ import
waku_rln_relay/rln,
waku_rln_relay/protocol_metrics,
waku_keystore,
common/broker/broker_context,
],
./rln/waku_rln_relay_utils,
./utils_onchain,
@@ -233,8 +234,10 @@ suite "Waku rln relay":
let index = MembershipIndex(5)
let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = index)
let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
raiseAssert $error
var wakuRlnRelay: WakuRlnRelay
lockNewGlobalBrokerContext:
wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
raiseAssert $error
let manager = cast[OnchainGroupManager](wakuRlnRelay.groupManager)
let idCredentials = generateCredentials()
@@ -290,8 +293,10 @@ suite "Waku rln relay":
let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = index)
let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
raiseAssert $error
var wakuRlnRelay: WakuRlnRelay
lockNewGlobalBrokerContext:
wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
raiseAssert $error
let manager = cast[OnchainGroupManager](wakuRlnRelay.groupManager)
let idCredentials = generateCredentials()
@@ -340,8 +345,10 @@ suite "Waku rln relay":
asyncTest "multiple senders with same external nullifier":
let index1 = MembershipIndex(5)
let rlnConf1 = getWakuRlnConfig(manager = manager, index = index1)
let wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr:
raiseAssert "failed to create waku rln relay: " & $error
var wakuRlnRelay1: WakuRlnRelay
lockNewGlobalBrokerContext:
wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr:
raiseAssert "failed to create waku rln relay: " & $error
let manager1 = cast[OnchainGroupManager](wakuRlnRelay1.groupManager)
let idCredentials1 = generateCredentials()
@@ -354,8 +361,10 @@ suite "Waku rln relay":
let index2 = MembershipIndex(6)
let rlnConf2 = getWakuRlnConfig(manager = manager, index = index2)
let wakuRlnRelay2 = (await WakuRlnRelay.new(rlnConf2)).valueOr:
raiseAssert "failed to create waku rln relay: " & $error
var wakuRlnRelay2: WakuRlnRelay
lockNewGlobalBrokerContext:
wakuRlnRelay2 = (await WakuRlnRelay.new(rlnConf2)).valueOr:
raiseAssert "failed to create waku rln relay: " & $error
let manager2 = cast[OnchainGroupManager](wakuRlnRelay2.groupManager)
let idCredentials2 = generateCredentials()
@@ -486,9 +495,10 @@ suite "Waku rln relay":
let wakuRlnConfig = getWakuRlnConfig(
manager = manager, index = index, epochSizeSec = rlnEpochSizeSec.uint64
)
let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
raiseAssert $error
var wakuRlnRelay: WakuRlnRelay
lockNewGlobalBrokerContext:
wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
raiseAssert $error
let rlnMaxEpochGap = wakuRlnRelay.rlnMaxEpochGap
let testProofMetadata = default(ProofMetadata)


@@ -12,7 +12,8 @@ import
waku/[waku_core, waku_node, waku_rln_relay],
../testlib/[wakucore, futures, wakunode, testutils],
./utils_onchain,
./rln/waku_rln_relay_utils
./rln/waku_rln_relay_utils,
waku/common/broker/broker_context
from std/times import epochTime
@@ -37,68 +38,70 @@ procSuite "WakuNode - RLN relay":
stopAnvil(anvilProc)
asyncTest "testing rln-relay with valid proof":
let
# publisher node
nodeKey1 = generateSecp256k1Key()
var node1, node2, node3: WakuNode # publisher node
let contentTopic = ContentTopic("/waku/2/default-content/proto")
# set up three nodes
lockNewGlobalBrokerContext:
let nodeKey1 = generateSecp256k1Key()
node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0))
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
# Registration is mandatory before sending messages with rln-relay
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
let idCredentials1 = generateCredentials()
try:
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots()
info "Updated root for node1", rootUpdated1
lockNewGlobalBrokerContext:
# Relay node
nodeKey2 = generateSecp256k1Key()
let nodeKey2 = generateSecp256k1Key()
node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0))
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(2))
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
info "Updated root for node2", rootUpdated2
lockNewGlobalBrokerContext:
# Subscriber
nodeKey3 = generateSecp256k1Key()
let nodeKey3 = generateSecp256k1Key()
node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0))
contentTopic = ContentTopic("/waku/2/default-content/proto")
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# set up three nodes
# node1
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig3 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(3))
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
await node3.mountRlnRelay(wakuRlnConfig3)
await node3.start()
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
# Registration is mandatory before sending messages with rln-relay
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
let idCredentials1 = generateCredentials()
try:
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots()
info "Updated root for node1", rootUpdated1
# node 2
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 = getWakuRlnConfig(manager = manager, index = MembershipIndex(2))
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
info "Updated root for node2", rootUpdated2
# node 3
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig3 = getWakuRlnConfig(manager = manager, index = MembershipIndex(3))
await node3.mountRlnRelay(wakuRlnConfig3)
await node3.start()
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots()
info "Updated root for node3", rootUpdated3
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots()
info "Updated root for node3", rootUpdated3
# connect them together
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -156,10 +159,67 @@ procSuite "WakuNode - RLN relay":
asyncTest "testing rln-relay is applied in all rln shards/content topics":
# create 3 nodes
let nodes = toSeq(0 ..< 3).mapIt(
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
)
await allFutures(nodes.mapIt(it.start()))
var node1, node2, node3: WakuNode
lockNewGlobalBrokerContext:
let nodeKey1 = generateSecp256k1Key()
node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0))
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig1 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
let idCredentials1 = generateCredentials()
try:
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots()
info "Updated root for node", node = 1, rootUpdated = rootUpdated1
lockNewGlobalBrokerContext:
let nodeKey2 = generateSecp256k1Key()
node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0))
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig2 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(2))
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let idCredentials2 = generateCredentials()
try:
waitFor manager2.register(idCredentials2, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated2 = waitFor manager2.updateRoots()
info "Updated root for node", node = 2, rootUpdated = rootUpdated2
lockNewGlobalBrokerContext:
let nodeKey3 = generateSecp256k1Key()
node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0))
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig3 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(3))
await node3.mountRlnRelay(wakuRlnConfig3)
await node3.start()
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let idCredentials3 = generateCredentials()
try:
waitFor manager3.register(idCredentials3, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated3 = waitFor manager3.updateRoots()
info "Updated root for node", node = 3, rootUpdated = rootUpdated3
let shards =
@[RelayShard(clusterId: 0, shardId: 0), RelayShard(clusterId: 0, shardId: 1)]
@@ -169,31 +229,9 @@ procSuite "WakuNode - RLN relay":
ContentTopic("/waku/2/content-topic-b/proto"),
]
# set up three nodes
await allFutures(nodes.mapIt(it.mountRelay()))
# mount rlnrelay in off-chain mode
for index, node in nodes:
let wakuRlnConfig =
getWakuRlnConfig(manager = manager, index = MembershipIndex(index + 1))
await node.mountRlnRelay(wakuRlnConfig)
await node.start()
let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager)
let idCredentials = generateCredentials()
try:
waitFor manager.register(idCredentials, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated = waitFor manager.updateRoots()
info "Updated root for node", node = index + 1, rootUpdated = rootUpdated
# connect them together
await nodes[0].connectToNodes(@[nodes[1].switch.peerInfo.toRemotePeerInfo()])
await nodes[2].connectToNodes(@[nodes[1].switch.peerInfo.toRemotePeerInfo()])
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
var rxMessagesTopic1 = 0
var rxMessagesTopic2 = 0
@@ -211,15 +249,15 @@ procSuite "WakuNode - RLN relay":
): Future[void] {.async, gcsafe.} =
await sleepAsync(0.milliseconds)
nodes[0].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic in nodes[0]: " & $error
nodes[1].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic in nodes[1]: " & $error
node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic in node1: " & $error
node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic in node2: " & $error
# mount the relay handlers
nodes[2].subscribe((kind: PubsubSub, topic: $shards[0]), relayHandler).isOkOr:
node3.subscribe((kind: PubsubSub, topic: $shards[0]), relayHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic: " & $error
nodes[2].subscribe((kind: PubsubSub, topic: $shards[1]), relayHandler).isOkOr:
node3.subscribe((kind: PubsubSub, topic: $shards[1]), relayHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic: " & $error
await sleepAsync(1000.millis)
@@ -236,8 +274,8 @@ procSuite "WakuNode - RLN relay":
contentTopic: contentTopics[0],
)
nodes[0].wakuRlnRelay.unsafeAppendRLNProof(
message, nodes[0].wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8)
node1.wakuRlnRelay.unsafeAppendRLNProof(
message, node1.wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8)
).isOkOr:
raiseAssert $error
messages1.add(message)
@@ -249,8 +287,8 @@ procSuite "WakuNode - RLN relay":
contentTopic: contentTopics[1],
)
nodes[1].wakuRlnRelay.unsafeAppendRLNProof(
message, nodes[1].wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8)
node2.wakuRlnRelay.unsafeAppendRLNProof(
message, node2.wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8)
).isOkOr:
raiseAssert $error
messages2.add(message)
@@ -258,9 +296,9 @@ procSuite "WakuNode - RLN relay":
# publish 3 messages from node[0] (last 2 are spam, window is 10 secs)
# publish 3 messages from node[1] (last 2 are spam, window is 10 secs)
for msg in messages1:
discard await nodes[0].publish(some($shards[0]), msg)
discard await node1.publish(some($shards[0]), msg)
for msg in messages2:
discard await nodes[1].publish(some($shards[1]), msg)
discard await node2.publish(some($shards[1]), msg)
# wait for gossip to propagate
await sleepAsync(5000.millis)
@@ -271,70 +309,70 @@ procSuite "WakuNode - RLN relay":
rxMessagesTopic1 == 3
rxMessagesTopic2 == 3
await allFutures(nodes.mapIt(it.stop()))
await node1.stop()
await node2.stop()
await node3.stop()
asyncTest "testing rln-relay with invalid proof":
let
var node1, node2, node3: WakuNode
let contentTopic = ContentTopic("/waku/2/default-content/proto")
lockNewGlobalBrokerContext:
# publisher node
nodeKey1 = generateSecp256k1Key()
let nodeKey1 = generateSecp256k1Key()
node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0))
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
let idCredentials1 = generateCredentials()
try:
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots()
info "Updated root for node1", rootUpdated1
lockNewGlobalBrokerContext:
# Relay node
nodeKey2 = generateSecp256k1Key()
let nodeKey2 = generateSecp256k1Key()
node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0))
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(2))
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
info "Updated root for node2", rootUpdated2
lockNewGlobalBrokerContext:
# Subscriber
nodeKey3 = generateSecp256k1Key()
let nodeKey3 = generateSecp256k1Key()
node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0))
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
contentTopic = ContentTopic("/waku/2/default-content/proto")
let wakuRlnConfig3 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(3))
# set up three nodes
# node1
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node3.mountRlnRelay(wakuRlnConfig3)
await node3.start()
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
let idCredentials1 = generateCredentials()
try:
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots()
info "Updated root for node1", rootUpdated1
# node 2
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 = getWakuRlnConfig(manager = manager, index = MembershipIndex(2))
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
info "Updated root for node2", rootUpdated2
# node 3
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig3 = getWakuRlnConfig(manager = manager, index = MembershipIndex(3))
await node3.mountRlnRelay(wakuRlnConfig3)
await node3.start()
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots()
info "Updated root for node3", rootUpdated3
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots()
info "Updated root for node3", rootUpdated3
# connect them together
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -390,72 +428,70 @@ procSuite "WakuNode - RLN relay":
await node3.stop()
asyncTest "testing rln-relay double-signaling detection":
let
var node1, node2, node3: WakuNode
let contentTopic = ContentTopic("/waku/2/default-content/proto")
lockNewGlobalBrokerContext:
# publisher node
nodeKey1 = generateSecp256k1Key()
let nodeKey1 = generateSecp256k1Key()
node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0))
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
# Registration is mandatory before sending messages with rln-relay
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
let idCredentials1 = generateCredentials()
try:
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots()
info "Updated root for node1", rootUpdated1
lockNewGlobalBrokerContext:
# Relay node
nodeKey2 = generateSecp256k1Key()
let nodeKey2 = generateSecp256k1Key()
node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0))
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(2))
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
# Registration is mandatory before sending messages with rln-relay
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
info "Updated root for node2", rootUpdated2
lockNewGlobalBrokerContext:
# Subscriber
nodeKey3 = generateSecp256k1Key()
let nodeKey3 = generateSecp256k1Key()
node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0))
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
contentTopic = ContentTopic("/waku/2/default-content/proto")
# mount rlnrelay in off-chain mode
let wakuRlnConfig3 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(3))
# set up three nodes
# node1
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node3.mountRlnRelay(wakuRlnConfig3)
await node3.start()
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
# Registration is mandatory before sending messages with rln-relay
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
let idCredentials1 = generateCredentials()
try:
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots()
info "Updated root for node1", rootUpdated1
# node 2
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 = getWakuRlnConfig(manager = manager, index = MembershipIndex(2))
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
# Registration is mandatory before sending messages with rln-relay
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
info "Updated root for node2", rootUpdated2
# node 3
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig3 = getWakuRlnConfig(manager = manager, index = MembershipIndex(3))
await node3.mountRlnRelay(wakuRlnConfig3)
await node3.start()
# Registration is mandatory before sending messages with rln-relay
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots()
info "Updated root for node3", rootUpdated3
# Registration is mandatory before sending messages with rln-relay
let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager)
let rootUpdated3 = waitFor manager3.updateRoots()
info "Updated root for node3", rootUpdated3
# connect the nodes together node1 <-> node2 <-> node3
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -565,49 +601,49 @@ procSuite "WakuNode - RLN relay":
xasyncTest "clearNullifierLog: should clear epochs > MaxEpochGap":
## This is skipped because is flaky and made CI randomly fail but is useful to run manually
# Given two nodes
var node1, node2: WakuNode
let
contentTopic = ContentTopic("/waku/2/default-content/proto")
shardSeq = @[DefaultRelayShard]
nodeKey1 = generateSecp256k1Key()
node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0))
nodeKey2 = generateSecp256k1Key()
node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0))
epochSizeSec: uint64 = 5 # This means rlnMaxEpochGap = 4
lockNewGlobalBrokerContext:
let nodeKey1 = generateSecp256k1Key()
node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0))
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig1 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
# Given both nodes mount relay and rlnrelay
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
await node1.mountRlnRelay(wakuRlnConfig1)
await node1.start()
# Registration is mandatory before sending messages with rln-relay
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
let idCredentials1 = generateCredentials()
# Registration is mandatory before sending messages with rln-relay
let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager)
let idCredentials1 = generateCredentials()
try:
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
try:
waitFor manager1.register(idCredentials1, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
let rootUpdated1 = waitFor manager1.updateRoots()
info "Updated root for node1", rootUpdated1
lockNewGlobalBrokerContext:
let nodeKey2 = generateSecp256k1Key()
node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0))
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig2 =
getWakuRlnConfig(manager = manager, index = MembershipIndex(2))
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
let rootUpdated1 = waitFor manager1.updateRoots()
info "Updated root for node1", rootUpdated1
# Mount rlnrelay in node2 in off-chain mode
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig2 = getWakuRlnConfig(manager = manager, index = MembershipIndex(2))
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
# Registration is mandatory before sending messages with rln-relay
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
info "Updated root for node2", rootUpdated2
# Registration is mandatory before sending messages with rln-relay
let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager)
let rootUpdated2 = waitFor manager2.updateRoots()
info "Updated root for node2", rootUpdated2
# Given the two nodes are started and connected
waitFor allFutures(node1.start(), node2.start())
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
# Given some messages


@@ -60,7 +60,8 @@ suite "Wakunode2 - Waku initialization":
not node.wakuRendezvous.isNil()
## Cleanup
waitFor waku.stop()
(waitFor waku.stop()).isOkOr:
raiseAssert error
test "app properly handles dynamic port configuration":
## Given
@@ -96,4 +97,5 @@ suite "Wakunode2 - Waku initialization":
typedNodeEnr.get().tcp.get() != 0
## Cleanup
waitFor waku.stop()
(waitFor waku.stop()).isOkOr:
raiseAssert error


@@ -21,6 +21,7 @@ import
rest_api/endpoint/relay/client as relay_rest_client,
waku_relay,
waku_rln_relay,
common/broker/broker_context,
],
../testlib/wakucore,
../testlib/wakunode,
@@ -505,15 +506,41 @@ suite "Waku v2 Rest API - Relay":
asyncTest "Post a message to a content topic - POST /relay/v1/auto/messages/{topic}":
## "Relay API: publish and subscribe/unsubscribe":
# Given
let node = testWakuNode()
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
require node.mountAutoSharding(1, 8).isOk
var meshNode: WakuNode
lockNewGlobalBrokerContext:
meshNode = testWakuNode()
(await meshNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
require meshNode.mountAutoSharding(1, 8).isOk
let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
let wakuRlnConfig =
getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
await meshNode.mountRlnRelay(wakuRlnConfig)
await meshNode.start()
const testPubsubTopic = PubsubTopic("/waku/2/rs/1/0")
proc dummyHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
discard
meshNode.subscribe((kind: ContentSub, topic: DefaultContentTopic), dummyHandler).isOkOr:
raiseAssert "Failed to subscribe meshNode: " & error
var node: WakuNode
lockNewGlobalBrokerContext:
node = testWakuNode()
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
require node.mountAutoSharding(1, 8).isOk
let wakuRlnConfig =
getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
await node.mountRlnRelay(wakuRlnConfig)
await node.start()
await node.connectToNodes(@[meshNode.peerInfo.toRemotePeerInfo()])
await node.mountRlnRelay(wakuRlnConfig)
await node.start()
# Registration is mandatory before sending messages with rln-relay
let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager)
let idCredentials = generateCredentials()


@@ -1,10 +1,10 @@
## Main module for using nwaku as a Nimble library
##
##
## This module re-exports the public API for creating and managing Waku nodes
## when using nwaku as a library dependency.
import waku/api/[api, api_conf]
export api, api_conf
import waku/api
export api
import waku/factory/waku
export waku


@@ -136,7 +136,7 @@ task testwakunode2, "Build & run wakunode2 app tests":
test "all_tests_wakunode2"
task example2, "Build Waku examples":
buildBinary "waku_example", "examples/"
buildBinary "api_example", "examples/api_example/"
buildBinary "publisher", "examples/"
buildBinary "subscriber", "examples/"
buildBinary "filter_subscriber", "examples/"
@@ -176,6 +176,10 @@ task lightpushwithmix, "Build lightpushwithmix":
let name = "lightpush_publisher_mix"
buildBinary name, "examples/lightpush_mix/"
task api_example, "Build api_example":
let name = "api_example"
buildBinary name, "examples/api_example/"
task buildone, "Build custom target":
let filepath = paramStr(paramCount())
discard buildModule filepath


@@ -1,3 +1,4 @@
import ./api/[api, api_conf, entry_nodes]
import ./events/message_events
export api, api_conf, entry_nodes
export api, api_conf, entry_nodes, message_events


@@ -1,8 +1,13 @@
import chronicles, chronos, results
import waku/factory/waku
import waku/[requests/health_request, waku_core, waku_node]
import waku/node/delivery_service/send_service
import waku/node/delivery_service/subscription_service
import ./[api_conf, types]
import ./api_conf
logScope:
topics = "api"
# TODO: The spec says it should return a `WakuNode`. As `send` and other APIs are defined, we can align.
proc createNode*(config: NodeConfig): Future[Result[Waku, string]] {.async.} =
@ -15,3 +20,53 @@ proc createNode*(config: NodeConfig): Future[Result[Waku, string]] {.async.} =
return err("Failed setting up Waku: " & $error)
return ok(wakuRes)
proc checkApiAvailability(w: Waku): Result[void, string] =
if w.isNil():
return err("Waku node is not initialized")
# check if health is satisfactory
# If Node is not healthy, return err("Waku node is not healthy")
let healthStatus = RequestNodeHealth.request(w.brokerCtx)
if healthStatus.isErr():
warn "Failed to get Waku node health status: ", error = healthStatus.error
# Let's suppose the node is hesalthy enough, go ahead
else:
if healthStatus.get().healthStatus == NodeHealth.Unhealthy:
return err("Waku node is not healthy, has got no connections.")
return ok()
proc subscribe*(
w: Waku, contentTopic: ContentTopic
): Future[Result[void, string]] {.async.} =
?checkApiAvailability(w)
return w.deliveryService.subscriptionService.subscribe(contentTopic)
proc unsubscribe*(w: Waku, contentTopic: ContentTopic): Result[void, string] =
?checkApiAvailability(w)
return w.deliveryService.subscriptionService.unsubscribe(contentTopic)
proc send*(
w: Waku, envelope: MessageEnvelope
): Future[Result[RequestId, string]] {.async.} =
?checkApiAvailability(w)
let requestId = RequestId.new(w.rng)
let deliveryTask = DeliveryTask.new(requestId, envelope, w.brokerCtx).valueOr:
return err("API send: Failed to create delivery task: " & error)
info "API send: scheduling delivery task",
requestId = $requestId,
pubsubTopic = deliveryTask.pubsubTopic,
contentTopic = deliveryTask.msg.contentTopic,
msgHash = deliveryTask.msgHash.to0xHex(),
myPeerId = w.node.peerId()
asyncSpawn w.deliveryService.sendService.send(deliveryTask)
return ok(requestId)
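
# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative only, not part of the API surface):
# drives createNode/subscribe/send defined above. The content topic and
# payload are hypothetical, and it assumes NodeConfig.init() defaults are
# sufficient; starting the node, which the full app flow requires, is elided.
when isMainModule:
  proc demo() {.async.} =
    let waku = (await createNode(NodeConfig.init())).valueOr:
      echo "node setup failed: ", error
      return
    (await waku.subscribe("/demo/1/chat/proto")).isOkOr:
      echo "subscribe failed: ", error
      return
    let requestId = (
      await waku.send(MessageEnvelope.init("/demo/1/chat/proto", "hello"))
    ).valueOr:
      echo "send failed: ", error
      return
    echo "send scheduled, requestId: ", $requestId

  waitFor demo()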

View File

@ -86,6 +86,7 @@ type NodeConfig* {.requiresInit.} = object
protocolsConfig: ProtocolsConfig
networkingConfig: NetworkingConfig
ethRpcEndpoints: seq[string]
p2pReliability: bool
proc init*(
T: typedesc[NodeConfig],
@ -93,12 +94,14 @@ proc init*(
protocolsConfig: ProtocolsConfig = TheWakuNetworkPreset,
networkingConfig: NetworkingConfig = DefaultNetworkingConfig,
ethRpcEndpoints: seq[string] = @[],
p2pReliability: bool = false,
): T =
return T(
mode: mode,
protocolsConfig: protocolsConfig,
networkingConfig: networkingConfig,
ethRpcEndpoints: ethRpcEndpoints,
p2pReliability: p2pReliability,
)
proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] =
@ -131,7 +134,16 @@ proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] =
b.rateLimitConf.withRateLimits(@["filter:100/1s", "lightpush:5/1s", "px:5/1s"])
of Edge:
return err("Edge mode is not implemented")
# All client side protocols are mounted by default
# Peer exchange client is always enabled and start_node will start the px loop
# Metadata is always mounted
b.withPeerExchange(true)
# switch off all service side protocols and relay
b.withRelay(false)
b.filterServiceConf.withEnabled(false)
b.withLightPush(false)
b.storeServiceConf.withEnabled(false)
# Leave discv5 and rendezvous for user choice
## Network Conf
let protocolsConfig = nodeConfig.protocolsConfig
@ -193,6 +205,7 @@ proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] =
## Various configurations
b.withNatStrategy("any")
b.withP2PReliability(nodeConfig.p2pReliability)
let wakuConf = b.build().valueOr:
return err("Failed to build configuration: " & error)

46
waku/api/send_api.md Normal file
View File

@ -0,0 +1,46 @@
# SEND API
**THIS IS TO BE REMOVED BEFORE PR MERGE**
This document collects the logic and TODOs around the Send API.
## Overview
The Send API hides the complex logic of using raw protocols for reliable message delivery.
The delivery method is chosen based on the node configuration and the actual availability of peers.
## Delivery task
Each message send request is bundled into a task that holds not just the composed message but also the state of its delivery.
## Delivery methods
Depending on the configuration and the availability of the store client protocol plus the actually configured and/or discovered store nodes:
- P2PReliability validation - querying a network store node to check whether the message has reached at least one store node.
- Simple retry until the message is propagated to the network, i.e. until:
  - Relay reports >0 peers as the publish result, or
  - LightpushClient returns with success
Depending on the node config:
- Relay
- Lightpush
These methods are used in combination to achieve the best reliability.
A fallback mechanism switches between methods when the current one fails:
Relay+StoreCheck -> Relay+simple retry -> Lightpush+StoreCheck -> Lightpush+simple retry -> Error
The combination is chosen dynamically from the node configuration; levels can be skipped depending on actual connectivity.
Actual connectivity is checked as follows; a sketch of the resulting chain selection appears after this list:
- Relay's topic health check - at least dLow peers in the mesh for the topic
- Store nodes availability - at least one store service node is available in peer manager
- Lightpush client availability - at least one lightpush service node is available in peer manager
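Below is an illustrative Nim sketch of how the fallback chain could be assembled from these checks. The proc and parameter names are simplified stand-ins, not the real `send_service` types:

```nim
# Illustrative only: build the ordered fallback-level list from availability.
proc buildChain(relay, lightpush, store: bool): seq[string] =
  if relay:
    if store: result.add "Relay+StoreCheck"
    result.add "Relay+simple retry"
  if lightpush:
    if store: result.add "Lightpush+StoreCheck"
    result.add "Lightpush+simple retry"
  result.add "Error" # final level when every previous one fails

assert buildChain(relay = true, lightpush = true, store = true) == @[
  "Relay+StoreCheck", "Relay+simple retry",
  "Lightpush+StoreCheck", "Lightpush+simple retry", "Error",
]
```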
## Delivery processing
On every send request, delivery of the task is attempted right away.
Any further retries and store checks run as a background task in a loop with predefined intervals.
Each task is given a maximum number of retries and/or a maximum time to live.
In each round of store checking and retrying, send tasks are selected based on their state.
The state is updated based on the result of the delivery method; a condensed sketch of one round follows.
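The sketch below condenses one background round (illustrative only; the real loop is `serviceLoop` in `send_service.nim`, and `attemptSend` stands in for the processor chain):

```nim
import std/sequtils

type TaskState = enum
  NextRoundRetry, Propagated, Validated, Failed

type Task = ref object
  state: TaskState

proc attemptSend(t: Task) =
  t.state = Propagated # stand-in: assume the (re)send succeeded

# One round: retry pending tasks, then drop tasks that reached a final state.
proc round(tasks: var seq[Task]) =
  for t in tasks:
    if t.state == NextRoundRetry:
      attemptSend(t)
  # a store check would promote Propagated tasks to Validated here
  tasks.keepItIf(it.state notin {Validated, Failed})
```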

65
waku/api/types.nim Normal file
View File

@ -0,0 +1,65 @@
{.push raises: [].}
import bearssl/rand, std/times, chronos
import stew/byteutils
import waku/utils/requests as request_utils
import waku/waku_core/[topics/content_topic, message/message, time]
import waku/requests/requests
type
MessageEnvelope* = object
contentTopic*: ContentTopic
payload*: seq[byte]
ephemeral*: bool
RequestId* = distinct string
NodeHealth* {.pure.} = enum
Healthy
MinimallyHealthy
Unhealthy
proc new*(T: typedesc[RequestId], rng: ref HmacDrbgContext): T =
## Generate a new RequestId using the provided RNG.
RequestId(request_utils.generateRequestId(rng))
proc `$`*(r: RequestId): string {.inline.} =
string(r)
proc `==`*(a, b: RequestId): bool {.inline.} =
string(a) == string(b)
proc init*(
T: type MessageEnvelope,
contentTopic: ContentTopic,
payload: seq[byte] | string,
ephemeral: bool = false,
): MessageEnvelope =
when payload is seq[byte]:
MessageEnvelope(contentTopic: contentTopic, payload: payload, ephemeral: ephemeral)
else:
MessageEnvelope(
contentTopic: contentTopic, payload: payload.toBytes(), ephemeral: ephemeral
)
proc toWakuMessage*(envelope: MessageEnvelope): WakuMessage =
## Convert a MessageEnvelope to a WakuMessage.
var wm = WakuMessage(
contentTopic: envelope.contentTopic,
payload: envelope.payload,
ephemeral: envelope.ephemeral,
timestamp: getNowInNanosecondTime(),
)
## TODO: First find out if proof is needed at all
## Follow up: left it to the send logic to add RLN proof if needed and possible
# let requestedProof = (
# waitFor RequestGenerateRlnProof.request(wm, getTime().toUnixFloat())
# ).valueOr:
# warn "Failed to add RLN proof to WakuMessage: ", error = error
# return wm
# wm.proof = requestedProof.proof
return wm
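
# Editor's sketch: constructing an envelope and converting it to a message.
# The content topic and payload below are hypothetical placeholders.
when isMainModule:
  let envelope = MessageEnvelope.init(
    contentTopic = "/demo/1/chat/proto", payload = "hello waku", ephemeral = true
  )
  let wakuMsg = envelope.toWakuMessage()
  assert wakuMsg.contentTopic == envelope.contentTopic
  assert wakuMsg.payload == "hello waku".toBytes()
  assert wakuMsg.ephemeral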
{.pop.}

View File

@ -0,0 +1,27 @@
import waku/waku_core/[message/message, message/digest], waku/common/broker/event_broker
type DeliveryDirection* {.pure.} = enum
PUBLISHING
RECEIVING
type DeliverySuccess* {.pure.} = enum
SUCCESSFUL
UNSUCCESSFUL
EventBroker:
type DeliveryFeedbackEvent* = ref object
success*: DeliverySuccess
dir*: DeliveryDirection
comment*: string
msgHash*: WakuMessageHash
msg*: WakuMessage
EventBroker:
type OnFilterSubscribeEvent* = object
pubsubTopic*: string
contentTopics*: seq[string]
EventBroker:
type OnFilterUnSubscribeEvent* = object
pubsubTopic*: string
contentTopics*: seq[string]

3
waku/events/events.nim Normal file
View File

@ -0,0 +1,3 @@
import ./[message_events, delivery_events]
export message_events, delivery_events

View File

@ -0,0 +1,30 @@
import waku/common/broker/event_broker
import waku/api/types
import waku/waku_core/message
export types
EventBroker:
# Event emitted when a message is sent to the network
type MessageSentEvent* = object
requestId*: RequestId
messageHash*: string
EventBroker:
# Event emitted when a message send operation fails
type MessageErrorEvent* = object
requestId*: RequestId
messageHash*: string
error*: string
EventBroker:
# Confirmation that a message has been correctly delivered to some neighbouring nodes.
type MessagePropagatedEvent* = object
requestId*: RequestId
messageHash*: string
EventBroker:
# Event emitted when a message is received via Waku
type MessageReceivedEvent* = object
messageHash*: string
message*: WakuMessage
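
# Editor's sketch (commented out, hedged): consuming these events follows the
# listener pattern used elsewhere in this PR, i.e. `Event.listen(brokerCtx,
# handler)` returning a listener that is later removed with
# `Event.dropListener`. `ctx` is an assumed, available BrokerContext.
#
#   let onSent = MessageSentEvent.listen(
#     ctx,
#     proc(ev: MessageSentEvent) {.async: (raises: []).} =
#       echo "sent: ", $ev.requestId, " hash: ", ev.messageHash,
#   ).valueOr:
#     echo "failed to register listener: ", error
#     return
#   # ... later:
#   MessageSentEvent.dropListener(ctx, onSent)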

View File

@ -25,7 +25,7 @@ import
../node/peer_manager,
../node/health_monitor,
../node/waku_metrics,
../node/delivery_monitor/delivery_monitor,
../node/delivery_service/delivery_service,
../rest_api/message_cache,
../rest_api/endpoint/server,
../rest_api/endpoint/builder as rest_server_builder,
@ -42,7 +42,10 @@ import
../factory/internal_config,
../factory/app_callbacks,
../waku_enr/multiaddr,
./waku_conf
./waku_conf,
../common/broker/broker_context,
../requests/health_request,
../api/types
logScope:
topics = "wakunode waku"
@ -66,12 +69,14 @@ type Waku* = ref object
healthMonitor*: NodeHealthMonitor
deliveryMonitor: DeliveryMonitor
deliveryService*: DeliveryService
restServer*: WakuRestServerRef
metricsServer*: MetricsHttpServerRef
appCallbacks*: AppCallbacks
brokerCtx*: BrokerContext
func version*(waku: Waku): string =
waku.version
@ -160,6 +165,7 @@ proc new*(
T: type Waku, wakuConf: WakuConf, appCallbacks: AppCallbacks = nil
): Future[Result[Waku, string]] {.async.} =
let rng = crypto.newRng()
let brokerCtx = globalBrokerContext()
logging.setupLog(wakuConf.logLevel, wakuConf.logFormat)
@ -197,16 +203,8 @@ proc new*(
return err("Failed setting up app callbacks: " & $error)
## Delivery Monitor
var deliveryMonitor: DeliveryMonitor
if wakuConf.p2pReliability:
if wakuConf.remoteStoreNode.isNone():
return err("A storenode should be set when reliability mode is on")
let deliveryMonitor = DeliveryMonitor.new(
node.wakuStoreClient, node.wakuRelay, node.wakuLightpushClient,
node.wakuFilterClient,
).valueOr:
return err("could not create delivery monitor: " & $error)
let deliveryService = DeliveryService.new(wakuConf.p2pReliability, node).valueOr:
return err("could not create delivery service: " & $error)
var waku = Waku(
version: git_version,
@ -215,9 +213,10 @@ proc new*(
key: wakuConf.nodeKey,
node: node,
healthMonitor: healthMonitor,
deliveryMonitor: deliveryMonitor,
deliveryService: deliveryService,
appCallbacks: appCallbacks,
restServer: restServer,
brokerCtx: brokerCtx,
)
waku.setupSwitchServices(wakuConf, relay, rng)
@ -353,7 +352,7 @@ proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} =
error "failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg()
return
proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async: (raises: []).} =
if waku[].node.started:
warn "startWaku: waku node already started"
return ok()
@ -363,9 +362,15 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
if conf.dnsDiscoveryConf.isSome():
let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get()
let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers
)
let dynamicBootstrapNodesRes =
try:
await waku_dnsdisc.retrieveDynamicBootstrapNodes(
dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers
)
except CatchableError as exc:
Result[seq[RemotePeerInfo], string].err(
"Retrieving dynamic bootstrap nodes failed: " & exc.msg
)
if dynamicBootstrapNodesRes.isErr():
error "Retrieving dynamic bootstrap nodes failed",
@ -379,8 +384,11 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
return err("error while calling startNode: " & $error)
## Update waku data that is set dynamically on node start
(await updateWaku(waku)).isOkOr:
return err("Error in updateApp: " & $error)
try:
(await updateWaku(waku)).isOkOr:
return err("Error in updateApp: " & $error)
except CatchableError:
return err("Caught exception in updateApp: " & getCurrentExceptionMsg())
## Discv5
if conf.discv5Conf.isSome():
@ -400,13 +408,68 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
return err("failed to start waku discovery v5: " & $error)
## Reliability
if not waku[].deliveryMonitor.isNil():
waku[].deliveryMonitor.startDeliveryMonitor()
if not waku[].deliveryService.isNil():
waku[].deliveryService.startDeliveryService()
## Health Monitor
waku[].healthMonitor.startHealthMonitor().isOkOr:
return err("failed to start health monitor: " & $error)
## Setup RequestNodeHealth provider
RequestNodeHealth.setProvider(
globalBrokerContext(),
proc(): Result[RequestNodeHealth, string] =
let healthReportFut = waku[].healthMonitor.getNodeHealthReport()
if not healthReportFut.completed():
return err("Health report not available")
try:
let healthReport = healthReportFut.read()
# Check if Relay or Lightpush Client is ready (MinimallyHealthy condition)
var relayReady = false
var lightpushClientReady = false
var storeClientReady = false
var filterClientReady = false
for protocolHealth in healthReport.protocolsHealth:
if protocolHealth.protocol == "Relay" and
protocolHealth.health == HealthStatus.READY:
relayReady = true
elif protocolHealth.protocol == "Lightpush Client" and
protocolHealth.health == HealthStatus.READY:
lightpushClientReady = true
elif protocolHealth.protocol == "Store Client" and
protocolHealth.health == HealthStatus.READY:
storeClientReady = true
elif protocolHealth.protocol == "Filter Client" and
protocolHealth.health == HealthStatus.READY:
filterClientReady = true
# Determine node health based on protocol states
let isMinimallyHealthy = relayReady or lightpushClientReady
let nodeHealth =
if isMinimallyHealthy and storeClientReady and filterClientReady:
NodeHealth.Healthy
elif isMinimallyHealthy:
NodeHealth.MinimallyHealthy
else:
NodeHealth.Unhealthy
debug "Providing health report",
nodeHealth = $nodeHealth,
relayReady = relayReady,
lightpushClientReady = lightpushClientReady,
storeClientReady = storeClientReady,
filterClientReady = filterClientReady,
details = $(healthReport)
return ok(RequestNodeHealth(healthStatus: nodeHealth))
except CatchableError as exc:
err("Failed to read health report: " & exc.msg),
).isOkOr:
error "Failed to set RequestNodeHealth provider", error = error
if conf.restServerConf.isSome():
rest_server_builder.startRestServerProtocolSupport(
waku[].restServer,
@ -422,41 +485,65 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
return err ("Starting protocols support REST server failed: " & $error)
if conf.metricsServerConf.isSome():
waku[].metricsServer = (
await (
waku_metrics.startMetricsServerAndLogging(
conf.metricsServerConf.get(), conf.portsShift
try:
waku[].metricsServer = (
await (
waku_metrics.startMetricsServerAndLogging(
conf.metricsServerConf.get(), conf.portsShift
)
)
).valueOr:
return err("Starting monitoring and external interfaces failed: " & error)
except CatchableError:
return err(
"Caught exception starting monitoring and external interfaces failed: " &
getCurrentExceptionMsg()
)
).valueOr:
return err("Starting monitoring and external interfaces failed: " & error)
waku[].healthMonitor.setOverallHealth(HealthStatus.READY)
return ok()
proc stop*(waku: Waku): Future[void] {.async: (raises: [Exception]).} =
proc stop*(waku: Waku): Future[Result[void, string]] {.async: (raises: []).} =
## Waku shutdown
if not waku.node.started:
warn "stop: attempting to stop node that isn't running"
waku.healthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN)
try:
waku.healthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN)
if not waku.metricsServer.isNil():
await waku.metricsServer.stop()
if not waku.metricsServer.isNil():
await waku.metricsServer.stop()
if not waku.wakuDiscv5.isNil():
await waku.wakuDiscv5.stop()
if not waku.wakuDiscv5.isNil():
await waku.wakuDiscv5.stop()
if not waku.node.isNil():
await waku.node.stop()
if not waku.node.isNil():
await waku.node.stop()
if not waku.dnsRetryLoopHandle.isNil():
await waku.dnsRetryLoopHandle.cancelAndWait()
if not waku.dnsRetryLoopHandle.isNil():
await waku.dnsRetryLoopHandle.cancelAndWait()
if not waku.healthMonitor.isNil():
await waku.healthMonitor.stopHealthMonitor()
if not waku.healthMonitor.isNil():
await waku.healthMonitor.stopHealthMonitor()
if not waku.restServer.isNil():
await waku.restServer.stop()
## Clear RequestNodeHealth provider
RequestNodeHealth.clearProvider(waku.brokerCtx)
if not waku.restServer.isNil():
await waku.restServer.stop()
except Exception:
error "waku stop failed: " & getCurrentExceptionMsg()
return err("waku stop failed: " & getCurrentExceptionMsg())
return ok()
proc isModeCoreAvailable*(waku: Waku): bool =
return not waku.node.wakuRelay.isNil()
proc isModeEdgeAvailable*(waku: Waku): bool =
return
waku.node.wakuRelay.isNil() and not waku.node.wakuStoreClient.isNil() and
not waku.node.wakuFilterClient.isNil() and not waku.node.wakuLightPushClient.isNil()
{.pop.}

View File

@ -1,17 +0,0 @@
import ../../waku_core
type DeliveryDirection* {.pure.} = enum
PUBLISHING
RECEIVING
type DeliverySuccess* {.pure.} = enum
SUCCESSFUL
UNSUCCESSFUL
type DeliveryFeedbackCallback* = proc(
success: DeliverySuccess,
dir: DeliveryDirection,
comment: string,
msgHash: WakuMessageHash,
msg: WakuMessage,
) {.gcsafe, raises: [].}

View File

@ -1,43 +0,0 @@
## This module helps to ensure the correct transmission and reception of messages
import results
import chronos
import
./recv_monitor,
./send_monitor,
./delivery_callback,
../../waku_core,
../../waku_store/client,
../../waku_relay/protocol,
../../waku_lightpush/client,
../../waku_filter_v2/client
type DeliveryMonitor* = ref object
sendMonitor: SendMonitor
recvMonitor: RecvMonitor
proc new*(
T: type DeliveryMonitor,
storeClient: WakuStoreClient,
wakuRelay: protocol.WakuRelay,
wakuLightpushClient: WakuLightpushClient,
wakuFilterClient: WakuFilterClient,
): Result[T, string] =
## storeClient is needed to give store visitility to DeliveryMonitor
## wakuRelay and wakuLightpushClient are needed to give a mechanism to SendMonitor to re-publish
let sendMonitor = ?SendMonitor.new(storeClient, wakuRelay, wakuLightpushClient)
let recvMonitor = RecvMonitor.new(storeClient, wakuFilterClient)
return ok(DeliveryMonitor(sendMonitor: sendMonitor, recvMonitor: recvMonitor))
proc startDeliveryMonitor*(self: DeliveryMonitor) =
self.sendMonitor.startSendMonitor()
self.recvMonitor.startRecvMonitor()
proc stopDeliveryMonitor*(self: DeliveryMonitor) {.async.} =
self.sendMonitor.stopSendMonitor()
await self.recvMonitor.stopRecvMonitor()
proc setDeliveryCallback*(self: DeliveryMonitor, deliveryCb: DeliveryFeedbackCallback) =
## The deliveryCb is a proc defined by the api client so that it can get delivery feedback
self.sendMonitor.setDeliveryCallback(deliveryCb)
self.recvMonitor.setDeliveryCallback(deliveryCb)

View File

@ -1,9 +0,0 @@
import chronicles
import ../../waku_core/message/message
type PublishObserver* = ref object of RootObj
method onMessagePublished*(
self: PublishObserver, pubsubTopic: string, message: WakuMessage
) {.base, gcsafe, raises: [].} =
error "onMessagePublished not implemented"

View File

@ -1,212 +0,0 @@
## This module reinforces the publish operation with regular store-v3 requests.
##
import std/[sequtils, tables]
import chronos, chronicles, libp2p/utility
import
./delivery_callback,
./publish_observer,
../../waku_core,
./not_delivered_storage/not_delivered_storage,
../../waku_store/[client, common],
../../waku_archive/archive,
../../waku_relay/protocol,
../../waku_lightpush/client
const MaxTimeInCache* = chronos.minutes(1)
## Messages older than this time will get completely forgotten on publication and a
## feedback will be given when that happens
const SendCheckInterval* = chronos.seconds(3)
## Interval at which we check that messages have been properly received by a store node
const MaxMessagesToCheckAtOnce = 100
## Max number of messages to check if they were properly archived by a store node
const ArchiveTime = chronos.seconds(3)
## Estimation of the time we wait until we start confirming that a message has been properly
## received and archived by a store node
type DeliveryInfo = object
pubsubTopic: string
msg: WakuMessage
type SendMonitor* = ref object of PublishObserver
publishedMessages: Table[WakuMessageHash, DeliveryInfo]
## Cache that contains the delivery info per message hash.
## This is needed to make sure the published messages are properly published
msgStoredCheckerHandle: Future[void] ## handle that allows to stop the async task
notDeliveredStorage: NotDeliveredStorage
## NOTE: this is not fully used because that might be tackled by higher abstraction layers
storeClient: WakuStoreClient
deliveryCb: DeliveryFeedbackCallback
wakuRelay: protocol.WakuRelay
wakuLightpushClient: WakuLightPushClient
proc new*(
T: type SendMonitor,
storeClient: WakuStoreClient,
wakuRelay: protocol.WakuRelay,
wakuLightpushClient: WakuLightPushClient,
): Result[T, string] =
if wakuRelay.isNil() and wakuLightpushClient.isNil():
return err(
"Could not create SendMonitor. wakuRelay or wakuLightpushClient should be set"
)
let notDeliveredStorage = ?NotDeliveredStorage.new()
let sendMonitor = SendMonitor(
notDeliveredStorage: notDeliveredStorage,
storeClient: storeClient,
wakuRelay: wakuRelay,
wakuLightpushClient: wakuLightPushClient,
)
if not wakuRelay.isNil():
wakuRelay.addPublishObserver(sendMonitor)
if not wakuLightpushClient.isNil():
wakuLightpushClient.addPublishObserver(sendMonitor)
return ok(sendMonitor)
proc performFeedbackAndCleanup(
self: SendMonitor,
msgsToDiscard: Table[WakuMessageHash, DeliveryInfo],
success: DeliverySuccess,
dir: DeliveryDirection,
comment: string,
) =
## This procs allows to bring delivery feedback to the API client
## It requires a 'deliveryCb' to be registered beforehand.
if self.deliveryCb.isNil():
error "deliveryCb is nil in performFeedbackAndCleanup",
success, dir, comment, hashes = toSeq(msgsToDiscard.keys).mapIt(shortLog(it))
return
for hash, deliveryInfo in msgsToDiscard:
info "send monitor performFeedbackAndCleanup",
success, dir, comment, msg_hash = shortLog(hash)
self.deliveryCb(success, dir, comment, hash, deliveryInfo.msg)
self.publishedMessages.del(hash)
proc checkMsgsInStore(
self: SendMonitor, msgsToValidate: Table[WakuMessageHash, DeliveryInfo]
): Future[
Result[
tuple[
publishedCorrectly: Table[WakuMessageHash, DeliveryInfo],
notYetPublished: Table[WakuMessageHash, DeliveryInfo],
],
void,
]
] {.async.} =
let hashesToValidate = toSeq(msgsToValidate.keys)
let storeResp: StoreQueryResponse = (
await self.storeClient.queryToAny(
StoreQueryRequest(includeData: false, messageHashes: hashesToValidate)
)
).valueOr:
error "checkMsgsInStore failed to get remote msgHashes",
hashes = hashesToValidate.mapIt(shortLog(it)), error = $error
return err()
let publishedHashes = storeResp.messages.mapIt(it.messageHash)
var notYetPublished: Table[WakuMessageHash, DeliveryInfo]
var publishedCorrectly: Table[WakuMessageHash, DeliveryInfo]
for msgHash, deliveryInfo in msgsToValidate.pairs:
if publishedHashes.contains(msgHash):
publishedCorrectly[msgHash] = deliveryInfo
self.publishedMessages.del(msgHash) ## we will no longer track that message
else:
notYetPublished[msgHash] = deliveryInfo
return ok((publishedCorrectly: publishedCorrectly, notYetPublished: notYetPublished))
proc processMessages(self: SendMonitor) {.async.} =
var msgsToValidate: Table[WakuMessageHash, DeliveryInfo]
var msgsToDiscard: Table[WakuMessageHash, DeliveryInfo]
let now = getNowInNanosecondTime()
let timeToCheckThreshold = now - ArchiveTime.nanos
let maxLifeTime = now - MaxTimeInCache.nanos
for hash, deliveryInfo in self.publishedMessages.pairs:
if deliveryInfo.msg.timestamp < maxLifeTime:
## message is too old
msgsToDiscard[hash] = deliveryInfo
if deliveryInfo.msg.timestamp < timeToCheckThreshold:
msgsToValidate[hash] = deliveryInfo
## Discard the messages that are too old
self.performFeedbackAndCleanup(
msgsToDiscard, DeliverySuccess.UNSUCCESSFUL, DeliveryDirection.PUBLISHING,
"Could not publish messages. Please try again.",
)
let (publishedCorrectly, notYetPublished) = (
await self.checkMsgsInStore(msgsToValidate)
).valueOr:
return ## the error log is printed in checkMsgsInStore
## Give positive feedback for the correctly published messages
self.performFeedbackAndCleanup(
publishedCorrectly, DeliverySuccess.SUCCESSFUL, DeliveryDirection.PUBLISHING,
"messages published correctly",
)
## Try to publish again
for msgHash, deliveryInfo in notYetPublished.pairs:
let pubsubTopic = deliveryInfo.pubsubTopic
let msg = deliveryInfo.msg
if not self.wakuRelay.isNil():
info "trying to publish again with wakuRelay", msgHash, pubsubTopic
(await self.wakuRelay.publish(pubsubTopic, msg)).isOkOr:
error "could not publish with wakuRelay.publish",
msgHash, pubsubTopic, error = $error
continue
if not self.wakuLightpushClient.isNil():
info "trying to publish again with wakuLightpushClient", msgHash, pubsubTopic
(await self.wakuLightpushClient.publishToAny(pubsubTopic, msg)).isOkOr:
error "could not publish with publishToAny", error = $error
continue
proc checkIfMessagesStored(self: SendMonitor) {.async.} =
## Continuously monitors that the sent messages have been received by a store node
while true:
await self.processMessages()
await sleepAsync(SendCheckInterval)
method onMessagePublished(
self: SendMonitor, pubsubTopic: string, msg: WakuMessage
) {.gcsafe, raises: [].} =
## Implementation of the PublishObserver interface.
##
## When publishing a message either through relay or lightpush, we want to add some extra effort
## to make sure it is received to one store node. Hence, keep track of those published messages.
info "onMessagePublished"
let msgHash = computeMessageHash(pubSubTopic, msg)
if not self.publishedMessages.hasKey(msgHash):
self.publishedMessages[msgHash] = DeliveryInfo(pubsubTopic: pubsubTopic, msg: msg)
proc startSendMonitor*(self: SendMonitor) =
self.msgStoredCheckerHandle = self.checkIfMessagesStored()
proc stopSendMonitor*(self: SendMonitor) =
discard self.msgStoredCheckerHandle.cancelAndWait()
proc setDeliveryCallback*(self: SendMonitor, deliveryCb: DeliveryFeedbackCallback) =
self.deliveryCb = deliveryCb

View File

@ -1,13 +0,0 @@
import chronicles
type SubscriptionObserver* = ref object of RootObj
method onSubscribe*(
self: SubscriptionObserver, pubsubTopic: string, contentTopics: seq[string]
) {.base, gcsafe, raises: [].} =
error "onSubscribe not implemented"
method onUnsubscribe*(
self: SubscriptionObserver, pubsubTopic: string, contentTopics: seq[string]
) {.base, gcsafe, raises: [].} =
error "onUnsubscribe not implemented"

View File

@ -0,0 +1,46 @@
## This module helps to ensure the correct transmission and reception of messages
import results
import chronos
import
./recv_service,
./send_service,
./subscription_service,
waku/[
waku_core,
waku_node,
waku_store/client,
waku_relay/protocol,
waku_lightpush/client,
waku_filter_v2/client,
]
type DeliveryService* = ref object
sendService*: SendService
recvService: RecvService
subscriptionService*: SubscriptionService
proc new*(
T: type DeliveryService, useP2PReliability: bool, w: WakuNode
): Result[T, string] =
## The node's store client gives store visibility to the DeliveryService;
## its relay and lightpush clients give the SendService a mechanism to re-publish.
let subscriptionService = SubscriptionService.new(w)
let sendService = ?SendService.new(useP2PReliability, w, subscriptionService)
let recvService = RecvService.new(w, subscriptionService)
return ok(
DeliveryService(
sendService: sendService,
recvService: recvService,
subscriptionService: subscriptionService,
)
)
proc startDeliveryService*(self: DeliveryService) =
self.sendService.startSendService()
self.recvService.startRecvService()
proc stopDeliveryService*(self: DeliveryService) {.async.} =
self.sendService.stopSendService()
await self.recvService.stopRecvService()

View File

@ -4,7 +4,7 @@ import std/[tables, strutils, os], results, chronicles
import ../../../common/databases/db_sqlite, ../../../common/databases/common
logScope:
topics = "waku node delivery_monitor"
topics = "waku node delivery_service"
const TargetSchemaVersion* = 1
# increase this when there is an update in the database schema

View File

@ -1,17 +1,17 @@
## This module is aimed to keep track of the sent/published messages that are considered
## not being properly delivered.
##
##
## The archiving of such messages will happen in a local sqlite database.
##
##
## In the very first approach, we consider that a message is sent properly if it has been
## received by any store node.
##
##
import results
import
../../../common/databases/db_sqlite,
../../../waku_core/message/message,
../../../node/delivery_monitor/not_delivered_storage/migrations
../../../node/delivery_service/not_delivered_storage/migrations
const NotDeliveredMessagesDbUrl = "not-delivered-messages.db"

View File

@ -0,0 +1,3 @@
import ./recv_service/recv_service
export recv_service

View File

@ -4,13 +4,18 @@
import std/[tables, sequtils, options]
import chronos, chronicles, libp2p/utility
import ../[subscription_service]
import
../../waku_core,
./delivery_callback,
./subscriptions_observer,
../../waku_store/[client, common],
../../waku_filter_v2/client,
../../waku_core/topics
waku/[
waku_core,
waku_store/client,
waku_store/common,
waku_filter_v2/client,
waku_core/topics,
events/delivery_events,
waku_node,
common/broker/broker_context,
]
const StoreCheckPeriod = chronos.minutes(5) ## How often to perform store queries
@ -28,14 +33,16 @@ type RecvMessage = object
rxTime: Timestamp
## timestamp of the rx message. We will not keep the rx messages forever
type RecvMonitor* = ref object of SubscriptionObserver
type RecvService* = ref object of RootObj
brokerCtx: BrokerContext
topicsInterest: Table[PubsubTopic, seq[ContentTopic]]
## Tracks message verification requests and when was the last time a
## pubsub topic was verified for missing messages
## The key contains pubsub-topics
storeClient: WakuStoreClient
deliveryCb: DeliveryFeedbackCallback
node: WakuNode
onSubscribeListener: OnFilterSubscribeEventListener
onUnsubscribeListener: OnFilterUnsubscribeEventListener
subscriptionService: SubscriptionService
recentReceivedMsgs: seq[RecvMessage]
@ -46,10 +53,10 @@ type RecvMonitor* = ref object of SubscriptionObserver
endTimeToCheck: Timestamp
proc getMissingMsgsFromStore(
self: RecvMonitor, msgHashes: seq[WakuMessageHash]
self: RecvService, msgHashes: seq[WakuMessageHash]
): Future[Result[seq[TupleHashAndMsg], string]] {.async.} =
let storeResp: StoreQueryResponse = (
await self.storeClient.queryToAny(
await self.node.wakuStoreClient.queryToAny(
StoreQueryRequest(includeData: true, messageHashes: msgHashes)
)
).valueOr:
@ -62,35 +69,35 @@ proc getMissingMsgsFromStore(
)
proc performDeliveryFeedback(
self: RecvMonitor,
self: RecvService,
success: DeliverySuccess,
dir: DeliveryDirection,
comment: string,
msgHash: WakuMessageHash,
msg: WakuMessage,
) {.gcsafe, raises: [].} =
## This procs allows to bring delivery feedback to the API client
## It requires a 'deliveryCb' to be registered beforehand.
if self.deliveryCb.isNil():
error "deliveryCb is nil in performDeliveryFeedback",
success, dir, comment, msg_hash
return
info "recv monitor performDeliveryFeedback",
success, dir, comment, msg_hash = shortLog(msgHash)
self.deliveryCb(success, dir, comment, msgHash, msg)
proc msgChecker(self: RecvMonitor) {.async.} =
DeliveryFeedbackEvent.emit(
brokerCtx = self.brokerCtx,
success = success,
dir = dir,
comment = comment,
msgHash = msgHash,
msg = msg,
)
proc msgChecker(self: RecvService) {.async.} =
## Continuously checks if a message has been received
while true:
await sleepAsync(StoreCheckPeriod)
self.endTimeToCheck = getNowInNanosecondTime()
var msgHashesInStore = newSeq[WakuMessageHash](0)
for pubsubTopic, cTopics in self.topicsInterest.pairs:
let storeResp: StoreQueryResponse = (
await self.storeClient.queryToAny(
await self.node.wakuStoreClient.queryToAny(
StoreQueryRequest(
includeData: false,
pubsubTopic: some(PubsubTopic(pubsubTopic)),
@ -126,8 +133,8 @@ proc msgChecker(self: RecvMonitor) {.async.} =
## update next check times
self.startTimeToCheck = self.endTimeToCheck
method onSubscribe(
self: RecvMonitor, pubsubTopic: string, contentTopics: seq[string]
proc onSubscribe(
self: RecvService, pubsubTopic: string, contentTopics: seq[string]
) {.gcsafe, raises: [].} =
info "onSubscribe", pubsubTopic, contentTopics
self.topicsInterest.withValue(pubsubTopic, contentTopicsOfInterest):
@ -135,8 +142,8 @@ method onSubscribe(
do:
self.topicsInterest[pubsubTopic] = contentTopics
method onUnsubscribe(
self: RecvMonitor, pubsubTopic: string, contentTopics: seq[string]
proc onUnsubscribe(
self: RecvService, pubsubTopic: string, contentTopics: seq[string]
) {.gcsafe, raises: [].} =
info "onUnsubscribe", pubsubTopic, contentTopics
@ -150,47 +157,63 @@ method onUnsubscribe(
do:
error "onUnsubscribe unsubscribing from wrong topic", pubsubTopic, contentTopics
proc new*(
T: type RecvMonitor,
storeClient: WakuStoreClient,
wakuFilterClient: WakuFilterClient,
): T =
proc new*(T: typedesc[RecvService], node: WakuNode, s: SubscriptionService): T =
## The node's store client will help to acquire any possibly missed messages
let now = getNowInNanosecondTime()
var recvMonitor = RecvMonitor(storeClient: storeClient, startTimeToCheck: now)
if not wakuFilterClient.isNil():
wakuFilterClient.addSubscrObserver(recvMonitor)
var recvService = RecvService(
node: node,
startTimeToCheck: now,
brokerCtx: node.brokerCtx,
subscriptionService: s,
topicsInterest: initTable[PubsubTopic, seq[ContentTopic]](),
recentReceivedMsgs: @[],
)
if not node.wakuFilterClient.isNil():
let filterPushHandler = proc(
pubsubTopic: PubsubTopic, message: WakuMessage
) {.async, closure.} =
## Captures all the messages recived through filter
## Captures all the messages received through filter
let msgHash = computeMessageHash(pubSubTopic, message)
let rxMsg = RecvMessage(msgHash: msgHash, rxTime: message.timestamp)
recvMonitor.recentReceivedMsgs.add(rxMsg)
recvService.recentReceivedMsgs.add(rxMsg)
wakuFilterClient.registerPushHandler(filterPushHandler)
node.wakuFilterClient.registerPushHandler(filterPushHandler)
return recvMonitor
return recvService
proc loopPruneOldMessages(self: RecvMonitor) {.async.} =
proc loopPruneOldMessages(self: RecvService) {.async.} =
while true:
let oldestAllowedTime = getNowInNanosecondTime() - MaxMessageLife.nanos
self.recentReceivedMsgs.keepItIf(it.rxTime > oldestAllowedTime)
await sleepAsync(PruneOldMsgsPeriod)
proc startRecvMonitor*(self: RecvMonitor) =
proc startRecvService*(self: RecvService) =
self.msgCheckerHandler = self.msgChecker()
self.msgPrunerHandler = self.loopPruneOldMessages()
proc stopRecvMonitor*(self: RecvMonitor) {.async.} =
self.onSubscribeListener = OnFilterSubscribeEvent.listen(
self.brokerCtx,
proc(subsEv: OnFilterSubscribeEvent) {.async: (raises: []).} =
self.onSubscribe(subsEv.pubsubTopic, subsEv.contentTopics),
).valueOr:
error "Failed to set OnFilterSubscribeEvent listener", error = error
quit(QuitFailure)
self.onUnsubscribeListener = OnFilterUnsubscribeEvent.listen(
self.brokerCtx,
proc(subsEv: OnFilterUnsubscribeEvent) {.async: (raises: []).} =
self.onUnsubscribe(subsEv.pubsubTopic, subsEv.contentTopics),
).valueOr:
error "Failed to set OnFilterUnsubscribeEvent listener", error = error
quit(QuitFailure)
proc stopRecvService*(self: RecvService) {.async.} =
OnFilterSubscribeEvent.dropListener(self.brokerCtx, self.onSubscribeListener)
OnFilterUnsubscribeEvent.dropListener(self.brokerCtx, self.onUnsubscribeListener)
if not self.msgCheckerHandler.isNil():
await self.msgCheckerHandler.cancelAndWait()
if not self.msgPrunerHandler.isNil():
await self.msgPrunerHandler.cancelAndWait()
proc setDeliveryCallback*(self: RecvMonitor, deliveryCb: DeliveryFeedbackCallback) =
self.deliveryCb = deliveryCb

View File

@ -0,0 +1,6 @@
## This module reinforces the publish operation with regular store-v3 requests.
##
import ./send_service/[send_service, delivery_task]
export send_service, delivery_task

View File

@ -0,0 +1,74 @@
import std/[options, times], chronos
import waku/waku_core, waku/api/types, waku/requests/node_requests
import waku/common/broker/broker_context
type DeliveryState* {.pure.} = enum
Entry
SuccessfullyPropagated
# message is known to be sent to the network but not yet validated
SuccessfullyValidated
# message is known to be stored on at least one store node, thus validated
FallbackRetry # retry sending with fallback processor if available
NextRoundRetry # try sending in next loop
FailedToDeliver # final state of failed delivery
type DeliveryTask* = ref object
requestId*: RequestId
pubsubTopic*: PubsubTopic
msg*: WakuMessage
msgHash*: WakuMessageHash
tryCount*: int
state*: DeliveryState
deliveryTime*: Moment
propagateEventEmitted*: bool
errorDesc*: string
proc new*(
T: typedesc[DeliveryTask],
requestId: RequestId,
envelope: MessageEnvelope,
brokerCtx: BrokerContext,
): Result[T, string] =
let msg = envelope.toWakuMessage()
# TODO: switch to a sync request for this as soon as one is available
let relayShardRes = (
RequestRelayShard.request(brokerCtx, none[PubsubTopic](), envelope.contentTopic)
).valueOr:
error "RequestRelayShard.request failed", error = error
return err("Failed to create DeliveryTask: " & $error)
let pubsubTopic = relayShardRes.relayShard.toPubsubTopic()
let msgHash = computeMessageHash(pubsubTopic, msg)
return ok(
T(
requestId: requestId,
pubsubTopic: pubsubTopic,
msg: msg,
msgHash: msgHash,
tryCount: 0,
state: DeliveryState.Entry,
)
)
func `==`*(r, l: DeliveryTask): bool =
if r.isNil() == l.isNil():
r.isNil() or r.msgHash == l.msgHash
else:
false
proc messageAge*(self: DeliveryTask): timer.Duration =
let actual = getNanosecondTime(getTime().toUnixFloat())
if self.msg.timestamp >= 0 and self.msg.timestamp < actual:
nanoseconds(actual - self.msg.timestamp)
else:
ZeroDuration
proc deliveryAge*(self: DeliveryTask): timer.Duration =
if self.state == DeliveryState.SuccessfullyPropagated:
timer.Moment.now() - self.deliveryTime
else:
ZeroDuration
proc isEphemeral*(self: DeliveryTask): bool =
return self.msg.ephemeral

View File

@ -0,0 +1,81 @@
import chronicles, chronos, results
import std/options
import
waku/node/peer_manager,
waku/waku_core,
waku/waku_lightpush/[common, client, rpc],
waku/common/broker/broker_context
import ./[delivery_task, send_processor]
logScope:
topics = "send service lightpush processor"
type LightpushSendProcessor* = ref object of BaseSendProcessor
peerManager: PeerManager
lightpushClient: WakuLightPushClient
proc new*(
T: typedesc[LightpushSendProcessor],
peerManager: PeerManager,
lightpushClient: WakuLightPushClient,
brokerCtx: BrokerContext,
): T =
return
T(peerManager: peerManager, lightpushClient: lightpushClient, brokerCtx: brokerCtx)
proc isLightpushPeerAvailable(
self: LightpushSendProcessor, pubsubTopic: PubsubTopic
): bool =
return self.peerManager.selectPeer(WakuLightPushCodec, some(pubsubTopic)).isSome()
method isValidProcessor*(
self: LightpushSendProcessor, task: DeliveryTask
): bool {.gcsafe.} =
return self.isLightpushPeerAvailable(task.pubsubTopic)
method sendImpl*(
self: LightpushSendProcessor, task: DeliveryTask
): Future[void] {.async.} =
task.tryCount.inc()
info "Trying message delivery via Lightpush",
requestId = task.requestId,
msgHash = task.msgHash.to0xHex(),
tryCount = task.tryCount
let peer = self.peerManager.selectPeer(WakuLightPushCodec, some(task.pubsubTopic)).valueOr:
debug "No peer available for Lightpush, request pushed back for next round",
requestId = task.requestId
task.state = DeliveryState.NextRoundRetry
return
let numLightpushServers = (
await self.lightpushClient.publish(some(task.pubsubTopic), task.msg, peer)
).valueOr:
error "LightpushSendProcessor.sendImpl failed", error = error.desc.get($error.code)
case error.code
of LightPushErrorCode.NO_PEERS_TO_RELAY, LightPushErrorCode.TOO_MANY_REQUESTS,
LightPushErrorCode.OUT_OF_RLN_PROOF, LightPushErrorCode.SERVICE_NOT_AVAILABLE,
LightPushErrorCode.INTERNAL_SERVER_ERROR:
task.state = DeliveryState.NextRoundRetry
else:
# the message is malformed, send error
task.state = DeliveryState.FailedToDeliver
task.errorDesc = error.desc.get($error.code)
task.deliveryTime = Moment.now()
return
if numLightpushServers > 0:
info "Message propagated via Lightpush",
requestId = task.requestId, msgHash = task.msgHash.to0xHex()
task.state = DeliveryState.SuccessfullyPropagated
task.deliveryTime = Moment.now()
# TODO: with a simple retry processor it might be more accurate to say `Sent`
else:
# Inconsistent state: publish reports success but zero peers. It should not happen.
debug "Lightpush publish returned zero peers, request pushed back for next round",
requestId = task.requestId
task.state = DeliveryState.NextRoundRetry
return

View File

@ -0,0 +1,78 @@
import std/options
import chronos, chronicles
import waku/[waku_core], waku/waku_lightpush/[common, rpc]
import waku/requests/health_request
import waku/common/broker/broker_context
import waku/api/types
import ./[delivery_task, send_processor]
logScope:
topics = "send service relay processor"
type RelaySendProcessor* = ref object of BaseSendProcessor
publishProc: PushMessageHandler
fallbackStateToSet: DeliveryState
proc new*(
T: typedesc[RelaySendProcessor],
lightpushAvailable: bool,
publishProc: PushMessageHandler,
brokerCtx: BrokerContext,
): RelaySendProcessor =
let fallbackStateToSet =
if lightpushAvailable:
DeliveryState.FallbackRetry
else:
DeliveryState.FailedToDeliver
return RelaySendProcessor(
publishProc: publishProc,
fallbackStateToSet: fallbackStateToSet,
brokerCtx: brokerCtx,
)
proc isTopicHealthy(self: RelaySendProcessor, topic: PubsubTopic): bool {.gcsafe.} =
let healthReport = RequestRelayTopicsHealth.request(self.brokerCtx, @[topic]).valueOr:
error "isTopicHealthy: failed to get health report", topic = topic, error = error
return false
if healthReport.topicHealth.len() < 1:
warn "isTopicHealthy: no topic health entries", topic = topic
return false
let health = healthReport.topicHealth[0].health
debug "isTopicHealthy: topic health is ", topic = topic, health = health
return health == MINIMALLY_HEALTHY or health == SUFFICIENTLY_HEALTHY
method isValidProcessor*(
self: RelaySendProcessor, task: DeliveryTask
): bool {.gcsafe.} =
# Topic health query is not reliable enough after a fresh subscribe...
# return self.isTopicHealthy(task.pubsubTopic)
return true
method sendImpl*(self: RelaySendProcessor, task: DeliveryTask) {.async.} =
task.tryCount.inc()
info "Trying message delivery via Relay",
requestId = task.requestId,
msgHash = task.msgHash.to0xHex(),
tryCount = task.tryCount
let noOfPublishedPeers = (await self.publishProc(task.pubsubTopic, task.msg)).valueOr:
let errorMessage = error.desc.get($error.code)
error "Failed to publish message with relay",
request = task.requestId, msgHash = task.msgHash.to0xHex(), error = errorMessage
if error.code != LightPushErrorCode.NO_PEERS_TO_RELAY:
task.state = DeliveryState.FailedToDeliver
task.errorDesc = errorMessage
else:
task.state = self.fallbackStateToSet
return
if noOfPublishedPeers > 0:
info "Message propagated via Relay",
requestId = task.requestId, msgHash = task.msgHash.to0xHex(), noOfPeers = noOfPublishedPeers
task.state = DeliveryState.SuccessfullyPropagated
task.deliveryTime = Moment.now()
else:
# This should not happen, but cover it anyway
task.state = self.fallbackStateToSet

View File

@ -0,0 +1,36 @@
import chronos
import ./delivery_task
import waku/common/broker/broker_context
{.push raises: [].}
type BaseSendProcessor* = ref object of RootObj
fallbackProcessor*: BaseSendProcessor
brokerCtx*: BrokerContext
proc chain*(self: BaseSendProcessor, next: BaseSendProcessor) =
self.fallbackProcessor = next
method isValidProcessor*(
self: BaseSendProcessor, task: DeliveryTask
): bool {.base, gcsafe.} =
return false
method sendImpl*(
self: BaseSendProcessor, task: DeliveryTask
): Future[void] {.async, base.} =
assert false, "Not implemented"
method process*(
self: BaseSendProcessor, task: DeliveryTask
): Future[void] {.async, base.} =
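## Walk the fallback chain starting at this processor: each processor that
## reports itself valid for the task gets one sendImpl attempt, and the walk
## continues only while the task requests FallbackRetry. A task still in
## FallbackRetry after the last processor is downgraded to NextRoundRetry.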
var currentProcessor: BaseSendProcessor = self
var keepTrying = true
while not currentProcessor.isNil() and keepTrying:
if currentProcessor.isValidProcessor(task):
await currentProcessor.sendImpl(task)
currentProcessor = currentProcessor.fallbackProcessor
keepTrying = task.state == DeliveryState.FallbackRetry
if task.state == DeliveryState.FallbackRetry:
task.state = DeliveryState.NextRoundRetry

View File

@ -0,0 +1,269 @@
## This module reinforces the publish operation with regular store-v3 requests.
##
import std/[sequtils, tables, options]
import chronos, chronicles, libp2p/utility
import
./[send_processor, relay_processor, lightpush_processor, delivery_task],
../[subscription_service],
waku/[
waku_core,
node/waku_node,
node/peer_manager,
waku_store/client,
waku_store/common,
waku_relay/protocol,
waku_rln_relay/rln_relay,
waku_lightpush/client,
waku_lightpush/callbacks,
events/message_events,
common/broker/broker_context,
]
logScope:
topics = "send service"
# A useful util missing from sequtils: this extends applyIt with a predicate.
template applyItIf*(varSeq, pred, op: untyped) =
for i in low(varSeq) .. high(varSeq):
let it {.inject.} = varSeq[i]
if pred:
op
varSeq[i] = it
template forEach*(varSeq, op: untyped) =
for i in low(varSeq) .. high(varSeq):
let it {.inject.} = varSeq[i]
op
const MaxTimeInCache* = chronos.minutes(1)
## Messages older than this time will get completely forgotten on publication and a
## feedback will be given when that happens
const ServiceLoopInterval* = chronos.seconds(1)
## Interval at which we check that messages have been properly received by a store node
const ArchiveTime = chronos.seconds(3)
## Estimation of the time we wait until we start confirming that a message has been properly
## received and archived by a store node
type SendService* = ref object of RootObj
brokerCtx: BrokerContext
taskCache: seq[DeliveryTask]
## Cache that contains the delivery task per message hash.
## This is needed to make sure the published messages are properly delivered
serviceLoopHandle: Future[void] ## handle that allows to stop the async task
sendProcessor: BaseSendProcessor
node: WakuNode
checkStoreForMessages: bool
subscriptionService: SubscriptionService
proc setupSendProcessorChain(
peerManager: PeerManager,
lightpushClient: WakuLightPushClient,
relay: WakuRelay,
rlnRelay: WakuRLNRelay,
brokerCtx: BrokerContext,
): Result[BaseSendProcessor, string] =
let isRelayAvail = not relay.isNil()
let isLightPushAvail = not lightpushClient.isNil()
if not isRelayAvail and not isLightPushAvail:
return err("No valid send processor found for the delivery task")
var processors = newSeq[BaseSendProcessor]()
if isRelayAvail:
let rln: Option[WakuRLNRelay] =
if rlnRelay.isNil():
none[WakuRLNRelay]()
else:
some(rlnRelay)
let publishProc = getRelayPushHandler(relay, rln)
processors.add(RelaySendProcessor.new(isLightPushAvail, publishProc, brokerCtx))
if isLightPushAvail:
processors.add(LightpushSendProcessor.new(peerManager, lightpushClient, brokerCtx))
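# Link the processors into a single fallback chain in order of preference:
# relay (if mounted) is tried first, with lightpush as its fallback.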
var currentProcessor: BaseSendProcessor = processors[0]
for i in 1 ..< processors.len:
currentProcessor.chain(processors[i])
currentProcessor = processors[i]
return ok(processors[0])
proc new*(
T: typedesc[SendService],
preferP2PReliability: bool,
w: WakuNode,
s: SubscriptionService,
): Result[T, string] =
if w.wakuRelay.isNil() and w.wakuLightpushClient.isNil():
return err(
"Could not create SendService. wakuRelay or wakuLightpushClient should be set"
)
let checkStoreForMessages = preferP2PReliability and not w.wakuStoreClient.isNil()
let sendProcessorChain = setupSendProcessorChain(
w.peerManager, w.wakuLightPushClient, w.wakuRelay, w.wakuRlnRelay, w.brokerCtx
).valueOr:
return err("failed to setup SendProcessorChain: " & $error)
let sendService = SendService(
brokerCtx: w.brokerCtx,
taskCache: newSeq[DeliveryTask](),
serviceLoopHandle: nil,
sendProcessor: sendProcessorChain,
node: w,
checkStoreForMessages: checkStoreForMessages,
subscriptionService: s,
)
return ok(sendService)
proc addTask(self: SendService, task: DeliveryTask) =
self.taskCache.addUnique(task)
proc isStorePeerAvailable*(sendService: SendService): bool =
return sendService.node.peerManager.selectPeer(WakuStoreCodec).isSome()
proc checkMsgsInStore(self: SendService, tasksToValidate: seq[DeliveryTask]) {.async.} =
if tasksToValidate.len() == 0:
return
if not isStorePeerAvailable(self):
warn "Skipping store validation for ",
messageCount = tasksToValidate.len(), error = "no store peer available"
return
var hashesToValidate = tasksToValidate.mapIt(it.msgHash)
# TODO: confirm hash format for store query!!!
let storeResp: StoreQueryResponse = (
await self.node.wakuStoreClient.queryToAny(
StoreQueryRequest(includeData: false, messageHashes: hashesToValidate)
)
).valueOr:
error "Failed to get store validation for messages",
hashes = hashesToValidate.mapIt(shortLog(it)), error = $error
return
let storedItems = storeResp.messages.mapIt(it.messageHash)
# Set success state for messages found in store
self.taskCache.applyItIf(storedItems.contains(it.msgHash)):
it.state = DeliveryState.SuccessfullyValidated
# set retry state for messages not found in store
hashesToValidate.keepItIf(not storedItems.contains(it))
self.taskCache.applyItIf(hashesToValidate.contains(it.msgHash)):
it.state = DeliveryState.NextRoundRetry
proc checkStoredMessages(self: SendService) {.async.} =
if not self.checkStoreForMessages:
return
let tasksToValidate = self.taskCache.filterIt(
it.state == DeliveryState.SuccessfullyPropagated and it.deliveryAge() > ArchiveTime and
not it.isEphemeral()
)
await self.checkMsgsInStore(tasksToValidate)
proc reportTaskResult(self: SendService, task: DeliveryTask) =
case task.state
of DeliveryState.SuccessfullyPropagated:
# TODO: if we are unable to store-check messages, shall we report success instead?
if not task.propagateEventEmitted:
info "Message successfully propagated",
requestId = task.requestId, msgHash = task.msgHash.to0xHex()
MessagePropagatedEvent.emit(
self.brokerCtx, task.requestId, task.msgHash.to0xHex()
)
task.propagateEventEmitted = true
return
of DeliveryState.SuccessfullyValidated:
info "Message successfully sent",
requestId = task.requestId, msgHash = task.msgHash.to0xHex()
MessageSentEvent.emit(self.brokerCtx, task.requestId, task.msgHash.to0xHex())
return
of DeliveryState.FailedToDeliver:
error "Failed to send message",
requestId = task.requestId,
msgHash = task.msgHash.to0xHex(),
error = task.errorDesc
MessageErrorEvent.emit(
self.brokerCtx, task.requestId, task.msgHash.to0xHex(), task.errorDesc
)
return
else:
# the remaining states are intermediate and do not translate to events
discard
if task.messageAge() > MaxTimeInCache:
error "Failed to send message",
requestId = task.requestId,
msgHash = task.msgHash.to0xHex(),
error = "Message too old",
age = task.messageAge()
task.state = DeliveryState.FailedToDeliver
MessageErrorEvent.emit(
self.brokerCtx,
task.requestId,
task.msgHash.to0xHex(),
"Unable to send within retry time window",
)
proc evaluateAndCleanUp(self: SendService) =
self.taskCache.forEach(self.reportTaskResult(it))
self.taskCache.keepItIf(
it.state != DeliveryState.SuccessfullyValidated and
it.state != DeliveryState.FailedToDeliver
)
# remove propagated ephemeral messages as no store check is possible
self.taskCache.keepItIf(
not (it.isEphemeral() and it.state == DeliveryState.SuccessfullyPropagated)
)
proc trySendMessages(self: SendService) {.async.} =
let tasksToSend = self.taskCache.filterIt(it.state == DeliveryState.NextRoundRetry)
for task in tasksToSend:
# TODO: check whether running these concurrently yields any perf gain
await self.sendProcessor.process(task)
proc serviceLoop(self: SendService) {.async.} =
## Continuously monitors that the sent messages have been received by a store node
while true:
await self.trySendMessages()
await self.checkStoredMessages()
self.evaluateAndCleanUp()
## TODO: add circuit breaker to avoid infinite looping in case of persistent failures
## Use OnlineStateChange observers to pause/resume the loop
await sleepAsync(ServiceLoopInterval)
proc startSendService*(self: SendService) =
self.serviceLoopHandle = self.serviceLoop()
proc stopSendService*(self: SendService) =
if not self.serviceLoopHandle.isNil():
discard self.serviceLoopHandle.cancelAndWait()
proc send*(self: SendService, task: DeliveryTask) {.async.} =
assert(not task.isNil(), "task for send must not be nil")
info "SendService.send: processing delivery task",
requestId = task.requestId, msgHash = task.msgHash.to0xHex()
self.subscriptionService.subscribe(task.msg.contentTopic).isOkOr:
error "SendService.send: failed to subscribe to content topic",
contentTopic = task.msg.contentTopic, error = error
await self.sendProcessor.process(task)
reportTaskResult(self, task)
if task.state != DeliveryState.FailedToDeliver:
self.addTask(task)

View File

@ -0,0 +1,64 @@
import chronos, chronicles
import
waku/[
waku_core,
waku_core/topics,
events/message_events,
waku_node,
common/broker/broker_context,
]
type SubscriptionService* = ref object of RootObj
brokerCtx: BrokerContext
node: WakuNode
proc new*(T: typedesc[SubscriptionService], node: WakuNode): T =
## Creates the subscription service bound to the given node and its broker context.
return SubscriptionService(brokerCtx: node.brokerCtx, node: node)
proc isSubscribed*(
self: SubscriptionService, topic: ContentTopic
): Result[bool, string] =
if not self.node.wakuRelay.isNil():
return self.node.isSubscribed((kind: ContentSub, topic: topic))
# TODO: Add support for edge mode with Filter subscription management
return ok(false)
#TODO: later PR may consider to refactor or place this function elsewhere
# The only important part is that it emits MessageReceivedEvent
proc getReceiveHandler(self: SubscriptionService): WakuRelayHandler =
return proc(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} =
let msgHash = computeMessageHash(topic, msg).to0xHex()
info "API received message",
pubsubTopic = topic, contentTopic = msg.contentTopic, msgHash = msgHash
MessageReceivedEvent.emit(self.brokerCtx, msgHash, msg)
proc subscribe*(self: SubscriptionService, topic: ContentTopic): Result[void, string] =
let isSubscribed = self.isSubscribed(topic).valueOr:
error "Failed to check subscription status: ", error = error
return err("Failed to check subscription status: " & error)
if not isSubscribed:
if not self.node.wakuRelay.isNil():
self.node.subscribe((kind: ContentSub, topic: topic), self.getReceiveHandler()).isOkOr:
error "Failed to subscribe: ", error = error
return err("Failed to subscribe: " & error)
# TODO: Add support for edge mode with Filter subscription management
return ok()
proc unsubscribe*(
self: SubscriptionService, topic: ContentTopic
): Result[void, string] =
if not self.node.wakuRelay.isNil():
self.node.unsubscribe((kind: ContentSub, topic: topic)).isOkOr:
error "Failed to unsubscribe: ", error = error
return err("Failed to unsubscribe: " & error)
# TODO: Add support for edge mode with Filter subscription management
return ok()

View File

@ -1,11 +1,12 @@
import chronos
import ../waku_core
import waku/waku_core
type TopicHealth* = enum
UNHEALTHY
MINIMALLY_HEALTHY
SUFFICIENTLY_HEALTHY
NOT_SUBSCRIBED
proc `$`*(t: TopicHealth): string =
result =
@ -13,6 +14,7 @@ proc `$`*(t: TopicHealth): string =
of UNHEALTHY: "UnHealthy"
of MINIMALLY_HEALTHY: "MinimallyHealthy"
of SUFFICIENTLY_HEALTHY: "SufficientlyHealthy"
of NOT_SUBSCRIBED: "NotSubscribed"
type TopicHealthChangeHandler* = proc(
pubsubTopic: PubsubTopic, topicHealth: TopicHealth

View File

@ -193,7 +193,6 @@ proc lightpushPublishHandler(
mixify: bool = false,
): Future[lightpush_protocol.WakuLightPushResult] {.async.} =
let msgHash = pubsubTopic.computeMessageHash(message).to0xHex()
if not node.wakuLightpushClient.isNil():
notice "publishing message with lightpush",
pubsubTopic = pubsubTopic,
@ -201,21 +200,23 @@ proc lightpushPublishHandler(
target_peer_id = peer.peerId,
msg_hash = msgHash,
mixify = mixify
if mixify: #indicates we want to use mix to send the message
#TODO: How to handle multiple addresses?
let conn = node.wakuMix.toConnection(
MixDestination.exitNode(peer.peerId),
WakuLightPushCodec,
MixParameters(expectReply: Opt.some(true), numSurbs: Opt.some(byte(1))),
# indicating we only want a single path to be used for reply hence numSurbs = 1
).valueOr:
error "could not create mix connection"
return lighpushErrorResult(
LightPushErrorCode.SERVICE_NOT_AVAILABLE,
"Waku lightpush with mix not available",
)
if defined(libp2p_mix_experimental_exit_is_dest) and mixify:
# indicates we want to use mix to send the message
when defined(libp2p_mix_experimental_exit_is_dest):
# TODO: How to handle multiple addresses?
let conn = node.wakuMix.toConnection(
MixDestination.exitNode(peer.peerId),
WakuLightPushCodec,
MixParameters(expectReply: Opt.some(true), numSurbs: Opt.some(byte(1))),
# indicating we only want a single path to be used for reply hence numSurbs = 1
).valueOr:
error "could not create mix connection"
return lighpushErrorResult(
LightPushErrorCode.SERVICE_NOT_AVAILABLE,
"Waku lightpush with mix not available",
)
return await node.wakuLightpushClient.publish(some(pubsubTopic), message, conn)
return await node.wakuLightpushClient.publish(some(pubsubTopic), message, conn)
else:
return await node.wakuLightpushClient.publish(some(pubsubTopic), message, peer)
@ -264,7 +265,7 @@ proc lightpushPublish*(
LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers"
)
let pubsubForPublish = pubSubTopic.valueOr:
let pubsubForPublish = pubsubTopic.valueOr:
if node.wakuAutoSharding.isNone():
let msg = "Pubsub topic must be specified when static sharding is enabled"
error "lightpush publish error", error = msg

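The refactor above gates the mix path on a compile-time define as well as the runtime `mixify` flag, so mix-only symbols disappear entirely from builds without the flag. A minimal sketch of the same pattern with a hypothetical `-d:my_feature` define:

proc send(useFeature: bool) =
  if defined(my_feature) and useFeature:
    when defined(my_feature):
      # feature-only code: compiled out entirely unless built with -d:my_feature
      echo "feature path"
  else:
    echo "default path"

send(true) # prints "feature path" only when built with -d:my_feature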
View File

@ -30,6 +30,8 @@ import
../peer_manager,
../../waku_rln_relay
export waku_relay.WakuRelayHandler
declarePublicHistogram waku_histogram_message_size,
"message size histogram in kB",
buckets = [
@ -91,6 +93,23 @@ proc registerRelayHandler(
node.wakuRelay.subscribe(topic, uniqueTopicHandler)
proc getTopicOfSubscriptionEvent(
node: WakuNode, subscription: SubscriptionEvent
): Result[(PubsubTopic, Option[ContentTopic]), string] =
case subscription.kind
of ContentSub, ContentUnsub:
if node.wakuAutoSharding.isSome():
let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr:
return err("Autosharding error: " & error)
return ok(($shard, some(subscription.topic)))
else:
return
err("Static sharding is used, relay subscriptions must specify a pubsub topic")
of PubsubSub, PubsubUnsub:
return ok((subscription.topic, none[ContentTopic]()))
else:
return err("Unsupported subscription type in relay getTopicOfSubscriptionEvent")
proc subscribe*(
node: WakuNode, subscription: SubscriptionEvent, handler: WakuRelayHandler
): Result[void, string] =
@ -101,27 +120,15 @@ proc subscribe*(
error "Invalid API call to `subscribe`. WakuRelay not mounted."
return err("Invalid API call to `subscribe`. WakuRelay not mounted.")
let (pubsubTopic, contentTopicOp) =
case subscription.kind
of ContentSub:
if node.wakuAutoSharding.isSome():
let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr:
error "Autosharding error", error = error
return err("Autosharding error: " & error)
($shard, some(subscription.topic))
else:
return err(
"Static sharding is used, relay subscriptions must specify a pubsub topic"
)
of PubsubSub:
(subscription.topic, none(ContentTopic))
else:
return err("Unsupported subscription type in relay subscribe")
let (pubsubTopic, contentTopicOp) = getTopicOfSubscriptionEvent(node, subscription).valueOr:
error "Failed to decode subscription event", error = error
return err("Failed to decode subscription event: " & error)
if node.wakuRelay.isSubscribed(pubsubTopic):
warn "No-effect API call to subscribe. Already subscribed to topic", pubsubTopic
return ok()
info "subscribe", pubsubTopic, contentTopicOp
node.registerRelayHandler(pubsubTopic, handler)
node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic))
@ -136,22 +143,9 @@ proc unsubscribe*(
error "Invalid API call to `unsubscribe`. WakuRelay not mounted."
return err("Invalid API call to `unsubscribe`. WakuRelay not mounted.")
let (pubsubTopic, contentTopicOp) =
case subscription.kind
of ContentUnsub:
if node.wakuAutoSharding.isSome():
let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr:
error "Autosharding error", error = error
return err("Autosharding error: " & error)
($shard, some(subscription.topic))
else:
return err(
"Static sharding is used, relay subscriptions must specify a pubsub topic"
)
of PubsubUnsub:
(subscription.topic, none(ContentTopic))
else:
return err("Unsupported subscription type in relay unsubscribe")
let (pubsubTopic, contentTopicOp) = getTopicOfSubscriptionEvent(node, subscription).valueOr:
error "Failed to decode unsubscribe event", error = error
return err("Failed to decode unsubscribe event: " & error)
if not node.wakuRelay.isSubscribed(pubsubTopic):
warn "No-effect API call to `unsubscribe`. Was not subscribed", pubsubTopic
@ -163,9 +157,22 @@ proc unsubscribe*(
return ok()
proc isSubscribed*(
node: WakuNode, subscription: SubscriptionEvent
): Result[bool, string] =
if node.wakuRelay.isNil():
error "Invalid API call to `isSubscribed`. WakuRelay not mounted."
return err("Invalid API call to `isSubscribed`. WakuRelay not mounted.")
let (pubsubTopic, contentTopicOp) = getTopicOfSubscriptionEvent(node, subscription).valueOr:
error "Failed to decode subscription event", error = error
return err("Failed to decode subscription event: " & error)
return ok(node.wakuRelay.isSubscribed(pubsubTopic))
proc publish*(
node: WakuNode, pubsubTopicOp: Option[PubsubTopic], message: WakuMessage
): Future[Result[void, string]] {.async, gcsafe.} =
): Future[Result[int, string]] {.async, gcsafe.} =
## Publish a `WakuMessage`. The pubsub topic may be none, a named shard, or a static shard.
## `WakuMessage` should contain a `contentTopic` field for light node functionality.
## It is also used to determine the shard.
@ -184,16 +191,20 @@ proc publish*(
let msg = "Autosharding error: " & error
return err(msg)
#TODO instead of discard return error when 0 peers received the message
discard await node.wakuRelay.publish(pubsubTopic, message)
let numPeers = (await node.wakuRelay.publish(pubsubTopic, message)).valueOr:
warn "waku.relay did not publish", error = error
# TODO: if the error is NoPeersToPublish, we might want to return ok(0) instead
return err("publish failed in relay: " & $error)
notice "waku.relay published",
peerId = node.peerId,
pubsubTopic = pubsubTopic,
msg_hash = pubsubTopic.computeMessageHash(message).to0xHex(),
publishTime = getNowInNanosecondTime()
publishTime = getNowInNanosecondTime(),
numPeers = numPeers
return ok()
# TODO: investigate whether we can return an error when numPeers is 0
ok(numPeers)
proc mountRelay*(
node: WakuNode,

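`publish` now surfaces the relayed peer count rather than discarding it, so callers can react when a message reached nobody. A hedged sketch (waku module imports elided; `node` and `msg` assumed to exist):

import std/options
import chronos, chronicles, results

proc publishExample(node: WakuNode, msg: WakuMessage) {.async.} =
  # with autosharding mounted, none() lets the shard be derived from
  # msg.contentTopic; otherwise a pubsub topic must be passed explicitly
  let numPeers = (await node.publish(none(PubsubTopic), msg)).valueOr:
    error "relay publish failed", error = error
    return
  info "relay publish succeeded", numPeers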
View File

@ -27,38 +27,42 @@ import
libp2p/protocols/mix/mix_protocol
import
../waku_core,
../waku_core/topics/sharding,
../waku_relay,
../waku_archive,
../waku_archive_legacy,
../waku_store_legacy/protocol as legacy_store,
../waku_store_legacy/client as legacy_store_client,
../waku_store_legacy/common as legacy_store_common,
../waku_store/protocol as store,
../waku_store/client as store_client,
../waku_store/common as store_common,
../waku_store/resume,
../waku_store_sync,
../waku_filter_v2,
../waku_filter_v2/client as filter_client,
../waku_metadata,
../waku_rendezvous/protocol,
../waku_rendezvous/client as rendezvous_client,
../waku_rendezvous/waku_peer_record,
../waku_lightpush_legacy/client as legacy_ligntpuhs_client,
../waku_lightpush_legacy as legacy_lightpush_protocol,
../waku_lightpush/client as ligntpuhs_client,
../waku_lightpush as lightpush_protocol,
../waku_enr,
../waku_peer_exchange,
../waku_rln_relay,
waku/[
waku_core,
waku_core/topics/sharding,
waku_relay,
waku_archive,
waku_archive_legacy,
waku_store_legacy/protocol as legacy_store,
waku_store_legacy/client as legacy_store_client,
waku_store_legacy/common as legacy_store_common,
waku_store/protocol as store,
waku_store/client as store_client,
waku_store/common as store_common,
waku_store/resume,
waku_store_sync,
waku_filter_v2,
waku_filter_v2/client as filter_client,
waku_metadata,
waku_rendezvous/protocol,
waku_rendezvous/client as rendezvous_client,
waku_rendezvous/waku_peer_record,
waku_lightpush_legacy/client as legacy_ligntpuhs_client,
waku_lightpush_legacy as legacy_lightpush_protocol,
waku_lightpush/client as ligntpuhs_client,
waku_lightpush as lightpush_protocol,
waku_enr,
waku_peer_exchange,
waku_rln_relay,
common/rate_limit/setting,
common/callbacks,
common/nimchronos,
waku_mix,
requests/node_requests,
common/broker/broker_context,
],
./net_config,
./peer_manager,
../common/rate_limit/setting,
../common/callbacks,
../common/nimchronos,
../waku_mix
./peer_manager
declarePublicCounter waku_node_messages, "number of messages received", ["type"]
@ -123,6 +127,7 @@ type
enr*: enr.Record
libp2pPing*: Ping
rng*: ref rand.HmacDrbgContext
brokerCtx*: BrokerContext
wakuRendezvous*: WakuRendezVous
wakuRendezvousClient*: rendezvous_client.WakuRendezVousClient
announcedAddresses*: seq[MultiAddress]
@ -131,6 +136,23 @@ type
rateLimitSettings*: ProtocolRateLimitSettings
wakuMix*: WakuMix
proc deduceRelayShard(
node: WakuNode,
contentTopic: ContentTopic,
pubsubTopicOp: Option[PubsubTopic] = none[PubsubTopic](),
): Result[RelayShard, string] =
let pubsubTopic = pubsubTopicOp.valueOr:
if node.wakuAutoSharding.isNone():
return err("Pubsub topic must be specified when static sharding is enabled.")
let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr:
let msg = "Deducing shard failed: " & error
return err(msg)
return ok(shard)
let shard = RelayShard.parse(pubsubTopic).valueOr:
return err("Invalid topic:" & pubsubTopic & " " & $error)
return ok(shard)
proc getShardsGetter(node: WakuNode): GetShards =
return proc(): seq[uint16] {.closure, gcsafe, raises: [].} =
# fetch pubsubTopics subscribed to relay and convert them to shards
@ -177,11 +199,14 @@ proc new*(
info "Initializing networking", addrs = $netConfig.announcedAddresses
let brokerCtx = globalBrokerContext()
let queue = newAsyncEventQueue[SubscriptionEvent](0)
let node = WakuNode(
peerManager: peerManager,
switch: switch,
rng: rng,
brokerCtx: brokerCtx,
enr: enr,
announcedAddresses: netConfig.announcedAddresses,
topicSubscriptionQueue: queue,
@ -252,6 +277,7 @@ proc mountAutoSharding*(
info "mounting auto sharding", clusterId = clusterId, shardCount = shardCount
node.wakuAutoSharding =
some(Sharding(clusterId: clusterId, shardCountGenZero: shardCount))
return ok()
proc getMixNodePoolSize*(node: WakuNode): int =
@ -443,6 +469,21 @@ proc updateAnnouncedAddrWithPrimaryIpAddr*(node: WakuNode): Result[void, string]
return ok()
proc startProvidersAndListeners(node: WakuNode) =
RequestRelayShard.setProvider(
node.brokerCtx,
proc(
pubsubTopic: Option[PubsubTopic], contentTopic: ContentTopic
): Result[RequestRelayShard, string] =
let shard = node.deduceRelayShard(contentTopic, pubsubTopic).valueOr:
return err($error)
return ok(RequestRelayShard(relayShard: shard)),
).isOkOr:
error "Can't set provider for RequestRelayShard", error = error
proc stopProvidersAndListeners(node: WakuNode) =
RequestRelayShard.clearProvider(node.brokerCtx)
proc start*(node: WakuNode) {.async.} =
## Starts a created Waku Node and
## all its mounted protocols.
@ -491,6 +532,8 @@ proc start*(node: WakuNode) {.async.} =
## The switch will update addresses after start using the addressMapper
await node.switch.start()
node.startProvidersAndListeners()
node.started = true
if not zeroPortPresent:
@ -503,6 +546,9 @@ proc start*(node: WakuNode) {.async.} =
proc stop*(node: WakuNode) {.async.} =
## By stopping the switch we are stopping all the underlying mounted protocols
node.stopProvidersAndListeners()
await node.switch.stop()
node.peerManager.stop()
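`deduceRelayShard` above resolves the shard one of two ways: derive it from the content topic when autosharding is mounted, or parse the explicitly supplied pubsub topic under static sharding. A small sketch of the static path, assuming the usual RelayShard field names (`clusterId`, `shardId`):

proc staticShardExample() =
  let shard = RelayShard.parse("/waku/2/rs/1/0").valueOr:
    echo "invalid pubsub topic: ", $error
    return
  echo "cluster=", shard.clusterId, " shard=", shard.shardId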

View File

@ -0,0 +1,21 @@
import waku/common/broker/[request_broker, multi_request_broker]
import waku/api/types
import waku/node/health_monitor/[protocol_health, topic_health]
import waku/waku_core/topics
export protocol_health, topic_health
RequestBroker(sync):
type RequestNodeHealth* = object
healthStatus*: NodeHealth
RequestBroker(sync):
type RequestRelayTopicsHealth* = object
topicHealth*: seq[tuple[topic: PubsubTopic, health: TopicHealth]]
proc signature(topics: seq[PubsubTopic]): Result[RequestRelayTopicsHealth, string]
MultiRequestBroker:
type RequestProtocolHealth* = object
healthStatus*: ProtocolHealth

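Only the provider half of the broker API (`setProvider`, used by WakuRelay later in this diff) is visible here; the consumer call below is an assumption, written as if the `RequestBroker(sync)` macro also generates a matching `request` proc:

proc healthExample(brokerCtx: BrokerContext) =
  # `request` is hypothetical: this diff only shows the setProvider side
  let health = RequestRelayTopicsHealth.request(brokerCtx, @["/waku/2/rs/1/0"]).valueOr:
    error "relay topics health request failed", error = error
    return
  for (topic, state) in health.topicHealth:
    info "topic health", topic, health = $state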
View File

@ -0,0 +1,11 @@
import std/options
import waku/common/broker/[request_broker, multi_request_broker]
import waku/waku_core/[topics]
RequestBroker(sync):
type RequestRelayShard* = object
relayShard*: RelayShard
proc signature(
pubsubTopic: Option[PubsubTopic], contentTopic: ContentTopic
): Result[RequestRelayShard, string]

View File

@ -0,0 +1,3 @@
import ./[health_request, rln_requests, node_requests]
export health_request, rln_requests, node_requests

View File

@ -0,0 +1,9 @@
import waku/common/broker/request_broker, waku/waku_core/message/message
RequestBroker:
type RequestGenerateRlnProof* = object
proof*: seq[byte]
proc signature(
message: WakuMessage, senderEpoch: float64
): Future[Result[RequestGenerateRlnProof, string]] {.async.}

View File

@ -19,6 +19,11 @@ func shortLog*(hash: WakuMessageHash): string =
func `$`*(hash: WakuMessageHash): string =
shortLog(hash)
func to0xHex*(hash: WakuMessageHash): string =
var hexhash = newStringOfCap(64)
hexhash &= hash.toOpenArray(hash.low, hash.high).to0xHex()
hexhash
const EmptyWakuMessageHash*: WakuMessageHash = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,

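Usage note for the helper above: the rendered hash carries the `0x` prefix, matching the `msg_hash` values logged throughout this change. A tiny sketch (imports elided):

proc hashExample(msg: WakuMessage) =
  let hash = computeMessageHash("/waku/2/rs/1/0", msg)
  echo hash.to0xHex() # "0x"-prefixed, 66 characters in total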
View File

@ -10,9 +10,8 @@ import
bearssl/rand,
stew/byteutils
import
../node/peer_manager,
../node/delivery_monitor/subscriptions_observer,
../waku_core,
waku/
[node/peer_manager, waku_core, events/delivery_events, common/broker/broker_context],
./common,
./protocol_metrics,
./rpc_codec,
@ -22,19 +21,16 @@ logScope:
topics = "waku filter client"
type WakuFilterClient* = ref object of LPProtocol
brokerCtx: BrokerContext
rng: ref HmacDrbgContext
peerManager: PeerManager
pushHandlers: seq[FilterPushHandler]
subscrObservers: seq[SubscriptionObserver]
func generateRequestId(rng: ref HmacDrbgContext): string =
var bytes: array[10, byte]
hmacDrbgGenerate(rng[], bytes)
return toHex(bytes)
proc addSubscrObserver*(wfc: WakuFilterClient, obs: SubscriptionObserver) =
wfc.subscrObservers.add(obs)
proc sendSubscribeRequest(
wfc: WakuFilterClient,
servicePeer: RemotePeerInfo,
@ -132,8 +128,7 @@ proc subscribe*(
?await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest)
for obs in wfc.subscrObservers:
obs.onSubscribe(pubSubTopic, contentTopicSeq)
OnFilterSubscribeEvent.emit(wfc.brokerCtx, pubsubTopic, contentTopicSeq)
return ok()
@ -156,8 +151,7 @@ proc unsubscribe*(
?await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest)
for obs in wfc.subscrObservers:
obs.onUnsubscribe(pubSubTopic, contentTopicSeq)
OnFilterUnSubscribeEvent.emit(wfc.brokerCtx, pubsubTopic, contentTopicSeq)
return ok()
@ -210,6 +204,9 @@ proc initProtocolHandler(wfc: WakuFilterClient) =
proc new*(
T: type WakuFilterClient, peerManager: PeerManager, rng: ref HmacDrbgContext
): T =
let wfc = WakuFilterClient(rng: rng, peerManager: peerManager, pushHandlers: @[])
let brokerCtx = globalBrokerContext()
let wfc = WakuFilterClient(
brokerCtx: brokerCtx, rng: rng, peerManager: peerManager, pushHandlers: @[]
)
wfc.initProtocolHandler()
wfc

View File

@ -31,7 +31,7 @@ proc checkAndGenerateRLNProof*(
proc getNilPushHandler*(): PushMessageHandler =
return proc(
peer: PeerId, pubsubTopic: string, message: WakuMessage
pubsubTopic: string, message: WakuMessage
): Future[WakuLightPushResult] {.async.} =
return lightpushResultInternalError("no waku relay found")
@ -39,7 +39,7 @@ proc getRelayPushHandler*(
wakuRelay: WakuRelay, rlnPeer: Option[WakuRLNRelay] = none[WakuRLNRelay]()
): PushMessageHandler =
return proc(
peer: PeerId, pubsubTopic: string, message: WakuMessage
pubsubTopic: string, message: WakuMessage
): Future[WakuLightPushResult] {.async.} =
# append RLN proof
let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message).valueOr:

View File

@ -5,7 +5,6 @@ import libp2p/peerid, libp2p/stream/connection
import
../waku_core/peers,
../node/peer_manager,
../node/delivery_monitor/publish_observer,
../utils/requests,
../waku_core,
./common,
@ -19,16 +18,12 @@ logScope:
type WakuLightPushClient* = ref object
rng*: ref rand.HmacDrbgContext
peerManager*: PeerManager
publishObservers: seq[PublishObserver]
proc new*(
T: type WakuLightPushClient, peerManager: PeerManager, rng: ref rand.HmacDrbgContext
): T =
WakuLightPushClient(peerManager: peerManager, rng: rng)
proc addPublishObserver*(wl: WakuLightPushClient, obs: PublishObserver) =
wl.publishObservers.add(obs)
proc ensureTimestampSet(message: var WakuMessage) =
if message.timestamp == 0:
message.timestamp = getNowInNanosecondTime()
@ -40,36 +35,43 @@ func shortPeerId(peer: PeerId): string =
func shortPeerId(peer: RemotePeerInfo): string =
shortLog(peer.peerId)
proc sendPushRequestToConn(
wl: WakuLightPushClient, request: LightPushRequest, conn: Connection
proc sendPushRequest(
wl: WakuLightPushClient,
req: LightPushRequest,
peer: PeerId | RemotePeerInfo,
conn: Option[Connection] = none(Connection),
): Future[WakuLightPushResult] {.async.} =
try:
await conn.writeLp(request.encode().buffer)
except LPStreamRemoteClosedError:
error "Failed to write request to peer", error = getCurrentExceptionMsg()
return lightpushResultInternalError(
"Failed to write request to peer: " & getCurrentExceptionMsg()
)
let connection = conn.valueOr:
(await wl.peerManager.dialPeer(peer, WakuLightPushCodec)).valueOr:
waku_lightpush_v3_errors.inc(labelValues = [dialFailure])
return lighpushErrorResult(
LightPushErrorCode.NO_PEERS_TO_RELAY,
dialFailure & ": " & $peer & " is not accessible",
)
defer:
await connection.closeWithEOF()
await connection.writeLP(req.encode().buffer)
var buffer: seq[byte]
try:
buffer = await conn.readLp(DefaultMaxRpcSize.int)
buffer = await connection.readLp(DefaultMaxRpcSize.int)
except LPStreamRemoteClosedError:
error "Failed to read response from peer", error = getCurrentExceptionMsg()
return lightpushResultInternalError(
"Failed to read response from peer: " & getCurrentExceptionMsg()
)
let response = LightpushResponse.decode(buffer).valueOr:
error "failed to decode response", error = $error
error "failed to decode response"
waku_lightpush_v3_errors.inc(labelValues = [decodeRpcFailure])
return lightpushResultInternalError(decodeRpcFailure)
let requestIdMismatch = response.requestId != request.requestId
let tooManyRequests = response.statusCode == LightPushErrorCode.TOO_MANY_REQUESTS
if requestIdMismatch and (not tooManyRequests):
# response with TOO_MANY_REQUESTS error code has no requestId by design
if response.requestId != req.requestId and
response.statusCode != LightPushErrorCode.TOO_MANY_REQUESTS:
error "response failure, requestId mismatch",
requestId = request.requestId, responseRequestId = response.requestId
requestId = req.requestId, responseRequestId = response.requestId
return lightpushResultInternalError("response failure, requestId mismatch")
return toPushResult(response)
@ -80,37 +82,32 @@ proc publish*(
wakuMessage: WakuMessage,
dest: Connection | PeerId | RemotePeerInfo,
): Future[WakuLightPushResult] {.async, gcsafe.} =
let conn =
when dest is Connection:
dest
else:
(await wl.peerManager.dialPeer(dest, WakuLightPushCodec)).valueOr:
waku_lightpush_v3_errors.inc(labelValues = [dialFailure])
return lighpushErrorResult(
LightPushErrorCode.NO_PEERS_TO_RELAY,
"Peer is not accessible: " & dialFailure & " - " & $dest,
)
defer:
await conn.closeWithEOF()
var message = wakuMessage
ensureTimestampSet(message)
let msgHash = computeMessageHash(pubSubTopic.get(""), message).to0xHex()
let peerIdStr =
when dest is Connection:
shortPeerId(dest.peerId)
else:
shortPeerId(dest)
info "publish",
myPeerId = wl.peerManager.switch.peerInfo.peerId,
peerId = shortPeerId(conn.peerId),
peerId = peerIdStr,
msgHash = msgHash,
sentTime = getNowInNanosecondTime()
let request = LightpushRequest(
requestId: generateRequestId(wl.rng), pubsubTopic: pubSubTopic, message: message
)
let relayPeerCount = ?await wl.sendPushRequestToConn(request, conn)
for obs in wl.publishObservers:
obs.onMessagePublished(pubSubTopic.get(""), message)
let relayPeerCount =
when dest is Connection:
?await wl.sendPushRequest(request, dest.peerId, some(dest))
else:
?await wl.sendPushRequest(request, dest)
return lightpushSuccessResult(relayPeerCount)
@ -124,3 +121,12 @@ proc publishToAny*(
LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers"
)
return await wl.publish(some(pubsubTopic), wakuMessage, peer)
proc publishWithConn*(
wl: WakuLightPushClient,
pubSubTopic: PubsubTopic,
message: WakuMessage,
conn: Connection,
destPeer: PeerId,
): Future[WakuLightPushResult] {.async, gcsafe.} =
return await wl.publish(some(pubSubTopic), message, conn)
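With dialing folded into `sendPushRequest`, the simplest client path is `publishToAny`, which selects a connected lightpush service peer itself. A hedged sketch (imports elided; `wl` assumed initialized):

proc lightpushExample(wl: WakuLightPushClient, msg: WakuMessage) {.async.} =
  let relayPeerCount = (await wl.publishToAny("/waku/2/rs/1/0", msg)).valueOr:
    error "lightpush failed", code = error.code
    return
  info "lightpush accepted", relayPeerCount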

View File

@ -25,7 +25,7 @@ type ErrorStatus* = tuple[code: LightpushStatusCode, desc: Option[string]]
type WakuLightPushResult* = Result[uint32, ErrorStatus]
type PushMessageHandler* = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult] {.async.}
const TooManyRequestsMessage* = "Request rejected due to too many requests"
@ -39,7 +39,7 @@ func toPushResult*(response: LightPushResponse): WakuLightPushResult =
return (
if (relayPeerCount == 0):
# Consider publishing to zero peers an error even if the service node
# sent us a "successful" response with zero peers
# sent us a "successful" response with zero peers
err((LightPushErrorCode.NO_PEERS_TO_RELAY, response.statusDesc))
else:
ok(relayPeerCount)

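A minimal handler conforming to the reduced two-argument `PushMessageHandler` shape above, now that the peer parameter is gone (the body is illustrative):

let handler: PushMessageHandler = proc(
    pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult] {.async.} =
  # pretend exactly one relay peer received the message
  return lightpushSuccessResult(1)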
View File

@ -71,7 +71,7 @@ proc handleRequest(
msg_hash = msg_hash,
receivedTime = getNowInNanosecondTime()
let res = (await wl.pushHandler(peerId, pubsubTopic, pushRequest.message)).valueOr:
let res = (await wl.pushHandler(pubsubTopic, pushRequest.message)).valueOr:
return err((code: error.code, desc: error.desc))
return ok(res)

View File

@ -30,7 +30,7 @@ proc checkAndGenerateRLNProof*(
proc getNilPushHandler*(): PushMessageHandler =
return proc(
peer: PeerId, pubsubTopic: string, message: WakuMessage
pubsubTopic: string, message: WakuMessage
): Future[WakuLightPushResult[void]] {.async.} =
return err("no waku relay found")
@ -38,7 +38,7 @@ proc getRelayPushHandler*(
wakuRelay: WakuRelay, rlnPeer: Option[WakuRLNRelay] = none[WakuRLNRelay]()
): PushMessageHandler =
return proc(
peer: PeerId, pubsubTopic: string, message: WakuMessage
pubsubTopic: string, message: WakuMessage
): Future[WakuLightPushResult[void]] {.async.} =
# append RLN proof
let msgWithProof = ?checkAndGenerateRLNProof(rlnPeer, message)

View File

@ -5,7 +5,6 @@ import libp2p/peerid
import
../waku_core/peers,
../node/peer_manager,
../node/delivery_monitor/publish_observer,
../utils/requests,
../waku_core,
./common,
@ -19,7 +18,6 @@ logScope:
type WakuLegacyLightPushClient* = ref object
peerManager*: PeerManager
rng*: ref rand.HmacDrbgContext
publishObservers: seq[PublishObserver]
proc new*(
T: type WakuLegacyLightPushClient,
@ -28,9 +26,6 @@ proc new*(
): T =
WakuLegacyLightPushClient(peerManager: peerManager, rng: rng)
proc addPublishObserver*(wl: WakuLegacyLightPushClient, obs: PublishObserver) =
wl.publishObservers.add(obs)
proc sendPushRequest(
wl: WakuLegacyLightPushClient, req: PushRequest, peer: PeerId | RemotePeerInfo
): Future[WakuLightPushResult[void]] {.async, gcsafe.} =
@ -86,9 +81,6 @@ proc publish*(
let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message)
?await wl.sendPushRequest(pushRequest, peer)
for obs in wl.publishObservers:
obs.onMessagePublished(pubSubTopic, message)
notice "publishing message with lightpush",
pubsubTopic = pubsubTopic,
contentTopic = message.contentTopic,
@ -111,7 +103,4 @@ proc publishToAny*(
let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message)
?await wl.sendPushRequest(pushRequest, peer)
for obs in wl.publishObservers:
obs.onMessagePublished(pubSubTopic, message)
return ok()

View File

@ -9,7 +9,7 @@ export WakuLegacyLightPushCodec
type WakuLightPushResult*[T] = Result[T, string]
type PushMessageHandler* = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult[void]] {.async.}
const TooManyRequestsMessage* = "TOO_MANY_REQUESTS"

View File

@ -53,7 +53,7 @@ proc handleRequest*(
msg_hash = msg_hash,
receivedTime = getNowInNanosecondTime()
let handleRes = await wl.pushHandler(peerId, pubsubTopic, message)
let handleRes = await wl.pushHandler(pubsubTopic, message)
isSuccess = handleRes.isOk()
pushResponseInfo = (if isSuccess: "OK" else: handleRes.error)

View File

@ -1,3 +1,4 @@
import ./waku_relay/[protocol, topic_health]
import ./waku_relay/protocol
import waku/node/health_monitor/topic_health
export protocol, topic_health

View File

@ -17,8 +17,13 @@ import
libp2p/protocols/pubsub/rpc/messages,
libp2p/stream/connection,
libp2p/switch
import
../waku_core, ./message_id, ./topic_health, ../node/delivery_monitor/publish_observer
waku/waku_core,
waku/node/health_monitor/topic_health,
waku/requests/health_request,
./message_id,
waku/common/broker/broker_context
from ../waku_core/codecs import WakuRelayCodec
export WakuRelayCodec
@ -157,7 +162,6 @@ type
# map topic with its assigned validator within pubsub
topicHandlers: Table[PubsubTopic, TopicHandler]
# map topic with the TopicHandler proc in charge of attending topic's incoming message events
publishObservers: seq[PublishObserver]
topicsHealth*: Table[string, TopicHealth]
onTopicHealthChange*: TopicHealthChangeHandler
topicHealthLoopHandle*: Future[void]
@ -321,6 +325,18 @@ proc initRelayObservers(w: WakuRelay) =
w.addObserver(administrativeObserver)
proc initRequestProviders(w: WakuRelay) =
RequestRelayTopicsHealth.setProvider(
globalBrokerContext(),
proc(topics: seq[PubsubTopic]): Result[RequestRelayTopicsHealth, string] =
var collectedRes: RequestRelayTopicsHealth
for topic in topics:
let health = w.topicsHealth.getOrDefault(topic, TopicHealth.NOT_SUBSCRIBED)
collectedRes.topicHealth.add((topic, health))
return ok(collectedRes),
).isOkOr:
error "Cannot set Relay Topics Health request provider", error = error
proc new*(
T: type WakuRelay, switch: Switch, maxMessageSize = int(DefaultMaxWakuMessageSize)
): WakuRelayResult[T] =
@ -340,9 +356,10 @@ proc new*(
)
procCall GossipSub(w).initPubSub()
w.topicsHealth = initTable[string, TopicHealth]()
w.initProtocolHandler()
w.initRelayObservers()
w.topicsHealth = initTable[string, TopicHealth]()
w.initRequestProviders()
except InitializationError:
return err("initialization error: " & getCurrentExceptionMsg())
@ -353,12 +370,6 @@ proc addValidator*(
) {.gcsafe.} =
w.wakuValidators.add((handler, errorMessage))
proc addPublishObserver*(w: WakuRelay, obs: PublishObserver) =
## Observer when the api client performed a publish operation. This
## is initially aimed for bringing an additional layer of delivery reliability thanks
## to store
w.publishObservers.add(obs)
proc addObserver*(w: WakuRelay, observer: PubSubObserver) {.gcsafe.} =
## Observes when a message is sent/received from the GossipSub PoV
procCall GossipSub(w).addObserver(observer)
@ -573,6 +584,7 @@ proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandle
procCall GossipSub(w).subscribe(pubsubTopic, topicHandler)
w.topicHandlers[pubsubTopic] = topicHandler
asyncSpawn w.updateTopicsHealth()
proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) =
## Unsubscribe all handlers on this pubsub topic
@ -628,9 +640,6 @@ proc publish*(
if relayedPeerCount <= 0:
return err(NoPeersToPublish)
for obs in w.publishObservers:
obs.onMessagePublished(pubSubTopic, message)
return ok(relayedPeerCount)
proc getConnectedPubSubPeers*(

View File

@ -24,10 +24,14 @@ import
./nonce_manager
import
../common/error_handling,
../waku_relay, # for WakuRelayHandler
../waku_core,
../waku_keystore
waku/[
common/error_handling,
waku_relay, # for WakuRelayHandler
waku_core,
requests/rln_requests,
waku_keystore,
common/broker/broker_context,
]
logScope:
topics = "waku rln_relay"
@ -65,6 +69,7 @@ type WakuRLNRelay* = ref object of RootObj
nonceManager*: NonceManager
epochMonitorFuture*: Future[void]
rootChangesFuture*: Future[void]
brokerCtx*: BrokerContext
proc calcEpoch*(rlnPeer: WakuRLNRelay, t: float64): Epoch =
## gets time `t` as `float64` with subseconds resolution in the fractional part
@ -91,6 +96,7 @@ proc stop*(rlnPeer: WakuRLNRelay) {.async: (raises: [Exception]).} =
# stop the group sync, and flush data to tree db
info "stopping rln-relay"
RequestGenerateRlnProof.clearProvider(rlnPeer.brokerCtx)
await rlnPeer.groupManager.stop()
proc hasDuplicate*(
@ -275,11 +281,11 @@ proc validateMessageAndUpdateLog*(
return isValidMessage
proc appendRLNProof*(
rlnPeer: WakuRLNRelay, msg: var WakuMessage, senderEpochTime: float64
): RlnRelayResult[void] =
## returns true if it can create and append a `RateLimitProof` to the supplied `msg`
## returns false otherwise
proc createRlnProof(
rlnPeer: WakuRLNRelay, msg: WakuMessage, senderEpochTime: float64
): RlnRelayResult[seq[byte]] =
## returns a new `RateLimitProof` for the supplied `msg`
## returns an error if it cannot create the proof
## `senderEpochTime` indicates the number of seconds passed since Unix epoch. The fractional part holds sub-seconds.
## The `epoch` field of `RateLimitProof` is derived from the provided `senderEpochTime` (using `calcEpoch()`)
@ -291,7 +297,14 @@ proc appendRLNProof*(
let proof = rlnPeer.groupManager.generateProof(input, epoch, nonce).valueOr:
return err("could not generate rln-v2 proof: " & $error)
msg.proof = proof.encode().buffer
return ok(proof.encode().buffer)
proc appendRLNProof*(
rlnPeer: WakuRLNRelay, msg: var WakuMessage, senderEpochTime: float64
): RlnRelayResult[void] =
msg.proof = rlnPeer.createRlnProof(msg, senderEpochTime).valueOr:
return err($error)
return ok()
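`appendRLNProof` keeps its public contract but is now a thin wrapper over `createRlnProof`, which returns the encoded proof bytes (the same helper backs the broker provider registered below). A hedged usage sketch (`epochTime` is from std/times; other imports elided, `rlnPeer` assumed initialized):

import std/times

proc proofExample(rlnPeer: WakuRLNRelay, msg: var WakuMessage) =
  rlnPeer.appendRLNProof(msg, epochTime()).isOkOr:
    error "could not attach RLN proof", error = error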
proc clearNullifierLog*(rlnPeer: WakuRlnRelay) =
@ -429,6 +442,7 @@ proc mount(
rlnMaxEpochGap: max(uint64(MaxClockGapSeconds / float64(conf.epochSizeSec)), 1),
rlnMaxTimestampGap: uint64(MaxClockGapSeconds),
onFatalErrorAction: conf.onFatalErrorAction,
brokerCtx: globalBrokerContext(),
)
# track root changes on smart contract merkle tree
@ -438,6 +452,19 @@ proc mount(
# Start epoch monitoring in the background
wakuRlnRelay.epochMonitorFuture = monitorEpochs(wakuRlnRelay)
RequestGenerateRlnProof.setProvider(
wakuRlnRelay.brokerCtx,
proc(
msg: WakuMessage, senderEpochTime: float64
): Future[Result[RequestGenerateRlnProof, string]] {.async.} =
let proof = createRlnProof(wakuRlnRelay, msg, senderEpochTime).valueOr:
return err("Could not create RLN proof: " & $error)
return ok(RequestGenerateRlnProof(proof: proof)),
).isOkOr:
return err("Proof generator provider cannot be set: " & $error)
return ok(wakuRlnRelay)
proc isReady*(rlnPeer: WakuRLNRelay): Future[bool] {.async: (raises: [Exception]).} =