mirror of
https://github.com/logos-messaging/logos-delivery.git
synced 2026-03-06 17:13:35 +00:00
* Introduce api/send Added events and requests for support. Reworked delivery_monitor into a featured delivery_service, that - supports relay publish and lightpush depending on configuration but with fallback options - if available and configured it utilizes store api to confirm message delivery - emits message delivery events accordingly prepare for use in api_example * Fix edge mode config and test added * Fix some import issues, start and stop waku shall not throw exception but return with result properly * Utilize sync RequestBroker, adapt to non-async broker usage and gcsafe where appropriate, removed leftover * add api_example app to examples2 * Adapt after merge from master * Adapt code for using broker context * Fix brokerCtx settings for all used brokers, cover locked node init * Various fixes upon test failures. Added initial of subscribe API and auto-subscribe for send api * More test added * Fix multi propagate event emit, fix fail send test case * Fix rebase * Fix PushMessageHandlers in tests * adapt libwaku to api changes * Fix relay test by adapting publish return error in case NoPeersToPublish * Addressing all remaining review findings. Removed leftovers. Fixed loggings and typos * Fix rln relay broker, missed brokerCtx * Fix rest relay test failed, due to publish will fail if no peer avail * ignore anvil test state file * Make test_wakunode_rln_relay broker context aware to fix * Fix waku rln tests by having them broker context aware * fix typo in test_app.nim
138 lines
4.1 KiB
Nim
138 lines
4.1 KiB
Nim
{.used.}
|
|
|
|
import std/options, testutils/unittests, chronos, libp2p/crypto/crypto
|
|
|
|
import
|
|
waku/[
|
|
node/peer_manager,
|
|
waku_core,
|
|
waku_lightpush_legacy,
|
|
waku_lightpush_legacy/client,
|
|
waku_lightpush_legacy/common,
|
|
],
|
|
../testlib/wakucore,
|
|
./lightpush_utils
|
|
|
|
suite "Rate limited push service":
  ## Exercises the legacy lightpush server's request rate limiting
  ## (TokenBucket of 3 tokens per `tokenPeriod`): one test stays within the
  ## limit across several refill windows, one test forces rejections.

  asyncTest "push message with rate limit not violated":
    ## Setup
    let
      serverSwitch = newTestSwitch()
      clientSwitch = newTestSwitch()

    await allFutures(serverSwitch.start(), clientSwitch.start())

    ## Given
    # The handler completes `handlerFuture` so each send can be verified
    # to have reached the server with the expected topic and payload.
    var handlerFuture = newFuture[(string, WakuMessage)]()
    let handler: PushMessageHandler = proc(
        pubsubTopic: PubsubTopic, message: WakuMessage
    ): Future[WakuLightPushResult[void]] {.async.} =
      handlerFuture.complete((pubsubTopic, message))
      return ok()

    let
      tokenPeriod = 500.millis
      # Rate limit: at most 3 requests per `tokenPeriod`.
      server = await newTestWakuLegacyLightpushNode(
        serverSwitch, handler, some((3, tokenPeriod))
      )
      client = newTestWakuLegacyLightpushClient(clientSwitch)

    let serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo()

    # Sends one message and verifies it was delivered to the server handler.
    let sendMsgProc = proc(): Future[void] {.async.} =
      let message = fakeWakuMessage()

      handlerFuture = newFuture[(string, WakuMessage)]()
      let requestRes =
        await client.publish(DefaultPubsubTopic, message, peer = serverPeerId)

      check await handlerFuture.withTimeout(50.millis)

      assert requestRes.isOk(), requestRes.error
      check handlerFuture.finished()

      let (handledMessagePubsubTopic, handledMessage) = handlerFuture.read()

      check:
        handledMessagePubsubTopic == DefaultPubsubTopic
        handledMessage == message

    # Pause between sends inside one burst, so a 3-message burst stays
    # comfortably inside a single token window.
    let waitInBetweenFor = 20.millis

    # Test cannot be too explicit about the time when the TokenBucket resets
    # the internal timer, although in normal use there is no use case to care about it.
    # The first wait is extended further to be sure the very first window has
    # fully elapsed before the next burst; later waits need less slack.
    var firstWaitExtend = 300.millis

    for runCnt in 0 ..< 3:
      let startTime = Moment.now()
      for testCnt in 0 ..< 3:
        await sendMsgProc()
        await sleepAsync(waitInBetweenFor)

      # Wait out the remainder of the token window (plus slack) so the next
      # 3-message burst starts with a refilled bucket.
      # NOTE(review): assumes a burst finishes well within `tokenPeriod`;
      # if `elapsed` ever exceeded it the sleep duration would go negative.
      let endTime = Moment.now()
      let elapsed: Duration = (endTime - startTime)
      await sleepAsync(tokenPeriod - elapsed + firstWaitExtend)
      firstWaitExtend = 100.millis

    ## Cleanup
    await allFutures(clientSwitch.stop(), serverSwitch.stop())

  asyncTest "push message with rate limit reject":
    ## Setup
    let
      serverSwitch = newTestSwitch()
      clientSwitch = newTestSwitch()

    await allFutures(serverSwitch.start(), clientSwitch.start())

    ## Given
    # Handler accepts everything; rejections must come from the rate limiter.
    let handler = proc(
        pubsubTopic: PubsubTopic, message: WakuMessage
    ): Future[WakuLightPushResult[void]] {.async.} =
      return ok()

    let
      tokenPeriod = 500.millis
      # Rate limit: at most 3 requests per `tokenPeriod`.
      server = await newTestWakuLegacyLightpushNode(
        serverSwitch, handler, some((3, tokenPeriod))
      )
      client = newTestWakuLegacyLightpushClient(clientSwitch)

    let serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo()

    # Avoid assuming the exact Nth request will be rejected. With Chronos TokenBucket
    # minting semantics and real network latency, CI timing can allow refills.
    # Instead, send a short burst and require that we observe at least one rejection.
    let burstSize = 10
    var publishFutures: seq[Future[WakuLightPushResult[string]]] = @[]
    for _ in 0 ..< burstSize:
      publishFutures.add(
        client.publish(DefaultPubsubTopic, fakeWakuMessage(), peer = serverPeerId)
      )

    let finished = await allFinished(publishFutures)
    var gotOk = false
    var gotTooMany = false
    for fut in finished:
      check not fut.failed()
      let res = fut.read()
      if res.isOk():
        gotOk = true
      elif res.error == "TOO_MANY_REQUESTS":
        gotTooMany = true

    # The burst must contain both accepted and rate-limited requests.
    check:
      gotOk
      gotTooMany

    await sleepAsync(tokenPeriod + 100.millis)

    ## next one shall succeed due to the rate limit time window has passed
    let afterCooldownRes =
      await client.publish(DefaultPubsubTopic, fakeWakuMessage(), peer = serverPeerId)
    check:
      afterCooldownRes.isOk()

    ## Cleanup
    await allFutures(clientSwitch.stop(), serverSwitch.stop())