Rework lightpush and legacy lightpush rate-limit tests to eliminate timing effects in CI, where slower runs cause longer awaits and thus mint new tokens, unlike local runs

This commit is contained in:
NagyZoltanPeter 2026-01-07 14:10:39 +01:00
parent d0e5a28679
commit b0f056468c
No known key found for this signature in database
GPG Key ID: 3E1F97CF4A7B6F42
2 changed files with 29 additions and 37 deletions

View File

@ -83,11 +83,9 @@ suite "Rate limited push service":
# Don't rely on per-request timing assumptions or a single shared Future.
# CI can be slow enough that sequential requests accidentally refill tokens.
# Instead we issue a small burst and assert we observe at least one rejection.
var handledCount = 0
let handler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult] {.async.} =
inc handledCount
return lightpushSuccessResult(1)
let

View File

@ -86,58 +86,52 @@ suite "Rate limited push service":
await allFutures(serverSwitch.start(), clientSwitch.start())
## Given
var handlerFuture = newFuture[(string, WakuMessage)]()
let handler = proc(
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult[void]] {.async.} =
handlerFuture.complete((pubsubTopic, message))
return ok()
let
tokenPeriod = 500.millis
server = await newTestWakuLegacyLightpushNode(
serverSwitch, handler, some((3, 500.millis))
serverSwitch, handler, some((3, tokenPeriod))
)
client = newTestWakuLegacyLightpushClient(clientSwitch)
let serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo()
let topic = DefaultPubsubTopic
let successProc = proc(): Future[void] {.async.} =
let message = fakeWakuMessage()
handlerFuture = newFuture[(string, WakuMessage)]()
let requestRes =
await client.publish(DefaultPubsubTopic, message, peer = serverPeerId)
discard await handlerFuture.withTimeout(10.millis)
# Avoid assuming the exact Nth request will be rejected. With Chronos TokenBucket
# minting semantics and real network latency, CI timing can allow refills.
# Instead, send a short burst and require that we observe at least one rejection.
let burstSize = 10
var publishFutures: seq[Future[WakuLightPushResult[string]]] = @[]
for _ in 0 ..< burstSize:
publishFutures.add(
client.publish(DefaultPubsubTopic, fakeWakuMessage(), peer = serverPeerId)
)
check:
requestRes.isOk()
handlerFuture.finished()
let (handledMessagePubsubTopic, handledMessage) = handlerFuture.read()
check:
handledMessagePubsubTopic == DefaultPubsubTopic
handledMessage == message
let finished = await allFinished(publishFutures)
var gotOk = false
var gotTooMany = false
for fut in finished:
check not fut.failed()
let res = fut.read()
if res.isOk():
gotOk = true
elif res.error == "TOO_MANY_REQUESTS":
gotTooMany = true
let rejectProc = proc(): Future[void] {.async.} =
let message = fakeWakuMessage()
handlerFuture = newFuture[(string, WakuMessage)]()
let requestRes =
await client.publish(DefaultPubsubTopic, message, peer = serverPeerId)
discard await handlerFuture.withTimeout(10.millis)
check:
gotOk
gotTooMany
check:
requestRes.isErr()
requestRes.error == "TOO_MANY_REQUESTS"
for testCnt in 0 .. 2:
await successProc()
await sleepAsync(5.millis)
await rejectProc()
await sleepAsync(500.millis)
await sleepAsync(tokenPeriod + 100.millis)
## next one shall succeed due to the rate limit time window has passed
await successProc()
let afterCooldownRes =
await client.publish(DefaultPubsubTopic, fakeWakuMessage(), peer = serverPeerId)
check:
afterCooldownRes.isOk()
## Cleanup
await allFutures(clientSwitch.stop(), serverSwitch.stop())