mirror of
https://github.com/logos-messaging/logos-delivery.git
synced 2026-03-16 22:13:38 +00:00
* Adapt using chronos' TokenBucket. Removed TokenBucket and test. bump nim-chronos -> nim-libp2p/nim-lsquic/nim-jwt -> adapt to latest libp2p changes * Fix libp2p/utility reports unlisted exception can occure from close of socket in waitForService - -d:ssl compile flag caused it * Adapt request_limiter to new chronos' TokenBucket replenish algorithm to keep original intent of use * Fix filter dos protection test * Fix peer manager tests due change caused by new libp2p * Adjust store test rate limit to eliminate CI test flakyness of timing * Adjust store test rate limit to eliminate CI test flakyness of timing - lightpush/legacy_lightpush/filter * Rework filter dos protection test to avoid CI crazy timing causing flakyness in test results compared to local runs * Rework lightpush dos protection test to avoid CI crazy timing causing flakyness in test results compared to local runs * Rework lightpush and legacy lightpush rate limit tests to eliminate timing effect in CI that cause longer awaits thus result in minting new tokens unlike local runs
39 lines
1.4 KiB
Nim
39 lines
1.4 KiB
Nim
## PerPeerRateLimiter
|
|
##
|
|
## Tracks usage of a service per PeerId.
|
|
## The rate limit is applied separately for each peer upon first use, and the time period is counted distinctly per peer.
|
|
## Buckets are created in continuous replenish mode (see the newTokenBucket call below) to balance the load and allow fair usage of the service.
|
|
|
|
{.push raises: [].}
|
|
|
|
import std/[options, tables], libp2p/stream/connection
|
|
|
|
import ./[single_token_limiter, service_metrics], ../../utils/tableutils
|
|
|
|
export token_bucket, setting, service_metrics
|
|
|
|
type PerPeerRateLimiter* = ref object of RootObj
  ## Rate limiter that maintains one token bucket per PeerId, created
  ## lazily on a peer's first use (see `mgetOrPut` below).
  setting*: Option[RateLimitSetting]
    ## Limit configuration applied to every newly created per-peer bucket.
    ## NOTE(review): `none(RateLimitSetting)` presumably means "unlimited" —
    ## confirm against `newTokenBucket` in single_token_limiter.
  peerBucket: Table[PeerId, Option[TokenBucket]]
    ## Per-peer buckets; entries are removed via `unregister`.
|
|
|
|
proc mgetOrPut(
    limiter: var PerPeerRateLimiter, peerId: PeerId
): var Option[TokenBucket] =
  ## Returns a mutable view of the bucket tracked for `peerId`, lazily
  ## creating it from `limiter.setting` (continuous replenish mode) when
  ## the peer is seen for the first time.
  limiter.peerBucket.mgetOrPut(
    peerId, newTokenBucket(limiter.setting, ReplenishMode.Continuous)
  )
|
|
|
|
template checkUsageLimit*(
    t: var PerPeerRateLimiter,
    proto: string,
    conn: Connection,
    bodyWithinLimit, bodyRejected: untyped,
) =
  ## Per-peer usage guard: fetches (or lazily creates) the token bucket for
  ## `conn.peerId` and forwards to the single-bucket `checkUsageLimit`
  ## overload, which dispatches to `bodyWithinLimit` or `bodyRejected`.
  ## NOTE(review): the accept/reject decision and any metrics reporting for
  ## `proto` happen in single_token_limiter — confirm semantics there.
  checkUsageLimit(t.mgetOrPut(conn.peerId), proto, conn, bodyWithinLimit, bodyRejected)
|
|
|
|
proc unregister*(perPeerRateLimiter: var PerPeerRateLimiter, peerId: PeerId) =
  ## Stops tracking `peerId`: drops its bucket so a later use starts
  ## fresh with a newly created bucket. No-op if the peer is unknown.
  del(perPeerRateLimiter.peerBucket, peerId)
|
|
|
|
proc unregister*(perPeerRateLimiter: var PerPeerRateLimiter, peerIds: seq[PeerId]) =
  ## Stops tracking every peer in `peerIds`, dropping their buckets.
  ## Unknown peers are silently skipped (`del` on a missing key is a no-op).
  for peerId in peerIds:
    perPeerRateLimiter.peerBucket.del(peerId)
|