mirror of
https://github.com/logos-messaging/logos-delivery.git
synced 2026-05-05 22:39:33 +00:00
* DOS protection of non relay protocols - rate limit phase3: - Enhanced TokenBucket to be able to add compensation tokens based on previous usage percentage, - per peer rate limiter 'PeerRateLimiter' applied on waku_filter_v2 with opinionated default of acceptable request rate - Add traffic metrics to filter message push - RequestRateLimiter added to combine simple token bucket limiting of request numbers while considering per-peer usage over time, preventing some peers from overusing the service (although currently rule-violating peers will not be disconnected at this time — only their requests will not be served) - TimedMap utility created (inspired by and taken from libp2p TimedCache) which serves as a forgiving feature for peers that had been overusing the service. - Added more tests - Fix rebase issues - Applied new RequestRateLimiter for store and legacy_store and lightpush * Incorporate review comments, typos, file/class naming and placement changes. * Add issue link reference of the original issue with nim-chronos TokenBucket * Make TimedEntry of TimedMap private and not mixable with the similarly named one in libp2p * Fix review comments, renamings, const instead of values and more comments.
41 lines
1.4 KiB
Nim
41 lines
1.4 KiB
Nim
## PerPeerRateLimiter
##
## With this class one can easily track usage of a service per PeerId.
## The rate limit is applied separately for each peer upon first use, and the
## time period is also counted distinctly per peer.
## It uses compensating replenish mode to balance the load and allow fair
## usage of the service across peers.
|
|
|
|
|
|
|
|
{.push raises: [].}
|
|
|
|
import std/[options, tables], chronos/timer, libp2p/stream/connection, libp2p/utility
|
|
|
|
import ./[single_token_limiter, service_metrics], ../../utils/tableutils
|
|
|
|
export token_bucket, setting, service_metrics
|
|
|
|
type PerPeerRateLimiter* = ref object of RootObj
  ## Rate limiter that enforces the same RateLimitSetting independently
  ## for every PeerId, with buckets created lazily on first use.
  setting*: Option[RateLimitSetting]
    # Limit configuration shared by all peers; passed to newTokenBucket
    # for each peer (presumably none() means "no limiting" — TODO confirm
    # against newTokenBucket in single_token_limiter).
  peerBucket: Table[PeerId, Option[TokenBucket]]
    # Per-peer token buckets, populated lazily by mgetOrPut below.
|
|
|
|
proc mgetOrPut(
    perPeerRateLimiter: var PerPeerRateLimiter, peerId: PeerId
): var Option[TokenBucket] =
  ## Returns a mutable view of the token bucket tracking `peerId`.
  ## On first access for a given peer, a fresh bucket is created from the
  ## limiter's shared `setting` using compensating replenish mode, so each
  ## peer's time window and balance are tracked independently.
  perPeerRateLimiter.peerBucket.mgetOrPut(
    peerId, newTokenBucket(perPeerRateLimiter.setting, ReplenishMode.Compensating)
  )
|
|
|
|
template checkUsageLimit*(
    t: var PerPeerRateLimiter,
    proto: string,
    conn: Connection,
    bodyWithinLimit, bodyRejected: untyped,
) =
  ## Runs `bodyWithinLimit` when the peer behind `conn` is within its rate
  ## limit, otherwise runs `bodyRejected`.
  ##
  ## Delegates to the single-bucket `checkUsageLimit` (from
  ## single_token_limiter), handing it the bucket owned by `conn.peerId` —
  ## created on first use via `mgetOrPut` — so limiting is per peer.
  ## `proto` presumably labels the protocol for metrics/logging in the
  ## delegated template — TODO confirm in single_token_limiter.
  checkUsageLimit(t.mgetOrPut(conn.peerId), proto, conn, bodyWithinLimit, bodyRejected)
|
|
|
|
proc unregister*(perPeerRateLimiter: var PerPeerRateLimiter, peerId: PeerId) =
  ## Stops tracking `peerId`: drops its token bucket, if any.
  ## A subsequent use of the peer will start from a fresh bucket.
  del(perPeerRateLimiter.peerBucket, peerId)
|
|
|
|
proc unregister*(perPeerRateLimiter: var PerPeerRateLimiter, peerIds: seq[PeerId]) =
  ## Stops tracking every peer in `peerIds`, dropping their token buckets.
  ##
  ## Deletes each requested key directly — O(len(peerIds)) table deletions —
  ## instead of scanning the whole table with a linear `notin peerIds`
  ## membership test per entry (O(buckets * peerIds)) as keepItIf would.
  ## `Table.del` is a no-op for absent keys, so peers that were never
  ## registered are silently skipped, matching the previous behavior.
  for peerId in peerIds:
    perPeerRateLimiter.peerBucket.del(peerId)
|