Add a flag controlling whether a peer is disconnected when above the rate limit (#954)
parent 575344e2e9
commit 459f6851e7
@@ -80,7 +80,8 @@ proc init*(_: type[GossipSubParams]): GossipSubParams =
       enablePX: false,
       bandwidthEstimatebps: 100_000_000, # 100 Mbps or 12.5 MBps
       iwantTimeout: 3 * GossipSubHeartbeatInterval,
-      overheadRateLimit: Opt.none(tuple[bytes: int, interval: Duration])
+      overheadRateLimit: Opt.none(tuple[bytes: int, interval: Duration]),
+      disconnectPeerAboveRateLimit: false
     )

 proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
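The defaults above leave the new flag off. For reference, a minimal usage sketch (not part of this diff) of turning it on together with an overhead budget; the import paths and the `results`/`1.seconds` helpers are assumptions about the usual nim-libp2p/chronos dependency set:

# Usage sketch (not part of the diff): enable the new flag together with a
# byte budget; import paths below are assumptions.
import chronos                            # Duration literal 1.seconds
import results                            # Opt.some / Opt.none
import libp2p/protocols/pubsub/gossipsub  # assumed path for GossipSubParams

var params = GossipSubParams.init()
params.overheadRateLimit = Opt.some((bytes: 4096, interval: 1.seconds))  # 4 KiB of non-useful bytes per second
params.disconnectPeerAboveRateLimit = true                               # drop peers that exceed that budget
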
@@ -384,17 +385,17 @@ proc validateAndRelay(g: GossipSub,
 proc dataAndTopicsIdSize(msgs: seq[Message]): int =
   msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)

-proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.raises:[PeerRateLimitError, CatchableError].} =
+proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.raises:[PeerRateLimitError, CatchableError], async.} =
   # In this way we count even ignored fields by protobuf

   var rmsg = rpcMsgOpt.valueOr:
     peer.overheadRateLimitOpt.withValue(overheadRateLimit):
       if not overheadRateLimit.tryConsume(msgSize):
-        libp2p_gossipsub_peers_rate_limit_disconnections.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
-        debug "Peer sent a msg that couldn't be decoded and it's above rate limit", peer, uselessAppBytesNum = msgSize
-        # discard g.disconnectPeer(peer)
-        # debug "Peer disconnected", peer, uselessAppBytesNum = msgSize
-        # raise newException(PeerRateLimitError, "Peer sent a msg that couldn't be decoded and it's above rate limit")
+        libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
+        debug "Peer sent a msg that couldn't be decoded and it's above rate limit.", peer, uselessAppBytesNum = msgSize
+        if g.parameters.disconnectPeerAboveRateLimit:
+          await g.disconnectPeer(peer)
+          raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")

     raise newException(CatchableError, "Peer msg couldn't be decoded")

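The `tryConsume(msgSize)` calls above rely on token-bucket semantics: a budget of `bytes` that refills every `interval`, with `tryConsume` returning false once the current window's budget is spent. A standalone sketch of that behaviour, assuming the bucket behind `overheadRateLimitOpt` is chronos' TokenBucket:

# Standalone sketch of the assumed token-bucket behaviour (TokenBucket from
# chronos/ratelimit): a fixed byte budget that refills once per interval.
import chronos
import chronos/ratelimit

var bucket = TokenBucket.new(1000, 1.seconds)  # budget: 1000 bytes per second
doAssert bucket.tryConsume(600)                # fits within the current window
doAssert not bucket.tryConsume(600)            # 600 + 600 > 1000: rejected until refill
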
@@ -410,11 +411,11 @@ proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize:

   peer.overheadRateLimitOpt.withValue(overheadRateLimit):
     if not overheadRateLimit.tryConsume(uselessAppBytesNum):
-      libp2p_gossipsub_peers_rate_limit_disconnections.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
+      libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
       debug "Peer sent too much useless application data and it's above rate limit.", peer, msgSize, uselessAppBytesNum, rmsg
-      # discard g.disconnectPeer(peer)
-      # debug "Peer disconnected", peer, msgSize, uselessAppBytesNum
-      # raise newException(PeerRateLimitError, "Peer sent too much useless application data and it's above rate limit.")
+      if g.parameters.disconnectPeerAboveRateLimit:
+        await g.disconnectPeer(peer)
+        raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")

 method rpcHandler*(g: GossipSub,
                    peer: PubSubPeer,
@@ -423,11 +424,11 @@ method rpcHandler*(g: GossipSub,
   let msgSize = data.len
   var rpcMsg = decodeRpcMsg(data).valueOr:
     debug "failed to decode msg from peer", peer, err = error
-    rateLimit(g, peer, Opt.none(RPCMsg), msgSize)
+    await rateLimit(g, peer, Opt.none(RPCMsg), msgSize)
     return

   trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
-  rateLimit(g, peer, Opt.some(rpcMsg), msgSize)
+  await rateLimit(g, peer, Opt.some(rpcMsg), msgSize)

   # trigger hooks
   peer.recvObservers(rpcMsg)
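Because `rateLimit` is now async and, with the flag enabled, disconnects the peer and raises `PeerRateLimitError`, callers that await it can treat that exception as "peer was dropped on purpose". A hedged call-site sketch (the `handleDecoded` helper is hypothetical and the import paths are assumptions):

# Hypothetical call-site sketch, not part of the diff: reacting to the
# PeerRateLimitError that rateLimit raises when disconnectPeerAboveRateLimit
# is enabled and the peer goes over its budget.
import chronos, chronicles
import libp2p/protocols/pubsub/gossipsub  # assumed import path

proc handleDecoded(g: GossipSub, peer: PubSubPeer, rpcMsg: RPCMsg, msgSize: int) {.async.} =
  try:
    await g.rateLimit(peer, Opt.some(rpcMsg), msgSize)
  except PeerRateLimitError as exc:
    debug "peer was disconnected for exceeding its rate limit", peer, msg = exc.msg
    return
  # ...continue with normal RPC processing...
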
@@ -30,7 +30,7 @@ declareGauge(libp2p_gossipsub_peers_score_invalidMessageDeliveries, "Detailed go
 declareGauge(libp2p_gossipsub_peers_score_appScore, "Detailed gossipsub scoring metric", labels = ["agent"])
 declareGauge(libp2p_gossipsub_peers_score_behaviourPenalty, "Detailed gossipsub scoring metric", labels = ["agent"])
 declareGauge(libp2p_gossipsub_peers_score_colocationFactor, "Detailed gossipsub scoring metric", labels = ["agent"])
-declarePublicCounter(libp2p_gossipsub_peers_rate_limit_disconnections, "The number of peer disconnections by gossipsub because of rate limit", labels = ["agent"])
+declarePublicCounter(libp2p_gossipsub_peers_rate_limit_hits, "The number of times peers were above their rate limit", labels = ["agent"])

 proc init*(_: type[TopicParams]): TopicParams =
   TopicParams(
@@ -245,7 +245,7 @@ proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) =
   peer.overheadRateLimitOpt.withValue(overheadRateLimit):
     if not overheadRateLimit.tryConsume(uselessAppBytesNum):
       debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
-      libp2p_gossipsub_peers_rate_limit_disconnections.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
+      libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
       # discard g.disconnectPeer(peer)
       # debug "Peer disconnected", peer, uselessAppBytesNum
       # raise newException(PeerRateLimitError, "Peer sent invalid message and it's above rate limit")
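The counter rename (`..._disconnections` to `..._hits`) matches the new behaviour: the metric now counts every time a peer exceeds its budget, whether or not the flag actually disconnects it. A test-side sketch of reading it, assuming metrics are compiled in (-d:metrics), nim-metrics' value() accessor, and peers reporting "nim-libp2p" as their agent string:

# Test-side sketch (assumptions: -d:metrics, nim-metrics' value() accessor,
# and an agent label of "nim-libp2p"); import path is an assumption.
import metrics
import libp2p/protocols/pubsub/gossipsub/scoring  # where the counter is declared

let hits = libp2p_gossipsub_peers_rate_limit_hits.value(labelValues = ["nim-libp2p"])
doAssert hits >= 0.0  # counts every rate-limit hit, disconnected or not
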
@@ -146,6 +146,7 @@ type
     iwantTimeout*: Duration

     overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
+    disconnectPeerAboveRateLimit*: bool

   BackoffTable* = Table[string, Table[PeerId, Moment]]
   ValidationSeenTable* = Table[MessageId, HashSet[PubSubPeer]]