mirror of https://github.com/vacp2p/nim-libp2p.git
GossipSub Traffic scoring (#920)
parent b2eac7ecbd
commit 56599f5b9d
@@ -15,7 +15,7 @@ import ./pubsub,
        ./pubsubpeer,
        ./timedcache,
        ./peertable,
-       ./rpc/[message, messages],
+       ./rpc/[message, messages, protobuf],
        ../../crypto/crypto,
        ../../stream/connection,
        ../../peerid,
@@ -95,7 +95,16 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =
 
 method rpcHandler*(f: FloodSub,
                    peer: PubSubPeer,
-                   rpcMsg: RPCMsg) {.async.} =
+                   data: seq[byte]) {.async.} =
+
+  var rpcMsg = decodeRpcMsg(data).valueOr:
+    debug "failed to decode msg from peer", peer, err = error
+    raise newException(CatchableError, "")
+
+  trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
+  # trigger hooks
+  peer.recvObservers(rpcMsg)
+
   for i in 0..<min(f.topicsHigh, rpcMsg.subscriptions.len):
     template sub: untyped = rpcMsg.subscriptions[i]
     f.handleSubscribe(peer, sub.topic, sub.subscribe)
@@ -13,13 +13,14 @@
 
 import std/[sets, sequtils]
 import chronos, chronicles, metrics
+import chronos/ratelimit
 import ./pubsub,
        ./floodsub,
        ./pubsubpeer,
        ./peertable,
        ./mcache,
        ./timedcache,
-       ./rpc/[messages, message],
+       ./rpc/[messages, message, protobuf],
        ../protocol,
        ../../stream/connection,
        ../../peerinfo,
@@ -78,7 +79,8 @@ proc init*(_: type[GossipSubParams]): GossipSubParams =
       disconnectBadPeers: false,
       enablePX: false,
       bandwidthEstimatebps: 100_000_000, # 100 Mbps or 12.5 MBps
-      iwantTimeout: 3 * GossipSubHeartbeatInterval
+      iwantTimeout: 3 * GossipSubHeartbeatInterval,
+      overheadRateLimit: Opt.none(tuple[bytes: int, interval: Duration])
     )
 
 proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
@@ -160,7 +162,7 @@ method onNewPeer(g: GossipSub, peer: PubSubPeer) =
     peer.behaviourPenalty = stats.behaviourPenalty
 
     # Check if the score is below the threshold and disconnect the peer if necessary
-    g.disconnectBadPeerCheck(peer, stats.score)
+    g.disconnectIfBadScorePeer(peer, stats.score)
 
   peer.iHaveBudget = IHavePeerBudget
   peer.pingBudget = PingsPeerBudget
@@ -316,7 +318,7 @@ proc validateAndRelay(g: GossipSub,
     of ValidationResult.Reject:
       debug "Dropping message after validation, reason: reject",
         msgId = shortLog(msgId), peer
-      g.punishInvalidMessage(peer, msg.topicIds)
+      g.punishInvalidMessage(peer, msg)
       return
     of ValidationResult.Ignore:
       debug "Dropping message after validation, reason: ignore",
@@ -379,9 +381,57 @@ proc validateAndRelay(g: GossipSub,
   except CatchableError as exc:
     info "validateAndRelay failed", msg=exc.msg
 
+proc dataAndTopicsIdSize(msgs: seq[Message]): int =
+  msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)
+
+proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.raises:[PeerRateLimitError, CatchableError].} =
+  # In this way we count even ignored fields by protobuf
+
+  var rmsg = rpcMsgOpt.valueOr:
+    peer.overheadRateLimitOpt.withValue(overheadRateLimit):
+      if not overheadRateLimit.tryConsume(msgSize):
+        libp2p_gossipsub_peers_rate_limit_disconnections.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
+        debug "Peer sent a msg that couldn't be decoded and it's above rate limit", peer, uselessAppBytesNum = msgSize
+        # discard g.disconnectPeer(peer)
+        # debug "Peer disconnected", peer, uselessAppBytesNum = msgSize
+        # raise newException(PeerRateLimitError, "Peer sent a msg that couldn't be decoded and it's above rate limit")
+
+    raise newException(CatchableError, "Peer msg couldn't be decoded")
+
+  let usefulMsgBytesNum =
+    if g.verifySignature:
+      byteSize(rmsg.messages)
+    else:
+      dataAndTopicsIdSize(rmsg.messages)
+
+  var uselessAppBytesNum = msgSize - usefulMsgBytesNum
+  rmsg.control.withValue(control):
+    uselessAppBytesNum -= (byteSize(control.ihave) + byteSize(control.iwant))
+
+  peer.overheadRateLimitOpt.withValue(overheadRateLimit):
+    if not overheadRateLimit.tryConsume(uselessAppBytesNum):
+      libp2p_gossipsub_peers_rate_limit_disconnections.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
+      debug "Peer sent too much useless application data and it's above rate limit.", peer, msgSize, uselessAppBytesNum, rmsg
+      # discard g.disconnectPeer(peer)
+      # debug "Peer disconnected", peer, msgSize, uselessAppBytesNum
+      # raise newException(PeerRateLimitError, "Peer sent too much useless application data and it's above rate limit.")
+
 method rpcHandler*(g: GossipSub,
                    peer: PubSubPeer,
-                   rpcMsg: RPCMsg) {.async.} =
+                   data: seq[byte]) {.async.} =
+
+  let msgSize = data.len
+  var rpcMsg = decodeRpcMsg(data).valueOr:
+    debug "failed to decode msg from peer", peer, err = error
+    rateLimit(g, peer, Opt.none(RPCMsg), msgSize)
+    return
+
+  trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
+  rateLimit(g, peer, Opt.some(rpcMsg), msgSize)
+
+  # trigger hooks
+  peer.recvObservers(rpcMsg)
+
   if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
     g.send(peer, RPCMsg(pong: rpcMsg.ping))
     peer.pingBudget.dec
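Illustration (not part of the diff): rateLimit charges a peer only for bytes that carry no useful application data. Everything attributable to message payloads and topic IDs (or the full encoded message size when signature verification is on), plus the IHAVE/IWANT control entries, is subtracted from the received frame size first, and only the remainder is fed to the peer's token bucket. A minimal standalone sketch of that arithmetic, with made-up byte counts:

# Standalone sketch of the overhead accounting performed in rateLimit above.
# The numbers are hypothetical; only the subtraction mirrors the diff.
proc overheadBytes(msgSize, usefulMsgBytes, ihaveBytes, iwantBytes: int): int =
  # Bytes that carried neither payload/topic data nor IHAVE/IWANT control data.
  msgSize - usefulMsgBytes - ihaveBytes - iwantBytes

# A 10 KiB frame whose messages account for 9 KiB and whose control section
# accounts for 512 bytes leaves 512 bytes charged against the overhead budget.
doAssert overheadBytes(10_240, 9_216, 256, 256) == 512

A frame made mostly of real messages therefore consumes almost none of the budget, while undecodable or padding-heavy frames consume it quickly.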
@@ -445,14 +495,14 @@ method rpcHandler*(g: GossipSub,
         # always validate if signature is present or required
         debug "Dropping message due to failed signature verification",
           msgId = shortLog(msgId), peer
-        g.punishInvalidMessage(peer, msg.topicIds)
+        g.punishInvalidMessage(peer, msg)
         continue
 
       if msg.seqno.len > 0 and msg.seqno.len != 8:
         # if we have seqno should be 8 bytes long
         debug "Dropping message due to invalid seqno length",
           msgId = shortLog(msgId), peer
-        g.punishInvalidMessage(peer, msg.topicIds)
+        g.punishInvalidMessage(peer, msg)
         continue
 
       # g.anonymize needs no evaluation when receiving messages
@@ -676,3 +726,13 @@ method initPubSub*(g: GossipSub)
 
   # init gossip stuff
   g.mcache = MCache.init(g.parameters.historyGossip, g.parameters.historyLength)
+
+method getOrCreatePeer*(
+  g: GossipSub,
+  peerId: PeerId,
+  protos: seq[string]): PubSubPeer =
+
+  let peer = procCall PubSub(g).getOrCreatePeer(peerId, protos)
+  g.parameters.overheadRateLimit.withValue(overheadRateLimit):
+    peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(overheadRateLimit.bytes, overheadRateLimit.interval))
+  return peer
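Usage sketch (not part of the diff): with this getOrCreatePeer override, enabling the limit is a matter of filling GossipSubParams.overheadRateLimit before the router creates peers; each peer then receives its own TokenBucket with that budget and refill interval. The module paths and the 1 KiB / 500 ms figures below are assumptions for illustration (the values mirror the test changes further down):

# Hedged configuration sketch; assumes the usual nim-libp2p module layout.
import chronos                               # Duration, milliseconds
import stew/results                          # Opt
import libp2p/protocols/pubsub/gossipsub     # GossipSubParams

var params = GossipSubParams.init()
params.overheadRateLimit = Opt.some((bytes: 1024, interval: 500.milliseconds))
doAssert params.validateParameters().isOk()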
@@ -11,9 +11,12 @@
 
 import std/[tables, sets]
 import chronos, chronicles, metrics
+import chronos/ratelimit
 import "."/[types]
 import ".."/[pubsubpeer]
+import ../rpc/messages
 import "../../.."/[peerid, multiaddress, switch, utils/heartbeat]
+import ../pubsub
 
 logScope:
   topics = "libp2p gossipsub"
@@ -27,6 +30,7 @@ declareGauge(libp2p_gossipsub_peers_score_invalidMessageDeliveries, "Detailed go
 declareGauge(libp2p_gossipsub_peers_score_appScore, "Detailed gossipsub scoring metric", labels = ["agent"])
 declareGauge(libp2p_gossipsub_peers_score_behaviourPenalty, "Detailed gossipsub scoring metric", labels = ["agent"])
 declareGauge(libp2p_gossipsub_peers_score_colocationFactor, "Detailed gossipsub scoring metric", labels = ["agent"])
+declarePublicCounter(libp2p_gossipsub_peers_rate_limit_disconnections, "The number of peer disconnections by gossipsub because of rate limit", labels = ["agent"])
 
 proc init*(_: type[TopicParams]): TopicParams =
   TopicParams(
@@ -85,27 +89,18 @@ proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
 
 {.pop.}
 
-proc disconnectPeer(g: GossipSub, peer: PubSubPeer) {.async.} =
-  let agent =
-    when defined(libp2p_agents_metrics):
-      if peer.shortAgent.len > 0:
-        peer.shortAgent
-      else:
-        "unknown"
-    else:
-      "unknown"
-  libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [agent])
-
+proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
   try:
     await g.switch.disconnect(peer.peerId)
   except CatchableError as exc: # Never cancelled
     trace "Failed to close connection", peer, error = exc.name, msg = exc.msg
 
-proc disconnectBadPeerCheck*(g: GossipSub, peer: PubSubPeer, score: float64) =
+proc disconnectIfBadScorePeer*(g: GossipSub, peer: PubSubPeer, score: float64) =
   if g.parameters.disconnectBadPeers and score < g.parameters.graylistThreshold and
     peer.peerId notin g.parameters.directPeers:
     debug "disconnecting bad score peer", peer, score = peer.score
     asyncSpawn(g.disconnectPeer(peer))
+    libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [peer.getAgent()])
 
 proc updateScores*(g: GossipSub) = # avoid async
   ## https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#the-score-function
@@ -175,14 +170,7 @@ proc updateScores*(g: GossipSub) = # avoid async
         score += topicScore * topicParams.topicWeight
 
         # Score metrics
-        let agent =
-          when defined(libp2p_agents_metrics):
-            if peer.shortAgent.len > 0:
-              peer.shortAgent
-            else:
-              "unknown"
-          else:
-            "unknown"
+        let agent = peer.getAgent()
         libp2p_gossipsub_peers_score_firstMessageDeliveries.inc(info.firstMessageDeliveries, labelValues = [agent])
         libp2p_gossipsub_peers_score_meshMessageDeliveries.inc(info.meshMessageDeliveries, labelValues = [agent])
         libp2p_gossipsub_peers_score_meshFailurePenalty.inc(info.meshFailurePenalty, labelValues = [agent])
@@ -219,14 +207,7 @@ proc updateScores*(g: GossipSub) = # avoid async
     score += colocationFactor * g.parameters.ipColocationFactorWeight
 
     # Score metrics
-    let agent =
-      when defined(libp2p_agents_metrics):
-        if peer.shortAgent.len > 0:
-          peer.shortAgent
-        else:
-          "unknown"
-      else:
-        "unknown"
+    let agent = peer.getAgent()
     libp2p_gossipsub_peers_score_appScore.inc(peer.appScore, labelValues = [agent])
     libp2p_gossipsub_peers_score_behaviourPenalty.inc(peer.behaviourPenalty, labelValues = [agent])
     libp2p_gossipsub_peers_score_colocationFactor.inc(colocationFactor, labelValues = [agent])
@@ -246,8 +227,7 @@ proc updateScores*(g: GossipSub) = # avoid async
 
     trace "updated peer's score", peer, score = peer.score, n_topics, is_grafted
 
-    g.disconnectBadPeerCheck(peer, stats.score)
-
+    g.disconnectIfBadScorePeer(peer, stats.score)
     libp2p_gossipsub_peers_scores.inc(peer.score, labelValues = [agent])
 
   for peer in evicting:
@@ -260,8 +240,18 @@ proc scoringHeartbeat*(g: GossipSub) {.async.} =
     trace "running scoring heartbeat", instance = cast[int](g)
     g.updateScores()
 
-proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, topics: seq[string]) =
-  for tt in topics:
+proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) =
+  let uselessAppBytesNum = msg.data.len
+  peer.overheadRateLimitOpt.withValue(overheadRateLimit):
+    if not overheadRateLimit.tryConsume(uselessAppBytesNum):
+      debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
+      libp2p_gossipsub_peers_rate_limit_disconnections.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
+      # discard g.disconnectPeer(peer)
+      # debug "Peer disconnected", peer, uselessAppBytesNum
+      # raise newException(PeerRateLimitError, "Peer sent invalid message and it's above rate limit")
+
+
+  for tt in msg.topicIds:
     let t = tt
     if t notin g.topics:
       continue
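The punishment path reuses the same chronos TokenBucket that rateLimit consumes from: an invalid message is charged at its full data length, so a peer that keeps delivering garbage exhausts its overhead budget even though every frame decodes cleanly. A minimal sketch of the tryConsume behaviour the code relies on, using the 1 KiB / 500 ms budget from the tests and assuming both calls land within the same refill window:

# Sketch of the chronos/ratelimit calls used above.
import chronos
import chronos/ratelimit

let bucket = TokenBucket.new(1024, 500.milliseconds)  # 1 KiB budget per 500 ms
doAssert bucket.tryConsume(800)       # fits within the current window
doAssert not bucket.tryConsume(800)   # second burst would exceed 1024, rejected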
@@ -145,6 +145,8 @@
     bandwidthEstimatebps*: int # This is currently used only for limting flood publishing. 0 disables flood-limiting completely
     iwantTimeout*: Duration
 
+    overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
+
   BackoffTable* = Table[string, Table[PeerId, Moment]]
   ValidationSeenTable* = Table[MessageId, HashSet[PubSubPeer]]
 
@@ -17,6 +17,7 @@
 
 import std/[tables, sequtils, sets, strutils]
 import chronos, chronicles, metrics
+import chronos/ratelimit
 import ./errors as pubsub_errors,
        ./pubsubpeer,
        ./rpc/[message, messages, protobuf],
@@ -263,7 +264,7 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
 
 method rpcHandler*(p: PubSub,
                    peer: PubSubPeer,
-                   rpcMsg: RPCMsg): Future[void] {.base, async.} =
+                   data: seq[byte]): Future[void] {.base, async.} =
   ## Handler that must be overridden by concrete implementation
   raiseAssert "Unimplemented"
 
@@ -278,10 +279,11 @@ method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {
     of PubSubPeerEventKind.Disconnected:
       discard
 
-proc getOrCreatePeer*(
+method getOrCreatePeer*(
   p: PubSub,
   peerId: PeerId,
-  protos: seq[string]): PubSubPeer =
+  protos: seq[string]): PubSubPeer {.base, gcsafe.} =
+
   p.peers.withValue(peerId, peer):
     return peer[]
 
@@ -354,9 +356,9 @@ method handleConn*(p: PubSub,
   ## that we're interested in
   ##
 
-  proc handler(peer: PubSubPeer, msg: RPCMsg): Future[void] =
+  proc handler(peer: PubSubPeer, data: seq[byte]): Future[void] =
     # call pubsub rpc handler
-    p.rpcHandler(peer, msg)
+    p.rpcHandler(peer, data)
 
   let peer = p.getOrCreatePeer(conn.peerId, @[proto])
 
@@ -12,6 +12,7 @@
 import std/[sequtils, strutils, tables, hashes, options, sets, deques]
 import stew/results
 import chronos, chronicles, nimcrypto/sha2, metrics
+import chronos/ratelimit
 import rpc/[messages, message, protobuf],
        ../../peerid,
        ../../peerinfo,
@@ -32,6 +33,8 @@ when defined(libp2p_expensive_metrics):
   declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])
 
 type
+  PeerRateLimitError* = object of CatchableError
+
   PubSubObserver* = ref object
     onRecv*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
     onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
@@ -66,8 +69,9 @@ type
     maxMessageSize: int
     appScore*: float64 # application specific score
     behaviourPenalty*: float64 # the eventual penalty score
+    overheadRateLimitOpt*: Opt[TokenBucket]
 
-  RPCHandler* = proc(peer: PubSubPeer, msg: RPCMsg): Future[void]
+  RPCHandler* = proc(peer: PubSubPeer, data: seq[byte]): Future[void]
     {.gcsafe, raises: [].}
 
 when defined(libp2p_agents_metrics):
@@ -107,7 +111,7 @@ func outbound*(p: PubSubPeer): bool =
   else:
     false
 
-proc recvObservers(p: PubSubPeer, msg: var RPCMsg) =
+proc recvObservers*(p: PubSubPeer, msg: var RPCMsg) =
   # trigger hooks
   if not(isNil(p.observers)) and p.observers[].len > 0:
     for obs in p.observers[]:
@@ -134,26 +138,19 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
           conn, peer = p, closed = conn.closed,
           data = data.shortLog
 
-        var rmsg = decodeRpcMsg(data).valueOr:
-          debug "failed to decode msg from peer",
-            conn, peer = p, closed = conn.closed,
-            err = error
-          break
-        data = newSeq[byte]() # Release memory
-
-        trace "decoded msg from peer",
-          conn, peer = p, closed = conn.closed,
-          msg = rmsg.shortLog
-        # trigger hooks
-        p.recvObservers(rmsg)
-
         when defined(libp2p_expensive_metrics):
           for m in rmsg.messages:
             for t in m.topicIDs:
               # metrics
               libp2p_pubsub_received_messages.inc(labelValues = [$p.peerId, t])
 
-        await p.handler(p, rmsg)
+        await p.handler(p, data)
+        data = newSeq[byte]() # Release memory
+    except PeerRateLimitError as exc:
+      debug "Peer rate limit exceeded, exiting read while", conn, peer = p, error = exc.msg
+    except CatchableError as exc:
+      debug "Exception occurred in PubSubPeer.handle",
+        conn, peer = p, closed = conn.closed, exc = exc.msg
     finally:
       await conn.close()
   except CancelledError:
@@ -307,7 +304,8 @@ proc new*(
   getConn: GetConn,
   onEvent: OnEvent,
   codec: string,
-  maxMessageSize: int): T =
+  maxMessageSize: int,
+  overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket)): T =
 
   result = T(
     getConn: getConn,
@@ -315,7 +313,18 @@
     codec: codec,
     peerId: peerId,
     connectedFut: newFuture[void](),
-    maxMessageSize: maxMessageSize
+    maxMessageSize: maxMessageSize,
+    overheadRateLimitOpt: overheadRateLimitOpt
   )
   result.sentIHaves.addFirst(default(HashSet[MessageId]))
   result.heDontWants.addFirst(default(HashSet[MessageId]))
+
+proc getAgent*(peer: PubSubPeer): string =
+  return
+    when defined(libp2p_agents_metrics):
+      if peer.shortAgent.len > 0:
+        peer.shortAgent
+      else:
+        "unknown"
+    else:
+      "unknown"
@@ -116,3 +116,32 @@ func shortLog*(m: RPCMsg): auto =
     messages: mapIt(m.messages, it.shortLog),
     control: m.control.get(ControlMessage()).shortLog
   )
+
+proc byteSize*(msg: Message): int =
+  var total = 0
+  total += msg.fromPeer.len
+  total += msg.data.len
+  total += msg.seqno.len
+  total += msg.signature.len
+  total += msg.key.len
+  for topicId in msg.topicIds:
+    total += topicId.len
+  return total
+
+proc byteSize*(msgs: seq[Message]): int =
+  msgs.mapIt(byteSize(it)).foldl(a + b, 0)
+
+proc byteSize*(ihave: seq[ControlIHave]): int =
+  var total = 0
+  for item in ihave:
+    total += item.topicId.len
+    for msgId in item.messageIds:
+      total += msgId.len
+  return total
+
+proc byteSize*(iwant: seq[ControlIWant]): int =
+  var total = 0
+  for item in iwant:
+    for msgId in item.messageIds:
+      total += msgId.len
+  return total
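For reference (not part of the diff), these helpers count only the variable-length fields, so a control IHAVE entry is sized as its topic ID plus its message IDs. A hedged example, assuming the usual rpc/messages module path and direct construction of the entry:

# Hypothetical byteSize usage on a single IHAVE entry.
import libp2p/protocols/pubsub/rpc/messages

let ihave = @[ControlIHave(topicId: "foo", messageIds: @[@[1'u8, 2, 3]])]
doAssert byteSize(ihave) == 3 + 3  # "foo" (3 bytes) + one 3-byte message id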
@@ -10,6 +10,7 @@ import ../../libp2p/crypto/crypto
 import ../../libp2p/stream/bufferstream
 import ../../libp2p/switch
 import ../../libp2p/muxers/muxer
+import ../../libp2p/protocols/pubsub/rpc/protobuf
 
 import ../helpers
 
@@ -22,7 +23,7 @@ proc getPubSubPeer(p: TestGossipSub, peerId: PeerId): PubSubPeer =
   proc getConn(): Future[Connection] =
     p.switch.dial(peerId, GossipSubCodec)
 
-  let pubSubPeer = PubSubPeer.new(peerId, getConn, nil, GossipSubCodec, 1024 * 1024)
+  let pubSubPeer = PubSubPeer.new(peerId, getConn, nil, GossipSubCodec, 1024 * 1024, Opt.some(TokenBucket.new(1024, 500.milliseconds)))
   debug "created new pubsub peer", peerId
 
   p.peers[peerId] = pubSubPeer
|
|||||||
asyncTest "`replenishFanout` Degree Lo":
|
asyncTest "`replenishFanout` Degree Lo":
|
||||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||||
|
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
let topic = "foobar"
|
let topic = "foobar"
|
||||||
@ -197,7 +198,7 @@ suite "GossipSub internal":
|
|||||||
asyncTest "`dropFanoutPeers` drop expired fanout topics":
|
asyncTest "`dropFanoutPeers` drop expired fanout topics":
|
||||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||||
|
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
let topic = "foobar"
|
let topic = "foobar"
|
||||||
@ -227,7 +228,7 @@ suite "GossipSub internal":
|
|||||||
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
|
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
|
||||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||||
|
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
let topic1 = "foobar1"
|
let topic1 = "foobar1"
|
||||||
@ -264,7 +265,7 @@ suite "GossipSub internal":
|
|||||||
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
|
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
|
||||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||||
|
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
let topic = "foobar"
|
let topic = "foobar"
|
||||||
@ -325,7 +326,7 @@ suite "GossipSub internal":
|
|||||||
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
|
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
|
||||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||||
|
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
let topic = "foobar"
|
let topic = "foobar"
|
||||||
@ -365,7 +366,7 @@ suite "GossipSub internal":
|
|||||||
asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
|
asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
|
||||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||||
|
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
let topic = "foobar"
|
let topic = "foobar"
|
||||||
@ -406,7 +407,7 @@ suite "GossipSub internal":
|
|||||||
asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
|
asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
|
||||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||||
|
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||||
discard
|
discard
|
||||||
|
|
||||||
let topic = "foobar"
|
let topic = "foobar"
|
||||||
@ -447,7 +448,7 @@ suite "GossipSub internal":
|
|||||||
asyncTest "Drop messages of topics without subscription":
|
asyncTest "Drop messages of topics without subscription":
|
||||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||||
|
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||||
check false
|
check false
|
||||||
|
|
||||||
let topic = "foobar"
|
let topic = "foobar"
|
||||||
@ -470,7 +471,7 @@ suite "GossipSub internal":
|
|||||||
let peer = gossipSub.getPubSubPeer(peerId)
|
let peer = gossipSub.getPubSubPeer(peerId)
|
||||||
inc seqno
|
inc seqno
|
||||||
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
|
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
|
||||||
await gossipSub.rpcHandler(peer, RPCMsg(messages: @[msg]))
|
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
|
||||||
|
|
||||||
check gossipSub.mcache.msgs.len == 0
|
check gossipSub.mcache.msgs.len == 0
|
||||||
|
|
||||||
@ -481,7 +482,7 @@ suite "GossipSub internal":
|
|||||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||||
gossipSub.parameters.disconnectBadPeers = true
|
gossipSub.parameters.disconnectBadPeers = true
|
||||||
gossipSub.parameters.appSpecificWeight = 1.0
|
gossipSub.parameters.appSpecificWeight = 1.0
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||||
check false
|
check false
|
||||||
|
|
||||||
let topic = "foobar"
|
let topic = "foobar"
|
||||||
@ -525,7 +526,7 @@ suite "GossipSub internal":
|
|||||||
conn.peerId = peerId
|
conn.peerId = peerId
|
||||||
let peer = gossipSub.getPubSubPeer(peerId)
|
let peer = gossipSub.getPubSubPeer(peerId)
|
||||||
|
|
||||||
await gossipSub.rpcHandler(peer, lotOfSubs)
|
await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
|
||||||
|
|
||||||
check:
|
check:
|
||||||
gossipSub.gossipsub.len == gossipSub.topicsHigh
|
gossipSub.gossipsub.len == gossipSub.topicsHigh
|
||||||
@ -656,7 +657,7 @@ suite "GossipSub internal":
|
|||||||
asyncTest "handleIHave/Iwant tests":
|
asyncTest "handleIHave/Iwant tests":
|
||||||
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
let gossipSub = TestGossipSub.init(newStandardSwitch())
|
||||||
|
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||||
check false
|
check false
|
||||||
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
|
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
|
||||||
|
|
||||||
@ -733,7 +734,7 @@ suite "GossipSub internal":
|
|||||||
|
|
||||||
var iwantCount = 0
|
var iwantCount = 0
|
||||||
|
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
|
||||||
check false
|
check false
|
||||||
|
|
||||||
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
|
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
|
||||||
@ -779,7 +780,7 @@ suite "GossipSub internal":
|
|||||||
data: actualMessageData
|
data: actualMessageData
|
||||||
)]
|
)]
|
||||||
)
|
)
|
||||||
await gossipSub.rpcHandler(firstPeer, rpcMsg)
|
await gossipSub.rpcHandler(firstPeer, encodeRpcMsg(rpcMsg, false))
|
||||||
|
|
||||||
check: not gossipSub.outstandingIWANTs.contains(ihaveMessageId.toBytes())
|
check: not gossipSub.outstandingIWANTs.contains(ihaveMessageId.toBytes())
|
||||||
|
|
||||||
@ -792,7 +793,7 @@ suite "GossipSub internal":
|
|||||||
gossipSub.parameters.iwantTimeout = 10.milliseconds
|
gossipSub.parameters.iwantTimeout = 10.milliseconds
|
||||||
await gossipSub.start()
|
await gossipSub.start()
|
||||||
|
|
||||||
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} = discard
|
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} = discard
|
||||||
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
|
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
|
||||||
|
|
||||||
let topic = "foobar"
|
let topic = "foobar"
|
||||||
|
@@ -73,3 +73,15 @@ suite "Message":
     check:
       msgIdResult.isErr
       msgIdResult.error == ValidationResult.Reject
+
+  test "byteSize for Message":
+    var msg = Message(
+      fromPeer: PeerId(data: @[]), # Empty seq[byte]
+      data: @[1'u8, 2, 3], # 3 bytes
+      seqno: @[1'u8], # 1 byte
+      signature: @[], # Empty seq[byte]
+      key: @[1'u8], # 1 byte
+      topicIds: @["abc", "defgh"] # 3 + 5 = 8 bytes
+    )
+
+    check byteSize(msg) == 3 + 1 + 1 + 8 # Total: 13 bytes