Mirror of https://github.com/vacp2p/nim-libp2p.git (synced 2025-02-05 05:14:32 +00:00)
chore(gossipsub): cleanups (#1096)
commit 0911cb20f4 (parent 3ca49a2f40)
@@ -57,10 +57,10 @@ proc addSeen*(f: FloodSub, saltedId: SaltedId): bool =
 proc firstSeen*(f: FloodSub, saltedId: SaltedId): Moment =
   f.seen.addedAt(saltedId)

-proc handleSubscribe*(f: FloodSub,
+proc handleSubscribe(f: FloodSub,
                       peer: PubSubPeer,
                       topic: string,
                       subscribe: bool) =
   logScope:
     peer
     topic
@@ -106,10 +106,9 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =
 method rpcHandler*(f: FloodSub,
                    peer: PubSubPeer,
                    data: seq[byte]) {.async.} =
-
   var rpcMsg = decodeRpcMsg(data).valueOr:
     debug "failed to decode msg from peer", peer, err = error
-    raise newException(CatchableError, "")
+    raise newException(CatchableError, "Peer msg couldn't be decoded")

   trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
   # trigger hooks
@@ -266,10 +266,10 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =

   procCall FloodSub(g).unsubscribePeer(peer)

-proc handleSubscribe*(g: GossipSub,
+proc handleSubscribe(g: GossipSub,
                       peer: PubSubPeer,
                       topic: string,
                       subscribe: bool) =
   logScope:
     peer
     topic
@@ -395,9 +395,7 @@ proc validateAndRelay(g: GossipSub,

     g.floodsub.withValue(topic, peers): toSendPeers.incl(peers[])
     g.mesh.withValue(topic, peers): toSendPeers.incl(peers[])
-
-    # add direct peers
-    toSendPeers.incl(g.subscribedDirectPeers.getOrDefault(topic))
+    g.subscribedDirectPeers.withValue(topic, peers): toSendPeers.incl(peers[])

     # Don't send it to source peer, or peers that
     # sent it during validation
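Aside (not part of the diff): a minimal standalone sketch of why withValue is a good fit for the direct-peer lookup above. Its body only runs when the key exists, so no empty default set is built for topics without direct peers. The sketch uses std/tables and std/sets with plain int "peers" instead of the library's PeerTable and PubSubPeer types.

import std/[tables, sets]

var byTopic = initTable[string, HashSet[int]]()
byTopic["chat"] = toHashSet([1, 2, 3])

var toSend = initHashSet[int]()
# Body runs only for existing keys; `peers` is a pointer to the stored set.
byTopic.withValue("chat", peers): toSend.incl(peers[])
# Missing key: the body is skipped, nothing is allocated or copied.
byTopic.withValue("blocks", peers): toSend.incl(peers[])
assert toSend == toHashSet([1, 2, 3])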
@@ -468,6 +466,11 @@ method rpcHandler*(g: GossipSub,
   var rpcMsg = decodeRpcMsg(data).valueOr:
     debug "failed to decode msg from peer", peer, err = error
     await rateLimit(g, peer, msgSize)
+    # Raising in the handler closes the gossipsub connection (but doesn't
+    # disconnect the peer!)
+    # TODO evaluate behaviour penalty values
+    peer.behaviourPenalty += 0.1
+
     raise newException(CatchableError, "Peer msg couldn't be decoded")

   when defined(libp2p_expensive_metrics):
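Aside (not part of the diff): a rough feel for the 0.1 increment added above. The weights and decay that turn this penalty into a score change are configurable scoring parameters not shown in this hunk; the sketch only illustrates the accumulation, assuming no decay in between.

var behaviourPenalty = 0.0
for _ in 1 .. 20:
  # one undecodable message per iteration
  behaviourPenalty += 0.1
echo behaviourPenalty # ~2.0 after 20 undecodable messages

A single malformed message stays cheap, while a peer that keeps sending garbage accumulates a penalty that scoring can act on.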
@@ -477,12 +480,13 @@ method rpcHandler*(g: GossipSub,
   trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
   await rateLimit(g, peer, g.messageOverhead(rpcMsg, msgSize))
-  # trigger hooks
+
+  # trigger hooks - these may modify the message
   peer.recvObservers(rpcMsg)

   if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
     g.send(peer, RPCMsg(pong: rpcMsg.ping), isHighPriority = true)
     peer.pingBudget.dec

   for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
     template sub: untyped = rpcMsg.subscriptions[i]
     g.handleSubscribe(peer, sub.topic, sub.subscribe)
@@ -502,7 +506,7 @@ method rpcHandler*(g: GossipSub,
     if msgIdResult.isErr:
       debug "Dropping message due to failed message id generation",
         error = msgIdResult.error
-      # TODO: descore peers due to error during message validation (malicious?)
+      await g.punishInvalidMessage(peer, msg)
       continue

     let
@@ -510,8 +514,6 @@ method rpcHandler*(g: GossipSub,
       msgIdSalted = g.salt(msgId)
       topic = msg.topic

-    # addSeen adds salt to msgId to avoid
-    # remote attacking the hash function
     if g.addSeen(msgIdSalted):
       trace "Dropping already-seen message", msgId = shortLog(msgId), peer

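Aside (not part of the diff): the comment dropped here explained why the seen-cache is keyed on salted ids. A minimal sketch of the idea, illustrative only and not the library's SaltedId/salt implementation: mixing a per-node random salt into the hash means a remote sender cannot predict which message ids collide in this node's table, so it cannot degrade the cache on purpose.

import std/[hashes, random]

randomize()
let nodeSalt = rand(int.high) # picked once per node, never shared

proc saltedHash(msgId: seq[byte]): Hash =
  # Without knowing nodeSalt, a peer cannot craft ids that collide here.
  var h: Hash = 0
  h = h !& hash(nodeSalt)
  h = h !& hash(msgId)
  !$h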
@@ -599,25 +601,24 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =

     g.mesh.del(topic)

-
   # Send unsubscribe (in reverse order to sub/graft)
   procCall PubSub(g).onTopicSubscription(topic, subscribed)

 method publish*(g: GossipSub,
                 topic: string,
                 data: seq[byte]): Future[int] {.async.} =
-  # base returns always 0
-  discard await procCall PubSub(g).publish(topic, data)
-
   logScope:
     topic

-  trace "Publishing message on topic", data = data.shortLog
-
   if topic.len <= 0: # data could be 0/empty
     debug "Empty topic, skipping publish"
     return 0

+  # base returns always 0
+  discard await procCall PubSub(g).publish(topic, data)
+
+  trace "Publishing message on topic", data = data.shortLog
+
   var peers: HashSet[PubSubPeer]

   # add always direct peers
@@ -630,38 +631,39 @@ method publish*(g: GossipSub,
     # With flood publishing enabled, the mesh is used when propagating messages from other peers,
     # but a peer's own messages will always be published to all known peers in the topic, limited
     # to the amount of peers we can send it to in one heartbeat
-    var maxPeersToFlodOpt: Opt[int64]
-    if g.parameters.bandwidthEstimatebps > 0:
-      let
-        bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000 # Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
-        msToTransmit = max(data.len div bandwidth, 1)
-      maxPeersToFlodOpt = Opt.some(max(g.parameters.heartbeatInterval.milliseconds div msToTransmit, g.parameters.dLow))
+    let maxPeersToFlood =
+      if g.parameters.bandwidthEstimatebps > 0:
+        let
+          bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000 # Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
+          msToTransmit = max(data.len div bandwidth, 1)
+        max(g.parameters.heartbeatInterval.milliseconds div msToTransmit, g.parameters.dLow)
+      else:
+        int.high() # unlimited

     for peer in g.gossipsub.getOrDefault(topic):
-      maxPeersToFlodOpt.withValue(maxPeersToFlod):
-        if peers.len >= maxPeersToFlod: break
+      if peers.len >= maxPeersToFlood: break
+
       if peer.score >= g.parameters.publishThreshold:
         trace "publish: including flood/high score peer", peer
         peers.incl(peer)

-  if peers.len < g.parameters.dLow:
-    # not subscribed, or bad mesh, send to fanout peers
-    var fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
-    if fanoutPeers.len < g.parameters.dLow:
-      g.replenishFanout(topic)
-      fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
+  elif peers.len < g.parameters.dLow:
+    # not subscribed or bad mesh, send to fanout peers
+    # when flood-publishing, fanout won't help since all potential peers have
+    # already been added

+    g.replenishFanout(topic) # Make sure fanout is populated
+
+    var fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
     g.rng.shuffle(fanoutPeers)

     for fanPeer in fanoutPeers:
       peers.incl(fanPeer)
       if peers.len > g.parameters.d: break

-    # even if we couldn't publish,
-    # we still attempted to publish
-    # on the topic, so it makes sense
-    # to update the last topic publish
-    # time
+    # Attempting to publish counts as fanout send (even if the message
+    # ultimately is not sent)
     g.lastFanoutPubSub[topic] = Moment.fromNow(g.parameters.fanoutTTL)

   if peers.len == 0:
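Aside (not part of the diff): a back-of-envelope run of the maxPeersToFlood computation above. The bandwidth estimate, heartbeat interval, dLow and message size below are made-up values for illustration, not defaults taken from this commit.

let
  bandwidthEstimatebps = 6_250_000   # assumed estimate: ~6.25 Mbit/s
  heartbeatIntervalMs = 700          # assumed heartbeat interval in ms
  dLow = 4                           # assumed lower mesh bound
  dataLen = 65_536                   # a 64 KiB message

let
  bandwidth = bandwidthEstimatebps div 8 div 1000   # bytes per millisecond: 781
  msToTransmit = max(dataLen div bandwidth, 1)      # ~83 ms to push one copy
  maxPeersToFlood = max(heartbeatIntervalMs div msToTransmit, dLow)

echo maxPeersToFlood # 8: at most 8 copies fit in one heartbeat, never below dLow

Larger messages or slower links shrink the flood set toward dLow; without a bandwidth estimate the rewritten code places no limit (int.high).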
@@ -690,7 +692,9 @@ method publish*(g: GossipSub,
   trace "Created new message", msg = shortLog(msg), peers = peers.len

   if g.addSeen(g.salt(msgId)):
-    # custom msgid providers might cause this
+    # If the message was received or published recently, don't re-publish it -
+    # this might happen when not using sequence id:s and / or with a custom
+    # message id provider
     trace "Dropping already-seen message"
     return 0

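Aside (not part of the diff): the expanded comment explains when publish can hit its own seen-cache. A tiny illustration with a hypothetical content-only id provider (not this library's default msgIdProvider): if the id is derived purely from the payload, publishing the same bytes twice yields the same id, so the second publish is dropped as already seen.

import std/hashes

# Hypothetical provider: id depends only on the payload, no sequence number.
proc contentOnlyMsgId(data: seq[byte]): Hash = hash(data)

let payload = @[byte 1, 2, 3]
assert contentOnlyMsgId(payload) == contentOnlyMsgId(payload)
# With such a provider, a second publish of `payload` would land in the
# seen-cache and return 0 instead of being sent again.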
@@ -204,7 +204,6 @@ proc getPeers(prune: ControlPrune, peer: PubSubPeer): seq[(PeerId, Option[PeerRecord])] =

       routingRecords

-
 proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) =
   for prune in prunes:
     let topic = prune.topicID
@@ -611,10 +610,10 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
       x notin gossipPeers and
       x.score >= g.parameters.gossipThreshold

-    var target = g.parameters.dLazy
-    let factor = (g.parameters.gossipFactor.float * allPeers.len.float).int
-    if factor > target:
-      target = min(factor, allPeers.len)
+    # https://github.com/libp2p/specs/blob/98c5aa9421703fc31b0833ad8860a55db15be063/pubsub/gossipsub/gossipsub-v1.1.md#adaptive-gossip-dissemination
+    let
+      factor = (g.parameters.gossipFactor.float * allPeers.len.float).int
+      target = max(g.parameters.dLazy, factor)

     if target < allPeers.len:
       g.rng.shuffle(allPeers)
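Aside (not part of the diff): a worked example of the rewritten target rule, which follows the adaptive gossip dissemination section linked in the new comment. dLazy = 6 and gossipFactor = 0.25 are the commonly cited gossipsub v1.1 spec values; treat them as illustrative here rather than this repo's configuration.

let
  dLazy = 6
  gossipFactor = 0.25

proc gossipTarget(allPeersLen: int): int =
  let factor = int(gossipFactor * allPeersLen.float)
  max(dLazy, factor)

echo gossipTarget(12)  # 6  -> small topics still gossip to dLazy peers
echo gossipTarget(120) # 30 -> large topics scale with gossipFactor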
@@ -172,8 +172,6 @@ type
     subscribedDirectPeers*: PeerTable # directpeers that we keep alive
     backingOff*: BackoffTable # peers to backoff from when replenishing the mesh
     lastFanoutPubSub*: Table[string, Moment] # last publish time for fanout topics
-    gossip*: Table[string, seq[ControlIHave]] # pending gossip
-    control*: Table[string, ControlMessage] # pending control messages
     mcache*: MCache # messages cache
     validationSeen*: ValidationSeenTable # peers who sent us message in validation
     heartbeatFut*: Future[void] # cancellation future for heartbeat interval