# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import std/[tables, sequtils, sets, algorithm, deques]
import chronos, chronicles, metrics
import "."/[types, scoring]
import ".."/[pubsubpeer, peertable, mcache, floodsub, pubsub]
import "../rpc"/[messages]
import
  "../../.."/[
    peerid,
    multiaddress,
    utility,
    switch,
    routing_record,
    signed_envelope,
    utils/heartbeat,
  ]

logScope:
  topics = "libp2p gossipsub"

declareGauge(libp2p_gossipsub_cache_window_size, "the number of messages in the cache")
declareGauge(
  libp2p_gossipsub_peers_per_topic_mesh,
  "gossipsub peers per topic in mesh",
  labels = ["topic"],
)
declareGauge(
  libp2p_gossipsub_peers_per_topic_fanout,
  "gossipsub peers per topic in fanout",
  labels = ["topic"],
)
declareGauge(
  libp2p_gossipsub_peers_per_topic_gossipsub,
  "gossipsub peers per topic in gossipsub",
  labels = ["topic"],
)
declareGauge(libp2p_gossipsub_under_dout_topics, "number of topics below dout")
declareGauge(libp2p_gossipsub_no_peers_topics, "number of topics in mesh with no peers")
declareGauge(
  libp2p_gossipsub_low_peers_topics,
  "number of topics in mesh with at least one but below dlow peers",
)
declareGauge(
  libp2p_gossipsub_healthy_peers_topics,
  "number of topics in mesh with at least dlow peers (but below dhigh)",
)
declareCounter(
  libp2p_gossipsub_above_dhigh_condition,
  "number of above dhigh pruning branches ran",
  labels = ["topic"],
)
declareGauge(libp2p_gossipsub_received_iwants, "received iwants", labels = ["kind"])
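
# Record that peer `p` was grafted onto `topic`: reset its per-topic mesh
# stats (graft time, mesh time, delivery tracking) so scoring for this mesh
# membership starts fresh.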
proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) =
  g.withPeerStats(p.peerId) do(stats: var PeerStats):
    var info = stats.topicInfos.getOrDefault(topic)
    info.graftTime = Moment.now()
    info.meshTime = 0.seconds
    info.inMesh = true
    info.meshMessageDeliveriesActive = false

    stats.topicInfos[topic] = info

  trace "grafted", peer = p, topic
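
# Record that peer `p` was pruned from `topic`: optionally register a backoff
# before it may be re-grafted, and apply the mesh-failure penalty when it
# delivered fewer messages than the topic threshold while active in the mesh.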
proc pruned*(
    g: GossipSub,
    p: PubSubPeer,
    topic: string,
    setBackoff: bool = true,
    backoff = none(Duration),
) =
  if setBackoff:
    let
      backoffDuration = backoff.get(g.parameters.pruneBackoff)
      backoffMoment = Moment.fromNow(backoffDuration)

    g.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[p.peerId] = backoffMoment

  g.peerStats.withValue(p.peerId, stats):
    stats.topicInfos.withValue(topic, info):
      g.topicParams.withValue(topic, topicParams):
        # penalize a peer that delivered no message
        let threshold = topicParams[].meshMessageDeliveriesThreshold
        if info[].inMesh and info[].meshMessageDeliveriesActive and
            info[].meshMessageDeliveries < threshold:
          let deficit = threshold - info.meshMessageDeliveries
          info[].meshFailurePenalty += deficit * deficit

      info.inMesh = false

  trace "pruned", peer = p, topic
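
# Remove entries from the backoff table for `topic` whose backoff period has
# already expired.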
proc handleBackingOff*(t: var BackoffTable, topic: string) =
  let now = Moment.now()
  var expired = toSeq(t.getOrDefault(topic).pairs())
  expired.keepIf do(pair: tuple[peer: PeerId, expire: Moment]) -> bool:
    now >= pair.expire
  for (peer, _) in expired:
    t.withValue(topic, v):
      v[].del(peer)
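
# Build the peer-exchange (PX) list attached to PRUNE messages: up to
# dHigh * 2 non-negative-score peers known for `topic`, each carrying its
# signed peer record when one is available in the peer store. Returns an
# empty list when PX is disabled.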
proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] =
  if not g.parameters.enablePX:
    return @[]
  var peers = g.gossipsub.getOrDefault(topic, initHashSet[PubSubPeer]()).toSeq()
  peers.keepIf do(x: PubSubPeer) -> bool:
    x.score >= 0.0
  # by spec, larger than Dhi, but let's put some hard caps
  peers.setLen(min(peers.len, g.parameters.dHigh * 2))
  let sprBook = g.switch.peerStore[SPRBook]
  peers.map do(x: PubSubPeer) -> PeerInfoMsg:
    PeerInfoMsg(
      peerId: x.peerId,
      signedPeerRecord:
        if x.peerId in sprBook:
          sprBook[x.peerId].encode().get(default(seq[byte]))
        else:
          default(seq[byte])
      ,
    )
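
# Handle GRAFT control messages from `peer`. Grafts from direct peers or
# peers still backing off are answered with a PRUNE and a behaviour penalty;
# peers below the publish threshold are ignored; otherwise the peer joins the
# topic mesh when there is room, or receives a PRUNE with peer-exchange info
# when the mesh is full. Returns the PRUNE messages to send back.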
proc handleGraft*(
    g: GossipSub, peer: PubSubPeer, grafts: seq[ControlGraft]
): seq[ControlPrune] =
  var prunes: seq[ControlPrune]
  for graft in grafts:
    let topic = graft.topicID
    trace "peer grafted topicID", peer, topic

    # It is an error to GRAFT on a direct peer
    if peer.peerId in g.parameters.directPeers:
      # receiving a graft from a direct peer should yield a more prominent warning (protocol violation)
      # we are trusting the direct peer not to abuse this
      warn "a direct peer attempted to graft us, peering agreements should be reciprocal",
        peer, topic
      # and such an attempt should be logged and rejected with a PRUNE
      prunes.add(
        ControlPrune(
          topicID: topic,
          peers: @[],
          # omitting heavy computation here as the remote did something illegal
          backoff: g.parameters.pruneBackoff.seconds.uint64,
        )
      )

      let backoff = Moment.fromNow(g.parameters.pruneBackoff)

      g.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff

      peer.behaviourPenalty += 0.1

      continue

    if g.mesh.hasPeer(topic, peer):
      trace "peer already in mesh", peer, topic
      continue

    # Check backingOff
    # Ignore BackoffSlackTime here, since this is only for outbound activity
    # and subtract a second time to avoid race conditions
    # (peers may wait to graft us at the exact instant they're allowed to)
    if g.backingOff.getOrDefault(topic).getOrDefault(peer.peerId) -
        (BackoffSlackTime * 2).seconds > Moment.now():
      debug "a backingOff peer attempted to graft us", peer, topic
      # and such an attempt should be logged and rejected with a PRUNE
      prunes.add(
        ControlPrune(
          topicID: topic,
          peers: @[],
          # omitting heavy computation here as the remote did something illegal
          backoff: g.parameters.pruneBackoff.seconds.uint64,
        )
      )

      let backoff = Moment.fromNow(g.parameters.pruneBackoff)

      g.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff

      peer.behaviourPenalty += 0.1

      continue

    # not in the spec exactly, but let's avoid way too low score peers
    # other clients do it too; it was also an audit recommendation
    if peer.score < g.parameters.publishThreshold:
      continue

    # If they send us a graft before they send us a subscribe, what should
    # we do? For now, we add them to mesh but don't add them to gossipsub.
    if topic in g.topics:
      if g.mesh.peers(topic) < g.parameters.dHigh or
          (peer.outbound and g.mesh.outboundPeers(topic) < g.parameters.dOut):
        # In the spec, there's no mention of DHi here, but implicitly, a
        # peer will be removed from the mesh on next rebalance, so we don't want
        # this peer to push someone else out
        if g.mesh.addPeer(topic, peer):
          g.grafted(peer, topic)
          g.fanout.removePeer(topic, peer)
        else:
          trace "peer already in mesh", peer, topic
      else:
        trace "pruning grafting peer, mesh full",
          peer, topic, score = peer.score, mesh = g.mesh.peers(topic)
        prunes.add(
          ControlPrune(
            topicID: topic,
            peers: g.peerExchangeList(topic),
            backoff: g.parameters.pruneBackoff.seconds.uint64,
          )
        )

        let backoff = Moment.fromNow(g.parameters.pruneBackoff)

        g.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] =
          backoff
    else:
      trace "peer grafting topic we're not interested in", peer, topic
      # gossip 1.1, we do not send a control message prune anymore

  return prunes
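
# Extract the peer-exchange entries from a PRUNE: each advertised peer id
# together with its decoded signed peer record, when the record decodes and
# matches that peer id.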
proc getPeers(
    prune: ControlPrune, peer: PubSubPeer
): seq[(PeerId, Option[PeerRecord])] =
  var routingRecords: seq[(PeerId, Option[PeerRecord])]
  for record in prune.peers:
    var peerRecord = none(PeerRecord)
    if record.signedPeerRecord.len > 0:
      SignedPeerRecord.decode(record.signedPeerRecord).toOpt().withValue(spr):
        if record.peerId != spr.data.peerId:
          trace "peer sent envelope with wrong public key", peer
        else:
          peerRecord = some(spr.data)
      else:
        trace "peer sent invalid SPR", peer

    routingRecords.add((record.peerId, peerRecord))

  routingRecords
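
# Handle PRUNE control messages from `peer`: register the requested backoff
# (clamped to one day), drop the peer from the topic mesh and, if its score
# allows, pass the attached peer-exchange records to the registered
# routing-records handlers.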
proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) =
  for prune in prunes:
    let topic = prune.topicID

    trace "peer pruned topicID", peer, topic

    # add peer backoff
    if prune.backoff > 0:
      let
        # avoid overflows and clamp to reasonable value
        backoffSeconds =
          clamp(prune.backoff + BackoffSlackTime, 0'u64, 1.days.seconds.uint64)
        backoff = Moment.fromNow(backoffSeconds.int64.seconds)
        current = g.backingOff.getOrDefault(topic).getOrDefault(peer.peerId)
      if backoff > current:
        g.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] =
          backoff

    trace "pruning rpc received peer", peer, score = peer.score
    g.pruned(peer, topic, setBackoff = false)
    g.mesh.removePeer(topic, peer)

    if peer.score > g.parameters.gossipThreshold and prune.peers.len > 0 and
        g.routingRecordsHandler.len > 0:
      let routingRecords = prune.getPeers(peer)

      for handler in g.routingRecordsHandler:
        handler(peer.peerId, topic, routingRecords)
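
# Handle IHAVE control messages: for topics we subscribe to, collect message
# ids we have not seen yet into an IWANT reply, limited by the peer's
# per-heartbeat IHAVE budget and ignoring peers below the gossip threshold.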
proc handleIHave*(
    g: GossipSub, peer: PubSubPeer, ihaves: seq[ControlIHave]
): ControlIWant =
  var res: ControlIWant
  if peer.score < g.parameters.gossipThreshold:
    trace "ihave: ignoring low score peer", peer, score = peer.score
  elif peer.iHaveBudget <= 0:
    trace "ihave: ignoring out of budget peer", peer, score = peer.score
  else:
    for ihave in ihaves:
      trace "peer sent ihave", peer, topicID = ihave.topicID, msgs = ihave.messageIDs
      if ihave.topicID in g.topics:
        for msgId in ihave.messageIDs:
          if not g.hasSeen(g.salt(msgId)):
            if peer.iHaveBudget <= 0:
              break
            elif msgId notin res.messageIDs:
              res.messageIDs.add(msgId)
              dec peer.iHaveBudget
              trace "requested message via ihave", messageID = msgId
  # shuffling res.messageIDs before sending it out to increase the likelihood
  # of getting an answer if the peer truncates the list due to internal size restrictions.
  g.rng.shuffle(res.messageIDs)
  return res
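
# Handle IDONTWANT control messages: remember the (salted) message ids the
# peer does not want forwarded, dropping further ids once the current
# heartbeat window holds more than 1000 entries.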
proc handleIDontWant*(g: GossipSub, peer: PubSubPeer, iDontWants: seq[ControlIWant]) =
  for dontWant in iDontWants:
    for messageId in dontWant.messageIDs:
      if peer.iDontWants[^1].len > 1000:
        break
      peer.iDontWants[^1].incl(g.salt(messageId))
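
# Handle IWANT control messages: return the requested messages still present
# in the message cache, counting only ids that were actually announced to this
# peer via IHAVE and giving up on the peer after more than 20 invalid requests.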
proc handleIWant*(
    g: GossipSub, peer: PubSubPeer, iwants: seq[ControlIWant]
): seq[Message] =
  var
    messages: seq[Message]
    invalidRequests = 0
  if peer.score < g.parameters.gossipThreshold:
    trace "iwant: ignoring low score peer", peer, score = peer.score
  else:
    for iwant in iwants:
      for mid in iwant.messageIDs:
        trace "peer sent iwant", peer, messageID = mid
        # canAskIWant will only return true once for a specific message
        if not peer.canAskIWant(mid):
          libp2p_gossipsub_received_iwants.inc(1, labelValues = ["notsent"])

          invalidRequests.inc()
          if invalidRequests > 20:
            libp2p_gossipsub_received_iwants.inc(1, labelValues = ["skipped"])
            return messages
          continue
        let msg = g.mcache.get(mid).valueOr:
          libp2p_gossipsub_received_iwants.inc(1, labelValues = ["unknown"])
          continue
        libp2p_gossipsub_received_iwants.inc(1, labelValues = ["correct"])
        messages.add(msg)
  return messages
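
# Publish the mesh health counters accumulated during a heartbeat to their
# Prometheus gauges.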
proc commitMetrics(metrics: var MeshMetrics) =
  libp2p_gossipsub_low_peers_topics.set(metrics.lowPeersTopics)
  libp2p_gossipsub_no_peers_topics.set(metrics.noPeersTopics)
  libp2p_gossipsub_under_dout_topics.set(metrics.underDoutTopics)
  libp2p_gossipsub_healthy_peers_topics.set(metrics.healthyPeersTopics)
  libp2p_gossipsub_peers_per_topic_gossipsub.set(
    metrics.otherPeersPerTopicGossipsub, labelValues = ["other"]
  )
  libp2p_gossipsub_peers_per_topic_fanout.set(
    metrics.otherPeersPerTopicFanout, labelValues = ["other"]
  )
  libp2p_gossipsub_peers_per_topic_mesh.set(
    metrics.otherPeersPerTopicMesh, labelValues = ["other"]
  )
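
# Rebalance the mesh for `topic`: graft peers while below dLow (and to satisfy
# the dOut outbound quota), prune down towards d when above dHigh, apply
# opportunistic grafting when the median mesh score is too low, and finally
# send the resulting GRAFT/PRUNE control messages.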
proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) =
  logScope:
    topic
    mesh = g.mesh.peers(topic)
    gossipsub = g.gossipsub.peers(topic)

  trace "rebalancing mesh"

  # create a mesh topic that we're subscribing to

  var
    prunes, grafts: seq[PubSubPeer]
    npeers = g.mesh.peers(topic)
    nOutPeers = g.mesh.outboundPeers(topic)
    defaultMesh: HashSet[PubSubPeer]
    backingOff = g.backingOff.getOrDefault(topic)

  if npeers < g.parameters.dLow:
    trace "replenishing mesh", peers = npeers
    # replenish the mesh if we're below Dlo

    var
      candidates: seq[PubSubPeer]
      currentMesh = addr defaultMesh
    g.mesh.withValue(topic, v):
      currentMesh = v
    g.gossipsub.withValue(topic, peerList):
      for it in peerList[]:
        if it.connected and
            # avoid negative score peers
            it.score >= 0.0 and it notin currentMesh[] and
            # don't pick direct peers
            it.peerId notin g.parameters.directPeers and
            # and avoid peers we are backing off
            it.peerId notin backingOff:
          candidates.add(it)

    # shuffle anyway, score might not be used
    g.rng.shuffle(candidates)

    # sort peers by score, high score first since we graft
    candidates.sort(byScore, SortOrder.Descending)

    # Graft peers so we reach a count of D
    candidates.setLen(min(candidates.len, g.parameters.d - npeers))

    trace "grafting", grafting = candidates.len

    if candidates.len > 0:
      for peer in candidates:
        if g.mesh.addPeer(topic, peer):
          g.grafted(peer, topic)
          g.fanout.removePeer(topic, peer)
          grafts &= peer
  elif nOutPeers < g.parameters.dOut:
    trace "replenishing mesh outbound quota", peers = g.mesh.peers(topic)

    var
      candidates: seq[PubSubPeer]
      currentMesh = addr defaultMesh
    g.mesh.withValue(topic, v):
      currentMesh = v
    g.gossipsub.withValue(topic, peerList):
      for it in peerList[]:
        if it.connected and
            # get only outbound ones
            it.outbound and it notin currentMesh[] and
            # avoid negative score peers
            it.score >= 0.0 and
            # don't pick direct peers
            it.peerId notin g.parameters.directPeers and
            # and avoid peers we are backing off
            it.peerId notin backingOff:
          candidates.add(it)

    # shuffle anyway, score might not be used
    g.rng.shuffle(candidates)

    # sort peers by score, high score first, we are grafting
    candidates.sort(byScore, SortOrder.Descending)

    # Graft outgoing peers so we reach a count of dOut
    candidates.setLen(min(candidates.len, g.parameters.dOut - nOutPeers))

    trace "grafting outbound peers", topic, peers = candidates.len

    for peer in candidates:
      if g.mesh.addPeer(topic, peer):
        g.grafted(peer, topic)
        g.fanout.removePeer(topic, peer)
        grafts &= peer

  # get again npeers after possible grafts
  npeers = g.mesh.peers(topic)
  if npeers > g.parameters.dHigh:
    if not isNil(metrics):
      if g.knownTopics.contains(topic):
        libp2p_gossipsub_above_dhigh_condition.inc(labelValues = [topic])
      else:
        libp2p_gossipsub_above_dhigh_condition.inc(labelValues = ["other"])

    # prune peers if we've gone over Dhi
    prunes = toSeq(
      try:
        g.mesh[topic]
      except KeyError:
        raiseAssert "have peers"
    )
    # avoid pruning peers we are currently grafting in this heartbeat
    prunes.keepIf do(x: PubSubPeer) -> bool:
      x notin grafts

    # shuffle anyway, score might not be used
    g.rng.shuffle(prunes)

    # sort peers by score (inverted), pruning, so low score peers are on top
    prunes.sort(byScore, SortOrder.Ascending)

    # keep high score peers
    if prunes.len > g.parameters.dScore:
      prunes.setLen(prunes.len - g.parameters.dScore)

    # collect inbound/outbound info
    var outbound: seq[PubSubPeer]
    var inbound: seq[PubSubPeer]
    for peer in prunes:
      if peer.outbound:
        outbound &= peer
      else:
        inbound &= peer

    let
      meshOutbound = prunes.countIt(it.outbound)
      maxOutboundPrunes = meshOutbound - g.parameters.dOut

    # ensure that there are at least D_out peers first and rebalance to g.d after that
    outbound.setLen(min(outbound.len, max(0, maxOutboundPrunes)))

    # concat remaining outbound peers
    prunes = inbound & outbound

    let pruneLen = prunes.len - g.parameters.d
    if pruneLen > 0:
      # Ok we got some peers to prune,
      # for this heartbeat let's prune those
      g.rng.shuffle(prunes)
      prunes.setLen(pruneLen)

    trace "pruning", prunes = prunes.len
    for peer in prunes:
      trace "pruning peer on rebalance", peer, score = peer.score
      g.pruned(peer, topic)
      g.mesh.removePeer(topic, peer)

    backingOff = g.backingOff.getOrDefault(topic)

  # opportunistic grafting, by spec mesh should not be empty...
  if g.mesh.peers(topic) > 1:
    var peers = toSeq(
      try:
        g.mesh[topic]
      except KeyError:
        raiseAssert "have peers"
    )
    # grafting so high score has priority
    peers.sort(byScore, SortOrder.Descending)
    let medianIdx = peers.len div 2
    let median = peers[medianIdx]
    if median.score < g.parameters.opportunisticGraftThreshold:
      trace "median score below opportunistic threshold", score = median.score

      var
        avail: seq[PubSubPeer]
        currentMesh = addr defaultMesh
      g.mesh.withValue(topic, v):
        currentMesh = v
      g.gossipsub.withValue(topic, peerList):
        for it in peerList[]:
          if it.score >= median.score and # avoid negative score peers
              it notin currentMesh[] and
              # don't pick direct peers
              it.peerId notin g.parameters.directPeers and
              # and avoid peers we are backing off
              it.peerId notin backingOff:
            avail.add(it)

            # by spec, grab only 2
            if avail.len > 1:
              break

      for peer in avail:
        if g.mesh.addPeer(topic, peer):
          g.grafted(peer, topic)
          grafts &= peer
          trace "opportunistic grafting", peer

  if not isNil(metrics):
    npeers = g.mesh.peers(topic)
    if npeers == 0:
      inc metrics[].noPeersTopics
    elif npeers < g.parameters.dLow:
      inc metrics[].lowPeersTopics
    else:
      inc metrics[].healthyPeersTopics

    var meshPeers = toSeq(g.mesh.getOrDefault(topic, initHashSet[PubSubPeer]()))
    meshPeers.keepIf do(x: PubSubPeer) -> bool:
      x.outbound
    if meshPeers.len < g.parameters.dOut:
      inc metrics[].underDoutTopics

    if g.knownTopics.contains(topic):
      libp2p_gossipsub_peers_per_topic_gossipsub.set(
        g.gossipsub.peers(topic).int64, labelValues = [topic]
      )

      libp2p_gossipsub_peers_per_topic_fanout.set(
        g.fanout.peers(topic).int64, labelValues = [topic]
      )

      libp2p_gossipsub_peers_per_topic_mesh.set(
        g.mesh.peers(topic).int64, labelValues = [topic]
      )
    else:
      metrics[].otherPeersPerTopicGossipsub += g.gossipsub.peers(topic).int64
      metrics[].otherPeersPerTopicFanout += g.fanout.peers(topic).int64
      metrics[].otherPeersPerTopicMesh += g.mesh.peers(topic).int64

  trace "mesh balanced"

  # Send changes to peers after table updates to avoid stale state
  if grafts.len > 0:
    let graft =
      RPCMsg(control: some(ControlMessage(graft: @[ControlGraft(topicID: topic)])))
    g.broadcast(grafts, graft, isHighPriority = true)
  if prunes.len > 0:
    let prune = RPCMsg(
      control: some(
        ControlMessage(
          prune:
            @[
              ControlPrune(
                topicID: topic,
                peers: g.peerExchangeList(topic),
                backoff: g.parameters.pruneBackoff.seconds.uint64,
              )
            ]
        )
      )
    )
    g.broadcast(prunes, prune, isHighPriority = true)

proc dropFanoutPeers*(g: GossipSub) =
  # drop peers that we haven't published to in
  # GossipSubFanoutTTL seconds
  let now = Moment.now()
  var drops: seq[string]
  for topic, val in g.lastFanoutPubSub:
    if now > val:
      g.fanout.del(topic)
      drops.add topic
      trace "dropping fanout topic", topic
  for topic in drops:
    g.lastFanoutPubSub.del topic

proc replenishFanout*(g: GossipSub, topic: string) =
  ## get fanout peers for a topic
  logScope:
    topic
  trace "about to replenish fanout"

  if g.fanout.peers(topic) < g.parameters.dLow:
    let currentMesh = g.mesh.getOrDefault(topic)
    trace "replenishing fanout", peers = g.fanout.peers(topic)
    for peer in g.gossipsub.getOrDefault(topic):
      if peer in currentMesh:
        continue
      if g.fanout.addPeer(topic, peer):
        if g.fanout.peers(topic) == g.parameters.d:
          break

  trace "fanout replenished with peers", peers = g.fanout.peers(topic)

proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
  ## gossip iHave messages to peers
  ##

  var cacheWindowSize = 0
  var control: Table[PubSubPeer, ControlMessage]

  let topics = toHashSet(toSeq(g.mesh.keys)) + toHashSet(toSeq(g.fanout.keys))
  trace "getting gossip peers (iHave)", ntopics = topics.len
  for topic in topics:
    if topic notin g.gossipsub:
      trace "topic not in gossip array, skipping", topic = topic
      continue

    let mids = g.mcache.window(topic)
    if not (mids.len > 0):
      trace "no messages to emit"
      continue

    var midsSeq = toSeq(mids)

    cacheWindowSize += midsSeq.len

    trace "got messages to emit", size = midsSeq.len

    # not in spec
    # similar to rust: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/behaviour.rs#L2101
    # and go https://github.com/libp2p/go-libp2p-pubsub/blob/08c17398fb11b2ab06ca141dddc8ec97272eb772/gossipsub.go#L582
    if midsSeq.len > IHaveMaxLength:
      g.rng.shuffle(midsSeq)
      midsSeq.setLen(IHaveMaxLength)

    let
      ihave = ControlIHave(topicID: topic, messageIDs: midsSeq)
      mesh = g.mesh.getOrDefault(topic)
      fanout = g.fanout.getOrDefault(topic)
      gossipPeers = mesh + fanout
    var allPeers = toSeq(g.gossipsub.getOrDefault(topic))

    allPeers.keepIf do(x: PubSubPeer) -> bool:
      x.peerId notin g.parameters.directPeers and x notin gossipPeers and
        x.score >= g.parameters.gossipThreshold

    # https://github.com/libp2p/specs/blob/98c5aa9421703fc31b0833ad8860a55db15be063/pubsub/gossipsub/gossipsub-v1.1.md#adaptive-gossip-dissemination
    let
      factor = (g.parameters.gossipFactor.float * allPeers.len.float).int
      target = max(g.parameters.dLazy, factor)

    if target < allPeers.len:
      g.rng.shuffle(allPeers)
      allPeers.setLen(target)

    for peer in allPeers:
      control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
      for msgId in ihave.messageIDs:
        peer.sentIHaves[^1].incl(msgId)

  libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)

  return control
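
# Run one heartbeat: rotate per-peer IHAVE/IDONTWANT history and reset
# budgets, drop expired backoffs, prune negative-score mesh peers, rebalance
# every subscribed topic, expire stale fanout topics and send out IHAVE gossip.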
proc onHeartbeat(g: GossipSub) =
  # reset IWANT budget
  # reset IHAVE cap
  block:
    for peer in g.peers.values:
      peer.sentIHaves.addFirst(default(HashSet[MessageId]))
      if peer.sentIHaves.len > g.parameters.historyLength:
        discard peer.sentIHaves.popLast()
      peer.iDontWants.addFirst(default(HashSet[SaltedId]))
      if peer.iDontWants.len > g.parameters.historyLength:
        discard peer.iDontWants.popLast()
      peer.iHaveBudget = IHavePeerBudget
      peer.pingBudget = PingsPeerBudget

  var meshMetrics = MeshMetrics()

  for t in toSeq(g.topics.keys):
    # remove expired backoffs
    block:
      handleBackingOff(g.backingOff, t)

    # prune every negative score peer
    # do this before rebalance
    # in order to avoid grafted -> pruned in the same cycle
    let meshPeers = g.mesh.getOrDefault(t)
    var prunes: seq[PubSubPeer]
    for peer in meshPeers:
      if peer.score < 0.0:
        trace "pruning negative score peer", peer, score = peer.score
        g.pruned(peer, t)
        g.mesh.removePeer(t, peer)
        prunes &= peer
    if prunes.len > 0:
      let prune = RPCMsg(
        control: some(
          ControlMessage(
            prune:
              @[
                ControlPrune(
                  topicID: t,
                  peers: g.peerExchangeList(t),
                  backoff: g.parameters.pruneBackoff.seconds.uint64,
                )
              ]
          )
        )
      )
      g.broadcast(prunes, prune, isHighPriority = true)

    # pass by ptr in order to both signal we want to update metrics
    # and as well update the struct for each topic during this iteration
    g.rebalanceMesh(t, addr meshMetrics)

  commitMetrics(meshMetrics)

  g.dropFanoutPeers()

  # replenish known topics to the fanout
  for t in toSeq(g.fanout.keys):
    g.replenishFanout(t)

  let peers = g.getGossipPeers()
  for peer, control in peers:
    # only ihave from here
    for ihave in control.ihave:
      if g.knownTopics.contains(ihave.topicID):
        libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicID])
      else:
        libp2p_pubsub_broadcast_ihave.inc(labelValues = ["generic"])
    g.send(peer, RPCMsg(control: some(control)), isHighPriority = true)

  g.mcache.shift() # shift the cache
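
# Periodic heartbeat loop: runs onHeartbeat at the configured interval and
# fires any registered heartbeat events.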
proc heartbeat*(g: GossipSub) {.async.} =
  heartbeat "GossipSub", g.parameters.heartbeatInterval:
    trace "running heartbeat", instance = cast[int](g)
    g.onHeartbeat()

    for trigger in g.heartbeatEvents:
      trace "firing heartbeat event", instance = cast[int](g)
      trigger.fire()