Fix gossipsub dOut handling (#883)

Tanguy 2023-04-26 13:44:45 +02:00 committed by GitHub
parent db629dca25
commit a1eb53b181
2 changed files with 15 additions and 6 deletions

@@ -164,7 +164,8 @@ proc handleGraft*(g: GossipSub,
     # If they send us a graft before they send us a subscribe, what should
     # we do? For now, we add them to mesh but don't add them to gossipsub.
     if topic in g.topics:
-      if g.mesh.peers(topic) < g.parameters.dHigh or peer.outbound:
+      if g.mesh.peers(topic) < g.parameters.dHigh or
+          (peer.outbound and g.mesh.outboundPeers(topic) < g.parameters.dOut):
         # In the spec, there's no mention of DHi here, but implicitly, a
         # peer will be removed from the mesh on next rebalance, so we don't want
         # this peer to push someone else out
@@ -328,6 +329,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
   var
     prunes, grafts: seq[PubSubPeer]
     npeers = g.mesh.peers(topic)
+    nOutPeers = g.mesh.outboundPeers(topic)
     defaultMesh: HashSet[PubSubPeer]
     backingOff = g.backingOff.getOrDefault(topic)
@@ -370,7 +372,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
       g.fanout.removePeer(topic, peer)
       grafts &= peer
-  else:
+  elif nOutPeers < g.parameters.dOut:
     trace "replenishing mesh outbound quota", peers = g.mesh.peers(topic)
     var
@@ -398,8 +400,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
     # sort peers by score, high score first, we are grafting
     candidates.sort(byScore, SortOrder.Descending)
-    # Graft peers so we reach a count of D
-    candidates.setLen(min(candidates.len, g.parameters.dOut))
+    # Graft outgoing peers so we reach a count of dOut
+    candidates.setLen(min(candidates.len, g.parameters.dOut - nOutPeers))
     trace "grafting outbound peers", topic, peers = candidates.len

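In short, the mesh's outbound-peer count is now tracked explicitly: handleGraft only accepts an additional outbound peer while the outbound quota (dOut) is unmet, and rebalanceMesh grafts at most dOut - nOutPeers outbound candidates instead of a flat dOut. A minimal, self-contained sketch of that quota arithmetic follows; Peer and outboundSlotsLeft are illustrative names, not part of the library API.

import std/sequtils

type
  Peer = object          # illustrative stand-in for PubSubPeer
    outbound: bool

func outboundSlotsLeft(mesh: seq[Peer], dOut: int): int =
  ## How many more outbound peers may still be grafted before the
  ## dOut target is reached (never negative).
  max(0, dOut - mesh.countIt(it.outbound))

when isMainModule:
  let mesh = @[Peer(outbound: true), Peer(outbound: false), Peer(outbound: false)]
  # One outbound peer is already in the mesh, so with dOut = 2 the
  # rebalance would graft at most one more outbound candidate.
  doAssert outboundSlotsLeft(mesh, dOut = 2) == 1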

@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
 else:
   {.push raises: [].}
-import std/[tables, sets]
+import std/[tables, sets, sequtils]
 import ./pubsubpeer, ../../peerid
 export tables, sets
@@ -53,3 +53,10 @@ func peers*(table: PeerTable, topic: string): int =
     except KeyError: raiseAssert "checked with in"
   else:
     0
+
+func outboundPeers*(table: PeerTable, topic: string): int =
+  if topic in table:
+    try: table[topic].countIt(it.outbound)
+    except KeyError: raiseAssert "checked with in"
+  else:
+    0
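The new helper mirrors the existing peers accessor but counts only entries whose outbound flag is set; the sequtils import is needed for countIt. A rough, self-contained illustration of the same counting shape follows; DemoPeer and the seq-valued table are stand-ins for the real PubSubPeer and HashSet[PubSubPeer] values.

import std/[tables, sequtils]

type
  DemoPeer = object                             # illustrative stand-in for PubSubPeer
    outbound: bool
  DemoPeerTable = Table[string, seq[DemoPeer]]  # real PeerTable stores HashSet[PubSubPeer]

func outboundPeers(table: DemoPeerTable, topic: string): int =
  ## Same shape as the helper above: unknown topics count as zero,
  ## otherwise count the peers we dialed (outbound connections).
  if topic in table:
    table[topic].countIt(it.outbound)
  else:
    0

when isMainModule:
  var t: DemoPeerTable
  t["news"] = @[DemoPeer(outbound: true), DemoPeer(outbound: false)]
  doAssert t.outboundPeers("news") == 1
  doAssert t.outboundPeers("not-subscribed") == 0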