avoid creating prune message unless we're pruning (#487)

This commit is contained in:
Jacek Sieka 2020-12-15 22:46:03 +01:00 committed by GitHub
parent ea6988d380
commit 9e5ba64c48
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 13 additions and 12 deletions

View File

@ -490,7 +490,7 @@ proc rebalanceMesh(g: GossipSub, topic: string) =
prunes = toSeq(g.mesh[topic]) prunes = toSeq(g.mesh[topic])
# avoid pruning peers we are currently grafting in this heartbeat # avoid pruning peers we are currently grafting in this heartbeat
prunes.keepIf do (x: PubSubPeer) -> bool: x notin grafts prunes.keepIf do (x: PubSubPeer) -> bool: x notin grafts
# shuffle anyway, score might be not used # shuffle anyway, score might be not used
shuffle(prunes) shuffle(prunes)
@ -812,7 +812,7 @@ proc heartbeat(g: GossipSub) {.async.} =
let gossipPeers = g.gossipsub.getOrDefault(t) let gossipPeers = g.gossipsub.getOrDefault(t)
# this will be changed by rebalance but does not matter # this will be changed by rebalance but does not matter
totalMeshPeers += meshPeers.len totalMeshPeers += meshPeers.len
totalGossipPeers += gossipPeers.len totalGossipPeers += g.gossipsub.peers(t)
var prunes: seq[PubSubPeer] var prunes: seq[PubSubPeer]
for peer in meshPeers: for peer in meshPeers:
if peer.score < 0.0: if peer.score < 0.0:
@ -820,12 +820,13 @@ proc heartbeat(g: GossipSub) {.async.} =
g.pruned(peer, t) g.pruned(peer, t)
g.mesh.removePeer(t, peer) g.mesh.removePeer(t, peer)
prunes &= peer prunes &= peer
let prune = RPCMsg(control: some(ControlMessage( if prunes.len > 0:
prune: @[ControlPrune( let prune = RPCMsg(control: some(ControlMessage(
topicID: t, prune: @[ControlPrune(
peers: g.peerExchangeList(t), topicID: t,
backoff: g.parameters.pruneBackoff.seconds.uint64)]))) peers: g.peerExchangeList(t),
g.broadcast(prunes, prune) backoff: g.parameters.pruneBackoff.seconds.uint64)])))
g.broadcast(prunes, prune)
g.rebalanceMesh(t) g.rebalanceMesh(t)
@ -1140,14 +1141,14 @@ method rpcHandler*(g: GossipSub,
if stats[].meshMessageDeliveries > topicParams.meshMessageDeliveriesCap: if stats[].meshMessageDeliveries > topicParams.meshMessageDeliveriesCap:
stats[].meshMessageDeliveries = topicParams.meshMessageDeliveriesCap stats[].meshMessageDeliveries = topicParams.meshMessageDeliveriesCap
do: # make sure we don't loose this information do: # make sure we don't loose this information
pstats[].topicInfos[t] = TopicInfo(meshMessageDeliveries: 1) pstats[].topicInfos[t] = TopicInfo(meshMessageDeliveries: 1)
do: # make sure we don't loose this information do: # make sure we don't loose this information
g.peerStats[peer] = g.peerStats[peer] =
block: block:
var stats = PeerStats() var stats = PeerStats()
stats.topicInfos[t] = TopicInfo(meshMessageDeliveries: 1) stats.topicInfos[t] = TopicInfo(meshMessageDeliveries: 1)
stats stats
# onto the next message # onto the next message
continue continue
@ -1198,12 +1199,12 @@ method rpcHandler*(g: GossipSub,
if stats[].meshMessageDeliveries > topicParams.meshMessageDeliveriesCap: if stats[].meshMessageDeliveries > topicParams.meshMessageDeliveriesCap:
stats[].meshMessageDeliveries = topicParams.meshMessageDeliveriesCap stats[].meshMessageDeliveries = topicParams.meshMessageDeliveriesCap
do: # make sure we don't loose this information do: # make sure we don't loose this information
pstats[].topicInfos[t] = TopicInfo(firstMessageDeliveries: 1, meshMessageDeliveries: 1) pstats[].topicInfos[t] = TopicInfo(firstMessageDeliveries: 1, meshMessageDeliveries: 1)
do: # make sure we don't loose this information do: # make sure we don't loose this information
g.peerStats[peer] = g.peerStats[peer] =
block: block:
var stats = PeerStats() var stats = PeerStats()
stats.topicInfos[t] = TopicInfo(firstMessageDeliveries: 1, meshMessageDeliveries: 1) stats.topicInfos[t] = TopicInfo(firstMessageDeliveries: 1, meshMessageDeliveries: 1)
stats stats
g.floodsub.withValue(t, peers): toSendPeers.incl(peers[]) g.floodsub.withValue(t, peers): toSendPeers.incl(peers[])