populate gossipsub scores (#2091)
* force pushing to fix unstable base

* increase attestation/aggregate queue sizes

  When there are many validators, many aggregates and attestations arrive
  every slot: increase the queue size a bit, and also process batches on each
  idle-loop iteration since it's fairly quick.

* don't score subnets for now

* wrapping up

* refactor and cleanups

* gossip parameters fixes

* comment fix

Co-authored-by: Jacek Sieka <jacek@status.im>
parent 8a09286423
commit 72b01161c1
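The queue-size change described in the message above is not among the hunks shown below. As a rough illustration of the idea only, every name in this sketch being hypothetical rather than taken from this commit, the approach is to scale the bounded attestation/aggregate queue with the validator count and drain a small batch on each idle-loop iteration:

```nim
# Hypothetical sketch; names are illustrative, not from this commit.
import std/deques

const
  baseQueueSize = 1024   # assumed floor
  maxQueueSize = 65536   # assumed ceiling
  batchSize = 16         # items handled per idle-loop iteration

func attestationQueueSize(numValidators: int): int =
  # with many validators, many attestations/aggregates arrive every
  # slot, so grow the queue with the validator count, within bounds
  clamp(numValidators, baseQueueSize, maxQueueSize)

proc drainBatch[T](queue: var Deque[T], process: proc (item: T)) =
  # batches are quick, so process one per idle-loop iteration
  for _ in 0 ..< min(batchSize, queue.len):
    process(queue.popFirst())

when isMainModule:
  var q = initDeque[int]()
  for i in 0 ..< attestationQueueSize(2048): q.addLast(i)
  drainBatch(q, proc (item: int) = discard)
  assert q.len == 2048 - batchSize
```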
@@ -1554,21 +1554,36 @@ proc createEth2Node*(rng: ref BrHmacDrbgContext,
   var switch = newBeaconSwitch(conf, netKeys.seckey, hostAddress, rng)
   let
-    params =
-      block:
-        var p = GossipSubParams.init()
-        # https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/p2p-interface.md#the-gossip-domain-gossipsub
-        p.d = 8
-        p.dLow = 6
-        p.dHigh = 12
-        p.dLazy = 6
-        p.heartbeatInterval = 700.milliseconds
-        p.fanoutTTL = 60.seconds
-        p.historyLength = 6
-        p.historyGossip = 3
-        p.seenTTL = 385.seconds
-        p.gossipFactor = 0.05
-        p
+    params = GossipSubParams(
+      explicit: true,
+      pruneBackoff: 1.minutes,
+      floodPublish: true,
+      gossipFactor: 0.05,
+      d: 8,
+      dLow: 6,
+      dHigh: 12,
+      dScore: 6,
+      dOut: 6 div 2, # less than dLow and no more than dLow/2
+      dLazy: 6,
+      heartbeatInterval: 700.milliseconds,
+      historyLength: 6,
+      historyGossip: 3,
+      fanoutTTL: 60.seconds,
+      seenTTL: 385.seconds,
+      gossipThreshold: -4000,
+      publishThreshold: -8000,
+      graylistThreshold: -16000, # also disconnect threshold
+      opportunisticGraftThreshold: 0,
+      decayInterval: 12.seconds,
+      decayToZero: 0.01,
+      retainScore: 385.seconds,
+      appSpecificWeight: 0.0,
+      ipColocationFactorWeight: -53.75,
+      ipColocationFactorThreshold: 3.0,
+      behaviourPenaltyWeight: -15.9,
+      behaviourPenaltyDecay: 0.986,
+      disconnectBadPeers: true
+    )
     pubsub = GossipSub.init(
       switch = switch,
       msgIdProvider = msgIdProvider,
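For context, the three negative thresholds in the new parameters follow gossipsub v1.1 semantics: below `gossipThreshold` a peer no longer takes part in gossip, below `publishThreshold` our own messages are no longer flood-published to it, and below `graylistThreshold` its RPCs are ignored (and, with `disconnectBadPeers`, the peer is dropped). A minimal sketch of that gating, illustrative rather than nimbus code:

```nim
# Illustrative only: how a peer score maps onto the thresholds
# configured above (gossipsub v1.1 semantics).
type PeerSanction = enum
  psNone        # full participation
  psNoGossip    # score < gossipThreshold (-4000): no gossip exchanged
  psNoPublish   # score < publishThreshold (-8000): not flood-published to
  psGraylisted  # score < graylistThreshold (-16000): RPCs ignored; with
                # disconnectBadPeers the peer is also disconnected

func sanction(score: float): PeerSanction =
  if score < -16000.0: psGraylisted
  elif score < -8000.0: psNoPublish
  elif score < -4000.0: psNoGossip
  else: psNone

when isMainModule:
  assert sanction(-5000.0) == psNoGossip
  assert sanction(0.0) == psNone
```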
@@ -1578,14 +1593,6 @@ proc createEth2Node*(rng: ref BrHmacDrbgContext,
     anonymize = true,
     parameters = params)
 
-  # disable any extra scoring weight
-  # do this after validation (done inside init())
-  # this is a scoring violation for validation
-  # but we don't want to use scores for now
-  pubsub.parameters.behaviourPenaltyWeight = 0.0
-  pubsub.parameters.appSpecificWeight = 0.0
-  pubsub.parameters.ipColocationFactorWeight = 0.0
-
   switch.mount(pubsub)
 
   result = Eth2Node.init(conf, enrForkId, switch, pubsub,
@@ -1601,7 +1608,7 @@ proc announcedENR*(node: Eth2Node): enr.Record =
 proc shortForm*(id: KeyPair): string =
   $PeerID.init(id.pubkey)
 
-proc subscribe*(node: Eth2Node, topic: string, enableTopicMetrics: bool = false) =
+proc subscribe*(node: Eth2Node, topic: string, topicParams: TopicParams, enableTopicMetrics: bool = false) =
   proc dummyMsgHandler(topic: string, data: seq[byte]) {.async.} =
     discard
@@ -1611,6 +1618,7 @@ proc subscribe*(node: Eth2Node, topic: string, enableTopicMetrics: bool = false)
   if enableTopicMetrics:
     node.pubsub.knownTopics.incl(topicName)
 
+  node.pubsub.topicParams[topicName] = topicParams
   node.pubsub.subscribe(topicName, dummyMsgHandler)
 
 proc setValidTopics*(node: Eth2Node, topics: openArray[string]) =
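The added line above is the core of the new signature: per-topic scoring parameters are registered, keyed by the full topic name, before the underlying pubsub subscription. A stand-alone sketch of that registration pattern, where `TopicParamsStub` is a stand-in for libp2p's `TopicParams` rather than the real type:

```nim
# Sketch of the registration pattern; TopicParamsStub stands in for
# libp2p's TopicParams.
import std/tables

type TopicParamsStub = object
  topicWeight: float

var registry = initTable[string, TopicParamsStub]()

proc subscribe(topic: string, params: TopicParamsStub) =
  # record the scoring params for this topic before subscribing,
  # so scoring is in effect from the first message
  registry[topic] = params

when isMainModule:
  subscribe("beacon_block", TopicParamsStub(topicWeight: 0.5))
  assert registry["beacon_block"].topicWeight == 0.5
```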
@@ -42,6 +42,11 @@ import
 
 from eth/common/eth_types import BlockHashOrNumber
 
+from
+  libp2p/protocols/pubsub/gossipsub
+import
+  TopicParams, validateParameters, init
+
 const
   hasPrompt = not defined(withoutPrompt)
@@ -370,8 +375,9 @@ func verifyFinalization(node: BeaconNode, slot: Slot) =
 
 proc installAttestationSubnetHandlers(node: BeaconNode, subnets: set[uint8]) =
   # https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/p2p-interface.md#attestations-and-aggregation
+  # nimbus won't score attestation subnets for now; we rely on the block and aggregate topics, which are more stable and reliable
   for subnet in subnets:
-    node.network.subscribe(getAttestationTopic(node.forkDigest, subnet))
+    node.network.subscribe(getAttestationTopic(node.forkDigest, subnet), TopicParams.init()) # don't score attestation subnets for now
 
 proc updateStabilitySubnetMetadata(
     node: BeaconNode, stabilitySubnets: set[uint8]) =
@@ -671,11 +677,60 @@ proc getAttestationSubnetHandlers(node: BeaconNode) =
     node.attestationSubnets.subscribedSubnets + initialStabilitySubnets)
 
 proc addMessageHandlers(node: BeaconNode) =
-  node.network.subscribe(node.topicBeaconBlocks, enableTopicMetrics = true)
-  node.network.subscribe(getAttesterSlashingsTopic(node.forkDigest))
-  node.network.subscribe(getProposerSlashingsTopic(node.forkDigest))
-  node.network.subscribe(getVoluntaryExitsTopic(node.forkDigest))
-  node.network.subscribe(getAggregateAndProofsTopic(node.forkDigest), enableTopicMetrics = true)
+  # inspired by lighthouse research here
+  # https://gist.github.com/blacktemplar/5c1862cb3f0e32a1a7fb0b25e79e6e2c#file-generate-scoring-params-py
+  const
+    blocksTopicParams = TopicParams(
+      topicWeight: 0.5,
+      timeInMeshWeight: 0.03333333333333333,
+      timeInMeshQuantum: chronos.seconds(12),
+      timeInMeshCap: 300,
+      firstMessageDeliveriesWeight: 1.1471603557060206,
+      firstMessageDeliveriesDecay: 0.9928302477768374,
+      firstMessageDeliveriesCap: 34.86870846001471,
+      meshMessageDeliveriesWeight: -458.31054878249114,
+      meshMessageDeliveriesDecay: 0.9716279515771061,
+      meshMessageDeliveriesThreshold: 0.6849191409056553,
+      meshMessageDeliveriesCap: 2.054757422716966,
+      meshMessageDeliveriesActivation: chronos.seconds(384),
+      meshMessageDeliveriesWindow: chronos.seconds(2),
+      meshFailurePenaltyWeight: -458.31054878249114,
+      meshFailurePenaltyDecay: 0.9716279515771061,
+      invalidMessageDeliveriesWeight: -214.99999999999994,
+      invalidMessageDeliveriesDecay: 0.9971259067705325
+    )
+    aggregateTopicParams = TopicParams(
+      topicWeight: 0.5,
+      timeInMeshWeight: 0.03333333333333333,
+      timeInMeshQuantum: chronos.seconds(12),
+      timeInMeshCap: 300,
+      firstMessageDeliveriesWeight: 0.10764904539552399,
+      firstMessageDeliveriesDecay: 0.8659643233600653,
+      firstMessageDeliveriesCap: 371.5778421725158,
+      meshMessageDeliveriesWeight: -0.07538533073670682,
+      meshMessageDeliveriesDecay: 0.930572040929699,
+      meshMessageDeliveriesThreshold: 53.404248450179836,
+      meshMessageDeliveriesCap: 213.61699380071934,
+      meshMessageDeliveriesActivation: chronos.seconds(384),
+      meshMessageDeliveriesWindow: chronos.seconds(2),
+      meshFailurePenaltyWeight: -0.07538533073670682,
+      meshFailurePenaltyDecay: 0.930572040929699,
+      invalidMessageDeliveriesWeight: -214.99999999999994,
+      invalidMessageDeliveriesDecay: 0.9971259067705325
+    )
+    basicParams = TopicParams.init()
+
+  static:
+    # compile time validation
+    blocksTopicParams.validateParameters().tryGet()
+    aggregateTopicParams.validateParameters().tryGet()
+    basicParams.validateParameters.tryGet()
+
+  node.network.subscribe(node.topicBeaconBlocks, blocksTopicParams, enableTopicMetrics = true)
+  node.network.subscribe(getAttesterSlashingsTopic(node.forkDigest), basicParams)
+  node.network.subscribe(getProposerSlashingsTopic(node.forkDigest), basicParams)
+  node.network.subscribe(getVoluntaryExitsTopic(node.forkDigest), basicParams)
+  node.network.subscribe(getAggregateAndProofsTopic(node.forkDigest), aggregateTopicParams, enableTopicMetrics = true)
   node.getAttestationSubnetHandlers()
 
 func getTopicSubscriptionEnabled(node: BeaconNode): bool =
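One sanity check these generated constants admit: under the gossipsub v1.1 topic-score formula, both positive counters are capped, so the largest positive score a peer can earn on either topic is bounded. With the numbers above, that works out to roughly 25 per topic. The formula here is the spec's; the constants are copied from the parameters above:

```nim
# Max positive per-topic score under gossipsub v1.1:
#   topicWeight * (timeInMeshWeight * timeInMeshCap +
#                  firstMessageDeliveriesWeight * firstMessageDeliveriesCap)
let
  blocksMax = 0.5 * (0.03333333333333333 * 300.0 +
                     1.1471603557060206 * 34.86870846001471)
  aggregateMax = 0.5 * (0.03333333333333333 * 300.0 +
                        0.10764904539552399 * 371.5778421725158)

echo blocksMax     # ~25.0
echo aggregateMax  # ~25.0
```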