diff --git a/beacon_chain/validators/action_tracker.nim b/beacon_chain/validators/action_tracker.nim
index f228e977f..39e8033b6 100644
--- a/beacon_chain/validators/action_tracker.nim
+++ b/beacon_chain/validators/action_tracker.nim
@@ -9,26 +9,24 @@ import
   std/[sequtils, tables],
   stew/shims/[sets, hashes], chronicles,
   eth/p2p/discoveryv5/random2,
-  ../spec/datatypes/base,
-  ../spec/[helpers, network],
-  ../consensus_object_pools/[blockchain_dag, spec_cache]
+  ../spec/forks,
+  ../consensus_object_pools/spec_cache
 
-export base, helpers, network, sets, tables
+export forks, tables, sets
 
 {.push raises: [Defect].}
 
 const
-  SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS* = 4 ##\
+  SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS* = 4
     ## The number of slots before we're up for aggregation duty that we'll
     ## actually subscribe to the subnet we're aggregating for - this gives
     ## the node time to find a mesh etc - can likely be further trimmed
-  KNOWN_VALIDATOR_DECAY = 3 * 32 * SLOTS_PER_EPOCH ##\
+  KNOWN_VALIDATOR_DECAY* = 3 * SLOTS_PER_EPOCH
     ## The number of slots before we "forget" about validators that have
     ## registered for duties - once we've forgotten about a validator, we'll
-    ## eventually decrease the number of stability subnets we're subscribed to -
-    ## 3 epochs because we perform attestations once every epoch, +1 to deal
-    ## with rounding + 1 to deal with the network growing beyond 260k validators
-    ## and us not validating every epoch any more.
+    ## eventually decrease the number of stability subnets we're subscribed to.
+    ## Active validators are expected to register for duty every epoch - we use
+    ## 3 epochs here to counter rounding errors and communication delays.
     ## When known validators decrease, we will keep the stability subnet around
     ## until it "naturally" expires.
 
@@ -42,13 +40,13 @@ type
     subscribeAllAttnets: bool
 
-    currentSlot: Slot ##\
+    currentSlot: Slot
       ## Duties that we accept are limited to a range around the current slot
 
-    subscribedSubnets*: AttnetBits ##\
+    subscribedSubnets*: AttnetBits
       ## All subnets we're currently subscribed to
 
-    stabilitySubnets: seq[tuple[subnet_id: SubnetId, expiration: Epoch]] ##\
+    stabilitySubnets: seq[tuple[subnet_id: SubnetId, expiration: Epoch]]
       ## The subnets on which we listen and broadcast gossip traffic to maintain
       ## the health of the network - these are advertised in the ENR
 
     nextCycleEpoch: Epoch
@@ -63,12 +61,12 @@ type
       ## The latest dependent root we used to compute attestation duties
       ## for internal validators
 
-    knownValidators*: Table[ValidatorIndex, Slot] ##\
+    knownValidators*: Table[ValidatorIndex, Slot]
      ## Validators that we've recently seen - we'll subscribe to one stability
      ## subnet for each such validator - the slot is used to expire validators
      ## that no longer are posting duties
 
-    duties: HashSet[AggregatorDuty] ##\
+    duties: HashSet[AggregatorDuty]
      ## Known aggregation duties in the near future - before each such
      ## duty, we'll subscribe to the corresponding subnet to collect
      ## attestations for the aggregate
@@ -129,7 +127,7 @@ func stabilitySubnets*(tracker: ActionTracker, slot: Slot): AttnetBits =
       res[v.subnet_id.int] = true
   res
 
-func updateSlot*(tracker: var ActionTracker, wallSlot: Slot) =
+proc updateSlot*(tracker: var ActionTracker, wallSlot: Slot) =
   # Prune duties from the past - this collection is kept small because there
   # are only so many slot/subnet combos - prune both internal and API-supplied
   # duties at the same time
@@ -139,7 +137,9 @@ func updateSlot*(tracker: var ActionTracker, wallSlot: Slot) =
   var toPrune: seq[ValidatorIndex]
   for k, v in tracker.knownValidators:
     if v + KNOWN_VALIDATOR_DECAY < wallSlot: toPrune.add k
-  for k in toPrune: tracker.knownValidators.del k
+  for k in toPrune:
+    debug "Validator no longer active", index = k
+    tracker.knownValidators.del k
 
   # One stability subnet per known validator
   static: doAssert RANDOM_SUBNETS_PER_VALIDATOR == 1
diff --git a/tests/test_action_tracker.nim b/tests/test_action_tracker.nim
index c9af001d2..a9739320a 100644
--- a/tests/test_action_tracker.nim
+++ b/tests/test_action_tracker.nim
@@ -29,15 +29,26 @@ suite "subnet tracker":
     check:
       tracker.aggregateSubnets(Slot(0)).countOnes() == 2
       tracker.aggregateSubnets(Slot(1)).countOnes() == 1
+      tracker.knownValidators.len() == 1
     tracker.registerDuty(
       Slot(SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS), SubnetId(2), ValidatorIndex(0),
       true)
     check:
       tracker.aggregateSubnets(Slot(0)).countOnes() == 2
       tracker.aggregateSubnets(Slot(1)).countOnes() == 2
+      tracker.knownValidators.len() == 1
+
+    tracker.updateSlot(
+      Slot(SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS) + KNOWN_VALIDATOR_DECAY + 1)
+
+    check:
+      # Validator should be "forgotten" if they don't register for duty
+      tracker.knownValidators.len() == 0
 
     # Guaranteed to expire
     tracker.updateSlot(
-      Slot(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION * 2 * SLOTS_PER_EPOCH))
+      (Epoch(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION * 2) + 1).start_slot() +
+      SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS + KNOWN_VALIDATOR_DECAY + 1)
+
     check:
       tracker.stabilitySubnets(Slot(0)).countOnes() == 0
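
The updated KNOWN_VALIDATOR_DECAY and the pruning loop in updateSlot follow a simple rule: a
validator whose most recent registered duty lies more than KNOWN_VALIDATOR_DECAY slots behind the
wall slot is forgotten. A minimal, self-contained Nim sketch of that rule (not nimbus-eth2 code:
plain uint64 values stand in for ValidatorIndex and Slot, and the constants assume 32-slot epochs):

    # Sketch of the pruning rule applied by updateSlot in the patch above.
    import std/tables

    const
      SLOTS_PER_EPOCH = 32'u64
      KNOWN_VALIDATOR_DECAY = 3'u64 * SLOTS_PER_EPOCH

    proc pruneKnownValidators(known: var Table[uint64, uint64], wallSlot: uint64) =
      # Collect expired entries first, then delete - same shape as the patch,
      # which avoids mutating the table while iterating over it
      var toPrune: seq[uint64]
      for index, lastSeen in known:
        if lastSeen + KNOWN_VALIDATOR_DECAY < wallSlot:
          toPrune.add index
      for index in toPrune:
        known.del index

    when isMainModule:
      var known = {0'u64: 4'u64}.toTable  # validator 0 last registered at slot 4
      pruneKnownValidators(known, 4'u64 + KNOWN_VALIDATOR_DECAY)      # boundary slot: still known
      doAssert known.len == 1
      pruneKnownValidators(known, 4'u64 + KNOWN_VALIDATOR_DECAY + 1)  # one slot later: forgotten
      doAssert known.len == 0

This mirrors the new test case, which registers a duty at Slot(SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS)
and expects knownValidators to be empty once the tracker advances KNOWN_VALIDATOR_DECAY + 1 slots
past it.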
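
The new "guaranteed to expire" slot in the test combines two bounds: the latest possible natural
expiration of a stability subnet (at most 2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION epochs after
subscribing, which is what the original expression already relied on) and the slot at which the
last-registered validator is forgotten. A small Nim sketch of the arithmetic, assuming mainnet
constants (SLOTS_PER_EPOCH = 32, EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION = 256) and plain uint64
slots instead of the real Slot/Epoch types:

    # Sketch only: the constants below are assumed mainnet values, not imports
    # from the real spec modules.
    const
      SLOTS_PER_EPOCH = 32'u64
      EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION = 256'u64
      SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS = 4'u64
      KNOWN_VALIDATOR_DECAY = 3'u64 * SLOTS_PER_EPOCH

    let
      # start_slot of the epoch just past the latest possible stability-subnet
      # expiration for a subscription made around epoch 0
      subnetExpiryBound =
        (EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION * 2 + 1) * SLOTS_PER_EPOCH
      # slack for the last duty registration plus the validator decay
      validatorExpiryBound =
        SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS + KNOWN_VALIDATOR_DECAY + 1

    # Same value as the expression used in the updated test
    echo subnetExpiryBound + validatorExpiryBound  # 16517 with these constants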