# beacon_chain
# Copyright (c) 2021-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  std/[tables, sequtils],
  bearssl,
  stew/shims/[sets, hashes], chronicles,
  eth/p2p/discoveryv5/random2,
  ../spec/datatypes/base,
  ../spec/[helpers, network],
  ../consensus_object_pools/[blockchain_dag, spec_cache]

export base, helpers, network, sets, tables

{.push raises: [Defect].}

const
  SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS* = 4 ##\
    ## The number of slots before we're up for aggregation duty that we'll
    ## actually subscribe to the subnet we're aggregating for - this gives
    ## the node time to find a mesh etc - can likely be further trimmed
  KNOWN_VALIDATOR_DECAY = 3 * 32 * SLOTS_PER_EPOCH ##\
    ## The number of slots before we "forget" about validators that have
    ## registered for duties - once we've forgotten about a validator, we'll
    ## eventually decrease the number of stability subnets we're subscribed to -
    ## 3 epochs because we perform attestations once every epoch, +1 to deal
    ## with rounding + 1 to deal with the network growing beyond 260k validators
    ## and us not validating every epoch any more.
    ## When known validators decrease, we will keep the stability subnet around
    ## until it "naturally" expires.
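    ## With mainnet presets (SLOTS_PER_EPOCH = 32, 12-second slots), this
    ## works out to 3 * 32 * 32 = 3072 slots, or roughly 10 hours.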

type
  AggregatorDuty* = object
    subnet_id*: SubnetId
    slot*: Slot

  ActionTracker* = object
    rng: ref BrHmacDrbgContext

    subscribeAllAttnets*: bool

    currentSlot*: Slot ##\
      ## Duties that we accept are limited to a range around the current slot

    subscribedSubnets*: AttnetBits ##\
      ## All subnets we're currently subscribed to

    stabilitySubnets: seq[tuple[subnet_id: SubnetId, expiration: Epoch]] ##\
      ## The subnets on which we listen and broadcast gossip traffic to maintain
      ## the health of the network - these are advertised in the ENR
    nextCycleEpoch*: Epoch

    # Used to track the next attestation and proposal slots using an
    # epoch-relative coordinate system. Doesn't need initialization.
    attestingSlots*: array[2, uint32]
    proposingSlots*: array[2, uint32]
    lastCalculatedEpoch*: Epoch

    attesterDepRoot*: Eth2Digest
      ## The latest dependent root we used to compute attestation duties
      ## for internal validators

    knownValidators*: Table[ValidatorIndex, Slot] ##\
      ## Validators that we've recently seen - we'll subscribe to one stability
      ## subnet for each such validator - the slot is used to expire validators
      ## that no longer are posting duties

    duties*: HashSet[AggregatorDuty] ##\
      ## Known aggregation duties in the near future - before each such
      ## duty, we'll subscribe to the corresponding subnet to collect
      ## attestations for the aggregate

func hash*(x: AggregatorDuty): Hash =
  hashAllFields(x)

# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/validator.md#phase-0-attestation-subnet-stability
func randomStabilitySubnet*(
    self: ActionTracker, epoch: Epoch): tuple[subnet_id: SubnetId, expiration: Epoch] =
  (
    self.rng[].rand(ATTESTATION_SUBNET_COUNT - 1).SubnetId,
    epoch + EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION +
      self.rng[].rand(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION.int).uint64,
  )

proc registerDuty*(
    tracker: var ActionTracker, slot: Slot, subnet_id: SubnetId,
    vidx: ValidatorIndex, isAggregator: bool) =
  # Only register relevant duties
  if slot < tracker.currentSlot or
      slot + (SLOTS_PER_EPOCH * 2) <= tracker.currentSlot:
    debug "Irrelevant duty", slot, subnet_id, vidx
    return

  tracker.knownValidators[vidx] = slot # Update validator last-seen registry

  if isAggregator:
    let newDuty = AggregatorDuty(slot: slot, subnet_id: subnet_id)

    if newDuty in tracker.duties:
      return

    debug "Registering aggregation duty", slot, subnet_id, vidx
    tracker.duties.incl(newDuty)
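
when isMainModule:
  # A minimal usage sketch (illustrative only, not part of the module): duties
  # outside the accepted slot window are dropped, while accepted aggregation
  # duties land in `duties` and refresh the validator's last-seen slot.
  block:
    var t = ActionTracker(currentSlot: Slot(100))
    t.registerDuty(Slot(99), SubnetId(1), ValidatorIndex(0), isAggregator = true)
    doAssert t.duties.len == 0 # in the past - ignored
    t.registerDuty(Slot(101), SubnetId(1), ValidatorIndex(0), isAggregator = true)
    doAssert t.duties.len == 1
    doAssert ValidatorIndex(0) in t.knownValidators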

const allSubnetBits = block:
  var res: AttnetBits
  for i in 0..<res.len: res[i] = true
  res

func aggregateSubnets*(tracker: ActionTracker, wallSlot: Slot): AttnetBits =
  var res: AttnetBits
  # Subscribe to subnets for upcoming duties
  for duty in tracker.duties:
    if wallSlot <= duty.slot and
        wallSlot + SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS > duty.slot:
      res[duty.subnet_id.int] = true
  res
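
when isMainModule:
  # Sketch (illustrative only): a duty two slots ahead falls inside the
  # SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS window, so its subnet appears in the
  # aggregate subnet bitmap for the current wall slot.
  block:
    var t = ActionTracker(currentSlot: Slot(100))
    t.registerDuty(Slot(102), SubnetId(5), ValidatorIndex(0), isAggregator = true)
    doAssert t.aggregateSubnets(Slot(100))[5]
    doAssert not t.aggregateSubnets(Slot(100))[6]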

func stabilitySubnets*(tracker: ActionTracker, slot: Slot): AttnetBits =
  if tracker.subscribeAllAttnets:
    allSubnetBits
  else:
    var res: AttnetBits
    for v in tracker.stabilitySubnets:
      res[v.subnet_id.int] = true
    res
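
when isMainModule:
  # Sketch (illustrative only): with `subscribeAllAttnets` set, every
  # attestation subnet is reported as a stability subnet, regardless of which
  # validators are known.
  block:
    let t = ActionTracker(subscribeAllAttnets: true)
    doAssert t.stabilitySubnets(Slot(0))[0]
    doAssert t.stabilitySubnets(Slot(0))[ATTESTATION_SUBNET_COUNT - 1]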

func updateSlot*(tracker: var ActionTracker, wallSlot: Slot) =
  # Prune duties from the past - this collection is kept small because there
  # are only so many slot/subnet combos - prune both internal and API-supplied
  # duties at the same time
  tracker.duties.keepItIf(it.slot >= wallSlot)

  # Keep stability subnets for as long as validators are validating
  var toPrune: seq[ValidatorIndex]
  for k, v in tracker.knownValidators:
    if v + KNOWN_VALIDATOR_DECAY < wallSlot: toPrune.add k
  for k in toPrune: tracker.knownValidators.del k

  # One stability subnet per known validator
  static: doAssert RANDOM_SUBNETS_PER_VALIDATOR == 1

  # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/validator.md#phase-0-attestation-subnet-stability
  let expectedSubnets =
    min(ATTESTATION_SUBNET_COUNT, tracker.knownValidators.len)

  let epoch = wallSlot.epoch
  block:
    # If we have too many stability subnets, remove some expired ones
    var i = 0
    while tracker.stabilitySubnets.len > expectedSubnets and
        i < tracker.stabilitySubnets.len:
      if epoch >= tracker.stabilitySubnets[i].expiration:
        tracker.stabilitySubnets.delete(i)
      else:
        inc i

  for ss in tracker.stabilitySubnets.mitems():
    if epoch >= ss.expiration:
      ss = tracker.randomStabilitySubnet(epoch)

  # and if we have too few, add a few more
  for i in tracker.stabilitySubnets.len..<expectedSubnets:
    tracker.stabilitySubnets.add(tracker.randomStabilitySubnet(epoch))

  tracker.currentSlot = wallSlot
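
when isMainModule:
  # Sketch (illustrative only): advancing the wall slot prunes duties from the
  # past and forgets validators that have not been seen for
  # KNOWN_VALIDATOR_DECAY slots.
  block:
    var t = ActionTracker(currentSlot: Slot(0))
    t.registerDuty(Slot(10), SubnetId(2), ValidatorIndex(1), isAggregator = true)
    doAssert t.duties.len == 1 and t.knownValidators.len == 1
    t.updateSlot(Slot(10) + KNOWN_VALIDATOR_DECAY + 1)
    doAssert t.duties.len == 0 and t.knownValidators.len == 0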

func getNextValidatorAction*(
    actionSlotSource: auto, lastCalculatedEpoch: Epoch, slot: Slot): Slot =
  # The relevant actions are in, depending on calculated bounds:
  # [aS[epoch mod 2], aS[1 - (epoch mod 2)]]
  #    current epoch          next epoch
  let orderedActionSlots = [
    actionSlotSource[     slot.epoch mod 2'u64],
    actionSlotSource[1 - (slot.epoch mod 2'u64)]]

  static: doAssert MIN_ATTESTATION_INCLUSION_DELAY == 1

  # Cleverer ways exist, but a short loop is fine. O(n) vs O(log n) isn't that
  # important when n is 32 or 64, with early exit on average no more than half
  # through.
  for i in [0'u64, 1'u64]:
    let bitmapEpoch = slot.epoch + i

    if bitmapEpoch > lastCalculatedEpoch:
      return FAR_FUTURE_SLOT

    for slotOffset in 0 ..< SLOTS_PER_EPOCH:
      let nextActionSlot = start_slot(bitmapEpoch) + slotOffset
      if ((orderedActionSlots[i] and (1'u32 shl slotOffset)) != 0) and
          nextActionSlot > slot:
        return nextActionSlot

  FAR_FUTURE_SLOT
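
when isMainModule:
  # Sketch (illustrative only): with a single action bit at slot offset 3 of
  # the current epoch, the next action seen from slot 0 is slot 3; once past
  # it, nothing further is scheduled.
  block:
    let actionSlots = [1'u32 shl 3, 0'u32]
    doAssert getNextValidatorAction(actionSlots, Epoch(0), Slot(0)) == Slot(3)
    doAssert getNextValidatorAction(actionSlots, Epoch(0), Slot(3)) ==
      FAR_FUTURE_SLOT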

func getNextAttestationSlot*(tracker: ActionTracker, slot: Slot): Slot =
  getNextValidatorAction(
    tracker.attestingSlots,
    tracker.lastCalculatedEpoch, slot)

func getNextProposalSlot*(tracker: ActionTracker, slot: Slot): Slot =
  getNextValidatorAction(
    tracker.proposingSlots,
    tracker.lastCalculatedEpoch, slot)
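
when isMainModule:
  # Sketch (illustrative only): setting a proposal bit directly in the
  # epoch-relative bitmap makes the corresponding slot the next proposal slot.
  block:
    var t = ActionTracker(lastCalculatedEpoch: Epoch(0))
    t.proposingSlots[0] = 1'u32 shl 2
    doAssert t.getNextProposalSlot(Slot(0)) == Slot(2)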

func needsUpdate*(
    tracker: ActionTracker, state: ForkyHashedBeaconState, epoch: Epoch): bool =
  # Using the attester dependent root here means we lock the action tracking to
  # the dependent root for attestation duties and not block proposal -
  # however, the risk of a proposer reordering in the last epoch is small
  # and the action tracker is speculative in nature.
  tracker.attesterDepRoot !=
    state.dependent_root(if epoch > Epoch(0): epoch - 1 else: epoch)

func updateActions*(
    tracker: var ActionTracker, epochRef: EpochRef) =
  # Updates the schedule for upcoming attestation and proposal work
  let
    epoch = epochRef.epoch

  tracker.attesterDepRoot = epochRef.attester_dependent_root
  tracker.lastCalculatedEpoch = epoch

  let validatorIndices = toHashSet(toSeq(tracker.knownValidators.keys()))

  # Update proposals
  tracker.proposingSlots[epoch mod 2] = 0
  for i, proposer in epochRef.beacon_proposers:
    if proposer.isSome and proposer.get() in validatorIndices:
      tracker.proposingSlots[epoch mod 2] =
        tracker.proposingSlots[epoch mod 2] or (1'u32 shl i)

  tracker.attestingSlots[epoch mod 2] = 0

  # The relevant bitmaps are 32 bits each.
  static: doAssert SLOTS_PER_EPOCH <= 32

  for (committeeIndex, subnet_id, slot) in
      get_committee_assignments(epochRef, validatorIndices):

    doAssert epoch(slot) == epoch

    # Each get_committee_assignments() call here is on the next epoch. At any
    # given time, we only care about two epochs: the current and next epoch. So,
    # after it is done for an epoch, [aS[epoch mod 2], aS[1 - (epoch mod 2)]]
    # provides, sequentially, the current and next epochs' slot schedules. If
    # get_committee_assignments() has not been called for the next epoch yet,
    # typically because there hasn't been a block in the current epoch, there
    # isn't valid information in aS[1 - (epoch mod 2)], and only slots within
    # the current epoch can be known. Usually, this is not a major issue, but
    # when there hasn't been a block substantially through an epoch, it might
    # prove misleading to claim that there aren't attestations known, when it
    # only might be known either way for 3 more slots. However, it's also not
    # as important to attest when blocks aren't flowing as only attestations in
    # blocks garner rewards.
    tracker.attestingSlots[epoch mod 2] =
      tracker.attestingSlots[epoch mod 2] or
        (1'u32 shl (slot mod SLOTS_PER_EPOCH))

func init*(
    T: type ActionTracker, rng: ref BrHmacDrbgContext,
    subscribeAllAttnets: bool): T =
  T(
    rng: rng,
    subscribeAllAttnets: subscribeAllAttnets
  )
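
when isMainModule:
  # Wiring sketch (illustrative only): `newRng` is assumed to come from
  # nim-eth's `keys` module - any source of a `ref BrHmacDrbgContext` would do.
  # A freshly drawn stability subnet expires at least
  # EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION epochs past the given epoch.
  import eth/keys

  block:
    let t = ActionTracker.init(keys.newRng(), subscribeAllAttnets = false)
    let (subnet, expiration) = t.randomStabilitySubnet(Epoch(0))
    doAssert subnet.uint64 < ATTESTATION_SUBNET_COUNT.uint64
    doAssert expiration >= Epoch(0) + EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION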