From 58340c6809037d7632f7754dcbba7337dec6530b Mon Sep 17 00:00:00 2001
From: Pedro Miranda
Date: Wed, 30 Oct 2024 11:38:21 +0000
Subject: [PATCH] sync achieved

There are currently two known issues:

- Fetching the genesis state on an empty data folder fails with the error
  "The downloaded genesis state cannot be verified (checksum mismatch)".
  The error is raised in fetchGenesisState ->
  network_metadata_downloads.fetchGenesisBytes (L58), apparently in the
  readSsz/withState step; investigation points to the downloaded data or a
  missing configuration value.
  Workaround: comment out lines 58-62, compile and run until the genesis
  state fetch completes, then uncomment them; it works from there on.

- Spam of error messages: "metrics error: New label values must be added
  from same thread as the metric was created from". This happens because
  libp2p declares some gauges inside a thread, so the metrics library
  complains when they are updated from a different thread.
  (No workaround or fix so far.)
---
 Makefile                                      |    3 +-
 .../consensus/adapted_block_chain_dag.nim     | 2884 +++++++++++++++++
 .../consensus/consensus_wrapper.nim           | 2415 +++++++++++++-
 3 files changed, 5233 insertions(+), 69 deletions(-)
 create mode 100644 nimbus_unified/consensus/adapted_block_chain_dag.nim

diff --git a/Makefile b/Makefile
index 1df7e1170..e0a421d5b 100644
--- a/Makefile
+++ b/Makefile
@@ -370,9 +370,10 @@ endif
 # Nimbus unified related targets

 # builds the unified client
+# NIM_PARAMS := -d:release --parallelBuild:1 -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku,grandine $(NIM_PARAMS)
 nimbus_unified: | build deps
	echo -e $(BUILD_MSG) "build/$@" && \
-		$(ENV_SCRIPT) nim c $(NIM_PARAMS) --verbosity:3 -d:debug -d:disable_libbacktrace -d:libp2p_pki_schemes=secp256k1 -d:chronicles_log_level=TRACE -o:build/$@ "nimbus_unified/$@.nim"
+		$(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:disable_libbacktrace -d:libp2p_pki_schemes=secp256k1 -o:build/$@ "nimbus_unified/$@.nim"

 # Note about building Nimbus as a library:
 #
 # There were `wrappers`, `wrappers-static`, `libnimbus.so` and `libnimbus.a`
diff --git a/nimbus_unified/consensus/adapted_block_chain_dag.nim b/nimbus_unified/consensus/adapted_block_chain_dag.nim
new file mode 100644
index 000000000..e342480dc
--- /dev/null
+++ b/nimbus_unified/consensus/adapted_block_chain_dag.nim
@@ -0,0 +1,2884 @@
+# nimbus_unified
+# Copyright (c) 2024 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
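Regarding the second issue described in the commit message above: the underlying pattern is a collector created on one thread and updated from another. Below is a minimal, illustrative Nim sketch of that pattern, not part of this patch; the names `demo_counter` and `worker` are hypothetical, and it assumes the same `metrics` package imported further down plus `--threads:on`.

import metrics

# Created when the module initialises, i.e. on the main thread.
declareCounter demo_counter, "collector created on the main thread"

proc worker() {.thread.} =
  {.gcsafe.}:
    # Updated from a different thread than the one that created the collector,
    # which is the situation the "New label values must be added from same
    # thread as the metric was created from" error refers to.
    demo_counter.inc()

var t: Thread[void]
createThread(t, worker)
joinThread(t)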
+ +{.push raises: [].} + +import + std/[algorithm, sequtils, tables, sets], + stew/[arrayops, assign2, byteutils], + chronos, metrics, results, snappy, chronicles, + beacon_chain/spec/[beaconstate, eth2_merkleization, eth2_ssz_serialization, helpers, + state_transition, validator], + beacon_chain/spec/forks, + beacon_chain/[beacon_chain_db, beacon_clock, era_db], + beacon_chain/consensus_object_pools/[block_pools_types, block_quarantine] + +export + eth2_merkleization, eth2_ssz_serialization, + block_pools_types, results, beacon_chain_db + +logScope: topics = "chaindag" +# adapted from nimbus-eth2 + +# # https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#interop-metrics +# declareGauge beacon_head_root, "Root of the head block of the beacon chain" +# declareGauge beacon_head_slot, "Slot of the head block of the beacon chain" + +# # https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#interop-metrics +# declareGauge beacon_finalized_epoch, "Current finalized epoch" # On epoch transition +# declareGauge beacon_finalized_root, "Current finalized root" # On epoch transition +# declareGauge beacon_current_justified_epoch, "Current justified epoch" # On epoch transition +# declareGauge beacon_current_justified_root, "Current justified root" # On epoch transition +# declareGauge beacon_previous_justified_epoch, "Current previously justified epoch" # On epoch transition +# declareGauge beacon_previous_justified_root, "Current previously justified root" # On epoch transition + +# declareGauge beacon_reorgs_total_total, "Total occurrences of reorganizations of the chain" # On fork choice; backwards-compat name (used to be a counter) +# declareGauge beacon_reorgs_total, "Total occurrences of reorganizations of the chain" # Interop copy +# declareCounter beacon_state_data_cache_hits, "EpochRef hits" +# declareCounter beacon_state_data_cache_misses, "EpochRef misses" +# declareCounter beacon_state_rewinds, "State database rewinds" + +# declareGauge beacon_active_validators, "Number of validators in the active validator set" +# declareGauge beacon_current_active_validators, "Number of validators in the active validator set" # Interop copy +# declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block +# declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block + +declareCounter beacon_dag_state_replay_seconds, "Time spent replaying states" + +const + EPOCHS_PER_STATE_SNAPSHOT* = 32 + ## When finality happens, we prune historical states from the database except + ## for a snapshot every 32 epochs from which replays can happen - there's a + ## balance here between making long replays and saving on disk space + MAX_SLOTS_PER_PRUNE* = SLOTS_PER_EPOCH + ## We prune the database incrementally so as not to introduce long + ## processing breaks - this number is the maximum number of blocks we allow + ## to be pruned every time the prune call is made (once per slot typically) + ## unless head is moving faster (ie during sync) + + +proc putBlock*( + dag: ChainDAGRef, signedBlock: ForkyTrustedSignedBeaconBlock) = + dag.db.putBlock(signedBlock) + +proc updateState*( + dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId, + save: bool, cache: var StateCache): bool {.gcsafe.} + +template withUpdatedState*( + dag: ChainDAGRef, stateParam: var ForkedHashedBeaconState, + bsiParam: BlockSlotId, okBody: untyped, failureBody: untyped): untyped = + ## Helper 
template that updates stateData to a particular BlockSlot - usage of + ## stateData is unsafe outside of block, or across `await` boundaries + + block: + let bsi {.inject.} = bsiParam + var cache {.inject.} = StateCache() + if updateState(dag, stateParam, bsi, false, cache): + template bid(): BlockId {.inject, used.} = bsi.bid + template updatedState(): ForkedHashedBeaconState {.inject, used.} = stateParam + okBody + else: + failureBody + +func get_effective_balances( + validators: openArray[Validator], epoch: Epoch): seq[Gwei] = + ## Get the balances from a state as counted for fork choice + result.newSeq(validators.len) # zero-init + + for i in 0 ..< result.len: + # All non-active validators have a 0 balance + let validator = unsafeAddr validators[i] + if validator[].is_active_validator(epoch) and not validator[].slashed: + result[i] = validator[].effective_balance + +proc updateValidatorKeys*(dag: ChainDAGRef, validators: openArray[Validator]) = + # Update validator key cache - must be called every time a valid block is + # applied to the state - this is important to ensure that when we sync blocks + # without storing a state (non-epoch blocks essentially), the deposits from + # those blocks are persisted to the in-database cache of immutable validator + # data (but no earlier than that the whole block as been validated) + dag.db.updateImmutableValidators(validators) + +proc updateFinalizedBlocks*(db: BeaconChainDB, newFinalized: openArray[BlockId]) = + if db.db.readOnly: return # TODO abstraction leak - where to put this? + + db.withManyWrites: + for bid in newFinalized: + db.finalizedBlocks.insert(bid.slot, bid.root) + +proc updateFrontfillBlocks*(dag: ChainDAGRef) = + # When backfilling is done and manages to reach the frontfill point, we can + # write the frontfill index knowing that the block information in the + # era files match the chain + if dag.db.db.readOnly: return # TODO abstraction leak - where to put this? + + if dag.frontfillBlocks.len == 0 or dag.backfill.slot > GENESIS_SLOT: + return + + info "Writing frontfill index", slots = dag.frontfillBlocks.len + + dag.db.withManyWrites: + let low = dag.db.finalizedBlocks.low.expect( + "wrote at least tailRef during init") + let blocks = min(low.int, dag.frontfillBlocks.len - 1) + var parent: Eth2Digest + for i in 0..blocks: + let root = dag.frontfillBlocks[i] + if not isZero(root): + dag.db.finalizedBlocks.insert(Slot(i), root) + dag.db.putBeaconBlockSummary( + root, BeaconBlockSummary(slot: Slot(i), parent_root: parent)) + parent = root + + reset(dag.frontfillBlocks) + +func validatorKey*( + dag: ChainDAGRef, index: ValidatorIndex or uint64): Opt[CookedPubKey] = + ## Returns the validator pubkey for the index, assuming it's been observed + ## at any point in time - this function may return pubkeys for indicies that + ## are not (yet) part of the head state (if the key has been observed on a + ## non-head branch)! 
+ dag.db.immutableValidators.load(index) + +template is_merge_transition_complete*( + stateParam: ForkedHashedBeaconState): bool = + withState(stateParam): + when consensusFork >= ConsensusFork.Bellatrix: + is_merge_transition_complete(forkyState.data) + else: + false + +func effective_balances*(epochRef: EpochRef): seq[Gwei] = + try: + SSZ.decode(snappy.decode(epochRef.effective_balances_bytes, uint32.high), + List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]).toSeq() + except CatchableError as exc: + raiseAssert exc.msg + +func getBlockRef*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockRef] = + ## Retrieve a resolved block reference, if available - this function does + ## not return historical finalized blocks, see `getBlockIdAtSlot` for a + ## function that covers the entire known history + let key = KeyedBlockRef.asLookupKey(root) + # HashSet lacks the api to do check-and-get in one lookup - `[]` will return + # the copy of the instance in the set which has more fields than `root` set! + if key in dag.forkBlocks: + try: ok(dag.forkBlocks[key].blockRef()) + except KeyError: raiseAssert "contains" + else: + err() + +func getBlockIdAtSlot*( + state: ForkyHashedBeaconState, slot: Slot): Opt[BlockSlotId] = + ## Use given state to attempt to find a historical `BlockSlotId`. + if slot > state.data.slot: + return Opt.none(BlockSlotId) # State does not know about requested slot + if state.data.slot > slot + SLOTS_PER_HISTORICAL_ROOT: + return Opt.none(BlockSlotId) # Cache has expired + + var idx = slot mod SLOTS_PER_HISTORICAL_ROOT + let root = + if slot == state.data.slot: + state.latest_block_root + else: + state.data.block_roots[idx] + var bid = BlockId(slot: slot, root: root) + + let availableSlots = + min(slot.uint64, slot + SLOTS_PER_HISTORICAL_ROOT - state.data.slot) + for i in 0 ..< availableSlots: + if idx == 0: + idx = SLOTS_PER_HISTORICAL_ROOT + dec idx + if state.data.block_roots[idx] != root: + return Opt.some BlockSlotId.init(bid, slot) + dec bid.slot + + if bid.slot == GENESIS_SLOT: + return Opt.some BlockSlotId.init(bid, slot) + Opt.none(BlockSlotId) # Unknown if there are more empty slots before + +func getBlockIdAtSlot*(dag: ChainDAGRef, slot: Slot): Opt[BlockSlotId] = + ## Retrieve the canonical block at the given slot, or the last block that + ## comes before - similar to atSlot, but without the linear scan - may hit + ## the database to look up early indices. + if slot > dag.finalizedHead.slot: + return dag.head.atSlot(slot).toBlockSlotId() # iterate to the given slot + + if dag.finalizedHead.blck == nil: + # Not initialized yet (in init) + return Opt.none(BlockSlotId) + + if slot >= dag.finalizedHead.blck.slot: + # finalized head is still in memory + return dag.finalizedHead.blck.atSlot(slot).toBlockSlotId() + + # Load from memory, if the block ID is sufficiently recent. + # For checkpoint sync, this is the only available of historical block IDs + # until sufficient blocks have been backfilled. 
+ template tryWithState(state: ForkedHashedBeaconState) = + block: + withState(state): + # State must be a descendent of the finalized chain to be viable + let finBsi = forkyState.getBlockIdAtSlot(dag.finalizedHead.slot) + if finBsi.isSome and # DAG finalized bid slot wrong if CP not @ epoch + finBsi.unsafeGet.bid.root == dag.finalizedHead.blck.bid.root: + let bsi = forkyState.getBlockIdAtSlot(slot) + if bsi.isSome: + return bsi + tryWithState dag.headState + tryWithState dag.epochRefState + tryWithState dag.clearanceState + + # Fallback to database, this only works for backfilled blocks + let finlow = dag.db.finalizedBlocks.low.expect("at least tailRef written") + if slot >= finlow: + var pos = slot + while true: + let root = dag.db.finalizedBlocks.get(pos) + + if root.isSome(): + return ok BlockSlotId.init( + BlockId(root: root.get(), slot: pos), slot) + + doAssert pos > finlow, "We should have returned the finlow" + + pos = pos - 1 + + if slot == GENESIS_SLOT and dag.genesis.isSome(): + return ok dag.genesis.get().atSlot() + + err() # not backfilled yet + +proc containsBlock( + cfg: RuntimeConfig, db: BeaconChainDB, slot: Slot, root: Eth2Digest): bool = + db.containsBlock(root, cfg.consensusForkAtEpoch(slot.epoch)) + +proc containsBlock*(dag: ChainDAGRef, bid: BlockId): bool = + dag.cfg.containsBlock(dag.db, bid.slot, bid.root) + +proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest): + Opt[ForkedTrustedSignedBeaconBlock] = + # When we only have a digest, we don't know which fork it's from so we try + # them one by one - this should be used sparingly + static: doAssert high(ConsensusFork) == ConsensusFork.Electra + if (let blck = db.getBlock(root, electra.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + elif (let blck = db.getBlock(root, deneb.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + elif (let blck = db.getBlock(root, capella.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + elif (let blck = db.getBlock(root, bellatrix.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + elif (let blck = db.getBlock(root, altair.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + elif (let blck = db.getBlock(root, phase0.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + else: + err() + +proc getBlock*( + dag: ChainDAGRef, bid: BlockId, + T: type ForkyTrustedSignedBeaconBlock): Opt[T] = + dag.db.getBlock(bid.root, T) or + getBlock( + dag.era, getStateField(dag.headState, historical_roots).asSeq, + dag.headState.historical_summaries().asSeq, + bid.slot, Opt[Eth2Digest].ok(bid.root), T) + +proc getBlockSSZ*(dag: ChainDAGRef, bid: BlockId, bytes: var seq[byte]): bool = + # Load the SSZ-encoded data of a block into `bytes`, overwriting the existing + # content + let fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) + dag.db.getBlockSSZ(bid.root, bytes, fork) or + (bid.slot <= dag.finalizedHead.slot and + getBlockSSZ( + dag.era, getStateField(dag.headState, historical_roots).asSeq, + dag.headState.historical_summaries().asSeq, + bid.slot, bytes).isOk() and bytes.len > 0) + +proc getBlockSZ*(dag: ChainDAGRef, bid: BlockId, bytes: var seq[byte]): bool = + # Load the snappy-frame-compressed ("SZ") SSZ-encoded data of a block into + # `bytes`, overwriting the existing content + # careful: there are two 
snappy encodings in use, with and without framing! + # Returns true if the block is found, false if not + let fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) + dag.db.getBlockSZ(bid.root, bytes, fork) or + (bid.slot <= dag.finalizedHead.slot and + getBlockSZ( + dag.era, getStateField(dag.headState, historical_roots).asSeq, + dag.headState.historical_summaries().asSeq, + bid.slot, bytes).isOk and bytes.len > 0) + +proc getForkedBlock*( + dag: ChainDAGRef, bid: BlockId): Opt[ForkedTrustedSignedBeaconBlock] = + + let fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) + result.ok(ForkedTrustedSignedBeaconBlock(kind: fork)) + withBlck(result.get()): + type T = type(forkyBlck) + forkyBlck = getBlock(dag, bid, T).valueOr: + getBlock( + dag.era, getStateField(dag.headState, historical_roots).asSeq, + dag.headState.historical_summaries().asSeq, + bid.slot, Opt[Eth2Digest].ok(bid.root), T).valueOr: + result.err() + return + +proc getBlockId*(db: BeaconChainDB, root: Eth2Digest): Opt[BlockId] = + block: # We might have a summary in the database + let summary = db.getBeaconBlockSummary(root) + if summary.isOk(): + return ok(BlockId(root: root, slot: summary.get().slot)) + + block: + # We might have a block without having written a summary - this can happen + # if there was a crash between writing the block and writing the summary, + # specially in databases written by older nimbus versions + let forked = db.getForkedBlock(root) + if forked.isSome(): + # Shouldn't happen too often but.. + let + blck = forked.get() + summary = withBlck(blck): forkyBlck.message.toBeaconBlockSummary() + debug "Writing summary", blck = shortLog(blck) + db.putBeaconBlockSummary(root, summary) + return ok(BlockId(root: root, slot: summary.slot)) + + err() + +proc getBlockId*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockId] = + ## Look up block id by root in history - useful for turning a root into a + ## slot - may hit the database, may return blocks that have since become + ## unviable - use `getBlockIdAtSlot` to check that the block is still viable + ## if used in a sensitive context + block: # If we have a BlockRef, this is the fastest way to get a block id + let blck = dag.getBlockRef(root) + if blck.isOk(): + return ok(blck.get().bid) + + dag.db.getBlockId(root) + +proc getForkedBlock*( + dag: ChainDAGRef, root: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] = + let bid = dag.getBlockId(root) + if bid.isSome(): + dag.getForkedBlock(bid.get()) + else: + # In case we didn't have a summary - should be rare, but .. + dag.db.getForkedBlock(root) + +func isCanonical*(dag: ChainDAGRef, bid: BlockId): bool = + ## Returns `true` if the given `bid` is part of the history selected by + ## `dag.head`. + let current = dag.getBlockIdAtSlot(bid.slot).valueOr: + return false # We don't know, so .. + return current.bid == bid + +func isFinalized*(dag: ChainDAGRef, bid: BlockId): bool = + ## Returns `true` if the given `bid` is part of the finalized history + ## selected by `dag.finalizedHead`. + dag.isCanonical(bid) and (bid.slot <= dag.finalizedHead.slot) + +func parent*(dag: ChainDAGRef, bid: BlockId): Opt[BlockId] = + if bid.slot == 0: + return err() + + if bid.slot > dag.finalizedHead.slot: + # Make sure we follow the correct history as there may be forks + let blck = ? dag.getBlockRef(bid.root) + + doAssert not isNil(blck.parent), "should reach finalized head" + return ok blck.parent.bid + + let bids = ? 
dag.getBlockIdAtSlot(bid.slot - 1) + ok(bids.bid) + +func parentOrSlot*(dag: ChainDAGRef, bsi: BlockSlotId): Opt[BlockSlotId] = + if bsi.slot == 0: + return err() + + if bsi.isProposed: + let parent = ? dag.parent(bsi.bid) + ok BlockSlotId.init(parent, bsi.slot) + else: + ok BlockSlotId.init(bsi.bid, bsi.slot - 1) + +func atSlot*(dag: ChainDAGRef, bid: BlockId, slot: Slot): Opt[BlockSlotId] = + if bid.slot > dag.finalizedHead.slot: + let blck = ? dag.getBlockRef(bid.root) + + if slot > dag.finalizedHead.slot: + return blck.atSlot(slot).toBlockSlotId() + else: + # Check if the given `bid` is still part of history - it might hail from an + # orphaned fork + let existing = ? dag.getBlockIdAtSlot(bid.slot) + if existing.bid != bid: + return err() # Not part of known / relevant history + + if existing.slot == slot: # and bid.slot == slot + return ok existing + + if bid.slot <= slot: + ok BlockSlotId.init(bid, slot) + else: + dag.getBlockIdAtSlot(slot) + +func nextTimestamp[I, T](cache: var LRUCache[I, T]): uint32 = + if cache.timestamp == uint32.high: + for i in 0 ..< I: + template e: untyped = cache.entries[i] + if e.lastUsed != 0: + e.lastUsed = 1 + cache.timestamp = 1 + inc cache.timestamp + cache.timestamp + +template peekIt[I, T](cache: var LRUCache[I, T], predicate: untyped): Opt[T] = + block: + var res: Opt[T] + for i in 0 ..< I: + template e: untyped = cache.entries[i] + template it: untyped {.inject, used.} = e.value + if e.lastUsed != 0 and predicate: + res.ok it + break + res + +template findIt[I, T](cache: var LRUCache[I, T], predicate: untyped): Opt[T] = + block: + var res: Opt[T] + for i in 0 ..< I: + template e: untyped = cache.entries[i] + template it: untyped {.inject, used.} = e.value + if e.lastUsed != 0 and predicate: + e.lastUsed = cache.nextTimestamp + res.ok it + break + res + +template delIt[I, T](cache: var LRUCache[I, T], predicate: untyped) = + block: + for i in 0 ..< I: + template e: untyped = cache.entries[i] + template it: untyped {.inject, used.} = e.value + if e.lastUsed != 0 and predicate: + e.reset() + +func put[I, T](cache: var LRUCache[I, T], value: T) = + var lru = 0 + block: + var min = uint32.high + for i in 0 ..< I: + template e: untyped = cache.entries[i] + if e.lastUsed < min: + min = e.lastUsed + lru = i + if min == 0: + break + + template e: untyped = cache.entries[lru] + e.value = value + e.lastUsed = cache.nextTimestamp + +func epochAncestor(dag: ChainDAGRef, bid: BlockId, epoch: Epoch): + Opt[BlockSlotId] = + ## The epoch ancestor is the last block that has an effect on the epoch- + ## related state data, as updated in `process_epoch` - this block determines + ## effective balances, validator addtions and removals etc and serves as a + ## base for `EpochRef` construction. + if epoch < dag.tail.slot.epoch or bid.slot < dag.tail.slot: + # Not enough information in database to meaningfully process pre-tail epochs + return Opt.none BlockSlotId + + let + dependentSlot = + if epoch == dag.tail.slot.epoch: + # Use the tail as "dependent block" - this may be the genesis block, or, + # in the case of checkpoint sync, the checkpoint block + dag.tail.slot + else: + epoch.start_slot() - 1 + bsi = ? 
dag.atSlot(bid, dependentSlot) + epochSlot = + if epoch == dag.tail.slot.epoch: + dag.tail.slot + else: + epoch.start_slot() + ok BlockSlotId(bid: bsi.bid, slot: epochSlot) + +func epochKey(dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochKey] = + ## The state transition works by storing information from blocks in a + ## "working" area until the epoch transition, then batching work collected + ## during the epoch. Thus, last block in the ancestor epochs is the block + ## that has an impact on epoch currently considered. + ## + ## This function returns an epoch key pointing to that epoch boundary, i.e. the + ## boundary where the last block has been applied to the state and epoch + ## processing has been done. + let bsi = dag.epochAncestor(bid, epoch).valueOr: + return Opt.none(EpochKey) + + Opt.some(EpochKey(bid: bsi.bid, epoch: epoch)) + +func putShufflingRef*(dag: ChainDAGRef, shufflingRef: ShufflingRef) = + ## Store shuffling in the cache + if shufflingRef.epoch < dag.finalizedHead.slot.epoch(): + # Only cache epoch information for unfinalized blocks - earlier states + # are seldomly used (ie RPC), so no need to cache + return + + dag.shufflingRefs.put shufflingRef + +func findShufflingRef*( + dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[ShufflingRef] = + ## Lookup a shuffling in the cache, returning `none` if it's not present - see + ## `getShufflingRef` for a version that creates a new instance if it's missing + let + dependent_slot = epoch.attester_dependent_slot() + dependent_bsi = ? dag.atSlot(bid, dependent_slot) + + # Check `ShufflingRef` cache + let shufflingRef = dag.shufflingRefs.findIt( + it.epoch == epoch and it.attester_dependent_root == dependent_bsi.bid.root) + if shufflingRef.isOk: + return shufflingRef + + # Check `EpochRef` cache + let epochRef = dag.epochRefs.peekIt( + it.shufflingRef.epoch == epoch and + it.shufflingRef.attester_dependent_root == dependent_bsi.bid.root) + if epochRef.isOk: + dag.putShufflingRef(epochRef.get.shufflingRef) + return ok epochRef.get.shufflingRef + + err() + +func findEpochRef*( + dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochRef] = + ## Lookup an EpochRef in the cache, returning `none` if it's not present - see + ## `getEpochRef` for a version that creates a new instance if it's missing + let key = ? 
dag.epochKey(bid, epoch) + + dag.epochRefs.findIt(it.key == key) + +func putEpochRef(dag: ChainDAGRef, epochRef: EpochRef) = + if epochRef.epoch < dag.finalizedHead.slot.epoch(): + # Only cache epoch information for unfinalized blocks - earlier states + # are seldomly used (ie RPC), so no need to cache + return + + dag.epochRefs.put epochRef + +func init*( + T: type ShufflingRef, state: ForkedHashedBeaconState, + cache: var StateCache, epoch: Epoch): T = + let attester_dependent_root = + withState(state): forkyState.dependent_root(epoch.get_previous_epoch) + + ShufflingRef( + epoch: epoch, + attester_dependent_root: attester_dependent_root, + shuffled_active_validator_indices: + cache.get_shuffled_active_validator_indices(state, epoch), + ) + +func init*( + T: type EpochRef, dag: ChainDAGRef, state: ForkedHashedBeaconState, + cache: var StateCache): T = + let + epoch = state.get_current_epoch() + proposer_dependent_root = withState(state): + forkyState.proposer_dependent_root + shufflingRef = dag.findShufflingRef(state.latest_block_id, epoch).valueOr: + let tmp = ShufflingRef.init(state, cache, epoch) + dag.putShufflingRef(tmp) + tmp + + total_active_balance = withState(state): + get_total_active_balance(forkyState.data, cache) + epochRef = EpochRef( + key: dag.epochKey(state.latest_block_id, epoch).expect( + "Valid epoch ancestor when processing state"), + + eth1_data: + getStateField(state, eth1_data), + eth1_deposit_index: + getStateField(state, eth1_deposit_index), + + checkpoints: + FinalityCheckpoints( + justified: getStateField(state, current_justified_checkpoint), + finalized: getStateField(state, finalized_checkpoint)), + + # beacon_proposers: Separately filled below + proposer_dependent_root: proposer_dependent_root, + + shufflingRef: shufflingRef, + total_active_balance: total_active_balance + ) + epochStart = epoch.start_slot() + + for i in 0'u64.. 0: + load(epoch - 1) + + if dag.head != nil: # nil during init.. sigh + let period = dag.head.slot.sync_committee_period + if period == epoch.sync_committee_period and + period notin cache.sync_committees and + period > dag.cfg.ALTAIR_FORK_EPOCH.sync_committee_period(): + # If the block we're aiming for shares ancestry with head, we can reuse + # the cached head committee - this accounts for most "live" cases like + # syncing and checking blocks since the committees rarely change + let periodBsi = dag.atSlot(bid, period.start_slot) + if periodBsi.isSome and periodBsi == + dag.atSlot(dag.head.bid, period.start_slot): + # We often end up sharing sync committees with head during sync / gossip + # validation / head updates + cache.sync_committees[period] = dag.headSyncCommittees + +func containsForkBlock*(dag: ChainDAGRef, root: Eth2Digest): bool = + ## Checks for blocks at the finalized checkpoint or newer + KeyedBlockRef.asLookupKey(root) in dag.forkBlocks + +func isFinalizedStateSnapshot(slot: Slot): bool = + slot.is_epoch and slot.epoch mod EPOCHS_PER_STATE_SNAPSHOT == 0 + +func isStateCheckpoint(dag: ChainDAGRef, bsi: BlockSlotId): bool = + ## State checkpoints are the points in time for which we store full state + ## snapshots, which later serve as rewind starting points when replaying state + ## transitions from database, for example during reorgs. + ## + # As a policy, we only store epoch boundary states without the epoch block + # (if it exists) applied - the rest can be reconstructed by loading an epoch + # boundary state and applying the missing blocks. 
+ # We also avoid states that were produced with empty slots only - as such, + # there is only a checkpoint for the first epoch after a block. + + # The tail block also counts as a state checkpoint! + (bsi.isProposed and bsi.bid == dag.tail) or + (bsi.slot.is_epoch and bsi.slot.epoch == (bsi.bid.slot.epoch + 1)) + +proc getState( + db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest, slot: Slot, + state: var ForkedHashedBeaconState, rollback: RollbackProc): bool = + let state_root = db.getStateRoot(block_root, slot).valueOr: + return false + + db.getState(cfg.consensusForkAtEpoch(slot.epoch), state_root, state, rollback) + +proc containsState*( + db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest, + slots: Slice[Slot], legacy = true): bool = + var slot = slots.b + while slot >= slots.a: + let state_root = db.getStateRoot(block_root, slot) + if state_root.isSome() and + db.containsState( + cfg.consensusForkAtEpoch(slot.epoch), state_root.get(), legacy): + return true + + if slot == slots.a: # avoid underflow at genesis + break + slot -= 1 + false + +proc getState*( + db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest, + slots: Slice[Slot], state: var ForkedHashedBeaconState, + rollback: RollbackProc): bool = + var slot = slots.b + while slot >= slots.a: + let state_root = db.getStateRoot(block_root, slot) + if state_root.isSome() and + db.getState( + cfg.consensusForkAtEpoch(slot.epoch), state_root.get(), state, + rollback): + return true + + if slot == slots.a: # avoid underflow at genesis + break + slot -= 1 + false + +proc getState( + dag: ChainDAGRef, bsi: BlockSlotId, state: var ForkedHashedBeaconState): bool = + ## Load a state from the database given a block and a slot - this will first + ## lookup the state root in the state root table then load the corresponding + ## state, if it exists + if not dag.isStateCheckpoint(bsi): + return false + + let rollbackAddr = + # Any restore point will do as long as it's not the object being updated + if unsafeAddr(state) == unsafeAddr(dag.headState): + unsafeAddr dag.clearanceState + else: + unsafeAddr dag.headState + + let v = addr state + func rollback() = + assign(v[], rollbackAddr[]) + + dag.db.getState(dag.cfg, bsi.bid.root, bsi.slot, state, rollback) + +proc getStateByParent( + dag: ChainDAGRef, bid: BlockId, state: var ForkedHashedBeaconState): bool = + ## Try to load the state referenced by the parent of the given `bid` - this + ## state can be used to advance to the `bid` state itself. + let slot = bid.slot + + let + summary = dag.db.getBeaconBlockSummary(bid.root).valueOr: + return false + parentMinSlot = + dag.db.getBeaconBlockSummary(summary.parent_root). 
+ map(proc(x: auto): auto = x.slot).valueOr: + # in the cases that we don't have slot information, we'll search for the + # state for a few back from the `bid` slot - if there are gaps of empty + # slots larger than this, we will not be able to load the state using this + # trick + if slot.uint64 >= (EPOCHS_PER_STATE_SNAPSHOT * 2) * SLOTS_PER_EPOCH: + slot - (EPOCHS_PER_STATE_SNAPSHOT * 2) * SLOTS_PER_EPOCH + else: + Slot(0) + + let rollbackAddr = + # Any restore point will do as long as it's not the object being updated + if unsafeAddr(state) == unsafeAddr(dag.headState): + unsafeAddr dag.clearanceState + else: + unsafeAddr dag.headState + + let v = addr state + func rollback() = + assign(v[], rollbackAddr[]) + + dag.db.getState( + dag.cfg, summary.parent_root, parentMinSlot..slot, state, rollback) + +proc getNearbyState( + dag: ChainDAGRef, state: var ForkedHashedBeaconState, bid: BlockId, + lowSlot: Slot): Opt[void] = + ## Load state from DB that is close to `bid` and has at least slot `lowSlot`. + var + e = bid.slot.epoch + b = bid + while true: + let stateSlot = e.start_slot + if stateSlot < lowSlot: + return err() + b = (? dag.atSlot(b, max(stateSlot, 1.Slot) - 1)).bid + let bsi = BlockSlotId.init(b, stateSlot) + if not dag.getState(bsi, state): + if e == GENESIS_EPOCH: + return err() + dec e + continue + return ok() + +proc currentSyncCommitteeForPeriod*( + dag: ChainDAGRef, + tmpState: var ForkedHashedBeaconState, + period: SyncCommitteePeriod): Opt[SyncCommittee] = + ## Fetch a `SyncCommittee` for a given sync committee period. + ## For non-finalized periods, follow the chain as selected by fork choice. + let lowSlot = max(dag.tail.slot, dag.cfg.ALTAIR_FORK_EPOCH.start_slot) + if period < lowSlot.sync_committee_period: + return err() + let + periodStartSlot = period.start_slot + syncCommitteeSlot = max(periodStartSlot, lowSlot) + bsi = ? 
dag.getBlockIdAtSlot(syncCommitteeSlot) + dag.withUpdatedState(tmpState, bsi) do: + withState(updatedState): + when consensusFork >= ConsensusFork.Altair: + ok forkyState.data.current_sync_committee + else: err() + do: err() + +proc getBlockIdAtSlot*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, slot: Slot): Opt[BlockId] = + if slot >= state.data.slot: + Opt.some state.latest_block_id + elif state.data.slot <= slot + SLOTS_PER_HISTORICAL_ROOT: + dag.getBlockId(state.data.get_block_root_at_slot(slot)) + else: + Opt.none(BlockId) + +# adapted from nimbus-eth2 + +# proc updateBeaconMetrics( +# state: ForkedHashedBeaconState, bid: BlockId, cache: var StateCache) = + # https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#additional-metrics + # both non-negative, so difference can't overflow or underflow int64 + + # beacon_head_root.set(bid.root.toGaugeValue) + # beacon_head_slot.set(bid.slot.toGaugeValue) + + # withState(state): + # beacon_pending_deposits.set( + # (forkyState.data.eth1_data.deposit_count - + # forkyState.data.eth1_deposit_index).toGaugeValue) + # beacon_processed_deposits_total.set( + # forkyState.data.eth1_deposit_index.toGaugeValue) + + # beacon_current_justified_epoch.set( + # forkyState.data.current_justified_checkpoint.epoch.toGaugeValue) + # beacon_current_justified_root.set( + # forkyState.data.current_justified_checkpoint.root.toGaugeValue) + # beacon_previous_justified_epoch.set( + # forkyState.data.previous_justified_checkpoint.epoch.toGaugeValue) + # beacon_previous_justified_root.set( + # forkyState.data.previous_justified_checkpoint.root.toGaugeValue) + # beacon_finalized_epoch.set( + # forkyState.data.finalized_checkpoint.epoch.toGaugeValue) + # beacon_finalized_root.set( + # forkyState.data.finalized_checkpoint.root.toGaugeValue) + + # let active_validators = count_active_validators( + # forkyState.data, forkyState.data.slot.epoch, cache).toGaugeValue + # beacon_active_validators.set(active_validators) + # beacon_current_active_validators.set(active_validators) + +# import blockchain_dag_light_client + +# export +# blockchain_dag_light_client.getLightClientBootstrap, +# blockchain_dag_light_client.getLightClientUpdateForPeriod, +# blockchain_dag_light_client.getLightClientFinalityUpdate, +# blockchain_dag_light_client.getLightClientOptimisticUpdate + +proc putState(dag: ChainDAGRef, state: ForkedHashedBeaconState, bid: BlockId) = + # Store a state and its root + let slot = getStateField(state, slot) + logScope: + blck = shortLog(bid) + stateSlot = shortLog(slot) + stateRoot = shortLog(getStateRoot(state)) + + if not dag.isStateCheckpoint(BlockSlotId.init(bid, slot)): + return + + # Don't consider legacy tables here, they are slow to read so we'll want to + # rewrite things in the new table anyway. 
+ if dag.db.containsState( + dag.cfg.consensusForkAtEpoch(slot.epoch), getStateRoot(state), + legacy = false): + return + + let startTick = Moment.now() + # Ideally we would save the state and the root lookup cache in a single + # transaction to prevent database inconsistencies, but the state loading code + # is resilient against one or the other going missing + withState(state): + dag.db.putState(forkyState) + + debug "Stored state", putStateDur = Moment.now() - startTick + +proc advanceSlots*( + dag: ChainDAGRef, state: var ForkedHashedBeaconState, slot: Slot, save: bool, + cache: var StateCache, info: var ForkedEpochInfo) = + # Given a state, advance it zero or more slots by applying empty slot + # processing - the state must be positioned at or before `slot` + doAssert getStateField(state, slot) <= slot + + let stateBid = state.latest_block_id + while getStateField(state, slot) < slot: + let + preEpoch = getStateField(state, slot).epoch + + loadStateCache(dag, cache, stateBid, getStateField(state, slot).epoch) + + process_slots( + dag.cfg, state, getStateField(state, slot) + 1, cache, info, + dag.updateFlags).expect("process_slots shouldn't fail when state slot is correct") + if save: + dag.putState(state, stateBid) + + # The reward information in the state transition is computed for epoch + # transitions - when transitioning into epoch N, the activities in epoch + # N-2 are translated into balance updates, and this is what we capture + # in the monitor. This may be inaccurate during a deep reorg (>1 epoch) + # which is an acceptable tradeoff for monitoring. + withState(state): + let postEpoch = forkyState.data.slot.epoch + if preEpoch != postEpoch and postEpoch >= 2: + var proposers: array[SLOTS_PER_EPOCH, Opt[ValidatorIndex]] + let epochRef = dag.findEpochRef(stateBid, postEpoch - 2) + if epochRef.isSome(): + proposers = epochRef[][].beacon_proposers + + dag.validatorMonitor[].registerEpochInfo( + forkyState.data, proposers, info) + +proc applyBlock( + dag: ChainDAGRef, state: var ForkedHashedBeaconState, bid: BlockId, + cache: var StateCache, info: var ForkedEpochInfo): Result[void, cstring] = + loadStateCache(dag, cache, bid, getStateField(state, slot).epoch) + + discard case dag.cfg.consensusForkAtEpoch(bid.slot.epoch) + of ConsensusFork.Phase0: + let data = getBlock(dag, bid, phase0.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + of ConsensusFork.Altair: + let data = getBlock(dag, bid, altair.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + of ConsensusFork.Bellatrix: + let data = getBlock(dag, bid, bellatrix.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + of ConsensusFork.Capella: + let data = getBlock(dag, bid, capella.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + of ConsensusFork.Deneb: + let data = getBlock(dag, bid, deneb.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? 
state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + of ConsensusFork.Electra: + let data = getBlock(dag, bid, electra.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + + ok() + +## NOTE: Adapted from nimbus-eth2/beacon_chain/consensus_object_pools/blockchain_dag.nim +## removed lightclient initialization +proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB, + validatorMonitor: ref ValidatorMonitor, updateFlags: UpdateFlags, + eraPath = ".", + onBlockCb: OnBlockCallback = nil, onHeadCb: OnHeadCallback = nil, + onReorgCb: OnReorgCallback = nil, onFinCb: OnFinalizedCallback = nil, + vanityLogs = default(VanityLogs) + # lcDataConfig = default(LightClientDataConfig) + ): ChainDAGRef = + cfg.checkForkConsistency() + + doAssert updateFlags - {strictVerification} == {}, + "Other flags not supported in ChainDAG" + + # TODO we require that the db contains both a head and a tail block - + # asserting here doesn't seem like the right way to go about it however.. + + # Tail is the first block for which we can construct a state - either + # genesis or a checkpoint + let + startTick = Moment.now() + genesisRoot = db.getGenesisBlock() + tailRoot = db.getTailBlock().expect( + "preInit should have initialized the database with a tail block root") + tail = db.getBlockId(tailRoot).expect( + "tail block summary in database, database corrupt?") + headRoot = db.getHeadBlock().expect("head root, database corrupt?") + head = db.getBlockId(headRoot).expect("head block id, database corrupt?") + + # Have to be careful with this instance, it is not yet fully initialized so + # as to avoid having to allocate a separate "init" state + dag = ChainDAGRef( + db: db, + validatorMonitor: validatorMonitor, + genesis: genesisRoot.map( + proc(x: auto): auto = BlockId(root: x, slot: GENESIS_SLOT)), + tail: tail, + + # The only allowed flag right now is strictVerification, as the others all + # allow skipping some validation. + updateFlags: updateFlags * {strictVerification}, + cfg: cfg, + + vanityLogs: vanityLogs, + + # NOTE: commented from original file + # lcDataStore: initLightClientDataStore( + # lcDataConfig, cfg, db.getLightClientDataDB()), + + onBlockAdded: onBlockCb, + onHeadChanged: onHeadCb, + onReorgHappened: onReorgCb, + onFinHappened: onFinCb, + ) + loadTick = Moment.now() + + var + headRef, curRef: BlockRef + + # When starting from a checkpoint with an empty block, we'll store the state + # "ahead" of the head slot - this slot would be considered finalized + slot = max(head.slot, (tail.slot.epoch + 1).start_slot) + # To know the finalized checkpoint of the head, we need to recreate its + # state - the tail is implicitly finalized, and if we have a finalized block + # table, that provides another hint + finalizedSlot = db.finalizedBlocks.high.get(tail.slot) + cache: StateCache + foundHeadState = false + headBlocks: seq[BlockRef] + + # Load head -> finalized, or all summaries in case the finalized block table + # hasn't been written yet + for blck in db.getAncestorSummaries(head.root): + # The execution block root gets filled in as needed. Nonfinalized Bellatrix + # and later blocks are loaded as optimistic, which gets adjusted that first + # `VALID` fcU from an EL plus markBlockVerified. Pre-merge blocks still get + # marked as `VALID`. 
+ let newRef = BlockRef.init( + blck.root, Opt.none Eth2Digest, executionValid = false, + blck.summary.slot) + if headRef == nil: + headRef = newRef + + if curRef != nil: + link(newRef, curRef) + + curRef = newRef + + dag.forkBlocks.incl(KeyedBlockRef.init(curRef)) + + if not foundHeadState: + foundHeadState = db.getState( + cfg, blck.root, blck.summary.slot..slot, dag.headState, noRollback) + slot = blck.summary.slot + + if not foundHeadState: + # When the database has been written with a pre-fork version of the + # software, it may happen that blocks produced using an "unforked" + # chain get written to the database - we need to skip such blocks + # when loading the database with a fork-compatible version + if containsBlock(cfg, db, curRef.slot, curRef.root): + headBlocks.add curRef + else: + if headBlocks.len > 0: + fatal "Missing block needed to create head state, database corrupt?", + curRef = shortLog(curRef) + quit 1 + # Without the block data we can't form a state for this root, so + # we'll need to move the head back + headRef = nil + dag.forkBlocks.excl(KeyedBlockRef.init(curRef)) + + if curRef.slot <= finalizedSlot: + # Only non-finalized slots get a `BlockRef` + break + + let summariesTick = Moment.now() + + if not foundHeadState: + if not dag.getStateByParent(curRef.bid, dag.headState): + fatal "Could not load head state, database corrupt?", + head = shortLog(head), tail = shortLog(dag.tail) + quit 1 + + block: + # EpochRef needs an epoch boundary state + assign(dag.epochRefState, dag.headState) + + var info: ForkedEpochInfo + + while headBlocks.len > 0: + dag.applyBlock( + dag.headState, headBlocks.pop().bid, cache, + info).expect("head blocks should apply") + + dag.head = headRef + dag.heads = @[headRef] + + withState(dag.headState): + when consensusFork >= ConsensusFork.Altair: + dag.headSyncCommittees = forkyState.data.get_sync_committee_cache(cache) + + assign(dag.clearanceState, dag.headState) + + if dag.headState.latest_block_root == tail.root: + # In case we started from a checkpoint with an empty slot + finalizedSlot = getStateField(dag.headState, slot) + + finalizedSlot = + max( + finalizedSlot, + getStateField(dag.headState, finalized_checkpoint).epoch.start_slot) + + let + configFork = case dag.headState.kind + of ConsensusFork.Phase0: genesisFork(cfg) + of ConsensusFork.Altair: altairFork(cfg) + of ConsensusFork.Bellatrix: bellatrixFork(cfg) + of ConsensusFork.Capella: capellaFork(cfg) + of ConsensusFork.Deneb: denebFork(cfg) + of ConsensusFork.Electra: electraFork(cfg) + stateFork = getStateField(dag.headState, fork) + + # Here, we check only the `current_version` field because the spec + # mandates that testnets starting directly from a particular fork + # should have `previous_version` set to `current_version` while + # this doesn't happen to be the case in network that go through + # regular hard-fork upgrades. 
See for example: + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#testing + if stateFork.current_version != configFork.current_version: + error "State from database does not match network, check --network parameter", + tail = dag.tail, headRef, stateFork, configFork + quit 1 + + # Need to load state to find genesis validators root, before loading era db + dag.era = EraDB.new( + cfg, eraPath, getStateField(dag.headState, genesis_validators_root)) + + # We used an interim finalizedHead while loading the head state above - now + # that we have loaded the dag up to the finalized slot, we can also set + # finalizedHead to its real value + dag.finalizedHead = headRef.atSlot(finalizedSlot) + dag.lastPrunePoint = dag.finalizedHead.toBlockSlotId().expect("not nil") + + doAssert dag.finalizedHead.blck != nil, + "The finalized head should exist at the slot" + + block: # Top up finalized blocks + if db.finalizedBlocks.high.isNone or + db.finalizedBlocks.high.get() < dag.finalizedHead.blck.slot: + # Versions prior to 1.7.0 did not store finalized blocks in the + # database, and / or the application might have crashed between the head + # and finalized blocks updates. + info "Loading finalized blocks", + finHigh = db.finalizedBlocks.high, + finalizedHead = shortLog(dag.finalizedHead) + + var + newFinalized: seq[BlockId] + tmp = dag.finalizedHead.blck + while tmp.parent != nil: + newFinalized.add(tmp.bid) + let p = tmp.parent + tmp.parent = nil + tmp = p + + for blck in db.getAncestorSummaries(tmp.root): + if db.finalizedBlocks.high.isSome and + blck.summary.slot <= db.finalizedBlocks.high.get: + break + + newFinalized.add(BlockId(slot: blck.summary.slot, root: blck.root)) + + db.updateFinalizedBlocks(newFinalized) + + doAssert dag.finalizedHead.blck.parent == nil, + "The finalized head is the last BlockRef with a parent" + + block: + let finalized = db.finalizedBlocks.get(db.finalizedBlocks.high.get()).expect( + "tail at least") + if finalized != dag.finalizedHead.blck.root: + error "Head does not lead to finalized block, database corrupt?", + head = shortLog(head), finalizedHead = shortLog(dag.finalizedHead), + tail = shortLog(dag.tail), finalized = shortLog(finalized) + quit 1 + + dag.backfill = block: + let backfillSlot = db.finalizedBlocks.low.expect("tail at least") + if backfillSlot <= dag.horizon: + # Backfill done, no need to load anything + BeaconBlockSummary() + elif backfillSlot < dag.tail.slot: + let backfillRoot = db.finalizedBlocks.get(backfillSlot).expect( + "low to be loadable") + + db.getBeaconBlockSummary(backfillRoot).expect( + "Backfill block must have a summary: " & $backfillRoot) + elif dag.containsBlock(dag.tail): + db.getBeaconBlockSummary(dag.tail.root).expect( + "Tail block must have a summary: " & $dag.tail.root) + else: + # Checkpoint sync, checkpoint block unavailable + BeaconBlockSummary( + slot: dag.tail.slot + 1, + parent_root: dag.tail.root) + + dag.forkDigests = newClone ForkDigests.init( + cfg, getStateField(dag.headState, genesis_validators_root)) + + withState(dag.headState): + dag.validatorMonitor[].registerState(forkyState.data) + + # updateBeaconMetrics(dag.headState, dag.head.bid, cache) + + let finalizedTick = Moment.now() + + if dag.backfill.slot > GENESIS_SLOT: # Try frontfill from era files + let backfillSlot = dag.backfill.slot - 1 + dag.frontfillBlocks = newSeqOfCap[Eth2Digest](backfillSlot.int) + + let + historical_roots = getStateField(dag.headState, historical_roots).asSeq() + historical_summaries = 
dag.headState.historical_summaries.asSeq() + + var + blocks = 0 + + # Here, we'll build up the slot->root mapping in memory for the range of + # blocks from genesis to backfill, if possible. + for bid in dag.era.getBlockIds( + historical_roots, historical_summaries, Slot(0), Eth2Digest()): + # If backfill has not yet started, the backfill slot itself also needs + # to be served from era files. Checkpoint sync starts from state only + if bid.slot > backfillSlot or + (bid.slot == backfillSlot and bid.root != dag.tail.root): + # If we end up in here, we failed the root comparison just below in + # an earlier iteration + fatal "Era summaries don't lead up to backfill, database or era files corrupt?", + bid, backfillSlot + quit 1 + + # In BeaconState.block_roots, empty slots are filled with the root of + # the previous block - in our data structure, we use a zero hash instead + dag.frontfillBlocks.setLen(bid.slot.int + 1) + dag.frontfillBlocks[bid.slot.int] = bid.root + + if bid.root == dag.backfill.parent_root: + # We've reached the backfill point, meaning blocks are available + # in the sqlite database from here onwards - remember this point in + # time so that we can write summaries to the database - it's a lot + # faster to load from database than to iterate over era files with + # the current naive era file reader. + reset(dag.backfill) + + dag.updateFrontfillBlocks() + + break + + blocks += 1 + + if blocks > 0: + info "Front-filled blocks from era files", blocks, backfillSlot + + let frontfillTick = Moment.now() + + # Fill validator key cache in case we're loading an old database that doesn't + # have a cache + dag.updateValidatorKeys(getStateField(dag.headState, validators).asSeq()) + + # Initialize pruning such that when starting with a database that hasn't been + # pruned, we work our way from the tail to the horizon in incremental steps + dag.lastHistoryPruneHorizon = dag.horizon() + dag.lastHistoryPruneBlockHorizon = block: + let boundary = min(dag.tail.slot, dag.horizon()) + if boundary.epoch() >= EPOCHS_PER_STATE_SNAPSHOT: + start_slot(boundary.epoch() - EPOCHS_PER_STATE_SNAPSHOT) + else: + Slot(0) + + info "Block DAG initialized", + head = shortLog(dag.head), + finalizedHead = shortLog(dag.finalizedHead), + tail = shortLog(dag.tail), + backfill = shortLog(dag.backfill), + + loadDur = loadTick - startTick, + summariesDur = summariesTick - loadTick, + finalizedDur = finalizedTick - summariesTick, + frontfillDur = frontfillTick - finalizedTick, + keysDur = Moment.now() - frontfillTick + + dag.initLightClientDataCache() + + dag + +template genesis_validators_root*(dag: ChainDAGRef): Eth2Digest = + getStateField(dag.headState, genesis_validators_root) + +proc genesisBlockRoot*(dag: ChainDAGRef): Eth2Digest = + dag.db.getGenesisBlock().expect("DB must be initialized with genesis block") + +func getEpochRef*( + dag: ChainDAGRef, state: ForkedHashedBeaconState, cache: var StateCache): EpochRef = + ## Get a cached `EpochRef` or construct one based on the given state - always + ## returns an EpochRef instance + let + bid = state.latest_block_id + epoch = state.get_current_epoch() + + dag.findEpochRef(bid, epoch).valueOr: + let res = EpochRef.init(dag, state, cache) + dag.putEpochRef(res) + res + +proc getEpochRef*( + dag: ChainDAGRef, bid: BlockId, epoch: Epoch, + preFinalized: bool): Result[EpochRef, cstring] = + ## Return a cached EpochRef or construct one from the database, if possible - + ## returns `none` on failure. 
+ ## + ## When `preFinalized` is true, include epochs from before the finalized + ## checkpoint in the search - this potentially can result in long processing + ## times due to state replays. + ## + ## Requests for epochs >= dag.finalizedHead.slot.epoch always return an + ## instance. One must be careful to avoid race conditions in `async` code + ## where the finalized head might change during an `await`. + ## + ## Requests for epochs < dag.finalizedHead.slot.epoch may fail, either because + ## the search was limited by the `preFinalized` flag or because state history + ## has been pruned - `none` will be returned in this case. + if not preFinalized and epoch < dag.finalizedHead.slot.epoch: + return err("Requesting pre-finalized EpochRef") + + if bid.slot < dag.tail.slot or epoch < dag.tail.slot.epoch: + return err("Requesting EpochRef for pruned state") + + let epochRef = dag.findEpochRef(bid, epoch) + if epochRef.isOk(): + # adapted from nimbus-eth2 + # beacon_state_data_cache_hits.inc + return ok epochRef.get() + + # beacon_state_data_cache_misses.inc + + let + ancestor = dag.epochAncestor(bid, epoch).valueOr: + # If we got in here, the bid must be unknown or we would have gotten + # _some_ ancestor (like the tail) + return err("Requesting EpochRef for non-canonical block") + + var cache: StateCache + if not updateState(dag, dag.epochRefState, ancestor, false, cache): + return err("Could not load requested state") + + ok(dag.getEpochRef(dag.epochRefState, cache)) + +proc getEpochRef*( + dag: ChainDAGRef, blck: BlockRef, epoch: Epoch, + preFinalized: bool): Result[EpochRef, cstring] = + dag.getEpochRef(blck.bid, epoch, preFinalized) + +proc getFinalizedEpochRef*(dag: ChainDAGRef): EpochRef = + dag.getEpochRef( + dag.finalizedHead.blck, dag.finalizedHead.slot.epoch, false).expect( + "getEpochRef for finalized head should always succeed") + +proc ancestorSlot*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId, + lowSlot: Slot): Opt[Slot] = + ## Return common ancestor slot of `bid` and `state`, if at least `lowSlot`. + ## Return `none` if no common ancestor is found with slot >= `lowSlot`. + if state.data.slot < lowSlot or bid.slot < lowSlot: + return Opt.none(Slot) + + var stateBid = ? dag.getBlockIdAtSlot(state, bid.slot) + if stateBid.slot < lowSlot: + return Opt.none(Slot) + + var blockBid = (? dag.atSlot(bid, stateBid.slot)).bid + if blockBid.slot < lowSlot: + return Opt.none(Slot) + + while stateBid != blockBid: + if stateBid.slot >= blockBid.slot: + stateBid = ? dag.getBlockIdAtSlot( + state, min(blockBid.slot, stateBid.slot - 1)) + if stateBid.slot < lowSlot: + return Opt.none(Slot) + else: + blockBid = ? dag.parent(blockBid) + if blockBid.slot < lowSlot: + return Opt.none(Slot) + + Opt.some stateBid.slot + +proc computeRandaoMix( + bdata: ForkedTrustedSignedBeaconBlock): Opt[Eth2Digest] = + ## Compute the requested RANDAO mix for `bdata` without `state`, if possible. + withBlck(bdata): + when consensusFork >= ConsensusFork.Bellatrix: + if forkyBlck.message.is_execution_block: + var mix = eth2digest(forkyBlck.message.body.randao_reveal.toRaw()) + mix.data.mxor forkyBlck.message.body.execution_payload.prev_randao.data + return ok mix + Opt.none(Eth2Digest) + +proc computeRandaoMix*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId, + lowSlot: Slot): Opt[Eth2Digest] = + ## Compute the requested RANDAO mix for `bid` based on `state`. + ## Return `none` if `state` and `bid` do not share a common ancestor + ## with slot >= `lowSlot`. + let ancestorSlot = ? 
dag.ancestorSlot(state, bid, lowSlot) + doAssert ancestorSlot <= state.data.slot + doAssert ancestorSlot <= bid.slot + + # If `blck` is post merge, RANDAO information is immediately available + let + bdata = ? dag.getForkedBlock(bid) + fullMix = computeRandaoMix(bdata) + if fullMix.isSome: + return fullMix + + # RANDAO mix has to be recomputed from `bid` and `state` + var mix {.noinit.}: Eth2Digest + proc mixToAncestor(highBid: BlockId): Opt[void] = + ## Mix in/out RANDAO reveals back to `ancestorSlot` + var bid = highBid + while bid.slot > ancestorSlot: + let bdata = ? dag.getForkedBlock(bid) + withBlck(bdata): # See `process_randao` / `process_randao_mixes_reset` + mix.data.mxor eth2digest( + forkyBlck.message.body.randao_reveal.toRaw()).data + bid = ? dag.parent(bid) + ok() + + # Mix in RANDAO from `bid` + if ancestorSlot < bid.slot: + withBlck(bdata): + mix = eth2digest(forkyBlck.message.body.randao_reveal.toRaw()) + ? mixToAncestor(? dag.parent(bid)) + else: + mix.reset() + + # Mix in RANDAO from `state` + let ancestorEpoch = ancestorSlot.epoch + if ancestorEpoch + EPOCHS_PER_HISTORICAL_VECTOR <= state.data.slot.epoch: + return Opt.none(Eth2Digest) + let mixRoot = state.dependent_root(ancestorEpoch + 1) + if mixRoot.isZero: + return Opt.none(Eth2Digest) + ? mixToAncestor(? dag.getBlockId(mixRoot)) + mix.data.mxor state.data.get_randao_mix(ancestorEpoch).data + + ok mix + +proc computeRandaoMixFromMemory*( + dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] = + ## Compute requested RANDAO mix for `bid` from available states (~5 ms). + template tryWithState(state: ForkedHashedBeaconState) = + block: + withState(state): + let mix = dag.computeRandaoMix(forkyState, bid, lowSlot) + if mix.isSome: + return mix + tryWithState dag.headState + tryWithState dag.epochRefState + tryWithState dag.clearanceState + +proc computeRandaoMixFromDatabase*( + dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] = + ## Compute requested RANDAO mix for `bid` using closest DB state (~500 ms). + let state = assignClone(dag.headState) + ? dag.getNearbyState(state[], bid, lowSlot) + withState(state[]): + dag.computeRandaoMix(forkyState, bid, lowSlot) + +proc computeRandaoMix( + dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] = + # Try to compute from states available in memory + let mix = dag.computeRandaoMixFromMemory(bid, lowSlot) + if mix.isSome: + return mix + + # If `blck` is post merge, RANDAO information is immediately available + let + bdata = ? dag.getForkedBlock(bid) + fullMix = computeRandaoMix(bdata) + if fullMix.isSome: + return fullMix + + # Fall back to database + dag.computeRandaoMixFromDatabase(bid, lowSlot) + +proc computeRandaoMix*(dag: ChainDAGRef, bid: BlockId): Opt[Eth2Digest] = + ## Compute requested RANDAO mix for `bid`. + const maxSlotDistance = SLOTS_PER_HISTORICAL_ROOT + let lowSlot = max(bid.slot, maxSlotDistance.Slot) - maxSlotDistance + dag.computeRandaoMix(bid, lowSlot) + +proc lowSlotForAttesterShuffling*(epoch: Epoch): Slot = + ## Return minimum slot that a state must share ancestry with a block history + ## so that RANDAO at `epoch.attester_dependent_slot` can be computed. + + # A state must be somewhat recent so that `get_active_validator_indices` + # for the queried `epoch` cannot be affected by any such skipped processing. 
+ const numDelayEpochs = compute_activation_exit_epoch(GENESIS_EPOCH).uint64 + let lowEpoch = max(epoch, (numDelayEpochs - 1).Epoch) - (numDelayEpochs - 1) + lowEpoch.start_slot + +proc computeShufflingRef*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, + blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = + ## Compute `ShufflingRef` for `blck@epoch` based on `state`. + ## If `state` has unviable `get_active_validator_indices`, return `none`. + + let + dependentBid = (? dag.atSlot(blck.bid, epoch.attester_dependent_slot)).bid + lowSlot = epoch.lowSlotForAttesterShuffling + mix = ? dag.computeRandaoMix(state, dependentBid, lowSlot) + + return ok ShufflingRef( + epoch: epoch, + attester_dependent_root: dependentBid.root, + shuffled_active_validator_indices: + state.data.get_shuffled_active_validator_indices(epoch, mix)) + +proc computeShufflingRefFromMemory*( + dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = + ## Compute `ShufflingRef` from available states (~5 ms). + template tryWithState(state: ForkedHashedBeaconState) = + block: + withState(state): + let shufflingRef = dag.computeShufflingRef(forkyState, blck, epoch) + if shufflingRef.isOk: + return shufflingRef + tryWithState dag.headState + tryWithState dag.epochRefState + tryWithState dag.clearanceState + +proc getShufflingRef*( + dag: ChainDAGRef, blck: BlockRef, epoch: Epoch, + preFinalized: bool): Opt[ShufflingRef] = + ## Return the shuffling in the given history and epoch - this potentially is + ## faster than returning a full EpochRef because the shuffling is determined + ## an epoch in advance and therefore is less sensitive to reorgs + var shufflingRef = dag.findShufflingRef(blck.bid, epoch) + if shufflingRef.isSome: + return shufflingRef + + # Use existing states to quickly compute the shuffling + shufflingRef = dag.computeShufflingRefFromMemory(blck, epoch) + if shufflingRef.isSome: + dag.putShufflingRef(shufflingRef.get) + return shufflingRef + + # Last resort, this can take several seconds as this may replay states + let epochRef = dag.getEpochRef(blck, epoch, preFinalized).valueOr: + return Opt.none ShufflingRef + dag.putShufflingRef(epochRef.shufflingRef) + Opt.some epochRef.shufflingRef + +func stateCheckpoint*(dag: ChainDAGRef, bsi: BlockSlotId): BlockSlotId = + ## The first ancestor BlockSlot that is a state checkpoint + var bsi = bsi + while not dag.isStateCheckpoint(bsi): + if bsi.isProposed: + bsi.bid = dag.parent(bsi.bid).valueOr: + break + else: + bsi.slot = bsi.slot - 1 + bsi + +template forkAtEpoch*(dag: ChainDAGRef, epoch: Epoch): Fork = + forkAtEpoch(dag.cfg, epoch) + +proc getBlockRange*( + dag: ChainDAGRef, startSlot: Slot, skipStep: uint64, + output: var openArray[BlockId]): Natural = + ## This function populates an `output` buffer of blocks + ## with a slots ranging from `startSlot` up to, but not including, + ## `startSlot + skipStep * output.len`, skipping any slots that don't have + ## a block. + ## + ## Blocks will be written to `output` from the end without gaps, even if + ## a block is missing in a particular slot. The return value shows how + ## many slots were missing blocks - to iterate over the result, start + ## at this index. + ## + ## If there were no blocks in the range, `output.len` will be returned. 
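+  ##
+  ## Illustrative usage sketch (an added example, not upstream documentation),
+  ## assuming a `dag: ChainDAGRef` and a `startSlot: Slot` in scope:
+  ##
+  ##   var buf: array[32, BlockId]
+  ##   let firstIdx = dag.getBlockRange(startSlot, skipStep = 1, buf)
+  ##   for i in firstIdx ..< buf.len:
+  ##     discard buf[i] # filled entries, in ascending slot order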
+  let
+    requestedCount = output.lenu64
+    headSlot = dag.head.slot
+
+  trace "getBlockRange entered",
+    head = shortLog(dag.head.root), requestedCount, startSlot, skipStep, headSlot
+
+  if startSlot < dag.backfill.slot:
+    debug "Got request for pre-backfill slot",
+      startSlot, backfillSlot = dag.backfill.slot, horizonSlot = dag.horizon
+    return output.len
+
+  if headSlot <= startSlot or requestedCount == 0:
+    return output.len # Identical to returning an empty set of blocks, as indicated above
+
+  let
+    runway = uint64(headSlot - startSlot)
+
+    # This is the number of blocks that will follow the start block
+    extraSlots = min(runway div skipStep, requestedCount - 1)
+
+    # If `skipStep` is very large, `extraSlots` should be 0 from
+    # the previous line, so `endSlot` will be equal to `startSlot`:
+    endSlot = startSlot + extraSlots * skipStep
+
+  var
+    curSlot = endSlot
+    o = output.len
+
+  # Process all blocks that follow the start block (may be zero blocks)
+  while curSlot > startSlot:
+    let bs = dag.getBlockIdAtSlot(curSlot)
+    if bs.isSome and bs.get().isProposed():
+      o -= 1
+      output[o] = bs.get().bid
+    curSlot -= skipStep
+
+  # Handle start slot separately (to avoid underflow when computing curSlot)
+  let bs = dag.getBlockIdAtSlot(startSlot)
+  if bs.isSome and bs.get().isProposed():
+    o -= 1
+    output[o] = bs.get().bid
+
+  o # Return the index of the first non-nil item in the output
+
+proc updateState*(
+    dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId,
+    save: bool, cache: var StateCache): bool =
+  ## Rewind or advance state such that it matches the given block and slot -
+  ## this may include replaying from an earlier snapshot if blck is on a
+  ## different branch or has advanced to a higher slot number than slot
+  ## If `bs.slot` is higher than `bs.blck.slot`, `updateState` will fill in
+  ## with empty/non-block slots
+
+  # First, see if we're already at the requested block. 
If we are, also check + # that the state has not been advanced past the desired block - if it has, + # an earlier state must be loaded since there's no way to undo the slot + # transitions + + let + startTick = Moment.now() + current {.used.} = withState(state): + BlockSlotId.init(forkyState.latest_block_id, forkyState.data.slot) + + var + ancestors: seq[BlockId] + found = false + + template exactMatch(state: ForkedHashedBeaconState, bsi: BlockSlotId): bool = + # The block is the same and we're at an early enough slot - the state can + # be used to arrive at the desired blockslot + state.matches_block_slot(bsi.bid.root, bsi.slot) + + template canAdvance(state: ForkedHashedBeaconState, bsi: BlockSlotId): bool = + # The block is the same and we're at an early enough slot - the state can + # be used to arrive at the desired blockslot + state.can_advance_slots(bsi.bid.root, bsi.slot) + + # Fast path: check all caches for an exact match - this is faster than + # advancing a state where there's epoch processing to do, by a wide margin - + # it also avoids `hash_tree_root` for slot processing + if exactMatch(state, bsi): + found = true + elif not save: + # When required to save states, we cannot rely on the caches because that + # would skip the extra processing that save does - not all information that + # goes into the database is cached + if exactMatch(dag.headState, bsi): + assign(state, dag.headState) + found = true + elif exactMatch(dag.clearanceState, bsi): + assign(state, dag.clearanceState) + found = true + elif exactMatch(dag.epochRefState, bsi): + assign(state, dag.epochRefState) + found = true + + const RewindBlockThreshold = 64 + + if not found: + # No exact match found - see if any in-memory state can be used as a base + # onto which we can apply a few blocks - there's a tradeoff here between + # loading the state from disk and performing the block applications + var cur = bsi + while ancestors.len < RewindBlockThreshold: + if isZero(cur.bid.root): # tail reached + break + + if canAdvance(state, cur): # Typical case / fast path when there's no reorg + found = true + break + + if not save: # see above + if canAdvance(dag.headState, cur): + assign(state, dag.headState) + found = true + break + + if canAdvance(dag.clearanceState, cur): + assign(state, dag.clearanceState) + found = true + break + + if canAdvance(dag.epochRefState, cur): + assign(state, dag.epochRefState) + found = true + break + + if cur.isProposed(): + # This is not an empty slot, so the block will need to be applied to + # eventually reach bs + ancestors.add(cur.bid) + + # Move slot by slot to capture epoch boundary states + cur = dag.parentOrSlot(cur).valueOr: + break + + if not found: + debug "UpdateStateData cache miss", + current = shortLog(current), target = shortLog(bsi) + + # Either the state is too new or was created by applying a different block. + # We'll now resort to loading the state from the database then reapplying + # blocks until we reach the desired point in time. + + var cur = bsi + ancestors.setLen(0) + + # Look for a state in the database and load it - as long as it cannot be + # found, keep track of the blocks that are needed to reach it from the + # state that eventually will be found. 
+ # If we hit the tail, it means that we've reached a point for which we can + # no longer recreate history - this happens for example when starting from + # a checkpoint block + let startEpoch = bsi.slot.epoch + while not canAdvance(state, cur) and + not dag.db.getState(dag.cfg, cur.bid.root, cur.slot, state, noRollback): + # There's no state saved for this particular BlockSlot combination, and + # the state we have can't trivially be advanced (in case it was older than + # RewindBlockThreshold), keep looking.. + if cur.isProposed(): + # This is not an empty slot, so the block will need to be applied to + # eventually reach bs + ancestors.add(cur.bid) + + if cur.slot == GENESIS_SLOT or (cur.slot < dag.finalizedHead.slot and + cur.slot.epoch + uint64(EPOCHS_PER_STATE_SNAPSHOT) * 2 < startEpoch): + # We've either walked two full state snapshot lengths or hit the tail + # and still can't find a matching state: this can happen when + # starting the node from an arbitrary finalized checkpoint and not + # backfilling the states + notice "Request for pruned historical state", + request = shortLog(bsi), tail = shortLog(dag.tail), + cur = shortLog(cur), finalized = shortLog(dag.finalizedHead) + return false + + # Move slot by slot to capture epoch boundary states + cur = dag.parentOrSlot(cur).valueOr: + if not dag.getStateByParent(cur.bid, state): + notice "Request for pruned historical state", + request = shortLog(bsi), tail = shortLog(dag.tail), + cur = shortLog(cur) + return false + break + + # beacon_state_rewinds.inc() + + # Starting state has been assigned, either from memory or database + let + assignTick = Moment.now() + ancestor {.used.} = withState(state): + BlockSlotId.init(forkyState.latest_block_id, forkyState.data.slot) + ancestorRoot {.used.} = getStateRoot(state) + + var info: ForkedEpochInfo + # Time to replay all the blocks between then and now + for i in countdown(ancestors.len - 1, 0): + # Because the ancestors are in the database, there's no need to persist them + # again. Also, because we're applying blocks that were loaded from the + # database, we can skip certain checks that have already been performed + # before adding the block to the database. 
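+    # Note (added clarification): `ancestors` was collected newest-first while
+    # walking back towards the state loaded from memory or the database, so
+    # this `countdown` applies the blocks oldest-first, in increasing slot
+    # order, on top of that state.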
+ if (let res = dag.applyBlock(state, ancestors[i], cache, info); res.isErr): + warn "Failed to apply block from database", + blck = shortLog(ancestors[i]), + state_bid = shortLog(state.latest_block_id), + error = res.error() + + return false + + # ...and make sure to process empty slots as requested + dag.advanceSlots(state, bsi.slot, save, cache, info) + + # ...and make sure to load the state cache, if it exists + loadStateCache(dag, cache, bsi.bid, getStateField(state, slot).epoch) + + let + assignDur = assignTick - startTick + replayDur = Moment.now() - assignTick + beacon_dag_state_replay_seconds.inc(replayDur.toFloatSeconds) + + # TODO https://github.com/status-im/nim-chronicles/issues/108 + if (assignDur + replayDur) >= MinSignificantProcessingDuration: + # This might indicate there's a cache that's not in order or a disk that is + # too slow - for now, it's here for investigative purposes and the cutoff + # time might need tuning + info "State replayed", + blocks = ancestors.len, + slots = getStateField(state, slot) - ancestor.slot, + current = shortLog(current), + ancestor = shortLog(ancestor), + target = shortLog(bsi), + ancestorStateRoot = shortLog(ancestorRoot), + targetStateRoot = shortLog(getStateRoot(state)), + found, + assignDur, + replayDur + elif ancestors.len > 0: + debug "State replayed", + blocks = ancestors.len, + slots = getStateField(state, slot) - ancestor.slot, + current = shortLog(current), + ancestor = shortLog(ancestor), + target = shortLog(bsi), + ancestorStateRoot = shortLog(ancestorRoot), + targetStateRoot = shortLog(getStateRoot(state)), + found, + assignDur, + replayDur + else: # Normal case! + trace "State advanced", + blocks = ancestors.len, + slots = getStateField(state, slot) - ancestor.slot, + current = shortLog(current), + ancestor = shortLog(ancestor), + target = shortLog(bsi), + ancestorStateRoot = shortLog(ancestorRoot), + targetStateRoot = shortLog(getStateRoot(state)), + found, + assignDur, + replayDur + + true + +proc delState(dag: ChainDAGRef, bsi: BlockSlotId) = + # Delete state and mapping for a particular block+slot + if not dag.isStateCheckpoint(bsi): + return # We only ever save epoch states + + if (let root = dag.db.getStateRoot(bsi.bid.root, bsi.slot); root.isSome()): + dag.db.withManyWrites: + dag.db.delStateRoot(bsi.bid.root, bsi.slot) + dag.db.delState( + dag.cfg.consensusForkAtEpoch(bsi.slot.epoch), root.get()) + +proc pruneBlockSlot(dag: ChainDAGRef, bs: BlockSlot) = + # TODO: should we move that disk I/O to `onSlotEnd` + dag.delState(bs.toBlockSlotId().expect("not nil")) + + if bs.isProposed(): + # Update light client data + # dag.deleteLightClientData(bs.blck.bid) + + bs.blck.executionValid = true + dag.forkBlocks.excl(KeyedBlockRef.init(bs.blck)) + discard dag.db.delBlock( + dag.cfg.consensusForkAtEpoch(bs.blck.slot.epoch), bs.blck.root) + +proc pruneBlocksDAG(dag: ChainDAGRef) = + ## This prunes the block DAG + ## This does NOT prune the cached state checkpoints and EpochRef + ## This must be done after a new finalization point is reached + ## to invalidate pending blocks or attestations referring + ## to a now invalid fork. + ## + ## This does NOT update the `dag.lastPrunePoint` field. + ## as the caches and fork choice can be pruned at a later time. 
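+  ##
+  ## Note (added clarification): in this file, `updateHead` further below calls
+  ## this proc whenever the finalized head moves - see the "Pruning the block
+  ## dag is required..." comment there.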
+ + # Clean up block refs, walking block by block + let startTick = Moment.now() + + # Finalization means that we choose a single chain as the canonical one - + # it also means we're no longer interested in any branches from that chain + # up to the finalization point + let hlen = dag.heads.len + for i in 0..= ConsensusFork.Altair: + let + period = sync_committee_period(slot) + curPeriod = sync_committee_period(forkyState.data.slot) + + if period == curPeriod: + @(dag.headSyncCommittees.current_sync_committee) + elif period == curPeriod + 1: + @(dag.headSyncCommittees.next_sync_committee) + else: @[] + else: + @[] + +func getSubcommitteePositionsAux( + dag: ChainDAGRef, + syncCommittee: openArray[ValidatorIndex], + subcommitteeIdx: SyncSubcommitteeIndex, + validatorIdx: uint64): seq[uint64] = + var pos = 0'u64 + for valIdx in syncCommittee.syncSubcommittee(subcommitteeIdx): + if validatorIdx == uint64(valIdx): + result.add pos + inc pos + +func getSubcommitteePositions*( + dag: ChainDAGRef, + slot: Slot, + subcommitteeIdx: SyncSubcommitteeIndex, + validatorIdx: uint64): seq[uint64] = + withState(dag.headState): + when consensusFork >= ConsensusFork.Altair: + let + period = sync_committee_period(slot) + curPeriod = sync_committee_period(forkyState.data.slot) + + template search(syncCommittee: openArray[ValidatorIndex]): seq[uint64] = + dag.getSubcommitteePositionsAux( + syncCommittee, subcommitteeIdx, validatorIdx) + + if period == curPeriod: + search(dag.headSyncCommittees.current_sync_committee) + elif period == curPeriod + 1: + search(dag.headSyncCommittees.next_sync_committee) + else: @[] + else: + @[] + +template syncCommitteeParticipants*( + dag: ChainDAGRef, + slot: Slot, + subcommitteeIdx: SyncSubcommitteeIndex): seq[ValidatorIndex] = + toSeq(syncSubcommittee(dag.syncCommitteeParticipants(slot), subcommitteeIdx)) + +iterator syncCommitteeParticipants*( + dag: ChainDAGRef, + slot: Slot, + subcommitteeIdx: SyncSubcommitteeIndex, + aggregationBits: SyncCommitteeAggregationBits): ValidatorIndex = + for pos, valIdx in dag.syncCommitteeParticipants(slot, subcommitteeIdx): + if pos < aggregationBits.bits and aggregationBits[pos]: + yield valIdx + +func needStateCachesAndForkChoicePruning*(dag: ChainDAGRef): bool = + dag.lastPrunePoint != dag.finalizedHead.toBlockSlotId().expect("not nil") + +proc pruneStateCachesDAG*(dag: ChainDAGRef) = + ## This prunes the cached state checkpoints and EpochRef + ## This does NOT prune the state associated with invalidated blocks on a fork + ## They are pruned via `pruneBlocksDAG` + ## + ## This updates the `dag.lastPrunePoint` variable + doAssert dag.needStateCachesAndForkChoicePruning() + let startTick = Moment.now() + block: # Remove states, walking slot by slot + # We remove all state checkpoints that come _before_ the current finalized + # head, as we might frequently be asked to replay states from the + # finalized checkpoint and onwards (for example when validating blocks and + # attestations) + var + finPoint = dag.finalizedHead.toBlockSlotId().expect("not nil") + cur = dag.parentOrSlot(dag.stateCheckpoint(finPoint)) + prev = dag.parentOrSlot(dag.stateCheckpoint(dag.lastPrunePoint)) + + while cur.isSome and prev.isSome and cur.get() != prev.get(): + let bs = cur.get() + if not isFinalizedStateSnapshot(bs.slot) and + bs.slot != dag.tail.slot: + dag.delState(bs) + let tmp = cur.get() + cur = dag.parentOrSlot(tmp) + + let statePruneTick = Moment.now() + + block: # Clean up old EpochRef instances + # After finalization, we can clear up the epoch cache 
and save memory - + # it will be recomputed if needed + dag.epochRefs.delIt(it.epoch < dag.finalizedHead.slot.epoch) + dag.shufflingRefs.delIt(it.epoch < dag.finalizedHead.slot.epoch) + + let epochRefPruneTick = Moment.now() + + dag.lastPrunePoint = dag.finalizedHead.toBlockSlotId().expect("not nil") + + debug "Pruned the state checkpoints and DAG caches.", + statePruneDur = statePruneTick - startTick, + epochRefPruneDur = epochRefPruneTick - statePruneTick + +func pruneStep(horizon, lastHorizon, lastBlockHorizon: Slot): + tuple[stateHorizon, blockHorizon: Slot] = + ## Compute a reasonable incremental pruning step considering the current + ## horizon, how far the database has been pruned already and where we want the + ## tail to be - the return value shows the first state and block that we + ## should _keep_ (inclusive). + + const SLOTS_PER_STATE_SNAPSHOT = + uint64(EPOCHS_PER_STATE_SNAPSHOT * SLOTS_PER_EPOCH) + + let + blockHorizon = block: + let + # Keep up with horizon if it's moving fast, ie if we're syncing + maxSlots = max(horizon - lastHorizon, MAX_SLOTS_PER_PRUNE) + + # Move the block horizon cap with a lag so that it moves slot-by-slot + # instead of a big jump every time we prune a state - assuming we + # prune every slot, this makes us prune one slot at a time instead of + # a burst of prunes (as computed by maxSlots) around every snapshot + # change followed by no pruning for the rest of the period + maxBlockHorizon = + if horizon + 1 >= SLOTS_PER_STATE_SNAPSHOT: + horizon + 1 - SLOTS_PER_STATE_SNAPSHOT + else: + Slot(0) + + # `lastBlockHorizon` captures the case where we're incrementally + # pruning a database that hasn't been pruned for a while: it's + # initialized to a pre-tail value on startup and moves to approach + # `maxBlockHorizon`. + min(maxBlockHorizon, lastBlockHorizon + maxSlots) + + # Round up such that we remove state only once blocks have been removed + stateHorizon = + ((blockHorizon + SLOTS_PER_STATE_SNAPSHOT - 1) div + SLOTS_PER_STATE_SNAPSHOT) * SLOTS_PER_STATE_SNAPSHOT + + (Slot(stateHorizon), blockHorizon) + +proc pruneHistory*(dag: ChainDAGRef, startup = false) = + ## Perform an incremental pruning step of the history + if dag.db.db.readOnly: + return + + let + horizon = dag.horizon() + (stateHorizon, blockHorizon) = pruneStep( + horizon, dag.lastHistoryPruneHorizon, dag.lastHistoryPruneBlockHorizon) + + doAssert blockHorizon <= stateHorizon, + "we must never prune blocks while leaving the state" + + debug "Pruning history", + horizon, blockHorizon, stateHorizon, + lastHorizon = dag.lastHistoryPruneHorizon, + lastBlockHorizon = dag.lastHistoryPruneBlockHorizon, + tail = dag.tail, head = dag.head + + dag.lastHistoryPruneHorizon = horizon + dag.lastHistoryPruneBlockHorizon = blockHorizon + + dag.db.withManyWrites: + if stateHorizon > dag.tail.slot: + # First, we want to see if it's possible to prune any states - we store one + # state every EPOCHS_PER_STATE_SNAPSHOT, so this happens infrequently. + + var + cur = dag.getBlockIdAtSlot(stateHorizon) + + var first = true + while cur.isSome(): + let bs = cur.get() + # We don't delete legacy states because the legacy database is openend + # in read-only and slow to delete from due to its sub-optimal structure + if dag.db.containsState( + dag.cfg, bs.bid.root, bs.slot..bs.slot, legacy = first): + if first: + # We leave the state on the prune horizon intact and update the tail + # to point to this state, indicating the new point in time from + # which we can load states in general. 
+ debug "Updating tail", bs + dag.db.putTailBlock(bs.bid.root) + dag.tail = bs.bid + first = false + else: + debug "Pruning historical state", bs + dag.delState(bs) + elif not bs.isProposed: + trace "Reached already-pruned slot, done pruning states", bs + break + + if bs.isProposed: + # We store states either at the same slot at the block (checkpoint) or + # by advancing the slot to the nearest epoch start - check both when + # pruning + cur = dag.parentOrSlot(bs) + elif bs.slot.epoch > EPOCHS_PER_STATE_SNAPSHOT: + # Jump one snapshot interval at a time, but don't prune genesis + cur = dag.getBlockIdAtSlot(start_slot(bs.slot.epoch() - EPOCHS_PER_STATE_SNAPSHOT)) + else: + break + + # Prune blocks after sanity-checking that we don't prune post-tail blocks - + # this could happen if a state is missing at the expected state horizon and + # would indicate a partially inconsistent database since the base + # invariant is that there exists a state at the snapshot slot - better not + # further mess things up regardless + if blockHorizon > GENESIS_SLOT and blockHorizon <= dag.tail.slot: + var + # Leave the horizon block itself + cur = dag.getBlockIdAtSlot(blockHorizon - 1).map(proc(x: auto): auto = x.bid) + + while cur.isSome: + let + bid = cur.get() + fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) + + if bid.slot == GENESIS_SLOT: + # Leave genesis block for nostalgia and the REST API + break + + if not dag.db.delBlock(fork, bid.root): + # Stop at the first gap - this is typically the pruning point of the + # previous call to pruneHistory. An inconsistent DB might have more + # blocks beyond that point but we have no efficient way of detecting + # that. + break + + cur = dag.parent(bid) + + # TODO There have been varied reports of startup pruning causing long + # startup times - an incremental approach would be needed here also + if false and + startup and + dag.cfg.consensusForkAtEpoch(blockHorizon.epoch) > ConsensusFork.Phase0: + # Once during start, we'll clear all "old fork" data - this ensures we get + # rid of any leftover junk in the tables - we do so after linear pruning + # so as to "mostly" clean up the phase0 tables as well (which cannot be + # pruned easily by fork) - one fork at a time, so as not to take too long + + let stateFork = dag.cfg.consensusForkAtEpoch(dag.tail.slot.epoch) + var clearedStates = false + if stateFork > ConsensusFork.Phase0: + for fork in ConsensusFork.Phase0.. ConsensusFork.Phase0: + for fork in ConsensusFork.Phase0..= ConsensusFork.Bellatrix: + Opt.some forkyBlck.message.body.execution_payload.block_hash + else: + Opt.some ZERO_HASH + +proc loadExecutionBlockHash*( + dag: ChainDAGRef, blck: BlockRef): Opt[Eth2Digest] = + if blck.executionBlockHash.isNone: + blck.executionBlockHash = dag.loadExecutionBlockHash(blck.bid) + blck.executionBlockHash + +from std/packedsets import PackedSet, incl, items + +func getValidatorChangeStatuses( + state: ForkedHashedBeaconState, vis: openArray[ValidatorIndex]): + PackedSet[ValidatorIndex] = + var res: PackedSet[ValidatorIndex] + withState(state): + for vi in vis: + if forkyState.data.validators[vi].withdrawal_credentials.data[0] == + BLS_WITHDRAWAL_PREFIX: + res.incl vi + res + +func checkBlsToExecutionChanges( + state: ForkedHashedBeaconState, vis: PackedSet[ValidatorIndex]): bool = + # Within each fork, BLS_WITHDRAWAL_PREFIX to ETH1_ADDRESS_WITHDRAWAL_PREFIX + # and never ETH1_ADDRESS_WITHDRAWAL_PREFIX to BLS_WITHDRAWAL_PREFIX. Latter + # can still happen via reorgs. 
+ # Cases: + # 1) unchanged (BLS_WITHDRAWAL_PREFIX or ETH1_ADDRESS_WITHDRAWAL_PREFIX) from + # old to new head. + # 2) ETH1_ADDRESS_WITHDRAWAL_PREFIX to BLS_WITHDRAWAL_PREFIX + # 3) BLS_WITHDRAWAL_PREFIX to ETH1_ADDRESS_WITHDRAWAL_PREFIX + # + # Only report (3), i.e. whether there were validator indices with withdrawal + # credentials previously using BLS_WITHDRAWAL_PREFIX now using, instead, the + # ETH1_ADDRESS_WITHDRAWAL_PREFIX prefix indicating a BLS to execution change + # went through. + # + # Since it tracks head, it's possible reorgs trigger reporting the same + # validator indices multiple times; this is fine. + withState(state): + anyIt( vis, forkyState.data.validators[it].has_eth1_withdrawal_credential) + +proc updateHead*( + dag: ChainDAGRef, newHead: BlockRef, quarantine: var Quarantine, + knownValidators: openArray[ValidatorIndex]) = + ## Update what we consider to be the current head, as given by the fork + ## choice. + ## + ## The choice of head affects the choice of finalization point - the order + ## of operations naturally becomes important here - after updating the head, + ## blocks that were once considered potential candidates for a tree will + ## now fall from grace, or no longer be considered resolved. + doAssert not newHead.isNil() + + # Could happen if enough blocks get invalidated and would corrupt database - + # When finalized checkpoint is empty, the slot may also be smaller + doAssert newHead.slot >= dag.finalizedHead.slot or + newHead == dag.finalizedHead.blck + + let lastHead = dag.head + + logScope: + newHead = shortLog(newHead) + lastHead = shortLog(lastHead) + + if lastHead == newHead: + trace "No head block update" + return + + if newHead.parent.isNil: + # The new head should always have the finalizedHead as ancestor - thus, + # this should not happen except in a race condition where the selected + # `BlockRef` had its parent set to nil as happens during finalization - + # notably, resetting the head to be the finalizedHead is not allowed + error "Cannot update head to block without parent" + return + + let + lastHeadStateRoot = getStateRoot(dag.headState) + lastHeadMergeComplete = dag.headState.is_merge_transition_complete() + lastHeadKind = dag.headState.kind + lastKnownValidatorsChangeStatuses = getValidatorChangeStatuses( + dag.headState, knownValidators) + + # Start off by making sure we have the right state - updateState will try + # to use existing in-memory states to make this smooth + var cache: StateCache + if not updateState( + dag, dag.headState, newHead.bid.atSlot(), false, cache): + # Advancing the head state should never fail, given that the tail is + # implicitly finalised, the head is an ancestor of the tail and we always + # store the tail state in the database, as well as every epoch slot state in + # between + fatal "Unable to load head state during head update, database corrupt?", + lastHead = shortLog(lastHead) + quit 1 + + dag.head = newHead + + if dag.headState.is_merge_transition_complete() and not + lastHeadMergeComplete and + dag.vanityLogs.onMergeTransitionBlock != nil: + dag.vanityLogs.onMergeTransitionBlock() + + if dag.headState.kind > lastHeadKind: + case dag.headState.kind + of ConsensusFork.Phase0 .. 
ConsensusFork.Bellatrix: + discard + of ConsensusFork.Capella: + if dag.vanityLogs.onUpgradeToCapella != nil: + dag.vanityLogs.onUpgradeToCapella() + of ConsensusFork.Deneb: + if dag.vanityLogs.onUpgradeToDeneb != nil: + dag.vanityLogs.onUpgradeToDeneb() + of ConsensusFork.Electra: + if dag.vanityLogs.onUpgradeToElectra != nil: + dag.vanityLogs.onUpgradeToElectra() + + if dag.vanityLogs.onKnownBlsToExecutionChange != nil and + checkBlsToExecutionChanges( + dag.headState, lastKnownValidatorsChangeStatuses): + dag.vanityLogs.onKnownBlsToExecutionChange() + + dag.db.putHeadBlock(newHead.root) + + # updateBeaconMetrics(dag.headState, dag.head.bid, cache) + + withState(dag.headState): + when consensusFork >= ConsensusFork.Altair: + dag.headSyncCommittees = forkyState.data.get_sync_committee_cache(cache) + + let + finalized_checkpoint = + getStateField(dag.headState, finalized_checkpoint) + finalizedSlot = + # finalized checkpoint may move back in the head state compared to what + # we've seen in other forks - it does not move back in fork choice + # however, so we'll use the last-known-finalized in that case + max(finalized_checkpoint.epoch.start_slot(), dag.finalizedHead.slot) + finalizedHead = newHead.atSlot(finalizedSlot) + + doAssert (not finalizedHead.blck.isNil), + "Block graph should always lead to a finalized block" + + # Update light client data + # dag.processHeadChangeForLightClient() + + let (isAncestor, ancestorDepth) = lastHead.getDepth(newHead) + if not(isAncestor): + notice "Updated head block with chain reorg", + headParent = shortLog(newHead.parent), + stateRoot = shortLog(getStateRoot(dag.headState)), + justified = shortLog(getStateField( + dag.headState, current_justified_checkpoint)), + finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)), + isOptHead = not newHead.executionValid + + if not(isNil(dag.onReorgHappened)): + let + # TODO (cheatfate): Proper implementation required + data = ReorgInfoObject.init(dag.head.slot, uint64(ancestorDepth), + lastHead.root, newHead.root, + lastHeadStateRoot, + getStateRoot(dag.headState)) + dag.onReorgHappened(data) + + # A reasonable criterion for "reorganizations of the chain" + quarantine.clearAfterReorg() + + # beacon_reorgs_total_total.inc() + # beacon_reorgs_total.inc() + else: + debug "Updated head block", + stateRoot = shortLog(getStateRoot(dag.headState)), + justified = shortLog(getStateField( + dag.headState, current_justified_checkpoint)), + finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)), + isOptHead = not newHead.executionValid + + if not(isNil(dag.onHeadChanged)): + let + depRoot = withState(dag.headState): forkyState.proposer_dependent_root + prevDepRoot = withState(dag.headState): + forkyState.attester_dependent_root + epochTransition = (finalizedHead != dag.finalizedHead) + # TODO (cheatfate): Proper implementation required + data = HeadChangeInfoObject.init(dag.head.slot, dag.head.root, + getStateRoot(dag.headState), + epochTransition, prevDepRoot, + depRoot) + dag.onHeadChanged(data) + + withState(dag.headState): + # Every time the head changes, the "canonical" view of balances and other + # state-related metrics change - notify the validator monitor. + # Doing this update during head update ensures there's a reasonable number + # of such updates happening - at most once per valid block. 
+ dag.validatorMonitor[].registerState(forkyState.data) + + if finalizedHead != dag.finalizedHead: + debug "Reached new finalization checkpoint", + stateRoot = shortLog(getStateRoot(dag.headState)), + justified = shortLog(getStateField( + dag.headState, current_justified_checkpoint)), + finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)) + let oldFinalizedHead = dag.finalizedHead + + block: + # Update `dag.finalizedBlocks` with all newly finalized blocks (those + # newer than the previous finalized head), then update `dag.finalizedHead` + var newFinalized: seq[BlockId] + var tmp = finalizedHead.blck + while not isNil(tmp) and tmp.slot >= dag.finalizedHead.slot: + newFinalized.add(tmp.bid) + if tmp != finalizedHead.blck: + # The newly finalized block itself should remain in here so that fork + # choice still can find it via root + dag.forkBlocks.excl(KeyedBlockRef.init(tmp)) + + let p = tmp.parent + tmp.parent = nil # Reset all parent links to release memory + tmp = p + + dag.finalizedHead = finalizedHead + + dag.db.updateFinalizedBlocks(newFinalized) + + let oldBlockHash = dag.loadExecutionBlockHash(oldFinalizedHead.blck) + if oldBlockHash.isSome and oldBlockHash.unsafeGet.isZero: + let newBlockHash = dag.loadExecutionBlockHash(dag.finalizedHead.blck) + if newBlockHash.isSome and not newBlockHash.unsafeGet.isZero: + if dag.vanityLogs.onFinalizedMergeTransitionBlock != nil: + dag.vanityLogs.onFinalizedMergeTransitionBlock() + + # Pruning the block dag is required every time the finalized head changes + # in order to clear out blocks that are no longer viable and should + # therefore no longer be considered as part of the chain we're following + dag.pruneBlocksDAG() + + # Update light client data + # dag.processFinalizationForLightClient(oldFinalizedHead) + + # Send notification about new finalization point via callback. + if not(isNil(dag.onFinHappened)): + let stateRoot = + if dag.finalizedHead.slot == dag.head.slot: getStateRoot(dag.headState) + elif dag.finalizedHead.slot + SLOTS_PER_HISTORICAL_ROOT > dag.head.slot: + getStateField(dag.headState, state_roots).data[ + int(dag.finalizedHead.slot mod SLOTS_PER_HISTORICAL_ROOT)] + else: + Eth2Digest() # The thing that finalized was >8192 blocks old? + # TODO (cheatfate): Proper implementation required + let data = FinalizationInfoObject.init( + dag.finalizedHead.blck.root, stateRoot, dag.finalizedHead.slot.epoch) + dag.onFinHappened(dag, data) + +proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): Result[void, cstring] = + ## Lightweight check to see if it is likely that the given database has been + ## initialized + let + tailBlockRoot = db.getTailBlock() + if not tailBlockRoot.isSome(): + return err("Tail block root missing") + + let + tailBlock = db.getBlockId(tailBlockRoot.get()) + if not tailBlock.isSome(): + return err("Tail block information missing") + + ok() + +proc preInit*( + T: type ChainDAGRef, db: BeaconChainDB, state: ForkedHashedBeaconState) = + ## Initialize a database using the given state, which potentially may be a + ## non-genesis state. + ## + ## When used with a non-genesis state, the resulting database will not be + ## compatible with pre-22.11 versions. 
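+  ##
+  ## Illustrative usage sketch (an added example, not upstream documentation),
+  ## assuming an already-open `db: BeaconChainDB` and an epoch-aligned
+  ## `state: ForkedHashedBeaconState` obtained elsewhere:
+  ##
+  ##   ChainDAGRef.preInit(db, state)
+  ##   doAssert ChainDAGRef.isInitialized(db).isOk()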
+ logScope: + stateRoot = $getStateRoot(state) + stateSlot = getStateField(state, slot) + + doAssert getStateField(state, slot).is_epoch, + "Can only initialize database from epoch states" + + withState(state): + db.putState(forkyState) + + if forkyState.data.slot == GENESIS_SLOT: + let blck = get_initial_beacon_block(forkyState) + db.putBlock(blck) + db.putGenesisBlock(blck.root) + db.putHeadBlock(blck.root) + db.putTailBlock(blck.root) + + notice "Database initialized from genesis", + blockRoot = $blck.root + else: + let blockRoot = forkyState.latest_block_root() + # We write a summary but not the block contents - these will have to be + # backfilled from the network + db.putBeaconBlockSummary(blockRoot, BeaconBlockSummary( + slot: forkyState.data.latest_block_header.slot, + parent_root: forkyState.data.latest_block_header.parent_root + )) + db.putHeadBlock(blockRoot) + db.putTailBlock(blockRoot) + + if db.getGenesisBlock().isSome(): + notice "Checkpoint written to database", blockRoot = $blockRoot + else: + notice "Database initialized from checkpoint", blockRoot = $blockRoot + +proc getProposer*( + dag: ChainDAGRef, head: BlockRef, slot: Slot): Opt[ValidatorIndex] = + let + epochRef = dag.getEpochRef(head.bid, slot.epoch(), false).valueOr: + notice "Cannot load EpochRef for given head", head, slot, error + return Opt.none(ValidatorIndex) + + slotInEpoch = slot.since_epoch_start() + + let proposer = epochRef.beacon_proposers[slotInEpoch] + if proposer.isSome(): + if proposer.get().uint64 >= dag.db.immutableValidators.lenu64(): + # Sanity check - it should never happen that the key cache doesn't contain + # a key for the selected proposer - that would mean that we somehow + # created validators in the state without updating the cache! + warn "Proposer key not found", + keys = dag.db.immutableValidators.lenu64(), proposer = proposer.get() + return Opt.none(ValidatorIndex) + + proposer + +proc getProposalState*( + dag: ChainDAGRef, head: BlockRef, slot: Slot, cache: var StateCache): + Result[ref ForkedHashedBeaconState, cstring] = + ## Return a state suitable for making proposals for the given head and slot - + ## in particular, the state can be discarded after use and does not have a + ## state root set + + # Start with the clearance state, since this one typically has been advanced + # and thus has a hot hash tree cache + let state = assignClone(dag.clearanceState) + + var + info = ForkedEpochInfo() + if not state[].can_advance_slots(head.root, slot): + # The last state root will be computed as part of block production, so skip + # it now + if not dag.updateState( + state[], head.atSlot(slot - 1).toBlockSlotId().expect("not nil"), + false, cache): + error "Cannot get proposal state - skipping block production, database corrupt?", + head = shortLog(head), + slot + return err("Cannot create proposal state") + else: + loadStateCache(dag, cache, head.bid, slot.epoch) + + if getStateField(state[], slot) < slot: + process_slots( + dag.cfg, state[], slot, cache, info, + {skipLastStateRootCalculation}).expect("advancing 1 slot should not fail") + + ok state + +func aggregateAll*( + dag: ChainDAGRef, + validator_indices: openArray[ValidatorIndex]): Result[CookedPubKey, cstring] = + if validator_indices.len == 0: + # Aggregation spec requires non-empty collection + # - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04 + # Consensus specs require at least one attesting index in attestation + # - 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#is_valid_indexed_attestation + return err("aggregate: no attesting keys") + + let + firstKey = dag.validatorKey(validator_indices[0]).valueOr: + return err("aggregate: invalid validator index") + + var aggregateKey{.noinit.}: AggregatePublicKey + + aggregateKey.init(firstKey) + + for i in 1 ..< validator_indices.len: + let key = dag.validatorKey(validator_indices[i]).valueOr: + return err("aggregate: invalid validator index") + aggregateKey.aggregate(key) + + ok(finish(aggregateKey)) + +func aggregateAll*( + dag: ChainDAGRef, + validator_indices: openArray[ValidatorIndex|uint64], + bits: BitSeq | BitArray): Result[CookedPubKey, cstring] = + if validator_indices.len() != bits.len(): + return err("aggregateAll: mismatch in bits length") + + var + aggregateKey{.noinit.}: AggregatePublicKey + inited = false + + for i in 0.. dag.horizon + +proc rebuildIndex*(dag: ChainDAGRef) = + ## After a checkpoint sync, we lack intermediate states to replay from - this + ## function rebuilds them so that historical replay can take place again + ## TODO the pruning of junk states could be moved to a separate function that + ## runs either on startup + # First, we check what states we already have in the database - that allows + # resuming the operation at any time + let + roots = dag.db.loadStateRoots() + historicalRoots = getStateField(dag.headState, historical_roots).asSeq() + historicalSummaries = dag.headState.historical_summaries.asSeq() + + var + canonical = newSeq[Eth2Digest]( + (dag.finalizedHead.slot.epoch + EPOCHS_PER_STATE_SNAPSHOT - 1) div + EPOCHS_PER_STATE_SNAPSHOT) + # `junk` puts in place some infrastructure to prune unnecessary states - it + # will be more useful in the future as a base for pruning + junk: seq[((Slot, Eth2Digest), Eth2Digest)] + + for k, v in roots: + if k[0] >= dag.finalizedHead.slot: + continue # skip newer stuff + if k[0] < dag.backfill.slot: + continue # skip stuff for which we have no blocks + + if not isFinalizedStateSnapshot(k[0]): + # `tail` will move at the end of the process, so we won't need any + # intermediate states + junk.add((k, v)) + + continue # skip non-snapshot slots + + if k[0] > 0: + let bs = dag.getBlockIdAtSlot(k[0] - 1) + if bs.isNone or bs.get().bid.root != k[1]: + # remove things that are no longer a canonical part of the chain or + # cannot be reached via a block + junk.add((k, v)) + continue + + if not dag.db.containsState(dag.cfg.consensusForkAtEpoch(k[0].epoch), v): + continue # If it's not in the database.. 
+ + canonical[k[0].epoch div EPOCHS_PER_STATE_SNAPSHOT] = v + + let + state = (ref ForkedHashedBeaconState)() + + var + cache: StateCache + info: ForkedEpochInfo + tailBid: Opt[BlockId] + states: int + + # `canonical` holds all slots at which a state is expected to appear, using a + # zero root whenever a particular state is missing - this way, if there's + # partial progress or gaps, they will be dealt with correctly + for i, state_root in canonical.mpairs(): + let + slot = Epoch(i * EPOCHS_PER_STATE_SNAPSHOT).start_slot + + if slot < dag.backfill.slot: + # TODO if we have era files, we could try to load blocks from them at + # this point + # TODO if we don't do the above, we can of course compute the starting `i` + continue + + if tailBid.isNone(): + if state_root.isZero: + # If we can find an era file with this state, use it as an alternative + # starting point - ignore failures for now + if dag.era.getState( + historicalRoots, historicalSummaries, slot, state[]).isOk(): + state_root = getStateRoot(state[]) + + withState(state[]): dag.db.putState(forkyState) + tailBid = Opt.some state[].latest_block_id() + + else: + if not dag.db.getState( + dag.cfg.consensusForkAtEpoch(slot.epoch), state_root, state[], + noRollback): + fatal "Cannot load state, database corrupt or created for a different network?", + state_root, slot + quit 1 + tailBid = Opt.some state[].latest_block_id() + + continue + + if i == 0 or canonical[i - 1].isZero: + reset(tailBid) # No unbroken history! + continue + + if not state_root.isZero: + states += 1 + continue + + let + startSlot = Epoch((i - 1) * EPOCHS_PER_STATE_SNAPSHOT).start_slot + + info "Recreating state snapshot", + slot, startStateRoot = canonical[i - 1], startSlot + + if getStateRoot(state[]) != canonical[i - 1]: + if not dag.db.getState( + dag.cfg.consensusForkAtEpoch(startSlot.epoch), canonical[i - 1], + state[], noRollback): + error "Can't load start state, database corrupt?", + startStateRoot = shortLog(canonical[i - 1]), slot = startSlot + return + + for slot in startSlot.. 0: + info "Dropping redundant states", states, redundant = junk.len + + for i in junk: + dag.db.delStateRoot(i[0][1], i[0][0]) + dag.db.delState(dag.cfg.consensusForkAtEpoch(i[0][0].epoch), i[1]) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index b80153edc..8fc648997 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -5,8 +5,10 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
+#TODO: Clean these imports import - std/[os, atomics], + std/[os, atomics, random, terminal, times, exitprocs, sequtils], + metrics, beacon_chain/nimbus_binary_common, beacon_chain/spec/forks, beacon_chain/[beacon_chain_db, trusted_node_sync], @@ -14,55 +16,1161 @@ import chronos, chronicles, stew/io2, - ../configs/nimbus_configs + eth/p2p/discoveryv5/[enr, random2], + ../configs/nimbus_configs, + beacon_chain/consensus_object_pools/vanity_logs/vanity_logs, + beacon_chain/statusbar, + beacon_chain/nimbus_binary_common, + beacon_chain/spec/[forks, digest, helpers], + beacon_chain/spec/datatypes/base, + beacon_chain/[beacon_chain_db, trusted_node_sync, beacon_node], + beacon_chain/spec/weak_subjectivity, + beacon_chain/rpc/[rest_beacon_api, rest_api, state_ttl_cache], + beacon_chain/consensus_object_pools/blob_quarantine, + beacon_chain/networking/[topic_params, network_metadata, network_metadata_downloads], + beacon_chain/spec/datatypes/[bellatrix], + beacon_chain/sync/[sync_protocol], + beacon_chain/validators/[keystore_management, beacon_validators], + beacon_chain/consensus_object_pools/[blockchain_dag], + beacon_chain/spec/ + [beaconstate, state_transition, state_transition_epoch, validator, ssz_codec] export nimbus_configs +when defined(posix): + import system/ansi_c + +from beacon_chain/spec/datatypes/deneb import SignedBeaconBlock +from beacon_chain/beacon_node_light_client + import shouldSyncOptimistically, initLightClient, updateLightClientFromDag +from libp2p/protocols/pubsub/gossipsub + import TopicParams, validateParameters, init + ## log logScope: topics = "Consensus layer" -## following procedures are copies from nimbus_beacon_node.nim. -## TODO: if possible, extract from that file into a common file +# adapted from nimbus-eth2 +# # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics +# declareGauge beacon_slot, "Latest slot of the beacon chain state" +# declareGauge beacon_current_epoch, "Current epoch" -## runs beacon node -## adapted from nimbus-eth2 -proc doRunBeaconNode( - config: var BeaconNodeConf, rng: ref HmacDrbgContext -) {.raises: [CatchableError].} = - info "Launching beacon node", - version = "fullVersionStr", #TODO:changed from original version - bls_backend = $BLS_BACKEND, - const_preset, - cmdParams = commandLineParams(), - config +# # Finalization tracking +# declareGauge finalization_delay, +# "Epoch delay between scheduled epoch and finalized epoch" - template ignoreDeprecatedOption(option: untyped): untyped = - if config.option.isSome: - warn "Config option is deprecated", option = config.option.get +# declareGauge ticks_delay, +# "How long does to take to run the onSecond loop" - ignoreDeprecatedOption requireEngineAPI - ignoreDeprecatedOption safeSlotsToImportOptimistically - ignoreDeprecatedOption terminalTotalDifficultyOverride - ignoreDeprecatedOption optimistic - ignoreDeprecatedOption validatorMonitorTotals - ignoreDeprecatedOption web3ForcePolling +# declareGauge next_action_wait, +# "Seconds until the next attestation will be sent" - config.createDumpDirs() +# declareGauge next_proposal_wait, +# "Seconds until the next proposal will be sent, or Inf if not known" - #TODO: We might need to split this on the same file - # if config.metricsEnabled: - # let metricsAddress = config.metricsAddress - # notice "Starting metrics HTTP server", - # url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics" - # try: - # startMetricsHttpServer($metricsAddress, config.metricsPort) - # except CatchableError as exc: - 
# raise exc - # except Exception as exc: - # raiseAssert exc.msg # TODO fix metrics +# declareGauge sync_committee_active, +# "1 if there are current sync committee duties, 0 otherwise" + +# declareCounter db_checkpoint_seconds, +# "Time spent checkpointing the database to clear the WAL file" + +const SlashingDbName = "slashing_protection" +# changing this requires physical file rename as well or history is lost + + +## NOTE +## following procedures are copies/adaptations from nimbus_beacon_node.nim. +## TODO: Extract do adequate structures and files + + +# TODO: need to figure out behaviour on threaded patterns +# Using this function here is signaled as non GC SAFE given +# that gPidFile might be accessed concurrently with no guards + +# var gPidFile: string +# proc createPidFile(filename: string) {.raises: [IOError].} = +# writeFile filename, $os.getCurrentProcessId() +# gPidFile = filename +# addExitProc ( +# proc() = +# discard io2.removeFile(filename) +# ) + +proc initFullNode( + node: BeaconNode, + rng: ref HmacDrbgContext, + dag: ChainDAGRef, + taskpool: TaskPoolPtr, + getBeaconTime: GetBeaconTimeFn +) {.async.} = + template config(): auto = + node.config + + proc onPhase0AttestationReceived(data: phase0.Attestation) = + node.eventBus.attestQueue.emit(data) + + proc onElectraAttestationReceived(data: electra.Attestation) = + debugComment "electra attestation queue" + + proc onSyncContribution(data: SignedContributionAndProof) = + node.eventBus.contribQueue.emit(data) + + proc onVoluntaryExitAdded(data: SignedVoluntaryExit) = + node.eventBus.exitQueue.emit(data) + + proc onBLSToExecutionChangeAdded(data: SignedBLSToExecutionChange) = + node.eventBus.blsToExecQueue.emit(data) + + proc onProposerSlashingAdded(data: ProposerSlashing) = + node.eventBus.propSlashQueue.emit(data) + + proc onPhase0AttesterSlashingAdded(data: phase0.AttesterSlashing) = + node.eventBus.attSlashQueue.emit(data) + + proc onElectraAttesterSlashingAdded(data: electra.AttesterSlashing) = + debugComment "electra att slasher queue" + + proc onBlobSidecarAdded(data: BlobSidecarInfoObject) = + node.eventBus.blobSidecarQueue.emit(data) + + proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) = + let optimistic = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + some node.dag.is_optimistic(data.toBlockId()) + else: + none[bool]() + node.eventBus.blocksQueue.emit( + EventBeaconBlockObject.init(data, optimistic)) + + proc onHeadChanged(data: HeadChangeInfoObject) = + let eventData = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + var res = data + res.optimistic = + some node.dag.is_optimistic(BlockId(slot: data.slot, root: data.block_root)) + res + else: + data + node.eventBus.headQueue.emit(eventData) + + proc onChainReorg(data: ReorgInfoObject) = + let eventData = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + var res = data + res.optimistic = some node.dag.is_optimistic( + BlockId(slot: data.slot, root: data.new_head_block) + ) + res + else: + data + node.eventBus.reorgQueue.emit(eventData) + + proc makeOnFinalizationCb( + # This `nimcall` functions helps for keeping track of what + # needs to be captured by the onFinalization closure. 
+ eventBus: EventBus, + elManager: ELManager, + ): OnFinalizedCallback {.nimcall.} = + static: + doAssert (elManager is ref) + return proc(dag: ChainDAGRef, data: FinalizationInfoObject) = + if elManager != nil: + let finalizedEpochRef = dag.getFinalizedEpochRef() + discard trackFinalizedState( + elManager, finalizedEpochRef.eth1_data, finalizedEpochRef.eth1_deposit_index + ) + # node.updateLightClientFromDag() + let eventData = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + var res = data + # `slot` in this `BlockId` may be higher than block's actual slot, + # this is alright for the purpose of calling `is_optimistic`. + res.optimistic = some node.dag.is_optimistic( + BlockId(slot: data.epoch.start_slot, root: data.block_root) + ) + res + else: + data + eventBus.finalQueue.emit(eventData) + + func getLocalHeadSlot(): Slot = + dag.head.slot + + proc getLocalWallSlot(): Slot = + node.beaconClock.now.slotOrZero + + func getFirstSlotAtFinalizedEpoch(): Slot = + dag.finalizedHead.slot + + func getBackfillSlot(): Slot = + if dag.backfill.parent_root != dag.tail.root: dag.backfill.slot else: dag.tail.slot + + func getFrontfillSlot(): Slot = + max(dag.frontfill.get(BlockId()).slot, dag.horizon) + + proc isWithinWeakSubjectivityPeriod(): bool = + let + currentSlot = node.beaconClock.now().slotOrZero() + checkpoint = Checkpoint( + epoch: epoch(getStateField(node.dag.headState, slot)), + root: getStateField(node.dag.headState, latest_block_header).state_root, + ) + is_within_weak_subjectivity_period( + node.dag.cfg, currentSlot, node.dag.headState, checkpoint + ) + + proc eventWaiter(): Future[void] {.async: (raises: [CancelledError]).} = + await node.shutdownEvent.wait() + bnStatus = BeaconNodeStatus.Stopping + + asyncSpawn eventWaiter() + + let + quarantine = newClone(Quarantine.init()) + attestationPool = newClone( + AttestationPool.init( + dag, quarantine, onPhase0AttestationReceived, onElectraAttestationReceived + ) + ) + syncCommitteeMsgPool = + newClone(SyncCommitteeMsgPool.init(rng, dag.cfg, onSyncContribution)) + # adapted from nimbus-eth2 + # lightClientPool = newClone(LightClientPool()) + validatorChangePool = newClone( + ValidatorChangePool.init( + dag, attestationPool, onVoluntaryExitAdded, onBLSToExecutionChangeAdded, + onProposerSlashingAdded, onPhase0AttesterSlashingAdded, + onElectraAttesterSlashingAdded, + ) + ) + blobQuarantine = newClone(BlobQuarantine.init(onBlobSidecarAdded)) + consensusManager = ConsensusManager.new( + dag, + attestationPool, + quarantine, + node.elManager, + ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets), + node.dynamicFeeRecipientsStore, + config.validatorsDir, + config.defaultFeeRecipient, + config.suggestedGasLimit, + ) + blockProcessor = BlockProcessor.new( + config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming, rng, taskpool, + consensusManager, node.validatorMonitor, blobQuarantine, getBeaconTime, + ) + blockVerifier = proc( + signedBlock: ForkedSignedBeaconBlock, + blobs: Opt[BlobSidecars], + maybeFinalized: bool, + ): Future[Result[void, VerifierError]] {. + async: (raises: [CancelledError], raw: true) + .} = + # The design with a callback for block verification is unusual compared + # to the rest of the application, but fits with the general approach + # taken in the sync/request managers - this is an architectural compromise + # that should probably be reimagined more holistically in the future. 
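+      # Note (added clarification): because of `raw: true` nothing is awaited
+      # here - the future returned by `addBlock` is handed straight back to the
+      # caller and completes once the block processor has resolved the block.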
+ blockProcessor[].addBlock( + MsgSource.gossip, signedBlock, blobs, maybeFinalized = maybeFinalized + ) + rmanBlockVerifier = proc( + signedBlock: ForkedSignedBeaconBlock, maybeFinalized: bool + ): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = + withBlck(signedBlock): + when consensusFork >= ConsensusFork.Deneb: + if not blobQuarantine[].hasBlobs(forkyBlck): + # We don't have all the blobs for this block, so we have + # to put it in blobless quarantine. + if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck): + err(VerifierError.UnviableFork) + else: + err(VerifierError.MissingParent) + else: + let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck) + await blockProcessor[].addBlock( + MsgSource.gossip, + signedBlock, + Opt.some(blobs), + maybeFinalized = maybeFinalized, + ) + else: + await blockProcessor[].addBlock( + MsgSource.gossip, + signedBlock, + Opt.none(BlobSidecars), + maybeFinalized = maybeFinalized, + ) + rmanBlockLoader = proc(blockRoot: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] = + dag.getForkedBlock(blockRoot) + rmanBlobLoader = proc(blobId: BlobIdentifier): Opt[ref BlobSidecar] = + var blob_sidecar = BlobSidecar.new() + if dag.db.getBlobSidecar(blobId.block_root, blobId.index, blob_sidecar[]): + Opt.some blob_sidecar + else: + Opt.none(ref BlobSidecar) + + #TODO: + # removing this light client var + lightClientPool = newClone( + LightClientPool()) + + processor = Eth2Processor.new( + config.doppelgangerDetection, blockProcessor, node.validatorMonitor, dag, + attestationPool, validatorChangePool, node.attachedValidators, + syncCommitteeMsgPool, lightClientPool, quarantine, blobQuarantine, rng, + getBeaconTime, taskpool, + ) + syncManagerFlags = + if node.config.longRangeSync != LongRangeSyncMode.Lenient: + {SyncManagerFlag.NoGenesisSync} + else: + {} + syncManager = newSyncManager[Peer, PeerId]( + node.network.peerPool, + dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + SyncQueueKind.Forward, + getLocalHeadSlot, + getLocalWallSlot, + getFirstSlotAtFinalizedEpoch, + getBackfillSlot, + getFrontfillSlot, + isWithinWeakSubjectivityPeriod, + dag.tail.slot, + blockVerifier, + shutdownEvent = node.shutdownEvent, + flags = syncManagerFlags, + ) + backfiller = newSyncManager[Peer, PeerId]( + node.network.peerPool, + dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + SyncQueueKind.Backward, + getLocalHeadSlot, + getLocalWallSlot, + getFirstSlotAtFinalizedEpoch, + getBackfillSlot, + getFrontfillSlot, + isWithinWeakSubjectivityPeriod, + dag.backfill.slot, + blockVerifier, + maxHeadAge = 0, + shutdownEvent = node.shutdownEvent, + flags = syncManagerFlags, + ) + router = (ref MessageRouter)(processor: processor, network: node.network) + requestManager = RequestManager.init( + node.network, + dag.cfg.DENEB_FORK_EPOCH, + getBeaconTime, + ( + proc(): bool = + syncManager.inProgress + ), + quarantine, + blobQuarantine, + rmanBlockVerifier, + rmanBlockLoader, + rmanBlobLoader, + ) + # adapted from nimbus-eth2 + # if node.config.lightClientDataServe: + # proc scheduleSendingLightClientUpdates(slot: Slot) = + # if node.lightClientPool[].broadcastGossipFut != nil: + # return + # if slot <= node.lightClientPool[].latestBroadcastedSlot: + # return + # node.lightClientPool[].latestBroadcastedSlot = slot + + # template fut(): auto = + # node.lightClientPool[].broadcastGossipFut + + # fut = node.handleLightClientUpdates(slot) + # fut.addCallback do(p: pointer) {.gcsafe.}: + # fut = nil + + 
# router.onSyncCommitteeMessage = scheduleSendingLightClientUpdates + + dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.elManager) + dag.setBlockCb(onBlockAdded) + dag.setHeadCb(onHeadChanged) + dag.setReorgCb(onChainReorg) + + node.dag = dag + node.blobQuarantine = blobQuarantine + node.quarantine = quarantine + node.attestationPool = attestationPool + node.syncCommitteeMsgPool = syncCommitteeMsgPool + # node.lightClientPool = lightClientPool + node.validatorChangePool = validatorChangePool + node.processor = processor + node.blockProcessor = blockProcessor + node.consensusManager = consensusManager + node.requestManager = requestManager + node.syncManager = syncManager + node.backfiller = backfiller + node.router = router + + await node.addValidators() + + block: + # Add in-process validators to the list of "known" validators such that + # we start with a reasonable ENR + let wallSlot = node.beaconClock.now().slotOrZero() + for validator in node.attachedValidators[].validators.values(): + if config.validatorMonitorAuto: + node.validatorMonitor[].addMonitor(validator.pubkey, validator.index) + + if validator.index.isSome(): + withState(dag.headState): + let idx = validator.index.get() + if distinctBase(idx) <= forkyState.data.validators.lenu64: + template v(): auto = + forkyState.data.validators.item(idx) + + if is_active_validator(v, wallSlot.epoch) or + is_active_validator(v, wallSlot.epoch + 1): + node.consensusManager[].actionTracker.knownValidators[idx] = wallSlot + elif is_exited_validator(v, wallSlot.epoch): + notice "Ignoring exited validator", + index = idx, pubkey = shortLog(v.pubkey) + let stabilitySubnets = + node.consensusManager[].actionTracker.stabilitySubnets(wallSlot) + # Here, we also set the correct ENR should we be in all subnets mode! 
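+    # Stability subnets are the long-lived attestation subnets this node
+    # advertises in its ENR `attnets` field (phase0 p2p spec); updating the
+    # metadata below keeps the advertised ENR in sync with the action tracker.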
+  node.network.updateStabilitySubnetMetadata(stabilitySubnets)
+
+  node.network.registerProtocol(
+    PeerSync, PeerSync.NetworkState.init(node.dag, node.beaconClock.getBeaconTimeFn())
+  )
+
+  node.network.registerProtocol(BeaconSync, BeaconSync.NetworkState.init(node.dag))
+  # adapted from nimbus-eth2
+
+  # if node.dag.lcDataStore.serve:
+  #   node.network.registerProtocol(
+  #     LightClientSync, LightClientSync.NetworkState.init(node.dag)
+  #   )
+
+  # node.updateValidatorMetrics()
+
+func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs =
+  case stdoutKind
+  of StdoutLogKind.Auto: raiseAssert "inadmissible here"
+  of StdoutLogKind.Colors:
+    VanityLogs(
+      onMergeTransitionBlock: bellatrixColor,
+      onFinalizedMergeTransitionBlock: bellatrixBlink,
+      onUpgradeToCapella: capellaColor,
+      onKnownBlsToExecutionChange: capellaBlink,
+      onUpgradeToDeneb: denebColor,
+      onUpgradeToElectra: electraColor)
+  of StdoutLogKind.NoColors:
+    VanityLogs(
+      onMergeTransitionBlock: bellatrixMono,
+      onFinalizedMergeTransitionBlock: bellatrixMono,
+      onUpgradeToCapella: capellaMono,
+      onKnownBlsToExecutionChange: capellaMono,
+      onUpgradeToDeneb: denebMono,
+      onUpgradeToElectra: electraMono)
+  of StdoutLogKind.Json, StdoutLogKind.None:
+    VanityLogs(
+      onMergeTransitionBlock:
+        (proc() = notice "🐼 Proof of Stake Activated 🐼"),
+      onFinalizedMergeTransitionBlock:
+        (proc() = notice "🐼 Proof of Stake Finalized 🐼"),
+      onUpgradeToCapella:
+        (proc() = notice "🦉 Withdrawals now available 🦉"),
+      onKnownBlsToExecutionChange:
+        (proc() = notice "🦉 BLS to execution changed 🦉"),
+      onUpgradeToDeneb:
+        (proc() = notice "🐟 Proto-Danksharding is ON 🐟"),
+      onUpgradeToElectra:
+        (proc() = notice "🦒 [PH] Electra 🦒"))
+
+func getVanityMascot(consensusFork: ConsensusFork): string =
+  case consensusFork
+  of ConsensusFork.Electra:
+    "🦒"
+  of ConsensusFork.Deneb:
+    "🐟"
+  of ConsensusFork.Capella:
+    "🦉"
+  of ConsensusFork.Bellatrix:
+    "🐼"
+  of ConsensusFork.Altair:
+    "✨"
+  of ConsensusFork.Phase0:
+    "🦏"
+
+# NOTE: light client related code commented out
+proc loadChainDag(
+    config: BeaconNodeConf,
+    cfg: RuntimeConfig,
+    db: BeaconChainDB,
+    eventBus: EventBus,
+    validatorMonitor: ref ValidatorMonitor,
+    networkGenesisValidatorsRoot: Opt[Eth2Digest],
+): ChainDAGRef =
+  info "Loading block DAG from database", path = config.databaseDir
+
+  var dag: ChainDAGRef
+  proc onLightClientFinalityUpdate(data: ForkedLightClientFinalityUpdate) =
+    if dag == nil:
+      return
+    withForkyFinalityUpdate(data):
+      when lcDataFork > LightClientDataFork.None:
+        let contextFork = dag.cfg.consensusForkAtEpoch(forkyFinalityUpdate.contextEpoch)
+        eventBus.finUpdateQueue.emit(
+          RestVersioned[ForkedLightClientFinalityUpdate](
+            data: data,
+            jsonVersion: contextFork,
+            sszContext: dag.forkDigests[].atConsensusFork(contextFork),
+          )
+        )
+
+  proc onLightClientOptimisticUpdate(data: ForkedLightClientOptimisticUpdate) =
+    if dag == nil:
+      return
+    withForkyOptimisticUpdate(data):
+      when lcDataFork > LightClientDataFork.None:
+        let contextFork =
+          dag.cfg.consensusForkAtEpoch(forkyOptimisticUpdate.contextEpoch)
+        eventBus.optUpdateQueue.emit(
+          RestVersioned[ForkedLightClientOptimisticUpdate](
+            data: data,
+            jsonVersion: contextFork,
+            sszContext: dag.forkDigests[].atConsensusFork(contextFork),
+          )
+        )
+
+  let
+    chainDagFlags =
+      if config.strictVerification:
+        {strictVerification}
+      else:
+        {}
+    onLightClientFinalityUpdateCb =
+      if config.lightClientDataServe: onLightClientFinalityUpdate else: nil
+    onLightClientOptimisticUpdateCb =
+      if config.lightClientDataServe: onLightClientOptimisticUpdate
+      else: nil
+
+  dag = ChainDAGRef.init(
+    cfg, db, validatorMonitor, chainDagFlags, config.eraDir,
+    vanityLogs = getVanityLogs(detectTTY(config.logStdout)),
+    lcDataConfig = LightClientDataConfig(
+      serve: config.lightClientDataServe,
+      importMode: config.lightClientDataImportMode,
+      maxPeriods: config.lightClientDataMaxPeriods,
+      onLightClientFinalityUpdate: onLightClientFinalityUpdateCb,
+      onLightClientOptimisticUpdate: onLightClientOptimisticUpdateCb))
+
+  if networkGenesisValidatorsRoot.isSome:
+    let databaseGenesisValidatorsRoot =
+      getStateField(dag.headState, genesis_validators_root)
+    if networkGenesisValidatorsRoot.get != databaseGenesisValidatorsRoot:
+      fatal "The specified --data-dir contains data for a different network",
+        networkGenesisValidatorsRoot = networkGenesisValidatorsRoot.get,
+        databaseGenesisValidatorsRoot,
+        dataDir = config.dataDir
+      quit 1
+
+  # The first pruning after restart may take a while.
+  if config.historyMode == HistoryMode.Prune:
+    dag.pruneHistory(true)
+
+  dag
+
+proc doRunTrustedNodeSync(
+    db: BeaconChainDB,
+    metadata: Eth2NetworkMetadata,
+    databaseDir: string,
+    eraDir: string,
+    restUrl: string,
+    stateId: Option[string],
+    trustedBlockRoot: Option[Eth2Digest],
+    backfill: bool,
+    reindex: bool,
+    downloadDepositSnapshot: bool,
+    genesisState: ref ForkedHashedBeaconState,
+) {.async.} =
+  let syncTarget =
+    if stateId.isSome:
+      if trustedBlockRoot.isSome:
+        warn "Ignoring `trustedBlockRoot`, `stateId` is set", stateId, trustedBlockRoot
+      TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: stateId.get)
+    elif trustedBlockRoot.isSome:
+      TrustedNodeSyncTarget(
+        kind: TrustedNodeSyncKind.TrustedBlockRoot,
+        trustedBlockRoot: trustedBlockRoot.get,
+      )
+    else:
+      TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: "finalized")
+
+  await db.doTrustedNodeSync(
+    metadata.cfg, databaseDir, eraDir, restUrl, syncTarget, backfill, reindex,
+    downloadDepositSnapshot, genesisState,
+  )
+
+proc initBeaconNode*(T: type BeaconNode,
+                     rng: ref HmacDrbgContext,
+                     config: BeaconNodeConf,
+                     metadata: Eth2NetworkMetadata): Future[BeaconNode]
+                    {.async.} =
+  var
+    taskpool: TaskPoolPtr
+    genesisState: ref ForkedHashedBeaconState = nil
+
+  template cfg: auto = metadata.cfg
+  template eth1Network: auto = metadata.eth1Network
+
+  if not(isDir(config.databaseDir)):
+    # If the database directory is missing, we are going to use the genesis
+    # state to check for weak_subjectivity_period.
+    genesisState =
+      await fetchGenesisState(
+        metadata, config.genesisState, config.genesisStateUrl)
+    let
+      genesisTime = getStateField(genesisState[], genesis_time)
+      beaconClock = BeaconClock.init(genesisTime).valueOr:
+        fatal "Invalid genesis time in genesis state", genesisTime
+        quit 1
+      currentSlot = beaconClock.now().slotOrZero()
+      checkpoint = Checkpoint(
+        epoch: epoch(getStateField(genesisState[], slot)),
+        root: getStateField(genesisState[], latest_block_header).state_root)
+    # adapted from nimbus-eth2
+    # if config.longRangeSync == LongRangeSyncMode.Light:
+    #   if not is_within_weak_subjectivity_period(metadata.cfg, currentSlot,
+    #       genesisState[], checkpoint):
+    #     fatal WeakSubjectivityLogMessage, current_slot = currentSlot
+    #     quit 1
+
+  try:
+    if config.numThreads < 0:
+      fatal "The number of threads --numThreads cannot be negative."
+ quit 1 + elif config.numThreads == 0: + taskpool = TaskPoolPtr.new(numThreads = min(countProcessors(), 16)) + else: + taskpool = TaskPoolPtr.new(numThreads = config.numThreads) + + info "Threadpool started", numThreads = taskpool.numThreads + except Exception: + raise newException(Defect, "Failure in taskpool initialization.") + + if metadata.genesis.kind == BakedIn: + if config.genesisState.isSome: + warn "The --genesis-state option has no effect on networks with built-in genesis state" + + if config.genesisStateUrl.isSome: + warn "The --genesis-state-url option has no effect on networks with built-in genesis state" + + let + eventBus = EventBus( + headQueue: newAsyncEventQueue[HeadChangeInfoObject](), + blocksQueue: newAsyncEventQueue[EventBeaconBlockObject](), + attestQueue: newAsyncEventQueue[phase0.Attestation](), + exitQueue: newAsyncEventQueue[SignedVoluntaryExit](), + blsToExecQueue: newAsyncEventQueue[SignedBLSToExecutionChange](), + propSlashQueue: newAsyncEventQueue[ProposerSlashing](), + attSlashQueue: newAsyncEventQueue[AttesterSlashing](), + blobSidecarQueue: newAsyncEventQueue[BlobSidecarInfoObject](), + finalQueue: newAsyncEventQueue[FinalizationInfoObject](), + reorgQueue: newAsyncEventQueue[ReorgInfoObject](), + contribQueue: newAsyncEventQueue[SignedContributionAndProof](), + finUpdateQueue: newAsyncEventQueue[ + RestVersioned[ForkedLightClientFinalityUpdate]](), + optUpdateQueue: newAsyncEventQueue[ + RestVersioned[ForkedLightClientOptimisticUpdate]]()) + db = BeaconChainDB.new(config.databaseDir, cfg, inMemory = false) + + if config.externalBeaconApiUrl.isSome and ChainDAGRef.isInitialized(db).isErr: + let trustedBlockRoot = + if config.trustedStateRoot.isSome or config.trustedBlockRoot.isSome: + config.trustedBlockRoot + elif cfg.ALTAIR_FORK_EPOCH == GENESIS_EPOCH: + # Sync can be bootstrapped from the genesis block root + if genesisState.isNil: + genesisState = await fetchGenesisState( + metadata, config.genesisState, config.genesisStateUrl) + if not genesisState.isNil: + let genesisBlockRoot = get_initial_beacon_block(genesisState[]).root + notice "Neither `--trusted-block-root` nor `--trusted-state-root` " & + "provided with `--external-beacon-api-url`, " & + "falling back to genesis block root", + externalBeaconApiUrl = config.externalBeaconApiUrl.get, + trustedBlockRoot = config.trustedBlockRoot, + trustedStateRoot = config.trustedStateRoot, + genesisBlockRoot = $genesisBlockRoot + some genesisBlockRoot + else: + none[Eth2Digest]() + else: + none[Eth2Digest]() + if config.trustedStateRoot.isNone and trustedBlockRoot.isNone: + warn "Ignoring `--external-beacon-api-url`, neither " & + "`--trusted-block-root` nor `--trusted-state-root` provided", + externalBeaconApiUrl = config.externalBeaconApiUrl.get, + trustedBlockRoot = config.trustedBlockRoot, + trustedStateRoot = config.trustedStateRoot + else: + if genesisState.isNil: + genesisState = await fetchGenesisState( + metadata, config.genesisState, config.genesisStateUrl) + await db.doRunTrustedNodeSync( + metadata, + config.databaseDir, + config.eraDir, + config.externalBeaconApiUrl.get, + config.trustedStateRoot.map do (x: Eth2Digest) -> string: + "0x" & x.data.toHex, + trustedBlockRoot, + backfill = false, + reindex = false, + downloadDepositSnapshot = false, + genesisState) + + if config.finalizedCheckpointBlock.isSome: + warn "--finalized-checkpoint-block has been deprecated, ignoring" + + let checkpointState = if config.finalizedCheckpointState.isSome: + let checkpointStatePath = 
config.finalizedCheckpointState.get.string + let tmp = try: + newClone(readSszForkedHashedBeaconState( + cfg, readAllBytes(checkpointStatePath).tryGet())) + except SszError as err: + fatal "Checkpoint state loading failed", + err = formatMsg(err, checkpointStatePath) + quit 1 + except CatchableError as err: + fatal "Failed to read checkpoint state file", err = err.msg + quit 1 + + if not getStateField(tmp[], slot).is_epoch: + fatal "--finalized-checkpoint-state must point to a state for an epoch slot", + slot = getStateField(tmp[], slot) + quit 1 + tmp + else: + nil + + if config.finalizedDepositTreeSnapshot.isSome: + let + depositTreeSnapshotPath = config.finalizedDepositTreeSnapshot.get.string + snapshot = + try: + SSZ.loadFile(depositTreeSnapshotPath, DepositTreeSnapshot) + except SszError as err: + fatal "Deposit tree snapshot loading failed", + err = formatMsg(err, depositTreeSnapshotPath) + quit 1 + except CatchableError as err: + fatal "Failed to read deposit tree snapshot file", err = err.msg + quit 1 + depositContractSnapshot = DepositContractSnapshot.init(snapshot).valueOr: + fatal "Invalid deposit tree snapshot file" + quit 1 + db.putDepositContractSnapshot(depositContractSnapshot) + + let engineApiUrls = config.engineApiUrls + + if engineApiUrls.len == 0: + notice "Running without execution client - validator features disabled (see https://nimbus.guide/eth1.html)" + + var networkGenesisValidatorsRoot = metadata.bakedGenesisValidatorsRoot + + if not ChainDAGRef.isInitialized(db).isOk(): + genesisState = + if not checkpointState.isNil and + getStateField(checkpointState[], slot) == 0: + checkpointState + else: + if genesisState.isNil: + await fetchGenesisState( + metadata, config.genesisState, config.genesisStateUrl) + else: + genesisState + + if genesisState.isNil and checkpointState.isNil: + fatal "No database and no genesis snapshot found. 
Please supply a genesis.ssz " & + "with the network configuration" + quit 1 + + if not genesisState.isNil and not checkpointState.isNil: + if getStateField(genesisState[], genesis_validators_root) != + getStateField(checkpointState[], genesis_validators_root): + fatal "Checkpoint state does not match genesis - check the --network parameter", + rootFromGenesis = getStateField( + genesisState[], genesis_validators_root), + rootFromCheckpoint = getStateField( + checkpointState[], genesis_validators_root) + quit 1 + + try: + # Always store genesis state if we have it - this allows reindexing and + # answering genesis queries + if not genesisState.isNil: + ChainDAGRef.preInit(db, genesisState[]) + networkGenesisValidatorsRoot = + Opt.some(getStateField(genesisState[], genesis_validators_root)) + + if not checkpointState.isNil: + if genesisState.isNil or + getStateField(checkpointState[], slot) != GENESIS_SLOT: + ChainDAGRef.preInit(db, checkpointState[]) + + doAssert ChainDAGRef.isInitialized(db).isOk(), "preInit should have initialized db" + except CatchableError as exc: + error "Failed to initialize database", err = exc.msg + quit 1 + else: + if not checkpointState.isNil: + fatal "A database already exists, cannot start from given checkpoint", + dataDir = config.dataDir + quit 1 + + # Doesn't use std/random directly, but dependencies might + randomize(rng[].rand(high(int))) + + # The validatorMonitorTotals flag has been deprecated and should eventually be + # removed - until then, it's given priority if set so as not to needlessly + # break existing setups + let + validatorMonitor = newClone(ValidatorMonitor.init( + config.validatorMonitorAuto, + config.validatorMonitorTotals.get( + not config.validatorMonitorDetails))) + + for key in config.validatorMonitorPubkeys: + validatorMonitor[].addMonitor(key, Opt.none(ValidatorIndex)) + + let + dag = loadChainDag( + config, cfg, db, eventBus, + validatorMonitor, networkGenesisValidatorsRoot) + genesisTime = getStateField(dag.headState, genesis_time) + beaconClock = BeaconClock.init(genesisTime).valueOr: + fatal "Invalid genesis time in state", genesisTime + quit 1 + + getBeaconTime = beaconClock.getBeaconTimeFn() + + if config.weakSubjectivityCheckpoint.isSome: + dag.checkWeakSubjectivityCheckpoint( + config.weakSubjectivityCheckpoint.get, beaconClock) + + let elManager = ELManager.new( + cfg, + metadata.depositContractBlock, + metadata.depositContractBlockHash, + db, + engineApiUrls, + eth1Network) + + if config.rpcEnabled.isSome: + warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it." 
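+
+  # A minimal sketch of the database bootstrap rule applied above, kept under
+  # `when false` so it is never compiled; `pickInitialStates` is a
+  # hypothetical helper name, not part of nimbus-eth2:
+  when false:
+    proc pickInitialStates(
+        genesisState, checkpointState: ref ForkedHashedBeaconState
+    ): tuple[storeGenesis, storeCheckpoint: bool] =
+      # Genesis is always stored when available (reindexing, genesis queries);
+      # the checkpoint state is stored in addition only when it is not itself
+      # the genesis state.
+      let storeGenesis = not genesisState.isNil
+      let storeCheckpoint =
+        not checkpointState.isNil and
+        (genesisState.isNil or
+          getStateField(checkpointState[], slot) != GENESIS_SLOT)
+      (storeGenesis, storeCheckpoint)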
+ + let restServer = if config.restEnabled: + RestServerRef.init(config.restAddress, config.restPort, + config.restAllowedOrigin, + validateBeaconApiQueries, + nimbusAgentStr, + config) + else: + nil + + let + netKeys = getPersistentNetKeys(rng[], config) + nickname = if config.nodeName == "auto": shortForm(netKeys) + else: config.nodeName + network = createEth2Node( + rng, config, netKeys, cfg, dag.forkDigests, getBeaconTime, + getStateField(dag.headState, genesis_validators_root)) + + case config.slashingDbKind + of SlashingDbKind.v2: + discard + of SlashingDbKind.v1: + error "Slashing DB v1 is no longer supported for writing" + quit 1 + of SlashingDbKind.both: + warn "Slashing DB v1 deprecated, writing only v2" + + info "Loading slashing protection database (v2)", + path = config.validatorsDir() + + proc getValidatorAndIdx(pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] = + withState(dag.headState): + getValidator(forkyState().data.validators.asSeq(), pubkey) + + func getCapellaForkVersion(): Opt[Version] = + Opt.some(cfg.CAPELLA_FORK_VERSION) + + func getDenebForkEpoch(): Opt[Epoch] = + Opt.some(cfg.DENEB_FORK_EPOCH) + + proc getForkForEpoch(epoch: Epoch): Opt[Fork] = + Opt.some(dag.forkAtEpoch(epoch)) + + proc getGenesisRoot(): Eth2Digest = + getStateField(dag.headState, genesis_validators_root) + + let + keystoreCache = KeystoreCacheRef.init() + slashingProtectionDB = + SlashingProtectionDB.init( + getStateField(dag.headState, genesis_validators_root), + config.validatorsDir(), SlashingDbName) + validatorPool = newClone(ValidatorPool.init( + slashingProtectionDB, config.doppelgangerDetection)) + + keymanagerInitResult = initKeymanagerServer(config, restServer) + keymanagerHost = if keymanagerInitResult.server != nil: + newClone KeymanagerHost.init( + validatorPool, + keystoreCache, + rng, + keymanagerInitResult.token, + config.validatorsDir, + config.secretsDir, + config.defaultFeeRecipient, + config.suggestedGasLimit, + config.defaultGraffitiBytes, + config.getPayloadBuilderAddress, + getValidatorAndIdx, + getBeaconTime, + getCapellaForkVersion, + getDenebForkEpoch, + getForkForEpoch, + getGenesisRoot) + else: nil + + stateTtlCache = + if config.restCacheSize > 0: + StateTtlCache.init( + cacheSize = config.restCacheSize, + cacheTtl = chronos.seconds(config.restCacheTtl)) + else: + nil + + if config.payloadBuilderEnable: + info "Using external payload builder", + payloadBuilderUrl = config.payloadBuilderUrl + + let node = BeaconNode( + nickname: nickname, + graffitiBytes: if config.graffiti.isSome: config.graffiti.get + else: defaultGraffitiBytes(), + network: network, + netKeys: netKeys, + db: db, + config: config, + attachedValidators: validatorPool, + elManager: elManager, + restServer: restServer, + keymanagerHost: keymanagerHost, + keymanagerServer: keymanagerInitResult.server, + keystoreCache: keystoreCache, + eventBus: eventBus, + gossipState: {}, + blocksGossipState: {}, + beaconClock: beaconClock, + validatorMonitor: validatorMonitor, + stateTtlCache: stateTtlCache, + shutdownEvent: newAsyncEvent(), + dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init())) + + # TODO: we are initializing the light client given that it has a function + # to validate if the sync should be done optimistically or not, and it used + # along beacon node + node.initLightClient( + rng, cfg, dag.forkDigests, getBeaconTime, dag.genesis_validators_root) + + await node.initFullNode(rng, dag, taskpool, getBeaconTime) + + node.updateLightClientFromDag() + + node + +proc 
installMessageValidators(node: BeaconNode) = + # These validators stay around the whole time, regardless of which specific + # subnets are subscribed to during any given epoch. + let forkDigests = node.dag.forkDigests + + for fork in ConsensusFork: + withConsensusFork(fork): + let digest = forkDigests[].atConsensusFork(consensusFork) + + # beacon_block + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_block + node.network.addValidator( + getBeaconBlocksTopic(digest), proc ( + signedBlock: consensusFork.SignedBeaconBlock + ): ValidationResult = + if node.shouldSyncOptimistically(node.currentSlot): + toValidationResult( + node.optimisticProcessor.processSignedBeaconBlock( + signedBlock)) + else: + toValidationResult( + node.processor[].processSignedBeaconBlock( + MsgSource.gossip, signedBlock))) + + # beacon_attestation_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id + when consensusFork >= ConsensusFork.Electra: + for it in SubnetId: + closureScope: # Needed for inner `proc`; don't lift it out of loop. + let subnet_id = it + node.network.addAsyncValidator( + getAttestationTopic(digest, subnet_id), proc ( + attestation: electra.Attestation + ): Future[ValidationResult] {. + async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processAttestation( + MsgSource.gossip, attestation, subnet_id, + checkSignature = true, checkValidator = false))) + else: + for it in SubnetId: + closureScope: # Needed for inner `proc`; don't lift it out of loop. + let subnet_id = it + node.network.addAsyncValidator( + getAttestationTopic(digest, subnet_id), proc ( + attestation: phase0.Attestation + ): Future[ValidationResult] {. 
+ async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processAttestation( + MsgSource.gossip, attestation, subnet_id, + checkSignature = true, checkValidator = false))) + + # beacon_aggregate_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof + when consensusFork >= ConsensusFork.Electra: + node.network.addAsyncValidator( + getAggregateAndProofsTopic(digest), proc ( + signedAggregateAndProof: electra.SignedAggregateAndProof + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedAggregateAndProof( + MsgSource.gossip, signedAggregateAndProof))) + else: + node.network.addAsyncValidator( + getAggregateAndProofsTopic(digest), proc ( + signedAggregateAndProof: phase0.SignedAggregateAndProof + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedAggregateAndProof( + MsgSource.gossip, signedAggregateAndProof))) + + # attester_slashing + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attester_slashing + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/electra/p2p-interface.md#modifications-in-electra + when consensusFork >= ConsensusFork.Electra: + node.network.addValidator( + getAttesterSlashingsTopic(digest), proc ( + attesterSlashing: electra.AttesterSlashing + ): ValidationResult = + toValidationResult( + node.processor[].processAttesterSlashing( + MsgSource.gossip, attesterSlashing))) + else: + node.network.addValidator( + getAttesterSlashingsTopic(digest), proc ( + attesterSlashing: phase0.AttesterSlashing + ): ValidationResult = + toValidationResult( + node.processor[].processAttesterSlashing( + MsgSource.gossip, attesterSlashing))) + + # proposer_slashing + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#proposer_slashing + node.network.addValidator( + getProposerSlashingsTopic(digest), proc ( + proposerSlashing: ProposerSlashing + ): ValidationResult = + toValidationResult( + node.processor[].processProposerSlashing( + MsgSource.gossip, proposerSlashing))) + + # voluntary_exit + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#voluntary_exit + node.network.addValidator( + getVoluntaryExitsTopic(digest), proc ( + signedVoluntaryExit: SignedVoluntaryExit + ): ValidationResult = + toValidationResult( + node.processor[].processSignedVoluntaryExit( + MsgSource.gossip, signedVoluntaryExit))) + + when consensusFork >= ConsensusFork.Altair: + # sync_committee_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_subnet_id + for subcommitteeIdx in SyncSubcommitteeIndex: + closureScope: # Needed for inner `proc`; don't lift it out of loop. 
+ let idx = subcommitteeIdx + node.network.addAsyncValidator( + getSyncCommitteeTopic(digest, idx), proc ( + msg: SyncCommitteeMessage + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSyncCommitteeMessage( + MsgSource.gossip, msg, idx))) + + # sync_committee_contribution_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof + node.network.addAsyncValidator( + getSyncCommitteeContributionAndProofTopic(digest), proc ( + msg: SignedContributionAndProof + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedContributionAndProof( + MsgSource.gossip, msg))) + + when consensusFork >= ConsensusFork.Capella: + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/p2p-interface.md#bls_to_execution_change + node.network.addAsyncValidator( + getBlsToExecutionChangeTopic(digest), proc ( + msg: SignedBLSToExecutionChange + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processBlsToExecutionChange( + MsgSource.gossip, msg))) + + when consensusFork >= ConsensusFork.Deneb: + # blob_sidecar_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id + for it in BlobId: + closureScope: # Needed for inner `proc`; don't lift it out of loop. + let subnet_id = it + node.network.addValidator( + getBlobSidecarTopic(digest, subnet_id), proc ( + blobSidecar: deneb.BlobSidecar + ): ValidationResult = + toValidationResult( + node.processor[].processBlobSidecar( + MsgSource.gossip, blobSidecar, subnet_id))) + + # node.installLightClientMessageValidators() + +proc checkWeakSubjectivityCheckpoint( + dag: ChainDAGRef, wsCheckpoint: Checkpoint, beaconClock: BeaconClock +) = + let + currentSlot = beaconClock.now.slotOrZero + isCheckpointStale = + not is_within_weak_subjectivity_period( + dag.cfg, currentSlot, dag.headState, wsCheckpoint + ) + + if isCheckpointStale: + error "Weak subjectivity checkpoint is stale", + currentSlot, + checkpoint = wsCheckpoint, + headStateSlot = getStateField(dag.headState, slot) + quit 1 -## adapted/copied from nimbus-eth2 proc fetchGenesisState( metadata: Eth2NetworkMetadata, genesisState = none(InputFile), @@ -97,37 +1205,1208 @@ proc fetchGenesisState( else: nil -## adapted/copied from nimbus-eth2 -proc doRunTrustedNodeSync( - db: BeaconChainDB, - metadata: Eth2NetworkMetadata, - databaseDir: string, - eraDir: string, - restUrl: string, - stateId: Option[string], - trustedBlockRoot: Option[Eth2Digest], - backfill: bool, - reindex: bool, - downloadDepositSnapshot: bool, - genesisState: ref ForkedHashedBeaconState, -) {.async.} = - let syncTarget = - if stateId.isSome: - if trustedBlockRoot.isSome: - warn "Ignoring `trustedBlockRoot`, `stateId` is set", stateId, trustedBlockRoot - TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: stateId.get) - elif trustedBlockRoot.isSome: - TrustedNodeSyncTarget( - kind: TrustedNodeSyncKind.TrustedBlockRoot, - trustedBlockRoot: trustedBlockRoot.get, - ) - else: - TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: "finalized") +proc pruneBlobs(node: BeaconNode, slot: Slot) = + let blobPruneEpoch = (slot.epoch - + node.dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1) + if slot.is_epoch() and blobPruneEpoch >= 
node.dag.cfg.DENEB_FORK_EPOCH: + var blocks: array[SLOTS_PER_EPOCH.int, BlockId] + var count = 0 + let startIndex = node.dag.getBlockRange( + blobPruneEpoch.start_slot, 1, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1)) + for i in startIndex..= ConsensusFork.Altair: + forkyState.data.current_sync_committee + else: + return static(default(SyncnetBits)) + + getSyncSubnets(node.hasSyncPubKey(epoch), syncCommittee) + +func getNextSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = + let syncCommittee = withState(node.dag.headState): + when consensusFork >= ConsensusFork.Altair: + forkyState.data.next_sync_committee + else: + return static(default(SyncnetBits)) + + getSyncSubnets( + node.hasSyncPubKey((epoch.sync_committee_period + 1).start_slot().epoch), + syncCommittee) + +func getSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = + let + subnets = node.getCurrentSyncCommiteeSubnets(epoch) + epochsToSyncPeriod = nearSyncCommitteePeriod(epoch) + + # The end-slot tracker might call this when it's theoretically applicable, + # but more than SYNC_COMMITTEE_SUBNET_COUNT epochs from when the next sync + # committee period begins, in which case `epochsToNextSyncPeriod` is none. + if epochsToSyncPeriod.isNone or + node.dag.cfg.consensusForkAtEpoch(epoch + epochsToSyncPeriod.get) < + ConsensusFork.Altair: + return subnets + + subnets + node.getNextSyncCommitteeSubnets(epoch) + +func forkDigests(node: BeaconNode): auto = + let forkDigestsArray: array[ConsensusFork, auto] = [ + node.dag.forkDigests.phase0, + node.dag.forkDigests.altair, + node.dag.forkDigests.bellatrix, + node.dag.forkDigests.capella, + node.dag.forkDigests.deneb, + node.dag.forkDigests.electra] + forkDigestsArray + +proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) = + template lastSyncUpdate: untyped = + node.consensusManager[].actionTracker.lastSyncUpdate + if lastSyncUpdate == Opt.some(slot.sync_committee_period()) and + nearSyncCommitteePeriod(slot.epoch).isNone(): + # No need to update unless we're close to the next sync committee period or + # new validators were registered with the action tracker + # TODO we _could_ skip running this in some of the "near" slots, but.. + return + + lastSyncUpdate = Opt.some(slot.sync_committee_period()) + + let syncnets = node.getSyncCommitteeSubnets(slot.epoch) + + debug "Updating sync committee subnets", + syncnets, + metadata_syncnets = node.network.metadata.syncnets, + gossipState = node.gossipState + + # Assume that different gossip fork sync committee setups are in sync; this + # only remains relevant, currently, for one gossip transition epoch, so the + # consequences of this not being true aren't exceptionally dire, while this + # allows for bookkeeping simplication. 
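+  # Worked example for the delta computed below: if the node currently
+  # advertises syncnets {0} and the tracker now wants {0, 2}, then
+  # newSyncnets = {2} (to subscribe), oldSyncnets = {} (nothing to
+  # unsubscribe), and the advertised metadata is updated to {0, 2}.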
+ if syncnets == node.network.metadata.syncnets: + return + + let + newSyncnets = + syncnets - node.network.metadata.syncnets + oldSyncnets = + node.network.metadata.syncnets - syncnets + forkDigests = node.forkDigests() + + for subcommitteeIdx in SyncSubcommitteeIndex: + doAssert not (newSyncnets[subcommitteeIdx] and + oldSyncnets[subcommitteeIdx]) + for gossipFork in node.gossipState: + template topic(): auto = + getSyncCommitteeTopic(forkDigests[gossipFork], subcommitteeIdx) + if oldSyncnets[subcommitteeIdx]: + node.network.unsubscribe(topic) + elif newSyncnets[subcommitteeIdx]: + node.network.subscribe(topic, basicParams) + + node.network.updateSyncnetsMetadata(syncnets) + +proc removePhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.network.unsubscribe(getVoluntaryExitsTopic(forkDigest)) + node.network.unsubscribe(getProposerSlashingsTopic(forkDigest)) + node.network.unsubscribe(getAttesterSlashingsTopic(forkDigest)) + node.network.unsubscribe(getAggregateAndProofsTopic(forkDigest)) + + for subnet_id in SubnetId: + node.network.unsubscribe(getAttestationTopic(forkDigest, subnet_id)) + + node.consensusManager[].actionTracker.subscribedSubnets = default(AttnetBits) + +# updateAttestationSubnetHandlers subscribes attestation subnets +proc addPhase0MessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.network.subscribe(getAttesterSlashingsTopic(forkDigest), basicParams) + node.network.subscribe(getProposerSlashingsTopic(forkDigest), basicParams) + node.network.subscribe(getVoluntaryExitsTopic(forkDigest), basicParams) + node.network.subscribe( + getAggregateAndProofsTopic(forkDigest), aggregateTopicParams, + enableTopicMetrics = true) + +proc addAltairMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.addPhase0MessageHandlers(forkDigest, slot) + + # If this comes online near sync committee period, it'll immediately get + # replaced as usual by trackSyncCommitteeTopics, which runs at slot end. 
+ let syncnets = node.getSyncCommitteeSubnets(slot.epoch) + + for subcommitteeIdx in SyncSubcommitteeIndex: + if syncnets[subcommitteeIdx]: + node.network.subscribe( + getSyncCommitteeTopic(forkDigest, subcommitteeIdx), basicParams) + + node.network.subscribe( + getSyncCommitteeContributionAndProofTopic(forkDigest), basicParams) + + node.network.updateSyncnetsMetadata(syncnets) + +proc addCapellaMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.addAltairMessageHandlers(forkDigest, slot) + node.network.subscribe(getBlsToExecutionChangeTopic(forkDigest), basicParams) + +proc addDenebMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.addCapellaMessageHandlers(forkDigest, slot) + for topic in blobSidecarTopics(forkDigest): + node.network.subscribe(topic, basicParams) + +proc addElectraMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.addDenebMessageHandlers(forkDigest, slot) + +proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removePhase0MessageHandlers(forkDigest) + + for subcommitteeIdx in SyncSubcommitteeIndex: + closureScope: + let idx = subcommitteeIdx + node.network.unsubscribe(getSyncCommitteeTopic(forkDigest, idx)) + + node.network.unsubscribe( + getSyncCommitteeContributionAndProofTopic(forkDigest)) + +proc removeCapellaMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removeAltairMessageHandlers(forkDigest) + node.network.unsubscribe(getBlsToExecutionChangeTopic(forkDigest)) + +proc removeDenebMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removeCapellaMessageHandlers(forkDigest) + for topic in blobSidecarTopics(forkDigest): + node.network.unsubscribe(topic) + +proc removeElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removeDenebMessageHandlers(forkDigest) + +proc doppelgangerChecked(node: BeaconNode, epoch: Epoch) = + if not node.processor[].doppelgangerDetectionEnabled: + return + + # broadcastStartEpoch is set to FAR_FUTURE_EPOCH when we're not monitoring + # gossip - it is only viable to assert liveness in epochs where gossip is + # active + if epoch > node.processor[].doppelgangerDetection.broadcastStartEpoch: + for validator in node.attachedValidators[]: + validator.doppelgangerChecked(epoch - 1) + + +proc updateBlocksGossipStatus*( + node: BeaconNode, slot: Slot, dagIsBehind: bool) = + template cfg(): auto = node.dag.cfg + + let + isBehind = + if node.shouldSyncOptimistically(slot): + # If optimistic sync is active, always subscribe to blocks gossip + false + else: + # Use DAG status to determine whether to subscribe for blocks gossip + dagIsBehind + + targetGossipState = getTargetGossipState( + slot.epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, + cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, + isBehind) + + template currentGossipState(): auto = node.blocksGossipState + if currentGossipState == targetGossipState: + return + + if currentGossipState.card == 0 and targetGossipState.card > 0: + debug "Enabling blocks topic subscriptions", + wallSlot = slot, targetGossipState + elif currentGossipState.card > 0 and targetGossipState.card == 0: + debug "Disabling blocks topic subscriptions", + wallSlot = slot + else: + # Individual forks added / removed + discard + + let + newGossipForks = targetGossipState - currentGossipState + oldGossipForks = currentGossipState - targetGossipState + + for gossipFork in oldGossipForks: + let forkDigest = 
+      node.dag.forkDigests[].atConsensusFork(gossipFork)
+    node.network.unsubscribe(getBeaconBlocksTopic(forkDigest))
+
+  for gossipFork in newGossipForks:
+    let forkDigest = node.dag.forkDigests[].atConsensusFork(gossipFork)
+    node.network.subscribe(
+      getBeaconBlocksTopic(forkDigest), blocksTopicParams,
+      enableTopicMetrics = true)
+
+  node.blocksGossipState = targetGossipState
+
+func subnetLog(v: BitArray): string =
+  $toSeq(v.oneIndices())
+
+# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription
+proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) =
+  if node.gossipState.card == 0:
+    # When disconnected, updateBlocksGossipStatus is responsible for all things
+    # subnets - in particular, it will remove subscriptions on the edge where
+    # we enter the disconnected state.
+    return
+
+  let
+    aggregateSubnets =
+      node.consensusManager[].actionTracker.aggregateSubnets(slot)
+    stabilitySubnets =
+      node.consensusManager[].actionTracker.stabilitySubnets(slot)
+    subnets = aggregateSubnets + stabilitySubnets
+
+  node.network.updateStabilitySubnetMetadata(stabilitySubnets)
+
+  # Now we know what we should be subscribed to - make it so
+  let
+    prevSubnets = node.consensusManager[].actionTracker.subscribedSubnets
+    unsubscribeSubnets = prevSubnets - subnets
+    subscribeSubnets = subnets - prevSubnets
+
+  # Remember what we subscribed to, so we can unsubscribe later
+  node.consensusManager[].actionTracker.subscribedSubnets = subnets
+
+  let forkDigests = node.forkDigests()
+
+  for gossipFork in node.gossipState:
+    let forkDigest = forkDigests[gossipFork]
+    node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, forkDigest)
+    node.network.subscribeAttestationSubnets(subscribeSubnets, forkDigest)
+
+  debug "Attestation subnets",
+    slot, epoch = slot.epoch, gossipState = node.gossipState,
+    stabilitySubnets = subnetLog(stabilitySubnets),
+    aggregateSubnets = subnetLog(aggregateSubnets),
+    prevSubnets = subnetLog(prevSubnets),
+    subscribeSubnets = subnetLog(subscribeSubnets),
+    unsubscribeSubnets = subnetLog(unsubscribeSubnets)
+
+# TODO: local copy, renamed due to shadowing of the updateGossipStatus proc
+# adapted from nimbus-eth2
+proc localUpdateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
+  ## Subscribe to subnets that we are providing stability for or aggregating
+  ## and unsubscribe from the ones that are no longer relevant.
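+
+  # Rough numbers for the subscription threshold used below: with
+  # TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64 and HYSTERESIS_BUFFER = 16, gossip
+  # subscriptions start once the head is within 80 slots (~16 minutes of
+  # mainnet slots) of the wall clock. Guarded-out illustration only:
+  when false:
+    let wallSlotExample = Slot(1000)
+    let headSlotExample = Slot(930)
+    # 70 slots behind <= 64 + 16, so the node would not count as "behind"
+    doAssert distinctBase(wallSlotExample) - distinctBase(headSlotExample) <=
+      64'u64 + 16'u64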
+ + # Let the tracker know what duties are approaching - this will tell us how + # many stability subnets we need to be subscribed to and what subnets we'll + # soon be aggregating - in addition to the in-beacon-node duties, there may + # also be duties coming from the validator client, but we don't control when + # these arrive + await node.registerDuties(slot) + + # We start subscribing to gossip before we're fully synced - this allows time + # to subscribe before the sync end game + const + TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64 + HYSTERESIS_BUFFER = 16 + + static: doAssert high(ConsensusFork) == ConsensusFork.Electra + + let + head = node.dag.head + headDistance = + if slot > head.slot: (slot - head.slot).uint64 + else: 0'u64 + isBehind = + headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER + targetGossipState = + getTargetGossipState( + slot.epoch, + node.dag.cfg.ALTAIR_FORK_EPOCH, + node.dag.cfg.BELLATRIX_FORK_EPOCH, + node.dag.cfg.CAPELLA_FORK_EPOCH, + node.dag.cfg.DENEB_FORK_EPOCH, + node.dag.cfg.ELECTRA_FORK_EPOCH, + isBehind) + + doAssert targetGossipState.card <= 2 + + let + newGossipForks = targetGossipState - node.gossipState + oldGossipForks = node.gossipState - targetGossipState + + doAssert newGossipForks.card <= 2 + doAssert oldGossipForks.card <= 2 + + func maxGossipFork(gossipState: GossipState): int = + var res = -1 + for gossipFork in gossipState: + res = max(res, gossipFork.int) + res + + if maxGossipFork(targetGossipState) < maxGossipFork(node.gossipState) and + targetGossipState != {}: + warn "Unexpected clock regression during transition", + targetGossipState, + gossipState = node.gossipState + + if node.gossipState.card == 0 and targetGossipState.card > 0: + # We are synced, so we will connect + debug "Enabling topic subscriptions", + wallSlot = slot, + headSlot = head.slot, + headDistance, targetGossipState + + node.processor[].setupDoppelgangerDetection(slot) + + # Specially when waiting for genesis, we'll already be synced on startup - + # it might also happen on a sufficiently fast restart + + # We "know" the actions for the current and the next epoch + withState(node.dag.headState): + if node.consensusManager[].actionTracker.needsUpdate( + forkyState, slot.epoch): + let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect( + "Getting head EpochRef should never fail") + node.consensusManager[].actionTracker.updateActions( + epochRef.shufflingRef, epochRef.beacon_proposers) + + node.maybeUpdateActionTrackerNextEpoch(forkyState, slot.epoch + 1) + + if node.gossipState.card > 0 and targetGossipState.card == 0: + debug "Disabling topic subscriptions", + wallSlot = slot, + headSlot = head.slot, + headDistance + + node.processor[].clearDoppelgangerProtection() + + let forkDigests = node.forkDigests() + + const removeMessageHandlers: array[ConsensusFork, auto] = [ + removePhase0MessageHandlers, + removeAltairMessageHandlers, + removeAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) + removeCapellaMessageHandlers, + removeDenebMessageHandlers, + removeElectraMessageHandlers + ] + + for gossipFork in oldGossipForks: + removeMessageHandlers[gossipFork](node, forkDigests[gossipFork]) + + const addMessageHandlers: array[ConsensusFork, auto] = [ + addPhase0MessageHandlers, + addAltairMessageHandlers, + addAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) + addCapellaMessageHandlers, + addDenebMessageHandlers, + addElectraMessageHandlers + ] + + for gossipFork in newGossipForks: + 
addMessageHandlers[gossipFork](node, forkDigests[gossipFork], slot) + + node.gossipState = targetGossipState + node.doppelgangerChecked(slot.epoch) + node.updateAttestationSubnetHandlers(slot) + node.updateBlocksGossipStatus(slot, isBehind) + # node.updateLightClientGossipStatus(slot, isBehind) + +proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = + # Things we do when slot processing has ended and we're about to wait for the + # next slot + + # By waiting until close before slot end, ensure that preparation for next + # slot does not interfere with propagation of messages and with VC duties. + const endOffset = aggregateSlotOffset + nanos( + (NANOSECONDS_PER_SLOT - aggregateSlotOffset.nanoseconds.uint64).int64 div 2) + let endCutoff = node.beaconClock.fromNow(slot.start_beacon_time + endOffset) + if endCutoff.inFuture: + debug "Waiting for slot end", slot, endCutoff = shortLog(endCutoff.offset) + await sleepAsync(endCutoff.offset) + + if node.dag.needStateCachesAndForkChoicePruning(): + if node.attachedValidators[].validators.len > 0: + node.attachedValidators[] + .slashingProtection + # pruning is only done if the DB is set to pruning mode. + .pruneAfterFinalization( + node.dag.finalizedHead.slot.epoch() + ) + + # Delay part of pruning until latency critical duties are done. + # The other part of pruning, `pruneBlocksDAG`, is done eagerly. + # ---- + # This is the last pruning to do as it clears the "needPruning" condition. + node.consensusManager[].pruneStateCachesAndForkChoice() + + if node.config.historyMode == HistoryMode.Prune: + if not (slot + 1).is_epoch(): + # The epoch slot already is "heavy" due to the epoch processing, leave + # the pruning for later + node.dag.pruneHistory() + node.pruneBlobs(slot) + + when declared(GC_fullCollect): + # The slots in the beacon node work as frames in a game: we want to make + # sure that we're ready for the next one and don't get stuck in lengthy + # garbage collection tasks when time is of essence in the middle of a slot - + # while this does not guarantee that we'll never collect during a slot, it + # makes sure that all the scratch space we used during slot tasks (logging, + # temporary buffers etc) gets recycled for the next slot that is likely to + # need similar amounts of memory. + try: + GC_fullCollect() + except Defect as exc: + raise exc # Reraise to maintain call stack + except Exception: + # TODO upstream + raiseAssert "Unexpected exception during GC collection" + let gcCollectionTick = Moment.now() + + # Checkpoint the database to clear the WAL file and make sure changes in + # the database are synced with the filesystem. + node.db.checkpoint() + let + dbCheckpointTick = Moment.now() + dbCheckpointDur = dbCheckpointTick - gcCollectionTick + # db_checkpoint_seconds.inc(dbCheckpointDur.toFloatSeconds) + if dbCheckpointDur >= MinSignificantProcessingDuration: + info "Database checkpointed", dur = dbCheckpointDur + else: + debug "Database checkpointed", dur = dbCheckpointDur + + node.syncCommitteeMsgPool[].pruneData(slot) + if slot.is_epoch: + node.dynamicFeeRecipientsStore[].pruneOldMappings(slot.epoch) + + # Update upcoming actions - we do this every slot in case a reorg happens + let head = node.dag.head + if node.isSynced(head) and head.executionValid: + withState(node.dag.headState): + # maybeUpdateActionTrackerNextEpoch might not account for balance changes + # from the process_rewards_and_penalties() epoch transition but only from + # process_block() and other per-slot sources. 
+      # This mainly matters insofar as it might trigger
+      # process_effective_balance_updates() changes in that same epoch
+      # transition, which this function is therefore potentially blind to but
+      # which might then affect beacon proposers.
+      #
+      # Because this runs every slot, it can account naturally for slashings,
+      # which affect balances via slash_validator() when they happen, and for
+      # any missed sync committee participation via process_sync_aggregate(),
+      # but attestation penalties, for example, need specific handling, which
+      # is checked by maybeUpdateActionTrackerNextEpoch.
+      node.maybeUpdateActionTrackerNextEpoch(forkyState, slot.epoch + 1)
+
+  let
+    nextAttestationSlot =
+      node.consensusManager[].actionTracker.getNextAttestationSlot(slot)
+    nextProposalSlot =
+      node.consensusManager[].actionTracker.getNextProposalSlot(slot)
+    nextActionSlot = min(nextAttestationSlot, nextProposalSlot)
+    nextActionWaitTime = saturate(fromNow(node.beaconClock, nextActionSlot))
+
+  # -1 is a more useful output than 18446744073709551615 as an indicator of
+  # no future attestation/proposal known.
+  template formatInt64(x: Slot): int64 =
+    if x == high(uint64).Slot:
+      -1'i64
+    else:
+      toGaugeValue(x)
+
+  let
+    syncCommitteeSlot = slot + 1
+    syncCommitteeEpoch = syncCommitteeSlot.epoch
+    inCurrentSyncCommittee =
+      not node.getCurrentSyncCommiteeSubnets(syncCommitteeEpoch).isZeros()
+
+  template formatSyncCommitteeStatus(): string =
+    if inCurrentSyncCommittee:
+      "current"
+    elif not node.getNextSyncCommitteeSubnets(syncCommitteeEpoch).isZeros():
+      let slotsToNextSyncCommitteePeriod =
+        SLOTS_PER_SYNC_COMMITTEE_PERIOD -
+        since_sync_committee_period_start(syncCommitteeSlot)
+      # int64 conversion is safe
+      doAssert slotsToNextSyncCommitteePeriod <= SLOTS_PER_SYNC_COMMITTEE_PERIOD
+      "in " & toTimeLeftString(
+        SECONDS_PER_SLOT.int64.seconds * slotsToNextSyncCommitteePeriod.int64)
+    else:
+      "none"
+
+  info "Slot end",
+    slot = shortLog(slot),
+    nextActionWait =
+      if nextActionSlot == FAR_FUTURE_SLOT:
+        "n/a"
+      else:
+        shortLog(nextActionWaitTime),
+    nextAttestationSlot = formatInt64(nextAttestationSlot),
+    nextProposalSlot = formatInt64(nextProposalSlot),
+    syncCommitteeDuties = formatSyncCommitteeStatus(),
+    head = shortLog(head)
+
+  # if nextActionSlot != FAR_FUTURE_SLOT:
+  #   next_action_wait.set(nextActionWaitTime.toFloatSeconds)
+
+  # next_proposal_wait.set(
+  #   if nextProposalSlot != FAR_FUTURE_SLOT:
+  #     saturate(fromNow(node.beaconClock, nextProposalSlot)).toFloatSeconds()
+  #   else:
+  #     Inf)
+
+  # sync_committee_active.set(if inCurrentSyncCommittee: 1 else: 0)
+
+  let epoch = slot.epoch
+  if epoch + 1 >= node.network.forkId.next_fork_epoch:
+    # Update 1 epoch early to block non-fork-ready peers
+    node.network.updateForkId(epoch, node.dag.genesis_validators_root)
+
+  # When we're not behind schedule, we'll speculatively update the clearance
+  # state in anticipation of receiving the next block - we do it after
+  # logging slot end since the nextActionWaitTime can be short
+  let advanceCutoff = node.beaconClock.fromNow(
+    slot.start_beacon_time() + chronos.seconds(int(SECONDS_PER_SLOT - 1)))
+  if advanceCutoff.inFuture:
+    # We wait until there's only a second left before the next slot begins, then
+    # we advance the clearance state to the next slot - this gives us a high
+    # probability of being prepared for the block that will arrive and the
+    # epoch processing that follows
+    await sleepAsync(advanceCutoff.offset)
+    node.dag.advanceClearanceState()
+
+  # Prepare action tracker for the next slot
+  node.consensusManager[].actionTracker.updateSlot(slot + 1)
+
+  # The last thing we do is to perform the subscriptions and unsubscriptions
+  # for the next slot, just before that slot starts - because of the advance
+  # cutoff above, this will be done just before the next slot starts
+  node.updateSyncCommitteeTopics(slot + 1)
+
+  await node.localUpdateGossipStatus(slot + 1)
+
+func formatNextConsensusFork(
+    node: BeaconNode, withVanityArt = false): Opt[string] =
+  let consensusFork =
+    node.dag.cfg.consensusForkAtEpoch(node.dag.head.slot.epoch)
+  if consensusFork == ConsensusFork.high:
+    return Opt.none(string)
+  let
+    nextConsensusFork = consensusFork.succ()
+    nextForkEpoch = node.dag.cfg.consensusForkEpoch(nextConsensusFork)
+  if nextForkEpoch == FAR_FUTURE_EPOCH:
+    return Opt.none(string)
+  Opt.some(
+    (if withVanityArt: nextConsensusFork.getVanityMascot & " " else: "") &
+    $nextConsensusFork & ":" & $nextForkEpoch)
+
+func syncStatus(node: BeaconNode, wallSlot: Slot): string =
+  let optimisticHead = not node.dag.head.executionValid
+  if node.syncManager.inProgress:
+    let
+      optimisticSuffix =
+        if optimisticHead:
+          "/opt"
+        else:
+          ""
+      # lightClientSuffix =
+      #   if node.consensusManager[].shouldSyncOptimistically(wallSlot):
+      #     " - lc: " & $shortLog(node.consensusManager[].optimisticHead)
+      #   else:
+      #     ""
+    node.syncManager.syncStatus & optimisticSuffix #& lightClientSuffix
+  elif node.backfiller.inProgress:
+    "backfill: " & node.backfiller.syncStatus
+  elif optimisticHead:
+    "synced/opt"
+  else:
+    "synced"
+
+func connectedPeersCount(node: BeaconNode): int =
+  len(node.network.peerPool)
+
+func formatGwei(amount: Gwei): string =
+  # TODO This is implemented in quite a silly way.
+  # Better routines for formatting decimal numbers
+  # should exist somewhere else.
+  let
+    eth = distinctBase(amount) div 1000000000
+    remainder = distinctBase(amount) mod 1000000000
+
+  result = $eth
+  if remainder != 0:
+    result.add '.'
+    let remainderStr = $remainder
+    for i in remainderStr.len ..< 9:
+      result.add '0'
+    result.add remainderStr
+    while result[^1] == '0':
+      result.setLen(result.len - 1)
+
+when not defined(windows):
+  proc initStatusBar(node: BeaconNode) {.raises: [ValueError].} =
+    if not isatty(stdout): return
+    if not node.config.statusBarEnabled: return
+
+    try:
+      enableTrueColors()
+    except Exception as exc: # TODO Exception
+      error "Couldn't enable colors", err = exc.msg
+
+    proc dataResolver(expr: string): string {.raises: [].} =
+      template justified: untyped = node.dag.head.atEpochStart(
+        getStateField(
+          node.dag.headState, current_justified_checkpoint).epoch)
+      # TODO:
+      # We should introduce a general API for resolving dot expressions
+      # such as `db.latest_block.slot` or `metrics.connected_peers`.
+      # Such an API can be shared between the RPC back-end, CLI tools
+      # such as ncli, a potential GraphQL back-end and so on.
+      # The status bar feature would allow the user to specify an
+      # arbitrary expression that is resolvable through this API.
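+      # Example: with `--status-bar-contents` set to something like
+      #   "peers: $connected_peers; slot: $slot; $sync_status"
+      # each `$name` placeholder is passed to this resolver as `expr`, so the
+      # bar renders e.g. "peers: 42; slot: 1234; synced".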
+ case expr.toLowerAscii + of "version": + versionAsStr + + of "full_version": + fullVersionStr + + of "connected_peers": + $(node.connectedPeersCount) + + of "head_root": + shortLog(node.dag.head.root) + of "head_epoch": + $(node.dag.head.slot.epoch) + of "head_epoch_slot": + $(node.dag.head.slot.since_epoch_start) + of "head_slot": + $(node.dag.head.slot) + + of "justifed_root": + shortLog(justified.blck.root) + of "justifed_epoch": + $(justified.slot.epoch) + of "justifed_epoch_slot": + $(justified.slot.since_epoch_start) + of "justifed_slot": + $(justified.slot) + + of "finalized_root": + shortLog(node.dag.finalizedHead.blck.root) + of "finalized_epoch": + $(node.dag.finalizedHead.slot.epoch) + of "finalized_epoch_slot": + $(node.dag.finalizedHead.slot.since_epoch_start) + of "finalized_slot": + $(node.dag.finalizedHead.slot) + + of "epoch": + $node.currentSlot.epoch + + of "epoch_slot": + $(node.currentSlot.since_epoch_start) + + of "slot": + $node.currentSlot + + of "slots_per_epoch": + $SLOTS_PER_EPOCH + + of "slot_trailing_digits": + var slotStr = $node.currentSlot + if slotStr.len > 3: slotStr = slotStr[^3..^1] + slotStr + + of "attached_validators_balance": + formatGwei(node.attachedValidatorBalanceTotal) + + of "next_consensus_fork": + let nextConsensusForkDescription = + node.formatNextConsensusFork(withVanityArt = true) + if nextConsensusForkDescription.isNone: + "" + else: + " (scheduled " & nextConsensusForkDescription.get & ")" + + of "sync_status": + node.syncStatus(node.currentSlot) + else: + # We ignore typos for now and just render the expression + # as it was written. TODO: come up with a good way to show + # an error message to the user. + "$" & expr + + var statusBar = StatusBarView.init( + node.config.statusBarContents, + dataResolver) + + when compiles(defaultChroniclesStream.outputs[0].writer): + let tmp = defaultChroniclesStream.outputs[0].writer + + defaultChroniclesStream.outputs[0].writer = + proc (logLevel: LogLevel, msg: LogOutputStr) {.raises: [].} = + try: + # p.hidePrompt + erase statusBar + # p.writeLine msg + tmp(logLevel, msg) + render statusBar + # p.showPrompt + except Exception as e: # render raises Exception + logLoggingFailure(cstring(msg), e) + + proc statusBarUpdatesPollingLoop() {.async.} = + try: + while true: + update statusBar + erase statusBar + render statusBar + await sleepAsync(chronos.seconds(1)) + except CatchableError as exc: + warn "Failed to update status bar, no further updates", err = exc.msg + + asyncSpawn statusBarUpdatesPollingLoop() + +proc initializeNetworking(node: BeaconNode) {.async.} = + node.installMessageValidators() + + info "Listening to incoming network requests" + await node.network.startListening() + + let addressFile = node.config.dataDir / "beacon_node.enr" + writeFile(addressFile, node.network.announcedENR.toURI) + + await node.network.start() + +proc installRestHandlers(restServer: RestServerRef, node: BeaconNode) = + restServer.router.installBeaconApiHandlers(node) + restServer.router.installBuilderApiHandlers(node) + restServer.router.installConfigApiHandlers(node) + restServer.router.installDebugApiHandlers(node) + restServer.router.installEventApiHandlers(node) + restServer.router.installNimbusApiHandlers(node) + restServer.router.installNodeApiHandlers(node) + restServer.router.installValidatorApiHandlers(node) + restServer.router.installRewardsApiHandlers(node) + if node.dag.lcDataStore.serve: + restServer.router.installLightClientApiHandlers(node) + +from beacon_chain/spec/datatypes/capella import 
SignedBeaconBlock + +proc stop(node: BeaconNode) = + bnStatus = BeaconNodeStatus.Stopping + notice "Graceful shutdown" + if not node.config.inProcessValidators: + try: + node.vcProcess.close() + except Exception as exc: + warn "Couldn't close vc process", msg = exc.msg + try: + waitFor node.network.stop() + except CatchableError as exc: + warn "Couldn't stop network", msg = exc.msg + + node.attachedValidators[].slashingProtection.close() + node.attachedValidators[].close() + node.db.close() + notice "Databases closed" + +func verifyFinalization(node: BeaconNode, slot: Slot) = + # Epoch must be >= 4 to check finalization + const SETTLING_TIME_OFFSET = 1'u64 + let epoch = slot.epoch() + + # Don't static-assert this -- if this isn't called, don't require it + doAssert SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET + + # Intentionally, loudly assert. Point is to fail visibly and unignorably + # during testing. + if epoch >= 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET: + let finalizedEpoch = + node.dag.finalizedHead.slot.epoch() + # Finalization rule 234, that has the most lag slots among the cases, sets + # state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3 + # and then state.slot gets incremented, to increase the maximum offset, if + # finalization occurs every slot, to 4 slots vs scheduledSlot. + doAssert finalizedEpoch + 4 >= epoch + +proc onSlotStart(node: BeaconNode, wallTime: BeaconTime, + lastSlot: Slot): Future[bool] {.async.} = + ## Called at the beginning of a slot - usually every slot, but sometimes might + ## skip a few in case we're running late. + ## wallTime: current system time - we will strive to perform all duties up + ## to this point in time + ## lastSlot: the last slot that we successfully processed, so we know where to + ## start work from - there might be jumps if processing is delayed + let + # The slot we should be at, according to the clock + wallSlot = wallTime.slotOrZero + # If everything was working perfectly, the slot that we should be processing + expectedSlot = lastSlot + 1 + finalizedEpoch = node.dag.finalizedHead.blck.slot.epoch() + delay = wallTime - expectedSlot.start_beacon_time() + + node.processingDelay = Opt.some(nanoseconds(delay.nanoseconds)) + + block: + logScope: + slot = shortLog(wallSlot) + epoch = shortLog(wallSlot.epoch) + sync = node.syncStatus(wallSlot) + peers = len(node.network.peerPool) + head = shortLog(node.dag.head) + finalized = shortLog(getStateField( + node.dag.headState, finalized_checkpoint)) + delay = shortLog(delay) + let nextConsensusForkDescription = node.formatNextConsensusFork() + if nextConsensusForkDescription.isNone: + info "Slot start" + else: + info "Slot start", nextFork = nextConsensusForkDescription.get + + # Check before any re-scheduling of onSlotStart() + if checkIfShouldStopAtEpoch(wallSlot, node.config.stopAtEpoch): + quit(0) + + when defined(windows): + if node.config.runAsService: + reportServiceStatusSuccess() + + # TODO: metrics + # beacon_slot.set wallSlot.toGaugeValue + # beacon_current_epoch.set wallSlot.epoch.toGaugeValue + + # both non-negative, so difference can't overflow or underflow int64 + # finalization_delay.set( + # wallSlot.epoch.toGaugeValue - finalizedEpoch.toGaugeValue) + + if node.config.strictVerification: + verifyFinalization(node, wallSlot) + + node.consensusManager[].updateHead(wallSlot) + + await node.handleValidatorDuties(lastSlot, wallSlot) + + await onSlotEnd(node, wallSlot) + + # 
https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#registration-dissemination
+  # This specification suggests validators re-submit to builder software every
+  # `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION` epochs.
+  if wallSlot.is_epoch and
+      wallSlot.epoch mod EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION == 0:
+    asyncSpawn node.registerValidators(wallSlot.epoch)
+
+  return false
+
+proc startBackfillTask(node: BeaconNode) {.async.} =
+  while node.dag.needsBackfill:
+    if not node.syncManager.inProgress:
+      # Only start the backfiller if it's needed _and_ head sync has completed -
+      # if we lose sync after having synced head, we could stop the backfiller,
+      # but this should be a fringe case - might as well keep the logic simple for
+      # now
+      node.backfiller.start()
+      return
+
+    await sleepAsync(chronos.seconds(2))
+
+proc onSecond(node: BeaconNode, time: Moment) =
+  # Nim GC metrics (for the main thread)
+
+  # TODO: Collect metrics
+  # updateThreadMetrics()
+
+  if node.config.stopAtSyncedEpoch != 0 and
+      node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch:
+    notice "Shutting down after having reached the target synced epoch"
+    bnStatus = BeaconNodeStatus.Stopping
+
+proc runOnSecondLoop(node: BeaconNode) {.async.} =
+  const
+    sleepTime = chronos.seconds(1)
+    nanosecondsIn1s = float(sleepTime.nanoseconds)
+  while true:
+    let start = chronos.now(chronos.Moment)
+    await chronos.sleepAsync(sleepTime)
+    let afterSleep = chronos.now(chronos.Moment)
+    let sleepTime = afterSleep - start
+    node.onSecond(start)
+    let finished = chronos.now(chronos.Moment)
+    let processingTime = finished - afterSleep
+
+    # TODO: metrics
+    # ticks_delay.set(sleepTime.nanoseconds.float / nanosecondsIn1s)
+    trace "onSecond task completed", sleepTime, processingTime
+
+proc run(node: BeaconNode) {.raises: [CatchableError].} =
+  bnStatus = BeaconNodeStatus.Running
+
+  if not isNil(node.restServer):
+    node.restServer.installRestHandlers(node)
+    node.restServer.start()
+
+  if not isNil(node.keymanagerServer):
+    doAssert not isNil(node.keymanagerHost)
+    node.keymanagerServer.router.installKeymanagerHandlers(node.keymanagerHost[])
+    if node.keymanagerServer != node.restServer:
+      node.keymanagerServer.start()
+
+  let
+    wallTime = node.beaconClock.now()
+    wallSlot = wallTime.slotOrZero()
+
+  # node.startLightClient()
+  node.requestManager.start()
+  node.syncManager.start()
+
+  if node.dag.needsBackfill(): asyncSpawn node.startBackfillTask()
+
+  waitFor node.localUpdateGossipStatus(wallSlot)
+
+  for web3signerUrl in node.config.web3SignerUrls:
+    # TODO
+    # The current strategy polls all remote signers independently
+    # from each other, which may lead to some race conditions if
+    # validators are migrated from one signer to another
+    # (because the updates to our validator pool are not atomic).
+    # Consider using different strategies that would detect such
+    # race conditions.
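+    # Added note (illustrative only, not part of the upstream flow): the race
+    # described in the TODO above can be pictured as two independent pollers
+    # and a hypothetical keystore K being migrated from signer A to signer B:
+    #   t0: poller(A) fetches A's key list, no longer sees K -> K removed locally
+    #   t1: poller(B) has not polled yet                     -> K attached nowhere
+    #   t2: poller(B) fetches B's key list, sees K           -> K re-added locally
+    # Between t0 and t2 the validator is missing from the local pool, because
+    # the two pool updates are not performed atomically.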
+    asyncSpawn node.pollForDynamicValidators(
+      web3signerUrl, node.config.web3signerUpdateInterval)
+
+  asyncSpawn runSlotLoop(node, wallTime, onSlotStart)
+  asyncSpawn runOnSecondLoop(node)
+  asyncSpawn runQueueProcessingLoop(node.blockProcessor)
+  asyncSpawn runKeystoreCachePruningLoop(node.keystoreCache)
+
+  # main event loop
+  while bnStatus == BeaconNodeStatus.Running:
+    poll() # if poll fails, the network is broken
+
+  # time to say goodbye
+  node.stop()
+
+proc start*(node: BeaconNode) {.raises: [CatchableError].} =
+  let
+    head = node.dag.head
+    finalizedHead = node.dag.finalizedHead
+    genesisTime = node.beaconClock.fromNow(start_beacon_time(Slot 0))
+
+  notice "Starting beacon node",
+    version = fullVersionStr,
+    nimVersion = NimVersion,
+    enr = node.network.announcedENR.toURI,
+    peerId = $node.network.switch.peerInfo.peerId,
+    timeSinceFinalization =
+      node.beaconClock.now() - finalizedHead.slot.start_beacon_time(),
+    head = shortLog(head),
+    justified = shortLog(getStateField(
+      node.dag.headState, current_justified_checkpoint)),
+    finalized = shortLog(getStateField(
+      node.dag.headState, finalized_checkpoint)),
+    finalizedHead = shortLog(finalizedHead),
+    SLOTS_PER_EPOCH,
+    SECONDS_PER_SLOT,
+    SPEC_VERSION,
+    dataDir = node.config.dataDir.string,
+    validators = node.attachedValidators[].count
+
+  if genesisTime.inFuture:
+    notice "Waiting for genesis", genesisIn = genesisTime.offset
+
+  waitFor node.initializeNetworking()
+
+  node.elManager.start()
+  node.run()
+
+## Runs the beacon node.
+## Adapted from nimbus-eth2.
+proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.raises: [CatchableError].} =
+
+  # TODO: Define these variables somewhere
+  info "Launching beacon node",
+    version = fullVersionStr,
+    bls_backend = $BLS_BACKEND,
+    const_preset,
+    cmdParams = commandLineParams(),
+    config
+
+  template ignoreDeprecatedOption(option: untyped): untyped =
+    if config.option.isSome:
+      warn "Config option is deprecated",
+        option = config.option.get
+  ignoreDeprecatedOption requireEngineAPI
+  ignoreDeprecatedOption safeSlotsToImportOptimistically
+  ignoreDeprecatedOption terminalTotalDifficultyOverride
+  ignoreDeprecatedOption optimistic
+  ignoreDeprecatedOption validatorMonitorTotals
+  ignoreDeprecatedOption web3ForcePolling
+
+  # TODO: figure out the comment on createPidFile
+  # createPidFile(config.dataDir.string / "beacon_node.pid")
+
+  config.createDumpDirs()
+
+  # if config.metricsEnabled:
+  #   let metricsAddress = config.metricsAddress
+  #   notice "Starting metrics HTTP server",
+  #     url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics"
+  #   try:
+  #     startMetricsHttpServer($metricsAddress, config.metricsPort)
+  #   except CatchableError as exc:
+  #     raise exc
+  #   except Exception as exc:
+  #     raiseAssert exc.msg # TODO fix metrics
+
+  # Nim GC metrics (for the main thread) will be collected in onSecond(), but
+  # we disable piggy-backing on other metrics here.
+  setSystemMetricsAutomaticUpdate(false)
+
+  # There are no managed event loops in here to do a graceful shutdown, but
+  # letting the default Ctrl+C handler exit is safe, since we only read from
+  # the db.
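+  # Added overview comment: the remaining startup steps below (adapted from
+  # nimbus-eth2) load the metadata of the selected network, fold its bootstrap
+  # nodes into the runtime config, install the Ctrl+C/SIGTERM handlers, load
+  # the KZG trusted setup, and finally construct and start the BeaconNode.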
+ let metadata = config.loadEth2Network() + + # Updating the config based on the metadata certainly is not beautiful but it + # works + for node in metadata.bootstrapNodes: + config.bootstrapNodes.add node + + ## Ctrl+C handling + proc controlCHandler() {.noconv.} = + when defined(windows): + # workaround for https://github.com/nim-lang/Nim/issues/4057 + try: + setupForeignThreadGc() + except Exception as exc: raiseAssert exc.msg # shouldn't happen + notice "Shutting down after having received SIGINT" + bnStatus = BeaconNodeStatus.Stopping + try: + setControlCHook(controlCHandler) + except Exception as exc: # TODO Exception + warn "Cannot set ctrl-c handler", msg = exc.msg + + # equivalent SIGTERM handler + when defined(posix): + proc SIGTERMHandler(signal: cint) {.noconv.} = + notice "Shutting down after having received SIGTERM" + bnStatus = BeaconNodeStatus.Stopping + c_signal(ansi_c.SIGTERM, SIGTERMHandler) + + block: + let res = + if config.trustedSetupFile.isNone: + conf.loadKzgTrustedSetup() + else: + conf.loadKzgTrustedSetup(config.trustedSetupFile.get) + if res.isErr(): + raiseAssert res.error() + + let node = waitFor BeaconNode.initBeaconNode(rng, config, metadata) + + if bnStatus == BeaconNodeStatus.Stopping: + return + + when not defined(windows): + # This status bar can lock a Windows terminal emulator, blocking the whole + # event loop (seen on Windows 10, with a default MSYS2 terminal). + initStatusBar(node) + + if node.nickname != "": + dynamicLogScope(node = node.nickname): node.start() + else: + node.start() ## --end copy paste file from nimbus-eth2/nimbus_beacon_node.nim @@ -149,4 +2428,4 @@ proc consensusWrapper*(parameters: TaskParameters) {.raises: [CatchableError].} fatal "error", message = e.msg isShutDownRequired.store(true) - warn "\tExiting consensus wrapper" \ No newline at end of file + warn "\tExiting consensus wrapper"