# beacon_chain
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
2023-01-20 14:14:37 +00:00
|
|
|
{.push raises: [].}
|
2021-03-11 10:10:57 +00:00
|
|
|
|
|
|
|
import
|
2023-03-05 01:40:21 +00:00
|
|
|
chronicles, chronos, web3/[ethtypes, engine_api_types],
|
2021-08-12 13:08:20 +00:00
|
|
|
../spec/datatypes/base,
|
2022-03-25 11:40:10 +00:00
|
|
|
../consensus_object_pools/[blockchain_dag, block_quarantine, attestation_pool],
|
2023-03-05 01:40:21 +00:00
|
|
|
../eth1/eth1_monitor,
|
|
|
|
../beacon_clock
|
2021-03-11 10:10:57 +00:00
|
|
|
|
2023-01-06 21:01:10 +00:00
|
|
|
from ../spec/beaconstate import get_expected_withdrawals
|
|
|
|
from ../spec/datatypes/capella import Withdrawal
|
2022-08-23 16:19:52 +00:00
|
|
|
from ../spec/eth2_apis/dynamic_fee_recipients import
|
|
|
|
DynamicFeeRecipientsStore, getDynamicFeeRecipient
|
|
|
|
from ../validators/keystore_management import
|
2023-02-15 15:10:31 +00:00
|
|
|
KeymanagerHost, getSuggestedFeeRecipient, getSuggestedGasLimit
|
2022-09-07 18:34:52 +00:00
|
|
|
from ../validators/action_tracker import ActionTracker, getNextProposalSlot
|
2022-08-23 16:19:52 +00:00
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
type
  ConsensusManager* = object
    ## Coordinates attestation-driven fork choice, DAG head updates and
    ## execution-layer (engine API) forkchoiceUpdated notifications.

    # `expectBlock` support: `expectedBlockReceived` completes with `true`
    # once the DAG head reaches `expectedSlot` (see `checkExpectedBlock`);
    # nil when no expectation is outstanding
    expectedSlot: Slot
    expectedBlockReceived: Future[bool]

    # Validated & Verified
    # ----------------------------------------------------------------
    dag*: ChainDAGRef
    attestationPool*: ref AttestationPool

    # Missing info
    # ----------------------------------------------------------------
    quarantine*: ref Quarantine

    # Execution layer integration
    # ----------------------------------------------------------------
    elManager*: ELManager

    # Allow determination of whether there's an upcoming proposal
    # ----------------------------------------------------------------
    actionTracker*: ActionTracker

    # Allow determination of preferred fee recipient during proposals
    # ----------------------------------------------------------------
    dynamicFeeRecipientsStore: ref DynamicFeeRecipientsStore
    validatorsDir: string
    defaultFeeRecipient: Eth1Address
    defaultGasLimit: uint64

    # Tracking last proposal forkchoiceUpdated payload information
    # ----------------------------------------------------------------
    optimisticHead: tuple[bid: BlockId, execution_block_hash: Eth2Digest]
|
2022-08-23 16:19:52 +00:00
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
# Initialization
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func new*(T: type ConsensusManager,
          dag: ChainDAGRef,
          attestationPool: ref AttestationPool,
          quarantine: ref Quarantine,
          elManager: ELManager,
          actionTracker: ActionTracker,
          dynamicFeeRecipientsStore: ref DynamicFeeRecipientsStore,
          validatorsDir: string,
          defaultFeeRecipient: Eth1Address,
          defaultGasLimit: uint64
         ): ref ConsensusManager =
  ## Allocate and initialize a `ConsensusManager` from its collaborators;
  ## fields not listed here remain zero-initialized.
  new result
  result.dag = dag
  result.attestationPool = attestationPool
  result.quarantine = quarantine
  result.elManager = elManager
  result.actionTracker = actionTracker
  result.dynamicFeeRecipientsStore = dynamicFeeRecipientsStore
  result.validatorsDir = validatorsDir
  result.defaultFeeRecipient = defaultFeeRecipient
  result.defaultGasLimit = defaultGasLimit
|
|
|
|
|
|
|
|
# Consensus Management
|
|
|
|
# -----------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
proc checkExpectedBlock(self: var ConsensusManager) =
  ## Complete the outstanding `expectBlock` future once the DAG head has
  ## reached the expected slot; no-op when nothing is awaited or the head
  ## is still behind.
  if self.expectedBlockReceived != nil and
      self.dag.head.slot >= self.expectedSlot:
    self.expectedBlockReceived.complete(true)
    self.expectedBlockReceived = nil # Don't keep completed futures around!
|
|
|
|
|
|
|
|
proc expectBlock*(self: var ConsensusManager, expectedSlot: Slot): Future[bool] =
  ## Return a future that will complete when a head is selected whose slot is
  ## equal or greater than the given slot, or a new expectation is created
  let previous = self.expectedBlockReceived
  if previous != nil:
    # Resolve the superseded expectation instead of leaving it hanging - an
    # alternative would be to cancel it, but it doesn't make any practical
    # difference for now
    previous.complete(false)

  let fut = newFuture[bool]("ConsensusManager.expectBlock")
  self.expectedSlot = expectedSlot
  self.expectedBlockReceived = fut

  # The expected block may already have been processed by the time this
  # expectation is registered - settle immediately in that case!
  self.checkExpectedBlock()

  fut
|
|
|
|
|
2022-07-04 20:35:33 +00:00
|
|
|
from web3/engine_api_types import
|
2023-03-06 16:19:15 +00:00
|
|
|
ForkchoiceUpdatedResponse, PayloadExecutionStatus, PayloadStatusV1,
|
|
|
|
PayloadAttributesV1
|
2022-07-04 20:35:33 +00:00
|
|
|
|
|
|
|
func `$`(h: BlockHash): string =
  ## Stringify an engine-API block hash via its `Eth2Digest` representation.
  $h.asEth2Digest
|
|
|
|
|
2022-08-29 12:16:35 +00:00
|
|
|
func shouldSyncOptimistically*(
    optimisticSlot, dagSlot, wallSlot: Slot): bool =
  ## Determine whether an optimistic execution block hash should be reported
  ## to the EL client instead of the current head as determined by fork choice.
  const
    minProgress = 8 * SLOTS_PER_EPOCH  # Set arbitrarily
    maxAge = 2 * SLOTS_PER_EPOCH       # Set arbitrarily

  if optimisticSlot < dagSlot or optimisticSlot - dagSlot < minProgress:
    # Optimistic head is not sufficiently ahead of the DAG
    false
  elif optimisticSlot < max(wallSlot, maxAge.Slot) - maxAge:
    # Optimistic head has not synced sufficiently close to the wall slot
    false
  else:
    true
|
|
|
|
|
|
|
|
func shouldSyncOptimistically*(self: ConsensusManager, wallSlot: Slot): bool =
  ## Whether the recorded optimistic head should be reported to the EL;
  ## always false when no execution block hash has been recorded.
  not self.optimisticHead.execution_block_hash.isZero and
    shouldSyncOptimistically(
      optimisticSlot = self.optimisticHead.bid.slot,
      dagSlot = getStateField(self.dag.headState, slot),
      wallSlot = wallSlot)
|
|
|
|
|
|
|
|
func optimisticHead*(self: ConsensusManager): BlockId =
  ## The most recently recorded optimistic head block id.
  self.optimisticHead.bid
|
|
|
|
|
|
|
|
func optimisticExecutionPayloadHash*(self: ConsensusManager): Eth2Digest =
  ## Execution block hash associated with the recorded optimistic head.
  self.optimisticHead.execution_block_hash
|
|
|
|
|
|
|
|
func setOptimisticHead*(
    self: var ConsensusManager,
    bid: BlockId, execution_block_hash: Eth2Digest) =
  ## Record the optimistic head together with its execution block hash.
  self.optimisticHead = (bid: bid, execution_block_hash: execution_block_hash)
|
|
|
|
|
2023-03-05 01:40:21 +00:00
|
|
|
proc updateExecutionClientHead(self: ref ConsensusManager,
                               newHead: BeaconHead): Future[Opt[void]] {.async.} =
  ## Notify the execution client of the new head via `forkchoiceUpdated` and
  ## record the verdict in the DAG: verified when valid, unviable when
  ## invalid, optimistic when accepted/syncing. Returns `none` (err) when the
  ## EL reported the payload as invalid, `ok` otherwise.
  let headExecutionPayloadHash = self.dag.loadExecutionBlockHash(newHead.blck)

  if headExecutionPayloadHash.isZero:
    # Blocks without execution payloads can't be optimistic.
    self.dag.markBlockVerified(self.quarantine[], newHead.blck.root)
    return Opt[void].ok()

  # Shared call shape; only the payload-attributes *type* differs per fork
  # (no attributes are actually sent here - `none attributes`)
  template callForkchoiceUpdated(attributes: untyped): auto =
    await self.elManager.forkchoiceUpdated(
      headBlockHash = headExecutionPayloadHash,
      safeBlockHash = newHead.safeExecutionPayloadHash,
      finalizedBlockHash = newHead.finalizedExecutionPayloadHash,
      payloadAttributes = none attributes)

  # Can't use dag.head here because it hasn't been updated yet
  # NOTE(review): `latestValidHash` is not used below - confirm intentional
  let (payloadExecutionStatus, latestValidHash) =
    case self.dag.cfg.consensusForkAtEpoch(newHead.blck.bid.slot.epoch)
    of ConsensusFork.Capella, ConsensusFork.Deneb:
      # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/shanghai.md#specification-1
      # Consensus layer client MUST call this method instead of
      # `engine_forkchoiceUpdatedV1` under any of the following conditions:
      # `headBlockHash` references a block which `timestamp` is greater or
      # equal to the Shanghai timestamp
      callForkchoiceUpdated(PayloadAttributesV2)
    of ConsensusFork.Phase0, ConsensusFork.Altair, ConsensusFork.Bellatrix:
      callForkchoiceUpdated(PayloadAttributesV1)

  case payloadExecutionStatus
  of PayloadExecutionStatus.valid:
    self.dag.markBlockVerified(self.quarantine[], newHead.blck.root)
  of PayloadExecutionStatus.invalid, PayloadExecutionStatus.invalid_block_hash:
    # EL rejected the payload: exclude it from fork choice and remember the
    # root as unviable so it is not re-imported
    self.attestationPool[].forkChoice.mark_root_invalid(newHead.blck.root)
    self.quarantine[].addUnviable(newHead.blck.root)
    return Opt.none(void)
  of PayloadExecutionStatus.accepted, PayloadExecutionStatus.syncing:
    # EL has not fully validated yet - track the block as optimistic
    self.dag.optimisticRoots.incl newHead.blck.root

  return Opt[void].ok()
|
|
|
|
|
2023-03-02 16:13:35 +00:00
|
|
|
func getKnownValidatorsForBlsChangeTracking(
    self: ConsensusManager, newHead: BlockRef): seq[ValidatorIndex] =
  ## Collect locally-known validator indices (capped) for BLS-to-execution
  ## change tracking; empty before Capella, where no such change can exist.
  # Ensure that large nodes won't be overloaded by a nice-to-have, but
  # inessential cosmetic feature.
  const MAX_CHECKED_INDICES = 64

  if newHead.bid.slot.epoch >= self.dag.cfg.CAPELLA_FORK_EPOCH:
    let capacity = min(
      len(self.actionTracker.knownValidators), MAX_CHECKED_INDICES)
    var knownIndices = newSeqOfCap[ValidatorIndex](capacity)
    for validatorIndex in self.actionTracker.knownValidators.keys():
      knownIndices.add validatorIndex
      if knownIndices.len >= MAX_CHECKED_INDICES:
        break
    knownIndices
  else:
    # It is not possible for any BLS to execution changes, for any validator,
    # to have been yet processed.
    # https://github.com/nim-lang/Nim/issues/19802
    (static(@[]))
|
|
|
|
|
2022-07-21 18:26:36 +00:00
|
|
|
proc updateHead*(self: var ConsensusManager, newHead: BlockRef) =
  ## Trigger fork choice and update the DAG with the new head block
  ## This does not automatically prune the DAG after finalization
  ## `pruneFinalized` must be called for pruning.

  # Store the new head in the chain DAG - this may cause epochs to be
  # justified and finalized
  let knownValidators = self.getKnownValidatorsForBlsChangeTracking(newHead)
  self.dag.updateHead(newHead, self.quarantine[], knownValidators)

  # Settle any outstanding `expectBlock` future now that the head moved
  self.checkExpectedBlock()
|
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
proc updateHead*(self: var ConsensusManager, wallSlot: Slot) =
  ## Trigger fork choice and update the DAG with the new head block
  ## This does not automatically prune the DAG after finalization
  ## `pruneFinalized` must be called for pruning.

  # Grab the new head according to our latest attestation data
  let newHead = self.attestationPool[].selectOptimisticHead(
      wallSlot.start_beacon_time).valueOr:
    # Keep the previous head rather than failing hard - head selection can
    # legitimately fail, e.g. before fork choice has any viable candidates
    warn "Head selection failed, using previous head",
      head = shortLog(self.dag.head), wallSlot
    return

  if self.dag.loadExecutionBlockHash(newHead.blck).isZero:
    # Blocks without execution payloads can't be optimistic.
    self.dag.markBlockVerified(self.quarantine[], newHead.blck.root)

  self.updateHead(newHead.blck)
|
2021-03-11 10:10:57 +00:00
|
|
|
|
2022-09-08 00:31:24 +00:00
|
|
|
func isSynced(dag: ChainDAGRef, wallSlot: Slot): bool =
  ## Synced = DAG head within `defaultSyncHorizon` slots of the wall clock
  ## and not merely optimistically imported.
  # This is a tweaked version of the validator_duties isSynced. TODO, refactor
  # that one so this becomes the default version, with the same information to
  # work with. For the head slot, use the DAG head regardless of what head the
  # proposer forkchoiceUpdated is using, because by the validator_duties might
  # be ready to actually propose, it's going to do so from the DAG head. Given
  # the defaultSyncHorizon, it will start triggering in time so that potential
  # discrepancies between the head here, and the head the DAG has (which might
  # not yet be updated) won't be visible.
  const defaultSyncHorizon = 50

  dag.head.slot + defaultSyncHorizon >= wallSlot and
    not dag.is_optimistic(dag.head.root)
|
|
|
|
|
2022-09-07 18:34:52 +00:00
|
|
|
proc checkNextProposer(
    dag: ChainDAGRef, actionTracker: ActionTracker,
    dynamicFeeRecipientsStore: ref DynamicFeeRecipientsStore,
    wallSlot: Slot):
    Opt[(ValidatorIndex, ValidatorPubKey)] =
  ## Return index and pubkey of the next-slot proposer when it is locally
  ## relevant (tracked by the action tracker or has a registered dynamic fee
  ## recipient); `none` otherwise, or while not synced.
  let nextWallSlot = wallSlot + 1

  # Avoid long rewinds during syncing, when it's not going to propose. Though
  # this is preparing for a proposal on `nextWallSlot`, it can't possibly yet
  # be on said slot, so still check just `wallSlot`.
  if not dag.isSynced(wallSlot):
    return Opt.none((ValidatorIndex, ValidatorPubKey))

  # `?` propagates a `none` from getProposer
  let proposer = ? dag.getProposer(dag.head, nextWallSlot)

  # Proposer is only locally relevant when the action tracker expects a local
  # proposal next slot, or a dynamic fee recipient has been registered for it
  if actionTracker.getNextProposalSlot(wallSlot) != nextWallSlot and
      dynamicFeeRecipientsStore[].getDynamicFeeRecipient(
        proposer, nextWallSlot.epoch).isNone:
    return Opt.none((ValidatorIndex, ValidatorPubKey))
  let proposerKey = dag.validatorKey(proposer).get().toPubKey

  Opt.some((proposer, proposerKey))
|
2022-09-07 18:34:52 +00:00
|
|
|
|
|
|
|
proc checkNextProposer*(self: ref ConsensusManager, wallSlot: Slot):
    Opt[(ValidatorIndex, ValidatorPubKey)] =
  ## Convenience wrapper around `checkNextProposer` using this manager's DAG,
  ## action tracker and dynamic fee recipient store.
  self.dag.checkNextProposer(
    self.actionTracker, self.dynamicFeeRecipientsStore, wallSlot)
|
2022-08-23 16:19:52 +00:00
|
|
|
|
|
|
|
proc getFeeRecipient*(
    self: ConsensusManager, pubkey: ValidatorPubKey,
    validatorIdx: Opt[ValidatorIndex], epoch: Epoch): Eth1Address =
  ## Resolve the fee recipient for a validator, in priority order: dynamic
  ## (API-registered) recipient, per-validator suggested recipient from the
  ## validators directory, then the configured default.
  if validatorIdx.isSome:
    let dynamic = self.dynamicFeeRecipientsStore[].getDynamicFeeRecipient(
      validatorIdx.get(), epoch)
    if dynamic.isSome:
      return dynamic.get()

  self.validatorsDir.getSuggestedFeeRecipient(
    pubkey, self.defaultFeeRecipient).valueOr:
    # Ignore errors and use default - errors are logged in gsfr
    self.defaultFeeRecipient
|
|
|
|
|
2023-02-15 15:10:31 +00:00
|
|
|
proc getGasLimit*(
    self: ConsensusManager, pubkey: ValidatorPubKey): uint64 =
  ## Per-validator suggested gas limit from the validators directory, falling
  ## back to the configured default when none is available.
  self.validatorsDir.getSuggestedGasLimit(
    pubkey, self.defaultGasLimit).valueOr:
    self.defaultGasLimit
|
|
|
|
|
2022-08-23 16:19:52 +00:00
|
|
|
from ../spec/datatypes/bellatrix import PayloadID
|
|
|
|
|
2022-09-07 18:34:52 +00:00
|
|
|
proc runProposalForkchoiceUpdated*(
    self: ref ConsensusManager, wallSlot: Slot) {.async.} =
  ## When this node is expected to propose next slot, send an early
  ## `forkchoiceUpdated` with payload attributes so the EL can start
  ## building a payload ahead of proposal time.
  let
    nextWallSlot = wallSlot + 1
    (validatorIndex, nextProposer) = self.checkNextProposer(wallSlot).valueOr:
      # Not proposing next slot (or not synced) - nothing to prepare
      return
  debug "runProposalForkchoiceUpdated: expected to be proposing next slot",
    nextWallSlot, validatorIndex, nextProposer

  # In Capella and later, computing correct withdrawals would mean creating a
  # proposal state. Instead, only do that at proposal time.
  if nextWallSlot.is_epoch:
    debug "runProposalForkchoiceUpdated: not running early fcU for epoch-aligned proposal slot",
      nextWallSlot, validatorIndex, nextProposer
    return

  # Approximately lines up with validator_duties version. Used optimistically/
  # opportunistically, so mismatches are fine if not too frequent.
  let
    timestamp = withState(self.dag.headState):
      compute_timestamp_at_slot(forkyState.data, nextWallSlot)
    # If the current head block still forms the basis of the eventual proposal
    # state, then its `get_randao_mix` will remain unchanged as well, as it is
    # constant until the next block.
    randomData = withState(self.dag.headState):
      get_randao_mix(forkyState.data, get_current_epoch(forkyState.data)).data
    feeRecipient = self[].getFeeRecipient(
      nextProposer, Opt.some(validatorIndex), nextWallSlot.epoch)
    # NOTE(review): `withdrawals` appears unused below - the V2 attributes
    # recompute get_expected_withdrawals directly; confirm and consider
    # removing this binding
    withdrawals =
      if self.dag.headState.kind >= ConsensusFork.Capella:
        # Head state is not eventual proposal state, but withdrawals will be
        # identical within an epoch.
        withState(self.dag.headState):
          when consensusFork >= ConsensusFork.Capella:
            Opt.some get_expected_withdrawals(forkyState.data)
          else:
            Opt.none(seq[Withdrawal])
      else:
        Opt.none(seq[Withdrawal])
    beaconHead = self.attestationPool[].getBeaconHead(self.dag.head)
    headBlockHash = self.dag.loadExecutionBlockHash(beaconHead.blck)

  if headBlockHash.isZero:
    # Pre-merge head: no EL payload to prepare
    return

  try:
    let safeBlockHash = beaconHead.safeExecutionPayloadHash

    withState(self.dag.headState):
      # Shared call shape; the payload-attributes value differs per fork
      template callForkchoiceUpdated(fcPayloadAttributes: auto) =
        let (status, _) = await self.elManager.forkchoiceUpdated(
          headBlockHash, safeBlockHash,
          beaconHead.finalizedExecutionPayloadHash,
          payloadAttributes = some fcPayloadAttributes)
        debug "Fork-choice updated for proposal", status

      # Reminder to revisit the fork dispatch below when a new fork is added
      static: doAssert high(ConsensusFork) == ConsensusFork.Deneb
      when consensusFork >= ConsensusFork.Capella:
        callForkchoiceUpdated(PayloadAttributesV2(
          timestamp: Quantity timestamp,
          prevRandao: FixedBytes[32] randomData,
          suggestedFeeRecipient: feeRecipient,
          withdrawals: toEngineWithdrawals get_expected_withdrawals(forkyState.data)))
      else:
        callForkchoiceUpdated(PayloadAttributesV1(
          timestamp: Quantity timestamp,
          prevRandao: FixedBytes[32] randomData,
          suggestedFeeRecipient: feeRecipient))
  except CatchableError as err:
    # Best-effort optimization only - log and carry on
    error "Engine API fork-choice update failed", err = err.msg
|
2022-08-23 16:19:52 +00:00
|
|
|
|
2022-09-07 18:34:52 +00:00
|
|
|
proc updateHeadWithExecution*(
    self: ref ConsensusManager, initialNewHead: BeaconHead,
    getBeaconTimeFn: GetBeaconTimeFn) {.async.} =
  ## Trigger fork choice and update the DAG with the new head block
  ## This does not automatically prune the DAG after finalization
  ## `pruneFinalized` must be called for pruning.
  ##
  ## If the EL reports the chosen head as invalid, re-run head selection a
  ## bounded number of times before updating the DAG.

  # Grab the new head according to our latest attestation data
  try:
    # Ensure dag.updateHead has most current information
    var
      attempts = 0
      newHead = initialNewHead
    while (await self.updateExecutionClientHead(newHead)).isErr:
      # This proc is called on every new block; guarantee timely return
      inc attempts
      const maxAttempts = 5
      if attempts >= maxAttempts:
        warn "updateHeadWithExecution: too many attempts to recover from invalid payload",
          attempts, maxAttempts, newHead, initialNewHead
        break

      # Select new head for next attempt
      let
        wallTime = getBeaconTimeFn()
        nextHead = self.attestationPool[].selectOptimisticHead(wallTime).valueOr:
          warn "Head selection failed after invalid block, using previous head",
            newHead, wallSlot = wallTime.slotOrZero
          break
      warn "updateHeadWithExecution: attempting to recover from invalid payload",
        attempts, maxAttempts, newHead, initialNewHead, nextHead
      newHead = nextHead

    # Store the new head in the chain DAG - this may cause epochs to be
    # justified and finalized
    self.dag.updateHead(
      newHead.blck, self.quarantine[],
      self[].getKnownValidatorsForBlsChangeTracking(newHead.blck))

    # If this node should propose next slot, start preparing payload. Both
    # fcUs are useful: the updateExecutionClientHead(newHead) call updates
    # the head state (including optimistic status) that self.dagUpdateHead
    # needs while runProposalForkchoiceUpdated requires RANDAO information
    # from the head state corresponding to the `newHead` block, which only
    # self.dag.updateHead(...) sets up.
    await self.runProposalForkchoiceUpdated(getBeaconTimeFn().slotOrZero)

    # Settle any outstanding `expectBlock` future now that the head moved
    self[].checkExpectedBlock()
  except CatchableError as exc:
    debug "updateHeadWithExecution error",
      error = exc.msg
|
2022-07-04 20:35:33 +00:00
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
proc pruneStateCachesAndForkChoice*(self: var ConsensusManager) =
  ## Prune unneeded and invalidated data after finalization
  ## - the DAG state checkpoints
  ## - the DAG EpochRef
  ## - the attestation pool/fork choice

  # Nothing to clean up unless a finalized head made pruning necessary
  if not self.dag.needStateCachesAndForkChoicePruning():
    return

  self.dag.pruneStateCachesDAG()
  self.attestationPool[].prune()
|