# beacon_chain
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

import
  chronicles, chronos,
  ../spec/datatypes/base,
  ../consensus_object_pools/[blockchain_dag, block_quarantine, attestation_pool],
  ../eth1/eth1_monitor

from ../spec/eth2_apis/dynamic_fee_recipients import
  DynamicFeeRecipientsStore, getDynamicFeeRecipient
from ../validators/keystore_management import
  KeymanagerHost, getSuggestedFeeRecipient

type
  ForkChoiceUpdatedInformation* = object
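    ## Payload attributes and payload ID returned by a forkchoiceUpdated
    ## call, kept around for the upcoming block proposal.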
    payloadId*: PayloadID
    headBlockRoot*: Eth2Digest
    safeBlockRoot*: Eth2Digest
    finalizedBlockRoot*: Eth2Digest
    timestamp*: uint64
    feeRecipient*: Eth1Address

  ConsensusManager* = object
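    ## Coordinates fork choice, DAG head updates and execution-layer
    ## forkchoiceUpdated notifications for the beacon node.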
    expectedSlot: Slot
    expectedBlockReceived: Future[bool]

    # Validated & Verified
    # ----------------------------------------------------------------
    dag*: ChainDAGRef
    attestationPool*: ref AttestationPool

    # Missing info
    # ----------------------------------------------------------------
    quarantine*: ref Quarantine

    # Execution layer integration
    # ----------------------------------------------------------------
    eth1Monitor*: Eth1Monitor

    # Allow determination of preferred fee recipient during proposals
    # ----------------------------------------------------------------
    dynamicFeeRecipientsStore: ref DynamicFeeRecipientsStore
    keymanagerHost: ref KeymanagerHost
    defaultFeeRecipient: Eth1Address

    # Tracking last proposal forkchoiceUpdated payload information
    # ----------------------------------------------------------------
    forkchoiceUpdatedInfo*: Opt[ForkchoiceUpdatedInformation]

# Initialization
# ------------------------------------------------------------------------------

func new*(T: type ConsensusManager,
          dag: ChainDAGRef,
          attestationPool: ref AttestationPool,
          quarantine: ref Quarantine,
          eth1Monitor: Eth1Monitor,
          dynamicFeeRecipientsStore: ref DynamicFeeRecipientsStore,
          keymanagerHost: ref KeymanagerHost,
          defaultFeeRecipient: Eth1Address
         ): ref ConsensusManager =
  (ref ConsensusManager)(
    dag: dag,
    attestationPool: attestationPool,
    quarantine: quarantine,
    eth1Monitor: eth1Monitor,
    dynamicFeeRecipientsStore: dynamicFeeRecipientsStore,
    keymanagerHost: keymanagerHost,
    forkchoiceUpdatedInfo: Opt.none ForkchoiceUpdatedInformation,
    defaultFeeRecipient: defaultFeeRecipient
  )
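
# Example (illustrative sketch; the surrounding variables are assumed to
# exist at the call site and are not defined in this module):
#
#   let consensusManager = ConsensusManager.new(
#     dag, attestationPool, quarantine, eth1Monitor,
#     dynamicFeeRecipientsStore, keymanagerHost, defaultFeeRecipient)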

# Consensus Management
# -----------------------------------------------------------------------------------

proc checkExpectedBlock(self: var ConsensusManager) =
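  ## Complete the pending `expectBlock` future if the DAG head has reached
  ## the expected slot.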
  if self.expectedBlockReceived == nil:
    return

  if self.dag.head.slot < self.expectedSlot:
    return

  self.expectedBlockReceived.complete(true)
  self.expectedBlockReceived = nil # Don't keep completed futures around!
proc expectBlock*(self: var ConsensusManager, expectedSlot: Slot): Future[bool] =
  ## Return a future that will complete when a head is selected whose slot is
  ## equal to or greater than the given slot, or when a new expectation is
  ## created
  if self.expectedBlockReceived != nil:
    # Reset the old future so as not to leave it hanging; an alternative
    # would be to cancel it, but it makes no practical difference for now
    self.expectedBlockReceived.complete(false)

  let fut = newFuture[bool]("ConsensusManager.expectBlock")
  self.expectedSlot = expectedSlot
  self.expectedBlockReceived = fut

  # By the time we're expecting a block, it might already have been processed!
  self.checkExpectedBlock()

  return fut
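
# Example (illustrative sketch; `consensusManager` is assumed to be a
# `ref ConsensusManager` at the call site):
#
#   if await consensusManager[].expectBlock(slot):
#     discard # head advanced to `slot` or beyond
#   else:
#     discard # superseded by a newer expectBlock call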
from eth/async_utils import awaitWithTimeout
from web3/engine_api_types import
  ForkchoiceUpdatedResponse, PayloadExecutionStatus, PayloadStatusV1

func `$`(h: BlockHash): string = $h.asEth2Digest

proc runForkchoiceUpdated*(
    eth1Monitor: Eth1Monitor,
    headBlockRoot, safeBlockRoot, finalizedBlockRoot: Eth2Digest):
    Future[PayloadExecutionStatus] {.async.} =
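  ## Run an engine forkchoiceUpdated call against the execution client and
  ## return the resulting payload status; timeouts and failures are reported
  ## as `syncing`.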
  # Allow finalizedBlockRoot to be 0 to avoid sync deadlocks.
  #
  # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md#pos-events
  # has "Before the first finalized block occurs in the system the finalized
  # block hash provided by this event is stubbed with
  # `0x0000000000000000000000000000000000000000000000000000000000000000`."
  # and
  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/bellatrix/validator.md#executionpayload
  # notes "`finalized_block_hash` is the hash of the latest finalized execution
  # payload (`Hash32()` if none yet finalized)"
  doAssert not headBlockRoot.isZero

  try:
    # Minimize window for Eth1 monitor to shut down connection
    await eth1Monitor.ensureDataProvider()

    let fcuR = awaitWithTimeout(
      forkchoiceUpdated(
        eth1Monitor, headBlockRoot, safeBlockRoot, finalizedBlockRoot),
      FORKCHOICEUPDATED_TIMEOUT):
        debug "runForkchoiceUpdated: forkchoiceUpdated timed out"
        ForkchoiceUpdatedResponse(
          payloadStatus: PayloadStatusV1(
            status: PayloadExecutionStatus.syncing))

    debug "runForkchoiceUpdated: ran forkchoiceUpdated",
      headBlockRoot, safeBlockRoot, finalizedBlockRoot,
      payloadStatus = $fcuR.payloadStatus.status,
      latestValidHash = $fcuR.payloadStatus.latestValidHash,
      validationError = $fcuR.payloadStatus.validationError

    return fcuR.payloadStatus.status
  except CatchableError as err:
    error "runForkchoiceUpdated: forkchoiceUpdated failed",
      err = err.msg
    return PayloadExecutionStatus.syncing

proc updateExecutionClientHead(self: ref ConsensusManager, newHead: BeaconHead)
    {.async.} =
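  ## Tell the execution client about the new head and record the verdict:
  ## `valid` marks the head verified, `invalid` marks it unviable, and
  ## `accepted`/`syncing` leave it optimistic.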
  if self.eth1Monitor.isNil:
    return

  let headExecutionPayloadHash = self.dag.loadExecutionBlockRoot(newHead.blck)

  if headExecutionPayloadHash.isZero:
    # Blocks without execution payloads can't be optimistic.
    self.dag.markBlockVerified(self.quarantine[], newHead.blck.root)
    return

  # Can't use dag.head here because it hasn't been updated yet
  let payloadExecutionStatus = await self.eth1Monitor.runForkchoiceUpdated(
    headExecutionPayloadHash,
    newHead.safeExecutionPayloadHash,
    newHead.finalizedExecutionPayloadHash)

  case payloadExecutionStatus
  of PayloadExecutionStatus.valid:
    self.dag.markBlockVerified(self.quarantine[], newHead.blck.root)
  of PayloadExecutionStatus.invalid, PayloadExecutionStatus.invalid_block_hash:
    self.dag.markBlockInvalid(newHead.blck.root)
    self.quarantine[].addUnviable(newHead.blck.root)
  of PayloadExecutionStatus.accepted, PayloadExecutionStatus.syncing:
    self.dag.optimisticRoots.incl newHead.blck.root
proc updateHead*(self: var ConsensusManager, newHead: BlockRef) =
  ## Trigger fork choice and update the DAG with the new head block.
  ## This does not automatically prune the DAG after finalization;
  ## `pruneFinalized` must be called for pruning.

  # Store the new head in the chain DAG - this may cause epochs to be
  # justified and finalized
  self.dag.updateHead(newHead, self.quarantine[])

  self.checkExpectedBlock()
proc updateHead*(self: var ConsensusManager, wallSlot: Slot) =
  ## Trigger fork choice and update the DAG with the new head block.
  ## This does not automatically prune the DAG after finalization;
  ## `pruneFinalized` must be called for pruning.

  # Grab the new head according to our latest attestation data
  let newHead = self.attestationPool[].selectOptimisticHead(
      wallSlot.start_beacon_time).valueOr:
    warn "Head selection failed, using previous head",
      head = shortLog(self.dag.head), wallSlot
    return

  if self.dag.loadExecutionBlockRoot(newHead.blck).isZero:
    # Blocks without execution payloads can't be optimistic.
    self.dag.markBlockVerified(self.quarantine[], newHead.blck.root)

  self.updateHead(newHead.blck)

proc checkNextProposer(dag: ChainDAGRef, slot: Slot):
    Opt[(ValidatorIndex, ValidatorPubKey)] =
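  ## Look up the proposer for the slot after `slot`, returning its validator
  ## index and public key, or none if it cannot be determined.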
  let proposer = dag.getProposer(dag.head, slot + 1)
  if proposer.isNone():
    return Opt.none((ValidatorIndex, ValidatorPubKey))
  Opt.some((proposer.get, dag.validatorKey(proposer.get).get().toPubKey))

proc getFeeRecipient*(
    self: ref ConsensusManager, pubkey: ValidatorPubKey,
    validatorIdx: ValidatorIndex, epoch: Epoch): Eth1Address =
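  ## Resolve the fee recipient for a proposal: a dynamically registered fee
  ## recipient takes precedence, then the keymanager's suggested fee
  ## recipient, then the node-wide default.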
  self.dynamicFeeRecipientsStore[].getDynamicFeeRecipient(validatorIdx, epoch).valueOr:
    if self.keymanagerHost != nil:
      self.keymanagerHost[].getSuggestedFeeRecipient(pubkey).valueOr:
        self.defaultFeeRecipient
    else:
      self.defaultFeeRecipient
from ../spec/datatypes/bellatrix import PayloadID
proc runProposalForkchoiceUpdated*(self: ref ConsensusManager) {.async.} =
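  ## Send a forkchoiceUpdated call with payload attributes for the next
  ## slot's proposer so the execution client can begin building a payload,
  ## and cache the resulting payload ID for the upcoming proposal.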
  withState(self.dag.headState):
    let
      nextSlot = state.data.slot + 1
      (validatorIndex, nextProposer) =
        self.dag.checkNextProposer(nextSlot).valueOr:
          return

    # Approximately lines up with the validator_duties version. Used
    # optimistically/opportunistically, so mismatches are fine if not too
    # frequent.
    let
      timestamp = compute_timestamp_at_slot(state.data, nextSlot)
      randomData =
        get_randao_mix(state.data, get_current_epoch(state.data)).data
      feeRecipient = self.getFeeRecipient(
        nextProposer, validatorIndex, nextSlot.epoch)
      beaconHead = self.attestationPool[].getBeaconHead(self.dag.head)
      headBlockRoot = self.dag.loadExecutionBlockRoot(beaconHead.blck)

    if headBlockRoot.isZero:
      return

    try:
      let fcResult = awaitWithTimeout(
        forkchoiceUpdated(
          self.eth1Monitor,
          headBlockRoot,
          beaconHead.safeExecutionPayloadHash,
          beaconHead.finalizedExecutionPayloadHash,
          timestamp, randomData, feeRecipient),
        FORKCHOICEUPDATED_TIMEOUT):
          debug "runProposalForkchoiceUpdated: forkchoiceUpdated timed out"
          ForkchoiceUpdatedResponse(
            payloadStatus: PayloadStatusV1(status: PayloadExecutionStatus.syncing))

      if fcResult.payloadStatus.status != PayloadExecutionStatus.valid or
          fcResult.payloadId.isNone:
        return

      self.forkchoiceUpdatedInfo = Opt.some ForkchoiceUpdatedInformation(
        payloadId: bellatrix.PayloadID(fcResult.payloadId.get),
        headBlockRoot: headBlockRoot,
        safeBlockRoot: beaconHead.safeExecutionPayloadHash,
        finalizedBlockRoot: beaconHead.finalizedExecutionPayloadHash,
        timestamp: timestamp,
        feeRecipient: feeRecipient)
    except CatchableError as err:
      error "Engine API fork-choice update failed", err = err.msg

proc updateHeadWithExecution*(self: ref ConsensusManager, newHead: BeaconHead)
    {.async.} =
  ## Trigger fork choice and update the DAG with the new head block.
  ## This does not automatically prune the DAG after finalization;
  ## `pruneFinalized` must be called for pruning.

  try:
    # Ensure dag.updateHead has most current information
    await self.updateExecutionClientHead(newHead)

    # Store the new head in the chain DAG - this may cause epochs to be
    # justified and finalized
    self.dag.updateHead(newHead.blck, self.quarantine[])

    # TODO after things stabilize with this, check for upcoming proposal and
    # don't bother sending first fcU, but initially, keep both in place
    asyncSpawn self.runProposalForkchoiceUpdated()

    self[].checkExpectedBlock()
  except CatchableError as exc:
    debug "updateHeadWithExecution error",
      error = exc.msg
proc pruneStateCachesAndForkChoice*(self: var ConsensusManager) =
  ## Prune unneeded and invalidated data after finalization:
  ## - the DAG state checkpoints
  ## - the DAG EpochRef
  ## - the attestation pool/fork choice

  # Cleanup DAG & fork choice if we have a finalized head
  if self.dag.needStateCachesAndForkChoicePruning():
    self.dag.pruneStateCachesDAG()
    self.attestationPool[].prune()