do not cache zero block hash if block unavailable (#5865)

With checkpoint sync, the checkpoint block is typically unavailable at
the start and only backfilled later. To avoid treating it as having a
zero block hash (i.e., as if execution were disabled) in some contexts,
wrap the result of `loadExecutionBlockHash` in `Opt` and handle the case
where the block hash is unknown.
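
A minimal sketch of the resulting calling pattern (not the actual
nimbus-eth2 code; `Opt` is assumed to come from the `results` package,
and `Eth2Digest` plus the `available` flag are simplified stand-ins):

  import results

  type Eth2Digest = object
    data: array[32, byte]

  const ZERO_HASH = Eth2Digest()

  proc loadExecutionBlockHash(available: bool): Opt[Eth2Digest] =
    # Before: an unavailable block silently yielded ZERO_HASH, which callers
    # could not distinguish from "execution disabled" (pre-Bellatrix).
    # After: an unavailable block yields `none`, so callers decide per context.
    if not available:
      return Opt.none(Eth2Digest)  # e.g. checkpoint block not yet backfilled
    Opt.some ZERO_HASH             # pre-Bellatrix block: execution disabled

  # Callers that can keep the old behaviour fall back explicitly:
  let hash = loadExecutionBlockHash(available = false).get(ZERO_HASH)

  # Callers that need to distinguish the cases handle `none` themselves:
  let maybeHash = loadExecutionBlockHash(available = false)
  if maybeHash.isNone:
    echo "execution block hash unknown; retry after backfill"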

---------

Co-authored-by: Jacek Sieka <jacek@status.im>
Author: Etan Kissling, 2024-02-09 23:10:38 +01:00 (committed by GitHub)
Parent: 7c53841cd8
Commit: 9593ef74b8
7 changed files with 92 additions and 29 deletions

View File

@@ -768,6 +768,7 @@ proc getBeaconHead*(
   let
     finalizedExecutionBlockHash =
       pool.dag.loadExecutionBlockHash(pool.dag.finalizedHead.blck)
+        .get(ZERO_HASH)
 
     # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/fork_choice/safe-block.md#get_safe_execution_payload_hash
     safeBlockRoot = pool.forkChoice.get_safe_beacon_block_root()
@@ -783,6 +784,7 @@
         finalizedExecutionBlockHash
       else:
         pool.dag.loadExecutionBlockHash(safeBlock.get)
+          .get(finalizedExecutionBlockHash)
 
   BeaconHead(
     blck: headBlock,

View File

@@ -2278,20 +2278,25 @@ proc pruneHistory*(dag: ChainDAGRef, startup = false) =
     if dag.db.clearBlocks(fork):
       break
 
-proc loadExecutionBlockHash*(dag: ChainDAGRef, bid: BlockId): Eth2Digest =
+proc loadExecutionBlockHash*(
+    dag: ChainDAGRef, bid: BlockId): Opt[Eth2Digest] =
   let blockData = dag.getForkedBlock(bid).valueOr:
-    return ZERO_HASH
+    # Besides database inconsistency issues, this is hit with checkpoint sync.
+    # The initial `BlockRef` is created before the checkpoint block is loaded.
+    # It is backfilled later, so return `none` and keep retrying.
+    return Opt.none(Eth2Digest)
 
   withBlck(blockData):
     when consensusFork >= ConsensusFork.Bellatrix:
-      forkyBlck.message.body.execution_payload.block_hash
+      Opt.some forkyBlck.message.body.execution_payload.block_hash
     else:
-      ZERO_HASH
+      Opt.some ZERO_HASH
 
-proc loadExecutionBlockHash*(dag: ChainDAGRef, blck: BlockRef): Eth2Digest =
+proc loadExecutionBlockHash*(
+    dag: ChainDAGRef, blck: BlockRef): Opt[Eth2Digest] =
   if blck.executionBlockHash.isNone:
-    blck.executionBlockHash = Opt.some dag.loadExecutionBlockHash(blck.bid)
-  blck.executionBlockHash.unsafeGet
+    blck.executionBlockHash = dag.loadExecutionBlockHash(blck.bid)
+  blck.executionBlockHash
 
 from std/packedsets import PackedSet, incl, items
@@ -2509,10 +2514,12 @@ proc updateHead*(
     dag.db.updateFinalizedBlocks(newFinalized)
 
-    if dag.loadExecutionBlockHash(oldFinalizedHead.blck).isZero and
-        not dag.loadExecutionBlockHash(dag.finalizedHead.blck).isZero and
-        dag.vanityLogs.onFinalizedMergeTransitionBlock != nil:
-      dag.vanityLogs.onFinalizedMergeTransitionBlock()
+    let oldBlockHash = dag.loadExecutionBlockHash(oldFinalizedHead.blck)
+    if oldBlockHash.isSome and oldBlockHash.unsafeGet.isZero:
+      let newBlockHash = dag.loadExecutionBlockHash(dag.finalizedHead.blck)
+      if newBlockHash.isSome and not newBlockHash.unsafeGet.isZero:
+        if dag.vanityLogs.onFinalizedMergeTransitionBlock != nil:
+          dag.vanityLogs.onFinalizedMergeTransitionBlock()
 
     # Pruning the block dag is required every time the finalized head changes
     # in order to clear out blocks that are no longer viable and should

View File

@@ -25,6 +25,8 @@ from ../validators/keystore_management import
   getSuggestedGasLimit
 from ../validators/action_tracker import ActionTracker, getNextProposalSlot
 
+logScope: topics = "cman"
+
 type
   ConsensusManager* = object
     expectedSlot: Slot
@@ -155,7 +157,20 @@ func setOptimisticHead*(
 proc updateExecutionClientHead(self: ref ConsensusManager,
     newHead: BeaconHead): Future[Opt[void]] {.async: (raises: [CancelledError]).} =
-  let headExecutionBlockHash = self.dag.loadExecutionBlockHash(newHead.blck)
+  let headExecutionBlockHash =
+    self.dag.loadExecutionBlockHash(newHead.blck).valueOr:
+      # `BlockRef`s are only created for blocks that have passed
+      # execution block hash validation, either explicitly in
+      # `block_processor.storeBlock`, or implicitly, e.g., through
+      # checkpoint sync. With checkpoint sync, the checkpoint block
+      # is initially not available, so if there is a reorg to it,
+      # this may be triggered. Such a reorg could happen if the first
+      # imported chain is completely invalid (after the checkpoint block)
+      # and is subsequently pruned, in which case the checkpoint block is head.
+      # Because execution block hash validation has already passed,
+      # we can treat this as `SYNCING`.
+      warn "Failed to load head execution block hash", head = newHead.blck
+      return Opt[void].ok()
 
   if headExecutionBlockHash.isZero:
     # Blocks without execution payloads can't be optimistic.
@@ -240,13 +255,15 @@ proc updateHead*(self: var ConsensusManager, wallSlot: Slot) =
   ## `pruneFinalized` must be called for pruning.
 
   # Grab the new head according to our latest attestation data
-  let newHead = self.attestationPool[].selectOptimisticHead(
-      wallSlot.start_beacon_time).valueOr:
-    warn "Head selection failed, using previous head",
-      head = shortLog(self.dag.head), wallSlot
-    return
+  let
+    newHead = self.attestationPool[].selectOptimisticHead(
+        wallSlot.start_beacon_time).valueOr:
+      warn "Head selection failed, using previous head",
+        head = shortLog(self.dag.head), wallSlot
+      return
+    executionBlockHash = self.dag.loadExecutionBlockHash(newHead.blck)
 
-  if self.dag.loadExecutionBlockHash(newHead.blck).isZero:
+  if executionBlockHash.isSome and executionBlockHash.unsafeGet.isZero:
     # Blocks without execution payloads can't be optimistic.
     self.dag.markBlockVerified(newHead.blck)
@@ -347,7 +364,7 @@ proc runProposalForkchoiceUpdated*(
     feeRecipient = self[].getFeeRecipient(
       nextProposer, Opt.some(validatorIndex), nextWallSlot.epoch)
     beaconHead = self.attestationPool[].getBeaconHead(self.dag.head)
-    headBlockHash = self.dag.loadExecutionBlockHash(beaconHead.blck)
+    headBlockHash = ? self.dag.loadExecutionBlockHash(beaconHead.blck)
 
   if headBlockHash.isZero:
     return err()

View File

@@ -652,7 +652,7 @@ proc storeBlock(
     else:
       let
         headExecutionBlockHash =
-          dag.loadExecutionBlockHash(newHead.get.blck)
+          dag.loadExecutionBlockHash(newHead.get.blck).get(ZERO_HASH)
         wallSlot = self.getBeaconTime().slotOrZero
       if headExecutionBlockHash.isZero or
           NewPayloadStatus.noResponse == payloadStatus:

View File

@@ -285,8 +285,18 @@ template validateBeaconBlockBellatrix(
   # shows that `state.latest_execution_payload_header` being default or not is
   # exactly equivalent to whether that block's execution payload is default or
   # not, so test cached block information rather than reconstructing a state.
-  if signed_beacon_block.message.is_execution_block or
-      not dag.loadExecutionBlockHash(parent).isZero:
+  let isExecutionEnabled =
+    if signed_beacon_block.message.is_execution_block:
+      true
+    else:
+      # If we don't know whether the parent block had execution enabled,
+      # assume it didn't. This way, we don't reject here if the timestamp
+      # is invalid, and let state transition check the timestamp.
+      # This is an edge case, and may be hit in a pathological scenario with
+      # checkpoint sync, because the checkpoint block may be unavailable
+      # and it could already be the parent of the new block before backfill.
+      not dag.loadExecutionBlockHash(parent).get(ZERO_HASH).isZero
+  if isExecutionEnabled:
     # [REJECT] The block's execution payload timestamp is correct with respect
     # to the slot -- i.e. execution_payload.timestamp ==
     # compute_timestamp_at_slot(state, block.slot).
@@ -523,15 +533,34 @@ proc validateBeaconBlock*(
   # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/p2p-interface.md#beacon_block
   # `is_execution_enabled(state, block.body)` check, but unlike in
   # validateBeaconBlockBellatrix() don't have parent BlockRef.
-  if signed_beacon_block.message.is_execution_block or
-      not dag.loadExecutionBlockHash(dag.finalizedHead.blck).isZero:
+  if signed_beacon_block.message.is_execution_block:
     # Blocks with execution enabled will be permitted to propagate
     # regardless of the validity of the execution payload. This prevents
     # network segregation between optimistic and non-optimistic nodes.
     #
-    # [IGNORE] The block's parent (defined by `block.parent_root`)
-    # passes all validation (including execution node verification of the
-    # `block.body.execution_payload`).
+    # If execution_payload verification of block's parent by an execution
+    # node is not complete:
+    #
+    # - [REJECT] The block's parent (defined by `block.parent_root`) passes
+    #   all validation (excluding execution node verification of the
+    #   `block.body.execution_payload`).
+    #
+    # otherwise:
+    #
+    # - [IGNORE] The block's parent (defined by `block.parent_root`) passes
+    #   all validation (including execution node verification of the
+    #   `block.body.execution_payload`).
+    #
+    # Implementation restrictions:
+    #
+    # - We don't know if the parent state had execution enabled.
+    #   If it had, and the block doesn't have it enabled anymore,
+    #   we end up in the pre-Merge path below (`else`) and REJECT.
+    #   Such a block is clearly invalid, though, without asking the EL.
+    #
+    # - We know that the parent was marked unviable, but don't know
+    #   whether it was marked unviable due to consensus (REJECT) or
+    #   execution (IGNORE) verification failure. We err on the IGNORE side.
     return errIgnore("BeaconBlock: ignored, parent from unviable fork")
   else:
     # [REJECT] The block's parent (defined by `block.parent_root`) passes

View File

@@ -126,7 +126,8 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
                 RestNodeValidity.optimistic
               else:
                 RestNodeValidity.valid,
-            execution_block_hash: node.dag.loadExecutionBlockHash(item.bid),
+            execution_block_hash:
+              node.dag.loadExecutionBlockHash(item.bid).get(ZERO_HASH),
             extra_data: some RestNodeExtraData(
               justified_root: item.checkpoints.justified.root,
               finalized_root: item.checkpoints.finalized.root,

View File

@@ -767,7 +767,14 @@ proc getBlindedBlockParts[
     Future[Result[(EPH, UInt256, ForkedBeaconBlock), string]]
     {.async: (raises: [CancelledError]).} =
   let
-    executionBlockHash = node.dag.loadExecutionBlockHash(head)
+    executionBlockHash = node.dag.loadExecutionBlockHash(head).valueOr:
+      # With checkpoint sync, the checkpoint block may be unavailable,
+      # and it could already be the parent of the new block before backfill.
+      # Fall back to the EL; hopefully the block is available on the local path.
+      warn "Failed to load parent execution block hash, skipping block builder",
+        slot, validator_index, head = shortLog(head)
+      return err("loadExecutionBlockHash failed")
+
     executionPayloadHeader =
       try:
         awaitWithTimeout(