enqueue missing parent block if stored in local DB (#6122)

When checking for `MissingParent`, it may be that the parent block was
already discovered as part of a prior run. In that case, it can be
loaded from storage and processed without having to rediscover the
entire branch from the network. This is similar to #6112, but for blocks
that are discovered via gossip / the sync manager instead of via the
request manager.
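In short: when verification fails with `MissingParent`, the block processor
now first looks the parent up in the local database and, if it is there,
re-enqueues it directly. A condensed sketch of the fallback added in the diff
below (simplified: the Deneb-and-later blob sidecar loading is elided here,
and `dag`, `self` and `signedBlock` come from the surrounding `storeBlock`
context):

    if parent.error() == VerifierError.MissingParent:
      # No `BlockRef` exists for the parent yet, but the block itself may
      # already be in local storage from a previous run of the beacon node.
      let parentBlck = dag.getForkedBlock(signedBlock.message.parent_root)
      if parentBlck.isSome():
        # Re-enqueue the stored parent instead of re-downloading the branch;
        # the full change below also loads the matching blob sidecars first.
        self[].enqueueBlock(
          MsgSource.gossip, parentBlck.unsafeGet().asSigned(),
          Opt.none BlobSidecars)
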
Etan Kissling 2024-03-22 14:35:46 +01:00 committed by GitHub
parent a6e9e0774c
commit 2d9586a5a8
2 changed files with 49 additions and 3 deletions


@@ -10,7 +10,7 @@
 import
   stew/results,
   chronicles, chronos, metrics,
-  ../spec/[signatures, signatures_batch],
+  ../spec/[forks, signatures, signatures_batch],
   ../sszdump

 from std/deques import Deque, addLast, contains, initDeque, items, len, shrink
@@ -20,7 +20,7 @@ from ../consensus_object_pools/consensus_manager import
   runProposalForkchoiceUpdated, shouldSyncOptimistically, updateHead,
   updateHeadWithExecution
 from ../consensus_object_pools/blockchain_dag import
-  getBlockRef, getProposer, forkAtEpoch, loadExecutionBlockHash,
+  getBlockRef, getForkedBlock, getProposer, forkAtEpoch, loadExecutionBlockHash,
   markBlockVerified, validatorKey
 from ../beacon_clock import GetBeaconTimeFn, toFloatSeconds
 from ../consensus_object_pools/block_dag import BlockRef, root, shortLog, slot
@@ -33,7 +33,7 @@ from ../consensus_object_pools/blob_quarantine import
 from ../validators/validator_monitor import
   MsgSource, ValidatorMonitor, registerAttestationInBlock, registerBeaconBlock,
   registerSyncAggregateInBlock
-from ../beacon_chain_db import putBlobSidecar
+from ../beacon_chain_db import getBlobSidecar, putBlobSidecar
 from ../spec/state_transition_block import validate_blobs

 export sszdump, signatures_batch
@@ -473,6 +473,44 @@ proc storeBlock(
     parent = dag.checkHeadBlock(signedBlock)

   if parent.isErr():
+    # TODO This logic can be removed if the database schema is extended
+    # to store non-canonical heads on top of the canonical head!
+    # If that is done, the database no longer contains extra blocks
+    # that have not yet been assigned a `BlockRef`
+    if parent.error() == VerifierError.MissingParent:
+      # This indicates that no `BlockRef` is available for the `parent_root`.
+      # However, the block may still be available in local storage. On startup,
+      # only the canonical branch is imported into `blockchain_dag`, while
+      # non-canonical branches are re-discovered with sync/request managers.
+      # Data from non-canonical branches that has already been verified during
+      # a previous run of the beacon node is already stored in the database but
+      # only lacks a `BlockRef`. Loading the branch from the database saves a
+      # lot of time, especially when a non-canonical branch has non-trivial
+      # depth. Note that if it turns out that a non-canonical branch eventually
+      # becomes canonical, it is vital to import it as quickly as possible.
+      let
+        parent_root = signedBlock.message.parent_root
+        parentBlck = dag.getForkedBlock(parent_root)
+      if parentBlck.isSome():
+        var blobsOk = true
+        let blobs =
+          withBlck(parentBlck.get()):
+            when consensusFork >= ConsensusFork.Deneb:
+              var blob_sidecars: BlobSidecars
+              for i in 0 ..< forkyBlck.message.body.blob_kzg_commitments.len:
+                let blob = BlobSidecar.new()
+                if not dag.db.getBlobSidecar(parent_root, i.BlobIndex, blob[]):
+                  blobsOk = false  # Pruned, or inconsistent DB
+                  break
+                blob_sidecars.add blob
+              Opt.some blob_sidecars
+            else:
+              Opt.none BlobSidecars
+        if blobsOk:
+          debug "Loaded parent block from storage", parent_root
+          self[].enqueueBlock(
+            MsgSource.gossip, parentBlck.unsafeGet().asSigned(), blobs)
+
     return handleVerifierError(parent.error())

   let


@@ -247,6 +247,10 @@ proc requestManagerBlockLoop(
     if missingBlockRoots.len == 0:
       continue

+    # TODO This logic can be removed if the database schema is extended
+    # to store non-canonical heads on top of the canonical head!
+    # If that is done, the database no longer contains extra blocks
+    # that have not yet been assigned a `BlockRef`
     var blockRoots: seq[Eth2Digest]
     if rman.blockLoader == nil:
       blockRoots = missingBlockRoots
@@ -335,6 +339,10 @@ proc requestManagerBlobLoop(
     if missingBlobIds.len == 0:
       continue

+    # TODO This logic can be removed if the database schema is extended
+    # to store non-canonical heads on top of the canonical head!
+    # If that is done, the database no longer contains extra blocks
+    # that have not yet been assigned a `BlockRef`
     var blobIds: seq[BlobIdentifier]
     if rman.blobLoader == nil:
       blobIds = missingBlobIds