2021-03-11 10:10:57 +00:00
|
|
|
# beacon_chain
|
2023-01-09 22:44:44 +00:00
|
|
|
# Copyright (c) 2018-2023 Status Research & Development GmbH
|
2021-03-11 10:10:57 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
2023-01-20 14:14:37 +00:00
|
|
|
{.push raises: [].}
|
2021-05-28 16:34:00 +00:00
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
import
|
|
|
|
stew/results,
|
|
|
|
chronicles, chronos, metrics,
|
2023-04-16 08:37:56 +00:00
|
|
|
../spec/[signatures, signatures_batch],
|
2021-08-18 18:57:58 +00:00
|
|
|
../sszdump
|
2021-03-11 10:10:57 +00:00
|
|
|
|
2023-04-26 07:00:03 +00:00
|
|
|
from std/deques import Deque, addLast, contains, initDeque, items, len, shrink
|
2022-07-13 14:13:54 +00:00
|
|
|
from ../consensus_object_pools/consensus_manager import
|
2022-09-07 18:34:52 +00:00
|
|
|
ConsensusManager, checkNextProposer, optimisticExecutionPayloadHash,
|
2023-03-05 01:40:21 +00:00
|
|
|
runProposalForkchoiceUpdated, shouldSyncOptimistically, updateHead,
|
|
|
|
updateHeadWithExecution
|
2023-04-16 08:37:56 +00:00
|
|
|
from ../consensus_object_pools/blockchain_dag import
|
|
|
|
getBlockRef, getProposer, forkAtEpoch, validatorKey
|
2022-07-04 20:35:33 +00:00
|
|
|
from ../beacon_clock import GetBeaconTimeFn, toFloatSeconds
|
2023-01-19 22:00:40 +00:00
|
|
|
from ../consensus_object_pools/block_dag import BlockRef, root, shortLog, slot
|
2022-11-10 17:40:27 +00:00
|
|
|
from ../consensus_object_pools/block_pools_types import
|
|
|
|
EpochRef, VerifierError
|
2022-07-04 20:35:33 +00:00
|
|
|
from ../consensus_object_pools/block_quarantine import
|
2023-04-13 19:11:40 +00:00
|
|
|
addBlobless, addOrphan, addUnviable, pop, removeOrphan
|
|
|
|
from ../consensus_object_pools/blob_quarantine import
|
|
|
|
BlobQuarantine, hasBlobs, popBlobs
|
2022-07-04 20:35:33 +00:00
|
|
|
from ../validators/validator_monitor import
|
|
|
|
MsgSource, ValidatorMonitor, registerAttestationInBlock, registerBeaconBlock,
|
|
|
|
registerSyncAggregateInBlock
|
2023-04-25 10:55:35 +00:00
|
|
|
from ../beacon_chain_db import putBlobSidecar
|
2022-07-04 20:35:33 +00:00
|
|
|
|
2021-12-06 09:49:01 +00:00
|
|
|
export sszdump, signatures_batch
|
2021-07-15 19:01:07 +00:00
|
|
|
|
2021-05-28 16:34:00 +00:00
|
|
|
# Block Processor
|
2021-03-11 10:10:57 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
2021-05-28 16:34:00 +00:00
|
|
|
# The block processor moves blocks from "Incoming" to "Consensus verified"
|
2021-03-11 10:10:57 +00:00
|
|
|
|
|
|
|
# Wall-clock duration of storeBlock() calls, in seconds (metrics histogram).
declareHistogram beacon_store_block_duration_seconds,
  "storeBlock() duration", buckets = [0.25, 0.5, 1, 2, 4, 8, Inf]
|
|
|
|
|
2023-02-06 07:22:08 +00:00
|
|
|
const
  SLOTS_PER_PAYLOAD = SLOTS_PER_HISTORICAL_ROOT
    ## Number of slots we process between each execution payload execution, while
    ## syncing the finalized part of the chain

  PAYLOAD_PRE_WALL_SLOTS = SLOTS_PER_EPOCH * 2
    ## Number of slots from wall time that we start processing every payload

  MAX_DEDUP_QUEUE_LEN = 16
    ## Number of blocks, with FIFO discipline, against which to check queued
    ## blocks before being processed to avoid spamming ELs. This should stay
    ## small enough that even O(n) algorithms are reasonable.
|
2023-02-06 07:22:08 +00:00
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
type
  BlobSidecars* = seq[ref BlobSidecar]
    ## Blob sidecars accompanying a block, as received from the network.

  BlockEntry = object
    ## One queued unit of work for the block processor.
    blck*: ForkedSignedBeaconBlock
      ## The gossip-validated block awaiting consensus verification
    blobs*: BlobSidecars
      ## Blob sidecars belonging to `blck` (empty pre-Deneb)
    maybeFinalized*: bool
      ## The block source claims the block has been finalized already
    resfut*: Future[Result[void, VerifierError]]
      ## Optional future completed with the processing result (may be nil,
      ## per `addBlock`'s default)
    queueTick*: Moment # Moment when block was enqueued
    validationDur*: Duration # Time it took to perform gossip validation
    src*: MsgSource
      ## Where the block came from (gossip, sync, request manager)

  BlockProcessor* = object
    ## This manages the processing of blocks from different sources
    ## Blocks and attestations are enqueued in a gossip-validated state
    ##
    ## from:
    ## - Gossip (when synced)
    ## - SyncManager (during sync)
    ## - RequestManager (missing ancestor blocks)
    ##
    ## are then consensus-verified and added to:
    ## - the blockchain DAG
    ## - database
    ## - attestation pool
    ## - fork choice
    ##
    ## The processor will also reinsert blocks from the quarantine, should a
    ## parent be found.

    # Config
    # ----------------------------------------------------------------
    dumpEnabled: bool
    dumpDirInvalid: string
    dumpDirIncoming: string

    # Producers
    # ----------------------------------------------------------------
    blockQueue: AsyncQueue[BlockEntry]

    # Consumer
    # ----------------------------------------------------------------
    consensusManager: ref ConsensusManager
      ## Blockchain DAG, AttestationPool, Quarantine, and ELManager
    validatorMonitor: ref ValidatorMonitor
    getBeaconTime: GetBeaconTimeFn

    blobQuarantine: ref BlobQuarantine
    verifier: BatchVerifier
      ## Batch signature verification context (rng + taskpool)

    lastPayload: Slot
      ## The slot at which we sent a payload to the execution client the last
      ## time

    dupBlckBuf: Deque[(Eth2Digest, ValidatorSig)]
    # Small buffer to allow for filtering of duplicate blocks in block queue

  NewPayloadStatus {.pure.} = enum
    ## Outcome of asking the execution client about a payload.
    valid       ## EL reported the payload VALID
    notValid    ## EL reported SYNCING or ACCEPTED - verdict deferred
    invalid     ## EL reported INVALID or INVALID_BLOCK_HASH
    noResponse  ## No usable EL response (no connection, error, or skipped)

  ProcessingStatus {.pure.} = enum
    ## Whether block processing reached a definitive verdict.
    completed
    notCompleted
|
|
|
|
|
2022-03-25 11:40:10 +00:00
|
|
|
# Forward declaration (implementation elsewhere in this module): enqueues a
# gossip-validated block and its blob sidecars for consensus verification.
# `resfut`, when non-nil, presumably gets completed with the processing
# result - confirm against the implementation.
proc addBlock*(
    self: var BlockProcessor, src: MsgSource, blck: ForkedSignedBeaconBlock,
    blobs: BlobSidecars,
    resfut: Future[Result[void, VerifierError]] = nil,
    maybeFinalized = false,
    validationDur = Duration())
|
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
# Initialization
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2021-05-28 16:34:00 +00:00
|
|
|
proc new*(T: type BlockProcessor,
          dumpEnabled: bool,
          dumpDirInvalid, dumpDirIncoming: string,
          rng: ref HmacDrbgContext, taskpool: TaskPoolPtr,
          consensusManager: ref ConsensusManager,
          validatorMonitor: ref ValidatorMonitor,
          blobQuarantine: ref BlobQuarantine,
          getBeaconTime: GetBeaconTimeFn): ref BlockProcessor =
  ## Allocate a block processor wired to the given consumers, with an empty
  ## work queue and an empty duplicate-detection buffer.
  let dedupBuf = initDeque[(Eth2Digest, ValidatorSig)](
    initialSize = MAX_DEDUP_QUEUE_LEN)
  (ref BlockProcessor)(
    # Dump configuration
    dumpEnabled: dumpEnabled,
    dumpDirInvalid: dumpDirInvalid,
    dumpDirIncoming: dumpDirIncoming,
    # Consumers and helpers
    consensusManager: consensusManager,
    validatorMonitor: validatorMonitor,
    blobQuarantine: blobQuarantine,
    getBeaconTime: getBeaconTime,
    verifier: BatchVerifier(rng: rng, taskpool: taskpool),
    # Work queue and dedup state
    blockQueue: newAsyncQueue[BlockEntry](),
    dupBlckBuf: dedupBuf)
|
2021-03-11 10:10:57 +00:00
|
|
|
|
|
|
|
# Sync callbacks
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func hasBlocks*(self: BlockProcessor): bool =
  ## True when at least one block is waiting in the processing queue.
  self.blockQueue.len != 0
|
2021-04-26 20:39:44 +00:00
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
# Storage
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2021-11-05 15:39:47 +00:00
|
|
|
proc dumpInvalidBlock*(
    self: BlockProcessor, signedBlock: ForkySignedBeaconBlock) =
  ## Persist a block that failed verification to the invalid-block dump
  ## directory, when dumping is enabled; otherwise do nothing.
  if not self.dumpEnabled:
    return
  dump(self.dumpDirInvalid, signedBlock)
|
|
|
|
|
2022-01-26 12:20:08 +00:00
|
|
|
proc dumpBlock[T](
    self: BlockProcessor,
    signedBlock: ForkySignedBeaconBlock,
    res: Result[T, VerifierError]) =
  ## Write rejected blocks to disk for later inspection: invalid blocks go
  ## to the invalid dump directory, blocks with a missing parent to the
  ## incoming directory. No-op when dumping is disabled or `res` succeeded.
  if not self.dumpEnabled or res.isOk:
    return
  case res.error
  of VerifierError.Invalid:
    self.dumpInvalidBlock(signedBlock)
  of VerifierError.MissingParent:
    dump(self.dumpDirIncoming, signedBlock)
  else:
    discard
|
|
|
|
|
2022-07-04 20:35:33 +00:00
|
|
|
from ../consensus_object_pools/block_clearance import
|
|
|
|
addBackfillBlock, addHeadBlock
|
|
|
|
|
2022-01-26 12:20:08 +00:00
|
|
|
proc storeBackfillBlock(
    self: var BlockProcessor,
    signedBlock: ForkySignedBeaconBlock,
    blobs: BlobSidecars): Result[void, VerifierError] =
  ## Store a block that precedes the finalized checkpoint ("backfill") in
  ## the DAG, translating quarantine knowledge about unviable ancestors into
  ## the returned error so sync doesn't retry such blocks.

  # The block is certainly not missing any more
  self.consensusManager.quarantine[].missing.del(signedBlock.root)

  # Establish blob viability before calling addbackfillBlock to avoid
  # writing the block in case of blob error.
  let blobsOk =
    when typeof(signedBlock).toFork() >= ConsensusFork.Deneb:
      # NOTE(review): `blobs.len > 0 or true` always evaluates to true -
      # blob validation is not implemented yet (see TODO); confirm this
      # placeholder is intentional.
      blobs.len > 0 or true
      # TODO: validate blobs
    else:
      true
  if not blobsOk:
    return err(VerifierError.Invalid)

  let res = self.consensusManager.dag.addBackfillBlock(signedBlock)

  if res.isErr():
    case res.error
    of VerifierError.MissingParent:
      if signedBlock.message.parent_root in
          self.consensusManager.quarantine[].unviable:
        # DAG doesn't know about unviable ancestor blocks - we do! Translate
        # this to the appropriate error so that sync etc doesn't retry the block
        self.consensusManager.quarantine[].addUnviable(signedBlock.root)

        return err(VerifierError.UnviableFork)
    of VerifierError.UnviableFork:
      # Track unviables so that descendants can be discarded properly
      self.consensusManager.quarantine[].addUnviable(signedBlock.root)
    else: discard
    return res

  # Only store blobs after successfully establishing block viability.
  # TODO: store blobs in db

  res
|
|
|
|
|
2023-04-17 14:17:52 +00:00
|
|
|
from web3/engine_api_types import
|
|
|
|
PayloadAttributesV1, PayloadAttributesV2, PayloadExecutionStatus,
|
|
|
|
PayloadStatusV1
|
2022-07-21 18:26:36 +00:00
|
|
|
from ../eth1/eth1_monitor import
|
2023-04-19 19:42:30 +00:00
|
|
|
ELManager, asEngineExecutionPayload, forkchoiceUpdated, hasConnection,
|
|
|
|
sendNewPayload
|
2022-07-21 18:26:36 +00:00
|
|
|
|
|
|
|
proc expectValidForkchoiceUpdated(
    elManager: ELManager, headBlockPayloadAttributesType: typedesc,
    headBlockHash, safeBlockHash, finalizedBlockHash: Eth2Digest,
    receivedBlock: ForkySignedBeaconBlock): Future[void] {.async.} =
  ## Send `forkchoiceUpdated` for a head the EL is already expected to
  ## consider VALID, and log (info/warn) if the EL disagrees - but only when
  ## the head being set is the block we just received.
  let
    (payloadExecutionStatus, _) = await elManager.forkchoiceUpdated(
      headBlockHash = headBlockHash,
      safeBlockHash = safeBlockHash,
      finalizedBlockHash = finalizedBlockHash,
      payloadAttributes = none headBlockPayloadAttributesType)
    receivedExecutionBlockHash =
      when typeof(receivedBlock).toFork >= ConsensusFork.Bellatrix:
        receivedBlock.message.body.execution_payload.block_hash
      else:
        # https://github.com/nim-lang/Nim/issues/19802
        (static(default(Eth2Digest)))

  # Only called when expecting this to be valid because `newPayload` or some
  # previous `forkchoiceUpdated` had already marked it as valid. However, if
  # it's not the block that was received, don't info/warn either way given a
  # relative lack of immediate evidence.
  if receivedExecutionBlockHash != headBlockHash:
    return

  case payloadExecutionStatus
  of PayloadExecutionStatus.valid:
    # situation nominal
    discard
  of PayloadExecutionStatus.accepted, PayloadExecutionStatus.syncing:
    info "execution payload forkChoiceUpdated status ACCEPTED/SYNCING, but was previously VALID",
      payloadExecutionStatus = $payloadExecutionStatus, headBlockHash,
      safeBlockHash, finalizedBlockHash,
      receivedBlock = shortLog(receivedBlock)
  of PayloadExecutionStatus.invalid, PayloadExecutionStatus.invalid_block_hash:
    warn "execution payload forkChoiceUpdated status INVALID, but was previously VALID",
      payloadExecutionStatus = $payloadExecutionStatus, headBlockHash,
      safeBlockHash, finalizedBlockHash,
      receivedBlock = shortLog(receivedBlock)
|
2022-07-21 18:26:36 +00:00
|
|
|
|
|
|
|
from ../consensus_object_pools/attestation_pool import
|
2022-08-25 23:34:02 +00:00
|
|
|
addForkChoice, selectOptimisticHead, BeaconHead
|
2022-07-21 18:26:36 +00:00
|
|
|
from ../consensus_object_pools/blockchain_dag import
|
2023-04-11 16:56:29 +00:00
|
|
|
is_optimistic, loadExecutionBlockHash, markBlockVerified
|
2022-07-04 20:35:33 +00:00
|
|
|
from ../consensus_object_pools/spec_cache import get_attesting_indices
|
|
|
|
from ../spec/datatypes/phase0 import TrustedSignedBeaconBlock
|
2022-10-14 19:48:56 +00:00
|
|
|
from ../spec/datatypes/altair import SignedBeaconBlock
|
|
|
|
|
|
|
|
from ../spec/datatypes/bellatrix import ExecutionPayload, SignedBeaconBlock
|
2022-11-24 07:53:04 +00:00
|
|
|
from ../spec/datatypes/capella import
|
|
|
|
ExecutionPayload, SignedBeaconBlock, asTrusted, shortLog
|
2022-10-14 19:48:56 +00:00
|
|
|
|
2023-03-05 01:40:21 +00:00
|
|
|
# TODO investigate why this seems to allow compilation even though it doesn't
|
|
|
|
# directly address deneb.ExecutionPayload when complaint was that it didn't
|
|
|
|
# know about "deneb"
|
|
|
|
from ../spec/datatypes/deneb import SignedBeaconBlock, asTrusted, shortLog
|
|
|
|
from ../eth1/eth1_monitor import hasProperlyConfiguredConnection
|
|
|
|
|
2022-10-14 19:48:56 +00:00
|
|
|
proc newExecutionPayload*(
    elManager: ELManager,
    executionPayload: ForkyExecutionPayload):
    Future[Opt[PayloadExecutionStatus]] {.async.} =
  ## Submit `executionPayload` to the execution client (engine `newPayload`)
  ## and return the reported status. Returns `Opt.none` when no properly
  ## configured EL connection exists or when the call raises.
  if not elManager.hasProperlyConfiguredConnection:
    # Log at info only when a (misconfigured) connection exists; stay quiet
    # (debug) when there is simply no EL connected.
    if elManager.hasConnection:
      info "No execution client connected; cannot process block payloads",
        executionPayload = shortLog(executionPayload)
    else:
      debug "No execution client connected; cannot process block payloads",
        executionPayload = shortLog(executionPayload)
    return Opt.none PayloadExecutionStatus

  debug "newPayload: inserting block into execution engine",
    executionPayload = shortLog(executionPayload)

  try:
    let payloadStatus = await elManager.sendNewPayload(
      executionPayload.asEngineExecutionPayload)

    debug "newPayload: succeeded",
      parentHash = executionPayload.parent_hash,
      blockHash = executionPayload.block_hash,
      blockNumber = executionPayload.block_number,
      payloadStatus = $payloadStatus

    return Opt.some payloadStatus
  except CatchableError as err:
    # Treat EL communication failure as "no response" rather than invalid.
    warn "newPayload failed - check execution client",
      msg = err.msg,
      parentHash = shortLog(executionPayload.parent_hash),
      blockHash = shortLog(executionPayload.block_hash),
      blockNumber = executionPayload.block_number
    return Opt.none PayloadExecutionStatus
|
|
|
|
|
|
|
|
proc getExecutionValidity(
    elManager: ELManager,
    blck: bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock |
          deneb.SignedBeaconBlock):
    Future[NewPayloadStatus] {.async.} =
  ## Ask the execution client to validate the block's execution payload and
  ## map the engine-API status onto `NewPayloadStatus`. Blocks without an
  ## execution payload (pre-merge) are vacuously valid.
  if not blck.message.is_execution_block:
    return NewPayloadStatus.valid # vacuously

  try:
    let executionPayloadStatus = await elManager.newExecutionPayload(
      blck.message.body.execution_payload)
    if executionPayloadStatus.isNone:
      return NewPayloadStatus.noResponse

    case executionPayloadStatus.get
    of PayloadExecutionStatus.invalid, PayloadExecutionStatus.invalid_block_hash:
      # Blocks come either from gossip or request manager requests. In the
      # former case, they've passed libp2p gosisp validation which implies
      # correct signature for correct proposer,which makes spam expensive,
      # while for the latter, spam is limited by the request manager.
      info "execution payload invalid from EL client newPayload",
        executionPayloadStatus = $executionPayloadStatus.get,
        executionPayload = shortLog(blck.message.body.execution_payload),
        blck = shortLog(blck)
      return NewPayloadStatus.invalid
    of PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted:
      return NewPayloadStatus.notValid
    of PayloadExecutionStatus.valid:
      return NewPayloadStatus.valid
  except CatchableError as err:
    # EL communication failure: report "no response" so the caller can fall
    # back to CL-side checks.
    error "getExecutionValidity: newPayload failed",
      err = err.msg,
      executionPayload = shortLog(blck.message.body.execution_payload),
      blck = shortLog(blck)
    return NewPayloadStatus.noResponse
|
2022-07-04 20:35:33 +00:00
|
|
|
|
2023-04-16 08:37:56 +00:00
|
|
|
proc checkBloblessSignature(self: BlockProcessor,
                            signed_beacon_block: deneb.SignedBeaconBlock):
                            Result[void, cstring] =
  ## Check that a Deneb block received without its blobs names the expected
  ## proposer and carries a valid proposer signature. Returns `ok()` when
  ## all checks pass, otherwise an error describing the failed check.
  let dag = self.consensusManager.dag
  let parent = dag.getBlockRef(signed_beacon_block.message.parent_root).valueOr:
    return err("checkBloblessSignature called with orphan block")
  let proposer = getProposer(
      dag, parent, signed_beacon_block.message.slot).valueOr:
    return err("checkBloblessSignature: Cannot compute proposer")
  if uint64(proposer) != signed_beacon_block.message.proposer_index:
    return err("checkBloblessSignature: Incorrect proposer")
  if not verify_block_signature(
      dag.forkAtEpoch(signed_beacon_block.message.slot.epoch),
      getStateField(dag.headState, genesis_validators_root),
      signed_beacon_block.message.slot,
      signed_beacon_block.root,
      dag.validatorKey(proposer).get(),
      signed_beacon_block.signature):
    return err("checkBloblessSignature: Invalid proposer signature")
  # Fix: explicitly return success - previously the proc fell off the end,
  # producing a default-initialized (non-ok) Result even when every check
  # passed, so valid blocks would be reported as failures.
  ok()
|
|
|
|
|
2021-10-19 15:20:55 +00:00
|
|
|
proc storeBlock*(
|
2022-10-14 19:48:56 +00:00
|
|
|
self: ref BlockProcessor, src: MsgSource, wallTime: BeaconTime,
|
2023-01-09 18:42:10 +00:00
|
|
|
signedBlock: ForkySignedBeaconBlock,
|
2023-02-28 11:36:17 +00:00
|
|
|
blobs: BlobSidecars,
|
2023-02-06 07:22:08 +00:00
|
|
|
maybeFinalized = false,
|
2023-01-09 18:42:10 +00:00
|
|
|
queueTick: Moment = Moment.now(), validationDur = Duration()):
|
2022-11-10 17:40:27 +00:00
|
|
|
Future[Result[BlockRef, (VerifierError, ProcessingStatus)]] {.async.} =
|
2021-12-20 19:20:31 +00:00
|
|
|
## storeBlock is the main entry point for unvalidated blocks - all untrusted
|
|
|
|
## blocks, regardless of origin, pass through here. When storing a block,
|
|
|
|
## we will add it to the dag and pass it to all block consumers that need
|
|
|
|
## to know about it, such as the fork choice and the monitoring
|
2021-03-11 10:10:57 +00:00
|
|
|
let
|
|
|
|
attestationPool = self.consensusManager.attestationPool
|
2021-12-02 18:34:12 +00:00
|
|
|
startTick = Moment.now()
|
2021-12-20 19:20:31 +00:00
|
|
|
vm = self.validatorMonitor
|
2021-12-06 09:49:01 +00:00
|
|
|
dag = self.consensusManager.dag
|
2023-02-06 07:22:08 +00:00
|
|
|
wallSlot = wallTime.slotOrZero
|
2022-10-14 19:48:56 +00:00
|
|
|
payloadStatus =
|
2023-02-06 07:22:08 +00:00
|
|
|
if maybeFinalized and
|
|
|
|
(self.lastPayload + SLOTS_PER_PAYLOAD) > signedBlock.message.slot and
|
2023-04-09 14:58:20 +00:00
|
|
|
(signedBlock.message.slot + PAYLOAD_PRE_WALL_SLOTS) < wallSlot and
|
|
|
|
signedBlock.message.is_execution_block:
|
2023-02-06 07:22:08 +00:00
|
|
|
# Skip payload validation when message source (reasonably) claims block
|
|
|
|
# has been finalized - this speeds up forward sync - in the worst case
|
|
|
|
# that the claim is false, we will correct every time we process a block
|
|
|
|
# from an honest source (or when we're close to head).
|
|
|
|
# Occasionally we also send a payload to the the EL so that it can
|
|
|
|
# progress in its own sync.
|
|
|
|
NewPayloadStatus.noResponse
|
2023-01-04 15:51:14 +00:00
|
|
|
else:
|
2023-02-06 07:22:08 +00:00
|
|
|
when typeof(signedBlock).toFork() >= ConsensusFork.Bellatrix:
|
2023-03-05 01:40:21 +00:00
|
|
|
await self.consensusManager.elManager.getExecutionValidity(signedBlock)
|
2023-02-06 07:22:08 +00:00
|
|
|
else:
|
|
|
|
NewPayloadStatus.valid # vacuously
|
2022-10-14 19:48:56 +00:00
|
|
|
payloadValid = payloadStatus == NewPayloadStatus.valid
|
2021-12-06 09:49:01 +00:00
|
|
|
|
|
|
|
# The block is certainly not missing any more
|
|
|
|
self.consensusManager.quarantine[].missing.del(signedBlock.root)
|
|
|
|
|
2022-10-14 19:48:56 +00:00
|
|
|
if NewPayloadStatus.invalid == payloadStatus:
|
|
|
|
self.consensusManager.quarantine[].addUnviable(signedBlock.root)
|
2022-11-10 17:40:27 +00:00
|
|
|
return err((VerifierError.UnviableFork, ProcessingStatus.completed))
|
2022-10-26 20:44:45 +00:00
|
|
|
|
2022-12-20 08:24:33 +00:00
|
|
|
if NewPayloadStatus.noResponse == payloadStatus:
|
2023-01-04 15:51:14 +00:00
|
|
|
# When the execution layer is not available to verify the payload, we do the
|
|
|
|
# required check on the CL side instead and proceed as if the EL was syncing
|
2022-12-20 08:24:33 +00:00
|
|
|
|
2023-04-17 20:11:28 +00:00
|
|
|
# Client software MUST validate `blockHash` value as being equivalent to
|
|
|
|
# `Keccak256(RLP(ExecutionBlockHeader))`
|
|
|
|
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#specification
|
2023-01-28 19:53:41 +00:00
|
|
|
when typeof(signedBlock).toFork() >= ConsensusFork.Bellatrix:
|
2022-12-20 08:24:33 +00:00
|
|
|
template payload(): auto = signedBlock.message.body.execution_payload
|
2023-03-02 00:11:46 +00:00
|
|
|
if signedBlock.message.is_execution_block and
|
|
|
|
payload.block_hash != payload.compute_execution_block_hash():
|
2023-01-24 13:19:38 +00:00
|
|
|
debug "Execution block hash validation failed",
|
|
|
|
execution_payload = shortLog(payload)
|
2022-12-20 08:24:33 +00:00
|
|
|
doAssert strictVerification notin dag.updateFlags
|
|
|
|
self.consensusManager.quarantine[].addUnviable(signedBlock.root)
|
2023-01-04 15:51:14 +00:00
|
|
|
return err((VerifierError.Invalid, ProcessingStatus.completed))
|
2022-12-20 08:24:33 +00:00
|
|
|
else:
|
|
|
|
discard
|
2022-10-14 19:48:56 +00:00
|
|
|
|
2021-12-06 09:49:01 +00:00
|
|
|
# We'll also remove the block as an orphan: it's unlikely the parent is
|
|
|
|
# missing if we get this far - should that be the case, the block will
|
|
|
|
# be re-added later
|
|
|
|
self.consensusManager.quarantine[].removeOrphan(signedBlock)
|
2021-03-11 10:10:57 +00:00
|
|
|
|
2023-01-09 18:42:10 +00:00
|
|
|
# Establish blob viability before calling addHeadBlock to avoid
|
|
|
|
# writing the block in case of blob error.
|
2023-03-04 13:35:39 +00:00
|
|
|
when typeof(signedBlock).toFork() >= ConsensusFork.Deneb:
|
2023-02-28 11:36:17 +00:00
|
|
|
if blobs.len > 0:
|
|
|
|
discard
|
|
|
|
# TODO: validate blobs
|
2023-01-09 18:42:10 +00:00
|
|
|
|
2021-07-15 19:01:07 +00:00
|
|
|
type Trusted = typeof signedBlock.asTrusted()
|
2022-07-04 20:35:33 +00:00
|
|
|
let blck = dag.addHeadBlock(self.verifier, signedBlock, payloadValid) do (
|
2022-07-06 10:33:02 +00:00
|
|
|
blckRef: BlockRef, trustedBlock: Trusted,
|
|
|
|
epochRef: EpochRef, unrealized: FinalityCheckpoints):
|
2021-03-11 10:10:57 +00:00
|
|
|
# Callback add to fork choice if valid
|
|
|
|
attestationPool[].addForkChoice(
|
2022-07-06 10:33:02 +00:00
|
|
|
epochRef, blckRef, unrealized, trustedBlock.message, wallTime)
|
2021-03-11 10:10:57 +00:00
|
|
|
|
2021-12-20 19:20:31 +00:00
|
|
|
vm[].registerBeaconBlock(
|
|
|
|
src, wallTime, trustedBlock.message)
|
|
|
|
|
|
|
|
for attestation in trustedBlock.message.body.attestations:
|
2022-07-06 10:33:02 +00:00
|
|
|
for validator_index in dag.get_attesting_indices(attestation):
|
2022-01-08 23:28:49 +00:00
|
|
|
vm[].registerAttestationInBlock(attestation.data, validator_index,
|
2022-04-06 09:23:01 +00:00
|
|
|
trustedBlock.message.slot)
|
2021-12-20 19:20:31 +00:00
|
|
|
|
2022-03-16 07:20:40 +00:00
|
|
|
withState(dag[].clearanceState):
|
2023-03-11 00:35:52 +00:00
|
|
|
when consensusFork >= ConsensusFork.Altair and
|
2021-12-20 19:20:31 +00:00
|
|
|
Trusted isnot phase0.TrustedSignedBeaconBlock: # altair+
|
|
|
|
for i in trustedBlock.message.body.sync_aggregate.sync_committee_bits.oneIndices():
|
|
|
|
vm[].registerSyncAggregateInBlock(
|
|
|
|
trustedBlock.message.slot, trustedBlock.root,
|
2022-09-10 06:12:07 +00:00
|
|
|
forkyState.data.current_sync_committee.pubkeys.data[i])
|
2021-12-20 19:20:31 +00:00
|
|
|
|
2022-10-14 19:48:56 +00:00
|
|
|
self[].dumpBlock(signedBlock, blck)
|
2021-03-11 10:10:57 +00:00
|
|
|
|
|
|
|
# There can be a scenario where we receive a block we already received.
|
|
|
|
# However this block was before the last finalized epoch and so its parent
|
|
|
|
# was pruned from the ForkChoice.
|
2021-12-06 09:49:01 +00:00
|
|
|
if blck.isErr():
|
2022-01-26 12:20:08 +00:00
|
|
|
case blck.error()
|
2022-11-10 17:40:27 +00:00
|
|
|
of VerifierError.MissingParent:
|
2022-01-26 12:20:08 +00:00
|
|
|
if signedBlock.message.parent_root in
|
|
|
|
self.consensusManager.quarantine[].unviable:
|
|
|
|
# DAG doesn't know about unviable ancestor blocks - we do! Translate
|
|
|
|
# this to the appropriate error so that sync etc doesn't retry the block
|
|
|
|
self.consensusManager.quarantine[].addUnviable(signedBlock.root)
|
|
|
|
|
2022-11-10 17:40:27 +00:00
|
|
|
return err((VerifierError.UnviableFork, ProcessingStatus.completed))
|
2022-01-26 12:20:08 +00:00
|
|
|
|
|
|
|
if not self.consensusManager.quarantine[].addOrphan(
|
2023-02-27 06:10:22 +00:00
|
|
|
dag.finalizedHead.slot, ForkedSignedBeaconBlock.init(signedBlock)):
|
2021-12-06 09:49:01 +00:00
|
|
|
debug "Block quarantine full",
|
|
|
|
blockRoot = shortLog(signedBlock.root),
|
|
|
|
blck = shortLog(signedBlock.message),
|
|
|
|
signature = shortLog(signedBlock.signature)
|
2022-11-10 17:40:27 +00:00
|
|
|
of VerifierError.UnviableFork:
|
2022-01-26 12:20:08 +00:00
|
|
|
# Track unviables so that descendants can be discarded properly
|
|
|
|
self.consensusManager.quarantine[].addUnviable(signedBlock.root)
|
|
|
|
else: discard
|
2021-12-06 09:49:01 +00:00
|
|
|
|
2022-11-10 17:40:27 +00:00
|
|
|
return err((blck.error, ProcessingStatus.completed))
|
2021-12-02 18:34:12 +00:00
|
|
|
|
2023-02-06 07:22:08 +00:00
|
|
|
if payloadStatus in {NewPayloadStatus.valid, NewPayloadStatus.notValid}:
|
|
|
|
# If the EL responded at all, we don't need to try again for a while
|
|
|
|
self[].lastPayload = signedBlock.message.slot
|
|
|
|
|
2023-04-25 10:55:35 +00:00
|
|
|
# write blobs now that block has been written.
|
|
|
|
for b in blobs:
|
|
|
|
self.consensusManager.dag.db.putBlobSidecar(b[])
|
2023-01-09 18:42:10 +00:00
|
|
|
|
2021-12-02 18:34:12 +00:00
|
|
|
let storeBlockTick = Moment.now()
|
|
|
|
|
2022-07-04 20:35:33 +00:00
|
|
|
# Eagerly update head: the incoming block "should" get selected.
|
|
|
|
#
|
|
|
|
# storeBlock gets called from validator_duties, which depends on its not
|
|
|
|
# blocking progress any longer than necessary, and processBlock here, in
|
|
|
|
# which case it's fine to await for a while on engine API results.
|
2022-07-21 18:26:36 +00:00
|
|
|
#
|
|
|
|
# Three general scenarios: (1) pre-merge; (2) merge, already `VALID` by way
|
|
|
|
# of `newPayload`; (3) optimistically imported, need to call fcU before DAG
|
2023-02-13 11:13:52 +00:00
|
|
|
# updateHead. Because in a non-finalizing network, completing sync isn't as
|
|
|
|
# useful because regular reorgs likely still occur, and when finalizing the
|
|
|
|
# EL is only called every SLOTS_PER_PAYLOAD slots regardless, await, rather
|
|
|
|
# than asyncSpawn forkchoiceUpdated calls.
|
|
|
|
#
|
|
|
|
# This reduces in-flight fcU spam, which both reduces EL load and decreases
|
|
|
|
# otherwise somewhat unpredictable CL head movement.
|
2022-07-21 18:26:36 +00:00
|
|
|
|
|
|
|
if payloadValid:
|
2023-04-16 01:25:17 +00:00
|
|
|
dag.markBlockVerified(self.consensusManager.quarantine[], signedBlock.root)
|
2022-07-21 18:26:36 +00:00
|
|
|
|
|
|
|
# Grab the new head according to our latest attestation data; determines how
|
|
|
|
# async this needs to be.
|
2023-02-14 20:41:49 +00:00
|
|
|
let newHead = attestationPool[].selectOptimisticHead(
|
|
|
|
wallSlot.start_beacon_time)
|
2022-07-21 18:26:36 +00:00
|
|
|
|
|
|
|
if newHead.isOk:
|
2023-03-05 01:40:21 +00:00
|
|
|
template elManager(): auto = self.consensusManager.elManager
|
2022-08-29 12:16:35 +00:00
|
|
|
if self.consensusManager[].shouldSyncOptimistically(wallSlot):
|
|
|
|
# Optimistic head is far in the future; report it as head block to EL.
|
|
|
|
|
|
|
|
# Note that the specification allows an EL client to skip fcU processing
|
|
|
|
# if an update to an ancestor is requested.
|
|
|
|
# > Client software MAY skip an update of the forkchoice state and MUST
|
|
|
|
# NOT begin a payload build process if `forkchoiceState.headBlockHash`
|
|
|
|
# references an ancestor of the head of canonical chain.
|
2023-04-17 14:17:52 +00:00
|
|
|
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#specification-1
|
2022-08-29 12:16:35 +00:00
|
|
|
#
|
|
|
|
# However, in practice, an EL client may not have completed importing all
|
|
|
|
# block headers, so may be unaware of a block's ancestor status.
|
|
|
|
# Therefore, hopping back and forth between the optimistic head and the
|
|
|
|
# chain DAG head does not work well in practice, e.g., Geth:
|
|
|
|
# - "Beacon chain gapped" from DAG head to optimistic head,
|
|
|
|
# - followed by "Beacon chain reorged" from optimistic head back to DAG.
|
2022-08-25 23:34:02 +00:00
|
|
|
self.consensusManager[].updateHead(newHead.get.blck)
|
2023-04-17 14:17:52 +00:00
|
|
|
|
|
|
|
template callForkchoiceUpdated(attributes: untyped) =
|
|
|
|
discard await elManager.forkchoiceUpdated(
|
|
|
|
headBlockHash = self.consensusManager[].optimisticExecutionPayloadHash,
|
|
|
|
safeBlockHash = newHead.get.safeExecutionPayloadHash,
|
|
|
|
finalizedBlockHash = newHead.get.finalizedExecutionPayloadHash,
|
|
|
|
payloadAttributes = none attributes)
|
|
|
|
|
|
|
|
case self.consensusManager.dag.cfg.consensusForkAtEpoch(
|
|
|
|
newHead.get.blck.bid.slot.epoch)
|
|
|
|
of ConsensusFork.Capella, ConsensusFork.Deneb:
|
|
|
|
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/shanghai.md#specification-1
|
|
|
|
# Consensus layer client MUST call this method instead of
|
|
|
|
# `engine_forkchoiceUpdatedV1` under any of the following conditions:
|
|
|
|
# `headBlockHash` references a block which `timestamp` is greater or
|
|
|
|
# equal to the Shanghai timestamp
|
|
|
|
callForkchoiceUpdated(PayloadAttributesV2)
|
|
|
|
of ConsensusFork.Bellatrix:
|
|
|
|
callForkchoiceUpdated(PayloadAttributesV1)
|
|
|
|
of ConsensusFork.Phase0, ConsensusFork.Altair:
|
|
|
|
discard
|
2022-07-21 18:26:36 +00:00
|
|
|
else:
|
2022-09-07 18:34:52 +00:00
|
|
|
let
|
|
|
|
headExecutionPayloadHash =
|
2023-04-16 01:25:17 +00:00
|
|
|
dag.loadExecutionBlockHash(newHead.get.blck)
|
2022-09-07 18:34:52 +00:00
|
|
|
wallSlot = self.getBeaconTime().slotOrZero
|
2023-02-14 20:41:49 +00:00
|
|
|
if headExecutionPayloadHash.isZero or
|
|
|
|
NewPayloadStatus.noResponse == payloadStatus:
|
|
|
|
# Blocks without execution payloads can't be optimistic, and don't try
|
|
|
|
# to fcU to a block the EL hasn't seen
|
2022-08-29 12:16:35 +00:00
|
|
|
self.consensusManager[].updateHead(newHead.get.blck)
|
2023-04-16 01:25:17 +00:00
|
|
|
elif not dag.is_optimistic newHead.get.blck.root:
|
2022-08-29 12:16:35 +00:00
|
|
|
# Not `NOT_VALID`; either `VALID` or `INVALIDATED`, but latter wouldn't
|
|
|
|
# be selected as head, so `VALID`. `forkchoiceUpdated` necessary for EL
|
|
|
|
# client only.
|
|
|
|
self.consensusManager[].updateHead(newHead.get.blck)
|
|
|
|
|
2022-09-07 18:34:52 +00:00
|
|
|
if self.consensusManager.checkNextProposer(wallSlot).isNone:
|
|
|
|
# No attached validator is next proposer, so use non-proposal fcU
|
2023-04-17 14:17:52 +00:00
|
|
|
|
|
|
|
template callForkchoiceUpdated(payloadAttributeType: untyped): auto =
|
|
|
|
await elManager.expectValidForkchoiceUpdated(
|
|
|
|
headBlockPayloadAttributesType = payloadAttributeType,
|
|
|
|
headBlockHash = headExecutionPayloadHash,
|
|
|
|
safeBlockHash = newHead.get.safeExecutionPayloadHash,
|
|
|
|
finalizedBlockHash = newHead.get.finalizedExecutionPayloadHash,
|
|
|
|
receivedBlock = signedBlock)
|
|
|
|
|
|
|
|
case self.consensusManager.dag.cfg.consensusForkAtEpoch(
|
|
|
|
newHead.get.blck.bid.slot.epoch)
|
|
|
|
of ConsensusFork.Capella, ConsensusFork.Deneb:
|
|
|
|
callForkchoiceUpdated(payloadAttributeType = PayloadAttributesV2)
|
|
|
|
of ConsensusFork.Phase0, ConsensusFork.Altair,
|
|
|
|
ConsensusFork.Bellatrix:
|
|
|
|
callForkchoiceUpdated(payloadAttributeType = PayloadAttributesV1)
|
2022-09-07 18:34:52 +00:00
|
|
|
else:
|
|
|
|
# Some attached validator is next proposer, so prepare payload. As
|
|
|
|
# updateHead() updated the DAG head, runProposalForkchoiceUpdated,
|
|
|
|
# which needs the state corresponding to that head block, can run.
|
2023-02-13 11:13:52 +00:00
|
|
|
await self.consensusManager.runProposalForkchoiceUpdated(
|
2022-09-07 18:34:52 +00:00
|
|
|
wallSlot)
|
2022-08-29 12:16:35 +00:00
|
|
|
else:
|
2023-02-13 11:13:52 +00:00
|
|
|
await self.consensusManager.updateHeadWithExecution(
|
2022-09-07 20:54:37 +00:00
|
|
|
newHead.get, self.getBeaconTime)
|
2022-07-04 20:35:33 +00:00
|
|
|
else:
|
2022-07-21 18:26:36 +00:00
|
|
|
warn "Head selection failed, using previous head",
|
2023-04-16 01:25:17 +00:00
|
|
|
head = shortLog(dag.head), wallSlot
|
2021-12-02 18:34:12 +00:00
|
|
|
|
|
|
|
let
|
|
|
|
updateHeadTick = Moment.now()
|
|
|
|
queueDur = startTick - queueTick
|
|
|
|
storeBlockDur = storeBlockTick - startTick
|
|
|
|
updateHeadDur = updateHeadTick - storeBlockTick
|
|
|
|
|
|
|
|
beacon_store_block_duration_seconds.observe(storeBlockDur.toFloatSeconds())
|
|
|
|
|
|
|
|
debug "Block processed",
|
2023-04-16 01:25:17 +00:00
|
|
|
localHeadSlot = dag.head.slot,
|
2021-12-02 18:34:12 +00:00
|
|
|
blockSlot = blck.get().slot,
|
|
|
|
validationDur, queueDur, storeBlockDur, updateHeadDur
|
|
|
|
|
2021-12-06 09:49:01 +00:00
|
|
|
for quarantined in self.consensusManager.quarantine[].pop(blck.get().root):
|
|
|
|
# Process the blocks that had the newly accepted block as parent
|
2023-04-13 19:11:40 +00:00
|
|
|
withBlck(quarantined):
|
|
|
|
when typeof(blck).toFork() < ConsensusFork.Deneb:
|
|
|
|
self[].addBlock(MsgSource.gossip, quarantined, BlobSidecars @[])
|
|
|
|
else:
|
|
|
|
if len(blck.message.body.blob_kzg_commitments) == 0:
|
|
|
|
self[].addBlock(MsgSource.gossip, quarantined, BlobSidecars @[])
|
|
|
|
else:
|
2023-04-16 08:37:56 +00:00
|
|
|
if (let res = checkBloblessSignature(self[], blck); res.isErr):
|
|
|
|
warn "Failed to verify signature of unorphaned blobless block",
|
|
|
|
blck = shortLog(blck),
|
|
|
|
error = res.error()
|
|
|
|
continue
|
2023-04-13 19:11:40 +00:00
|
|
|
if self.blobQuarantine[].hasBlobs(blck):
|
|
|
|
let blobs = self.blobQuarantine[].popBlobs(blck.root)
|
|
|
|
self[].addBlock(MsgSource.gossip, quarantined, blobs)
|
|
|
|
else:
|
|
|
|
if not self.consensusManager.quarantine[].addBlobless(
|
|
|
|
dag.finalizedHead.slot, blck):
|
|
|
|
notice "Block quarantine full (blobless)",
|
|
|
|
blockRoot = shortLog(quarantined.root),
|
|
|
|
signature = shortLog(quarantined.signature)
|
2021-12-06 09:49:01 +00:00
|
|
|
|
2022-11-10 17:40:27 +00:00
|
|
|
return Result[BlockRef, (VerifierError, ProcessingStatus)].ok blck.get
|
2021-03-11 10:10:57 +00:00
|
|
|
|
2022-01-26 12:20:08 +00:00
|
|
|
# Enqueue
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
proc addBlock*(
    self: var BlockProcessor, src: MsgSource, blck: ForkedSignedBeaconBlock,
    blobs: BlobSidecars,
    resfut: Future[Result[void, VerifierError]] = nil,
    maybeFinalized = false,
    validationDur = Duration()) =
  ## Enqueue a Gossip-validated block for consensus verification
  ##
  ## `src` records where the block came from, `blobs` carries any blob
  ## sidecars that accompany the block, and `validationDur` is the time
  ## already spent in gossip validation (reported later with the overall
  ## processing timings).
  ##
  ## When `resfut` is non-nil it is completed with the verification outcome —
  ## immediately for backfill blocks, otherwise after the queued block has
  ## been processed.
  # Backpressure:
  #   There is no backpressure here - producers must wait for `resfut` to
  #   constrain their own processing
  # Producers:
  # - Gossip (when synced)
  # - SyncManager (during sync)
  # - RequestManager (missing ancestor blocks)

  withBlck(blck):
    if blck.message.slot <= self.consensusManager.dag.finalizedHead.slot:
      # let backfill blocks skip the queue - these are always "fast" to process
      # because there are no state rewinds to deal with
      let res = self.storeBackfillBlock(blck, blobs)

      if resfut != nil:
        resfut.complete(res)
      return

  try:
    # The queue is unbounded, so this can never actually overflow - see the
    # `raiseAssert` below.
    self.blockQueue.addLastNoWait(BlockEntry(
      blck: blck,
      blobs: blobs,
      maybeFinalized: maybeFinalized,
      resfut: resfut, queueTick: Moment.now(),
      validationDur: validationDur,
      src: src))
  except AsyncQueueFullError:
    raiseAssert "unbounded queue"
|
|
|
|
|
2023-04-26 07:00:03 +00:00
|
|
|
# Dedup
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
func checkDuplicateBlocks(self: ref BlockProcessor, entry: BlockEntry): bool =
  ## Returns ``true`` when a block with the same (root, signature) pair has
  ## recently been processed; otherwise records the pair in the dedup buffer
  ## and returns ``false``.
  ##
  ## The buffer is a bounded FIFO: once `MAX_DEDUP_QUEUE_LEN` fingerprints are
  ## held, the oldest one is evicted to make room for the newest.
  let fingerprint = (entry.blck.root, entry.blck.signature)

  if fingerprint in self.dupBlckBuf:
    return true

  # Invariant: the buffer never grows beyond its configured capacity.
  doAssert self.dupBlckBuf.len <= MAX_DEDUP_QUEUE_LEN
  if self.dupBlckBuf.len >= MAX_DEDUP_QUEUE_LEN:
    # At capacity - drop the oldest fingerprint before recording the new one.
    self.dupBlckBuf.shrink(fromFirst = 1)

  self.dupBlckBuf.addLast fingerprint
  false
|
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
# Event Loop
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2022-07-04 20:35:33 +00:00
|
|
|
proc processBlock(
    self: ref BlockProcessor, entry: BlockEntry) {.async.} =
  ## Process a single queued block: store it (together with any blobs), then
  ## either complete `entry.resfut` with the outcome or, if the execution
  ## engine did not deliver a verdict, re-queue the block for a later attempt.
  logScope:
    blockRoot = shortLog(entry.blck.root)

  let
    wallTime = self.getBeaconTime()
    (afterGenesis, wallSlot) = wallTime.toSlot()

  if not afterGenesis:
    # A pre-genesis wall clock means the host clock moved backwards; there is
    # no sensible way to keep processing blocks in that state.
    error "Processing block before genesis, clock turned back?"
    quit 1

  # Skip blocks we have recently seen (matched by root and signature).
  if self.checkDuplicateBlocks(entry):
    if entry.resfut != nil:
      entry.resfut.complete(Result[void, VerifierError].err(
        VerifierError.Duplicate))
    return

  let res = withBlck(entry.blck):
    await self.storeBlock(
      entry.src, wallTime, blck, entry.blobs, entry.maybeFinalized,
      entry.queueTick, entry.validationDur)

  if res.isErr and res.error[1] == ProcessingStatus.notCompleted:
    # When an execution engine returns an error or fails to respond to a
    # payload validity request for some block, a consensus engine:
    # - MUST NOT optimistically import the block.
    # - MUST NOT apply the block to the fork choice store.
    # - MAY queue the block for later processing.
    # https://github.com/ethereum/consensus-specs/blob/v1.3.0/sync/optimistic.md#execution-engine-errors
    # Back off briefly before retrying, then re-enqueue with the original
    # metadata (including `resfut`, which stays pending).
    await sleepAsync(chronos.seconds(1))
    self[].addBlock(
      entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized,
      entry.validationDur)
    # To ensure backpressure on the sync manager, do not complete these futures.
    return

  if entry.resfut != nil:
    # Collapse the (error, status) pair into the single-error result that
    # producers await on.
    entry.resfut.complete(
      if res.isOk(): Result[void, VerifierError].ok()
      else: Result[void, VerifierError].err(res.error()[0]))
|
2022-03-25 11:40:10 +00:00
|
|
|
|
2022-07-04 20:35:33 +00:00
|
|
|
proc runQueueProcessingLoop*(self: ref BlockProcessor) {.async.} =
  ## Drain the block queue forever, handling one block per loop iteration and
  ## yielding to the event loop in between.
  const
    # Cap the wait for an idle dispatcher: when network traffic takes up all
    # CPU we still want block processing to make progress, and a short bounded
    # wait also lets us benefit from more batching / larger network reads
    # while under load.
    idleTimeout = 10.milliseconds

  while true:
    # Cooperative concurrency: networking and CPU-heavy block processing share
    # one thread, so process a single block at a time and give the I/O side a
    # chance to run - otherwise long lockups lead to timeouts.
    discard await idleAsync().withTimeout(idleTimeout)

    await self.processBlock(await self[].blockQueue.popFirst())
|