2021-03-11 10:10:57 +00:00
|
|
|
# beacon_chain
|
2022-01-18 13:36:52 +00:00
|
|
|
# Copyright (c) 2018-2022 Status Research & Development GmbH
|
2021-03-11 10:10:57 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
2021-05-28 16:34:00 +00:00
|
|
|
{.push raises: [Defect].}
|
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
import
|
|
|
|
stew/results,
|
|
|
|
chronicles, chronos, metrics,
|
2022-07-04 20:35:33 +00:00
|
|
|
../spec/signatures_batch,
|
2021-08-18 18:57:58 +00:00
|
|
|
../sszdump
|
2021-03-11 10:10:57 +00:00
|
|
|
|
2022-07-13 14:13:54 +00:00
|
|
|
from ../consensus_object_pools/consensus_manager import
|
2022-07-04 20:35:33 +00:00
|
|
|
ConsensusManager, updateHead, updateHeadWithExecution
|
|
|
|
from ../beacon_clock import GetBeaconTimeFn, toFloatSeconds
|
|
|
|
from ../consensus_object_pools/block_dag import BlockRef, root, slot
|
|
|
|
from ../consensus_object_pools/block_pools_types import BlockError, EpochRef
|
|
|
|
from ../consensus_object_pools/block_quarantine import
|
|
|
|
addOrphan, addUnviable, pop, removeOrphan
|
|
|
|
from ../validators/validator_monitor import
|
|
|
|
MsgSource, ValidatorMonitor, registerAttestationInBlock, registerBeaconBlock,
|
|
|
|
registerSyncAggregateInBlock
|
|
|
|
|
2021-12-06 09:49:01 +00:00
|
|
|
export sszdump, signatures_batch
|
2021-07-15 19:01:07 +00:00
|
|
|
|
2021-05-28 16:34:00 +00:00
|
|
|
# Block Processor
|
2021-03-11 10:10:57 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
2021-05-28 16:34:00 +00:00
|
|
|
# The block processor moves blocks from "Incoming" to "Consensus verified"
|
2021-03-11 10:10:57 +00:00
|
|
|
|
|
|
|
# Prometheus histogram tracking how long each storeBlock() call takes, in
# seconds (buckets chosen for sub-second gossip blocks up to slow sync blocks).
declareHistogram beacon_store_block_duration_seconds,
  "storeBlock() duration", buckets = [0.25, 0.5, 1, 2, 4, 8, Inf]
|
|
|
|
|
|
|
|
type
  BlockEntry* = object
    ## A single unit of work for the block queue: the block itself plus
    ## bookkeeping needed to report the outcome back to the producer.
    blck*: ForkedSignedBeaconBlock
    resfut*: Future[Result[void, BlockError]]
      ## Completed with the verification outcome; nil when the producer does
      ## not wait for a result
    queueTick*: Moment # Moment when block was enqueued
    validationDur*: Duration # Time it took to perform gossip validation
    src*: MsgSource # Where the block came from (gossip, sync, request)

  BlockProcessor* = object
    ## This manages the processing of blocks from different sources
    ## Blocks and attestations are enqueued in a gossip-validated state
    ##
    ## from:
    ## - Gossip (when synced)
    ## - SyncManager (during sync)
    ## - RequestManager (missing ancestor blocks)
    ##
    ## are then consensus-verified and added to:
    ## - the blockchain DAG
    ## - database
    ## - attestation pool
    ## - fork choice
    ##
    ## The processor will also reinsert blocks from the quarantine, should a
    ## parent be found.

    # Config
    # ----------------------------------------------------------------
    dumpEnabled: bool
    dumpDirInvalid: string # directory for blocks that failed verification
    dumpDirIncoming: string # directory for blocks with a missing parent
    safeSlotsToImportOptimistically: uint16
      ## SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY from the optimistic sync spec

    # Producers
    # ----------------------------------------------------------------
    blockQueue: AsyncQueue[BlockEntry]

    # Consumer
    # ----------------------------------------------------------------
    consensusManager: ref ConsensusManager
      ## Blockchain DAG, AttestationPool, Quarantine, and Eth1Manager
    validatorMonitor: ref ValidatorMonitor
    getBeaconTime: GetBeaconTimeFn

    verifier: BatchVerifier # batch BLS signature verification
|
|
|
|
|
2022-03-25 11:40:10 +00:00
|
|
|
# Forward declaration - the implementation lives in the "Enqueue" section
# below; storeBlock needs it to re-enqueue blocks popped from the quarantine.
proc addBlock*(
    self: var BlockProcessor, src: MsgSource, blck: ForkedSignedBeaconBlock,
    resfut: Future[Result[void, BlockError]] = nil,
    validationDur = Duration())
|
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
# Initialization
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2021-05-28 16:34:00 +00:00
|
|
|
proc new*(T: type BlockProcessor,
          dumpEnabled: bool,
          dumpDirInvalid, dumpDirIncoming: string,
          rng: ref HmacDrbgContext, taskpool: TaskPoolPtr,
          consensusManager: ref ConsensusManager,
          validatorMonitor: ref ValidatorMonitor,
          getBeaconTime: GetBeaconTimeFn,
          safeSlotsToImportOptimistically: uint16): ref BlockProcessor =
  ## Allocate and initialize a BlockProcessor with an empty block queue.
  ## `rng` and `taskpool` feed the batch signature verifier; the remaining
  ## parameters wire the processor to its consumers and configuration.
  result = (ref BlockProcessor)(
    # Dump configuration
    dumpEnabled: dumpEnabled,
    dumpDirInvalid: dumpDirInvalid,
    dumpDirIncoming: dumpDirIncoming,
    safeSlotsToImportOptimistically: safeSlotsToImportOptimistically,
    # Consumers
    consensusManager: consensusManager,
    validatorMonitor: validatorMonitor,
    getBeaconTime: getBeaconTime,
    # Work queue and crypto
    blockQueue: newAsyncQueue[BlockEntry](),
    verifier: BatchVerifier(rng: rng, taskpool: taskpool))
|
2021-03-11 10:10:57 +00:00
|
|
|
|
|
|
|
# Sync callbacks
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func hasBlocks*(self: BlockProcessor): bool =
  ## True when at least one block is waiting in the processing queue.
  self.blockQueue.len != 0
|
2021-04-26 20:39:44 +00:00
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
# Storage
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2021-11-05 15:39:47 +00:00
|
|
|
proc dumpInvalidBlock*(
    self: BlockProcessor, signedBlock: ForkySignedBeaconBlock) =
  ## Write a block that failed verification to the invalid-block dump
  ## directory, when dumping is enabled.
  if not self.dumpEnabled:
    return
  dump(self.dumpDirInvalid, signedBlock)
|
|
|
|
|
2022-01-26 12:20:08 +00:00
|
|
|
proc dumpBlock[T](
    self: BlockProcessor,
    signedBlock: ForkySignedBeaconBlock,
    res: Result[T, BlockError]) =
  ## Persist rejected blocks to disk for later debugging, routed by the kind
  ## of failure: invalid blocks go to the invalid dir, blocks with a missing
  ## parent to the incoming dir; all other outcomes are not dumped.
  if not self.dumpEnabled:
    return
  if res.isOk:
    return
  let error = res.error
  if error == BlockError.Invalid:
    self.dumpInvalidBlock(signedBlock)
  elif error == BlockError.MissingParent:
    dump(self.dumpDirIncoming, signedBlock)
|
|
|
|
|
2022-07-04 20:35:33 +00:00
|
|
|
from ../consensus_object_pools/block_clearance import
|
|
|
|
addBackfillBlock, addHeadBlock
|
|
|
|
|
2022-01-26 12:20:08 +00:00
|
|
|
proc storeBackfillBlock(
    self: var BlockProcessor,
    signedBlock: ForkySignedBeaconBlock): Result[void, BlockError] =
  ## Store a block that lies at or before the finalized head ("backfill").
  ## Backfill blocks bypass the async queue because they require no state
  ## rewinds; the quarantine is still updated so sync doesn't retry blocks
  ## on unviable forks.

  # The block is certainly not missing any more
  self.consensusManager.quarantine[].missing.del(signedBlock.root)

  let res = self.consensusManager.dag.addBackfillBlock(signedBlock)

  if res.isErr():
    case res.error
    of BlockError.MissingParent:
      if signedBlock.message.parent_root in
          self.consensusManager.quarantine[].unviable:
        # DAG doesn't know about unviable ancestor blocks - we do! Translate
        # this to the appropriate error so that sync etc doesn't retry the block
        self.consensusManager.quarantine[].addUnviable(signedBlock.root)

        return err(BlockError.UnviableFork)
    of BlockError.UnviableFork:
      # Track unviables so that descendants can be discarded properly
      self.consensusManager.quarantine[].addUnviable(signedBlock.root)
    else: discard

  res
|
|
|
|
|
2022-07-04 20:35:33 +00:00
|
|
|
from ../consensus_object_pools/attestation_pool import addForkChoice
|
|
|
|
from ../consensus_object_pools/spec_cache import get_attesting_indices
|
|
|
|
from ../spec/datatypes/phase0 import TrustedSignedBeaconBlock
|
|
|
|
|
2021-10-19 15:20:55 +00:00
|
|
|
proc storeBlock*(
    self: var BlockProcessor,
    src: MsgSource, wallTime: BeaconTime,
    signedBlock: ForkySignedBeaconBlock, payloadValid: bool,
    queueTick: Moment = Moment.now(),
    validationDur = Duration()): Result[BlockRef, BlockError] =
  ## storeBlock is the main entry point for unvalidated blocks - all untrusted
  ## blocks, regardless of origin, pass through here. When storing a block,
  ## we will add it to the dag and pass it to all block consumers that need
  ## to know about it, such as the fork choice and the monitoring
  let
    attestationPool = self.consensusManager.attestationPool
    startTick = Moment.now()
    vm = self.validatorMonitor
    dag = self.consensusManager.dag

  # The block is certainly not missing any more
  self.consensusManager.quarantine[].missing.del(signedBlock.root)

  # We'll also remove the block as an orphan: it's unlikely the parent is
  # missing if we get this far - should that be the case, the block will
  # be re-added later
  self.consensusManager.quarantine[].removeOrphan(signedBlock)

  type Trusted = typeof signedBlock.asTrusted()
  # The `do` callback runs only when the DAG accepts the block, with the
  # now-trusted block - this is where downstream consumers get notified.
  let blck = dag.addHeadBlock(self.verifier, signedBlock, payloadValid) do (
      blckRef: BlockRef, trustedBlock: Trusted,
      epochRef: EpochRef, unrealized: FinalityCheckpoints):
    # Callback add to fork choice if valid
    attestationPool[].addForkChoice(
      epochRef, blckRef, unrealized, trustedBlock.message, wallTime)

    vm[].registerBeaconBlock(
      src, wallTime, trustedBlock.message)

    # Feed the validator monitor every attestation packed in the block
    for attestation in trustedBlock.message.body.attestations:
      for validator_index in dag.get_attesting_indices(attestation):
        vm[].registerAttestationInBlock(attestation.data, validator_index,
          trustedBlock.message.slot)

    withState(dag[].clearanceState):
      when stateFork >= BeaconStateFork.Altair and
          Trusted isnot phase0.TrustedSignedBeaconBlock: # altair+
        for i in trustedBlock.message.body.sync_aggregate.sync_committee_bits.oneIndices():
          vm[].registerSyncAggregateInBlock(
            trustedBlock.message.slot, trustedBlock.root,
            state.data.current_sync_committee.pubkeys.data[i])

  self.dumpBlock(signedBlock, blck)

  # There can be a scenario where we receive a block we already received.
  # However this block was before the last finalized epoch and so its parent
  # was pruned from the ForkChoice.
  if blck.isErr():
    case blck.error()
    of BlockError.MissingParent:
      if signedBlock.message.parent_root in
          self.consensusManager.quarantine[].unviable:
        # DAG doesn't know about unviable ancestor blocks - we do! Translate
        # this to the appropriate error so that sync etc doesn't retry the block
        self.consensusManager.quarantine[].addUnviable(signedBlock.root)

        return err(BlockError.UnviableFork)

      if not self.consensusManager.quarantine[].addOrphan(
          dag.finalizedHead.slot, ForkedSignedBeaconBlock.init(signedBlock)):
        debug "Block quarantine full",
          blockRoot = shortLog(signedBlock.root),
          blck = shortLog(signedBlock.message),
          signature = shortLog(signedBlock.signature)
    of BlockError.UnviableFork:
      # Track unviables so that descendants can be discarded properly
      self.consensusManager.quarantine[].addUnviable(signedBlock.root)
    else: discard

    return blck

  let storeBlockTick = Moment.now()

  # Eagerly update head: the incoming block "should" get selected.
  #
  # storeBlock gets called from validator_duties, which depends on its not
  # blocking progress any longer than necessary, and processBlock here, in
  # which case it's fine to await for a while on engine API results.
  if not is_execution_block(signedBlock.message):
    self.consensusManager[].updateHead(wallTime.slotOrZero)
  else:
    # This primarily exists to ensure that by the time the DAG updateHead is
    # called valid blocks have already been registered as verified. The head
    # can lag a slot behind wall clock, complicating detecting synced status
    # for validating, otherwise.
    asyncSpawn self.consensusManager.updateHeadWithExecution(
      wallTime.slotOrZero)

  # Timing breakdown for the "Block processed" log line and metrics
  let
    updateHeadTick = Moment.now()
    queueDur = startTick - queueTick
    storeBlockDur = storeBlockTick - startTick
    updateHeadDur = updateHeadTick - storeBlockTick

  beacon_store_block_duration_seconds.observe(storeBlockDur.toFloatSeconds())

  debug "Block processed",
    localHeadSlot = self.consensusManager.dag.head.slot,
    blockSlot = blck.get().slot,
    validationDur, queueDur, storeBlockDur, updateHeadDur

  for quarantined in self.consensusManager.quarantine[].pop(blck.get().root):
    # Process the blocks that had the newly accepted block as parent
    self.addBlock(MsgSource.gossip, quarantined)

  return blck
|
2021-03-11 10:10:57 +00:00
|
|
|
|
2022-01-26 12:20:08 +00:00
|
|
|
# Enqueue
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
proc addBlock*(
    self: var BlockProcessor, src: MsgSource, blck: ForkedSignedBeaconBlock,
    resfut: Future[Result[void, BlockError]] = nil,
    validationDur = Duration()) =
  ## Enqueue a Gossip-validated block for consensus verification
  # Backpressure:
  #   There is no backpressure here - producers must wait for `resfut` to
  #   constrain their own processing
  # Producers:
  # - Gossip (when synced)
  # - SyncManager (during sync)
  # - RequestManager (missing ancestor blocks)

  withBlck(blck):
    if blck.message.slot <= self.consensusManager.dag.finalizedHead.slot:
      # let backfill blocks skip the queue - these are always "fast" to process
      # because there are no state rewinds to deal with
      let res = self.storeBackfillBlock(blck)

      if resfut != nil:
        resfut.complete(res)
      return

  try:
    # The queue is unbounded, so addLastNoWait cannot actually fail - see
    # the raiseAssert below.
    self.blockQueue.addLastNoWait(BlockEntry(
      blck: blck,
      resfut: resfut, queueTick: Moment.now(),
      validationDur: validationDur,
      src: src))
  except AsyncQueueFullError:
    raiseAssert "unbounded queue"
|
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
# Event Loop
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2022-07-04 20:35:33 +00:00
|
|
|
proc processBlock(
    self: var BlockProcessor, entry: BlockEntry, payloadValid: bool) =
  ## Consume one queue entry: store the block and complete the producer's
  ## future (if any) with the outcome. `payloadValid` reflects the execution
  ## engine's verdict as determined by the processing loop.
  logScope:
    blockRoot = shortLog(entry.blck.root)

  let
    wallTime = self.getBeaconTime()
    (afterGenesis, wallSlot) = wallTime.toSlot()

  if not afterGenesis:
    # A pre-genesis wall clock means the host clock moved backwards - this is
    # unrecoverable for consensus processing, so exit outright.
    error "Processing block before genesis, clock turned back?"
    quit 1

  let res = withBlck(entry.blck):
    self.storeBlock(
      entry.src, wallTime, blck, payloadValid, entry.queueTick,
      entry.validationDur)

  # Translate the BlockRef result into the void result the producer awaits
  if entry.resfut != nil:
    entry.resfut.complete(
      if res.isOk(): Result[void, BlockError].ok()
      else: Result[void, BlockError].err(res.error()))
|
2021-03-11 10:10:57 +00:00
|
|
|
|
2022-07-04 20:35:33 +00:00
|
|
|
from eth/async_utils import awaitWithTimeout
|
|
|
|
from web3/engine_api_types import PayloadExecutionStatus, PayloadStatusV1
|
|
|
|
from ../eth1/eth1_monitor import
|
|
|
|
Eth1Monitor, asEngineExecutionPayload, ensureDataProvider, newPayload
|
|
|
|
from ../spec/datatypes/bellatrix import ExecutionPayload, SignedBeaconBlock
|
2022-03-25 11:40:10 +00:00
|
|
|
|
2022-04-05 08:40:59 +00:00
|
|
|
proc newExecutionPayload*(
    eth1Monitor: Eth1Monitor, executionPayload: bellatrix.ExecutionPayload):
    Future[PayloadExecutionStatus] {.async.} =
  ## Submit an execution payload to the execution engine via engine_newPayload
  ## and return its status. Degrades to `syncing` (rather than raising) when
  ## the monitor is absent, the call times out, or the call fails - the
  ## caller treats `syncing` as "verdict unknown".
  if eth1Monitor.isNil:
    warn "newPayload: attempting to process execution payload without an Eth1Monitor. Ensure --web3-url setting is correct."
    return PayloadExecutionStatus.syncing

  debug "newPayload: inserting block into execution engine",
    parentHash = executionPayload.parent_hash,
    blockHash = executionPayload.block_hash,
    stateRoot = shortLog(executionPayload.state_root),
    receiptsRoot = shortLog(executionPayload.receipts_root),
    prevRandao = shortLog(executionPayload.prev_randao),
    blockNumber = executionPayload.block_number,
    gasLimit = executionPayload.gas_limit,
    gasUsed = executionPayload.gas_used,
    timestamp = executionPayload.timestamp,
    extraDataLen = executionPayload.extra_data.len,
    baseFeePerGas = $executionPayload.base_fee_per_gas,
    numTransactions = executionPayload.transactions.len

  try:
    let
      payloadResponse =
        # On timeout, fall back to a synthetic `syncing` response rather
        # than blocking the processing loop indefinitely
        awaitWithTimeout(
            eth1Monitor.newPayload(
              executionPayload.asEngineExecutionPayload),
            NEWPAYLOAD_TIMEOUT):
          info "newPayload: newPayload timed out"
          PayloadStatusV1(status: PayloadExecutionStatus.syncing)
      payloadStatus = payloadResponse.status

    debug "newPayload: succeeded",
      parentHash = executionPayload.parent_hash,
      blockHash = executionPayload.block_hash,
      blockNumber = executionPayload.block_number,
      payloadStatus

    return payloadStatus
  except CatchableError as err:
    debug "newPayload failed", msg = err.msg
    return PayloadExecutionStatus.syncing
|
|
|
|
|
2022-07-04 20:35:33 +00:00
|
|
|
from ../consensus_object_pools/blockchain_dag import
|
|
|
|
getBlockRef, loadExecutionBlockRoot, markBlockInvalid
|
2022-04-05 08:40:59 +00:00
|
|
|
|
2022-07-04 20:35:33 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/sync/optimistic.md#helpers
|
|
|
|
proc is_optimistic_candidate_block(
    self: BlockProcessor, blck: ForkedSignedBeaconBlock): bool =
  ## Whether the block may be imported optimistically (i.e. without a
  ## definitive execution-engine verdict), per the optimistic sync spec.
  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/sync/optimistic.md#when-to-optimistically-import-blocks
  # The current slot (as per the system clock) is at least
  # `SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY` ahead of the slot of the block being
  # imported.
  if blck.slot + self.safeSlotsToImportOptimistically <=
      self.getBeaconTime().slotOrZero:
    return true

  # Unknown parent: cannot be a candidate
  let
    parentRoot = withBlck(blck): blck.message.parent_root
    parentBlck = self.consensusManager.dag.getBlockRef(parentRoot).valueOr:
      return false

  # The parent of the block has execution enabled.
  not self.consensusManager.dag.loadExecutionBlockRoot(parentBlck).isZero
|
|
|
|
|
|
|
|
proc runQueueProcessingLoop*(self: ref BlockProcessor) {.async.} =
  ## Main event loop: pop one block at a time from the queue, run its
  ## execution payload (if any) through the execution engine, then either
  ## process, quarantine, or reject it. Every iteration ends by completing
  ## `blck.resfut` (directly or via processBlock) so producers never stall.
  while true:
    # Cooperative concurrency: one block per loop iteration - because
    # we run both networking and CPU-heavy things like block processing
    # on the same thread, we need to make sure that there is steady progress
    # on the networking side or we get long lockups that lead to timeouts.
    const
      # We cap waiting for an idle slot in case there's a lot of network traffic
      # taking up all CPU - we don't want to _completely_ stop processing blocks
      # in this case - doing so also allows us to benefit from more batching /
      # larger network reads when under load.
      idleTimeout = 10.milliseconds

    discard await idleAsync().withTimeout(idleTimeout)

    let
      blck = await self[].blockQueue.popFirst()
      hasExecutionPayload =
        withBlck(blck.blck): blck.message.is_execution_block
      executionPayloadStatus =
        if hasExecutionPayload:
          # Eth1 syncing is asynchronous from this
          # TODO self.consensusManager.eth1Monitor.terminalBlockHash.isSome
          # should gate this when it works more reliably
          # TODO detect have-TTD-but-not-is_execution_block case, and where
          # execution payload was non-zero when TTD detection more reliable
          when true:
            try:
              # Minimize window for Eth1 monitor to shut down connection
              await self.consensusManager.eth1Monitor.ensureDataProvider()

              let executionPayload =
                withBlck(blck.blck):
                  when stateFork >= BeaconStateFork.Bellatrix:
                    blck.message.body.execution_payload
                  else:
                    # Unreachable: hasExecutionPayload implies Bellatrix+
                    doAssert false
                    default(bellatrix.ExecutionPayload) # satisfy Nim

              await newExecutionPayload(
                self.consensusManager.eth1Monitor, executionPayload)
            except CatchableError as err:
              info "runQueueProcessingLoop: newPayload failed",
                err = err.msg
              # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/sync/optimistic.md#execution-engine-errors
              # Engine errors are treated like a missing parent so sync retries
              if not blck.resfut.isNil:
                blck.resfut.complete(
                  Result[void, BlockError].err(BlockError.MissingParent))
              continue
          else:
            debug "runQueueProcessingLoop: got execution payload before TTD"
            PayloadExecutionStatus.syncing
        else:
          # Vacuously
          PayloadExecutionStatus.valid

    if executionPayloadStatus in static([
        PayloadExecutionStatus.invalid,
        PayloadExecutionStatus.invalid_block_hash]):
      # Definitive engine rejection: mark the block (and descendants) unviable
      debug "runQueueProcessingLoop: execution payload invalid",
        executionPayloadStatus,
        blck = shortLog(blck.blck)
      self.consensusManager.dag.markBlockInvalid(blck.blck.root)
      self.consensusManager.quarantine[].addUnviable(blck.blck.root)
      # Every loop iteration ends with some version of blck.resfut.complete(),
      # including processBlock(), otherwise the sync manager stalls.
      if not blck.resfut.isNil:
        blck.resfut.complete(Result[void, BlockError].err(BlockError.Invalid))
    else:
      if executionPayloadStatus == PayloadExecutionStatus.valid or
          self[].is_optimistic_candidate_block(blck.blck):
        self[].processBlock(
          blck, executionPayloadStatus == PayloadExecutionStatus.valid)
      else:
        debug "runQueueProcessingLoop: block cannot be optimistically imported",
          blck = shortLog(blck.blck)
        if not blck.resfut.isNil:
          blck.resfut.complete(
            Result[void, BlockError].err(BlockError.MissingParent))
|