nimbus-eth2/beacon_chain/gossip_processing/block_processor.nim


# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}

import
  std/math,
  stew/results,
  chronicles, chronos, metrics,
  ../spec/datatypes/[phase0, altair, merge],
  ../spec/[forks],
  ../consensus_object_pools/[block_clearance, blockchain_dag, attestation_pool],
  ./consensus_manager,
  ".."/[beacon_clock],
  ../sszdump

export sszdump

# Block Processor
# ------------------------------------------------------------------------------
# The block processor moves blocks from "Incoming" to "Consensus verified"

declareHistogram beacon_store_block_duration_seconds,
  "storeBlock() duration", buckets = [0.25, 0.5, 1, 2, 4, 8, Inf]

type
  BlockEntry* = object
    blck*: ForkedSignedBeaconBlock
    resfut*: Future[Result[void, BlockError]]
    queueTick*: Moment # Moment when block was enqueued
    validationDur*: Duration # Time it took to perform gossip validation

  BlockProcessor* = object
    ## This manages the processing of blocks from different sources.
    ## Blocks are enqueued in a gossip-validated state,
    ##
    ## coming from:
    ## - Gossip (when synced)
    ## - SyncManager (during sync)
    ## - RequestManager (missing ancestor blocks)
    ##
    ## and are then consensus-verified and added to:
    ## - the blockchain DAG
    ## - the database
    ## - the attestation pool
    ## - fork choice

    # Config
    # ----------------------------------------------------------------
    dumpEnabled: bool
    dumpDirInvalid: string
    dumpDirIncoming: string

    # Producers
    # ----------------------------------------------------------------
    blocksQueue*: AsyncQueue[BlockEntry] # Exported for "test_sync_manager"

    # Consumer
    # ----------------------------------------------------------------
    consensusManager: ref ConsensusManager
      ## Blockchain DAG, AttestationPool and Quarantine
    getBeaconTime: GetBeaconTimeFn

# Initialization
# ------------------------------------------------------------------------------

proc new*(T: type BlockProcessor,
          dumpEnabled: bool,
          dumpDirInvalid, dumpDirIncoming: string,
          consensusManager: ref ConsensusManager,
          getBeaconTime: GetBeaconTimeFn): ref BlockProcessor =
  (ref BlockProcessor)(
    dumpEnabled: dumpEnabled,
    dumpDirInvalid: dumpDirInvalid,
    dumpDirIncoming: dumpDirIncoming,
    blocksQueue: newAsyncQueue[BlockEntry](),
    consensusManager: consensusManager,
    getBeaconTime: getBeaconTime)
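
# A construction sketch (hypothetical caller, not part of this module): the
# node creates one BlockProcessor at startup and shares it between the gossip
# and sync paths. `dag`, `attestationPool`, `quarantine` and `getBeaconTime`
# are assumed to be initialized elsewhere, and the ConsensusManager.new
# signature shown here is an assumption.
#
#   let
#     consensusManager = ConsensusManager.new(dag, attestationPool, quarantine)
#     blockProcessor = BlockProcessor.new(
#       dumpEnabled = false, dumpDirInvalid = "", dumpDirIncoming = "",
#       consensusManager, getBeaconTime)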

# Sync callbacks
# ------------------------------------------------------------------------------

proc done*(entry: BlockEntry) =
  ## Send signal to [Sync/Request]Manager that the block ``entry`` has passed
  ## verification successfully.
  if entry.resfut != nil:
    entry.resfut.complete(Result[void, BlockError].ok())

proc fail*(entry: BlockEntry, error: BlockError) =
  ## Send signal to [Sync/Request]Manager that the block ``entry`` has NOT
  ## passed verification, with the specific ``error``.
  if entry.resfut != nil:
    entry.resfut.complete(Result[void, BlockError].err(error))

proc hasBlocks*(self: BlockProcessor): bool =
  self.blocksQueue.len() > 0

# Enqueue
# ------------------------------------------------------------------------------

proc addBlock*(
    self: var BlockProcessor, blck: ForkedSignedBeaconBlock,
    resfut: Future[Result[void, BlockError]] = nil,
    validationDur = Duration()) =
  ## Enqueue a Gossip-validated block for consensus verification
  # Backpressure:
  #   There is no backpressure here - producers must wait for `resfut` to
  #   constrain their own processing (see the sketch after this proc)
  # Producers:
  # - Gossip (when synced)
  # - SyncManager (during sync)
  # - RequestManager (missing ancestor blocks)

  # addLastNoWait cannot fail with an unbounded queue, but we guard against
  # it anyway as a sanity check
  try:
    self.blocksQueue.addLastNoWait(BlockEntry(
      blck: blck,
      resfut: resfut, queueTick: Moment.now(),
      validationDur: validationDur))
  except AsyncQueueFullError:
    raiseAssert "unbounded queue"
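
# A producer-side sketch (hypothetical, not part of this module): since the
# queue is unbounded, a producer that wants backpressure creates `resfut`
# itself and awaits it - `done`/`fail` above complete it - before enqueueing
# the next block. `produce` is an illustrative name only.
#
#   proc produce(self: ref BlockProcessor,
#                blck: ForkedSignedBeaconBlock) {.async.} =
#     let resfut = newFuture[Result[void, BlockError]]("produce")
#     self[].addBlock(blck, resfut)
#     let res = await resfut # resolves once consensus verification is done
#     if res.isErr:
#       debug "Block not accepted", error = res.error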

# Storage
# ------------------------------------------------------------------------------

proc dumpBlock*[T](
    self: BlockProcessor,
    signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock |
                 merge.SignedBeaconBlock,
    res: Result[T, (ValidationResult, BlockError)]) =
  if self.dumpEnabled and res.isErr:
    case res.error[1]
    of Invalid:
      dump(self.dumpDirInvalid, signedBlock)
    of MissingParent:
      dump(self.dumpDirIncoming, signedBlock)
    else:
      discard
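
# Note (an assumption from the imports, not stated in this file): `dump` comes
# from ../sszdump and writes the block to disk as an SSZ file, so
# `dumpDirInvalid` collects blocks that failed verification outright while
# `dumpDirIncoming` collects blocks whose parent is not yet known.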

proc storeBlock*(
    self: var BlockProcessor,
    signedBlock: phase0.SignedBeaconBlock | altair.SignedBeaconBlock |
                 merge.SignedBeaconBlock,
    wallSlot: Slot): Result[BlockRef, BlockError] =
  let
    attestationPool = self.consensusManager.attestationPool

  type Trusted = typeof signedBlock.asTrusted()

  let blck = self.consensusManager.dag.addRawBlock(
      self.consensusManager.quarantine, signedBlock) do (
      blckRef: BlockRef, trustedBlock: Trusted, epochRef: EpochRef):
    # Callback: `addRawBlock` invokes this `do` block only if the block passes
    # consensus verification - only then is it added to fork choice
    attestationPool[].addForkChoice(
      epochRef, blckRef, trustedBlock.message, wallSlot)

  self.dumpBlock(signedBlock, blck)

  # There can be a scenario where we receive a block we already received.
  # However, this block could be older than the last finalized epoch, in
  # which case its parent has been pruned from fork choice.
  if blck.isErr:
    return err(blck.error[1])

  ok(blck.get())
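
# A direct-call sketch (hypothetical): sync code that already has a decoded
# block in hand could call `storeBlock` without going through the queue;
# `Duplicate` and `Old` errors are expected and harmless during sync.
#
#   let res = self.storeBlock(signedBlock, wallSlot)
#   if res.isOk:
#     notice "Block stored", blck = shortLog(res.get())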

# Event Loop
# ------------------------------------------------------------------------------

proc processBlock(self: var BlockProcessor, entry: BlockEntry) =
  logScope:
    blockRoot = shortLog(entry.blck.root)

  let
    wallTime = self.getBeaconTime()
    (afterGenesis, wallSlot) = wallTime.toSlot()

  if not afterGenesis:
    error "Processing block before genesis, clock turned back?"
    quit 1

  let
    startTick = Moment.now()
    res = withBlck(entry.blck): self.storeBlock(blck, wallSlot)
    storeBlockTick = Moment.now()

  if res.isOk():
    # Eagerly update head in case the new block gets selected
    self.consensusManager[].updateHead(wallSlot)

    let
      updateHeadTick = Moment.now()
      queueDur = startTick - entry.queueTick
      storeBlockDur = storeBlockTick - startTick
      updateHeadDur = updateHeadTick - storeBlockTick

    beacon_store_block_duration_seconds.observe(storeBlockDur.toFloatSeconds())

    debug "Block processed",
      localHeadSlot = self.consensusManager.dag.head.slot,
      blockSlot = entry.blck.slot,
      validationDur = entry.validationDur,
      queueDur, storeBlockDur, updateHeadDur

    entry.done()
  elif res.error() in {BlockError.Duplicate, BlockError.Old}:
    # Duplicate and old blocks are ok from a sync point of view, so we mark
    # them as successful
    entry.done()
  else:
    entry.fail(res.error())

proc runQueueProcessingLoop*(self: ref BlockProcessor) {.async.} =
  while true:
    # Cooperative concurrency: one block per loop iteration - because we run
    # both networking and CPU-heavy things like block processing on the same
    # thread, we need to make sure that there is steady progress on the
    # networking side or we get long lockups that lead to timeouts.
    const
      # We cap waiting for an idle slot in case there's a lot of network
      # traffic taking up all CPU - we don't want to _completely_ stop
      # processing blocks in this case - doing so also allows us to benefit
      # from more batching / larger network reads when under load.
      idleTimeout = 10.milliseconds

    discard await idleAsync().withTimeout(idleTimeout)

    self[].processBlock(await self[].blocksQueue.popFirst())
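
# A wiring sketch (hypothetical, not part of this module): the loop is a
# long-running task, typically started once at node startup with asyncSpawn
# and left to drain the queue for the lifetime of the process.
#
#   asyncSpawn blockProcessor.runQueueProcessingLoop()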