# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}
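# The push pragma above means that the procs declared below may only raise
# Defect - recoverable errors are reported through Result values instead.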

import
  std/tables,
  chronicles,
  stew/[assign2, results],
  eth/keys,
  ../spec/[
    eth2_merkleization, forks, helpers, signatures, signatures_batch,
    state_transition],
  ../spec/datatypes/[phase0, altair, merge],
  "."/[blockchain_dag]

export results, signatures_batch

# Clearance
# ---------------------------------------------
#
# This module is in charge of making the "quarantined" network blocks
# pass the firewall and be stored in the chain DAG.

logScope:
  topics = "clearance"

proc addResolvedHeadBlock(
    dag: ChainDAGRef,
    state: var StateData,
    trustedBlock: ForkyTrustedSignedBeaconBlock,
    parent: BlockRef, cache: var StateCache,
    onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded | OnMergeBlockAdded,
    stateDataDur, sigVerifyDur, stateVerifyDur: Duration
    ): BlockRef =
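  ## Register a block that has already passed signature and state-transition
  ## checks: link it to its parent, store it in the database, update the head
  ## list and validator key cache, and notify subscribers via `onBlockAdded`.
  ## `state` must hold the post-state of `trustedBlock`; the duration
  ## parameters are carried through for logging only.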
  doAssert getStateField(state.data, slot) == trustedBlock.message.slot,
    "state must match block"
  doAssert state.blck.root == trustedBlock.message.parent_root,
    "the StateData passed into addResolvedHeadBlock has not been updated yet!"

  let
    blockRoot = trustedBlock.root
    blockRef = BlockRef.init(blockRoot, trustedBlock.message)
    startTick = Moment.now()

  link(parent, blockRef)

  dag.blocks.incl(KeyedBlockRef.init(blockRef))

  # Resolved blocks should be stored in database
  dag.putBlock(trustedBlock)
  let putBlockTick = Moment.now()
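
  # If the new block extends one of the existing heads, that head is advanced
  # to the new block; otherwise the block starts a new head (a fork).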
  var foundHead: bool
  for head in dag.heads.mitems():
    if head.isAncestorOf(blockRef):
      head = blockRef
      foundHead = true
      break

  if not foundHead:
    dag.heads.add(blockRef)

  # Up to here, state.data was referring to the new state after the block had
  # been applied but the `blck` field was still set to the parent
  state.blck = blockRef

  # Regardless of the chain we're on, the deposits come in the same order so
  # as soon as we import a block, we'll also update the shared public key
  # cache
  dag.updateValidatorKeys(getStateField(state.data, validators).asSeq())

  # Getting epochRef with the state will potentially create a new EpochRef
  let
    epochRef = dag.getEpochRef(state, cache)
    epochRefTick = Moment.now()

  debug "Block resolved",
    blockRoot = shortLog(blockRoot),
    blck = shortLog(trustedBlock.message),
    heads = dag.heads.len(),
    stateDataDur, sigVerifyDur, stateVerifyDur,
    putBlockDur = putBlockTick - startTick,
    epochRefDur = epochRefTick - putBlockTick

  # Notify others of the new block before processing the quarantine, such that
  # notifications for parents happen before those of the children
  if onBlockAdded != nil:
    onBlockAdded(blockRef, trustedBlock, epochRef)
  if not(isNil(dag.onBlockAdded)):
    dag.onBlockAdded(ForkedTrustedSignedBeaconBlock.init(trustedBlock))

  blockRef

proc checkStateTransition(
    dag: ChainDAGRef, signedBlock: SomeForkySignedBeaconBlock,
    cache: var StateCache): Result[void, BlockError] =
  ## Ensure block can be applied on a state
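  ## On failure, `restore` puts the clearance state back to the head state so
  ## that a rejected block leaves no partially-applied changes behind.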
  func restore(v: var ForkedHashedBeaconState) =
    # TODO address this ugly workaround - there should probably be a
    # `state_transition` that takes a `StateData` instead and updates
    # the block as well
    doAssert v.addr == addr dag.clearanceState.data
    assign(dag.clearanceState, dag.headState)

  logScope:
    blockRoot = shortLog(signedBlock.root)
    blck = shortLog(signedBlock.message)

  if not state_transition_block(
      dag.cfg, dag.clearanceState.data, signedBlock,
      cache, dag.updateFlags, restore):
    info "Invalid block"

    err(BlockError.Invalid)
  else:
    ok()

proc advanceClearanceState*(dag: ChainDAGRef) =
  # When the chain is synced, the most likely block to be produced is the block
  # right after head - we can exploit this assumption and advance the state
  # to that slot before the block arrives, thus allowing us to do the expensive
  # epoch transition ahead of time.
  # Notably, we use the clearance state here because that's where the block will
  # first be seen - later, this state will be copied to the head state!
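  # The state is only advanced when it still sits at its block's own slot,
  # i.e. no empty-slot processing has been done yet.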
  if dag.clearanceState.blck.slot == getStateField(dag.clearanceState.data, slot):
    let next =
      dag.clearanceState.blck.atSlot(dag.clearanceState.blck.slot + 1)

    let startTick = Moment.now()
    var cache = StateCache()
    if not updateStateData(dag, dag.clearanceState, next, true, cache):
      # The next head update will likely fail - something is very wrong here
      error "Cannot advance to next slot, database corrupt?",
        clearance = shortLog(dag.clearanceState.blck),
        next = shortLog(next)
    else:
      debug "Prepared clearance state for next block",
        next, updateStateDur = Moment.now() - startTick

proc addHeadBlock*(
    dag: ChainDAGRef, verifier: var BatchVerifier,
    signedBlock: ForkySignedBeaconBlock,
    onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded | OnMergeBlockAdded
    ): Result[BlockRef, BlockError] =
  ## Try adding a block to the chain, verifying first that it passes the state
  ## transition function and contains a correct cryptographic signature.
  ##
  ## Cryptographic checks can be skipped by adding skipBLSValidation to dag.updateFlags
  logScope:
    blockRoot = shortLog(signedBlock.root)
    blck = shortLog(signedBlock.message)

  template blck(): untyped = signedBlock.message # shortcuts without copy
  template blockRoot(): untyped = signedBlock.root

  if blockRoot in dag:
    debug "Block already exists"

    # We should not call the block added callback for blocks that already
    # existed in the pool, as that may confuse consumers such as the fork
    # choice. While the validation result won't be accessed, it's IGNORE,
    # according to the spec.
    return err(BlockError.Duplicate)

  # If the block we get is older than what we finalized already, we drop it.
  # One way this can happen is that we start requesting a block and finalization
  # happens in the meantime - the block we requested will then be stale
  # by the time it gets here.
  if blck.slot <= dag.finalizedHead.slot:
    debug "Old block, dropping",
      finalizedHead = shortLog(dag.finalizedHead),
      tail = shortLog(dag.tail)

    # Doesn't correspond to any specific validation condition, and still won't
    # be used, but certainly would be IGNORE.
    return err(BlockError.UnviableFork)

  let parent = dag.getRef(blck.parent_root)

  if parent == nil:
    debug "Block parent unknown"
    return err(BlockError.MissingParent)

  if parent.slot >= signedBlock.message.slot:
    # A block whose parent is newer than the block itself is clearly invalid -
    # discard it immediately
    debug "Block with invalid parent, dropping",
      parentBlock = shortLog(parent)

    return err(BlockError.Invalid)

  if (parent.slot < dag.finalizedHead.slot) or
      (parent.slot == dag.finalizedHead.slot and
        parent != dag.finalizedHead.blck):
    # We finalized a block that's newer than the parent of this block - this
    # block, although recent, is thus building on a history we're no longer
    # interested in pursuing. This can happen if a client produces a block
    # while syncing - ie its own head block will be old, but it'll create
    # a block according to the wall clock, in its own little world - this is
    # correct - from their point of view, the head block they have is the
    # latest thing that happened on the chain and they're performing their
    # duty correctly.
    debug "Block from unviable fork",
      finalizedHead = shortLog(dag.finalizedHead),
      tail = shortLog(dag.tail)

    return err(BlockError.UnviableFork)

  # The block is resolved, now it's time to validate it to ensure that the
  # blocks we add to the database are clean for the given state
  let startTick = Moment.now()

  # The clearance state works as the canonical
  # "let's make things permanent" point and saves things to the database -
  # storing things is slow, so we don't want to do so before there's a
  # reasonable chance that the information will become more permanently useful -
  # by the time a new block reaches this point, the parent block will already
  # have "established" itself in the network to some degree at least.
  var cache = StateCache()
  if not updateStateData(
      dag, dag.clearanceState, parent.atSlot(signedBlock.message.slot), true,
      cache):
    # We should never end up here - the parent must be a block no older than,
    # and rooted in, the finalized checkpoint, hence we should always be able
    # to load its corresponding state
    error "Unable to load clearance state for parent block, database corrupt?",
      parent = shortLog(parent.atSlot(signedBlock.message.slot)),
      clearance = shortLog(dag.clearanceState.blck)
    return err(BlockError.MissingParent)

  let stateDataTick = Moment.now()

  # First, batch-verify all signatures in block
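  # Verifying all of the block's BLS signatures in a single batch is
  # significantly cheaper than checking each one individually.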
  if skipBLSValidation notin dag.updateFlags:
    # TODO: remove skipBLSValidation
    var sigs: seq[SignatureSet]
    if (let e = sigs.collectSignatureSets(
        signedBlock, dag.db.immutableValidators,
        dag.clearanceState.data, cache); e.isErr()):
      info "Unable to load signature sets",
        err = e.error()

      # A PublicKey or Signature isn't on the BLS12-381 curve
      return err(BlockError.Invalid)
    if not verifier.batchVerify(sigs):
      info "Block signature verification failed"
      return err(BlockError.Invalid)

  let sigVerifyTick = Moment.now()

  ? checkStateTransition(dag, signedBlock.asSigVerified(), cache)

  let stateVerifyTick = Moment.now()
  # Careful, clearanceState.data has been updated but not blck - we need to
  # create the BlockRef first!
  ok addResolvedHeadBlock(
    dag, dag.clearanceState,
    signedBlock.asTrusted(),
    parent, cache,
    onBlockAdded,
    stateDataDur = stateDataTick - startTick,
    sigVerifyDur = sigVerifyTick - stateDataTick,
    stateVerifyDur = stateVerifyTick - sigVerifyTick)

proc addBackfillBlock*(
    dag: ChainDAGRef,
    signedBlock: ForkySignedBeaconBlock): Result[void, BlockError] =
  ## When performing checkpoint sync, we need to backfill historical blocks
  ## in order to respond to GetBlocksByRange requests. Backfill blocks are
  ## added in backwards order, one by one, based on the `parent_root` of the
  ## earliest block we know about.
  ##
  ## Because only one history is relevant when backfilling, one doesn't have to
  ## consider forks or other finalization-related issues - a block is either
  ## valid and finalized, or not.
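  ##
  ## Backfilled blocks are written to the database and recorded in
  ## `backfillBlocks`, but no state transition is performed for them - only
  ## the proposer signature and the `parent_root` hash chain are checked.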
  logScope:
    blockRoot = shortLog(signedBlock.root)
    blck = shortLog(signedBlock.message)
    backfill = (dag.backfill.slot, shortLog(dag.backfill.parent_root))

  template blck(): untyped = signedBlock.message # shortcuts without copy
  template blockRoot(): untyped = signedBlock.root

  if dag.backfill.slot <= signedBlock.message.slot or
      signedBlock.message.slot <= dag.genesis.slot:
    if blockRoot in dag:
      debug "Block already exists"
      return err(BlockError.Duplicate)

    # The block is newer than our backfill position but not in the dag - either
    # it sits somewhere between backfill and tail or it comes from an unviable
    # fork. We don't have an in-memory way of checking the former condition so
    # we return UnviableFork for that condition as well, even though `Duplicate`
    # would be more correct
    debug "Block unviable or duplicate"
    return err(BlockError.UnviableFork)

  if dag.backfill.parent_root != signedBlock.root:
    debug "Block does not match expected backfill root"
    return err(BlockError.MissingParent) # MissingChild really, but ..

  # If the hash is correct, the block itself must be correct, but the root does
  # not cover the signature, which we check next

  let proposerKey = dag.validatorKey(blck.proposer_index)
  if proposerKey.isNone():
    # This cannot happen, in theory, unless the checkpoint state is broken or
    # there is a bug in our validator key caching scheme - in order not to
    # send invalid attestations, we'll shut down defensively here - this might
    # need revisiting in the future.
    fatal "Invalid proposer in backfill block - checkpoint state corrupt?"
    quit 1

  if not verify_block_signature(
      dag.forkAtEpoch(blck.slot.epoch),
      getStateField(dag.headState.data, genesis_validators_root),
      blck.slot,
      signedBlock.root,
      proposerKey.get(),
      signedBlock.signature):
    info "Block signature verification failed"
    return err(BlockError.Invalid)

  dag.putBlock(signedBlock.asTrusted())
  dag.backfill = blck.toBeaconBlockSummary()

  # Invariants maintained on startup
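  # - there is one `backfillBlocks` entry for every slot before the tail
  # - the block being backfilled is always older than the tail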
  doAssert dag.backfillBlocks.lenu64 == dag.tail.slot.uint64
  doAssert dag.backfillBlocks.lenu64 > blck.slot.uint64

  dag.backfillBlocks[blck.slot.int] = signedBlock.root

  debug "Block backfilled"

  ok()