2021-11-25 18:41:39 +00:00
|
|
|
# beacon_chain
|
2022-01-04 09:45:38 +00:00
|
|
|
# Copyright (c) 2018-2022 Status Research & Development GmbH
|
2021-11-25 18:41:39 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
2021-12-01 12:55:42 +00:00
|
|
|
{.used.}
|
|
|
|
|
2021-11-25 18:41:39 +00:00
|
|
|
import
|
|
|
|
# Standard library
|
2021-12-09 12:56:54 +00:00
|
|
|
std/[json, os, strutils, tables],
|
2021-11-25 18:41:39 +00:00
|
|
|
# Status libraries
|
2021-12-01 12:55:42 +00:00
|
|
|
stew/[results, endians2], chronicles,
|
2021-11-25 18:41:39 +00:00
|
|
|
eth/keys, taskpools,
|
|
|
|
# Internals
|
|
|
|
../../beacon_chain/spec/[helpers, forks],
|
|
|
|
../../beacon_chain/spec/datatypes/[
|
|
|
|
base,
|
2022-01-12 14:50:30 +00:00
|
|
|
phase0, altair, bellatrix],
|
2021-11-25 18:41:39 +00:00
|
|
|
../../beacon_chain/fork_choice/[fork_choice, fork_choice_types],
|
2021-12-21 18:56:08 +00:00
|
|
|
../../beacon_chain/[beacon_chain_db, beacon_clock],
|
2021-11-25 18:41:39 +00:00
|
|
|
../../beacon_chain/consensus_object_pools/[
|
2021-12-06 09:49:01 +00:00
|
|
|
blockchain_dag, block_clearance, spec_cache],
|
2021-11-25 18:41:39 +00:00
|
|
|
# Third-party
|
|
|
|
yaml,
|
|
|
|
# Test
|
|
|
|
../testutil,
|
|
|
|
./fixtures_utils
|
|
|
|
|
2021-12-13 15:45:48 +00:00
|
|
|
# Test format described at https://github.com/ethereum/consensus-specs/tree/v1.1.6/tests/formats/fork_choice
|
2021-11-25 18:41:39 +00:00
|
|
|
# Note that our implementation has been optimized with "ProtoArray"
|
|
|
|
# instead of following the spec (in particular the "store").
|
|
|
|
|
|
|
|
type
  OpKind = enum
    ## The kinds of steps that can appear in a fork choice test vector
    ## (see the consensus-specs fork_choice test format).
    opOnTick
    opOnAttestation
    opOnBlock
    opOnMergeBlock
    opChecks

  Operation = object
    ## A single parsed step from `steps.yaml`.
    # Expected outcome of applying this step (true = must succeed);
    # filled in from the optional "valid" key, defaults to true.
    valid: bool
    # variant specific fields
    case kind*: OpKind
    of opOnTick:
      tick: int          # seconds since genesis to advance the clock to
    of opOnAttestation:
      att: Attestation   # attestation to feed into fork choice
    of opOnBlock:
      blk: ForkedSignedBeaconBlock  # block to add to the DAG
    of opOnMergeBlock:
      powBlock: PowBlock # NOTE(review): parsed but not yet executed by runTest
    of opChecks:
      checks: JsonNode   # raw JSON of expected head/checkpoint/time values
|
|
|
proc initialLoad(
    path: string, db: BeaconChainDB,
    StateType, BlockType: typedesc): tuple[
      dag: ChainDagRef, fkChoice: ref ForkChoice
    ] =
  ## Load the anchor state and anchor block from `path`, seed `db` with
  ## them and return a freshly initialized ChainDAG + fork choice pair.
  ## Only phase 0 anchors are supported for now.

  # TODO: support more than phase 0 genesis

  let state = newClone(parseTest(
    path/"anchor_state.ssz_snappy",
    SSZ, StateType
  ))

  # TODO stack usage. newClone and assignClone do not seem to
  # prevent temporaries created by case objects
  let forkedState = new ForkedHashedBeaconState
  forkedState.kind = BeaconStateFork.Phase0
  forkedState.phase0Data.data = state[]
  forkedState.phase0Data.root = hash_tree_root(state[])

  let blk = parseTest(
    path/"anchor_block.ssz_snappy",
    SSZ, BlockType
  )

  # Anchor block is wrapped as a phase 0 block; the signature is left
  # zeroed since the block is treated as trusted below.
  let signedBlock = ForkedSignedBeaconBlock.init(phase0.SignedBeaconBlock(
    message: blk,
    # signature: - unused as it's trusted
    root: hashTreeRoot(blk)
  ))

  # Seed the database: the anchor state serves as both genesis and
  # tail state here, with the anchor block as trusted tail block.
  ChainDagRef.preInit(
    db,
    forkedState[], forkedState[],
    asTrusted(signedBlock)
  )

  let
    validatorMonitor = newClone(ValidatorMonitor.init())
    dag = ChainDAGRef.init(
      defaultRuntimeConfig, db, validatorMonitor, {})
    fkChoice = newClone(ForkChoice.init(
      dag.getFinalizedEpochRef(),
      dag.finalizedHead.blck,
      true
    ))

  (dag, fkChoice)
|
|
|
|
|
|
|
|
proc loadOps(path: string, fork: BeaconBlockFork): seq[Operation] =
  ## Parse `steps.yaml` under `path` into a sequence of `Operation`s.
  ## Block steps are deserialized with the SSZ block type matching `fork`;
  ## each step's expected validity is read from the optional "valid" key.
  let stepsYAML = readFile(path/"steps.yaml")
  let steps = yaml.loadToJson(stepsYAML)

  result = @[]
  for step in steps[0]:
    if step.hasKey"tick":
      result.add Operation(kind: opOnTick, tick: step["tick"].getInt())
    elif step.hasKey"block":
      let filename = step["block"].getStr()
      # The parse-and-wrap logic is identical across forks - only the
      # concrete SSZ block type differs, so dispatch on it via a template
      # instead of triplicating the body.
      template addBlockOp(BlockType: typedesc) =
        let blck = parseTest(
          path/filename & ".ssz_snappy",
          SSZ, BlockType
        )
        result.add Operation(kind: opOnBlock,
          blk: ForkedSignedBeaconBlock.init(blck))
      case fork
      of BeaconBlockFork.Phase0:
        addBlockOp phase0.SignedBeaconBlock
      of BeaconBlockFork.Altair:
        addBlockOp altair.SignedBeaconBlock
      of BeaconBlockFork.Bellatrix:
        addBlockOp bellatrix.SignedBeaconBlock
    elif step.hasKey"attestation":
      let filename = step["attestation"].getStr()
      let att = parseTest(
        path/filename & ".ssz_snappy",
        SSZ, Attestation
      )
      result.add Operation(kind: opOnAttestation,
        att: att)
    elif step.hasKey"checks":
      result.add Operation(kind: opChecks,
        checks: step["checks"])
    else:
      doAssert false, "Unreachable: " & $step

    # "valid" annotates the step just added; checks steps carry no
    # validity, all other steps default to valid = true.
    if step.hasKey"valid":
      doAssert step.len == 2
      result[^1].valid = step["valid"].getBool()
    elif not step.hasKey"checks":
      doAssert step.len == 1
      result[^1].valid = true
|
|
|
|
|
|
|
|
proc stepOnBlock(
       dag: ChainDagRef,
       fkChoice: ref ForkChoice,
       verifier: var BatchVerifier,
       state: var ForkedHashedBeaconState,
       stateCache: var StateCache,
       signedBlock: ForkySignedBeaconBlock,
       time: BeaconTime): Result[BlockRef, BlockError] =
  ## Apply a single "on_block" step: advance `state` to the wall slot,
  ## add `signedBlock` to the DAG and, if accepted, feed it into fork
  ## choice. Returns the DAG's verdict on the block.
  # 1. Move state to proper slot.
  doAssert dag.updateState(
    state,
    dag.head.atSlot(time.slotOrZero),
    save = false,
    stateCache
  )

  # 2. Add block to DAG
  # Pick the trusted block type matching the fork of the incoming block -
  # the addHeadBlock callback below receives the block re-typed as trusted.
  when signedBlock is phase0.SignedBeaconBlock:
    type TrustedBlock = phase0.TrustedSignedBeaconBlock
  elif signedBlock is altair.SignedBeaconBlock:
    type TrustedBlock = altair.TrustedSignedBeaconBlock
  else:
    type TrustedBlock = bellatrix.TrustedSignedBeaconBlock

  let blockAdded = dag.addHeadBlock(verifier, signedBlock) do (
      blckRef: BlockRef, signedBlock: TrustedBlock, epochRef: EpochRef
    ):

    # 3. Update fork choice if valid
    # The callback only runs when the DAG accepted the block, so a fork
    # choice failure here is a test invariant violation, not a test result.
    let status = fkChoice[].process_block(
      dag,
      epochRef,
      blckRef,
      signedBlock.message,
      time
    )
    doAssert status.isOk()

  blockAdded
|
2021-11-25 18:41:39 +00:00
|
|
|
|
|
|
|
proc stepOnAttestation(
       dag: ChainDagRef,
       fkChoice: ref ForkChoice,
       att: Attestation,
       time: BeaconTime): FcResult[void] =
  ## Apply a single "attestation" step: resolve the attesting validator
  ## indices for the attestation's committee and feed them into fork
  ## choice at the current test time.
  let
    epochRef = dag.getEpochRef(
      dag.head, time.slotOrZero().epoch(), false).expect("no pruning in test")
    attesters = epochRef.get_attesting_indices(
      att.data.slot, CommitteeIndex(att.data.index), att.aggregation_bits)

  fkChoice[].on_attestation(
    dag,
    att.data.slot, att.data.beacon_block_root, attesters,
    time
  )
|
|
|
|
|
|
|
|
proc stepChecks(
       checks: JsonNode,
       dag: ChainDagRef,
       fkChoice: ref ForkChoice,
       time: BeaconTime
     ) =
  ## Apply a "checks" step: assert that the fork choice (and DAG, where
  ## fork choice prunes the relevant data) matches the expected values
  ## from the test vector.
  doAssert checks.len >= 1, "No checks found"
  for check, val in checks:
    case check
    of "time":
      doAssert time.ns_since_genesis == val.getInt().seconds.nanoseconds()
      doAssert fkChoice.checkpoints.time.slotOrZero == time.slotOrZero
    of "head":
      let
        headRoot = fkChoice[].get_head(dag, time).get()
        headRef = dag.getBlockRef(headRoot).get()
      doAssert headRef.slot == Slot(val["slot"].getInt())
      doAssert headRef.root == Eth2Digest.fromHex(val["root"].getStr())
    of "justified_checkpoint":
      let justified = fkChoice.checkpoints.justified.checkpoint
      doAssert justified.epoch == Epoch(val["epoch"].getInt())
      doAssert justified.root == Eth2Digest.fromHex(val["root"].getStr())
    of "justified_checkpoint_root": # undocumented check
      doAssert fkChoice.checkpoints.justified.checkpoint.root ==
        Eth2Digest.fromHex(val.getStr())
    of "finalized_checkpoint":
      let finalized = fkChoice.checkpoints.finalized
      doAssert finalized.epoch == Epoch(val["epoch"].getInt())
      doAssert finalized.root == Eth2Digest.fromHex(val["root"].getStr())
    of "best_justified_checkpoint":
      let best = fkChoice.checkpoints.best_justified
      doAssert best.epoch == Epoch(val["epoch"].getInt())
      doAssert best.root == Eth2Digest.fromHex(val["root"].getStr())
    of "proposer_boost_root":
      doAssert fkChoice.checkpoints.proposer_boost_root ==
        Eth2Digest.fromHex(val.getStr())
    of "genesis_time":
      # The fork choice is pruned regularly
      # and does not store the genesis time,
      # hence we check the DAG
      doAssert dag.genesis.slot == Slot(val.getInt())
    else:
      doAssert false, "Unsupported check '" & $check & "'"
|
|
|
|
|
|
|
|
proc runTest(path: string, fork: BeaconBlockFork) =
  ## Execute one fork choice test vector rooted at `path`: load the
  ## anchor state/block into an in-memory DB, then replay every step
  ## from `steps.yaml`, asserting each step's outcome and checks.
  let db = BeaconChainDB.new("", inMemory = true)
  defer:
    db.close()

  let stores = case fork
    of BeaconBlockFork.Phase0:
      initialLoad(
        path, db,
        phase0.BeaconState, phase0.BeaconBlock
      )
    else:
      doAssert false, "Unsupported fork: " & $fork
      # unreachable - dummy value so the case expression type-checks
      (ChainDAGRef(), (ref ForkChoice)())
    # of BeaconBlockFork.Altair:
    #   initialLoad(
    #     path, db,
    #     # The tests always use phase 0 block for anchor - https://github.com/ethereum/consensus-specs/pull/2323
    #     # TODO: support altair genesis state
    #     altair.BeaconState, phase0.BeaconBlock
    #   )
    # of BeaconBlockFork.Merge:
    #   initialLoad(
    #     path, db,
    #     # The tests always use phase 0 block for anchor - https://github.com/ethereum/consensus-specs/pull/2323
    #     # TODO: support merge genesis state
    #     bellatrix.BeaconState, phase0.BeaconBlock
    #   )

  var
    taskpool = Taskpool.new()
    verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)

  let steps = loadOps(path, fork)
  var time = stores.fkChoice.checkpoints.time

  let state = newClone(stores.dag.headState)
  var stateCache = StateCache()

  for step in steps:
    case step.kind
    of opOnTick:
      time = BeaconTime(ns_since_genesis: step.tick.seconds.nanoseconds)
      doAssert stores.fkChoice.checkpoints.on_tick(time).isOk
    of opOnBlock:
      withBlck(step.blk):
        let status = stepOnBlock(
          stores.dag, stores.fkChoice,
          verifier,
          state[], stateCache,
          blck,
          time)
        doAssert status.isOk == step.valid
    of opOnAttestation:
      let status = stepOnAttestation(
        stores.dag, stores.fkChoice,
        step.att, time)
      doAssert status.isOk == step.valid
    of opChecks:
      stepChecks(step.checks, stores.dag, stores.fkChoice, time)
    of opOnMergeBlock:
      # Listed explicitly (rather than via `else`) so that adding a new
      # OpKind becomes a compile-time error instead of a runtime assert.
      doAssert false, "Unsupported"
|
|
|
|
|
2022-01-05 08:42:56 +00:00
|
|
|
suite "EF - ForkChoice" & preset():
  ## Discover and run all fork choice test vectors shipped with the
  ## consensus-spec tests for the supported forks/test kinds.
  const SKIP = [
    # protoArray can handle blocks in the future gracefully
    # spec: https://github.com/ethereum/consensus-specs/blame/v1.1.3/specs/phase0/fork-choice.md#L349
    # test: tests/fork_choice/scenarios/no_votes.nim
    # "Ensure the head is still 4 whilst the justified epoch is 0."
    "on_block_future_block",
  ]

  for fork in [BeaconBlockFork.Phase0]: # TODO: init ChainDAG from Merge/Altair
    let forkStr = toLowerAscii($fork)
    for testKind in ["get_head", "on_block"]:
      let basePath = SszTestsDir/const_preset/forkStr/"fork_choice"/testKind/"pyspec_tests"
      for kind, path in walkDir(basePath, relative = true, checkDir = true):
        test "ForkChoice - " & const_preset/forkStr/"fork_choice"/testKind/"pyspec_tests"/path:
          if const_preset == "minimal":
            # TODO: Minimal tests have long paths issues on Windows
            # and some are testing implementation details:
            # - assertion that input block is not in the future
            # - block slot is later than finalized slot
            # - ...
            # that ProtoArray handles gracefully
            skip()
          elif path in SKIP:
            skip()
          else:
            runTest(basePath/path, fork)
|
|
|
|
|
|
|
|
|