Support starting from altair (#3054)

* Support starting from altair
* hide the `finalized-checkpoint-` options - they are incomplete and usage may cause crashes
* remove genesis detection code (broken, obsolete)
* enable starting ChainDAG from altair checkpoints - this is a prerequisite for checkpoint sync (TODO: backfill)
* tighten checkpoint state conditions
* show error when starting from checkpoint with existing database (not supported)
* print rest-compatible JSON in ncli/state_sim
* altair/merge support in ncli
* more altair/merge support in ncli_db
* pre-load header to speed up loading
* fix forked block decoding
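The core of the change is that states and blocks are now loaded without knowing their fork up front: the readers added in this commit (`readSszForkedHashedBeaconState`, `readSszForkedTrustedSignedBeaconBlock`) decode only the small header every fork shares, map its slot to a fork via the runtime config, then decode the full object as that fork's type. A minimal sketch of loading a checkpoint state this way (file name and import list are illustrative assumptions, not part of this diff):

```nim
import
  stew/[io2, results],
  ../beacon_chain/spec/[eth2_ssz_serialization, forks, helpers]

let
  cfg = defaultRuntimeConfig  # assumed config; any RuntimeConfig works
  bytes = readAllBytes("finalized-state.ssz").tryGet()
  # Peeks at the shared (genesis_time, genesis_validators_root, slot) prefix,
  # picks the fork via cfg.stateForkAtEpoch, then decodes the whole state as
  # that fork's BeaconState.
  forkedState = newClone(readSszForkedHashedBeaconState(cfg, bytes))

withState(forkedState[]):
  echo "loaded ", stateFork, " state at slot ", state.data.slot
```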
parent 03a70fbf36 · commit ec650c7fd7
@@ -158,6 +158,17 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
+ Next fork epoch check OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Forked SSZ readers
```diff
+ load altair block OK
+ load altair state OK
+ load merge block OK
+ load merge state OK
+ load phase0 block OK
+ load phase0 state OK
+ should raise on unknown data OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
## Gossip validation [Preset: mainnet]
```diff
+ Any committee index is valid OK

@@ -452,4 +463,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 42/56 Fail: 0/56 Skip: 14/56
---TOTAL---
OK: 258/274 Fail: 0/274 Skip: 16/274
OK: 265/281 Fail: 0/281 Skip: 16/281
@@ -129,6 +129,17 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
+ Next fork epoch check OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## Forked SSZ readers
```diff
+ load altair block OK
+ load altair state OK
+ load merge block OK
+ load merge state OK
+ load phase0 block OK
+ load phase0 state OK
+ should raise on unknown data OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
## Gossip validation [Preset: mainnet]
```diff
+ Any committee index is valid OK

@@ -369,4 +380,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 42/56 Fail: 0/56 Skip: 14/56
---TOTAL---
OK: 203/219 Fail: 0/219 Skip: 16/219
OK: 210/226 Fail: 0/226 Skip: 16/226
@@ -221,10 +221,12 @@ type
name: "weak-subjectivity-checkpoint" }: Option[Checkpoint]
finalizedCheckpointState* {.
hidden # TODO unhide when backfilling is done
desc: "SSZ file specifying a recent finalized state"
name: "finalized-checkpoint-state" }: Option[InputFile]
finalizedCheckpointBlock* {.
hidden # TODO unhide when backfilling is done
desc: "SSZ file specifying a recent finalized block"
name: "finalized-checkpoint-block" }: Option[InputFile]
@@ -11,17 +11,15 @@ import
std/[options, sequtils, tables, sets],
stew/[assign2, byteutils, results],
metrics, snappy, chronicles,
../spec/[
beaconstate, eth2_merkleization, eth2_ssz_serialization, forks, helpers,
../spec/[beaconstate, eth2_merkleization, eth2_ssz_serialization, helpers,
state_transition, validator],
../spec/datatypes/[phase0, altair, merge],
".."/beacon_chain_db,
"."/[block_pools_types, block_quarantine, forkedbeaconstate_dbhelpers]
"."/[block_pools_types, block_quarantine]
export
forks, block_pools_types, results, forkedbeaconstate_dbhelpers,
beacon_chain_db,
eth2_merkleization, eth2_ssz_serialization
eth2_merkleization, eth2_ssz_serialization,
block_pools_types, results, beacon_chain_db
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_head_root, "Root of the head block of the beacon chain"
@@ -364,6 +362,19 @@ proc getStateData(
true
proc getForkedBlock(db: BeaconChainDB, root: Eth2Digest):
Opt[ForkedTrustedSignedBeaconBlock] =
# When we only have a digest, we don't know which fork it's from so we try
# them one by one - this should be used sparingly
if (let blck = db.getMergeBlock(root); blck.isSome()):
ok(ForkedTrustedSignedBeaconBlock.init(blck.get()))
elif (let blck = db.getAltairBlock(root); blck.isSome()):
ok(ForkedTrustedSignedBeaconBlock.init(blck.get()))
elif (let blck = db.getPhase0Block(root); blck.isSome()):
ok(ForkedTrustedSignedBeaconBlock.init(blck.get()))
else:
err()
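A sketch of how this fork-by-fork lookup might be exercised from within the module; the proc name is hypothetical and the root value is illustrative only:

```nim
# Hypothetical usage of getForkedBlock above; assumes an open BeaconChainDB.
proc showBlock(db: BeaconChainDB, root: Eth2Digest) =
  let forked = db.getForkedBlock(root)
  if forked.isSome():
    # `kind` tells us which fork matched; withBlck exposes the fork-specific
    # block as `blck`.
    echo "found ", forked.get().kind, " block"
    withBlck(forked.get()):
      echo "slot ", blck.message.slot
  else:
    echo "no block stored under ", root
```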
proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
updateFlags: UpdateFlags, onBlockCb: OnBlockCallback = nil,
onHeadCb: OnHeadCallback = nil, onReorgCb: OnReorgCallback = nil,
@@ -380,19 +391,19 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
let
tailRoot = tailBlockRoot.get()
tailBlock = db.getPhase0Block(tailRoot).get()
tailRef = BlockRef.init(tailRoot, tailBlock.message)
tailBlock = db.getForkedBlock(tailRoot).get()
tailRef = withBlck(tailBlock): BlockRef.init(tailRoot, blck.message)
headRoot = headBlockRoot.get()
let genesisRef = if tailBlock.message.slot == GENESIS_SLOT:
let genesisRef = if tailBlock.slot == GENESIS_SLOT:
tailRef
else:
let
genesisBlockRoot = db.getGenesisBlock().expect(
"preInit should have initialized the database with a genesis block root")
genesisBlock = db.getPhase0Block(genesisBlockRoot).expect(
genesisBlock = db.getForkedBlock(genesisBlockRoot).expect(
"preInit should have initialized the database with a genesis block")
BlockRef.init(genesisBlockRoot, genesisBlock.message)
withBlck(genesisBlock): BlockRef.init(genesisBlockRoot, blck.message)
var
blocks: HashSet[KeyedBlockRef]
@@ -644,7 +655,7 @@ proc putState(dag: ChainDAGRef, state: StateData) =
# Ideally we would save the state and the root lookup cache in a single
# transaction to prevent database inconsistencies, but the state loading code
# is resilient against one or the other going missing
dag.db.putState(state.data)
withState(state.data): dag.db.putState(state.root, state.data)
dag.db.putStateRoot(
state.blck.root, getStateField(state.data, slot), getStateRoot(state.data))
@@ -1313,65 +1324,102 @@ proc updateHead*(
dag.onFinHappened(data)
proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): bool =
# Lightweight check to see if we have the minimal information needed to
# load up a database - we don't check head here - if something is wrong with
# head, it's likely an initialized, but corrupt database - init will detect
# that
let
headBlockRoot = db.getHeadBlock()
genesisBlockRoot = db.getGenesisBlock()
tailBlockRoot = db.getTailBlock()
if not (headBlockRoot.isSome() and tailBlockRoot.isSome()):
if not (genesisBlockRoot.isSome() and tailBlockRoot.isSome()):
return false
let
headBlockPhase0 = db.getPhase0Block(headBlockRoot.get())
headBlockAltair = db.getAltairBlock(headBlockRoot.get())
tailBlock = db.getPhase0Block(tailBlockRoot.get())
genesisBlock = db.getForkedBlock(genesisBlockRoot.get())
tailBlock = db.getForkedBlock(tailBlockRoot.get())
if not ((headBlockPhase0.isSome() or headBlockAltair.isSome()) and
tailBlock.isSome()):
if not (genesisBlock.isSome() and tailBlock.isSome()):
return false
let
genesisStateRoot = withBlck(genesisBlock.get()): blck.message.state_root
tailStateRoot = withBlck(tailBlock.get()): blck.message.state_root
if not db.containsState(tailBlock.get().message.state_root):
if not (
db.containsState(genesisStateRoot) and db.containsState(tailStateRoot)):
return false
true
proc preInit*(
T: type ChainDAGRef, db: BeaconChainDB,
genesisState, tailState: var phase0.BeaconState, tailBlock: phase0.TrustedSignedBeaconBlock) =
genesisState, tailState: ForkedHashedBeaconState,
tailBlock: ForkedTrustedSignedBeaconBlock) =
# write a genesis state, the way the ChainDAGRef expects it to be stored in
# database
# TODO probably should just init a block pool with the freshly written
# state - but there's more refactoring needed to make it nice - doing
# a minimal patch for now..
doAssert tailBlock.message.state_root == hash_tree_root(tailState)
logScope:
genesisStateRoot = getStateRoot(genesisState)
genesisStateSlot = getStateField(genesisState, slot)
tailStateRoot = getStateRoot(tailState)
tailStateSlot = getStateField(tailState, slot)
let genesisBlockRoot = withState(genesisState):
if state.root != getStateRoot(tailState):
# Different tail and genesis
if state.data.slot >= getStateField(tailState, slot):
fatal "Tail state must be newer or the same as genesis state"
quit 1
let tail_genesis_validators_root =
getStateField(tailState, genesis_validators_root)
if state.data.genesis_validators_root != tail_genesis_validators_root:
fatal "Tail state doesn't match genesis validators root, it is likely from a different network!",
genesis_validators_root = shortLog(state.data.genesis_validators_root),
tail_genesis_validators_root = shortLog(tail_genesis_validators_root)
quit 1
let blck = get_initial_beacon_block(state.data)
db.putGenesisBlock(blck.root)
db.putBlock(blck)
db.putState(state.root, state.data)
db.putStateRoot(blck.root, state.data.slot, state.root)
blck.root
else: # tail and genesis are the same
withBlck(tailBlock):
db.putGenesisBlock(blck.root)
blck.root
withState(tailState):
withBlck(tailBlock):
# When looking up the state root of the tail block, we don't use the
# BlockSlot->state_root map, so the only way the init code can find the
# state is through the state root in the block - this could be relaxed
# down the line
if blck.message.state_root != state.root:
fatal "State must match the given block",
tailBlck = shortLog(blck)
quit 1
db.putBlock(blck)
db.putTailBlock(blck.root)
db.putHeadBlock(blck.root)
db.putState(state.root, state.data)
db.putStateRoot(blck.root, state.data.slot, state.root)
notice "New database from snapshot",
blockRoot = shortLog(tailBlock.root),
stateRoot = shortLog(tailBlock.message.state_root),
fork = tailState.fork,
validators = tailState.validators.len()
db.putState(tailState)
db.putBlock(tailBlock)
db.putTailBlock(tailBlock.root)
db.putHeadBlock(tailBlock.root)
db.putStateRoot(tailBlock.root, tailState.slot, tailBlock.message.state_root)
if tailState.slot == GENESIS_SLOT:
db.putGenesisBlock(tailBlock.root)
else:
doAssert genesisState.slot == GENESIS_SLOT
db.putState(genesisState)
let genesisBlock = get_initial_beacon_block(genesisState)
db.putBlock(genesisBlock)
db.putStateRoot(genesisBlock.root, GENESIS_SLOT, genesisBlock.message.state_root)
db.putGenesisBlock(genesisBlock.root)
func setTailState*(dag: ChainDAGRef,
checkpointState: phase0.BeaconState,
checkpointBlock: phase0.TrustedSignedBeaconBlock) =
# TODO(zah)
# Delete all records up to the tail node. If the tail node is not
# in the database, init the dabase in a way similar to `preInit`.
discard
genesisBlockRoot = shortLog(genesisBlockRoot),
genesisStateRoot = shortLog(getStateRoot(genesisState)),
tailBlockRoot = shortLog(blck.root),
tailStateRoot = shortLog(state.root),
fork = state.data.fork,
validators = state.data.validators.len()
proc getGenesisBlockData*(dag: ChainDAGRef): BlockData =
dag.get(dag.genesis)
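With `preInit` and `isInitialized` now operating on the forked types, a fresh database can be seeded from any-fork genesis/checkpoint data. A sketch mirroring the call sites elsewhere in this diff; the directory, file paths, and import list are placeholders, and the checkpoint block's `state_root` must match the checkpoint state:

```nim
import
  stew/[io2, results],
  ../beacon_chain/beacon_chain_db,
  ../beacon_chain/consensus_object_pools/blockchain_dag,
  ../beacon_chain/spec/[forks, helpers]

let
  cfg = defaultRuntimeConfig   # placeholder; use the network's RuntimeConfig
  db = BeaconChainDB.new("checkpoint-data/db", inMemory = false)
  genesisState = newClone(readSszForkedHashedBeaconState(
    cfg, readAllBytes("genesis.ssz").tryGet()))
  checkpointState = newClone(readSszForkedHashedBeaconState(
    cfg, readAllBytes("finalized-state.ssz").tryGet()))
  checkpointBlock = readSszForkedTrustedSignedBeaconBlock(
    cfg, readAllBytes("finalized-block.ssz").tryGet())

# Seed the database the way ChainDAGRef expects it, then sanity-check it.
ChainDAGRef.preInit(db, genesisState[], checkpointState[], checkpointBlock)
doAssert ChainDAGRef.isInitialized(db)
```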
@@ -1,18 +0,0 @@
# beacon_chain
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
import
../spec/forks,
../beacon_chain_db
proc putState*(db: BeaconChainDB, state: ForkedHashedBeaconState) =
case state.kind:
of BeaconStateFork.Phase0: db.putState(getStateRoot(state), state.phase0Data.data)
of BeaconStateFork.Altair: db.putState(getStateRoot(state), state.altairData.data)
of BeaconStateFork.Merge: db.putState(getStateRoot(state), state.mergeData.data)
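With this helper file deleted, the per-fork dispatch happens at each call site instead, using `withState` together with the fork-specific `putState` overloads, as `ChainDAG.putState` does earlier in this diff. A minimal sketch of the replacement pattern (the helper name here is hypothetical):

```nim
# Sketch: withState picks the fork-specific hashed state, and the matching
# BeaconChainDB.putState overload is resolved at compile time.
proc putForkedState(db: BeaconChainDB, forked: ForkedHashedBeaconState) =
  withState(forked):
    db.putState(state.root, state.data)
```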
@@ -12,6 +12,7 @@ import
tables, times, terminal],
# Nimble packages
stew/io2,
spec/eth2_apis/eth2_rest_serialization,
stew/[objects, byteutils, endians2, io2], stew/shims/macros,
chronos, confutils, metrics, metrics/chronos_httpserver,

@@ -141,8 +142,8 @@ proc init*(T: type BeaconNode,
db = BeaconChainDB.new(config.databaseDir, inMemory = false)
var
genesisState, checkpointState: ref phase0.BeaconState
checkpointBlock: phase0.TrustedSignedBeaconBlock
genesisState, checkpointState: ref ForkedHashedBeaconState
checkpointBlock: ForkedTrustedSignedBeaconBlock
proc onAttestationReceived(data: Attestation) =
eventBus.emit("attestation-received", data)

@@ -164,8 +165,9 @@ proc init*(T: type BeaconNode,
if config.finalizedCheckpointState.isSome:
let checkpointStatePath = config.finalizedCheckpointState.get.string
checkpointState = try:
newClone(SSZ.loadFile(checkpointStatePath, phase0.BeaconState))
except SerializationError as err:
newClone(readSszForkedHashedBeaconState(
cfg, readAllBytes(checkpointStatePath).tryGet()))
except SszError as err:
fatal "Checkpoint state deserialization failed",
err = formatMsg(err, checkpointStatePath)
quit 1

@@ -174,15 +176,18 @@ proc init*(T: type BeaconNode,
quit 1
if config.finalizedCheckpointBlock.isNone:
if checkpointState.slot > 0:
withState(checkpointState[]):
if getStateField(checkpointState[], slot) > 0:
fatal "Specifying a non-genesis --finalized-checkpoint-state requires specifying --finalized-checkpoint-block as well"
quit 1
else:
let checkpointBlockPath = config.finalizedCheckpointBlock.get.string
try:
# TODO Perform sanity checks like signature and slot verification at least
checkpointBlock = SSZ.loadFile(checkpointBlockPath, phase0.TrustedSignedBeaconBlock)
except SerializationError as err:
# Checkpoint block might come from an earlier fork than the state with
# the state having empty slots processed past the fork epoch.
checkpointBlock = readSszForkedTrustedSignedBeaconBlock(
cfg, readAllBytes(checkpointBlockPath).tryGet())
except SszError as err:
fatal "Invalid checkpoint block", err = err.formatMsg(checkpointBlockPath)
quit 1
except IOError as err:

@@ -197,8 +202,8 @@ proc init*(T: type BeaconNode,
var eth1Monitor: Eth1Monitor
if not ChainDAGRef.isInitialized(db):
var
tailState: ref phase0.BeaconState
tailBlock: phase0.TrustedSignedBeaconBlock
tailState: ref ForkedHashedBeaconState
tailBlock: ForkedTrustedSignedBeaconBlock
if genesisStateContents.len == 0 and checkpointState == nil:
when hasGenesisDetection:

@@ -243,12 +248,14 @@ proc init*(T: type BeaconNode,
eth1Block = genesisState.eth1_data.block_hash,
totalDeposits = genesisState.eth1_data.deposit_count
else:
fatal "The beacon node must be compiled with -d:has_genesis_detection " &
fatal "No database and no genesis snapshot found: supply a genesis.ssz " &
"with the network configuration, or compile the beacon node with " &
"the -d:has_genesis_detection option " &
"in order to support monitoring for genesis events"
quit 1
elif genesisStateContents.len == 0:
if checkpointState.slot == GENESIS_SLOT:
if getStateField(checkpointState[], slot) == GENESIS_SLOT:
genesisState = checkpointState
tailState = checkpointState
tailBlock = get_initial_beacon_block(genesisState[])

@@ -257,11 +264,13 @@ proc init*(T: type BeaconNode,
quit 1
else:
try:
genesisState = newClone(SSZ.decode(genesisStateContents, phase0.BeaconState))
genesisState = newClone(readSszForkedHashedBeaconState(
cfg,
genesisStateContents.toOpenArrayByte(0, genesisStateContents.high())))
except CatchableError as err:
raiseAssert "Invalid baked-in state: " & err.msg
if checkpointState != nil:
if not checkpointState.isNil:
tailState = checkpointState
tailBlock = checkpointBlock
else:

@@ -274,6 +283,11 @@ proc init*(T: type BeaconNode,
except CatchableError as exc:
error "Failed to initialize database", err = exc.msg
quit 1
else:
if not checkpointState.isNil:
fatal "A database already exists, cannot start from given checkpoint",
dataDir = config.dataDir
quit 1
# Doesn't use std/random directly, but dependencies might
randomize(rng[].rand(high(int)))

@@ -319,16 +333,6 @@ proc init*(T: type BeaconNode,
headStateSlot = getStateField(dag.headState.data, slot)
quit 1
if checkpointState != nil:
let checkpointGenesisValidatorsRoot = checkpointState[].genesis_validators_root
if checkpointGenesisValidatorsRoot != databaseGenesisValidatorsRoot:
fatal "The specified checkpoint state is intended for a different network",
checkpointGenesisValidatorsRoot, databaseGenesisValidatorsRoot,
dataDir = config.dataDir
quit 1
dag.setTailState(checkpointState[], checkpointBlock)
if eth1Monitor.isNil and
config.web3Urls.len > 0 and
genesisDepositsSnapshotContents.len > 0:
@@ -6,14 +6,14 @@
import std/[typetraits, strutils, sets]
import stew/[results, base10], chronicles,
nimcrypto/utils as ncrutils
import ".."/[beacon_chain_db, beacon_node],
import "."/rest_utils,
".."/[beacon_chain_db, beacon_node],
".."/networking/eth2_network,
".."/consensus_object_pools/[blockchain_dag, spec_cache,
attestation_pool, sync_committee_msg_pool],
".."/validators/validator_duties,
".."/spec/[beaconstate, forks, network],
".."/spec/datatypes/[phase0, altair],
"."/rest_utils
".."/spec/datatypes/[phase0, altair]
export rest_utils
@@ -298,6 +298,35 @@ func get_initial_beacon_block*(state: phase0.BeaconState):
phase0.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/altair/beacon-chain.md#initialize-state-for-pure-altair-testnets-and-test-vectors
func get_initial_beacon_block*(state: altair.BeaconState):
altair.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted
let message = altair.TrustedBeaconBlock(
slot: state.slot,
state_root: hash_tree_root(state),)
# parent_root, randao_reveal, eth1_data, signature, and body automatically
# initialized to default values.
altair.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#testing
func get_initial_beacon_block*(state: merge.BeaconState):
merge.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted
let message = merge.TrustedBeaconBlock(
slot: state.slot,
state_root: hash_tree_root(state),)
# parent_root, randao_reveal, eth1_data, signature, and body automatically
# initialized to default values.
merge.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message))
func get_initial_beacon_block*(state: ForkedHashedBeaconState):
ForkedTrustedSignedBeaconBlock =
withState(state):
ForkedTrustedSignedBeaconBlock.init(get_initial_beacon_block(state.data))
# https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#get_block_root_at_slot
func get_block_root_at_slot*(state: ForkyBeaconState, slot: Slot): Eth2Digest =
## Return the block root at a recent ``slot``.
@@ -403,6 +403,8 @@ type
data*: T
ForkedSignedBlockHeader* = object
message*: uint32 # message offset
signature*: ValidatorSig
slot*: Slot
ForkedBeaconStateHeader* = object
@@ -412,60 +412,58 @@ func getForkSchedule*(cfg: RuntimeConfig): array[2, Fork] =
## NOTE: Update this procedure when new fork will be scheduled.
[cfg.genesisFork(), cfg.altairFork()]
func readSszForkedHashedBeaconState*(
data: openArray[byte], likelyFork: BeaconStateFork):
ForkedHashedBeaconState {.raises: [Defect, SszError].} =
## Helper to read a state from bytes when it's not certain what kind of state
## it is - this happens for example when loading an SSZ state from command
## line - we'll use wall time to "guess" which state to start with
# careful - `result` is used, RVO didn't seem to work without
result = ForkedHashedBeaconState(kind: likelyFork)
var tried: set[BeaconStateFork]
type
# The first few fields of a state, shared across all forks
BeaconStateHeader = object
genesis_time: uint64
genesis_validators_root: Eth2Digest
slot: uint64
func readSszForkedHashedBeaconState*(cfg: RuntimeConfig, data: openArray[byte]):
ForkedHashedBeaconState {.raises: [Defect, SszError].} =
## Helper to read a header from bytes when it's not certain what kind of state
## it is - this happens for example when loading an SSZ state from command
## line
if data.len() < sizeof(BeaconStateHeader):
raise (ref MalformedSszError)(msg: "Not enough data for BeaconState header")
let header = SSZ.decode(
data.toOpenArray(0, sizeof(BeaconStateHeader) - 1),
BeaconStateHeader)
# careful - `result` is used, RVO didn't seem to work without
# TODO move time helpers somewhere to avoid circular imports
result = ForkedHashedBeaconState(
kind: cfg.stateForkAtEpoch(Epoch(header.slot div SLOTS_PER_EPOCH)))
template readFork() =
withState(result):
try:
readSszBytes(data, state.data)
state.root = hash_tree_root(state.data)
return result
except SszError as exc:
tried.incl result.kind
readFork()
for fork in BeaconStateFork:
if fork in tried: continue
result = ForkedHashedBeaconState(kind: fork)
readFork()
raise (ref SszError)(msg: "Unable to match data to any known fork")
type
ForkedBeaconBlockHeader = object
message*: uint32 # message offset
signature*: ValidatorSig
slot: Slot # start of BeaconBlock
func readSszForkedTrustedSignedBeaconBlock*(
data: openArray[byte], likelyFork: BeaconBlockFork):
cfg: RuntimeConfig, data: openArray[byte]):
ForkedTrustedSignedBeaconBlock {.raises: [Defect, SszError].} =
## Helper to read a state from bytes when it's not certain what kind of state
## it is - this happens for example when loading an SSZ state from command
## line - we'll use wall time to "guess" which state to start with
## Helper to read a header from bytes when it's not certain what kind of block
## it is
var
res = ForkedTrustedSignedBeaconBlock(kind: likelyFork)
tried: set[BeaconBlockFork]
if data.len() < sizeof(BeaconBlockHeader):
raise (ref MalformedSszError)(msg: "Not enough data for SignedBeaconBlock header")
template readFork() =
withBlck(res):
try:
let header = SSZ.decode(
data.toOpenArray(0, sizeof(ForkedBeaconBlockHeader) - 1),
ForkedBeaconBlockHeader)
# careful - `result` is used, RVO didn't seem to work without
# TODO move time helpers somewhere to avoid circular imports
result = ForkedTrustedSignedBeaconBlock(
kind: cfg.blockForkAtEpoch(Epoch(header.slot div SLOTS_PER_EPOCH)))
withBlck(result):
readSszBytes(data, blck)
return res
except SszError as exc:
tried.incl res.kind
readFork()
for fork in BeaconBlockFork:
if fork in tried: continue
res = ForkedTrustedSignedBeaconBlock(kind: fork)
readFork()
raise (ref SszError)(msg: "Unable to match data to any known fork")
func toBeaconBlockFork*(fork: BeaconStateFork): BeaconBlockFork =
case fork
@@ -29,23 +29,11 @@ proc dump*(dir: string, v: AttestationData, validator: ValidatorPubKey) =
logErrors:
SSZ.saveFile(dir / &"att-{v.slot}-{v.index}-{shortLog(validator)}.ssz", v)
proc dump*(dir: string, v: phase0.TrustedSignedBeaconBlock) =
proc dump*(dir: string, v: ForkyTrustedSignedBeaconBlock) =
logErrors:
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(v.root)}.ssz", v)
proc dump*(dir: string, v: altair.TrustedSignedBeaconBlock) =
logErrors:
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(v.root)}.ssz", v)
proc dump*(dir: string, v: phase0.SignedBeaconBlock) =
logErrors:
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(v.root)}.ssz", v)
proc dump*(dir: string, v: altair.SignedBeaconBlock) =
logErrors:
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(v.root)}.ssz", v)
proc dump*(dir: string, v: merge.SignedBeaconBlock) =
proc dump*(dir: string, v: ForkySignedBeaconBlock) =
logErrors:
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(v.root)}.ssz", v)
@@ -62,8 +62,6 @@ The following options are available:
[=false].
--weak-subjectivity-checkpoint Weak subjectivity checkpoint in the format
block_root:epoch_number.
--finalized-checkpoint-state SSZ file specifying a recent finalized state.
--finalized-checkpoint-block SSZ file specifying a recent finalized block.
--node-name A name for this node that will appear in the logs. If you set
this to 'auto', a persistent automatically generated ID will
be selected for each --data-dir folder.
@@ -3,7 +3,8 @@ import
confutils, chronicles, json_serialization,
stew/byteutils,
../research/simutils,
../beacon_chain/spec/datatypes/[phase0],
../beacon_chain/spec/eth2_apis/eth2_rest_serialization,
../beacon_chain/spec/datatypes/[phase0, altair, merge],
../beacon_chain/spec/[
eth2_ssz_serialization, forks, helpers, state_transition],
../beacon_chain/networking/network_metadata

@@ -157,12 +158,12 @@ proc doSSZ(conf: NcliConf) =
case conf.cmd:
of hashTreeRoot:
when t is phase0.SignedBeaconBlock:
when t is ForkySignedBeaconBlock:
echo hash_tree_root(v.message).data.toHex()
else:
echo hash_tree_root(v[]).data.toHex()
of pretty:
echo JSON.encode(v[], pretty = true)
echo RestJson.encode(v[], pretty = true)
else:
raiseAssert "doSSZ() only implements hashTreeRoot and pretty commands"

@@ -171,14 +172,22 @@ proc doSSZ(conf: NcliConf) =
case kind
of "attester_slashing": printit(AttesterSlashing)
of "attestation": printit(Attestation)
of "signed_block": printit(phase0.SignedBeaconBlock)
of "block": printit(phase0.BeaconBlock)
of "block_body": printit(phase0.BeaconBlockBody)
of "phase0_signed_block": printit(phase0.SignedBeaconBlock)
of "altair_signed_block": printit(altair.SignedBeaconBlock)
of "merge_signed_block": printit(merge.SignedBeaconBlock)
of "phase0_block": printit(phase0.BeaconBlock)
of "altair_block": printit(altair.BeaconBlock)
of "merge_block": printit(merge.BeaconBlock)
of "phase0_block_body": printit(phase0.BeaconBlockBody)
of "altair_block_body": printit(altair.BeaconBlockBody)
of "merge_block_body": printit(merge.BeaconBlockBody)
of "block_header": printit(BeaconBlockHeader)
of "deposit": printit(Deposit)
of "deposit_data": printit(DepositData)
of "eth1_data": printit(Eth1Data)
of "state": printit(phase0.BeaconState)
of "phase0_state": printit(phase0.BeaconState)
of "altiar_state": printit(altair.BeaconState)
of "merge_state": printit(merge.BeaconState)
of "proposer_slashing": printit(ProposerSlashing)
of "voluntary_exit": printit(VoluntaryExit)
ncli/ncli_db.nim
@@ -3,9 +3,8 @@ import
chronicles, confutils, stew/byteutils, eth/db/kvstore_sqlite3,
../beacon_chain/networking/network_metadata,
../beacon_chain/[beacon_chain_db],
../beacon_chain/consensus_object_pools/[
blockchain_dag, forkedbeaconstate_dbhelpers],
../beacon_chain/spec/datatypes/phase0,
../beacon_chain/consensus_object_pools/[blockchain_dag],
../beacon_chain/spec/datatypes/[phase0, altair, merge],
../beacon_chain/spec/[
beaconstate, helpers, state_transition, state_transition_epoch, validator],
../beacon_chain/sszdump,

@@ -176,32 +175,46 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
var
(start, ends) = dag.getSlotRange(conf.benchSlot, conf.benchSlots)
blockRefs = dag.getBlockRange(start, ends)
blocks: seq[phase0.TrustedSignedBeaconBlock]
blocks: (
seq[phase0.TrustedSignedBeaconBlock],
seq[altair.TrustedSignedBeaconBlock],
seq[merge.TrustedSignedBeaconBlock])
echo &"Loaded {dag.blocks.len} blocks, head slot {dag.head.slot}, selected {blockRefs.len} blocks"
doAssert blockRefs.len() > 0, "Must select at least one block"
for b in 0..<blockRefs.len:
let blck = blockRefs[blockRefs.len - b - 1]
withTimer(timers[tLoadBlock]):
blocks.add db.getPhase0Block(blockRefs[blockRefs.len - b - 1].root).get()
case cfg.blockForkAtEpoch(blck.slot.epoch)
of BeaconBlockFork.Phase0:
blocks[0].add dag.db.getPhase0Block(blck.root).get()
of BeaconBlockFork.Altair:
blocks[1].add dag.db.getAltairBlock(blck.root).get()
of BeaconBlockFork.Merge:
blocks[2].add dag.db.getMergeBlock(blck.root).get()
let state = newClone(dag.headState)
let stateData = newClone(dag.headState)
var
cache = StateCache()
info = ForkedEpochInfo()
loadedState = new phase0.BeaconState
loadedState = (
(ref phase0.HashedBeaconState)(),
(ref altair.HashedBeaconState)(),
(ref merge.HashedBeaconState)())
withTimer(timers[tLoadState]):
dag.updateStateData(
state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
stateData[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
template processBlocks(blocks: auto) =
for b in blocks.mitems():
while getStateField(state[].data, slot) < b.message.slot:
let isEpoch = (getStateField(state[].data, slot) + 1).isEpoch()
while getStateField(stateData[].data, slot) < b.message.slot:
let isEpoch = (getStateField(stateData[].data, slot) + 1).isEpoch()
withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
let ok = process_slots(
dag.cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
dag.cfg, stateData[].data, getStateField(stateData[].data, slot) + 1, cache,
info, {})
doAssert ok, "Slot processing can't fail with correct inputs"

@@ -210,7 +223,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
if conf.resetCache:
cache = StateCache()
if not state_transition_block(
dag.cfg, state[].data, b, cache, {}, noRollback):
dag.cfg, stateData[].data, b, cache, {}, noRollback):
dump("./", b)
echo "State transition failed (!)"
quit 1

@@ -220,20 +233,38 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
withTimer(timers[tDbStore]):
dbBenchmark.putBlock(b)
if getStateField(state[].data, slot).isEpoch and conf.storeStates:
if getStateField(state[].data, slot).epoch < 2:
dbBenchmark.putState(state[].data)
withState(stateData[].data):
if state.data.slot.isEpoch and conf.storeStates:
if state.data.slot.epoch < 2:
dbBenchmark.putState(state.root, state.data)
dbBenchmark.checkpoint()
else:
withTimer(timers[tDbStore]):
dbBenchmark.putState(state[].data)
dbBenchmark.putState(state.root, state.data)
dbBenchmark.checkpoint()
withTimer(timers[tDbLoad]):
doAssert dbBenchmark.getState(getStateRoot(state[].data), loadedState[], noRollback)
case stateFork
of BeaconStateFork.Phase0:
doAssert dbBenchmark.getState(
state.root, loadedState[0][].data, noRollback)
of BeaconStateFork.Altair:
doAssert dbBenchmark.getState(
state.root, loadedState[1][].data, noRollback)
of BeaconStateFork.Merge:
doAssert dbBenchmark.getState(
state.root, loadedState[2][].data, noRollback)
if getStateField(state[].data, slot).epoch mod 16 == 0:
doAssert hash_tree_root(state[].data.phase0Data.data) == hash_tree_root(loadedState[])
if state.data.slot.epoch mod 16 == 0:
let loadedRoot = case stateFork
of BeaconStateFork.Phase0: hash_tree_root(loadedState[0][].data)
of BeaconStateFork.Altair: hash_tree_root(loadedState[1][].data)
of BeaconStateFork.Merge: hash_tree_root(loadedState[2][].data)
doAssert hash_tree_root(state.data) == loadedRoot
processBlocks(blocks[0])
processBlocks(blocks[1])
processBlocks(blocks[2])
printTimers(false, timers)

@@ -241,16 +272,27 @@ proc cmdDumpState(conf: DbConf) =
let db = BeaconChainDB.new(conf.databaseDir.string)
defer: db.close()
let
phase0State = (ref phase0.HashedBeaconState)()
altairState = (ref altair.HashedBeaconState)()
mergeState = (ref merge.HashedBeaconState)()
for stateRoot in conf.stateRoot:
template doit(state: untyped) =
try:
let root = Eth2Digest(data: hexToByteArray[32](stateRoot))
var state = (ref phase0.HashedBeaconState)(root: root)
if not db.getState(root, state.data, noRollback):
echo "Couldn't load ", root
else:
dump("./", state[])
state.root = Eth2Digest.fromHex(stateRoot)
if db.getState(state.root, state.data, noRollback):
dump("./", state)
continue
except CatchableError as e:
echo "Couldn't load ", stateRoot, ": ", e.msg
echo "Couldn't load ", state.root, ": ", e.msg
doit(phase0State[])
doit(altairState[])
doit(mergeState[])
echo "Couldn't load ", stateRoot
proc cmdDumpBlock(conf: DbConf) =
let db = BeaconChainDB.new(conf.databaseDir.string)

@@ -258,11 +300,15 @@ proc cmdDumpBlock(conf: DbConf) =
for blockRoot in conf.blockRootx:
try:
let root = Eth2Digest(data: hexToByteArray[32](blockRoot))
let root = Eth2Digest.fromHex(blockRoot)
if (let blck = db.getPhase0Block(root); blck.isSome):
dump("./", blck.get())
elif (let blck = db.getAltairBlock(root); blck.isSome):
dump("./", blck.get())
elif (let blck = db.getMergeBlock(root); blck.isSome):
dump("./", blck.get())
else:
echo "Couldn't load ", root
echo "Couldn't load ", blockRoot
except CatchableError as e:
echo "Couldn't load ", blockRoot, ": ", e.msg

@@ -370,7 +416,8 @@ proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
let tmpState = assignClone(dag.headState)
dag.withState(tmpState[], blckRef.atSlot(Slot(conf.slot))):
echo "Writing state..."
dump("./", stateData.data.phase0Data, blck)
withState(stateData.data):
dump("./", state, blck)
func atCanonicalSlot(blck: BlockRef, slot: Slot): BlockSlot =
if slot == 0:
@@ -64,9 +64,9 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
blockRatio {.desc: "ratio of slots with blocks"} = 1.0,
replay = true):
let
(state, depositContractSnapshot) = loadGenesis(validators, false)
genesisBlock = get_initial_beacon_block(state[].data)
genesisTime = float state[].data.genesis_time
(genesisState, depositContractSnapshot) = loadGenesis(validators, false)
genesisBlock = get_initial_beacon_block(genesisState[])
genesisTime = float getStateField(genesisState[], genesis_time)
var
validatorKeyToIndex = initTable[ValidatorPubKey, int]()

@@ -80,9 +80,10 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
let db = BeaconChainDB.new("block_sim_db")
defer: db.close()
ChainDAGRef.preInit(db, state[].data, state[].data, genesisBlock)
ChainDAGRef.preInit(db, genesisState[], genesisState[], genesisBlock)
putInitialDepositContractSnapshot(db, depositContractSnapshot)
withState(genesisState[]):
for i in 0 ..< state.data.validators.len:
validatorKeyToIndex[state.data.validators[i].pubkey] = i
@@ -1,4 +1,5 @@
import
stew/io2,
stats, os, strformat, times,
../tests/testblockutil,
../beacon_chain/beacon_chain_db,

@@ -61,23 +62,26 @@ func verifyConsensus*(state: ForkedHashedBeaconState, attesterRatio: auto) =
state, finalized_checkpoint).epoch + 2 >= current_epoch
proc loadGenesis*(validators: Natural, validate: bool):
(ref phase0.HashedBeaconState, DepositContractSnapshot) =
(ref ForkedHashedBeaconState, DepositContractSnapshot) =
let
genesisFn =
&"genesis_{const_preset}_{validators}_{SPEC_VERSION}.ssz"
contractSnapshotFn =
&"deposit_contract_snapshot_{const_preset}_{validators}_{SPEC_VERSION}.ssz"
res = (ref phase0.HashedBeaconState)()
cfg = defaultRuntimeConfig
if fileExists(genesisFn) and fileExists(contractSnapshotFn):
res.data = SSZ.loadFile(genesisFn, phase0.BeaconState)
res.root = hash_tree_root(res.data)
if res.data.slot != GENESIS_SLOT:
let res = newClone(readSszForkedHashedBeaconState(
cfg, readAllBytes(genesisFn).tryGet()))
withState(res[]):
if state.data.slot != GENESIS_SLOT:
echo "Can only start from genesis state"
quit 1
if res.data.validators.len != validators:
echo &"Supplied genesis file has {res.data.validators.len} validators, while {validators} where requested, running anyway"
if state.data.validators.len != validators:
echo &"Supplied genesis file has {state.data.validators.len} validators, while {validators} where requested, running anyway"
echo &"Loaded {genesisFn}..."

@@ -101,17 +105,18 @@ proc loadGenesis*(validators: Natural, validate: bool):
let contractSnapshot = DepositContractSnapshot(
depositContractState: merkleizer.toDepositContractState)
res.data = initialize_beacon_state_from_eth1(
defaultRuntimeConfig,
let res = (ref ForkedHashedBeaconState)(kind: BeaconStateFork.Phase0)
res.phase0Data.data = initialize_beacon_state_from_eth1(
cfg,
Eth2Digest(),
0,
deposits,
flags)[]
res.root = hash_tree_root(res.data)
res.phase0Data.root = hash_tree_root(res[].phase0Data.data)
echo &"Saving to {genesisFn}..."
SSZ.saveFile(genesisFn, res.data)
SSZ.saveFile(genesisFn, res.phase0Data.data)
echo &"Saving to {contractSnapshotFn}..."
SSZ.saveFile(contractSnapshotFn, contractSnapshot)
@@ -14,6 +14,7 @@ import
options, sequtils, random, tables,
../tests/testblockutil,
../beacon_chain/spec/datatypes/phase0,
../beacon_chain/spec/eth2_apis/eth2_rest_serialization,
../beacon_chain/spec/[beaconstate, forks, helpers],
./simutils

@@ -30,7 +31,7 @@ func jsonName(prefix, slot: auto): string =
proc writeJson*(fn, v: auto) =
var f: File
defer: close(f)
Json.saveFile(fn, v, pretty = true)
RestJson.saveFile(fn, v, pretty = true)
cli do(slots = SLOTS_PER_EPOCH * 5,
validators = SLOTS_PER_EPOCH * 400, # One per shard is minimum

@@ -41,20 +42,18 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
validate = true):
let
flags = if validate: {} else: {skipBlsValidation}
(hashedState, _) = loadGenesis(validators, validate)
genesisBlock = get_initial_beacon_block(hashedState.data)
state = (ref ForkedHashedBeaconState)(
kind: BeaconStateFork.Phase0, phase0Data: hashedState[])
(state, _) = loadGenesis(validators, validate)
genesisBlock = get_initial_beacon_block(state[])
echo "Starting simulation..."
var
attestations = initTable[Slot, seq[Attestation]]()
latest_block_root = hash_tree_root(genesisBlock.message)
latest_block_root = withBlck(genesisBlock): blck.root
timers: array[Timers, RunningStat]
attesters: RunningStat
r = initRand(1)
signedBlock: phase0.SignedBeaconBlock
signedBlock: ForkedSignedBeaconBlock
cache = StateCache()
proc maybeWrite(last: bool) =

@@ -65,10 +64,11 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
write(stdout, ".")
if last:
writeJson("state.json", state[].phase0Data)
withState(state[]): writeJson("state.json", state.data)
else:
if getStateField(state[], slot) mod json_interval.uint64 == 0:
writeJson(jsonName(prefix, getStateField(state[], slot)), state[].phase0Data.data)
withState(state[]):
if state.data.slot mod json_interval.uint64 == 0:
writeJson(jsonName(prefix, state.data.slot), state.data)
write(stdout, ":")
else:
write(stdout, ".")

@@ -97,10 +97,9 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
withTimer(timers[t]):
signedBlock = addTestBlock(
state[], latest_block_root, cache, attestations = blockAttestations,
flags = flags).phase0Data
flags = flags)
latest_block_root = withTimerRet(timers[tHashBlock]):
hash_tree_root(signedBlock.message)
signedBlock.root = latest_block_root
withBlck(signedBlock): hash_tree_root(blck.message)
if attesterRatio > 0.0:
# attesterRatio is the fraction of attesters that actually do their

@@ -168,4 +167,4 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
echo "Done!"
printTimers(state[].phase0Data.data, attesters, validate, timers)
printTimers(state[], attesters, validate, timers)
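The switch from `Json` to `RestJson` above is what makes the simulator output REST-compatible, and it is the same serializer the ncli `pretty` command now uses. A small sketch of encoding any forked state this way (proc name is hypothetical):

```nim
import
  ../beacon_chain/spec/eth2_apis/eth2_rest_serialization,
  ../beacon_chain/spec/forks

# Hypothetical helper: encode whichever fork the state currently is as
# REST-compatible JSON, mirroring ncli's `RestJson.encode(v[], pretty = true)`.
proc stateToRestJson(forked: ForkedHashedBeaconState): string =
  withState(forked):
    result = RestJson.encode(state.data, pretty = true)
```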
@@ -22,6 +22,7 @@ import # Unit test
./test_eth1_monitor,
./test_eth2_ssz_serialization,
./test_exit_pool,
./test_forks,
./test_gossip_validation,
./test_helpers,
./test_honest_validator,
@@ -0,0 +1,100 @@
import
unittest2,
stew/byteutils,
../beacon_chain/spec/[forks, helpers],
../beacon_chain/spec/datatypes/[phase0, altair, merge]
{.used.}
template testHashedBeaconState(T: type, s: Slot) =
let
state = (ref T)()
state[].slot = s
let
bytes = SSZ.encode(state[])
forked = (ref ForkedHashedBeaconState)()
forked[] = readSszForkedHashedBeaconState(cfg, bytes)
check:
forked.kind == T.toFork()
template testTrustedSignedBeaconBlock(T: type, s: Slot) =
let
blck = (ref T)()
blck[].message.slot = s
let
bytes = SSZ.encode(blck[])
forked = (ref ForkedTrustedSignedBeaconBlock)()
forked[] = readSszForkedTrustedSignedBeaconBlock(cfg, bytes)
check:
forked.kind == T.toFork()
suite "Forked SSZ readers":
var
cfg = defaultRuntimeConfig
cfg.ALTAIR_FORK_EPOCH = Epoch(1)
cfg.MERGE_FORK_EPOCH = Epoch(2)
test "load phase0 state":
testHashedBeaconState(phase0.BeaconState, 0.Slot)
expect(SszError):
testHashedBeaconState(altair.BeaconState, 0.Slot)
expect(SszError):
testHashedBeaconState(merge.BeaconState, 0.Slot)
test "load altair state":
testHashedBeaconState(altair.BeaconState, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
expect(SszError):
testHashedBeaconState(phase0.BeaconState, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
expect(SszError):
testHashedBeaconState(merge.BeaconState, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
test "load merge state":
testHashedBeaconState(merge.BeaconState, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
expect(SszError):
testHashedBeaconState(phase0.BeaconState, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
expect(SszError):
testHashedBeaconState(altair.BeaconState, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
test "should raise on unknown data":
let
bytes = SSZ.encode(AttestationData())
expect(SszError):
discard newClone(readSszForkedHashedBeaconState(cfg, bytes))
test "load phase0 block":
testTrustedSignedBeaconBlock(phase0.TrustedSignedBeaconBlock, 0.Slot)
expect(SszError):
testTrustedSignedBeaconBlock(altair.TrustedSignedBeaconBlock, 0.Slot)
expect(SszError):
testTrustedSignedBeaconBlock(merge.TrustedSignedBeaconBlock, 0.Slot)
test "load altair block":
testTrustedSignedBeaconBlock(altair.TrustedSignedBeaconBlock, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
expect(SszError):
testTrustedSignedBeaconBlock(phase0.TrustedSignedBeaconBlock, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
expect(SszError):
testTrustedSignedBeaconBlock(merge.TrustedSignedBeaconBlock, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
test "load merge block":
testTrustedSignedBeaconBlock(merge.TrustedSignedBeaconBlock, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
expect(SszError):
testTrustedSignedBeaconBlock(phase0.TrustedSignedBeaconBlock, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
expect(SszError):
testTrustedSignedBeaconBlock(altair.TrustedSignedBeaconBlock, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
test "should raise on unknown data":
let
bytes = SSZ.encode(AttestationData())
expect(SszError):
discard newClone(readSszForkedTrustedSignedBeaconBlock(cfg, bytes))
@@ -10,23 +10,27 @@ import
../beacon_chain/[beacon_chain_db],
../beacon_chain/consensus_object_pools/blockchain_dag,
../beacon_chain/spec/datatypes/phase0,
../beacon_chain/spec/[beaconstate],
../beacon_chain/spec/[beaconstate, forks],
eth/db/[kvstore, kvstore_sqlite3],
./testblockutil
export beacon_chain_db, testblockutil, kvstore, kvstore_sqlite3
proc makeTestDB*(tailState: var phase0.BeaconState, tailBlock: phase0.TrustedSignedBeaconBlock): BeaconChainDB =
proc makeTestDB*(
tailState: ForkedHashedBeaconState,
tailBlock: ForkedTrustedSignedBeaconBlock): BeaconChainDB =
result = BeaconChainDB.new("", inMemory = true)
ChainDAGRef.preInit(result, tailState, tailState, tailBlock)
proc makeTestDB*(validators: Natural): BeaconChainDB =
let
genState = initialize_beacon_state_from_eth1(
genState = (ref ForkedHashedBeaconState)(
kind: BeaconStateFork.Phase0,
phase0Data: initialize_hashed_beacon_state_from_eth1(
defaultRuntimeConfig,
Eth2Digest(),
0,
makeInitialDeposits(validators.uint64, flags = {skipBlsValidation}),
{skipBlsValidation})
{skipBlsValidation}))
genBlock = get_initial_beacon_block(genState[])
makeTestDB(genState[], genBlock)