Support starting from altair (#3054)

* Support starting from altair
* hide the `finalized-checkpoint-` options - they are incomplete and usage may cause crashes
* remove genesis detection code (broken, obsolete)
* enable starting ChainDAG from altair checkpoints - this is a prerequisite for checkpoint sync (TODO: backfill)
* tighten checkpoint state conditions
* show error when starting from checkpoint with existing database (not supported)
* print rest-compatible JSON in ncli/state_sim
* altair/merge support in ncli
* more altair/merge support in ncli_db
* pre-load header to speed up loading
* fix forked block decoding

parent 03a70fbf36
commit ec650c7fd7
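Almost every hunk below applies the same pattern: a concrete `phase0.*` type is replaced by a forked container (`ForkedHashedBeaconState`, `ForkedTrustedSignedBeaconBlock`) and call sites dispatch through `withState`/`withBlck`. As orientation, here is a minimal self-contained sketch of that pattern - the names (`MiniFork`, `MiniForkedBlock`, `withMiniBlck`) are invented for illustration and are not the nimbus-eth2 definitions:

```nim
type
  MiniFork = enum
    forkPhase0, forkAltair, forkMerge

  MiniForkedBlock = object
    # one variant per fork; the real containers wrap full per-fork block types
    case kind: MiniFork
    of forkPhase0: phase0Slot: uint64
    of forkAltair: altairSlot: uint64
    of forkMerge: mergeSlot: uint64

template withMiniBlck(b: MiniForkedBlock, body: untyped): untyped =
  ## Dispatch once on the fork tag and inject a fork-agnostic view.
  case b.kind
  of forkPhase0:
    template slotField: untyped {.inject.} = b.phase0Slot
    body
  of forkAltair:
    template slotField: untyped {.inject.} = b.altairSlot
    body
  of forkMerge:
    template slotField: untyped {.inject.} = b.mergeSlot
    body

when isMainModule:
  let blck = MiniForkedBlock(kind: forkAltair, altairSlot: 2375680)
  withMiniBlck(blck):
    echo "slot = ", slotField  # the same call site works for every fork
```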
@@ -158,6 +158,17 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + Next fork epoch check OK
 ```
 OK: 3/3 Fail: 0/3 Skip: 0/3
+## Forked SSZ readers
+```diff
++ load altair block OK
++ load altair state OK
++ load merge block OK
++ load merge state OK
++ load phase0 block OK
++ load phase0 state OK
++ should raise on unknown data OK
+```
+OK: 7/7 Fail: 0/7 Skip: 0/7
 ## Gossip validation [Preset: mainnet]
 ```diff
 + Any committee index is valid OK
@@ -452,4 +463,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 OK: 42/56 Fail: 0/56 Skip: 14/56

 ---TOTAL---
-OK: 258/274 Fail: 0/274 Skip: 16/274
+OK: 265/281 Fail: 0/281 Skip: 16/281
@@ -129,6 +129,17 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + Next fork epoch check OK
 ```
 OK: 3/3 Fail: 0/3 Skip: 0/3
+## Forked SSZ readers
+```diff
++ load altair block OK
++ load altair state OK
++ load merge block OK
++ load merge state OK
++ load phase0 block OK
++ load phase0 state OK
++ should raise on unknown data OK
+```
+OK: 7/7 Fail: 0/7 Skip: 0/7
 ## Gossip validation [Preset: mainnet]
 ```diff
 + Any committee index is valid OK
@@ -369,4 +380,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 OK: 42/56 Fail: 0/56 Skip: 14/56

 ---TOTAL---
-OK: 203/219 Fail: 0/219 Skip: 16/219
+OK: 210/226 Fail: 0/226 Skip: 16/226
@@ -221,10 +221,12 @@ type
         name: "weak-subjectivity-checkpoint" }: Option[Checkpoint]

       finalizedCheckpointState* {.
+        hidden # TODO unhide when backfilling is done
         desc: "SSZ file specifying a recent finalized state"
         name: "finalized-checkpoint-state" }: Option[InputFile]

       finalizedCheckpointBlock* {.
+        hidden # TODO unhide when backfilling is done
         desc: "SSZ file specifying a recent finalized block"
         name: "finalized-checkpoint-block" }: Option[InputFile]

@@ -11,17 +11,15 @@ import
   std/[options, sequtils, tables, sets],
   stew/[assign2, byteutils, results],
   metrics, snappy, chronicles,
-  ../spec/[
-    beaconstate, eth2_merkleization, eth2_ssz_serialization, forks, helpers,
+  ../spec/[beaconstate, eth2_merkleization, eth2_ssz_serialization, helpers,
     state_transition, validator],
   ../spec/datatypes/[phase0, altair, merge],
   ".."/beacon_chain_db,
-  "."/[block_pools_types, block_quarantine, forkedbeaconstate_dbhelpers]
+  "."/[block_pools_types, block_quarantine]

 export
-  forks, block_pools_types, results, forkedbeaconstate_dbhelpers,
-  beacon_chain_db,
-  eth2_merkleization, eth2_ssz_serialization
+  eth2_merkleization, eth2_ssz_serialization,
+  block_pools_types, results, beacon_chain_db

 # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
 declareGauge beacon_head_root, "Root of the head block of the beacon chain"
@@ -364,6 +362,19 @@ proc getStateData(

   true

+proc getForkedBlock(db: BeaconChainDB, root: Eth2Digest):
+    Opt[ForkedTrustedSignedBeaconBlock] =
+  # When we only have a digest, we don't know which fork it's from so we try
+  # them one by one - this should be used sparingly
+  if (let blck = db.getMergeBlock(root); blck.isSome()):
+    ok(ForkedTrustedSignedBeaconBlock.init(blck.get()))
+  elif (let blck = db.getAltairBlock(root); blck.isSome()):
+    ok(ForkedTrustedSignedBeaconBlock.init(blck.get()))
+  elif (let blck = db.getPhase0Block(root); blck.isSome()):
+    ok(ForkedTrustedSignedBeaconBlock.init(blck.get()))
+  else:
+    err()
+
 proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
            updateFlags: UpdateFlags, onBlockCb: OnBlockCallback = nil,
            onHeadCb: OnHeadCallback = nil, onReorgCb: OnReorgCallback = nil,
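The comment in `getForkedBlock` above is the whole design: with only a root digest there is no fork tag, so the per-fork stores are probed from newest to oldest, newest first because recent blocks are the common case. A stand-alone sketch of the same probing order, using plain tables as assumed stand-ins for the real database getters:

```nim
import std/[options, tables]

type Root = string

# Hypothetical per-fork stores; the real code calls getMergeBlock/
# getAltairBlock/getPhase0Block on the database in this same order.
var phase0Blocks, altairBlocks, mergeBlocks: Table[Root, string]

proc getForkedBlockSketch(root: Root): Option[string] =
  # Probe newest fork first; fall through until some store has the root.
  if root in mergeBlocks: some(mergeBlocks[root])
  elif root in altairBlocks: some(altairBlocks[root])
  elif root in phase0Blocks: some(phase0Blocks[root])
  else: none(string)

when isMainModule:
  altairBlocks["0xabc"] = "altair block body"
  assert getForkedBlockSketch("0xabc").get() == "altair block body"
  assert getForkedBlockSketch("0xdef").isNone()
```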
@@ -380,19 +391,19 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,

   let
     tailRoot = tailBlockRoot.get()
-    tailBlock = db.getPhase0Block(tailRoot).get()
-    tailRef = BlockRef.init(tailRoot, tailBlock.message)
+    tailBlock = db.getForkedBlock(tailRoot).get()
+    tailRef = withBlck(tailBlock): BlockRef.init(tailRoot, blck.message)
     headRoot = headBlockRoot.get()

-  let genesisRef = if tailBlock.message.slot == GENESIS_SLOT:
+  let genesisRef = if tailBlock.slot == GENESIS_SLOT:
     tailRef
   else:
     let
       genesisBlockRoot = db.getGenesisBlock().expect(
         "preInit should have initialized the database with a genesis block root")
-      genesisBlock = db.getPhase0Block(genesisBlockRoot).expect(
+      genesisBlock = db.getForkedBlock(genesisBlockRoot).expect(
         "preInit should have initialized the database with a genesis block")
-    BlockRef.init(genesisBlockRoot, genesisBlock.message)
+    withBlck(genesisBlock): BlockRef.init(genesisBlockRoot, blck.message)

   var
     blocks: HashSet[KeyedBlockRef]
@@ -644,7 +655,7 @@ proc putState(dag: ChainDAGRef, state: StateData) =
   # Ideally we would save the state and the root lookup cache in a single
   # transaction to prevent database inconsistencies, but the state loading code
   # is resilient against one or the other going missing
-  dag.db.putState(state.data)
+  withState(state.data): dag.db.putState(state.root, state.data)
   dag.db.putStateRoot(
     state.blck.root, getStateField(state.data, slot), getStateRoot(state.data))
@@ -1313,65 +1324,102 @@ proc updateHead*(
       dag.onFinHappened(data)

 proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): bool =
+  # Lightweight check to see if we have the minimal information needed to
+  # load up a database - we don't check head here - if something is wrong with
+  # head, it's likely an initialized, but corrupt database - init will detect
+  # that
   let
-    headBlockRoot = db.getHeadBlock()
+    genesisBlockRoot = db.getGenesisBlock()
     tailBlockRoot = db.getTailBlock()

-  if not (headBlockRoot.isSome() and tailBlockRoot.isSome()):
+  if not (genesisBlockRoot.isSome() and tailBlockRoot.isSome()):
     return false

   let
-    headBlockPhase0 = db.getPhase0Block(headBlockRoot.get())
-    headBlockAltair = db.getAltairBlock(headBlockRoot.get())
-    tailBlock = db.getPhase0Block(tailBlockRoot.get())
+    genesisBlock = db.getForkedBlock(genesisBlockRoot.get())
+    tailBlock = db.getForkedBlock(tailBlockRoot.get())

-  if not ((headBlockPhase0.isSome() or headBlockAltair.isSome()) and
-      tailBlock.isSome()):
+  if not (genesisBlock.isSome() and tailBlock.isSome()):
     return false

-  if not db.containsState(tailBlock.get().message.state_root):
+  let
+    genesisStateRoot = withBlck(genesisBlock.get()): blck.message.state_root
+    tailStateRoot = withBlck(tailBlock.get()): blck.message.state_root
+
+  if not (
+      db.containsState(genesisStateRoot) and db.containsState(tailStateRoot)):
     return false

   true

 proc preInit*(
     T: type ChainDAGRef, db: BeaconChainDB,
-    genesisState, tailState: var phase0.BeaconState, tailBlock: phase0.TrustedSignedBeaconBlock) =
+    genesisState, tailState: ForkedHashedBeaconState,
+    tailBlock: ForkedTrustedSignedBeaconBlock) =
   # write a genesis state, the way the ChainDAGRef expects it to be stored in
   # database
   # TODO probably should just init a block pool with the freshly written
   # state - but there's more refactoring needed to make it nice - doing
   # a minimal patch for now..
-  doAssert tailBlock.message.state_root == hash_tree_root(tailState)
-  notice "New database from snapshot",
-    blockRoot = shortLog(tailBlock.root),
-    stateRoot = shortLog(tailBlock.message.state_root),
-    fork = tailState.fork,
-    validators = tailState.validators.len()

-  db.putState(tailState)
-  db.putBlock(tailBlock)
-  db.putTailBlock(tailBlock.root)
-  db.putHeadBlock(tailBlock.root)
-  db.putStateRoot(tailBlock.root, tailState.slot, tailBlock.message.state_root)
+  logScope:
+    genesisStateRoot = getStateRoot(genesisState)
+    genesisStateSlot = getStateField(genesisState, slot)
+    tailStateRoot = getStateRoot(tailState)
+    tailStateSlot = getStateField(tailState, slot)

-  if tailState.slot == GENESIS_SLOT:
-    db.putGenesisBlock(tailBlock.root)
-  else:
-    doAssert genesisState.slot == GENESIS_SLOT
-    db.putState(genesisState)
-    let genesisBlock = get_initial_beacon_block(genesisState)
-    db.putBlock(genesisBlock)
-    db.putStateRoot(genesisBlock.root, GENESIS_SLOT, genesisBlock.message.state_root)
-    db.putGenesisBlock(genesisBlock.root)
+  let genesisBlockRoot = withState(genesisState):
+    if state.root != getStateRoot(tailState):
+      # Different tail and genesis
+      if state.data.slot >= getStateField(tailState, slot):
+        fatal "Tail state must be newer or the same as genesis state"
+        quit 1

-func setTailState*(dag: ChainDAGRef,
-                   checkpointState: phase0.BeaconState,
-                   checkpointBlock: phase0.TrustedSignedBeaconBlock) =
-  # TODO(zah)
-  # Delete all records up to the tail node. If the tail node is not
-  # in the database, init the dabase in a way similar to `preInit`.
-  discard
+      let tail_genesis_validators_root =
+        getStateField(tailState, genesis_validators_root)
+      if state.data.genesis_validators_root != tail_genesis_validators_root:
+        fatal "Tail state doesn't match genesis validators root, it is likely from a different network!",
+          genesis_validators_root = shortLog(state.data.genesis_validators_root),
+          tail_genesis_validators_root = shortLog(tail_genesis_validators_root)
+        quit 1
+
+      let blck = get_initial_beacon_block(state.data)
+      db.putGenesisBlock(blck.root)
+      db.putBlock(blck)
+
+      db.putState(state.root, state.data)
+      db.putStateRoot(blck.root, state.data.slot, state.root)
+      blck.root
+    else: # tail and genesis are the same
+      withBlck(tailBlock):
+        db.putGenesisBlock(blck.root)
+        blck.root
+
+  withState(tailState):
+    withBlck(tailBlock):
+      # When looking up the state root of the tail block, we don't use the
+      # BlockSlot->state_root map, so the only way the init code can find the
+      # state is through the state root in the block - this could be relaxed
+      # down the line
+      if blck.message.state_root != state.root:
+        fatal "State must match the given block",
+          tailBlck = shortLog(blck)
+
+        quit 1
+
+      db.putBlock(blck)
+      db.putTailBlock(blck.root)
+      db.putHeadBlock(blck.root)
+
+      db.putState(state.root, state.data)
+      db.putStateRoot(blck.root, state.data.slot, state.root)
+
+      notice "New database from snapshot",
+        genesisBlockRoot = shortLog(genesisBlockRoot),
+        genesisStateRoot = shortLog(getStateRoot(genesisState)),
+        tailBlockRoot = shortLog(blck.root),
+        tailStateRoot = shortLog(state.root),
+        fork = state.data.fork,
+        validators = state.data.validators.len()

 proc getGenesisBlockData*(dag: ChainDAGRef): BlockData =
   dag.get(dag.genesis)
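A condensed restatement of the invariants the rewritten `preInit` enforces before seeding a database (stand-in types; the real code runs these checks through `withState`/`withBlck` on forked values): the tail may not be older than genesis, both must agree on `genesis_validators_root`, and the tail block must commit to the tail state via `state_root`.

```nim
type
  SketchState = object
    slot: uint64
    genesisValidatorsRoot: string
    root: string            # hash_tree_root(state), assumed precomputed here
  SketchBlock = object
    stateRoot: string       # the block's commitment to its post-state

proc checkpointAcceptable(genesis, tail: SketchState,
                          tailBlck: SketchBlock): bool =
  if tail.slot < genesis.slot:
    return false  # "Tail state must be newer or the same as genesis state"
  if tail.genesisValidatorsRoot != genesis.genesisValidatorsRoot:
    return false  # likely from a different network
  if tailBlck.stateRoot != tail.root:
    return false  # "State must match the given block"
  true
```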
@@ -1,18 +0,0 @@
-# beacon_chain
-# Copyright (c) 2021 Status Research & Development GmbH
-# Licensed and distributed under either of
-#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
-#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
-# at your option. This file may not be copied, modified, or distributed except according to those terms.
-
-{.push raises: [Defect].}
-
-import
-  ../spec/forks,
-  ../beacon_chain_db
-
-proc putState*(db: BeaconChainDB, state: ForkedHashedBeaconState) =
-  case state.kind:
-  of BeaconStateFork.Phase0: db.putState(getStateRoot(state), state.phase0Data.data)
-  of BeaconStateFork.Altair: db.putState(getStateRoot(state), state.altairData.data)
-  of BeaconStateFork.Merge: db.putState(getStateRoot(state), state.mergeData.data)
@@ -12,6 +12,7 @@ import
        tables, times, terminal],

   # Nimble packages
+  stew/io2,
   spec/eth2_apis/eth2_rest_serialization,
   stew/[objects, byteutils, endians2, io2], stew/shims/macros,
   chronos, confutils, metrics, metrics/chronos_httpserver,

@@ -141,8 +142,8 @@ proc init*(T: type BeaconNode,
     db = BeaconChainDB.new(config.databaseDir, inMemory = false)

   var
-    genesisState, checkpointState: ref phase0.BeaconState
-    checkpointBlock: phase0.TrustedSignedBeaconBlock
+    genesisState, checkpointState: ref ForkedHashedBeaconState
+    checkpointBlock: ForkedTrustedSignedBeaconBlock

   proc onAttestationReceived(data: Attestation) =
     eventBus.emit("attestation-received", data)

@@ -164,8 +165,9 @@ proc init*(T: type BeaconNode,
   if config.finalizedCheckpointState.isSome:
     let checkpointStatePath = config.finalizedCheckpointState.get.string
     checkpointState = try:
-      newClone(SSZ.loadFile(checkpointStatePath, phase0.BeaconState))
-    except SerializationError as err:
+      newClone(readSszForkedHashedBeaconState(
+        cfg, readAllBytes(checkpointStatePath).tryGet()))
+    except SszError as err:
       fatal "Checkpoint state deserialization failed",
         err = formatMsg(err, checkpointStatePath)
       quit 1

@@ -174,15 +176,18 @@ proc init*(T: type BeaconNode,
       quit 1

     if config.finalizedCheckpointBlock.isNone:
-      if checkpointState.slot > 0:
-        fatal "Specifying a non-genesis --finalized-checkpoint-state requires specifying --finalized-checkpoint-block as well"
-        quit 1
+      withState(checkpointState[]):
+        if getStateField(checkpointState[], slot) > 0:
+          fatal "Specifying a non-genesis --finalized-checkpoint-state requires specifying --finalized-checkpoint-block as well"
+          quit 1
     else:
       let checkpointBlockPath = config.finalizedCheckpointBlock.get.string
       try:
-        # TODO Perform sanity checks like signature and slot verification at least
-        checkpointBlock = SSZ.loadFile(checkpointBlockPath, phase0.TrustedSignedBeaconBlock)
-      except SerializationError as err:
+        # Checkpoint block might come from an earlier fork than the state with
+        # the state having empty slots processed past the fork epoch.
+        checkpointBlock = readSszForkedTrustedSignedBeaconBlock(
+          cfg, readAllBytes(checkpointBlockPath).tryGet())
+      except SszError as err:
         fatal "Invalid checkpoint block", err = err.formatMsg(checkpointBlockPath)
         quit 1
       except IOError as err:

@@ -197,8 +202,8 @@ proc init*(T: type BeaconNode,
   var eth1Monitor: Eth1Monitor
   if not ChainDAGRef.isInitialized(db):
     var
-      tailState: ref phase0.BeaconState
-      tailBlock: phase0.TrustedSignedBeaconBlock
+      tailState: ref ForkedHashedBeaconState
+      tailBlock: ForkedTrustedSignedBeaconBlock

     if genesisStateContents.len == 0 and checkpointState == nil:
       when hasGenesisDetection:

@@ -243,12 +248,14 @@ proc init*(T: type BeaconNode,
           eth1Block = genesisState.eth1_data.block_hash,
           totalDeposits = genesisState.eth1_data.deposit_count
       else:
-        fatal "The beacon node must be compiled with -d:has_genesis_detection " &
+        fatal "No database and no genesis snapshot found: supply a genesis.ssz " &
+              "with the network configuration, or compile the beacon node with " &
+              "the -d:has_genesis_detection option " &
               "in order to support monitoring for genesis events"
         quit 1

     elif genesisStateContents.len == 0:
-      if checkpointState.slot == GENESIS_SLOT:
+      if getStateField(checkpointState[], slot) == GENESIS_SLOT:
         genesisState = checkpointState
         tailState = checkpointState
         tailBlock = get_initial_beacon_block(genesisState[])

@@ -257,11 +264,13 @@ proc init*(T: type BeaconNode,
         quit 1
     else:
       try:
-        genesisState = newClone(SSZ.decode(genesisStateContents, phase0.BeaconState))
+        genesisState = newClone(readSszForkedHashedBeaconState(
+          cfg,
+          genesisStateContents.toOpenArrayByte(0, genesisStateContents.high())))
       except CatchableError as err:
         raiseAssert "Invalid baked-in state: " & err.msg

-    if checkpointState != nil:
+    if not checkpointState.isNil:
       tailState = checkpointState
       tailBlock = checkpointBlock
     else:

@@ -274,6 +283,11 @@ proc init*(T: type BeaconNode,
     except CatchableError as exc:
       error "Failed to initialize database", err = exc.msg
       quit 1
+  else:
+    if not checkpointState.isNil:
+      fatal "A database already exists, cannot start from given checkpoint",
+        dataDir = config.dataDir
+      quit 1

   # Doesn't use std/random directly, but dependencies might
   randomize(rng[].rand(high(int)))

@@ -319,16 +333,6 @@ proc init*(T: type BeaconNode,
         headStateSlot = getStateField(dag.headState.data, slot)
       quit 1

-    if checkpointState != nil:
-      let checkpointGenesisValidatorsRoot = checkpointState[].genesis_validators_root
-      if checkpointGenesisValidatorsRoot != databaseGenesisValidatorsRoot:
-        fatal "The specified checkpoint state is intended for a different network",
-          checkpointGenesisValidatorsRoot, databaseGenesisValidatorsRoot,
-          dataDir = config.dataDir
-        quit 1
-
-      dag.setTailState(checkpointState[], checkpointBlock)
-
   if eth1Monitor.isNil and
      config.web3Urls.len > 0 and
      genesisDepositsSnapshotContents.len > 0:
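The startup rules this file now enforces reduce to a small decision, sketched here as a pure function (an assumed simplification - in the real code the checks are interleaved with loading and logging): a checkpoint can only seed a fresh database, and a non-genesis checkpoint state needs a matching checkpoint block.

```nim
proc checkpointStartAllowed(dbInitialized, haveCheckpointState,
                            haveCheckpointBlock: bool,
                            checkpointSlot: uint64):
                            tuple[ok: bool, why: string] =
  if haveCheckpointState and dbInitialized:
    # existing databases cannot be rewound onto a checkpoint
    (false, "A database already exists, cannot start from given checkpoint")
  elif haveCheckpointState and checkpointSlot > 0 and not haveCheckpointBlock:
    # the tail block is needed to anchor a non-genesis state
    (false, "non-genesis --finalized-checkpoint-state requires " &
            "--finalized-checkpoint-block as well")
  else:
    (true, "")
```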
@@ -6,14 +6,14 @@
 import std/[typetraits, strutils, sets]
 import stew/[results, base10], chronicles,
        nimcrypto/utils as ncrutils
-import ".."/[beacon_chain_db, beacon_node],
+import "."/rest_utils,
+       ".."/[beacon_chain_db, beacon_node],
        ".."/networking/eth2_network,
        ".."/consensus_object_pools/[blockchain_dag, spec_cache,
                                     attestation_pool, sync_committee_msg_pool],
        ".."/validators/validator_duties,
        ".."/spec/[beaconstate, forks, network],
-       ".."/spec/datatypes/[phase0, altair],
-       "."/rest_utils
+       ".."/spec/datatypes/[phase0, altair]

 export rest_utils
@@ -298,6 +298,35 @@ func get_initial_beacon_block*(state: phase0.BeaconState):
   phase0.TrustedSignedBeaconBlock(
     message: message, root: hash_tree_root(message))

+# https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/altair/beacon-chain.md#initialize-state-for-pure-altair-testnets-and-test-vectors
+func get_initial_beacon_block*(state: altair.BeaconState):
+    altair.TrustedSignedBeaconBlock =
+  # The genesis block is implicitly trusted
+  let message = altair.TrustedBeaconBlock(
+    slot: state.slot,
+    state_root: hash_tree_root(state),)
+    # parent_root, randao_reveal, eth1_data, signature, and body automatically
+    # initialized to default values.
+  altair.TrustedSignedBeaconBlock(
+    message: message, root: hash_tree_root(message))
+
+# https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#testing
+func get_initial_beacon_block*(state: merge.BeaconState):
+    merge.TrustedSignedBeaconBlock =
+  # The genesis block is implicitly trusted
+  let message = merge.TrustedBeaconBlock(
+    slot: state.slot,
+    state_root: hash_tree_root(state),)
+    # parent_root, randao_reveal, eth1_data, signature, and body automatically
+    # initialized to default values.
+  merge.TrustedSignedBeaconBlock(
+    message: message, root: hash_tree_root(message))
+
+func get_initial_beacon_block*(state: ForkedHashedBeaconState):
+    ForkedTrustedSignedBeaconBlock =
+  withState(state):
+    ForkedTrustedSignedBeaconBlock.init(get_initial_beacon_block(state.data))
+
 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#get_block_root_at_slot
 func get_block_root_at_slot*(state: ForkyBeaconState, slot: Slot): Eth2Digest =
   ## Return the block root at a recent ``slot``.
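All three `get_initial_beacon_block` overloads encode the same spec rule: the implicit genesis block carries `state_root = hash_tree_root(state)`, leaves every other field at its default, and its own root is the hash of the message. A toy restatement with `std/hashes` as an assumed stand-in for `hash_tree_root` (none of these names are nimbus-eth2 types):

```nim
import std/hashes

type
  ToyState = object
    slot: uint64
    payload: string

  ToyBlock = object
    slot: uint64
    stateRoot: Hash   # stand-in for hash_tree_root(state)
    parentRoot: Hash  # left default, like randao_reveal/eth1_data/signature

proc toyRoot(s: ToyState): Hash =
  !$(hash(s.payload) !& hash(s.slot))

proc toyBlockRoot(b: ToyBlock): Hash =
  !$(hash(b.slot) !& hash(int(b.stateRoot)) !& hash(int(b.parentRoot)))

proc initialBlock(s: ToyState): tuple[message: ToyBlock, root: Hash] =
  # commit to the state; everything else stays defaulted
  let message = ToyBlock(slot: s.slot, stateRoot: toyRoot(s))
  (message, toyBlockRoot(message))
```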
@@ -403,6 +403,8 @@ type
     data*: T

   ForkedSignedBlockHeader* = object
+    message*: uint32 # message offset
+    signature*: ValidatorSig
     slot*: Slot

   ForkedBeaconStateHeader* = object
@@ -412,60 +412,58 @@ func getForkSchedule*(cfg: RuntimeConfig): array[2, Fork] =
   ## NOTE: Update this procedure when new fork will be scheduled.
   [cfg.genesisFork(), cfg.altairFork()]

-func readSszForkedHashedBeaconState*(
-    data: openArray[byte], likelyFork: BeaconStateFork):
+type
+  # The first few fields of a state, shared across all forks
+  BeaconStateHeader = object
+    genesis_time: uint64
+    genesis_validators_root: Eth2Digest
+    slot: uint64
+
+func readSszForkedHashedBeaconState*(cfg: RuntimeConfig, data: openArray[byte]):
     ForkedHashedBeaconState {.raises: [Defect, SszError].} =
-  ## Helper to read a state from bytes when it's not certain what kind of state
+  ## Helper to read a header from bytes when it's not certain what kind of state
   ## it is - this happens for example when loading an SSZ state from command
-  ## line - we'll use wall time to "guess" which state to start with
+  ## line
+  if data.len() < sizeof(BeaconStateHeader):
+    raise (ref MalformedSszError)(msg: "Not enough data for BeaconState header")
+  let header = SSZ.decode(
+    data.toOpenArray(0, sizeof(BeaconStateHeader) - 1),
+    BeaconStateHeader)

   # careful - `result` is used, RVO didn't seem to work without
-  result = ForkedHashedBeaconState(kind: likelyFork)
-  var tried: set[BeaconStateFork]
+  # TODO move time helpers somewhere to avoid circular imports
+  result = ForkedHashedBeaconState(
+    kind: cfg.stateForkAtEpoch(Epoch(header.slot div SLOTS_PER_EPOCH)))

-  template readFork() =
-    withState(result):
-      try:
-        readSszBytes(data, state.data)
-        state.root = hash_tree_root(state.data)
-        return result
-      except SszError as exc:
-        tried.incl result.kind
-
-  readFork()
-
-  for fork in BeaconStateFork:
-    if fork in tried: continue
-    result = ForkedHashedBeaconState(kind: fork)
-    readFork()
-
-  raise (ref SszError)(msg: "Unable to match data to any known fork")
+  withState(result):
+    readSszBytes(data, state.data)
+    state.root = hash_tree_root(state.data)
+
+type
+  ForkedBeaconBlockHeader = object
+    message*: uint32 # message offset
+    signature*: ValidatorSig
+    slot: Slot # start of BeaconBlock

 func readSszForkedTrustedSignedBeaconBlock*(
-    data: openArray[byte], likelyFork: BeaconBlockFork):
+    cfg: RuntimeConfig, data: openArray[byte]):
     ForkedTrustedSignedBeaconBlock {.raises: [Defect, SszError].} =
-  ## Helper to read a state from bytes when it's not certain what kind of state
-  ## it is - this happens for example when loading an SSZ state from command
-  ## line - we'll use wall time to "guess" which state to start with
+  ## Helper to read a header from bytes when it's not certain what kind of block
+  ## it is

-  var
-    res = ForkedTrustedSignedBeaconBlock(kind: likelyFork)
-    tried: set[BeaconBlockFork]
-
-  template readFork() =
-    withBlck(res):
-      try:
-        readSszBytes(data, blck)
-        return res
-      except SszError as exc:
-        tried.incl res.kind
-
-  readFork()
-
-  for fork in BeaconBlockFork:
-    if fork in tried: continue
-    res = ForkedTrustedSignedBeaconBlock(kind: fork)
-    readFork()
-
-  raise (ref SszError)(msg: "Unable to match data to any known fork")
+  if data.len() < sizeof(BeaconBlockHeader):
+    raise (ref MalformedSszError)(msg: "Not enough data for SignedBeaconBlock header")
+
+  let header = SSZ.decode(
+    data.toOpenArray(0, sizeof(ForkedBeaconBlockHeader) - 1),
+    ForkedBeaconBlockHeader)
+  # careful - `result` is used, RVO didn't seem to work without
+  # TODO move time helpers somewhere to avoid circular imports
+  result = ForkedTrustedSignedBeaconBlock(
+    kind: cfg.blockForkAtEpoch(Epoch(header.slot div SLOTS_PER_EPOCH)))
+
+  withBlck(result):
+    readSszBytes(data, blck)

 func toBeaconBlockFork*(fork: BeaconStateFork): BeaconBlockFork =
   case fork
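The header trick above ("pre-load header to speed up loading") relies on SSZ serializing a container's fixed-size leading fields at fixed offsets: every `BeaconState` starts with `genesis_time` (8 bytes), `genesis_validators_root` (32 bytes) and `slot` (8 bytes, little-endian), so the slot can be read at byte offset 40 without decoding the whole state, and slot -> epoch -> fork selects the right decoder in one pass instead of trial-decoding each fork. A self-contained sketch with stand-in fork-schedule constants (the real code gets these from `RuntimeConfig`; 74240 is mainnet's ALTAIR_FORK_EPOCH):

```nim
type SketchFork = enum
  sketchPhase0, sketchAltair, sketchMerge

const
  slotOffset = 8 + 32        # genesis_time + genesis_validators_root
  slotsPerEpoch = 32'u64
  altairEpoch = 74240'u64    # mainnet ALTAIR_FORK_EPOCH
  mergeEpoch = high(uint64)  # not scheduled in this sketch

proc peekSlot(data: openArray[byte]): uint64 =
  ## Read the little-endian slot field without decoding the full state.
  doAssert data.len >= slotOffset + 8, "not enough data for state header"
  for i in 0 ..< 8:
    result = result or (uint64(data[slotOffset + i]) shl (8 * i))

proc forkAtSlot(slot: uint64): SketchFork =
  let epoch = slot div slotsPerEpoch
  if epoch >= mergeEpoch: sketchMerge
  elif epoch >= altairEpoch: sketchAltair
  else: sketchPhase0

when isMainModule:
  var stateBytes = newSeq[byte](48)
  stateBytes[slotOffset] = 0x20  # slot = 32 -> epoch 1 -> still phase0
  doAssert forkAtSlot(peekSlot(stateBytes)) == sketchPhase0
```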
@@ -29,23 +29,11 @@ proc dump*(dir: string, v: AttestationData, validator: ValidatorPubKey) =
   logErrors:
     SSZ.saveFile(dir / &"att-{v.slot}-{v.index}-{shortLog(validator)}.ssz", v)

-proc dump*(dir: string, v: phase0.TrustedSignedBeaconBlock) =
+proc dump*(dir: string, v: ForkyTrustedSignedBeaconBlock) =
   logErrors:
     SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(v.root)}.ssz", v)

-proc dump*(dir: string, v: altair.TrustedSignedBeaconBlock) =
-  logErrors:
-    SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(v.root)}.ssz", v)
-
-proc dump*(dir: string, v: phase0.SignedBeaconBlock) =
-  logErrors:
-    SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(v.root)}.ssz", v)
-
-proc dump*(dir: string, v: altair.SignedBeaconBlock) =
-  logErrors:
-    SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(v.root)}.ssz", v)
-
-proc dump*(dir: string, v: merge.SignedBeaconBlock) =
+proc dump*(dir: string, v: ForkySignedBeaconBlock) =
   logErrors:
     SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(v.root)}.ssz", v)

@@ -62,8 +62,6 @@ The following options are available:
      [=false].
 --weak-subjectivity-checkpoint  Weak subjectivity checkpoint in the format
                                 block_root:epoch_number.
---finalized-checkpoint-state    SSZ file specifying a recent finalized state.
---finalized-checkpoint-block    SSZ file specifying a recent finalized block.
 --node-name                     A name for this node that will appear in the logs. If you set
                                 this to 'auto', a persistent automatically generated ID will
                                 be selected for each --data-dir folder.
@@ -3,7 +3,8 @@ import
   confutils, chronicles, json_serialization,
   stew/byteutils,
   ../research/simutils,
-  ../beacon_chain/spec/datatypes/[phase0],
+  ../beacon_chain/spec/eth2_apis/eth2_rest_serialization,
+  ../beacon_chain/spec/datatypes/[phase0, altair, merge],
   ../beacon_chain/spec/[
     eth2_ssz_serialization, forks, helpers, state_transition],
   ../beacon_chain/networking/network_metadata

@@ -157,12 +158,12 @@ proc doSSZ(conf: NcliConf) =

   case conf.cmd:
   of hashTreeRoot:
-    when t is phase0.SignedBeaconBlock:
+    when t is ForkySignedBeaconBlock:
       echo hash_tree_root(v.message).data.toHex()
     else:
       echo hash_tree_root(v[]).data.toHex()
   of pretty:
-    echo JSON.encode(v[], pretty = true)
+    echo RestJson.encode(v[], pretty = true)
   else:
     raiseAssert "doSSZ() only implements hashTreeRoot and pretty commands"

@@ -171,14 +172,22 @@ proc doSSZ(conf: NcliConf) =
   case kind
   of "attester_slashing": printit(AttesterSlashing)
   of "attestation": printit(Attestation)
-  of "signed_block": printit(phase0.SignedBeaconBlock)
-  of "block": printit(phase0.BeaconBlock)
-  of "block_body": printit(phase0.BeaconBlockBody)
+  of "phase0_signed_block": printit(phase0.SignedBeaconBlock)
+  of "altair_signed_block": printit(altair.SignedBeaconBlock)
+  of "merge_signed_block": printit(merge.SignedBeaconBlock)
+  of "phase0_block": printit(phase0.BeaconBlock)
+  of "altair_block": printit(altair.BeaconBlock)
+  of "merge_block": printit(merge.BeaconBlock)
+  of "phase0_block_body": printit(phase0.BeaconBlockBody)
+  of "altair_block_body": printit(altair.BeaconBlockBody)
+  of "merge_block_body": printit(merge.BeaconBlockBody)
   of "block_header": printit(BeaconBlockHeader)
   of "deposit": printit(Deposit)
   of "deposit_data": printit(DepositData)
   of "eth1_data": printit(Eth1Data)
-  of "state": printit(phase0.BeaconState)
+  of "phase0_state": printit(phase0.BeaconState)
+  of "altiar_state": printit(altair.BeaconState)
+  of "merge_state": printit(merge.BeaconState)
   of "proposer_slashing": printit(ProposerSlashing)
   of "voluntary_exit": printit(VoluntaryExit)
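The expanded `case kind` above follows ncli's existing `printit` pattern: one generic template instantiated per concrete type keeps the dispatch flat while each branch works with a fully typed value. A stand-alone sketch of that shape, with invented toy types and std/json standing in for the SSZ/RestJson machinery:

```nim
import std/json

type
  Phase0Thing = object
    a: int
  AltairThing = object
    b: string

proc doSketch(kind, payload: string) =
  template printit(T: untyped) =
    # each branch decodes as a concrete T, then prints it
    let v = to(parseJson(payload), T)
    echo v
  case kind
  of "phase0_thing": printit(Phase0Thing)
  of "altair_thing": printit(AltairThing)
  else: echo "unknown kind: ", kind

when isMainModule:
  doSketch("altair_thing", """{"b": "hi"}""")
```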
ncli/ncli_db.nim (155 lines changed)
@@ -3,9 +3,8 @@ import
   chronicles, confutils, stew/byteutils, eth/db/kvstore_sqlite3,
   ../beacon_chain/networking/network_metadata,
   ../beacon_chain/[beacon_chain_db],
-  ../beacon_chain/consensus_object_pools/[
-    blockchain_dag, forkedbeaconstate_dbhelpers],
-  ../beacon_chain/spec/datatypes/phase0,
+  ../beacon_chain/consensus_object_pools/[blockchain_dag],
+  ../beacon_chain/spec/datatypes/[phase0, altair, merge],
   ../beacon_chain/spec/[
     beaconstate, helpers, state_transition, state_transition_epoch, validator],
   ../beacon_chain/sszdump,

@@ -176,64 +175,96 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
   var
     (start, ends) = dag.getSlotRange(conf.benchSlot, conf.benchSlots)
     blockRefs = dag.getBlockRange(start, ends)
-    blocks: seq[phase0.TrustedSignedBeaconBlock]
+    blocks: (
+      seq[phase0.TrustedSignedBeaconBlock],
+      seq[altair.TrustedSignedBeaconBlock],
+      seq[merge.TrustedSignedBeaconBlock])

   echo &"Loaded {dag.blocks.len} blocks, head slot {dag.head.slot}, selected {blockRefs.len} blocks"
   doAssert blockRefs.len() > 0, "Must select at least one block"

   for b in 0..<blockRefs.len:
+    let blck = blockRefs[blockRefs.len - b - 1]
     withTimer(timers[tLoadBlock]):
-      blocks.add db.getPhase0Block(blockRefs[blockRefs.len - b - 1].root).get()
+      case cfg.blockForkAtEpoch(blck.slot.epoch)
+      of BeaconBlockFork.Phase0:
+        blocks[0].add dag.db.getPhase0Block(blck.root).get()
+      of BeaconBlockFork.Altair:
+        blocks[1].add dag.db.getAltairBlock(blck.root).get()
+      of BeaconBlockFork.Merge:
+        blocks[2].add dag.db.getMergeBlock(blck.root).get()

-  let state = newClone(dag.headState)
+  let stateData = newClone(dag.headState)

   var
     cache = StateCache()
     info = ForkedEpochInfo()
-    loadedState = new phase0.BeaconState
+    loadedState = (
+      (ref phase0.HashedBeaconState)(),
+      (ref altair.HashedBeaconState)(),
+      (ref merge.HashedBeaconState)())

   withTimer(timers[tLoadState]):
     dag.updateStateData(
-      state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
+      stateData[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)

-  for b in blocks.mitems():
-    while getStateField(state[].data, slot) < b.message.slot:
-      let isEpoch = (getStateField(state[].data, slot) + 1).isEpoch()
-      withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
-        let ok = process_slots(
-          dag.cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
-          info, {})
-        doAssert ok, "Slot processing can't fail with correct inputs"
+  template processBlocks(blocks: auto) =
+    for b in blocks.mitems():
+      while getStateField(stateData[].data, slot) < b.message.slot:
+        let isEpoch = (getStateField(stateData[].data, slot) + 1).isEpoch()
+        withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
+          let ok = process_slots(
+            dag.cfg, stateData[].data, getStateField(stateData[].data, slot) + 1, cache,
+            info, {})
+          doAssert ok, "Slot processing can't fail with correct inputs"

-    var start = Moment.now()
-    withTimer(timers[tApplyBlock]):
-      if conf.resetCache:
-        cache = StateCache()
-      if not state_transition_block(
-          dag.cfg, state[].data, b, cache, {}, noRollback):
-        dump("./", b)
-        echo "State transition failed (!)"
-        quit 1
-    if conf.printTimes:
-      echo b.message.slot, ",", toHex(b.root.data), ",", nanoseconds(Moment.now() - start)
-    if conf.storeBlocks:
-      withTimer(timers[tDbStore]):
-        dbBenchmark.putBlock(b)
+      var start = Moment.now()
+      withTimer(timers[tApplyBlock]):
+        if conf.resetCache:
+          cache = StateCache()
+        if not state_transition_block(
+            dag.cfg, stateData[].data, b, cache, {}, noRollback):
+          dump("./", b)
+          echo "State transition failed (!)"
+          quit 1
+      if conf.printTimes:
+        echo b.message.slot, ",", toHex(b.root.data), ",", nanoseconds(Moment.now() - start)
+      if conf.storeBlocks:
+        withTimer(timers[tDbStore]):
+          dbBenchmark.putBlock(b)

-    if getStateField(state[].data, slot).isEpoch and conf.storeStates:
-      if getStateField(state[].data, slot).epoch < 2:
-        dbBenchmark.putState(state[].data)
-        dbBenchmark.checkpoint()
-      else:
-        withTimer(timers[tDbStore]):
-          dbBenchmark.putState(state[].data)
-          dbBenchmark.checkpoint()
+      withState(stateData[].data):
+        if state.data.slot.isEpoch and conf.storeStates:
+          if state.data.slot.epoch < 2:
+            dbBenchmark.putState(state.root, state.data)
+            dbBenchmark.checkpoint()
+          else:
+            withTimer(timers[tDbStore]):
+              dbBenchmark.putState(state.root, state.data)
+              dbBenchmark.checkpoint()

-        withTimer(timers[tDbLoad]):
-          doAssert dbBenchmark.getState(getStateRoot(state[].data), loadedState[], noRollback)
+          withTimer(timers[tDbLoad]):
+            case stateFork
+            of BeaconStateFork.Phase0:
+              doAssert dbBenchmark.getState(
+                state.root, loadedState[0][].data, noRollback)
+            of BeaconStateFork.Altair:
+              doAssert dbBenchmark.getState(
+                state.root, loadedState[1][].data, noRollback)
+            of BeaconStateFork.Merge:
+              doAssert dbBenchmark.getState(
+                state.root, loadedState[2][].data, noRollback)

-        if getStateField(state[].data, slot).epoch mod 16 == 0:
-          doAssert hash_tree_root(state[].data.phase0Data.data) == hash_tree_root(loadedState[])
+          if state.data.slot.epoch mod 16 == 0:
+            let loadedRoot = case stateFork
+              of BeaconStateFork.Phase0: hash_tree_root(loadedState[0][].data)
+              of BeaconStateFork.Altair: hash_tree_root(loadedState[1][].data)
+              of BeaconStateFork.Merge: hash_tree_root(loadedState[2][].data)
+            doAssert hash_tree_root(state.data) == loadedRoot
+
+  processBlocks(blocks[0])
+  processBlocks(blocks[1])
+  processBlocks(blocks[2])

   printTimers(false, timers)

@@ -241,16 +272,27 @@ proc cmdDumpState(conf: DbConf) =
   let db = BeaconChainDB.new(conf.databaseDir.string)
   defer: db.close()

+  let
+    phase0State = (ref phase0.HashedBeaconState)()
+    altairState = (ref altair.HashedBeaconState)()
+    mergeState = (ref merge.HashedBeaconState)()
+
   for stateRoot in conf.stateRoot:
-    try:
-      let root = Eth2Digest(data: hexToByteArray[32](stateRoot))
-      var state = (ref phase0.HashedBeaconState)(root: root)
-      if not db.getState(root, state.data, noRollback):
-        echo "Couldn't load ", root
-      else:
-        dump("./", state[])
-    except CatchableError as e:
-      echo "Couldn't load ", stateRoot, ": ", e.msg
+    template doit(state: untyped) =
+      try:
+        state.root = Eth2Digest.fromHex(stateRoot)
+
+        if db.getState(state.root, state.data, noRollback):
+          dump("./", state)
+          continue
+      except CatchableError as e:
+        echo "Couldn't load ", state.root, ": ", e.msg
+
+    doit(phase0State[])
+    doit(altairState[])
+    doit(mergeState[])
+
+    echo "Couldn't load ", stateRoot

 proc cmdDumpBlock(conf: DbConf) =
   let db = BeaconChainDB.new(conf.databaseDir.string)

@@ -258,11 +300,15 @@ proc cmdDumpBlock(conf: DbConf) =

   for blockRoot in conf.blockRootx:
     try:
-      let root = Eth2Digest(data: hexToByteArray[32](blockRoot))
+      let root = Eth2Digest.fromHex(blockRoot)
       if (let blck = db.getPhase0Block(root); blck.isSome):
         dump("./", blck.get())
+      elif (let blck = db.getAltairBlock(root); blck.isSome):
+        dump("./", blck.get())
+      elif (let blck = db.getMergeBlock(root); blck.isSome):
+        dump("./", blck.get())
       else:
-        echo "Couldn't load ", root
+        echo "Couldn't load ", blockRoot
     except CatchableError as e:
       echo "Couldn't load ", blockRoot, ": ", e.msg

@@ -370,7 +416,8 @@ proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
   let tmpState = assignClone(dag.headState)
   dag.withState(tmpState[], blckRef.atSlot(Slot(conf.slot))):
     echo "Writing state..."
-    dump("./", stateData.data.phase0Data, blck)
+    withState(stateData.data):
+      dump("./", state, blck)

 func atCanonicalSlot(blck: BlockRef, slot: Slot): BlockSlot =
   if slot == 0:
@@ -64,9 +64,9 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
        blockRatio {.desc: "ratio of slots with blocks"} = 1.0,
        replay = true):
   let
-    (state, depositContractSnapshot) = loadGenesis(validators, false)
-    genesisBlock = get_initial_beacon_block(state[].data)
-    genesisTime = float state[].data.genesis_time
+    (genesisState, depositContractSnapshot) = loadGenesis(validators, false)
+    genesisBlock = get_initial_beacon_block(genesisState[])
+    genesisTime = float getStateField(genesisState[], genesis_time)

   var
     validatorKeyToIndex = initTable[ValidatorPubKey, int]()

@@ -80,11 +80,12 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
   let db = BeaconChainDB.new("block_sim_db")
   defer: db.close()

-  ChainDAGRef.preInit(db, state[].data, state[].data, genesisBlock)
+  ChainDAGRef.preInit(db, genesisState[], genesisState[], genesisBlock)
   putInitialDepositContractSnapshot(db, depositContractSnapshot)

-  for i in 0 ..< state.data.validators.len:
-    validatorKeyToIndex[state.data.validators[i].pubkey] = i
+  withState(genesisState[]):
+    for i in 0 ..< state.data.validators.len:
+      validatorKeyToIndex[state.data.validators[i].pubkey] = i

   var
     dag = ChainDAGRef.init(cfg, db, {})
@@ -1,4 +1,5 @@
 import
+  stew/io2,
   stats, os, strformat, times,
   ../tests/testblockutil,
   ../beacon_chain/beacon_chain_db,

@@ -61,31 +62,34 @@ func verifyConsensus*(state: ForkedHashedBeaconState, attesterRatio: auto) =
     state, finalized_checkpoint).epoch + 2 >= current_epoch

 proc loadGenesis*(validators: Natural, validate: bool):
-    (ref phase0.HashedBeaconState, DepositContractSnapshot) =
+    (ref ForkedHashedBeaconState, DepositContractSnapshot) =
   let
     genesisFn =
       &"genesis_{const_preset}_{validators}_{SPEC_VERSION}.ssz"
     contractSnapshotFn =
       &"deposit_contract_snapshot_{const_preset}_{validators}_{SPEC_VERSION}.ssz"
-    res = (ref phase0.HashedBeaconState)()
+    cfg = defaultRuntimeConfig

   if fileExists(genesisFn) and fileExists(contractSnapshotFn):
-    res.data = SSZ.loadFile(genesisFn, phase0.BeaconState)
-    res.root = hash_tree_root(res.data)
-    if res.data.slot != GENESIS_SLOT:
-      echo "Can only start from genesis state"
-      quit 1
+    let res = newClone(readSszForkedHashedBeaconState(
+      cfg, readAllBytes(genesisFn).tryGet()))

-    if res.data.validators.len != validators:
-      echo &"Supplied genesis file has {res.data.validators.len} validators, while {validators} where requested, running anyway"
+    withState(res[]):
+      if state.data.slot != GENESIS_SLOT:
+        echo "Can only start from genesis state"
+        quit 1

-    echo &"Loaded {genesisFn}..."
+      if state.data.validators.len != validators:
+        echo &"Supplied genesis file has {state.data.validators.len} validators, while {validators} where requested, running anyway"

-    # TODO check that the private keys are interop keys
+      echo &"Loaded {genesisFn}..."

-    let contractSnapshot = SSZ.loadFile(contractSnapshotFn,
-                                        DepositContractSnapshot)
-    (res, contractSnapshot)
+      # TODO check that the private keys are interop keys
+
+      let contractSnapshot = SSZ.loadFile(contractSnapshotFn,
+                                          DepositContractSnapshot)
+      (res, contractSnapshot)
   else:
     echo "Genesis file not found, making one up (use nimbus_beacon_node createTestnet to make one)"

@@ -101,17 +105,18 @@ proc loadGenesis*(validators: Natural, validate: bool):
     let contractSnapshot = DepositContractSnapshot(
       depositContractState: merkleizer.toDepositContractState)

-    res.data = initialize_beacon_state_from_eth1(
-      defaultRuntimeConfig,
+    let res = (ref ForkedHashedBeaconState)(kind: BeaconStateFork.Phase0)
+    res.phase0Data.data = initialize_beacon_state_from_eth1(
+      cfg,
       Eth2Digest(),
       0,
       deposits,
       flags)[]

-    res.root = hash_tree_root(res.data)
+    res.phase0Data.root = hash_tree_root(res[].phase0Data.data)

     echo &"Saving to {genesisFn}..."
-    SSZ.saveFile(genesisFn, res.data)
+    SSZ.saveFile(genesisFn, res.phase0Data.data)
     echo &"Saving to {contractSnapshotFn}..."
     SSZ.saveFile(contractSnapshotFn, contractSnapshot)
@ -14,6 +14,7 @@ import
|
||||||
options, sequtils, random, tables,
|
options, sequtils, random, tables,
|
||||||
../tests/testblockutil,
|
../tests/testblockutil,
|
||||||
../beacon_chain/spec/datatypes/phase0,
|
../beacon_chain/spec/datatypes/phase0,
|
||||||
|
../beacon_chain/spec/eth2_apis/eth2_rest_serialization,
|
||||||
../beacon_chain/spec/[beaconstate, forks, helpers],
|
../beacon_chain/spec/[beaconstate, forks, helpers],
|
||||||
./simutils
|
./simutils
|
||||||
|
|
||||||
|
@ -30,7 +31,7 @@ func jsonName(prefix, slot: auto): string =
|
||||||
proc writeJson*(fn, v: auto) =
|
proc writeJson*(fn, v: auto) =
|
||||||
var f: File
|
var f: File
|
||||||
defer: close(f)
|
defer: close(f)
|
||||||
Json.saveFile(fn, v, pretty = true)
|
RestJson.saveFile(fn, v, pretty = true)
|
||||||
|
|
||||||
cli do(slots = SLOTS_PER_EPOCH * 5,
|
cli do(slots = SLOTS_PER_EPOCH * 5,
|
||||||
validators = SLOTS_PER_EPOCH * 400, # One per shard is minimum
|
validators = SLOTS_PER_EPOCH * 400, # One per shard is minimum
|
||||||
|
```diff
@@ -41,20 +42,18 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
     validate = true):
   let
     flags = if validate: {} else: {skipBlsValidation}
-    (hashedState, _) = loadGenesis(validators, validate)
-    genesisBlock = get_initial_beacon_block(hashedState.data)
-    state = (ref ForkedHashedBeaconState)(
-      kind: BeaconStateFork.Phase0, phase0Data: hashedState[])
+    (state, _) = loadGenesis(validators, validate)
+    genesisBlock = get_initial_beacon_block(state[])
 
   echo "Starting simulation..."
 
   var
     attestations = initTable[Slot, seq[Attestation]]()
-    latest_block_root = hash_tree_root(genesisBlock.message)
+    latest_block_root = withBlck(genesisBlock): blck.root
     timers: array[Timers, RunningStat]
     attesters: RunningStat
     r = initRand(1)
-    signedBlock: phase0.SignedBeaconBlock
+    signedBlock: ForkedSignedBeaconBlock
     cache = StateCache()
 
   proc maybeWrite(last: bool) =
@@ -65,13 +64,14 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
       write(stdout, ".")
 
     if last:
-      writeJson("state.json", state[].phase0Data)
+      withState(state[]): writeJson("state.json", state.data)
     else:
-      if getStateField(state[], slot) mod json_interval.uint64 == 0:
-        writeJson(jsonName(prefix, getStateField(state[], slot)), state[].phase0Data.data)
-        write(stdout, ":")
-      else:
-        write(stdout, ".")
+      withState(state[]):
+        if state.data.slot mod json_interval.uint64 == 0:
+          writeJson(jsonName(prefix, state.data.slot), state.data)
+          write(stdout, ":")
+        else:
+          write(stdout, ".")
 
   # TODO doAssert against this up-front
   # indexed attestation: validator index beyond max validators per committee
@@ -97,10 +97,9 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
       withTimer(timers[t]):
         signedBlock = addTestBlock(
           state[], latest_block_root, cache, attestations = blockAttestations,
-          flags = flags).phase0Data
+          flags = flags)
       latest_block_root = withTimerRet(timers[tHashBlock]):
-        hash_tree_root(signedBlock.message)
-      signedBlock.root = latest_block_root
+        withBlck(signedBlock): hash_tree_root(blck.message)
 
     if attesterRatio > 0.0:
       # attesterRatio is the fraction of attesters that actually do their
@@ -168,4 +167,4 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
 
   echo "Done!"
 
-  printTimers(state[].phase0Data.data, attesters, validate, timers)
+  printTimers(state[], attesters, validate, timers)
```
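The `withState`/`withBlck` calls above dispatch on the active fork of a forked wrapper type, so the simulation no longer hard-codes `phase0Data`. A self-contained toy version of that pattern (simplified names; the real templates live in `../beacon_chain/spec/forks`):

```nim
type
  MiniFork = enum
    forkPhase0, forkAltair

  Phase0State = object
    slot: uint64

  AltairState = object
    slot: uint64

  ForkedState = object
    case kind: MiniFork
    of forkPhase0: phase0Data: Phase0State
    of forkAltair: altairData: AltairState

template withState(x: ForkedState, body: untyped): untyped =
  # Inject `state` as the fork-specific payload, then run the body once.
  case x.kind
  of forkPhase0:
    template state: untyped {.inject.} = x.phase0Data
    body
  of forkAltair:
    template state: untyped {.inject.} = x.altairData
    body

let s = ForkedState(kind: forkAltair, altairData: AltairState(slot: 32))
withState(s):
  echo state.slot   # 32, without the caller naming the fork
```

The payoff is that fork-agnostic call sites (dumping JSON, hashing a block) stay a single line no matter how many forks the wrapper grows.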
```diff
@@ -22,6 +22,7 @@ import # Unit test
   ./test_eth1_monitor,
   ./test_eth2_ssz_serialization,
   ./test_exit_pool,
+  ./test_forks,
   ./test_gossip_validation,
   ./test_helpers,
   ./test_honest_validator,
```
```diff
@@ -0,0 +1,100 @@
+import
+  unittest2,
+  stew/byteutils,
+  ../beacon_chain/spec/[forks, helpers],
+  ../beacon_chain/spec/datatypes/[phase0, altair, merge]
+
+{.used.}
+
+template testHashedBeaconState(T: type, s: Slot) =
+  let
+    state = (ref T)()
+  state[].slot = s
+  let
+    bytes = SSZ.encode(state[])
+    forked = (ref ForkedHashedBeaconState)()
+
+  forked[] = readSszForkedHashedBeaconState(cfg, bytes)
+
+  check:
+    forked.kind == T.toFork()
+
+template testTrustedSignedBeaconBlock(T: type, s: Slot) =
+  let
+    blck = (ref T)()
+
+  blck[].message.slot = s
+
+  let
+    bytes = SSZ.encode(blck[])
+    forked = (ref ForkedTrustedSignedBeaconBlock)()
+
+  forked[] = readSszForkedTrustedSignedBeaconBlock(cfg, bytes)
+
+  check:
+    forked.kind == T.toFork()
+
+suite "Forked SSZ readers":
+  var
+    cfg = defaultRuntimeConfig
+
+  cfg.ALTAIR_FORK_EPOCH = Epoch(1)
+  cfg.MERGE_FORK_EPOCH = Epoch(2)
+
+  test "load phase0 state":
+    testHashedBeaconState(phase0.BeaconState, 0.Slot)
+
+    expect(SszError):
+      testHashedBeaconState(altair.BeaconState, 0.Slot)
+    expect(SszError):
+      testHashedBeaconState(merge.BeaconState, 0.Slot)
+
+  test "load altair state":
+    testHashedBeaconState(altair.BeaconState, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
+
+    expect(SszError):
+      testHashedBeaconState(phase0.BeaconState, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
+    expect(SszError):
+      testHashedBeaconState(merge.BeaconState, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
+
+  test "load merge state":
+    testHashedBeaconState(merge.BeaconState, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
+
+    expect(SszError):
+      testHashedBeaconState(phase0.BeaconState, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
+    expect(SszError):
+      testHashedBeaconState(altair.BeaconState, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
+
+  test "should raise on unknown data":
+    let
+      bytes = SSZ.encode(AttestationData())
+    expect(SszError):
+      discard newClone(readSszForkedHashedBeaconState(cfg, bytes))
+
+  test "load phase0 block":
+    testTrustedSignedBeaconBlock(phase0.TrustedSignedBeaconBlock, 0.Slot)
+    expect(SszError):
+      testTrustedSignedBeaconBlock(altair.TrustedSignedBeaconBlock, 0.Slot)
+    expect(SszError):
+      testTrustedSignedBeaconBlock(merge.TrustedSignedBeaconBlock, 0.Slot)
+
+  test "load altair block":
+    testTrustedSignedBeaconBlock(altair.TrustedSignedBeaconBlock, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
+    expect(SszError):
+      testTrustedSignedBeaconBlock(phase0.TrustedSignedBeaconBlock, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
+    expect(SszError):
+      testTrustedSignedBeaconBlock(merge.TrustedSignedBeaconBlock, cfg.ALTAIR_FORK_EPOCH.compute_start_slot_at_epoch)
+
+  test "load merge block":
+    testTrustedSignedBeaconBlock(merge.TrustedSignedBeaconBlock, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
+
+    expect(SszError):
+      testTrustedSignedBeaconBlock(phase0.TrustedSignedBeaconBlock, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
+    expect(SszError):
+      testTrustedSignedBeaconBlock(altair.TrustedSignedBeaconBlock, cfg.MERGE_FORK_EPOCH.compute_start_slot_at_epoch)
+
+  test "should raise on unknown data":
+    let
+      bytes = SSZ.encode(AttestationData())
+    expect(SszError):
+      discard newClone(readSszForkedTrustedSignedBeaconBlock(cfg, bytes))
```
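The readers exercised above (`readSszForkedHashedBeaconState`, `readSszForkedTrustedSignedBeaconBlock`) can pick the right decoder because every fork's state and block encode a slot, and the runtime config maps slots to forks. A standalone sketch of that slot-to-fork mapping (toy constants and a hypothetical helper, not the real implementation):

```nim
type
  StateFork = enum
    forkPhase0, forkAltair, forkMerge

const SLOTS_PER_EPOCH = 32'u64   # mainnet preset value

func forkAtSlot(altairEpoch, mergeEpoch, slot: uint64): StateFork =
  # A fork activates at the first slot of its fork epoch.
  let epoch = slot div SLOTS_PER_EPOCH
  if epoch >= mergeEpoch: forkMerge
  elif epoch >= altairEpoch: forkAltair
  else: forkPhase0

# With ALTAIR_FORK_EPOCH = 1 and MERGE_FORK_EPOCH = 2, as in the suite above:
doAssert forkAtSlot(1, 2, 0) == forkPhase0    # genesis slot
doAssert forkAtSlot(1, 2, 32) == forkAltair   # first slot of epoch 1
doAssert forkAtSlot(1, 2, 64) == forkMerge    # first slot of epoch 2
```

Decoding the bytes as the wrong fork's type then fails SSZ validation, which is exactly what the `expect(SszError)` branches assert.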
```diff
@@ -10,23 +10,27 @@ import
   ../beacon_chain/[beacon_chain_db],
   ../beacon_chain/consensus_object_pools/blockchain_dag,
   ../beacon_chain/spec/datatypes/phase0,
-  ../beacon_chain/spec/[beaconstate],
+  ../beacon_chain/spec/[beaconstate, forks],
   eth/db/[kvstore, kvstore_sqlite3],
   ./testblockutil
 
 export beacon_chain_db, testblockutil, kvstore, kvstore_sqlite3
 
-proc makeTestDB*(tailState: var phase0.BeaconState, tailBlock: phase0.TrustedSignedBeaconBlock): BeaconChainDB =
+proc makeTestDB*(
+    tailState: ForkedHashedBeaconState,
+    tailBlock: ForkedTrustedSignedBeaconBlock): BeaconChainDB =
   result = BeaconChainDB.new("", inMemory = true)
   ChainDAGRef.preInit(result, tailState, tailState, tailBlock)
 
 proc makeTestDB*(validators: Natural): BeaconChainDB =
   let
-    genState = initialize_beacon_state_from_eth1(
-      defaultRuntimeConfig,
-      Eth2Digest(),
-      0,
-      makeInitialDeposits(validators.uint64, flags = {skipBlsValidation}),
-      {skipBlsValidation})
+    genState = (ref ForkedHashedBeaconState)(
+      kind: BeaconStateFork.Phase0,
+      phase0Data: initialize_hashed_beacon_state_from_eth1(
+        defaultRuntimeConfig,
+        Eth2Digest(),
+        0,
+        makeInitialDeposits(validators.uint64, flags = {skipBlsValidation}),
+        {skipBlsValidation}))
     genBlock = get_initial_beacon_block(genState[])
   makeTestDB(genState[], genBlock)
```