ncli_db: add putState, putBlock (#3096)

* ncli_db: add putState, putBlock

These tools allow modifying an existing nimbus database for the purpose
of recovery or reorg, moving the head, tail and genesis to arbitrary
points.

* remove potentially expensive `putState` in `BeaconStateDB`
* introduce `latest_block_root` which computes the root of the latest
applied block from the `latest_block_header` field (instead of passing
it in separately)
* avoid some unnecessary BeaconState copies during init
* discover https://github.com/nim-lang/Nim/issues/19094
* prefer `HashedBeaconState` in a few places to avoid recomputing state
root
* fetch latest block root from state when creating blocks
* harden `get_beacon_proposer_index` against invalid slots and document
* move random spec function tests to `test_spec.nim`
* avoid unnecessary state root computation before block proposal
This commit is contained in:
Jacek Sieka 2021-11-18 13:02:43 +01:00 committed by GitHub
parent 4bcdccab9a
commit f19a497eec
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 400 additions and 267 deletions

1
.gitignore vendored
View File

@ -23,6 +23,7 @@ build/
*.la
*.exe
*.dll
*.su
/scripts/testnet*.sh

View File

@ -67,8 +67,10 @@ OK: 16/16 Fail: 0/16 Skip: 0/16
## Beacon state [Preset: mainnet]
```diff
+ Smoke test initialize_beacon_state_from_eth1 [Preset: mainnet] OK
+ get_beacon_proposer_index OK
+ latest_block_root OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 3/3 Fail: 0/3 Skip: 0/3
## Block pool processing [Preset: mainnet]
```diff
+ Adding the same block twice returns a Duplicate error [Preset: mainnet] OK
@ -364,4 +366,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 1/1 Fail: 0/1 Skip: 0/1
---TOTAL---
OK: 204/206 Fail: 0/206 Skip: 2/206
OK: 206/208 Fail: 0/208 Skip: 2/208

View File

@ -535,9 +535,6 @@ proc putState*(db: BeaconChainDB, key: Eth2Digest, value: merge.BeaconState) =
db.mergeStatesNoVal.putSnappySSZ(
key.data, toBeaconStateNoImmutableValidators(value))
proc putState*(db: BeaconChainDB, value: ForkyBeaconState) =
db.putState(hash_tree_root(value), value)
# For testing rollback
proc putCorruptPhase0State*(db: BeaconChainDB, key: Eth2Digest) =
db.statesNoVal.putSnappySSZ(key.data, Validator())

View File

@ -667,9 +667,9 @@ proc putState(dag: ChainDAGRef, state: StateData) =
# Ideally we would save the state and the root lookup cache in a single
# transaction to prevent database inconsistencies, but the state loading code
# is resilient against one or the other going missing
withState(state.data): dag.db.putState(state.root, state.data)
dag.db.putStateRoot(
state.blck.root, getStateField(state.data, slot), getStateRoot(state.data))
withState(state.data):
dag.db.putStateRoot(state.latest_block_root(), state.data.slot, state.root)
dag.db.putState(state.root, state.data)
debug "Stored state", putStateDur = Moment.now() - startTick
@ -1394,12 +1394,12 @@ proc preInit*(
tail_genesis_validators_root = shortLog(tail_genesis_validators_root)
quit 1
let blck = get_initial_beacon_block(state.data)
let blck = get_initial_beacon_block(state)
db.putGenesisBlock(blck.root)
db.putBlock(blck)
db.putStateRoot(state.latest_block_root(), state.data.slot, state.root)
db.putState(state.root, state.data)
db.putStateRoot(blck.root, state.data.slot, state.root)
blck.root
else: # tail and genesis are the same
withBlck(tailBlock):
@ -1422,8 +1422,8 @@ proc preInit*(
db.putTailBlock(blck.root)
db.putHeadBlock(blck.root)
db.putStateRoot(state.latest_block_root(), state.data.slot, state.root)
db.putState(state.root, state.data)
db.putStateRoot(blck.root, state.data.slot, state.root)
notice "New database from snapshot",
genesisBlockRoot = shortLog(genesisBlockRoot),

View File

@ -208,11 +208,11 @@ when hasGenesisDetection:
var deposits = m.allGenesisDepositsUpTo(eth1Block.voteData.deposit_count)
result = initialize_beacon_state_from_eth1(
result = newClone(initialize_beacon_state_from_eth1(
m.cfg,
eth1Block.voteData.block_hash,
eth1Block.timestamp.uint64,
deposits, {})
deposits, {}))
if eth1Block.activeValidatorsCount != 0:
doAssert result.validators.lenu64 == eth1Block.activeValidatorsCount

View File

@ -176,7 +176,6 @@ proc init*(T: type BeaconNode,
quit 1
if config.finalizedCheckpointBlock.isNone:
withState(checkpointState[]):
if getStateField(checkpointState[], slot) > 0:
fatal "Specifying a non-genesis --finalized-checkpoint-state requires specifying --finalized-checkpoint-block as well"
quit 1
@ -1550,8 +1549,8 @@ proc doCreateTestnet(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.raise
else: (waitFor getEth1BlockHash(config.web3Urls[0], blockId("latest"))).asEth2Digest
cfg = getRuntimeConfig(config.eth2Network)
var
initialState = initialize_beacon_state_from_eth1(
cfg, eth1Hash, startTime, deposits, {skipBlsValidation})
initialState = newClone(initialize_beacon_state_from_eth1(
cfg, eth1Hash, startTime, deposits, {skipBlsValidation}))
# https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
initialState.genesis_time = startTime

View File

@ -195,7 +195,7 @@ proc initialize_beacon_state_from_eth1*(
eth1_block_hash: Eth2Digest,
eth1_timestamp: uint64,
deposits: openArray[DepositData],
flags: UpdateFlags = {}): phase0.BeaconStateRef {.nbench.} =
flags: UpdateFlags = {}): phase0.BeaconState {.nbench.} =
## Get the genesis ``BeaconState``.
##
## Before the beacon chain starts, validators will register in the Eth1 chain
@ -212,7 +212,9 @@ proc initialize_beacon_state_from_eth1*(
# at that point :)
doAssert deposits.lenu64 >= SLOTS_PER_EPOCH
var state = phase0.BeaconStateRef(
# TODO https://github.com/nim-lang/Nim/issues/19094
template state(): untyped = result
state = phase0.BeaconState(
fork: genesisFork(cfg),
genesis_time: genesis_time_from_eth1_timestamp(cfg, eth1_timestamp),
eth1_data:
@ -243,7 +245,7 @@ proc initialize_beacon_state_from_eth1*(
pubkeyToIndex.withValue(pubkey, foundIdx) do:
# Increase balance by deposit amount
increase_balance(state[], ValidatorIndex foundIdx[], amount)
increase_balance(state, ValidatorIndex foundIdx[], amount)
do:
if skipBlsValidation in flags or
verify_deposit_signature(cfg, deposit):
@ -274,7 +276,8 @@ proc initialize_beacon_state_from_eth1*(
# Set genesis validators root for domain separation and chain versioning
state.genesis_validators_root = hash_tree_root(state.validators)
state
# TODO https://github.com/nim-lang/Nim/issues/19094
# state
proc initialize_hashed_beacon_state_from_eth1*(
cfg: RuntimeConfig,
@ -282,42 +285,43 @@ proc initialize_hashed_beacon_state_from_eth1*(
eth1_timestamp: uint64,
deposits: openArray[DepositData],
flags: UpdateFlags = {}): phase0.HashedBeaconState =
let genesisState = initialize_beacon_state_from_eth1(
cfg, eth1_block_hash, eth1_timestamp, deposits, flags)
phase0.HashedBeaconState(
data: genesisState[], root: hash_tree_root(genesisState[]))
# TODO https://github.com/nim-lang/Nim/issues/19094
result = phase0.HashedBeaconState(
data: initialize_beacon_state_from_eth1(
cfg, eth1_block_hash, eth1_timestamp, deposits, flags))
result.root = hash_tree_root(result.data)
# https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#genesis-block
func get_initial_beacon_block*(state: phase0.BeaconState):
func get_initial_beacon_block*(state: phase0.HashedBeaconState):
phase0.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted
let message = phase0.TrustedBeaconBlock(
slot: state.slot,
state_root: hash_tree_root(state),)
slot: state.data.slot,
state_root: state.root)
# parent_root, randao_reveal, eth1_data, signature, and body automatically
# initialized to default values.
phase0.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/altair/beacon-chain.md#initialize-state-for-pure-altair-testnets-and-test-vectors
func get_initial_beacon_block*(state: altair.BeaconState):
func get_initial_beacon_block*(state: altair.HashedBeaconState):
altair.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted
let message = altair.TrustedBeaconBlock(
slot: state.slot,
state_root: hash_tree_root(state),)
slot: state.data.slot,
state_root: state.root)
# parent_root, randao_reveal, eth1_data, signature, and body automatically
# initialized to default values.
altair.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#testing
func get_initial_beacon_block*(state: merge.BeaconState):
func get_initial_beacon_block*(state: merge.HashedBeaconState):
merge.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted
let message = merge.TrustedBeaconBlock(
slot: state.slot,
state_root: hash_tree_root(state),)
slot: state.data.slot,
state_root: state.root,)
# parent_root, randao_reveal, eth1_data, signature, and body automatically
# initialized to default values.
merge.TrustedSignedBeaconBlock(
@ -326,7 +330,7 @@ func get_initial_beacon_block*(state: merge.BeaconState):
func get_initial_beacon_block*(state: ForkedHashedBeaconState):
ForkedTrustedSignedBeaconBlock =
withState(state):
ForkedTrustedSignedBeaconBlock.init(get_initial_beacon_block(state.data))
ForkedTrustedSignedBeaconBlock.init(get_initial_beacon_block(state))
# https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#get_block_root_at_slot
func get_block_root_at_slot*(state: ForkyBeaconState, slot: Slot): Eth2Digest =
@ -894,3 +898,27 @@ func upgrade_to_merge*(cfg: RuntimeConfig, pre: altair.BeaconState):
template isValidInState*(idx: ValidatorIndex, state: ForkyBeaconState): bool =
idx.uint64 < state.validators.lenu64
func latest_block_root*(state: ForkyBeaconState, state_root: Eth2Digest): Eth2Digest =
# The root of the last block that was successfully applied to this state -
# normally, when a block is applied, the data from the header is stored in
# the state without the state root - on the next process_slot, the state root
# is added to the header and the block root can now be computed and added to
# the block roots table. If process_slot has not yet run on top of the new
# block, we must fill in the state root ourselves.
if state.slot == state.latest_block_header.slot:
# process_slot will not yet have updated the header of the "current" block -
# similar to block creation, we fill it in with the state root
var tmp = state.latest_block_header
tmp.state_root = state_root
hash_tree_root(tmp)
elif state.slot <=
(state.latest_block_header.slot + SLOTS_PER_HISTORICAL_ROOT):
# block_roots is limited to about a day - see assert in
# `get_block_root_at_slot`
state.get_block_root_at_slot(state.latest_block_header.slot)
else:
# Really long periods of empty slots - unlikely but possible
hash_tree_root(state.latest_block_header)
func latest_block_root*(state: ForkyHashedBeaconState): Eth2Digest =
latest_block_root(state.data, state.root)

View File

@ -363,7 +363,6 @@ template partialBeaconBlock(
cfg: RuntimeConfig,
state: var phase0.HashedBeaconState,
proposer_index: ValidatorIndex,
parent_root: Eth2Digest,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
@ -375,7 +374,7 @@ template partialBeaconBlock(
phase0.BeaconBlock(
slot: state.data.slot,
proposer_index: proposer_index.uint64,
parent_root: parent_root,
parent_root: state.latest_block_root(),
body: phase0.BeaconBlockBody(
randao_reveal: randao_reveal,
eth1_data: eth1data,
@ -390,7 +389,6 @@ proc makeBeaconBlock*(
cfg: RuntimeConfig,
state: var phase0.HashedBeaconState,
proposer_index: ValidatorIndex,
parent_root: Eth2Digest,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
@ -401,14 +399,15 @@ proc makeBeaconBlock*(
executionPayload: ExecutionPayload,
rollback: RollbackHashedProc,
cache: var StateCache): Result[phase0.BeaconBlock, string] =
## Create a block for the given state. The last block applied to it must be
## the one identified by parent_root and process_slots must be called up to
## the slot for which a block is to be created.
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
## state.slot meaning process_slots must be called up to the slot for which
## the block is to be created.
# To create a block, we'll first apply a partial block to the state, skipping
# some validations.
var blck = partialBeaconBlock(cfg, state, proposer_index, parent_root,
var blck = partialBeaconBlock(cfg, state, proposer_index,
randao_reveal, eth1_data, graffiti, attestations, deposits,
exits, sync_aggregate, executionPayload)
@ -433,7 +432,6 @@ template partialBeaconBlock(
cfg: RuntimeConfig,
state: var altair.HashedBeaconState,
proposer_index: ValidatorIndex,
parent_root: Eth2Digest,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
@ -445,7 +443,7 @@ template partialBeaconBlock(
altair.BeaconBlock(
slot: state.data.slot,
proposer_index: proposer_index.uint64,
parent_root: parent_root,
parent_root: state.latest_block_root(),
body: altair.BeaconBlockBody(
randao_reveal: randao_reveal,
eth1_data: eth1data,
@ -461,7 +459,6 @@ proc makeBeaconBlock*(
cfg: RuntimeConfig,
state: var altair.HashedBeaconState,
proposer_index: ValidatorIndex,
parent_root: Eth2Digest,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
@ -472,14 +469,15 @@ proc makeBeaconBlock*(
executionPayload: ExecutionPayload,
rollback: RollbackAltairHashedProc,
cache: var StateCache): Result[altair.BeaconBlock, string] =
## Create a block for the given state. The last block applied to it must be
## the one identified by parent_root and process_slots must be called up to
## the slot for which a block is to be created.
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
## state.slot meaning process_slots must be called up to the slot for which
## the block is to be created.
# To create a block, we'll first apply a partial block to the state, skipping
# some validations.
var blck = partialBeaconBlock(cfg, state, proposer_index, parent_root,
var blck = partialBeaconBlock(cfg, state, proposer_index,
randao_reveal, eth1_data, graffiti, attestations, deposits,
exits, sync_aggregate, executionPayload)
@ -504,7 +502,6 @@ template partialBeaconBlock(
cfg: RuntimeConfig,
state: var merge.HashedBeaconState,
proposer_index: ValidatorIndex,
parent_root: Eth2Digest,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
@ -516,7 +513,7 @@ template partialBeaconBlock(
merge.BeaconBlock(
slot: state.data.slot,
proposer_index: proposer_index.uint64,
parent_root: parent_root,
parent_root: state.latest_block_root(),
body: merge.BeaconBlockBody(
randao_reveal: randao_reveal,
eth1_data: eth1data,
@ -533,7 +530,6 @@ proc makeBeaconBlock*(
cfg: RuntimeConfig,
state: var merge.HashedBeaconState,
proposer_index: ValidatorIndex,
parent_root: Eth2Digest,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
@ -544,14 +540,15 @@ proc makeBeaconBlock*(
executionPayload: ExecutionPayload,
rollback: RollbackMergeHashedProc,
cache: var StateCache): Result[merge.BeaconBlock, string] =
## Create a block for the given state. The last block applied to it must be
## the one identified by parent_root and process_slots must be called up to
## the slot for which a block is to be created.
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
## state.slot meaning process_slots must be called up to the slot for which
## the block is to be created.
# To create a block, we'll first apply a partial block to the state, skipping
# some validations.
var blck = partialBeaconBlock(cfg, state, proposer_index, parent_root,
var blck = partialBeaconBlock(cfg, state, proposer_index,
randao_reveal, eth1_data, graffiti, attestations, deposits,
exits, sync_aggregate, executionPayload)
@ -575,7 +572,6 @@ proc makeBeaconBlock*(
cfg: RuntimeConfig,
state: var ForkedHashedBeaconState,
proposer_index: ValidatorIndex,
parent_root: Eth2Digest,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
@ -586,9 +582,10 @@ proc makeBeaconBlock*(
executionPayload: ExecutionPayload,
rollback: RollbackForkedHashedProc,
cache: var StateCache): Result[ForkedBeaconBlock, string] =
## Create a block for the given state. The last block applied to it must be
## the one identified by parent_root and process_slots must be called up to
## the slot for which a block is to be created.
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
## state.slot meaning process_slots must be called up to the slot for which
## the block is to be created.
template makeBeaconBlock(kind: untyped): Result[ForkedBeaconBlock, string] =
# To create a block, we'll first apply a partial block to the state, skipping
@ -596,7 +593,7 @@ proc makeBeaconBlock*(
var blck =
ForkedBeaconBlock.init(
partialBeaconBlock(cfg, state.`kind Data`, proposer_index, parent_root,
partialBeaconBlock(cfg, state.`kind Data`, proposer_index,
randao_reveal, eth1_data, graffiti, attestations, deposits,
exits, sync_aggregate, executionPayload))

View File

@ -385,18 +385,24 @@ func compute_proposer_index(state: ForkyBeaconState,
func get_beacon_proposer_index*(
state: ForkyBeaconState, cache: var StateCache, slot: Slot):
Option[ValidatorIndex] =
let epoch = get_current_epoch(state)
if slot.epoch() != epoch:
# compute_proposer_index depends on `effective_balance`, therefore the
# beacon proposer index can only be computed for the "current" epoch:
# https://github.com/ethereum/consensus-specs/pull/772#issuecomment-475574357
return none(ValidatorIndex)
cache.beacon_proposer_indices.withValue(slot, proposer) do:
return proposer[]
do:
# Return the beacon proposer index at the current slot.
let epoch = get_current_epoch(state)
var buffer: array[32 + 8, byte]
buffer[0..31] = get_seed(state, epoch, DOMAIN_BEACON_PROPOSER).data
# There's exactly one beacon proposer per slot.
# There's exactly one beacon proposer per slot - the same validator may
# however propose several times in the same epoch (however unlikely)
let
# active validator indices are kept in cache but sorting them takes
# quite a while

View File

@ -10,12 +10,11 @@
import
std/[os, strformat],
chronicles,
./spec/[eth2_ssz_serialization, eth2_merkleization, forks],
./spec/datatypes/[phase0, altair, merge],
./consensus_object_pools/block_pools_types
./spec/[beaconstate, eth2_ssz_serialization, eth2_merkleization, forks],
./spec/datatypes/[phase0, altair, merge]
export
eth2_ssz_serialization, eth2_merkleization, forks, block_pools_types
beaconstate, eth2_ssz_serialization, eth2_merkleization, forks
# Dump errors are generally not fatal where used currently - the code calling
# these functions, like most code, is not exception safe
@ -37,18 +36,11 @@ proc dump*(dir: string, v: ForkySignedBeaconBlock) =
logErrors:
SSZ.saveFile(dir / &"block-{v.message.slot}-{shortLog(v.root)}.ssz", v)
proc dump*(dir: string, v: ForkyHashedBeaconState, blck: BlockRef) =
mixin saveFile
logErrors:
SSZ.saveFile(
dir / &"state-{v.data.slot}-{shortLog(blck.root)}-{shortLog(v.root)}.ssz",
v.data)
proc dump*(dir: string, v: ForkyHashedBeaconState) =
mixin saveFile
logErrors:
SSZ.saveFile(
dir / &"state-{v.data.slot}-{shortLog(v.root)}.ssz",
dir / &"state-{v.data.slot}-{shortLog(v.latest_block_root)}-{shortLog(v.root)}.ssz",
v.data)
proc dump*(dir: string, v: SyncCommitteeMessage, validator: ValidatorPubKey) =

View File

@ -390,34 +390,31 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
head: BlockRef, slot: Slot
): Future[ForkedBlockResult] {.async.} =
# Advance state to the slot that we're proposing for
let
proposalState = assignClone(node.dag.headState)
proposalStateAddr = unsafeAddr proposalState[]
node.dag.withState(proposalState[], head.atSlot(slot)):
node.dag.withState(proposalState[], head.atSlot(slot - 1)):
# Advance to the given slot without calculating state root - we'll only
# need a state root _with_ the block applied
var info: ForkedEpochInfo
if not process_slots(
node.dag.cfg, stateData.data, slot, cache, info,
{skipLastStateRootCalculation}):
return ForkedBlockResult.err("Unable to advance state to slot")
let
eth1Proposal = node.getBlockProposalEth1Data(stateData.data)
poolPtr = unsafeAddr node.dag # safe because restore is short-lived
if eth1Proposal.hasMissingDeposits:
error "Eth1 deposits not available. Skipping block proposal", slot
return ForkedBlockResult.err("Eth1 deposits not available")
func restore(v: var ForkedHashedBeaconState) =
# TODO address this ugly workaround - there should probably be a
# `state_transition` that takes a `StateData` instead and updates
# the block as well
doAssert v.addr == addr proposalStateAddr.data
assign(proposalStateAddr[], poolPtr.headState)
let exits = withState(stateData.data):
node.exitPool[].getBeaconBlockExits(state.data)
return makeBeaconBlock(
node.dag.cfg,
stateData.data,
validator_index,
head.root,
randao_reveal,
eth1Proposal.vote,
graffiti,
@ -429,7 +426,7 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
else:
node.sync_committee_msg_pool[].produceSyncAggregate(head.root),
default(merge.ExecutionPayload),
restore,
noRollback, # Temporary state - no need for rollback
cache)
proc proposeSignedBlock*(node: BeaconNode,

View File

@ -1,6 +1,6 @@
import
os, stats, strformat, tables,
chronicles, confutils, stew/byteutils, eth/db/kvstore_sqlite3,
chronicles, confutils, stew/[byteutils, io2], eth/db/kvstore_sqlite3,
../beacon_chain/networking/network_metadata,
../beacon_chain/[beacon_chain_db],
../beacon_chain/consensus_object_pools/[blockchain_dag],
@ -21,13 +21,15 @@ type Timers = enum
tDbStore = "Database store"
type
DbCmd* = enum
bench
dumpState
dumpBlock
DbCmd* {.pure.} = enum
bench = "Run a replay benchmark for block and epoch processing"
dumpState = "Extract a state from the database as-is - only works for states that have been explicitly stored"
putState = "Store a given BeaconState in the database"
dumpBlock = "Extract a (trusted) SignedBeaconBlock from the database"
putBlock = "Store a given SignedBeaconBlock in the database, potentially updating some of the pointers"
pruneDatabase
rewindState
exportEra
rewindState = "Extract any state from the database based on a given block and slot, replaying if needed"
exportEra = "Write an experimental era file"
validatorPerf
validatorDb = "Create or update attestation performance database"
@ -48,7 +50,7 @@ type
desc: ""
.}: DbCmd
of bench:
of DbCmd.bench:
benchSlot* {.
defaultValue: 0
name: "start-slot"
@ -70,17 +72,41 @@ type
defaultValue: false
desc: "Process each block with a fresh cache".}: bool
of dumpState:
of DbCmd.dumpState:
stateRoot* {.
argument
desc: "State roots to save".}: seq[string]
of dumpBlock:
of DbCmd.putState:
stateFile {.
argument
name: "file"
desc: "Files to import".}: seq[string]
of DbCmd.dumpBlock:
blockRootx* {.
argument
desc: "Block roots to save".}: seq[string]
of pruneDatabase:
of DbCmd.putBlock:
blckFile {.
argument
name: "file"
desc: "Files to import".}: seq[string]
setHead {.
defaultValue: false
name: "set-head"
desc: "Update head to this block"}: bool
setTail {.
defaultValue: false
name: "set-tail"
desc: "Update tail to this block"}: bool
setGenesis {.
defaultValue: false
name: "set-genesis"
desc: "Update genesis to this block"}: bool
of DbCmd.pruneDatabase:
dryRun* {.
defaultValue: false
desc: "Don't write to the database copy; only simulate actions; default false".}: bool
@ -91,7 +117,7 @@ type
defaultValue: false
desc: "Enables verbose output; default false".}: bool
of rewindState:
of DbCmd.rewindState:
blockRoot* {.
argument
desc: "Block root".}: string
@ -100,7 +126,7 @@ type
argument
desc: "Slot".}: uint64
of exportEra:
of DbCmd.exportEra:
era* {.
defaultValue: 0
desc: "The era number to write".}: uint64
@ -108,7 +134,7 @@ type
defaultValue: 1
desc: "Number of eras to write".}: uint64
of validatorPerf:
of DbCmd.validatorPerf:
perfSlot* {.
defaultValue: -128 * SLOTS_PER_EPOCH.int64
name: "start-slot"
@ -117,7 +143,7 @@ type
defaultValue: 0
name: "slots"
desc: "Number of slots to run benchmark for, 0 = all the way to head".}: uint64
of validatorDb:
of DbCmd.validatorDb:
outDir* {.
defaultValue: ""
name: "out-db"
@ -127,6 +153,11 @@ type
name: "perfect"
desc: "Include perfect records (full rewards)".}: bool
proc putState(db: BeaconChainDB, state: ForkedHashedBeaconState) =
withState(state):
db.putStateRoot(state.latest_block_root(), state.data.slot, state.root)
db.putState(state.root, state.data)
func getSlotRange(dag: ChainDAGRef, startSlot: int64, count: uint64): (Slot, Slot) =
let
start =
@ -294,6 +325,15 @@ proc cmdDumpState(conf: DbConf) =
echo "Couldn't load ", stateRoot
proc cmdPutState(conf: DbConf, cfg: RuntimeConfig) =
let db = BeaconChainDB.new(conf.databaseDir.string)
defer: db.close()
for file in conf.stateFile:
let state = newClone(readSszForkedHashedBeaconState(
cfg, readAllBytes(file).tryGet()))
db.putState(state[])
proc cmdDumpBlock(conf: DbConf) =
let db = BeaconChainDB.new(conf.databaseDir.string)
defer: db.close()
@ -312,6 +352,23 @@ proc cmdDumpBlock(conf: DbConf) =
except CatchableError as e:
echo "Couldn't load ", blockRoot, ": ", e.msg
proc cmdPutBlock(conf: DbConf, cfg: RuntimeConfig) =
let db = BeaconChainDB.new(conf.databaseDir.string)
defer: db.close()
for file in conf.blckFile:
let blck = readSszForkedTrustedSignedBeaconBlock(
cfg, readAllBytes(file).tryGet())
withBlck(blck):
db.putBlock(blck)
if conf.setHead:
db.putHeadBlock(blck.root)
if conf.setTail:
db.putTailBlock(blck.root)
if conf.setGenesis:
db.putGenesisBlock(blck.root)
proc copyPrunedDatabase(
db: BeaconChainDB, copyDb: BeaconChainDB,
dryRun, verbose, keepOldStates: bool) =
@ -320,26 +377,32 @@ proc copyPrunedDatabase(
let
headBlock = db.getHeadBlock()
tailBlock = db.getTailBlock()
genesisBlock = db.getGenesisBlock()
doAssert headBlock.isOk and tailBlock.isOk
doAssert db.getPhase0Block(headBlock.get).isOk
doAssert db.getPhase0Block(tailBlock.get).isOk
doAssert db.getPhase0Block(genesisBlock.get).isOk
var
beaconState: ref phase0.BeaconState
beaconState = (ref phase0.HashedBeaconState)()
finalizedEpoch: Epoch # default value of 0 is conservative/safe
prevBlockSlot = db.getPhase0Block(db.getHeadBlock().get).get.message.slot
beaconState = new phase0.BeaconState
let headEpoch = db.getPhase0Block(headBlock.get).get.message.slot.epoch
let
headEpoch = db.getPhase0Block(headBlock.get).get.message.slot.epoch
tailStateRoot = db.getPhase0Block(tailBlock.get).get.message.state_root
# Tail states are specially addressed; no stateroot intermediary
if not db.getState(
db.getPhase0Block(tailBlock.get).get.message.state_root, beaconState[],
noRollback):
if not db.getState(tailStateRoot, beaconState[].data, noRollback):
doAssert false, "could not load tail state"
beaconState[].root = tailStateRoot
if not dry_run:
copyDb.putState(beaconState[])
copyDb.putStateRoot(
beaconState[].latest_block_root(), beaconState[].data.slot,
beaconState[].root)
copyDb.putState(beaconState[].root, beaconState[].data)
copyDb.putBlock(db.getPhase0Block(genesisBlock.get).get)
for signedBlock in getAncestors(db, headBlock.get):
if not dry_run:
@ -363,18 +426,21 @@ proc copyPrunedDatabase(
slot, " with root ", signedBlock.root
continue
if not db.getState(sr.get, beaconState[], noRollback):
if not db.getState(sr.get, beaconState[].data, noRollback):
# Don't copy dangling stateroot pointers
if stateRequired:
doAssert false, "state root and state required"
continue
beaconState[].root = sr.get()
finalizedEpoch = max(
finalizedEpoch, beaconState.finalized_checkpoint.epoch)
finalizedEpoch, beaconState[].data.finalized_checkpoint.epoch)
if not dry_run:
copyDb.putStateRoot(signedBlock.root, slot, sr.get)
copyDb.putState(beaconState[])
copyDb.putStateRoot(
beaconState[].latest_block_root(), beaconState[].data.slot,
beaconState[].root)
copyDb.putState(beaconState[].root, beaconState[].data)
if verbose:
echo "copied state at slot ", slot, " from block at ", shortLog(signedBlock.message.slot)
@ -383,6 +449,7 @@ proc copyPrunedDatabase(
if not dry_run:
copyDb.putHeadBlock(headBlock.get)
copyDb.putTailBlock(tailBlock.get)
copyDb.putGenesisBlock(genesisBlock.get)
proc cmdPrune(conf: DbConf) =
let
@ -417,7 +484,7 @@ proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
dag.withState(tmpState[], blckRef.atSlot(Slot(conf.slot))):
echo "Writing state..."
withState(stateData.data):
dump("./", state, blck)
dump("./", state)
func atCanonicalSlot(blck: BlockRef, slot: Slot): BlockSlot =
if slot == 0:
@ -855,19 +922,23 @@ when isMainModule:
cfg = getRuntimeConfig(conf.eth2Network)
case conf.cmd
of bench:
of DbCmd.bench:
cmdBench(conf, cfg)
of dumpState:
of DbCmd.dumpState:
cmdDumpState(conf)
of dumpBlock:
of DbCmd.putState:
cmdPutState(conf, cfg)
of DbCmd.dumpBlock:
cmdDumpBlock(conf)
of pruneDatabase:
of DbCmd.putBlock:
cmdPutBlock(conf, cfg)
of DbCmd.pruneDatabase:
cmdPrune(conf)
of rewindState:
of DbCmd.rewindState:
cmdRewindState(conf, cfg)
of exportEra:
of DbCmd.exportEra:
cmdExportEra(conf, cfg)
of validatorPerf:
of DbCmd.validatorPerf:
cmdValidatorPerf(conf, cfg)
of validatorDb:
of DbCmd.validatorDb:
cmdValidatorDb(conf, cfg)

View File

@ -269,7 +269,6 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
cfg,
hashedState[],
proposerIdx,
dag.head.root,
privKey.genRandaoReveal(
getStateField(stateData.data, fork),
getStateField(stateData.data, genesis_validators_root),

View File

@ -105,15 +105,10 @@ proc loadGenesis*(validators: Natural, validate: bool):
let contractSnapshot = DepositContractSnapshot(
depositContractState: merkleizer.toDepositContractState)
let res = (ref ForkedHashedBeaconState)(kind: BeaconStateFork.Phase0)
res.phase0Data.data = initialize_beacon_state_from_eth1(
cfg,
Eth2Digest(),
0,
deposits,
flags)[]
res.phase0Data.root = hash_tree_root(res[].phase0Data.data)
let res = (ref ForkedHashedBeaconState)(
kind: BeaconStateFork.Phase0,
phase0Data: initialize_hashed_beacon_state_from_eth1(
cfg, Eth2Digest(), 0, deposits, flags))
echo &"Saving to {genesisFn}..."
SSZ.saveFile(genesisFn, res.phase0Data.data)

View File

@ -96,7 +96,7 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
withTimer(timers[t]):
signedBlock = addTestBlock(
state[], latest_block_root, cache, attestations = blockAttestations,
state[], cache, attestations = blockAttestations,
flags = flags)
latest_block_root = withTimerRet(timers[tHashBlock]):
withBlck(signedBlock): hash_tree_root(blck.message)

View File

@ -15,7 +15,6 @@ import # Unit test
./test_action_tracker,
./test_attestation_pool,
./test_beacon_chain_db,
./test_beaconstate,
./test_block_pool,
./test_datatypes,
./test_discovery,
@ -30,6 +29,7 @@ import # Unit test
./test_keystore,
./test_message_signatures,
./test_peer_pool,
./test_spec,
./test_statediff,
./test_sync_committee_pool,
./test_sync_manager,

View File

@ -14,7 +14,7 @@ import
stew/bitops2,
# Beacon chain internals
../../../beacon_chain/spec/
[forks, helpers, light_client_sync, signatures, state_transition],
[beaconstate, forks, helpers, light_client_sync, signatures, state_transition],
# Mock helpers
../../mocking/[mock_blocks, mock_genesis],
# Test utilities
@ -58,20 +58,15 @@ proc block_for_next_slot(
withAttestations = false): ForkedSignedBeaconBlock =
template state: untyped {.inject.} = forked.altairData.data
let parent_root = block:
var previous_block_header = state.latest_block_header
if previous_block_header.state_root == ZERO_HASH:
previous_block_header.state_root = state.hash_tree_root()
previous_block_header.hash_tree_root()
let attestations =
if withAttestations:
makeFullAttestations(forked, parent_root, state.slot, cache)
let block_root = withState(forked): state.latest_block_root()
makeFullAttestations(forked, block_root, state.slot, cache)
else:
@[]
addTestBlock(
forked, parent_root, cache, attestations = attestations, cfg = cfg)
forked, cache, attestations = attestations, cfg = cfg)
let full_sync_committee_bits = block:
var res: BitArray[SYNC_COMMITTEE_SIZE]

View File

@ -13,7 +13,7 @@ import
math,
# Specs
../../beacon_chain/spec/[eth2_merkleization, keystore, signatures],
../../beacon_chain/spec/[eth2_merkleization, keystore, forks, signatures],
../../beacon_chain/spec/datatypes/base,
# Internals
@ -106,8 +106,8 @@ proc mockGenesisBalancedDeposits*(
mockGenesisDepositsImpl(result, validatorCount,amount,flags):
discard
proc mockUpdateStateForNewDeposit*[T](
state: var T,
proc mockUpdateStateForNewDeposit*(
state: var ForkyBeaconState,
validator_index: uint64,
amount: uint64,
# withdrawal_credentials: Eth2Digest

View File

@ -20,7 +20,7 @@ import
../beacon_chain/consensus_object_pools/[
block_quarantine, blockchain_dag, block_clearance, attestation_pool],
../beacon_chain/spec/datatypes/phase0,
../beacon_chain/spec/[helpers, state_transition, validator],
../beacon_chain/spec/[beaconstate, helpers, state_transition, validator],
# Test utilities
./testutil, ./testdbutil, ./testblockutil
@ -111,18 +111,22 @@ suite "Attestation pool processing" & preset():
let
root1 = addTestBlock(
state.data, state.blck.root,
cache, attestations = attestations, nextSlot = false).phase0Data.root
state.data, cache, attestations = attestations,
nextSlot = false).phase0Data.root
bc1 = get_beacon_committee(
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
att1 = makeAttestation(state[].data, root1, bc1[0], cache)
check:
withState(state.data): state.latest_block_root == root1
process_slots(
defaultRuntimeConfig, state.data,
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
info, {})
withState(state.data): state.latest_block_root == root1
check:
# shouldn't include already-included attestations
pool[].getAttestationsForBlock(state.data, cache) == []
@ -376,7 +380,7 @@ suite "Attestation pool processing" & preset():
test "Fork choice returns latest block with no attestations":
var cache = StateCache()
let
b1 = addTestBlock(state.data, dag.tail.root, cache).phase0Data
b1 = addTestBlock(state.data, cache).phase0Data
b1Add = dag.addRawBlock(quarantine, b1) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef):
@ -389,7 +393,7 @@ suite "Attestation pool processing" & preset():
head == b1Add[]
let
b2 = addTestBlock(state.data, b1.root, cache).phase0Data
b2 = addTestBlock(state.data, cache).phase0Data
b2Add = dag.addRawBlock(quarantine, b2) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef):
@ -404,7 +408,7 @@ suite "Attestation pool processing" & preset():
test "Fork choice returns block with attestation":
var cache = StateCache()
let
b10 = makeTestBlock(state.data, dag.tail.root, cache).phase0Data
b10 = makeTestBlock(state.data, cache).phase0Data
b10Add = dag.addRawBlock(quarantine, b10) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef):
@ -417,7 +421,7 @@ suite "Attestation pool processing" & preset():
head == b10Add[]
let
b11 = makeTestBlock(state.data, dag.tail.root, cache,
b11 = makeTestBlock(state.data, cache,
graffiti = GraffitiBytes [1'u8, 0, 0, 0 ,0 ,0 ,0 ,0 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
).phase0Data
b11Add = dag.addRawBlock(quarantine, b11) do (
@ -465,7 +469,7 @@ suite "Attestation pool processing" & preset():
test "Trying to add a block twice tags the second as an error":
var cache = StateCache()
let
b10 = makeTestBlock(state.data, dag.tail.root, cache).phase0Data
b10 = makeTestBlock(state.data, cache).phase0Data
b10Add = dag.addRawBlock(quarantine, b10) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef):
@ -494,7 +498,7 @@ suite "Attestation pool processing" & preset():
dag.updateFlags.incl {skipBLSValidation}
var cache = StateCache()
let
b10 = addTestBlock(state.data, dag.tail.root, cache).phase0Data
b10 = addTestBlock(state.data, cache).phase0Data
b10Add = dag.addRawBlock(quarantine, b10) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef):
@ -510,8 +514,6 @@ suite "Attestation pool processing" & preset():
# -------------------------------------------------------------
# Pass an epoch
var block_root = b10.root
var attestations: seq[Attestation]
for epoch in 0 ..< 5:
@ -520,9 +522,8 @@ suite "Attestation pool processing" & preset():
get_committee_count_per_slot(state[].data, Epoch epoch, cache)
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
let new_block = addTestBlock(
state.data, block_root, cache, attestations = attestations).phase0Data
state.data, cache, attestations = attestations).phase0Data
block_root = new_block.root
let blockRef = dag.addRawBlock(quarantine, new_block) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef):
@ -550,7 +551,7 @@ suite "Attestation pool processing" & preset():
aggregation_bits: aggregation_bits,
data: makeAttestationData(
state[].data, getStateField(state.data, slot),
index.CommitteeIndex, blockroot)
index.CommitteeIndex, blockRef.get().root)
# signature: ValidatorSig()
)

View File

@ -199,8 +199,8 @@ suite "Beacon chain DB" & preset():
var db = makeTestDB(SLOTS_PER_EPOCH)
for state in testStatesPhase0:
db.putState(state[].phase0Data.data)
let root = hash_tree_root(state[].phase0Data.data)
let root = state[].phase0Data.root
db.putState(root, state[].phase0Data.data)
check:
db.containsState(root)
@ -217,8 +217,8 @@ suite "Beacon chain DB" & preset():
var db = makeTestDB(SLOTS_PER_EPOCH)
for state in testStatesAltair:
db.putState(state[].altairData.data)
let root = hash_tree_root(state[].altairData.data)
let root = state[].altairData.root
db.putState(root, state[].altairData.data)
check:
db.containsState(root)
@ -235,8 +235,8 @@ suite "Beacon chain DB" & preset():
var db = makeTestDB(SLOTS_PER_EPOCH)
for state in testStatesMerge:
db.putState(state[].mergeData.data)
let root = hash_tree_root(state[].mergeData.data)
let root = state[].mergeData.root
db.putState(root, state[].mergeData.data)
check:
db.containsState(root)
@ -254,8 +254,8 @@ suite "Beacon chain DB" & preset():
let stateBuffer = (phase0.BeaconStateRef)()
for state in testStatesPhase0:
db.putState(state[].phase0Data.data)
let root = hash_tree_root(state[].phase0Data.data)
let root = state[].phase0Data.root
db.putState(root, state[].phase0Data.data)
check:
db.getState(root, stateBuffer[], noRollback)
@ -274,8 +274,8 @@ suite "Beacon chain DB" & preset():
let stateBuffer = (altair.BeaconStateRef)()
for state in testStatesAltair:
db.putState(state[].altairData.data)
let root = hash_tree_root(state[].altairData.data)
let root = state[].altairData.root
db.putState(root, state[].altairData.data)
check:
db.getState(root, stateBuffer[], noRollback)
@ -294,8 +294,8 @@ suite "Beacon chain DB" & preset():
let stateBuffer = (merge.BeaconStateRef)()
for state in testStatesMerge:
db.putState(state[].mergeData.data)
let root = hash_tree_root(state[].mergeData.data)
let root = state[].mergeData.root
db.putState(root, state[].mergeData.data)
check:
db.getState(root, stateBuffer[], noRollback)
@ -432,21 +432,20 @@ suite "Beacon chain DB" & preset():
db = BeaconChainDB.new("", inMemory = true)
let
state = initialize_beacon_state_from_eth1(
state = newClone(initialize_hashed_beacon_state_from_eth1(
defaultRuntimeConfig, eth1BlockHash, 0,
makeInitialDeposits(SLOTS_PER_EPOCH), {skipBlsValidation})
root = hash_tree_root(state[])
makeInitialDeposits(SLOTS_PER_EPOCH), {skipBlsValidation}))
db.putState(state[])
db.putState(state[].root, state[].data)
check db.containsState(root)
let state2 = db.getPhase0StateRef(root)
db.delState(root)
check not db.containsState(root)
check db.containsState(state[].root)
let state2 = db.getPhase0StateRef(state[].root)
db.delState(state[].root)
check not db.containsState(state[].root)
db.close()
check:
hash_tree_root(state2[]) == root
hash_tree_root(state2[]) == state[].root
test "sanity check state diff roundtrip" & preset():
var

View File

@ -1,20 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.used.}
import
unittest2,
../beacon_chain/spec/datatypes/base,
../beacon_chain/spec/beaconstate,
./testutil, ./testblockutil
suite "Beacon state" & preset():
test "Smoke test initialize_beacon_state_from_eth1" & preset():
let state = initialize_beacon_state_from_eth1(
defaultRuntimeConfig, Eth2Digest(), 0, makeInitialDeposits(SLOTS_PER_EPOCH, {}), {})
check: state.validators.lenu64 == SLOTS_PER_EPOCH

View File

@ -126,8 +126,8 @@ suite "Block pool processing" & preset():
cache = StateCache()
info = ForkedEpochInfo()
att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
b1 = addTestBlock(state[], dag.tail.root, cache, attestations = att0).phase0Data
b2 = addTestBlock(state[], b1.root, cache).phase0Data
b1 = addTestBlock(state[], cache, attestations = att0).phase0Data
b2 = addTestBlock(state[], cache).phase0Data
test "getRef returns nil for missing blocks":
check:
dag.getRef(default Eth2Digest) == nil
@ -180,7 +180,7 @@ suite "Block pool processing" & preset():
info, {})
let
b4 = addTestBlock(state[], b2.root, cache).phase0Data
b4 = addTestBlock(state[], cache).phase0Data
b4Add = dag.addRawBlock(quarantine, b4, nilPhase0Callback)
check:
@ -356,7 +356,7 @@ suite "chain DAG finalization tests" & preset():
test "prune heads on finalization" & preset():
# Create a fork that will not be taken
var
blck = makeTestBlock(dag.headState.data, dag.head.root, cache).phase0Data
blck = makeTestBlock(dag.headState.data, cache).phase0Data
tmpState = assignClone(dag.headState.data)
check:
process_slots(
@ -364,7 +364,7 @@ suite "chain DAG finalization tests" & preset():
getStateField(tmpState[], slot) + (5 * SLOTS_PER_EPOCH).uint64,
cache, info, {})
let lateBlock = addTestBlock(tmpState[], dag.head.root, cache).phase0Data
let lateBlock = addTestBlock(tmpState[], cache).phase0Data
block:
let status = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
check: status.isOk()
@ -378,7 +378,7 @@ suite "chain DAG finalization tests" & preset():
dag.heads.len == 2
blck = addTestBlock(
tmpState[], dag.head.root, cache,
tmpState[], cache,
attestations = makeFullAttestations(
tmpState[], dag.head.root, getStateField(tmpState[], slot), cache, {})).phase0Data
let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
@ -451,7 +451,7 @@ suite "chain DAG finalization tests" & preset():
if i == SLOTS_PER_EPOCH - 1:
assign(prestate[], dag.headState.data)
let blck = makeTestBlock(dag.headState.data, dag.head.root, cache).phase0Data
let blck = makeTestBlock(dag.headState.data, cache).phase0Data
let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
check: added.isOk()
dag.updateHead(added[], quarantine)
@ -468,7 +468,7 @@ suite "chain DAG finalization tests" & preset():
cache, info, {})
# create another block, orphaning the head
let blck = makeTestBlock(prestate[], dag.head.parent.root, cache).phase0Data
let blck = makeTestBlock(prestate[], cache).phase0Data
# Add block, but don't update head
let added = dag.addRawBlock(quarantine, blck, nilPhase0Callback)
@ -483,7 +483,7 @@ suite "chain DAG finalization tests" & preset():
test "init with gaps" & preset():
for blck in makeTestBlocks(
dag.headState.data, dag.head.root, cache, int(SLOTS_PER_EPOCH * 6 - 2),
dag.headState.data, cache, int(SLOTS_PER_EPOCH * 6 - 2),
true):
let added = dag.addRawBlock(quarantine, blck.phase0Data, nilPhase0Callback)
check: added.isOk()
@ -497,7 +497,7 @@ suite "chain DAG finalization tests" & preset():
cache, info, {})
var blck = makeTestBlock(
dag.headState.data, dag.head.root, cache,
dag.headState.data, cache,
attestations = makeFullAttestations(
dag.headState.data, dag.head.root, getStateField(dag.headState.data, slot),
cache, {})).phase0Data
@ -536,12 +536,12 @@ suite "chain DAG finalization tests" & preset():
suite "Old database versions" & preset():
setup:
let
genState = initialize_beacon_state_from_eth1(
genState = newClone(initialize_hashed_beacon_state_from_eth1(
defaultRuntimeConfig,
Eth2Digest(),
0,
makeInitialDeposits(SLOTS_PER_EPOCH.uint64, flags = {skipBlsValidation}),
{skipBlsValidation})
{skipBlsValidation}))
genBlock = get_initial_beacon_block(genState[])
taskpool = Taskpool.new()
quarantine = QuarantineRef.init(keys.newRng(), taskpool)
@ -552,11 +552,13 @@ suite "Old database versions" & preset():
let db = BeaconChainDB.new("", inMemory = true)
# preInit a database to a v1.0.12 state
db.putStateV0(genBlock.message.state_root, genState[])
db.putStateRoot(
genState[].latest_block_root(), genState[].data.slot, genState[].root)
db.putStateV0(genState[].root, genState[].data)
db.putBlockV0(genBlock)
db.putTailBlock(genBlock.root)
db.putHeadBlock(genBlock.root)
db.putStateRoot(genBlock.root, genState.slot, genBlock.message.state_root)
db.putGenesisBlock(genBlock.root)
var
@ -564,7 +566,7 @@ suite "Old database versions" & preset():
state = newClone(dag.headState.data)
cache = StateCache()
att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
b1 = addTestBlock(state[], dag.tail.root, cache, attestations = att0).phase0Data
b1 = addTestBlock(state[], cache, attestations = att0).phase0Data
b1Add = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
check:
@ -588,7 +590,7 @@ suite "Diverging hardforks":
state = newClone(dag.headState.data)
cache = StateCache()
info = ForkedEpochInfo()
blck = makeTestBlock(dag.headState.data, dag.head.root, cache)
blck = makeTestBlock(dag.headState.data, cache)
tmpState = assignClone(dag.headState.data)
test "Tail block only in common":
@ -601,7 +603,7 @@ suite "Diverging hardforks":
# Because the first block is after the Altair transition, the only block in
# common is the tail block
var
b1 = addTestBlock(tmpState[], dag.tail.root, cache).phase0Data
b1 = addTestBlock(tmpState[], cache).phase0Data
b1Add = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
check b1Add.isOk()
@ -619,7 +621,7 @@ suite "Diverging hardforks":
# There's a block in the shared-correct phase0 hardfork, before epoch 2
var
b1 = addTestBlock(tmpState[], dag.tail.root, cache).phase0Data
b1 = addTestBlock(tmpState[], cache).phase0Data
b1Add = dag.addRawBlock(quarantine, b1, nilPhase0Callback)
check:
@ -630,7 +632,7 @@ suite "Diverging hardforks":
cache, info, {})
var
b2 = addTestBlock(tmpState[], b1.root, cache).phase0Data
b2 = addTestBlock(tmpState[], cache).phase0Data
b2Add = dag.addRawBlock(quarantine, b2, nilPhase0Callback)
check b2Add.isOk()

View File

@ -76,8 +76,7 @@ suite "Gossip validation " & preset():
var
cache: StateCache
for blck in makeTestBlocks(
dag.headState.data, dag.head.root, cache,
int(SLOTS_PER_EPOCH * 5), false):
dag.headState.data, cache, int(SLOTS_PER_EPOCH * 5), false):
let added = dag.addRawBlock(quarantine, blck.phase0Data) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef):
@ -190,8 +189,7 @@ suite "Gossip validation - Extra": # Not based on preset config
quarantine = QuarantineRef.init(keys.newRng(), taskpool)
var cache = StateCache()
for blck in makeTestBlocks(
dag.headState.data, dag.head.root, cache,
int(SLOTS_PER_EPOCH), false, cfg = cfg):
dag.headState.data, cache, int(SLOTS_PER_EPOCH), false, cfg = cfg):
let added =
case blck.kind
of BeaconBlockFork.Phase0:

View File

@ -151,8 +151,8 @@ suite "Interop":
const genesis_time = 1570500000
var
initialState = initialize_beacon_state_from_eth1(
defaultRuntimeConfig, eth1BlockHash, genesis_time, deposits, {})
initialState = newClone(initialize_beacon_state_from_eth1(
defaultRuntimeConfig, eth1BlockHash, genesis_time, deposits, {}))
# https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
initialState.genesis_time = genesis_time

76
tests/test_spec.nim Normal file
View File

@ -0,0 +1,76 @@
# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.used.}
# Test for spec functions and helpers outside of the EF test vectors - mainly
# helpers that extend or make the spec functions usable outside of the state
# transition functions
import
unittest2,
../beacon_chain/spec/datatypes/phase0,
../beacon_chain/spec/[beaconstate, state_transition],
./testutil, ./testblockutil
suite "Beacon state" & preset():
test "Smoke test initialize_beacon_state_from_eth1" & preset():
let state = newClone(initialize_beacon_state_from_eth1(
defaultRuntimeConfig, Eth2Digest(), 0,
makeInitialDeposits(SLOTS_PER_EPOCH, {}), {}))
check: state.validators.lenu64 == SLOTS_PER_EPOCH
test "latest_block_root":
var
cfg = defaultRuntimeConfig
state = (ref ForkedHashedBeaconState)(
kind: BeaconStateFork.Phase0,
phase0Data: initialize_hashed_beacon_state_from_eth1(
defaultRuntimeConfig, Eth2Digest(), 0,
makeInitialDeposits(SLOTS_PER_EPOCH, {}), {skipBlsValidation}))
genBlock = get_initial_beacon_block(state[])
cache: StateCache
info: ForkedEpochInfo
check: # Works for genesis block
state[].phase0Data.latest_block_root() == genBlock.root
process_slots(cfg, state[], Slot 1, cache, info, {})
state[].phase0Data.latest_block_root() == genBlock.root
let blck = addTestBlock(
state[], cache, nextSlot = false, flags = {skipBlsValidation}).phase0Data
check: # Works for random blocks
state[].phase0Data.latest_block_root() == blck.root
process_slots(cfg, state[], Slot 2, cache, info, {})
state[].phase0Data.latest_block_root() == blck.root
test "get_beacon_proposer_index":
var
cfg = defaultRuntimeConfig
state = (ref ForkedHashedBeaconState)(
kind: BeaconStateFork.Phase0,
phase0Data: initialize_hashed_beacon_state_from_eth1(
defaultRuntimeConfig, Eth2Digest(), 0,
makeInitialDeposits(SLOTS_PER_EPOCH, {}), {skipBlsValidation}))
cache: StateCache
info: ForkedEpochInfo
check:
get_beacon_proposer_index(state[].phase0Data.data, cache, Slot 1).isSome()
get_beacon_proposer_index(
state[].phase0Data.data, cache, Epoch(1).compute_start_slot_at_epoch()).isNone()
get_beacon_proposer_index(
state[].phase0Data.data, cache, Epoch(2).compute_start_slot_at_epoch()).isNone()
check:
process_slots(cfg, state[], Epoch(1).compute_start_slot_at_epoch(), cache, info, {})
get_beacon_proposer_index(state[].phase0Data.data, cache, Slot 1).isNone()
get_beacon_proposer_index(
state[].phase0Data.data, cache, Epoch(1).compute_start_slot_at_epoch()).isSome()
get_beacon_proposer_index(
state[].phase0Data.data, cache, Epoch(2).compute_start_slot_at_epoch()).isNone()

View File

@ -74,7 +74,6 @@ func signBlock(
proc addTestBlock*(
state: var ForkedHashedBeaconState,
parent_root: Eth2Digest,
cache: var StateCache,
eth1_data = Eth1Data(),
attestations = newSeq[Attestation](),
@ -107,7 +106,6 @@ proc addTestBlock*(
cfg,
state,
proposer_index.get(),
parent_root,
randao_reveal,
# Keep deposit counts internally consistent.
Eth1Data(
@ -135,7 +133,6 @@ proc addTestBlock*(
proc makeTestBlock*(
state: ForkedHashedBeaconState,
parent_root: Eth2Digest,
cache: var StateCache,
eth1_data = Eth1Data(),
attestations = newSeq[Attestation](),
@ -148,22 +145,16 @@ proc makeTestBlock*(
# because the block includes the state root.
var tmpState = assignClone(state)
addTestBlock(
tmpState[], parent_root, cache, eth1_data, attestations, deposits,
graffiti, cfg = cfg)
tmpState[], cache, eth1_data, attestations, deposits, graffiti, cfg = cfg)
func makeAttestationData*(
state: ForkedHashedBeaconState, slot: Slot, committee_index: CommitteeIndex,
state: ForkyBeaconState, slot: Slot, committee_index: CommitteeIndex,
beacon_block_root: Eth2Digest): AttestationData =
## Create an attestation / vote for the block `beacon_block_root` using the
## data in `state` to fill in the rest of the fields.
## `state` is the state corresponding to the `beacon_block_root` advanced to
## the slot we're attesting to.
let
current_epoch = get_current_epoch(state)
start_slot = compute_start_slot_at_epoch(current_epoch)
epoch_boundary_block_root =
if start_slot == getStateField(state, slot): beacon_block_root
if start_slot == state.slot: beacon_block_root
else: get_block_root_at_slot(state, start_slot)
doAssert slot.compute_epoch_at_slot == current_epoch,
@ -175,13 +166,23 @@ func makeAttestationData*(
slot: slot,
index: committee_index.uint64,
beacon_block_root: beacon_block_root,
source: getStateField(state, current_justified_checkpoint),
source: state.current_justified_checkpoint,
target: Checkpoint(
epoch: current_epoch,
root: epoch_boundary_block_root
)
)
func makeAttestationData*(
state: ForkedHashedBeaconState, slot: Slot, committee_index: CommitteeIndex,
beacon_block_root: Eth2Digest): AttestationData =
## Create an attestation / vote for the block `beacon_block_root` using the
## data in `state` to fill in the rest of the fields.
## `state` is the state corresponding to the `beacon_block_root` advanced to
## the slot we're attesting to.
withState(state):
makeAttestationData(state.data, slot, committee_index, beacon_block_root)
func makeAttestation*(
state: ForkedHashedBeaconState, beacon_block_root: Eth2Digest,
committee: seq[ValidatorIndex], slot: Slot, index: CommitteeIndex,
@ -284,21 +285,17 @@ func makeFullAttestations*(
iterator makeTestBlocks*(
state: ForkedHashedBeaconState,
parent_root: Eth2Digest,
cache: var StateCache,
blocks: int,
attested: bool,
cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock =
var
state = assignClone(state)
parent_root = parent_root
for _ in 0..<blocks:
let parent_root = withState(state[]): state.latest_block_root()
let attestations = if attested:
makeFullAttestations(state[], parent_root, getStateField(state[], slot), cache)
else:
@[]
let blck = addTestBlock(
state[], parent_root, cache, attestations = attestations, cfg = cfg)
yield blck
parent_root = blck.root
yield addTestBlock(state[], cache, attestations = attestations, cfg = cfg)

View File

@ -14,30 +14,31 @@ import
../beacon_chain/spec/[
forks, helpers, state_transition, state_transition_block]
proc valid_deposit[T](state: var T) =
proc valid_deposit(state: var ForkyHashedBeaconState) =
const deposit_amount = MAX_EFFECTIVE_BALANCE
let validator_index = state.validators.len
let validator_index = state.data.validators.len
let deposit = mockUpdateStateForNewDeposit(
state,
state.data,
uint64 validator_index,
deposit_amount,
flags = {}
)
let pre_val_count = state.validators.len
let pre_val_count = state.data.validators.len
let pre_balance = if validator_index < pre_val_count:
state.balances[validator_index]
state.data.balances[validator_index]
else:
0
doAssert process_deposit(defaultRuntimeConfig, state, deposit, {}).isOk
doAssert state.validators.len == pre_val_count + 1
doAssert state.balances.len == pre_val_count + 1
doAssert state.balances[validator_index] == pre_balance + deposit.data.amount
doAssert state.validators[validator_index].effective_balance ==
doAssert process_deposit(defaultRuntimeConfig, state.data, deposit, {}).isOk
doAssert state.data.validators.len == pre_val_count + 1
doAssert state.data.balances.len == pre_val_count + 1
doAssert state.data.balances[validator_index] == pre_balance + deposit.data.amount
doAssert state.data.validators[validator_index].effective_balance ==
round_multiple_down(
min(MAX_EFFECTIVE_BALANCE, state.balances[validator_index]),
min(MAX_EFFECTIVE_BALANCE, state.data.balances[validator_index]),
EFFECTIVE_BALANCE_INCREMENT
)
state.root = hash_tree_root(state.data)
proc getTestStates*(
initialState: ForkedHashedBeaconState, stateFork: BeaconStateFork):
@ -77,7 +78,7 @@ proc getTestStates*(
if i mod 3 == 0:
withState(tmpState[]):
valid_deposit(state.data)
valid_deposit(state)
doAssert getStateField(tmpState[], slot) == slot
if tmpState[].kind == stateFork: