remove StateData (#3507)

One more step on the journey to reduce `BlockRef` usage across the
codebase - this one gets rid of `StateData` whose job was to keep track
of which block was last assigned to a state - these duties have now been
taken over by `latest_block_root`, a fairly recent addition that
computes this block root from state data (at a small cost that should be
insignificant)

99% mechanical change.
This commit is contained in:
Jacek Sieka 2022-03-16 08:20:40 +01:00 committed by GitHub
parent 6d1d31dd01
commit c64bf045f3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
40 changed files with 607 additions and 583 deletions

View File

@@ -67,7 +67,7 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
+ getBlockRef returns none for missing blocks OK + getBlockRef returns none for missing blocks OK
+ loading tail block works [Preset: mainnet] OK + loading tail block works [Preset: mainnet] OK
+ updateHead updates head and headState [Preset: mainnet] OK + updateHead updates head and headState [Preset: mainnet] OK
+ updateStateData sanity [Preset: mainnet] OK + updateState sanity [Preset: mainnet] OK
``` ```
OK: 6/6 Fail: 0/6 Skip: 0/6 OK: 6/6 Fail: 0/6 Skip: 0/6
## Block processor [Preset: mainnet] ## Block processor [Preset: mainnet]

View File

@@ -664,7 +664,7 @@ proc putState*(db: BeaconChainDB, key: Eth2Digest, value: ForkyBeaconState) =
proc putState*(db: BeaconChainDB, state: ForkyHashedBeaconState) = proc putState*(db: BeaconChainDB, state: ForkyHashedBeaconState) =
db.withManyWrites: db.withManyWrites:
db.putStateRoot(state.latest_block_root(), state.data.slot, state.root) db.putStateRoot(state.latest_block_root, state.data.slot, state.root)
db.putState(state.root, state.data) db.putState(state.root, state.data)
# For testing rollback # For testing rollback

View File

@@ -149,9 +149,8 @@ proc init*(T: type AttestationPool, dag: ChainDAGRef,
info "Fork choice initialized", info "Fork choice initialized",
justified_epoch = getStateField( justified_epoch = getStateField(
dag.headState.data, current_justified_checkpoint).epoch, dag.headState, current_justified_checkpoint).epoch,
finalized_epoch = getStateField( finalized_epoch = getStateField(dag.headState, finalized_checkpoint).epoch,
dag.headState.data, finalized_checkpoint).epoch,
finalized_root = shortLog(dag.finalizedHead.blck.root) finalized_root = shortLog(dag.finalizedHead.blck.root)
T( T(

View File

@@ -10,7 +10,7 @@
import import
chronicles, chronicles,
stew/[assign2, results], stew/[assign2, results],
../spec/[forks, signatures, signatures_batch, state_transition], ../spec/[beaconstate, forks, signatures, signatures_batch, state_transition],
"."/[block_dag, blockchain_dag, blockchain_dag_light_client] "."/[block_dag, blockchain_dag, blockchain_dag_light_client]
export results, signatures_batch, block_dag, blockchain_dag export results, signatures_batch, block_dag, blockchain_dag
@@ -27,16 +27,15 @@ logScope:
proc addResolvedHeadBlock( proc addResolvedHeadBlock(
dag: ChainDAGRef, dag: ChainDAGRef,
state: var StateData, state: var ForkedHashedBeaconState,
trustedBlock: ForkyTrustedSignedBeaconBlock, trustedBlock: ForkyTrustedSignedBeaconBlock,
parent: BlockRef, cache: var StateCache, parent: BlockRef, cache: var StateCache,
onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded | OnBellatrixBlockAdded, onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded | OnBellatrixBlockAdded,
stateDataDur, sigVerifyDur, stateVerifyDur: Duration stateDataDur, sigVerifyDur, stateVerifyDur: Duration
): BlockRef = ): BlockRef =
doAssert getStateField(state.data, slot) == trustedBlock.message.slot, doAssert state.matches_block_slot(
"state must match block" trustedBlock.root, trustedBlock.message.slot),
doAssert state.blck.root == trustedBlock.message.parent_root, "Given state must have the new block applied"
"the StateData passed into the addResolved function not yet updated!"
let let
blockRoot = trustedBlock.root blockRoot = trustedBlock.root
@@ -63,17 +62,16 @@ proc addResolvedHeadBlock(
# Up to here, state.data was referring to the new state after the block had # Up to here, state.data was referring to the new state after the block had
# been applied but the `blck` field was still set to the parent # been applied but the `blck` field was still set to the parent
state.blck = blockRef dag.clearanceBlck = blockRef
# Regardless of the chain we're on, the deposits come in the same order so # Regardless of the chain we're on, the deposits come in the same order so
# as soon as we import a block, we'll also update the shared public key # as soon as we import a block, we'll also update the shared public key
# cache # cache
dag.updateValidatorKeys(getStateField(state, validators).asSeq())
dag.updateValidatorKeys(getStateField(state.data, validators).asSeq())
# Getting epochRef with the state will potentially create a new EpochRef # Getting epochRef with the state will potentially create a new EpochRef
let let
epochRef = dag.getEpochRef(state, cache) epochRef = dag.getEpochRef(state, blockRef, cache)
epochRefTick = Moment.now() epochRefTick = Moment.now()
debug "Block resolved", debug "Block resolved",
@@ -101,15 +99,12 @@ proc checkStateTransition(
cache: var StateCache): Result[void, BlockError] = cache: var StateCache): Result[void, BlockError] =
## Ensure block can be applied on a state ## Ensure block can be applied on a state
func restore(v: var ForkedHashedBeaconState) = func restore(v: var ForkedHashedBeaconState) =
# TODO address this ugly workaround - there should probably be a
# `state_transition` that takes a `StateData` instead and updates
# the block as well
doAssert v.addr == addr dag.clearanceState.data
assign(dag.clearanceState, dag.headState) assign(dag.clearanceState, dag.headState)
let res = state_transition_block( let res = state_transition_block(
dag.cfg, dag.clearanceState.data, signedBlock, dag.cfg, dag.clearanceState, signedBlock,
cache, dag.updateFlags, restore) cache, dag.updateFlags, restore)
if res.isErr(): if res.isErr():
info "Invalid block", info "Invalid block",
blockRoot = shortLog(signedBlock.root), blockRoot = shortLog(signedBlock.root),
@@ -127,16 +122,15 @@ proc advanceClearanceState*(dag: ChainDAGRef) =
# epoch transition ahead of time. # epoch transition ahead of time.
# Notably, we use the clearance state here because that's where the block will # Notably, we use the clearance state here because that's where the block will
# first be seen - later, this state will be copied to the head state! # first be seen - later, this state will be copied to the head state!
if dag.clearanceState.blck.slot == getStateField(dag.clearanceState.data, slot): if dag.clearanceBlck.slot == getStateField(dag.clearanceState, slot):
let next = let next = dag.clearanceBlck.atSlot(dag.clearanceBlck.slot + 1)
dag.clearanceState.blck.atSlot(dag.clearanceState.blck.slot + 1)
let startTick = Moment.now() let startTick = Moment.now()
var cache = StateCache() var cache = StateCache()
if not updateStateData(dag, dag.clearanceState, next, true, cache): if not updateState(dag, dag.clearanceState, next, true, cache):
# The next head update will likely fail - something is very wrong here # The next head update will likely fail - something is very wrong here
error "Cannot advance to next slot, database corrupt?", error "Cannot advance to next slot, database corrupt?",
clearance = shortLog(dag.clearanceState.blck), clearance = shortLog(dag.clearanceBlck),
next = shortLog(next) next = shortLog(next)
else: else:
debug "Prepared clearance state for next block", debug "Prepared clearance state for next block",
@@ -222,7 +216,7 @@ proc addHeadBlock*(
# by the time a new block reaches this point, the parent block will already # by the time a new block reaches this point, the parent block will already
# have "established" itself in the network to some degree at least. # have "established" itself in the network to some degree at least.
var cache = StateCache() var cache = StateCache()
if not updateStateData( if not updateState(
dag, dag.clearanceState, parent.atSlot(signedBlock.message.slot), true, dag, dag.clearanceState, parent.atSlot(signedBlock.message.slot), true,
cache): cache):
# We should never end up here - the parent must be a block no older than and # We should never end up here - the parent must be a block no older than and
@@ -230,8 +224,9 @@ proc addHeadBlock*(
# load its corresponding state # load its corresponding state
error "Unable to load clearance state for parent block, database corrupt?", error "Unable to load clearance state for parent block, database corrupt?",
parent = shortLog(parent.atSlot(signedBlock.message.slot)), parent = shortLog(parent.atSlot(signedBlock.message.slot)),
clearance = shortLog(dag.clearanceState.blck) clearanceBlock = shortLog(dag.clearanceBlck)
return err(BlockError.MissingParent) return err(BlockError.MissingParent)
dag.clearanceBlck = parent
let stateDataTick = Moment.now() let stateDataTick = Moment.now()
@@ -241,7 +236,7 @@ proc addHeadBlock*(
var sigs: seq[SignatureSet] var sigs: seq[SignatureSet]
if (let e = sigs.collectSignatureSets( if (let e = sigs.collectSignatureSets(
signedBlock, dag.db.immutableValidators, signedBlock, dag.db.immutableValidators,
dag.clearanceState.data, cache); e.isErr()): dag.clearanceState, cache); e.isErr()):
# A PublicKey or Signature isn't on the BLS12-381 curve # A PublicKey or Signature isn't on the BLS12-381 curve
info "Unable to load signature sets", info "Unable to load signature sets",
err = e.error() err = e.error()
@@ -354,7 +349,7 @@ proc addBackfillBlock*(
if not verify_block_signature( if not verify_block_signature(
dag.forkAtEpoch(blck.slot.epoch), dag.forkAtEpoch(blck.slot.epoch),
getStateField(dag.headState.data, genesis_validators_root), getStateField(dag.headState, genesis_validators_root),
blck.slot, blck.slot,
signedBlock.root, signedBlock.root,
proposerKey.get(), proposerKey.get(),

View File

@@ -140,6 +140,9 @@ type
## go - the tail block is unique in that its parent is set to `nil`, even ## go - the tail block is unique in that its parent is set to `nil`, even
## in the case where an earlier genesis block exists. ## in the case where an earlier genesis block exists.
head*: BlockRef
## The most recently known head, as chosen by fork choice
backfill*: BeaconBlockSummary backfill*: BeaconBlockSummary
## The backfill points to the oldest block with an unbroken ancestry from ## The backfill points to the oldest block with an unbroken ancestry from
## dag.tail - when backfilling, we'll move backwards in time starting ## dag.tail - when backfilling, we'll move backwards in time starting
@@ -162,17 +165,19 @@ type
# ----------------------------------- # -----------------------------------
# Rewinder - Mutable state processing # Rewinder - Mutable state processing
headState*: StateData headState*: ForkedHashedBeaconState
## State given by the head block - must only be updated in `updateHead` - ## State given by the head block - must only be updated in `updateHead` -
## always matches dag.head ## always matches dag.head
epochRefState*: StateData epochRefState*: ForkedHashedBeaconState
## State used to produce epochRef instances - must only be used in ## State used to produce epochRef instances - must only be used in
## `getEpochRef` ## `getEpochRef`
clearanceState*: StateData clearanceState*: ForkedHashedBeaconState
## Cached state used during block clearance - must only be used in ## Cached state used during block clearance - must only be used in
## clearance module ## clearance module
clearanceBlck*: BlockRef
## The latest block that was applied to the clearance state
updateFlags*: UpdateFlags updateFlags*: UpdateFlags
@@ -249,12 +254,6 @@ type
# balances, as used in fork choice # balances, as used in fork choice
effective_balances_bytes*: seq[byte] effective_balances_bytes*: seq[byte]
StateData* = object
data*: ForkedHashedBeaconState
blck*: BlockRef
## The block associated with the state found in data
# TODO when Nim 1.2 support is dropped, make these generic. 1.2 generates # TODO when Nim 1.2 support is dropped, make these generic. 1.2 generates
# invalid C code, which gcc refuses to compile. Example test case: # invalid C code, which gcc refuses to compile. Example test case:
# type # type

View File

@@ -56,31 +56,32 @@ proc putBlock*(
dag: ChainDAGRef, signedBlock: ForkyTrustedSignedBeaconBlock) = dag: ChainDAGRef, signedBlock: ForkyTrustedSignedBeaconBlock) =
dag.db.putBlock(signedBlock) dag.db.putBlock(signedBlock)
proc updateStateData*( proc updateState*(
dag: ChainDAGRef, state: var StateData, bs: BlockSlot, save: bool, dag: ChainDAGRef, state: var ForkedHashedBeaconState, bs: BlockSlot, save: bool,
cache: var StateCache): bool {.gcsafe.} cache: var StateCache): bool {.gcsafe.}
template withStateVars*( template withStateVars*(
stateDataInternal: var StateData, body: untyped): untyped = stateInternal: var ForkedHashedBeaconState, body: untyped): untyped =
## Inject a few more descriptive names for the members of `stateData` - ## Inject a few more descriptive names for the members of `stateData` -
## the stateData instance may get mutated through these names as well ## the stateData instance may get mutated through these names as well
template stateData(): StateData {.inject, used.} = stateDataInternal template state(): ForkedHashedBeaconState {.inject, used.} = stateInternal
template stateRoot(): Eth2Digest {.inject, used.} = template stateRoot(): Eth2Digest {.inject, used.} =
getStateRoot(stateDataInternal.data) getStateRoot(stateInternal)
template blck(): BlockRef {.inject, used.} = stateDataInternal.blck
body body
template withUpdatedState*( template withUpdatedState*(
dag: ChainDAGRef, stateData: var StateData, blockSlot: BlockSlot, dag: ChainDAGRef, state: var ForkedHashedBeaconState,
okBody: untyped, failureBody: untyped): untyped = blockSlot: BlockSlot, okBody: untyped, failureBody: untyped): untyped =
## Helper template that updates stateData to a particular BlockSlot - usage of ## Helper template that updates stateData to a particular BlockSlot - usage of
## stateData is unsafe outside of block, or across `await` boundaries ## stateData is unsafe outside of block, or across `await` boundaries
block: block:
var cache {.inject.} = StateCache() var cache {.inject.} = StateCache()
if updateStateData(dag, stateData, blockSlot, false, cache): if updateState(dag, state, blockSlot, false, cache):
withStateVars(stateData): template blck(): BlockRef {.inject, used.} = blockSlot.blck
withStateVars(state):
okBody okBody
else: else:
failureBody failureBody
@@ -133,33 +134,31 @@ func validatorKey*(
validatorKey(epochRef.dag, index) validatorKey(epochRef.dag, index)
func init*( func init*(
T: type EpochRef, dag: ChainDAGRef, state: StateData, T: type EpochRef, dag: ChainDAGRef, state: ForkedHashedBeaconState,
cache: var StateCache): T = blck: BlockRef, cache: var StateCache): T =
let let
epoch = state.data.get_current_epoch() epoch = state.get_current_epoch()
proposer_dependent_root = withState(state.data): proposer_dependent_root = withState(state): state.proposer_dependent_root
state.proposer_dependent_root attester_dependent_root = withState(state): state.attester_dependent_root
attester_dependent_root = withState(state.data):
state.attester_dependent_root
epochRef = EpochRef( epochRef = EpochRef(
dag: dag, # This gives access to the validator pubkeys through an EpochRef dag: dag, # This gives access to the validator pubkeys through an EpochRef
key: state.blck.epochAncestor(epoch), key: blck.epochAncestor(epoch),
eth1_data: getStateField(state.data, eth1_data), eth1_data: getStateField(state, eth1_data),
eth1_deposit_index: getStateField(state.data, eth1_deposit_index), eth1_deposit_index: getStateField(state, eth1_deposit_index),
current_justified_checkpoint: current_justified_checkpoint:
getStateField(state.data, current_justified_checkpoint), getStateField(state, current_justified_checkpoint),
finalized_checkpoint: getStateField(state.data, finalized_checkpoint), finalized_checkpoint: getStateField(state, finalized_checkpoint),
proposer_dependent_root: proposer_dependent_root, proposer_dependent_root: proposer_dependent_root,
shuffled_active_validator_indices: shuffled_active_validator_indices:
cache.get_shuffled_active_validator_indices(state.data, epoch), cache.get_shuffled_active_validator_indices(state, epoch),
attester_dependent_root: attester_dependent_root, attester_dependent_root: attester_dependent_root,
merge_transition_complete: merge_transition_complete:
case state.data.kind: case state.kind:
of BeaconStateFork.Phase0: false of BeaconStateFork.Phase0: false
of BeaconStateFork.Altair: false of BeaconStateFork.Altair: false
of BeaconStateFork.Bellatrix: of BeaconStateFork.Bellatrix:
# https://github.com/ethereum/consensus-specs/blob/v1.1.7/specs/merge/beacon-chain.md#is_merge_transition_complete # https://github.com/ethereum/consensus-specs/blob/v1.1.7/specs/merge/beacon-chain.md#is_merge_transition_complete
state.data.bellatrixData.data.latest_execution_payload_header != state.bellatrixData.data.latest_execution_payload_header !=
ExecutionPayloadHeader() ExecutionPayloadHeader()
) )
epochStart = epoch.start_slot() epochStart = epoch.start_slot()
@@ -168,7 +167,7 @@ func init*(
for i in 0'u64..<SLOTS_PER_EPOCH: for i in 0'u64..<SLOTS_PER_EPOCH:
epochRef.beacon_proposers[i] = get_beacon_proposer_index( epochRef.beacon_proposers[i] = get_beacon_proposer_index(
state.data, cache, epochStart + i) state, cache, epochStart + i)
# When fork choice runs, it will need the effective balance of the justified # When fork choice runs, it will need the effective balance of the justified
# checkpoint - we pre-load the balances here to avoid rewinding the justified # checkpoint - we pre-load the balances here to avoid rewinding the justified
@@ -182,9 +181,8 @@ func init*(
epochRef.effective_balances_bytes = epochRef.effective_balances_bytes =
snappyEncode(SSZ.encode( snappyEncode(SSZ.encode(
List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT](get_effective_balances( List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT](
getStateField(state.data, validators).asSeq, get_effective_balances(getStateField(state, validators).asSeq, epoch))))
epoch))))
epochRef epochRef
@@ -386,9 +384,9 @@ proc getState(
true true
proc getStateData( proc getState(
db: BeaconChainDB, cfg: RuntimeConfig, state: var StateData, bs: BlockSlot, db: BeaconChainDB, cfg: RuntimeConfig, state: var ForkedHashedBeaconState,
rollback: RollbackProc): bool = bs: BlockSlot, rollback: RollbackProc): bool =
if not bs.isStateCheckpoint(): if not bs.isStateCheckpoint():
return false return false
@@ -396,11 +394,9 @@ proc getStateData(
if not root.isSome(): if not root.isSome():
return false return false
if not db.getState(cfg, bs.slot, root.get(), state.data, rollback): if not db.getState(cfg, bs.slot, root.get(), state, rollback):
return false return false
state.blck = bs.blck
true true
proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest): proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest):
@@ -422,7 +418,7 @@ proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest):
proc getBlock*( proc getBlock*(
dag: ChainDAGRef, bid: BlockId, dag: ChainDAGRef, bid: BlockId,
T: type ForkyTrustedSignedBeaconBlock): Opt[T] = T: type ForkyTrustedSignedBeaconBlock): Opt[T] =
withState(dag.headState.data): withState(dag.headState):
dag.db.getBlock(bid.root, T) dag.db.getBlock(bid.root, T)
proc getBlockSSZ*(dag: ChainDAGRef, bid: BlockId, bytes: var seq[byte]): bool = proc getBlockSSZ*(dag: ChainDAGRef, bid: BlockId, bytes: var seq[byte]): bool =
@@ -453,14 +449,15 @@ proc getForkedBlock*(
# In case we didn't have a summary - should be rare, but .. # In case we didn't have a summary - should be rare, but ..
dag.db.getForkedBlock(root) dag.db.getForkedBlock(root)
proc updateBeaconMetrics(state: StateData, cache: var StateCache) = proc updateBeaconMetrics(
state: ForkedHashedBeaconState, bid: BlockId, cache: var StateCache) =
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
# both non-negative, so difference can't overflow or underflow int64 # both non-negative, so difference can't overflow or underflow int64
beacon_head_root.set(state.blck.root.toGaugeValue) beacon_head_root.set(bid.root.toGaugeValue)
beacon_head_slot.set(state.blck.slot.toGaugeValue) beacon_head_slot.set(bid.slot.toGaugeValue)
withState(state.data): withState(state):
beacon_pending_deposits.set( beacon_pending_deposits.set(
(state.data.eth1_data.deposit_count - (state.data.eth1_data.deposit_count -
state.data.eth1_deposit_index).toGaugeValue) state.data.eth1_deposit_index).toGaugeValue)
@@ -662,6 +659,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
validatorMonitor: validatorMonitor, validatorMonitor: validatorMonitor,
genesis: genesisRef, genesis: genesisRef,
tail: tailRef, tail: tailRef,
head: headRef,
backfill: backfill, backfill: backfill,
finalizedHead: tailRef.atSlot(), finalizedHead: tailRef.atSlot(),
lastPrunePoint: tailRef.atSlot(), lastPrunePoint: tailRef.atSlot(),
@@ -669,6 +667,8 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
# head state # head state
heads: @[headRef], heads: @[headRef],
clearanceBlck: headRef,
# The only allowed flag right now is verifyFinalization, as the others all # The only allowed flag right now is verifyFinalization, as the others all
# allow skipping some validation. # allow skipping some validation.
updateFlags: {verifyFinalization} * updateFlags, updateFlags: {verifyFinalization} * updateFlags,
@@ -690,10 +690,10 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
# Now that we have a head block, we need to find the most recent state that # Now that we have a head block, we need to find the most recent state that
# we have saved in the database # we have saved in the database
while cur.blck != nil and while cur.blck != nil and
not getStateData(db, cfg, dag.headState, cur, noRollback): not getState(db, cfg, dag.headState, cur, noRollback):
cur = cur.parentOrSlot() cur = cur.parentOrSlot()
if dag.headState.blck == nil: if cur.blck == nil:
fatal "No state found in head history, database corrupt?", fatal "No state found in head history, database corrupt?",
genesisRef, tailRef, headRef genesisRef, tailRef, headRef
# TODO Potentially we could recover from here instead of crashing - what # TODO Potentially we could recover from here instead of crashing - what
@@ -701,11 +701,11 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
quit 1 quit 1
let let
configFork = case dag.headState.data.kind configFork = case dag.headState.kind
of BeaconStateFork.Phase0: genesisFork(cfg) of BeaconStateFork.Phase0: genesisFork(cfg)
of BeaconStateFork.Altair: altairFork(cfg) of BeaconStateFork.Altair: altairFork(cfg)
of BeaconStateFork.Bellatrix: bellatrixFork(cfg) of BeaconStateFork.Bellatrix: bellatrixFork(cfg)
statefork = getStateField(dag.headState.data, fork) statefork = getStateField(dag.headState, fork)
if stateFork != configFork: if stateFork != configFork:
error "State from database does not match network, check --network parameter", error "State from database does not match network, check --network parameter",
@@ -716,8 +716,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
assign(dag.epochRefState, dag.headState) assign(dag.epochRefState, dag.headState)
dag.forkDigests = newClone ForkDigests.init( dag.forkDigests = newClone ForkDigests.init(
cfg, cfg, getStateField(dag.headState, genesis_validators_root))
getStateField(dag.headState.data, genesis_validators_root))
let forkVersions = let forkVersions =
[cfg.GENESIS_FORK_VERSION, cfg.ALTAIR_FORK_VERSION, [cfg.GENESIS_FORK_VERSION, cfg.ALTAIR_FORK_VERSION,
@@ -732,7 +731,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
# The state we loaded into `headState` is the last state we saved, which may # The state we loaded into `headState` is the last state we saved, which may
# come from earlier than the head block # come from earlier than the head block
var cache: StateCache var cache: StateCache
if not dag.updateStateData(dag.headState, headRef.atSlot(), false, cache): if not dag.updateState(dag.headState, headRef.atSlot(), false, cache):
fatal "Unable to load head state, database corrupt?", fatal "Unable to load head state, database corrupt?",
head = shortLog(headRef) head = shortLog(headRef)
@@ -741,17 +740,17 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
# Clearance most likely happens from head - assign it after rewinding head # Clearance most likely happens from head - assign it after rewinding head
assign(dag.clearanceState, dag.headState) assign(dag.clearanceState, dag.headState)
withState(dag.headState.data): withState(dag.headState):
dag.validatorMonitor[].registerState(state.data) dag.validatorMonitor[].registerState(state.data)
updateBeaconMetrics(dag.headState, cache) updateBeaconMetrics(dag.headState, dag.head.bid, cache)
# The tail block is "implicitly" finalized as it was given either as a # The tail block is "implicitly" finalized as it was given either as a
# checkpoint block, or is the genesis, thus we use it as a lower bound when # checkpoint block, or is the genesis, thus we use it as a lower bound when
# computing the finalized head # computing the finalized head
let let
finalized_checkpoint = finalized_checkpoint =
getStateField(dag.headState.data, finalized_checkpoint) getStateField(dag.headState, finalized_checkpoint)
finalizedSlot = max(finalized_checkpoint.epoch.start_slot(), tailRef.slot) finalizedSlot = max(finalized_checkpoint.epoch.start_slot(), tailRef.slot)
block: # Set up finalizedHead -> head block: # Set up finalizedHead -> head
@@ -785,10 +784,10 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
# Fill validator key cache in case we're loading an old database that doesn't # Fill validator key cache in case we're loading an old database that doesn't
# have a cache # have a cache
dag.updateValidatorKeys(getStateField(dag.headState.data, validators).asSeq()) dag.updateValidatorKeys(getStateField(dag.headState, validators).asSeq())
dag.updateFinalizedBlocks() dag.updateFinalizedBlocks()
withState(dag.headState.data): withState(dag.headState):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
dag.headSyncCommittees = state.data.get_sync_committee_cache(cache) dag.headSyncCommittees = state.data.get_sync_committee_cache(cache)
@@ -807,19 +806,19 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
dag dag
template genesisValidatorsRoot*(dag: ChainDAGRef): Eth2Digest = template genesisValidatorsRoot*(dag: ChainDAGRef): Eth2Digest =
getStateField(dag.headState.data, genesis_validators_root) getStateField(dag.headState, genesis_validators_root)
func getEpochRef*( func getEpochRef*(
dag: ChainDAGRef, state: StateData, cache: var StateCache): EpochRef = dag: ChainDAGRef, state: ForkedHashedBeaconState, blck: BlockRef,
cache: var StateCache): EpochRef =
## Get a cached `EpochRef` or construct one based on the given state - always ## Get a cached `EpochRef` or construct one based on the given state - always
## returns an EpochRef instance ## returns an EpochRef instance
let let
blck = state.blck epoch = state.get_current_epoch()
epoch = state.data.get_current_epoch()
var epochRef = dag.findEpochRef(blck, epoch) var epochRef = dag.findEpochRef(blck, epoch)
if epochRef.isErr: if epochRef.isErr:
let res = EpochRef.init(dag, state, cache) let res = EpochRef.init(dag, state, blck, cache)
if epoch >= dag.finalizedHead.slot.epoch(): if epoch >= dag.finalizedHead.slot.epoch():
# Only cache epoch information for unfinalized blocks - earlier states # Only cache epoch information for unfinalized blocks - earlier states
@@ -877,9 +876,9 @@ proc getEpochRef*(
if isNil(ancestor.blck): # past the tail if isNil(ancestor.blck): # past the tail
return err() return err()
dag.withUpdatedState( let epochBlck = ancestor.blck.atEpochStart(ancestor.epoch)
dag.epochRefState, ancestor.blck.atEpochStart(ancestor.epoch)) do: dag.withUpdatedState(dag.epochRefState, epochBlck) do:
ok(dag.getEpochRef(stateData, cache)) ok(dag.getEpochRef(state, blck, cache))
do: do:
err() err()
@@ -904,7 +903,8 @@ func forkDigestAtEpoch*(dag: ChainDAGRef, epoch: Epoch): ForkDigest =
of BeaconStateFork.Altair: dag.forkDigests.altair of BeaconStateFork.Altair: dag.forkDigests.altair
of BeaconStateFork.Phase0: dag.forkDigests.phase0 of BeaconStateFork.Phase0: dag.forkDigests.phase0
proc getState(dag: ChainDAGRef, state: var StateData, bs: BlockSlot): bool = proc getState(
dag: ChainDAGRef, state: var ForkedHashedBeaconState, bs: BlockSlot): bool =
## Load a state from the database given a block and a slot - this will first ## Load a state from the database given a block and a slot - this will first
## lookup the state root in the state root table then load the corresponding ## lookup the state root in the state root table then load the corresponding
## state, if it exists ## state, if it exists
@@ -915,32 +915,32 @@ proc getState(dag: ChainDAGRef, state: var StateData, bs: BlockSlot): bool =
else: else:
unsafeAddr dag.headState unsafeAddr dag.headState
let v = addr state.data let v = addr state
func restore() = func restore() =
assign(v[], restoreAddr[].data) assign(v[], restoreAddr[])
getStateData(dag.db, dag.cfg, state, bs, restore) getState(dag.db, dag.cfg, state, bs, restore)
proc putState(dag: ChainDAGRef, state: StateData) = proc putState(dag: ChainDAGRef, state: ForkedHashedBeaconState, blck: BlockRef) =
# Store a state and its root # Store a state and its root
logScope: logScope:
blck = shortLog(state.blck) blck = shortLog(blck)
stateSlot = shortLog(getStateField(state.data, slot)) stateSlot = shortLog(getStateField(state, slot))
stateRoot = shortLog(getStateRoot(state.data)) stateRoot = shortLog(getStateRoot(state))
if not isStateCheckpoint(state.blck.atSlot(getStateField(state.data, slot))): if not isStateCheckpoint(blck.atSlot(getStateField(state, slot))):
return return
# Don't consider legacy tables here, they are slow to read so we'll want to # Don't consider legacy tables here, they are slow to read so we'll want to
# rewrite things in the new table anyway. # rewrite things in the new table anyway.
if dag.db.containsState(getStateRoot(state.data), legacy = false): if dag.db.containsState(getStateRoot(state), legacy = false):
return return
let startTick = Moment.now() let startTick = Moment.now()
# Ideally we would save the state and the root lookup cache in a single # Ideally we would save the state and the root lookup cache in a single
# transaction to prevent database inconsistencies, but the state loading code # transaction to prevent database inconsistencies, but the state loading code
# is resilient against one or the other going missing # is resilient against one or the other going missing
withState(state.data): withState(state):
dag.db.putState(state) dag.db.putState(state)
debug "Stored state", putStateDur = Moment.now() - startTick debug "Stored state", putStateDur = Moment.now() - startTick
@@ -1005,29 +1005,29 @@ proc getBlockRange*(
o # Return the index of the first non-nil item in the output o # Return the index of the first non-nil item in the output
proc advanceSlots( proc advanceSlots(
dag: ChainDAGRef, state: var StateData, slot: Slot, save: bool, dag: ChainDAGRef, state: var ForkedHashedBeaconState, blck: BlockRef,
cache: var StateCache, info: var ForkedEpochInfo) = slot: Slot, save: bool, cache: var StateCache, info: var ForkedEpochInfo) =
# Given a state, advance it zero or more slots by applying empty slot # Given a state, advance it zero or more slots by applying empty slot
# processing - the state must be positions at a slot before or equal to the # processing - the state must be positions at a slot before or equal to the
# target # target
doAssert getStateField(state.data, slot) <= slot doAssert getStateField(state, slot) <= slot
while getStateField(state.data, slot) < slot: while getStateField(state, slot) < slot:
let preEpoch = getStateField(state.data, slot).epoch let preEpoch = getStateField(state, slot).epoch
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch) loadStateCache(dag, cache, blck, getStateField(state, slot).epoch)
process_slots( process_slots(
dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, info, dag.cfg, state, getStateField(state, slot) + 1, cache, info,
dag.updateFlags).expect("process_slots shouldn't fail when state slot is correct") dag.updateFlags).expect("process_slots shouldn't fail when state slot is correct")
if save: if save:
dag.putState(state) dag.putState(state, blck)
# The reward information in the state transition is computed for epoch # The reward information in the state transition is computed for epoch
# transitions - when transitioning into epoch N, the activities in epoch # transitions - when transitioning into epoch N, the activities in epoch
# N-2 are translated into balance updates, and this is what we capture # N-2 are translated into balance updates, and this is what we capture
# in the monitor. This may be inaccurate during a deep reorg (>1 epoch) # in the monitor. This may be inaccurate during a deep reorg (>1 epoch)
# which is an acceptable tradeoff for monitoring. # which is an acceptable tradeoff for monitoring.
withState(state.data): withState(state):
let postEpoch = state.data.slot.epoch let postEpoch = state.data.slot.epoch
if preEpoch != postEpoch: if preEpoch != postEpoch:
dag.validatorMonitor[].registerEpochInfo(postEpoch, info, state.data) dag.validatorMonitor[].registerEpochInfo(postEpoch, info, state.data)
@ -1057,27 +1057,25 @@ proc applyBlock(
proc applyBlock( proc applyBlock(
dag: ChainDAGRef, dag: ChainDAGRef,
state: var StateData, blck: BlockRef, state: var ForkedHashedBeaconState, blck: BlockRef,
cache: var StateCache, info: var ForkedEpochInfo) = cache: var StateCache, info: var ForkedEpochInfo) =
# Apply a single block to the state - the state must be positioned at the # Apply a single block to the state - the state must be positioned at the
# parent of the block with a slot lower than the one of the block being # parent of the block with a slot lower than the one of the block being
# applied # applied
doAssert state.blck == blck.parent doAssert state.matches_block(blck.parent.root)
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch) loadStateCache(dag, cache, blck.parent, getStateField(state, slot).epoch)
dag.applyBlock(state.data, blck.bid, cache, info).expect( dag.applyBlock(state, blck.bid, cache, info).expect(
"Blocks from database must not fail to apply") "Blocks from database must not fail to apply")
state.blck = blck proc updateState*(
dag: ChainDAGRef, state: var ForkedHashedBeaconState, bs: BlockSlot,
proc updateStateData*( save: bool, cache: var StateCache): bool =
dag: ChainDAGRef, state: var StateData, bs: BlockSlot, save: bool,
cache: var StateCache): bool =
## Rewind or advance state such that it matches the given block and slot - ## Rewind or advance state such that it matches the given block and slot -
## this may include replaying from an earlier snapshot if blck is on a ## this may include replaying from an earlier snapshot if blck is on a
## different branch or has advanced to a higher slot number than slot ## different branch or has advanced to a higher slot number than slot
## If `bs.slot` is higher than `bs.blck.slot`, `updateStateData` will fill in ## If `bs.slot` is higher than `bs.blck.slot`, `updateState` will fill in
## with empty/non-block slots ## with empty/non-block slots
# First, see if we're already at the requested block. If we are, also check # First, see if we're already at the requested block. If we are, also check
@ -1093,21 +1091,26 @@ proc updateStateData*(
let let
startTick = Moment.now() startTick = Moment.now()
current {.used.} = state.blck.atSlot(getStateField(state.data, slot)) current {.used.} = withState(state):
BlockSlotId.init(
BlockId(
root: state.latest_block_root,
slot: state.data.latest_block_header.slot),
state.data.slot)
var var
ancestors: seq[BlockRef] ancestors: seq[BlockRef]
found = false found = false
template exactMatch(state: StateData, bs: BlockSlot): bool = template exactMatch(state: ForkedHashedBeaconState, bs: BlockSlot): bool =
# The block is the same and we're at an early enough slot - the state can # The block is the same and we're at an early enough slot - the state can
# be used to arrive at the desired blockslot # be used to arrive at the desired blockslot
state.blck == bs.blck and getStateField(state.data, slot) == bs.slot state.matches_block_slot(bs.blck.root, bs.slot)
template canAdvance(state: StateData, bs: BlockSlot): bool = template canAdvance(state: ForkedHashedBeaconState, bs: BlockSlot): bool =
# The block is the same and we're at an early enough slot - the state can # The block is the same and we're at an early enough slot - the state can
# be used to arrive at the desired blockslot # be used to arrive at the desired blockslot
state.blck == bs.blck and getStateField(state.data, slot) <= bs.slot state.can_advance_slots(bs.blck.root, bs.slot)
# Fast path: check all caches for an exact match - this is faster than # Fast path: check all caches for an exact match - this is faster than
# advancing a state where there's epoch processing to do, by a wide margin - # advancing a state where there's epoch processing to do, by a wide margin -
@ -1212,8 +1215,13 @@ proc updateStateData*(
# Starting state has been assigned, either from memory or database # Starting state has been assigned, either from memory or database
let let
assignTick = Moment.now() assignTick = Moment.now()
ancestor {.used.} = state.blck.atSlot(getStateField(state.data, slot)) ancestor {.used.} = withState(state):
ancestorRoot {.used.} = getStateRoot(state.data) BlockSlotId.init(
BlockId(
root: state.latest_block_root,
slot: state.data.latest_block_header.slot),
state.data.slot)
ancestorRoot {.used.} = getStateRoot(state)
var info: ForkedEpochInfo var info: ForkedEpochInfo
# Time to replay all the blocks between then and now # Time to replay all the blocks between then and now
@ -1225,10 +1233,10 @@ proc updateStateData*(
dag.applyBlock(state, ancestors[i], cache, info) dag.applyBlock(state, ancestors[i], cache, info)
# ...and make sure to process empty slots as requested # ...and make sure to process empty slots as requested
dag.advanceSlots(state, bs.slot, save, cache, info) dag.advanceSlots(state, bs.blck, bs.slot, save, cache, info)
# ...and make sure to load the state cache, if it exists # ...and make sure to load the state cache, if it exists
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch) loadStateCache(dag, cache, bs.blck, getStateField(state, slot).epoch)
let let
assignDur = assignTick - startTick assignDur = assignTick - startTick
@ -1241,36 +1249,36 @@ proc updateStateData*(
# time might need tuning # time might need tuning
info "State replayed", info "State replayed",
blocks = ancestors.len, blocks = ancestors.len,
slots = getStateField(state.data, slot) - ancestor.slot, slots = getStateField(state, slot) - ancestor.slot,
current = shortLog(current), current = shortLog(current),
ancestor = shortLog(ancestor), ancestor = shortLog(ancestor),
target = shortLog(bs), target = shortLog(bs),
ancestorStateRoot = shortLog(ancestorRoot), ancestorStateRoot = shortLog(ancestorRoot),
targetStateRoot = shortLog(getStateRoot(state.data)), targetStateRoot = shortLog(getStateRoot(state)),
found, found,
assignDur, assignDur,
replayDur replayDur
elif ancestors.len > 0: elif ancestors.len > 0:
debug "State replayed", debug "State replayed",
blocks = ancestors.len, blocks = ancestors.len,
slots = getStateField(state.data, slot) - ancestor.slot, slots = getStateField(state, slot) - ancestor.slot,
current = shortLog(current), current = shortLog(current),
ancestor = shortLog(ancestor), ancestor = shortLog(ancestor),
target = shortLog(bs), target = shortLog(bs),
ancestorStateRoot = shortLog(ancestorRoot), ancestorStateRoot = shortLog(ancestorRoot),
targetStateRoot = shortLog(getStateRoot(state.data)), targetStateRoot = shortLog(getStateRoot(state)),
found, found,
assignDur, assignDur,
replayDur replayDur
else: # Normal case! else: # Normal case!
trace "State advanced", trace "State advanced",
blocks = ancestors.len, blocks = ancestors.len,
slots = getStateField(state.data, slot) - ancestor.slot, slots = getStateField(state, slot) - ancestor.slot,
current = shortLog(current), current = shortLog(current),
ancestor = shortLog(ancestor), ancestor = shortLog(ancestor),
target = shortLog(bs), target = shortLog(bs),
ancestorStateRoot = shortLog(ancestorRoot), ancestorStateRoot = shortLog(ancestorRoot),
targetStateRoot = shortLog(getStateRoot(state.data)), targetStateRoot = shortLog(getStateRoot(state)),
found, found,
assignDur, assignDur,
replayDur replayDur
@ -1354,7 +1362,7 @@ iterator syncSubcommitteePairs*(
func syncCommitteeParticipants*(dag: ChainDAGRef, func syncCommitteeParticipants*(dag: ChainDAGRef,
slot: Slot): seq[ValidatorIndex] = slot: Slot): seq[ValidatorIndex] =
withState(dag.headState.data): withState(dag.headState):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
let let
period = sync_committee_period(slot) period = sync_committee_period(slot)
@ -1384,7 +1392,7 @@ func getSubcommitteePositions*(
slot: Slot, slot: Slot,
subcommitteeIdx: SyncSubcommitteeIndex, subcommitteeIdx: SyncSubcommitteeIndex,
validatorIdx: uint64): seq[uint64] = validatorIdx: uint64): seq[uint64] =
withState(dag.headState.data): withState(dag.headState):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
let let
period = sync_committee_period(slot) period = sync_committee_period(slot)
@ -1481,12 +1489,12 @@ proc updateHead*(
let let
lastHead = dag.head lastHead = dag.head
lastHeadStateRoot = getStateRoot(dag.headState.data) lastHeadStateRoot = getStateRoot(dag.headState)
# Start off by making sure we have the right state - updateStateData will try # Start off by making sure we have the right state - updateStateData will try
# to use existing in-memory states to make this smooth # to use existing in-memory states to make this smooth
var cache: StateCache var cache: StateCache
if not updateStateData( if not updateState(
dag, dag.headState, newHead.atSlot(), false, cache): dag, dag.headState, newHead.atSlot(), false, cache):
# Advancing the head state should never fail, given that the tail is # Advancing the head state should never fail, given that the tail is
# implicitly finalised, the head is an ancestor of the tail and we always # implicitly finalised, the head is an ancestor of the tail and we always
@ -1495,18 +1503,19 @@ proc updateHead*(
fatal "Unable to load head state during head update, database corrupt?", fatal "Unable to load head state during head update, database corrupt?",
lastHead = shortLog(lastHead) lastHead = shortLog(lastHead)
quit 1 quit 1
dag.head = newHead
dag.db.putHeadBlock(newHead.root) dag.db.putHeadBlock(newHead.root)
updateBeaconMetrics(dag.headState, cache) updateBeaconMetrics(dag.headState, dag.head.bid, cache)
withState(dag.headState.data): withState(dag.headState):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
dag.headSyncCommittees = state.data.get_sync_committee_cache(cache) dag.headSyncCommittees = state.data.get_sync_committee_cache(cache)
let let
finalized_checkpoint = finalized_checkpoint =
getStateField(dag.headState.data, finalized_checkpoint) getStateField(dag.headState, finalized_checkpoint)
finalizedSlot = max(finalized_checkpoint.epoch.start_slot(), dag.tail.slot) finalizedSlot = max(finalized_checkpoint.epoch.start_slot(), dag.tail.slot)
finalizedHead = newHead.atSlot(finalizedSlot) finalizedHead = newHead.atSlot(finalizedSlot)
@ -1521,19 +1530,18 @@ proc updateHead*(
notice "Updated head block with chain reorg", notice "Updated head block with chain reorg",
lastHead = shortLog(lastHead), lastHead = shortLog(lastHead),
headParent = shortLog(newHead.parent), headParent = shortLog(newHead.parent),
stateRoot = shortLog(getStateRoot(dag.headState.data)), stateRoot = shortLog(getStateRoot(dag.headState)),
headBlock = shortLog(dag.headState.blck), headBlock = shortLog(dag.head),
stateSlot = shortLog(getStateField(dag.headState.data, slot)), stateSlot = shortLog(getStateField(dag.headState, slot)),
justified = shortLog(getStateField( justified = shortLog(getStateField(
dag.headState.data, current_justified_checkpoint)), dag.headState, current_justified_checkpoint)),
finalized = shortLog(getStateField( finalized = shortLog(getStateField(dag.headState, finalized_checkpoint))
dag.headState.data, finalized_checkpoint))
if not(isNil(dag.onReorgHappened)): if not(isNil(dag.onReorgHappened)):
let data = ReorgInfoObject.init(dag.head.slot, uint64(ancestorDepth), let data = ReorgInfoObject.init(dag.head.slot, uint64(ancestorDepth),
lastHead.root, newHead.root, lastHead.root, newHead.root,
lastHeadStateRoot, lastHeadStateRoot,
getStateRoot(dag.headState.data)) getStateRoot(dag.headState))
dag.onReorgHappened(data) dag.onReorgHappened(data)
# A reasonable criterion for "reorganizations of the chain" # A reasonable criterion for "reorganizations of the chain"
@ -1543,27 +1551,25 @@ proc updateHead*(
beacon_reorgs_total.inc() beacon_reorgs_total.inc()
else: else:
debug "Updated head block", debug "Updated head block",
head = shortLog(dag.headState.blck), head = shortLog(dag.head),
stateRoot = shortLog(getStateRoot(dag.headState.data)), stateRoot = shortLog(getStateRoot(dag.headState)),
justified = shortLog(getStateField( justified = shortLog(getStateField(
dag.headState.data, current_justified_checkpoint)), dag.headState, current_justified_checkpoint)),
finalized = shortLog(getStateField( finalized = shortLog(getStateField(dag.headState, finalized_checkpoint))
dag.headState.data, finalized_checkpoint))
if not(isNil(dag.onHeadChanged)): if not(isNil(dag.onHeadChanged)):
let let
currentEpoch = epoch(newHead.slot) currentEpoch = epoch(newHead.slot)
depRoot = withState(dag.headState.data): state.proposer_dependent_root depRoot = withState(dag.headState): state.proposer_dependent_root
prevDepRoot = prevDepRoot = withState(dag.headState): state.attester_dependent_root
withState(dag.headState.data): state.attester_dependent_root
epochTransition = (finalizedHead != dag.finalizedHead) epochTransition = (finalizedHead != dag.finalizedHead)
let data = HeadChangeInfoObject.init(dag.head.slot, dag.head.root, let data = HeadChangeInfoObject.init(dag.head.slot, dag.head.root,
getStateRoot(dag.headState.data), getStateRoot(dag.headState),
epochTransition, depRoot, epochTransition, depRoot,
prevDepRoot) prevDepRoot)
dag.onHeadChanged(data) dag.onHeadChanged(data)
withState(dag.headState.data): withState(dag.headState):
# Every time the head changes, the "canonical" view of balances and other # Every time the head changes, the "canonical" view of balances and other
# state-related metrics change - notify the validator monitor. # state-related metrics change - notify the validator monitor.
# Doing this update during head update ensures there's a reasonable number # Doing this update during head update ensures there's a reasonable number
@ -1572,12 +1578,11 @@ proc updateHead*(
if finalizedHead != dag.finalizedHead: if finalizedHead != dag.finalizedHead:
debug "Reached new finalization checkpoint", debug "Reached new finalization checkpoint",
head = shortLog(dag.headState.blck), head = shortLog(dag.head),
stateRoot = shortLog(getStateRoot(dag.headState.data)), stateRoot = shortLog(getStateRoot(dag.headState)),
justified = shortLog(getStateField( justified = shortLog(getStateField(
dag.headState.data, current_justified_checkpoint)), dag.headState, current_justified_checkpoint)),
finalized = shortLog(getStateField( finalized = shortLog(getStateField(dag.headState, finalized_checkpoint))
dag.headState.data, finalized_checkpoint))
block: block:
# Update `dag.finalizedBlocks` with all newly finalized blocks (those # Update `dag.finalizedBlocks` with all newly finalized blocks (those
@ -1609,10 +1614,9 @@ proc updateHead*(
# Send notification about new finalization point via callback. # Send notification about new finalization point via callback.
if not(isNil(dag.onFinHappened)): if not(isNil(dag.onFinHappened)):
let stateRoot = let stateRoot =
if dag.finalizedHead.slot == dag.head.slot: if dag.finalizedHead.slot == dag.head.slot: getStateRoot(dag.headState)
getStateRoot(dag.headState.data)
elif dag.finalizedHead.slot + SLOTS_PER_HISTORICAL_ROOT > dag.head.slot: elif dag.finalizedHead.slot + SLOTS_PER_HISTORICAL_ROOT > dag.head.slot:
getStateField(dag.headState.data, state_roots).data[ getStateField(dag.headState, state_roots).data[
int(dag.finalizedHead.slot mod SLOTS_PER_HISTORICAL_ROOT)] int(dag.finalizedHead.slot mod SLOTS_PER_HISTORICAL_ROOT)]
else: else:
Eth2Digest() # The thing that finalized was >8192 blocks old? Eth2Digest() # The thing that finalized was >8192 blocks old?

View File

@ -42,7 +42,7 @@ func computeEarliestLightClientSlot*(dag: ChainDAGRef): Slot =
minSupportedSlot = max( minSupportedSlot = max(
dag.cfg.ALTAIR_FORK_EPOCH.start_slot, dag.cfg.ALTAIR_FORK_EPOCH.start_slot,
dag.lightClientCache.importTailSlot) dag.lightClientCache.importTailSlot)
currentSlot = getStateField(dag.headState.data, slot) currentSlot = getStateField(dag.headState, slot)
if currentSlot < minSupportedSlot: if currentSlot < minSupportedSlot:
return minSupportedSlot return minSupportedSlot
@ -61,7 +61,7 @@ func computeEarliestLightClientSlot*(dag: ChainDAGRef): Slot =
proc currentSyncCommitteeForPeriod( proc currentSyncCommitteeForPeriod(
dag: ChainDAGRef, dag: ChainDAGRef,
tmpState: var StateData, tmpState: var ForkedHashedBeaconState,
period: SyncCommitteePeriod): SyncCommittee = period: SyncCommitteePeriod): SyncCommittee =
## Fetch a `SyncCommittee` for a given sync committee period. ## Fetch a `SyncCommittee` for a given sync committee period.
## For non-finalized periods, follow the chain as selected by fork choice. ## For non-finalized periods, follow the chain as selected by fork choice.
@ -74,7 +74,7 @@ proc currentSyncCommitteeForPeriod(
# data for the period # data for the period
bs = dag.getBlockAtSlot(syncCommitteeSlot).expect("TODO") bs = dag.getBlockAtSlot(syncCommitteeSlot).expect("TODO")
dag.withUpdatedState(tmpState, bs) do: dag.withUpdatedState(tmpState, bs) do:
withState(stateData.data): withState(state):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
state.data.current_sync_committee state.data.current_sync_committee
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
@ -90,7 +90,7 @@ template syncCommitteeRoot(
proc syncCommitteeRootForPeriod( proc syncCommitteeRootForPeriod(
dag: ChainDAGRef, dag: ChainDAGRef,
tmpState: var StateData, tmpState: var ForkedHashedBeaconState,
period: SyncCommitteePeriod): Eth2Digest = period: SyncCommitteePeriod): Eth2Digest =
## Compute a root to uniquely identify `current_sync_committee` and ## Compute a root to uniquely identify `current_sync_committee` and
## `next_sync_committee` for a given sync committee period. ## `next_sync_committee` for a given sync committee period.
@ -102,7 +102,7 @@ proc syncCommitteeRootForPeriod(
syncCommitteeSlot = max(periodStartSlot, earliestSlot) syncCommitteeSlot = max(periodStartSlot, earliestSlot)
bs = dag.getBlockAtSlot(syncCommitteeSlot).expect("TODO") bs = dag.getBlockAtSlot(syncCommitteeSlot).expect("TODO")
dag.withUpdatedState(tmpState, bs) do: dag.withUpdatedState(tmpState, bs) do:
withState(stateData.data): withState(state):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
state.syncCommitteeRoot state.syncCommitteeRoot
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
@ -391,7 +391,7 @@ proc createLightClientUpdates(
proc processNewBlockForLightClient*( proc processNewBlockForLightClient*(
dag: ChainDAGRef, dag: ChainDAGRef,
state: StateData, state: ForkedHashedBeaconState,
signedBlock: ForkyTrustedSignedBeaconBlock, signedBlock: ForkyTrustedSignedBeaconBlock,
parent: BlockRef) = parent: BlockRef) =
## Update light client data with information from a new block. ## Update light client data with information from a new block.
@ -401,11 +401,11 @@ proc processNewBlockForLightClient*(
return return
when signedBlock is bellatrix.TrustedSignedBeaconBlock: when signedBlock is bellatrix.TrustedSignedBeaconBlock:
dag.cacheLightClientData(state.data.bellatrixData, signedBlock) dag.cacheLightClientData(state.bellatrixData, signedBlock)
dag.createLightClientUpdates(state.data.bellatrixData, signedBlock, parent) dag.createLightClientUpdates(state.bellatrixData, signedBlock, parent)
elif signedBlock is altair.TrustedSignedBeaconBlock: elif signedBlock is altair.TrustedSignedBeaconBlock:
dag.cacheLightClientData(state.data.altairData, signedBlock) dag.cacheLightClientData(state.altairData, signedBlock)
dag.createLightClientUpdates(state.data.altairData, signedBlock, parent) dag.createLightClientUpdates(state.altairData, signedBlock, parent)
elif signedBlock is phase0.TrustedSignedBeaconBlock: elif signedBlock is phase0.TrustedSignedBeaconBlock:
discard discard
else: else:
@ -428,7 +428,7 @@ proc processHeadChangeForLightClient*(dag: ChainDAGRef) =
let key = (period, dag.syncCommitteeRootForPeriod(tmpState[], period)) let key = (period, dag.syncCommitteeRootForPeriod(tmpState[], period))
dag.lightClientCache.bestUpdates[period] = dag.lightClientCache.bestUpdates[period] =
dag.lightClientCache.pendingBestUpdates.getOrDefault(key) dag.lightClientCache.pendingBestUpdates.getOrDefault(key)
withState(dag.headState.data): withState(dag.headState):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
let key = (headPeriod, state.syncCommitteeRoot) let key = (headPeriod, state.syncCommitteeRoot)
dag.lightClientCache.bestUpdates[headPeriod] = dag.lightClientCache.bestUpdates[headPeriod] =
@ -586,7 +586,7 @@ proc initBestLightClientUpdateForPeriod(
let let
finalizedEpoch = block: finalizedEpoch = block:
dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do: dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do:
withState(stateData.data): withState(state):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
state.data.finalized_checkpoint.epoch state.data.finalized_checkpoint.epoch
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
@ -607,7 +607,7 @@ proc initBestLightClientUpdateForPeriod(
# Fill data from attested block # Fill data from attested block
dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do: dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do:
let bdata = dag.getForkedBlock(blck.bid).get let bdata = dag.getForkedBlock(blck.bid).get
withStateAndBlck(stateData.data, bdata): withStateAndBlck(state, bdata):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
update.attested_header = update.attested_header =
blck.toBeaconBlockHeader blck.toBeaconBlockHeader
@ -629,7 +629,7 @@ proc initBestLightClientUpdateForPeriod(
# Fill data from finalized block # Fill data from finalized block
dag.withUpdatedState(tmpState[], finalizedBlck.atSlot) do: dag.withUpdatedState(tmpState[], finalizedBlck.atSlot) do:
let bdata = dag.getForkedBlock(blck.bid).get let bdata = dag.getForkedBlock(blck.bid).get
withStateAndBlck(stateData.data, bdata): withStateAndBlck(state, bdata):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
update.next_sync_committee = update.next_sync_committee =
state.data.next_sync_committee state.data.next_sync_committee
@ -643,7 +643,7 @@ proc initBestLightClientUpdateForPeriod(
# Fill data from attested block # Fill data from attested block
dag.withUpdatedState(tmpState[], bestNonFinalizedRef.parent.atSlot) do: dag.withUpdatedState(tmpState[], bestNonFinalizedRef.parent.atSlot) do:
let bdata = dag.getForkedBlock(blck.bid).get let bdata = dag.getForkedBlock(blck.bid).get
withStateAndBlck(stateData.data, bdata): withStateAndBlck(state, bdata):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
update.attested_header = update.attested_header =
blck.toBeaconBlockHeader blck.toBeaconBlockHeader
@ -705,10 +705,10 @@ proc initLightClientBootstrapForPeriod(
blck.slot >= lowSlot and blck.slot <= highSlot and blck.slot >= lowSlot and blck.slot <= highSlot and
not dag.lightClientCache.bootstrap.hasKey(blck.slot): not dag.lightClientCache.bootstrap.hasKey(blck.slot):
var cachedBootstrap {.noinit.}: CachedLightClientBootstrap var cachedBootstrap {.noinit.}: CachedLightClientBootstrap
doAssert dag.updateStateData( doAssert dag.updateState(
tmpState[], blck.atSlot, save = false, tmpCache) tmpState[], blck.atSlot, save = false, tmpCache)
withStateVars(tmpState[]): withStateVars(tmpState[]):
withState(stateData.data): withState(state):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
state.data.build_proof( state.data.build_proof(
altair.CURRENT_SYNC_COMMITTEE_INDEX, altair.CURRENT_SYNC_COMMITTEE_INDEX,
@ -756,11 +756,11 @@ proc initLightClientCache*(dag: ChainDAGRef) =
cpIndex = 0 cpIndex = 0
for i in countdown(blocksBetween.high, blocksBetween.low): for i in countdown(blocksBetween.high, blocksBetween.low):
blockRef = blocksBetween[i] blockRef = blocksBetween[i]
doAssert dag.updateStateData( doAssert dag.updateState(
dag.headState, blockRef.atSlot(blockRef.slot), save = false, cache) dag.headState, blockRef.atSlot(), save = false, cache)
withStateVars(dag.headState): withStateVars(dag.headState):
let bdata = dag.getForkedBlock(blck.bid).get let bdata = dag.getForkedBlock(blockRef.bid).get
withStateAndBlck(stateData.data, bdata): withStateAndBlck(state, bdata):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
# Cache data for `LightClientUpdate` of descendant blocks # Cache data for `LightClientUpdate` of descendant blocks
dag.cacheLightClientData(state, blck, isNew = false) dag.cacheLightClientData(state, blck, isNew = false)
@ -791,11 +791,11 @@ proc initLightClientCache*(dag: ChainDAGRef) =
dag.getBlockAtSlot(checkpoint.epoch.start_slot).expect("TODO").blck dag.getBlockAtSlot(checkpoint.epoch.start_slot).expect("TODO").blck
if cpRef != nil and cpRef.slot >= earliestSlot: if cpRef != nil and cpRef.slot >= earliestSlot:
assert cpRef.bid.root == checkpoint.root assert cpRef.bid.root == checkpoint.root
doAssert dag.updateStateData( doAssert dag.updateState(
tmpState[], cpRef.atSlot, save = false, tmpCache) tmpState[], cpRef.atSlot, save = false, tmpCache)
withStateVars(tmpState[]): withStateVars(tmpState[]):
let bdata = dag.getForkedBlock(blck.bid).get let bdata = dag.getForkedBlock(cpRef.bid).get
withStateAndBlck(stateData.data, bdata): withStateAndBlck(state, bdata):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
dag.cacheLightClientData(state, blck, isNew = false) dag.cacheLightClientData(state, blck, isNew = false)
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
@ -880,7 +880,7 @@ proc getLightClientBootstrap*(
if dag.importLightClientData == ImportLightClientData.OnDemand: if dag.importLightClientData == ImportLightClientData.OnDemand:
var tmpState = assignClone(dag.headState) var tmpState = assignClone(dag.headState)
dag.withUpdatedState(tmpState[], dag.getBlockAtSlot(slot).expect("TODO")) do: dag.withUpdatedState(tmpState[], dag.getBlockAtSlot(slot).expect("TODO")) do:
withState(stateData.data): withState(state):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
state.data.build_proof( state.data.build_proof(
altair.CURRENT_SYNC_COMMITTEE_INDEX, altair.CURRENT_SYNC_COMMITTEE_INDEX,

View File

@ -191,7 +191,7 @@ proc storeBlock*(
vm[].registerAttestationInBlock(attestation.data, validator_index, vm[].registerAttestationInBlock(attestation.data, validator_index,
trustedBlock.message) trustedBlock.message)
withState(dag[].clearanceState.data): withState(dag[].clearanceState):
when stateFork >= BeaconStateFork.Altair and when stateFork >= BeaconStateFork.Altair and
Trusted isnot phase0.TrustedSignedBeaconBlock: # altair+ Trusted isnot phase0.TrustedSignedBeaconBlock: # altair+
for i in trustedBlock.message.body.sync_aggregate.sync_committee_bits.oneIndices(): for i in trustedBlock.message.body.sync_aggregate.sync_committee_bits.oneIndices():

View File

@ -233,7 +233,7 @@ template validateBeaconBlockBellatrix(
# to the slot -- i.e. execution_payload.timestamp == # to the slot -- i.e. execution_payload.timestamp ==
# compute_timestamp_at_slot(state, block.slot). # compute_timestamp_at_slot(state, block.slot).
let timestampAtSlot = let timestampAtSlot =
withState(dag.headState.data): withState(dag.headState):
compute_timestamp_at_slot(state.data, signed_beacon_block.message.slot) compute_timestamp_at_slot(state.data, signed_beacon_block.message.slot)
if not (signed_beacon_block.message.body.execution_payload.timestamp == if not (signed_beacon_block.message.body.execution_payload.timestamp ==
timestampAtSlot): timestampAtSlot):
@ -340,8 +340,7 @@ proc validateBeaconBlock*(
# compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) == # compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) ==
# store.finalized_checkpoint.root # store.finalized_checkpoint.root
let let
finalized_checkpoint = getStateField( finalized_checkpoint = getStateField(dag.headState, finalized_checkpoint)
dag.headState.data, finalized_checkpoint)
ancestor = get_ancestor(parent, finalized_checkpoint.epoch.start_slot) ancestor = get_ancestor(parent, finalized_checkpoint.epoch.start_slot)
if ancestor.isNil: if ancestor.isNil:
@ -378,7 +377,7 @@ proc validateBeaconBlock*(
# with respect to the proposer_index pubkey. # with respect to the proposer_index pubkey.
if not verify_block_signature( if not verify_block_signature(
dag.forkAtEpoch(signed_beacon_block.message.slot.epoch), dag.forkAtEpoch(signed_beacon_block.message.slot.epoch),
getStateField(dag.headState.data, genesis_validators_root), getStateField(dag.headState, genesis_validators_root),
signed_beacon_block.message.slot, signed_beacon_block.message.slot,
signed_beacon_block.root, signed_beacon_block.root,
dag.validatorKey(proposer.get()).get(), dag.validatorKey(proposer.get()).get(),
@ -497,7 +496,7 @@ proc validateAttestation*(
let let
fork = pool.dag.forkAtEpoch(attestation.data.slot.epoch) fork = pool.dag.forkAtEpoch(attestation.data.slot.epoch)
genesis_validators_root = genesis_validators_root =
getStateField(pool.dag.headState.data, genesis_validators_root) getStateField(pool.dag.headState, genesis_validators_root)
attesting_index = get_attesting_indices_one( attesting_index = get_attesting_indices_one(
epochRef, slot, committee_index, attestation.aggregation_bits) epochRef, slot, committee_index, attestation.aggregation_bits)
@ -691,7 +690,7 @@ proc validateAggregate*(
let let
fork = pool.dag.forkAtEpoch(aggregate.data.slot.epoch) fork = pool.dag.forkAtEpoch(aggregate.data.slot.epoch)
genesis_validators_root = genesis_validators_root =
getStateField(pool.dag.headState.data, genesis_validators_root) getStateField(pool.dag.headState, genesis_validators_root)
attesting_indices = get_attesting_indices( attesting_indices = get_attesting_indices(
epochRef, slot, committee_index, aggregate.aggregation_bits) epochRef, slot, committee_index, aggregate.aggregation_bits)
@ -777,7 +776,7 @@ proc validateAttesterSlashing*(
# [REJECT] All of the conditions within process_attester_slashing pass # [REJECT] All of the conditions within process_attester_slashing pass
# validation. # validation.
let attester_slashing_validity = let attester_slashing_validity =
check_attester_slashing(pool.dag.headState.data, attester_slashing, {}) check_attester_slashing(pool.dag.headState, attester_slashing, {})
if attester_slashing_validity.isErr: if attester_slashing_validity.isErr:
return err((ValidationResult.Reject, attester_slashing_validity.error)) return err((ValidationResult.Reject, attester_slashing_validity.error))
@ -800,7 +799,7 @@ proc validateProposerSlashing*(
# [REJECT] All of the conditions within process_proposer_slashing pass validation. # [REJECT] All of the conditions within process_proposer_slashing pass validation.
let proposer_slashing_validity = let proposer_slashing_validity =
check_proposer_slashing(pool.dag.headState.data, proposer_slashing, {}) check_proposer_slashing(pool.dag.headState, proposer_slashing, {})
if proposer_slashing_validity.isErr: if proposer_slashing_validity.isErr:
return err((ValidationResult.Reject, proposer_slashing_validity.error)) return err((ValidationResult.Reject, proposer_slashing_validity.error))
@ -813,7 +812,7 @@ proc validateVoluntaryExit*(
# [IGNORE] The voluntary exit is the first valid voluntary exit received for # [IGNORE] The voluntary exit is the first valid voluntary exit received for
# the validator with index signed_voluntary_exit.message.validator_index. # the validator with index signed_voluntary_exit.message.validator_index.
if signed_voluntary_exit.message.validator_index >= if signed_voluntary_exit.message.validator_index >=
getStateField(pool.dag.headState.data, validators).lenu64: getStateField(pool.dag.headState, validators).lenu64:
return errIgnore("VoluntaryExit: validator index too high") return errIgnore("VoluntaryExit: validator index too high")
# Given that getStateField(pool.dag.headState, validators) is a seq, # Given that getStateField(pool.dag.headState, validators) is a seq,
@ -826,7 +825,7 @@ proc validateVoluntaryExit*(
# validation. # validation.
let voluntary_exit_validity = let voluntary_exit_validity =
check_voluntary_exit( check_voluntary_exit(
pool.dag.cfg, pool.dag.headState.data, signed_voluntary_exit, {}) pool.dag.cfg, pool.dag.headState, signed_voluntary_exit, {})
if voluntary_exit_validity.isErr: if voluntary_exit_validity.isErr:
return err((ValidationResult.Reject, voluntary_exit_validity.error)) return err((ValidationResult.Reject, voluntary_exit_validity.error))

View File

@ -395,7 +395,7 @@ proc init*(T: type BeaconNode,
importLightClientData = config.importLightClientData) importLightClientData = config.importLightClientData)
quarantine = newClone(Quarantine.init()) quarantine = newClone(Quarantine.init())
databaseGenesisValidatorsRoot = databaseGenesisValidatorsRoot =
getStateField(dag.headState.data, genesis_validators_root) getStateField(dag.headState, genesis_validators_root)
if genesisStateContents.len != 0: if genesisStateContents.len != 0:
let let
@ -408,8 +408,7 @@ proc init*(T: type BeaconNode,
dataDir = config.dataDir dataDir = config.dataDir
quit 1 quit 1
let beaconClock = BeaconClock.init( let beaconClock = BeaconClock.init(getStateField(dag.headState, genesis_time))
getStateField(dag.headState.data, genesis_time))
if config.weakSubjectivityCheckpoint.isSome: if config.weakSubjectivityCheckpoint.isSome:
let let
@ -417,14 +416,14 @@ proc init*(T: type BeaconNode,
isCheckpointStale = not is_within_weak_subjectivity_period( isCheckpointStale = not is_within_weak_subjectivity_period(
cfg, cfg,
currentSlot, currentSlot,
dag.headState.data, dag.headState,
config.weakSubjectivityCheckpoint.get) config.weakSubjectivityCheckpoint.get)
if isCheckpointStale: if isCheckpointStale:
error "Weak subjectivity checkpoint is stale", error "Weak subjectivity checkpoint is stale",
currentSlot, currentSlot,
checkpoint = config.weakSubjectivityCheckpoint.get, checkpoint = config.weakSubjectivityCheckpoint.get,
headStateSlot = getStateField(dag.headState.data, slot) headStateSlot = getStateField(dag.headState, slot)
quit 1 quit 1
if eth1Monitor.isNil and config.web3Urls.len > 0: if eth1Monitor.isNil and config.web3Urls.len > 0:
@ -498,7 +497,7 @@ proc init*(T: type BeaconNode,
getBeaconTime = beaconClock.getBeaconTimeFn() getBeaconTime = beaconClock.getBeaconTimeFn()
network = createEth2Node( network = createEth2Node(
rng, config, netKeys, cfg, dag.forkDigests, getBeaconTime, rng, config, netKeys, cfg, dag.forkDigests, getBeaconTime,
getStateField(dag.headState.data, genesis_validators_root)) getStateField(dag.headState, genesis_validators_root))
attestationPool = newClone( attestationPool = newClone(
AttestationPool.init( AttestationPool.init(
dag, quarantine, onAttestationReceived, config.proposerBoosting)) dag, quarantine, onAttestationReceived, config.proposerBoosting))
@ -534,7 +533,7 @@ proc init*(T: type BeaconNode,
let let
slashingProtectionDB = slashingProtectionDB =
SlashingProtectionDB.init( SlashingProtectionDB.init(
getStateField(dag.headState.data, genesis_validators_root), getStateField(dag.headState, genesis_validators_root),
config.validatorsDir(), SlashingDbName) config.validatorsDir(), SlashingDbName)
validatorPool = newClone(ValidatorPool.init(slashingProtectionDB)) validatorPool = newClone(ValidatorPool.init(slashingProtectionDB))
@ -795,7 +794,7 @@ proc addAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Sl
# replaced as usual by trackSyncCommitteeTopics, which runs at slot end. # replaced as usual by trackSyncCommitteeTopics, which runs at slot end.
let let
syncCommittee = syncCommittee =
withState(node.dag.headState.data): withState(node.dag.headState):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
state.data.current_sync_committee state.data.current_sync_committee
else: else:
@ -839,7 +838,7 @@ proc trackCurrentSyncCommitteeTopics(node: BeaconNode, slot: Slot) =
# for epoch alignment. # for epoch alignment.
let let
syncCommittee = syncCommittee =
withState(node.dag.headState.data): withState(node.dag.headState):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
state.data.current_sync_committee state.data.current_sync_committee
else: else:
@ -895,7 +894,7 @@ proc trackNextSyncCommitteeTopics(node: BeaconNode, slot: Slot) =
let let
syncCommittee = syncCommittee =
withState(node.dag.headState.data): withState(node.dag.headState):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
state.data.next_sync_committee state.data.next_sync_committee
else: else:
@ -989,7 +988,7 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
# it might also happen on a sufficiently fast restart # it might also happen on a sufficiently fast restart
# We "know" the actions for the current and the next epoch # We "know" the actions for the current and the next epoch
withState(node.dag.headState.data): withState(node.dag.headState):
if node.actionTracker.needsUpdate(state, slot.epoch): if node.actionTracker.needsUpdate(state, slot.epoch):
let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect( let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect(
"Getting head EpochRef should never fail") "Getting head EpochRef should never fail")
@ -1069,7 +1068,7 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
# Update upcoming actions - we do this every slot in case a reorg happens # Update upcoming actions - we do this every slot in case a reorg happens
let head = node.dag.head let head = node.dag.head
if node.isSynced(head): if node.isSynced(head):
withState(node.dag.headState.data): withState(node.dag.headState):
if node.actionTracker.needsUpdate(state, slot.epoch + 1): if node.actionTracker.needsUpdate(state, slot.epoch + 1):
let epochRef = node.dag.getEpochRef(head, slot.epoch + 1, false).expect( let epochRef = node.dag.getEpochRef(head, slot.epoch + 1, false).expect(
"Getting head EpochRef should never fail") "Getting head EpochRef should never fail")
@ -1158,7 +1157,7 @@ proc onSlotStart(
peers = len(node.network.peerPool), peers = len(node.network.peerPool),
head = shortLog(node.dag.head), head = shortLog(node.dag.head),
finalized = shortLog(getStateField( finalized = shortLog(getStateField(
node.dag.headState.data, finalized_checkpoint)), node.dag.headState, finalized_checkpoint)),
delay = shortLog(delay) delay = shortLog(delay)
# Check before any re-scheduling of onSlotStart() # Check before any re-scheduling of onSlotStart()
@ -1466,9 +1465,9 @@ proc start*(node: BeaconNode) {.raises: [Defect, CatchableError].} =
node.beaconClock.now() - finalizedHead.slot.start_beacon_time(), node.beaconClock.now() - finalizedHead.slot.start_beacon_time(),
head = shortLog(head), head = shortLog(head),
justified = shortLog(getStateField( justified = shortLog(getStateField(
node.dag.headState.data, current_justified_checkpoint)), node.dag.headState, current_justified_checkpoint)),
finalized = shortLog(getStateField( finalized = shortLog(getStateField(
node.dag.headState.data, finalized_checkpoint)), node.dag.headState, finalized_checkpoint)),
finalizedHead = shortLog(finalizedHead), finalizedHead = shortLog(finalizedHead),
SLOTS_PER_EPOCH, SLOTS_PER_EPOCH,
SECONDS_PER_SLOT, SECONDS_PER_SLOT,
@ -1519,7 +1518,7 @@ when not defined(windows):
proc dataResolver(expr: string): string {.raises: [Defect].} = proc dataResolver(expr: string): string {.raises: [Defect].} =
template justified: untyped = node.dag.head.atEpochStart( template justified: untyped = node.dag.head.atEpochStart(
getStateField( getStateField(
node.dag.headState.data, current_justified_checkpoint).epoch) node.dag.headState, current_justified_checkpoint).epoch)
# TODO: # TODO:
# We should introduce a general API for resolving dot expressions # We should introduce a general API for resolving dot expressions
# such as `db.latest_block.slot` or `metrics.connected_peers`. # such as `db.latest_block.slot` or `metrics.connected_peers`.

View File

@ -105,9 +105,9 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet, "/eth/v1/beacon/genesis") do () -> RestApiResponse: router.api(MethodGet, "/eth/v1/beacon/genesis") do () -> RestApiResponse:
return RestApiResponse.jsonResponse( return RestApiResponse.jsonResponse(
( (
genesis_time: getStateField(node.dag.headState.data, genesis_time), genesis_time: getStateField(node.dag.headState, genesis_time),
genesis_validators_root: genesis_validators_root:
getStateField(node.dag.headState.data, genesis_validators_root), getStateField(node.dag.headState, genesis_validators_root),
genesis_fork_version: node.dag.cfg.GENESIS_FORK_VERSION genesis_fork_version: node.dag.cfg.GENESIS_FORK_VERSION
) )
) )
@ -150,12 +150,9 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
node.withStateForBlockSlot(bslot): node.withStateForBlockSlot(bslot):
return RestApiResponse.jsonResponse( return RestApiResponse.jsonResponse(
( (
previous_version: previous_version: getStateField(state, fork).previous_version,
getStateField(stateData.data, fork).previous_version, current_version: getStateField(state, fork).current_version,
current_version: epoch: getStateField(state, fork).epoch
getStateField(stateData.data, fork).current_version,
epoch:
getStateField(stateData.data, fork).epoch
) )
) )
return RestApiResponse.jsonError(Http404, StateNotFoundError) return RestApiResponse.jsonError(Http404, StateNotFoundError)
@ -180,10 +177,10 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonResponse( return RestApiResponse.jsonResponse(
( (
previous_justified: previous_justified:
getStateField(stateData.data, previous_justified_checkpoint), getStateField(state, previous_justified_checkpoint),
current_justified: current_justified:
getStateField(stateData.data, current_justified_checkpoint), getStateField(state, current_justified_checkpoint),
finalized: getStateField(stateData.data, finalized_checkpoint) finalized: getStateField(state, finalized_checkpoint)
) )
) )
return RestApiResponse.jsonError(Http404, StateNotFoundError) return RestApiResponse.jsonError(Http404, StateNotFoundError)
@ -228,8 +225,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
node.withStateForBlockSlot(bslot): node.withStateForBlockSlot(bslot):
let let
current_epoch = getStateField(stateData.data, slot).epoch() current_epoch = getStateField(state, slot).epoch()
validatorsCount = lenu64(getStateField(stateData.data, validators)) validatorsCount = lenu64(getStateField(state, validators))
let indices = let indices =
block: block:
@ -259,7 +256,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
indexset.incl(vindex) indexset.incl(vindex)
if len(keyset) > 0: if len(keyset) > 0:
let optIndices = keysToIndices(node.restKeysCache, stateData.data, let optIndices = keysToIndices(node.restKeysCache, state,
keyset.toSeq()) keyset.toSeq())
# Remove all the duplicates. # Remove all the duplicates.
for item in optIndices: for item in optIndices:
@ -277,10 +274,10 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
# return empty response. # return empty response.
if len(validatorIds) == 0: if len(validatorIds) == 0:
# There is no indices, so we going to filter all the validators. # There is no indices, so we going to filter all the validators.
for index, validator in getStateField(stateData.data, for index, validator in getStateField(state,
validators).pairs(): validators).pairs():
let let
balance = getStateField(stateData.data, balances).asSeq()[index] balance = getStateField(state, balances).asSeq()[index]
status = status =
block: block:
let sres = validator.getStatus(current_epoch) let sres = validator.getStatus(current_epoch)
@ -295,8 +292,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
else: else:
for index in indices: for index in indices:
let let
validator = getStateField(stateData.data, validators).asSeq()[index] validator = getStateField(state, validators).asSeq()[index]
balance = getStateField(stateData.data, balances).asSeq()[index] balance = getStateField(state, balances).asSeq()[index]
status = status =
block: block:
let sres = validator.getStatus(current_epoch) let sres = validator.getStatus(current_epoch)
@ -333,15 +330,15 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
node.withStateForBlockSlot(bslot): node.withStateForBlockSlot(bslot):
let let
current_epoch = getStateField(stateData.data, slot).epoch() current_epoch = getStateField(state, slot).epoch()
validatorsCount = lenu64(getStateField(stateData.data, validators)) validatorsCount = lenu64(getStateField(state, validators))
let vindex = let vindex =
block: block:
let vid = validator_id.get() let vid = validator_id.get()
case vid.kind case vid.kind
of ValidatorQueryKind.Key: of ValidatorQueryKind.Key:
let optIndices = keysToIndices(node.restKeysCache, stateData.data, let optIndices = keysToIndices(node.restKeysCache, state,
[vid.key]) [vid.key])
if optIndices[0].isNone(): if optIndices[0].isNone():
return RestApiResponse.jsonError(Http404, ValidatorNotFoundError) return RestApiResponse.jsonError(Http404, ValidatorNotFoundError)
@ -362,8 +359,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
index index
let let
validator = getStateField(stateData.data, validators).asSeq()[vindex] validator = getStateField(state, validators).asSeq()[vindex]
balance = getStateField(stateData.data, balances).asSeq()[vindex] balance = getStateField(state, balances).asSeq()[vindex]
status = status =
block: block:
let sres = validator.getStatus(current_epoch) let sres = validator.getStatus(current_epoch)
@ -405,7 +402,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
ires ires
node.withStateForBlockSlot(bslot): node.withStateForBlockSlot(bslot):
let validatorsCount = lenu64(getStateField(stateData.data, validators)) let validatorsCount = lenu64(getStateField(state, validators))
let indices = let indices =
block: block:
@ -434,7 +431,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
indexset.incl(vindex) indexset.incl(vindex)
if len(keyset) > 0: if len(keyset) > 0:
let optIndices = keysToIndices(node.restKeysCache, stateData.data, let optIndices = keysToIndices(node.restKeysCache, state,
keyset.toSeq()) keyset.toSeq())
# Remove all the duplicates. # Remove all the duplicates.
for item in optIndices: for item in optIndices:
@ -453,13 +450,12 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
if len(validatorIds) == 0: if len(validatorIds) == 0:
# There is no indices, so we going to return balances of all # There is no indices, so we going to return balances of all
# known validators. # known validators.
for index, balance in getStateField(stateData.data, for index, balance in getStateField(state, balances).asSeq.pairs():
balances).pairs():
res.add(RestValidatorBalance.init(ValidatorIndex(index), res.add(RestValidatorBalance.init(ValidatorIndex(index),
balance)) balance))
else: else:
for index in indices: for index in indices:
let balance = getStateField(stateData.data, balances).asSeq()[index] let balance = getStateField(state, balances).asSeq()[index]
res.add(RestValidatorBalance.init(index, balance)) res.add(RestValidatorBalance.init(index, balance))
res res
return RestApiResponse.jsonResponse(response) return RestApiResponse.jsonResponse(response)
@ -544,15 +540,14 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
node.withStateForBlockSlot(bslot): node.withStateForBlockSlot(bslot):
proc getCommittee(slot: Slot, proc getCommittee(slot: Slot,
index: CommitteeIndex): RestBeaconStatesCommittees = index: CommitteeIndex): RestBeaconStatesCommittees =
let validators = get_beacon_committee(stateData.data, slot, index, let validators = get_beacon_committee(state, slot, index, cache)
cache)
RestBeaconStatesCommittees(index: index, slot: slot, RestBeaconStatesCommittees(index: index, slot: slot,
validators: validators) validators: validators)
proc forSlot(slot: Slot, cindex: Option[CommitteeIndex], proc forSlot(slot: Slot, cindex: Option[CommitteeIndex],
res: var seq[RestBeaconStatesCommittees]) = res: var seq[RestBeaconStatesCommittees]) =
let committees_per_slot = get_committee_count_per_slot( let committees_per_slot = get_committee_count_per_slot(
stateData.data, slot.epoch, cache) state, slot.epoch, cache)
if cindex.isNone: if cindex.isNone:
for committee_index in get_committee_indices(committees_per_slot): for committee_index in get_committee_indices(committees_per_slot):
@ -566,7 +561,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
var res: seq[RestBeaconStatesCommittees] var res: seq[RestBeaconStatesCommittees]
let qepoch = let qepoch =
if vepoch.isNone: if vepoch.isNone:
epoch(getStateField(stateData.data, slot)) epoch(getStateField(state, slot))
else: else:
vepoch.get() vepoch.get()
@ -617,7 +612,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
node.withStateForBlockSlot(bslot): node.withStateForBlockSlot(bslot):
let keys = let keys =
block: block:
let res = syncCommitteeParticipants(stateData().data, qepoch) let res = syncCommitteeParticipants(state, qepoch)
if res.isErr(): if res.isErr():
return RestApiResponse.jsonError(Http400, return RestApiResponse.jsonError(Http400,
$res.error()) $res.error())
@ -630,8 +625,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
let indices = let indices =
block: block:
var res: seq[ValidatorIndex] var res: seq[ValidatorIndex]
let optIndices = keysToIndices(node.restKeysCache, stateData().data, let optIndices = keysToIndices(node.restKeysCache, state, keys)
keys)
# Remove all the duplicates. # Remove all the duplicates.
for item in optIndices: for item in optIndices:
if item.isNone(): if item.isNone():

View File

@ -39,12 +39,12 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
res.get() res.get()
node.withStateForBlockSlot(bslot): node.withStateForBlockSlot(bslot):
return return
case stateData.data.kind case state.kind
of BeaconStateFork.Phase0: of BeaconStateFork.Phase0:
if contentType == sszMediaType: if contentType == sszMediaType:
RestApiResponse.sszResponse(stateData.data.phase0Data.data) RestApiResponse.sszResponse(state.phase0Data.data)
elif contentType == jsonMediaType: elif contentType == jsonMediaType:
RestApiResponse.jsonResponse(stateData.data.phase0Data.data) RestApiResponse.jsonResponse(state.phase0Data.data)
else: else:
RestApiResponse.jsonError(Http500, InvalidAcceptError) RestApiResponse.jsonError(Http500, InvalidAcceptError)
of BeaconStateFork.Altair, BeaconStateFork.Bellatrix: of BeaconStateFork.Altair, BeaconStateFork.Bellatrix:
@ -75,9 +75,9 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
node.withStateForBlockSlot(bslot): node.withStateForBlockSlot(bslot):
return return
if contentType == jsonMediaType: if contentType == jsonMediaType:
RestApiResponse.jsonResponsePlain(stateData.data) RestApiResponse.jsonResponsePlain(state)
elif contentType == sszMediaType: elif contentType == sszMediaType:
withState(stateData.data): withState(state):
RestApiResponse.sszResponse(state.data) RestApiResponse.sszResponse(state.data)
else: else:
RestApiResponse.jsonError(Http500, InvalidAcceptError) RestApiResponse.jsonError(Http500, InvalidAcceptError)

View File

@ -118,9 +118,9 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet, "/nimbus/v1/chain/head") do() -> RestApiResponse: router.api(MethodGet, "/nimbus/v1/chain/head") do() -> RestApiResponse:
let let
head = node.dag.head head = node.dag.head
finalized = getStateField(node.dag.headState.data, finalized_checkpoint) finalized = getStateField(node.dag.headState, finalized_checkpoint)
justified = justified =
getStateField(node.dag.headState.data, current_justified_checkpoint) getStateField(node.dag.headState, current_justified_checkpoint)
return RestApiResponse.jsonResponse( return RestApiResponse.jsonResponse(
( (
head_slot: head.slot, head_slot: head.slot,
@ -232,7 +232,7 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
let proposalState = assignClone(node.dag.headState) let proposalState = assignClone(node.dag.headState)
node.dag.withUpdatedState(proposalState[], head.atSlot(wallSlot)) do: node.dag.withUpdatedState(proposalState[], head.atSlot(wallSlot)) do:
return RestApiResponse.jsonResponse( return RestApiResponse.jsonResponse(
node.getBlockProposalEth1Data(stateData.data)) node.getBlockProposalEth1Data(state))
do: do:
return RestApiResponse.jsonError(Http400, PrunedStateError) return RestApiResponse.jsonError(Http400, PrunedStateError)

View File

@ -70,8 +70,8 @@ proc getBlockSlot*(node: BeaconNode,
else: else:
err("State for given slot not found, history not available?") err("State for given slot not found, history not available?")
of StateQueryKind.Root: of StateQueryKind.Root:
if stateIdent.root == getStateRoot(node.dag.headState.data): if stateIdent.root == getStateRoot(node.dag.headState):
ok(node.dag.headState.blck.atSlot()) ok(node.dag.head.atSlot())
else: else:
# We don't have a state root -> BlockSlot mapping # We don't have a state root -> BlockSlot mapping
err("State for given root not found") err("State for given root not found")
@ -85,7 +85,7 @@ proc getBlockSlot*(node: BeaconNode,
ok(node.dag.finalizedHead) ok(node.dag.finalizedHead)
of StateIdentType.Justified: of StateIdentType.Justified:
ok(node.dag.head.atEpochStart(getStateField( ok(node.dag.head.atEpochStart(getStateField(
node.dag.headState.data, current_justified_checkpoint).epoch)) node.dag.headState, current_justified_checkpoint).epoch))
proc getBlockId*(node: BeaconNode, id: BlockIdent): Opt[BlockId] = proc getBlockId*(node: BeaconNode, id: BlockIdent): Opt[BlockId] =
case id.kind case id.kind
@ -140,8 +140,8 @@ template withStateForBlockSlot*(nodeParam: BeaconNode,
node = nodeParam node = nodeParam
blockSlot = blockSlotParam blockSlot = blockSlotParam
template isState(state: StateData): bool = template isState(state: ForkedHashedBeaconState): bool =
state.blck.atSlot(getStateField(state.data, slot)) == blockSlot state.matches_block_slot(blockSlot.blck.root, blockSlot.slot)
var cache {.inject, used.}: StateCache var cache {.inject, used.}: StateCache
@ -175,7 +175,7 @@ template withStateForBlockSlot*(nodeParam: BeaconNode,
else: else:
assignClone(node.dag.headState) assignClone(node.dag.headState)
if node.dag.updateStateData(stateToAdvance[], blockSlot, false, cache): if node.dag.updateState(stateToAdvance[], blockSlot, false, cache):
if cachedState == nil and node.stateTtlCache != nil: if cachedState == nil and node.stateTtlCache != nil:
# This was not a cached state, we can cache it now # This was not a cached state, we can cache it now
node.stateTtlCache.add(stateToAdvance) node.stateTtlCache.add(stateToAdvance)

View File

@ -225,7 +225,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
headSyncPeriod = sync_committee_period(headEpoch) headSyncPeriod = sync_committee_period(headEpoch)
if qSyncPeriod == headSyncPeriod: if qSyncPeriod == headSyncPeriod:
let res = withState(node.dag.headState.data): let res = withState(node.dag.headState):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
produceResponse(indexList, produceResponse(indexList,
state.data.current_sync_committee.pubkeys.data, state.data.current_sync_committee.pubkeys.data,
@ -234,7 +234,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
emptyResponse() emptyResponse()
return RestApiResponse.jsonResponse(res) return RestApiResponse.jsonResponse(res)
elif qSyncPeriod == (headSyncPeriod + 1): elif qSyncPeriod == (headSyncPeriod + 1):
let res = withState(node.dag.headState.data): let res = withState(node.dag.headState):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
produceResponse(indexList, produceResponse(indexList,
state.data.next_sync_committee.pubkeys.data, state.data.next_sync_committee.pubkeys.data,
@ -264,7 +264,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http404, StateNotFoundError) return RestApiResponse.jsonError(Http404, StateNotFoundError)
node.withStateForBlockSlot(bs): node.withStateForBlockSlot(bs):
let res = withState(stateData().data): let res = withState(state):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
produceResponse(indexList, produceResponse(indexList,
state.data.current_sync_committee.pubkeys.data, state.data.current_sync_committee.pubkeys.data,
@ -531,7 +531,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http400, return RestApiResponse.jsonError(Http400,
InvalidCommitteeIndexValueError) InvalidCommitteeIndexValueError)
if uint64(request.validator_index) >= if uint64(request.validator_index) >=
lenu64(getStateField(node.dag.headState.data, validators)): lenu64(getStateField(node.dag.headState, validators)):
return RestApiResponse.jsonError(Http400, return RestApiResponse.jsonError(Http400,
InvalidValidatorIndexValueError) InvalidValidatorIndexValueError)
if wallSlot > request.slot + 1: if wallSlot > request.slot + 1:
@ -555,7 +555,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
request.is_aggregator) request.is_aggregator)
let validator_pubkey = getStateField( let validator_pubkey = getStateField(
node.dag.headState.data, validators).asSeq()[request.validator_index].pubkey node.dag.headState, validators).asSeq()[request.validator_index].pubkey
node.validatorMonitor[].addAutoMonitor( node.validatorMonitor[].addAutoMonitor(
validator_pubkey, ValidatorIndex(request.validator_index)) validator_pubkey, ValidatorIndex(request.validator_index))
@ -583,11 +583,11 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http400, return RestApiResponse.jsonError(Http400,
EpochFromTheIncorrectForkError) EpochFromTheIncorrectForkError)
if uint64(item.validator_index) >= if uint64(item.validator_index) >=
lenu64(getStateField(node.dag.headState.data, validators)): lenu64(getStateField(node.dag.headState, validators)):
return RestApiResponse.jsonError(Http400, return RestApiResponse.jsonError(Http400,
InvalidValidatorIndexValueError) InvalidValidatorIndexValueError)
let validator_pubkey = getStateField( let validator_pubkey = getStateField(
node.dag.headState.data, validators).asSeq()[item.validator_index].pubkey node.dag.headState, validators).asSeq()[item.validator_index].pubkey
node.syncCommitteeMsgPool.syncCommitteeSubscriptions[validator_pubkey] = node.syncCommitteeMsgPool.syncCommitteeSubscriptions[validator_pubkey] =
item.until_epoch item.until_epoch

View File

@ -175,9 +175,9 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
raises: [Defect, CatchableError].} = raises: [Defect, CatchableError].} =
rpcServer.rpc("get_v1_beacon_genesis") do () -> RpcBeaconGenesis: rpcServer.rpc("get_v1_beacon_genesis") do () -> RpcBeaconGenesis:
return ( return (
genesis_time: getStateField(node.dag.headState.data, genesis_time), genesis_time: getStateField(node.dag.headState, genesis_time),
genesis_validators_root: genesis_validators_root:
getStateField(node.dag.headState.data, genesis_validators_root), getStateField(node.dag.headState, genesis_validators_root),
genesis_fork_version: node.dag.cfg.GENESIS_FORK_VERSION genesis_fork_version: node.dag.cfg.GENESIS_FORK_VERSION
) )
@ -187,23 +187,23 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
rpcServer.rpc("get_v1_beacon_states_fork") do (stateId: string) -> Fork: rpcServer.rpc("get_v1_beacon_states_fork") do (stateId: string) -> Fork:
withStateForStateId(stateId): withStateForStateId(stateId):
return getStateField(stateData.data, fork) return getStateField(state, fork)
rpcServer.rpc("get_v1_beacon_states_finality_checkpoints") do ( rpcServer.rpc("get_v1_beacon_states_finality_checkpoints") do (
stateId: string) -> RpcBeaconStatesFinalityCheckpoints: stateId: string) -> RpcBeaconStatesFinalityCheckpoints:
withStateForStateId(stateId): withStateForStateId(stateId):
return (previous_justified: return (previous_justified:
getStateField(stateData.data, previous_justified_checkpoint), getStateField(state, previous_justified_checkpoint),
current_justified: current_justified:
getStateField(stateData.data, current_justified_checkpoint), getStateField(state, current_justified_checkpoint),
finalized: getStateField(stateData.data, finalized_checkpoint)) finalized: getStateField(state, finalized_checkpoint))
rpcServer.rpc("get_v1_beacon_states_stateId_validators") do ( rpcServer.rpc("get_v1_beacon_states_stateId_validators") do (
stateId: string, validatorIds: Option[seq[string]], stateId: string, validatorIds: Option[seq[string]],
status: Option[seq[string]]) -> seq[RpcBeaconStatesValidators]: status: Option[seq[string]]) -> seq[RpcBeaconStatesValidators]:
var vquery: ValidatorQuery var vquery: ValidatorQuery
var squery: StatusQuery var squery: StatusQuery
let current_epoch = getStateField(node.dag.headState.data, slot).epoch let current_epoch = getStateField(node.dag.headState, slot).epoch
template statusCheck(status, statusQuery, vstatus, current_epoch): bool = template statusCheck(status, statusQuery, vstatus, current_epoch): bool =
if status.isNone(): if status.isNone():
@ -230,7 +230,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
vquery = vqres.get() vquery = vqres.get()
if validatorIds.isNone(): if validatorIds.isNone():
for index, validator in getStateField(stateData.data, validators).pairs(): for index, validator in getStateField(state, validators).pairs():
let sres = validator.getStatus(current_epoch) let sres = validator.getStatus(current_epoch)
if sres.isOk: if sres.isOk:
let vstatus = sres.get() let vstatus = sres.get()
@ -240,11 +240,11 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
res.add((validator: validator, res.add((validator: validator,
index: uint64(index), index: uint64(index),
status: vstatus, status: vstatus,
balance: getStateField(stateData.data, balances).asSeq()[index])) balance: getStateField(state, balances).asSeq()[index]))
else: else:
for index in vquery.ids: for index in vquery.ids:
if index < lenu64(getStateField(stateData.data, validators)): if index < lenu64(getStateField(state, validators)):
let validator = getStateField(stateData.data, validators).asSeq()[index] let validator = getStateField(state, validators).asSeq()[index]
let sres = validator.getStatus(current_epoch) let sres = validator.getStatus(current_epoch)
if sres.isOk: if sres.isOk:
let vstatus = sres.get() let vstatus = sres.get()
@ -255,9 +255,9 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
res.add((validator: validator, res.add((validator: validator,
index: uint64(index), index: uint64(index),
status: vstatus, status: vstatus,
balance: getStateField(stateData.data, balances).asSeq()[index])) balance: getStateField(state, balances).asSeq()[index]))
for index, validator in getStateField(stateData.data, validators).pairs(): for index, validator in getStateField(state, validators).pairs():
if validator.pubkey in vquery.keyset: if validator.pubkey in vquery.keyset:
let sres = validator.getStatus(current_epoch) let sres = validator.getStatus(current_epoch)
if sres.isOk: if sres.isOk:
@ -268,12 +268,12 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
res.add((validator: validator, res.add((validator: validator,
index: uint64(index), index: uint64(index),
status: vstatus, status: vstatus,
balance: getStateField(stateData.data, balances).asSeq()[index])) balance: getStateField(state, balances).asSeq()[index]))
return res return res
rpcServer.rpc("get_v1_beacon_states_stateId_validators_validatorId") do ( rpcServer.rpc("get_v1_beacon_states_stateId_validators_validatorId") do (
stateId: string, validatorId: string) -> RpcBeaconStatesValidators: stateId: string, validatorId: string) -> RpcBeaconStatesValidators:
let current_epoch = getStateField(node.dag.headState.data, slot).epoch let current_epoch = getStateField(node.dag.headState, slot).epoch
let vqres = createIdQuery([validatorId]) let vqres = createIdQuery([validatorId])
if vqres.isErr: if vqres.isErr:
raise newException(CatchableError, $vqres.error) raise newException(CatchableError, $vqres.error)
@ -282,23 +282,23 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
withStateForStateId(stateId): withStateForStateId(stateId):
if len(vquery.ids) > 0: if len(vquery.ids) > 0:
let index = vquery.ids[0] let index = vquery.ids[0]
if index < lenu64(getStateField(stateData.data, validators)): if index < lenu64(getStateField(state, validators)):
let validator = getStateField(stateData.data, validators).asSeq()[index] let validator = getStateField(state, validators).asSeq()[index]
let sres = validator.getStatus(current_epoch) let sres = validator.getStatus(current_epoch)
if sres.isOk: if sres.isOk:
return (validator: validator, index: uint64(index), return (validator: validator, index: uint64(index),
status: sres.get(), status: sres.get(),
balance: getStateField(stateData.data, balances).asSeq()[index]) balance: getStateField(state, balances).asSeq()[index])
else: else:
raise newException(CatchableError, "Incorrect validator's state") raise newException(CatchableError, "Incorrect validator's state")
else: else:
for index, validator in getStateField(stateData.data, validators).pairs(): for index, validator in getStateField(state, validators).pairs():
if validator.pubkey in vquery.keyset: if validator.pubkey in vquery.keyset:
let sres = validator.getStatus(current_epoch) let sres = validator.getStatus(current_epoch)
if sres.isOk: if sres.isOk:
return (validator: validator, index: uint64(index), return (validator: validator, index: uint64(index),
status: sres.get(), status: sres.get(),
balance: getStateField(stateData.data, balances).asSeq()[index]) balance: getStateField(state, balances).asSeq()[index])
else: else:
raise newException(CatchableError, "Incorrect validator's state") raise newException(CatchableError, "Incorrect validator's state")
@ -308,7 +308,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
var res: seq[RpcBalance] var res: seq[RpcBalance]
withStateForStateId(stateId): withStateForStateId(stateId):
if validatorsId.isNone(): if validatorsId.isNone():
for index, value in getStateField(stateData.data, balances).pairs(): for index, value in getStateField(state, balances).pairs():
let balance = (index: uint64(index), balance: value) let balance = (index: uint64(index), balance: value)
res.add(balance) res.add(balance)
else: else:
@ -318,17 +318,17 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
var vquery = vqres.get() var vquery = vqres.get()
for index in vquery.ids: for index in vquery.ids:
if index < lenu64(getStateField(stateData.data, validators)): if index < lenu64(getStateField(state, validators)):
let validator = getStateField(stateData.data, validators).asSeq()[index] let validator = getStateField(state, validators).asSeq()[index]
vquery.keyset.excl(validator.pubkey) vquery.keyset.excl(validator.pubkey)
let balance = (index: uint64(index), let balance = (index: uint64(index),
balance: getStateField(stateData.data, balances).asSeq()[index]) balance: getStateField(state, balances).asSeq()[index])
res.add(balance) res.add(balance)
for index, validator in getStateField(stateData.data, validators).pairs(): for index, validator in getStateField(state, validators).pairs():
if validator.pubkey in vquery.keyset: if validator.pubkey in vquery.keyset:
let balance = (index: uint64(index), let balance = (index: uint64(index),
balance: getStateField(stateData.data, balances).asSeq()[index]) balance: getStateField(state, balances).asSeq()[index])
res.add(balance) res.add(balance)
return res return res
@ -339,12 +339,12 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
proc getCommittee(slot: Slot, proc getCommittee(slot: Slot,
index: CommitteeIndex): RpcBeaconStatesCommittees = index: CommitteeIndex): RpcBeaconStatesCommittees =
let vals = get_beacon_committee( let vals = get_beacon_committee(
stateData.data, slot, index, cache).mapIt(it.uint64) state, slot, index, cache).mapIt(it.uint64)
return (index: index.uint64, slot: slot.uint64, validators: vals) return (index: index.uint64, slot: slot.uint64, validators: vals)
proc forSlot(slot: Slot, res: var seq[RpcBeaconStatesCommittees]) = proc forSlot(slot: Slot, res: var seq[RpcBeaconStatesCommittees]) =
let committees_per_slot = let committees_per_slot =
get_committee_count_per_slot(stateData.data, slot.epoch, cache) get_committee_count_per_slot(state, slot.epoch, cache)
if index.isNone: if index.isNone:
for committee_index in get_committee_indices(committees_per_slot): for committee_index in get_committee_indices(committees_per_slot):
@ -359,7 +359,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
let qepoch = let qepoch =
if epoch.isNone: if epoch.isNone:
epoch(getStateField(stateData.data, slot)) epoch(getStateField(state, slot))
else: else:
Epoch(epoch.get()) Epoch(epoch.get())

View File

@ -23,7 +23,7 @@ type
proc installConfigApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {. proc installConfigApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
raises: [Defect, CatchableError].} = raises: [Defect, CatchableError].} =
rpcServer.rpc("get_v1_config_fork_schedule") do () -> seq[Fork]: rpcServer.rpc("get_v1_config_fork_schedule") do () -> seq[Fork]:
return @[getStateField(node.dag.headState.data, fork)] return @[getStateField(node.dag.headState, fork)]
rpcServer.rpc("get_v1_config_spec") do () -> JsonNode: rpcServer.rpc("get_v1_config_spec") do () -> JsonNode:
return %*{ return %*{

View File

@ -26,8 +26,8 @@ proc installDebugApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
rpcServer.rpc("get_v1_debug_beacon_states_stateId") do ( rpcServer.rpc("get_v1_debug_beacon_states_stateId") do (
stateId: string) -> phase0.BeaconState: stateId: string) -> phase0.BeaconState:
withStateForStateId(stateId): withStateForStateId(stateId):
if stateData.data.kind == BeaconStateFork.Phase0: if state.kind == BeaconStateFork.Phase0:
return stateData.data.phase0Data.data return state.phase0Data.data
else: else:
raiseNoAltairSupport() raiseNoAltairSupport()

View File

@ -47,9 +47,9 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
rpcServer.rpc("getChainHead") do () -> JsonNode: rpcServer.rpc("getChainHead") do () -> JsonNode:
let let
head = node.dag.head head = node.dag.head
finalized = getStateField(node.dag.headState.data, finalized_checkpoint) finalized = getStateField(node.dag.headState, finalized_checkpoint)
justified = justified =
getStateField(node.dag.headState.data, current_justified_checkpoint) getStateField(node.dag.headState, current_justified_checkpoint)
return %* { return %* {
"head_slot": head.slot, "head_slot": head.slot,
"head_block_root": head.root.data.toHex(), "head_block_root": head.root.data.toHex(),
@ -109,7 +109,7 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
let proposalState = assignClone(node.dag.headState) let proposalState = assignClone(node.dag.headState)
node.dag.withUpdatedState(proposalState[], head.atSlot(wallSlot)): node.dag.withUpdatedState(proposalState[], head.atSlot(wallSlot)):
return node.getBlockProposalEth1Data(stateData.data) return node.getBlockProposalEth1Data(state)
do: do:
raise (ref CatchableError)(msg: "Trying to access pruned state") raise (ref CatchableError)(msg: "Trying to access pruned state")

View File

@ -26,8 +26,8 @@ template withStateForStateId*(stateId: string, body: untyped): untyped =
let let
bs = node.stateIdToBlockSlot(stateId) bs = node.stateIdToBlockSlot(stateId)
template isState(state: StateData): bool = template isState(state: ForkedHashedBeaconState): bool =
state.blck.atSlot(getStateField(state.data, slot)) == bs state.matches_block_slot(bs.blck.root, bs.slot)
if isState(node.dag.headState): if isState(node.dag.headState):
withStateVars(node.dag.headState): withStateVars(node.dag.headState):
@ -94,12 +94,12 @@ proc stateIdToBlockSlot*(node: BeaconNode, stateId: string): BlockSlot {.raises:
node.dag.finalizedHead node.dag.finalizedHead
of "justified": of "justified":
node.dag.head.atEpochStart( node.dag.head.atEpochStart(
getStateField(node.dag.headState.data, current_justified_checkpoint).epoch) getStateField(node.dag.headState, current_justified_checkpoint).epoch)
else: else:
if stateId.startsWith("0x"): if stateId.startsWith("0x"):
let stateRoot = parseRoot(stateId) let stateRoot = parseRoot(stateId)
if stateRoot == getStateRoot(node.dag.headState.data): if stateRoot == getStateRoot(node.dag.headState):
node.dag.headState.blck.atSlot() node.dag.head.atSlot()
else: else:
# We don't have a state root -> BlockSlot mapping # We don't have a state root -> BlockSlot mapping
raise (ref ValueError)(msg: "State not found") raise (ref ValueError)(msg: "State not found")

View File

@ -146,8 +146,8 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
"Slot requested not in current or next wall-slot epoch") "Slot requested not in current or next wall-slot epoch")
if not verify_slot_signature( if not verify_slot_signature(
getStateField(node.dag.headState.data, fork), getStateField(node.dag.headState, fork),
getStateField(node.dag.headState.data, genesis_validators_root), getStateField(node.dag.headState, genesis_validators_root),
slot, validator_pubkey, slot_signature): slot, validator_pubkey, slot_signature):
raise newException(CatchableError, raise newException(CatchableError,
"Invalid slot signature") "Invalid slot signature")

View File

@ -8,11 +8,12 @@
import import
chronos, chronos,
chronicles, chronicles,
../spec/beaconstate,
../consensus_object_pools/block_pools_types ../consensus_object_pools/block_pools_types
type type
CacheEntry = ref object CacheEntry = ref object
state: ref StateData state: ref ForkedHashedBeaconState
lastUsed: Moment lastUsed: Moment
# This is ref object because we need to capture it by # This is ref object because we need to capture it by
@ -49,7 +50,7 @@ proc scheduleEntryExpiration(cache: StateTtlCache,
discard setTimer(Moment.now + cache.ttl, removeElement) discard setTimer(Moment.now + cache.ttl, removeElement)
proc add*(cache: StateTtlCache, state: ref StateData) = proc add*(cache: StateTtlCache, state: ref ForkedHashedBeaconState) =
var var
now = Moment.now now = Moment.now
lruTime = now lruTime = now
@ -69,7 +70,8 @@ proc add*(cache: StateTtlCache, state: ref StateData) =
cache.scheduleEntryExpiration(index) cache.scheduleEntryExpiration(index)
proc getClosestState*(cache: StateTtlCache, bs: BlockSlot): ref StateData = proc getClosestState*(
cache: StateTtlCache, bs: BlockSlot): ref ForkedHashedBeaconState =
var var
bestSlotDifference = Slot.high bestSlotDifference = Slot.high
index = -1 index = -1
@ -78,7 +80,7 @@ proc getClosestState*(cache: StateTtlCache, bs: BlockSlot): ref StateData =
if cache.entries[i] == nil: if cache.entries[i] == nil:
continue continue
let stateSlot = getStateField(cache.entries[i].state.data, slot) let stateSlot = getStateField(cache.entries[i][].state[], slot)
if stateSlot > bs.slot: if stateSlot > bs.slot:
# We can use only states that can be advanced forward in time. # We can use only states that can be advanced forward in time.
continue continue
@ -92,7 +94,7 @@ proc getClosestState*(cache: StateTtlCache, bs: BlockSlot): ref StateData =
for j in 0 ..< slotDifference: for j in 0 ..< slotDifference:
cur = cur.parentOrSlot cur = cur.parentOrSlot
if cur.blck != cache.entries[i].state.blck: if not cache.entries[i].state[].matches_block(cur.blck.root):
# The cached state and the requested BlockSlot are at different branches # The cached state and the requested BlockSlot are at different branches
# of history. # of history.
continue continue

View File

@ -945,6 +945,9 @@ func latest_block_root*(state: ForkyBeaconState, state_root: Eth2Digest): Eth2Di
func latest_block_root*(state: ForkyHashedBeaconState): Eth2Digest = func latest_block_root*(state: ForkyHashedBeaconState): Eth2Digest =
latest_block_root(state.data, state.root) latest_block_root(state.data, state.root)
func latest_block_root*(state: ForkedHashedBeaconState): Eth2Digest =
withState(state): latest_block_root(state)
func get_sync_committee_cache*( func get_sync_committee_cache*(
state: altair.BeaconState | bellatrix.BeaconState, cache: var StateCache): state: altair.BeaconState | bellatrix.BeaconState, cache: var StateCache):
SyncCommitteeCache = SyncCommitteeCache =
@ -1001,3 +1004,30 @@ func proposer_dependent_root*(state: ForkyHashedBeaconState): Eth2Digest =
func attester_dependent_root*(state: ForkyHashedBeaconState): Eth2Digest = func attester_dependent_root*(state: ForkyHashedBeaconState): Eth2Digest =
let epoch = state.data.slot.epoch let epoch = state.data.slot.epoch
state.dependent_root(if epoch == Epoch(0): epoch else: epoch - 1) state.dependent_root(if epoch == Epoch(0): epoch else: epoch - 1)
func matches_block*(
state: ForkyHashedBeaconState, block_root: Eth2Digest): bool =
## Return true iff the latest block applied to this state matches the given
## `block_root`
block_root == state.latest_block_root
func matches_block*(
state: ForkedHashedBeaconState, block_root: Eth2Digest): bool =
withState(state): state.matches_block(block_root)
func matches_block_slot*(
state: ForkyHashedBeaconState, block_root: Eth2Digest, slot: Slot): bool =
## Return true iff the latest block applied to this state matches the given
## `block_root` and the state slot has been advanced to the given slot
slot == state.data.slot and block_root == state.latest_block_root
func matches_block_slot*(
state: ForkedHashedBeaconState, block_root: Eth2Digest, slot: Slot): bool =
withState(state): state.matches_block_slot(block_root, slot)
func can_advance_slots*(
state: ForkyHashedBeaconState, block_root: Eth2Digest, target_slot: Slot): bool =
## Return true iff we can reach the given block/slot combination simply by
## advancing slots
target_slot >= state.data.slot and block_root == state.latest_block_root
func can_advance_slots*(
state: ForkedHashedBeaconState, block_root: Eth2Digest, target_slot: Slot): bool =
withState(state): state.can_advance_slots(block_root, target_slot)

View File

@ -305,7 +305,7 @@ template partialBeaconBlock(
phase0.BeaconBlock( phase0.BeaconBlock(
slot: state.data.slot, slot: state.data.slot,
proposer_index: proposer_index.uint64, proposer_index: proposer_index.uint64,
parent_root: state.latest_block_root(), parent_root: state.latest_block_root,
body: phase0.BeaconBlockBody( body: phase0.BeaconBlockBody(
randao_reveal: randao_reveal, randao_reveal: randao_reveal,
eth1_data: eth1data, eth1_data: eth1data,
@ -369,7 +369,7 @@ template partialBeaconBlock(
altair.BeaconBlock( altair.BeaconBlock(
slot: state.data.slot, slot: state.data.slot,
proposer_index: proposer_index.uint64, proposer_index: proposer_index.uint64,
parent_root: state.latest_block_root(), parent_root: state.latest_block_root,
body: altair.BeaconBlockBody( body: altair.BeaconBlockBody(
randao_reveal: randao_reveal, randao_reveal: randao_reveal,
eth1_data: eth1data, eth1_data: eth1data,
@ -434,7 +434,7 @@ template partialBeaconBlock(
bellatrix.BeaconBlock( bellatrix.BeaconBlock(
slot: state.data.slot, slot: state.data.slot,
proposer_index: proposer_index.uint64, proposer_index: proposer_index.uint64,
parent_root: state.latest_block_root(), parent_root: state.latest_block_root,
body: bellatrix.BeaconBlockBody( body: bellatrix.BeaconBlockBody(
randao_reveal: randao_reveal, randao_reveal: randao_reveal,
eth1_data: eth1data, eth1_data: eth1data,

View File

@ -111,13 +111,13 @@ proc addRemoteValidator(pool: var ValidatorPool, validators: auto,
proc addLocalValidators*(node: BeaconNode, proc addLocalValidators*(node: BeaconNode,
validators: openArray[KeystoreData]) = validators: openArray[KeystoreData]) =
withState(node.dag.headState.data): withState(node.dag.headState):
for item in validators: for item in validators:
node.addLocalValidator(state.data.validators.asSeq(), item) node.addLocalValidator(state.data.validators.asSeq(), item)
proc addRemoteValidators*(node: BeaconNode, proc addRemoteValidators*(node: BeaconNode,
validators: openArray[KeystoreData]) = validators: openArray[KeystoreData]) =
withState(node.dag.headState.data): withState(node.dag.headState):
for item in validators: for item in validators:
node.attachedValidators[].addRemoteValidator( node.attachedValidators[].addRemoteValidator(
state.data.validators.asSeq(), item) state.data.validators.asSeq(), item)
@ -253,7 +253,7 @@ proc sendSyncCommitteeMessage*(
proc sendSyncCommitteeMessages*(node: BeaconNode, proc sendSyncCommitteeMessages*(node: BeaconNode,
msgs: seq[SyncCommitteeMessage] msgs: seq[SyncCommitteeMessage]
): Future[seq[SendResult]] {.async.} = ): Future[seq[SendResult]] {.async.} =
return withState(node.dag.headState.data): return withState(node.dag.headState):
when stateFork >= BeaconStateFork.Altair: when stateFork >= BeaconStateFork.Altair:
var statuses = newSeq[Option[SendResult]](len(msgs)) var statuses = newSeq[Option[SendResult]](len(msgs))
@ -448,26 +448,26 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
var info: ForkedEpochInfo var info: ForkedEpochInfo
process_slots( process_slots(
node.dag.cfg, stateData.data, slot, cache, info, node.dag.cfg, state, slot, cache, info,
{skipLastStateRootCalculation}).expect("advancing 1 slot should not fail") {skipLastStateRootCalculation}).expect("advancing 1 slot should not fail")
let let
eth1Proposal = node.getBlockProposalEth1Data(stateData.data) eth1Proposal = node.getBlockProposalEth1Data(state)
if eth1Proposal.hasMissingDeposits: if eth1Proposal.hasMissingDeposits:
warn "Eth1 deposits not available. Skipping block proposal", slot warn "Eth1 deposits not available. Skipping block proposal", slot
return ForkedBlockResult.err("Eth1 deposits not available") return ForkedBlockResult.err("Eth1 deposits not available")
let exits = withState(stateData.data): let exits = withState(state):
node.exitPool[].getBeaconBlockExits(state.data) node.exitPool[].getBeaconBlockExits(state.data)
let res = makeBeaconBlock( let res = makeBeaconBlock(
node.dag.cfg, node.dag.cfg,
stateData.data, state,
validator_index, validator_index,
randao_reveal, randao_reveal,
eth1Proposal.vote, eth1Proposal.vote,
graffiti, graffiti,
node.attestationPool[].getAttestationsForBlock(stateData.data, cache), node.attestationPool[].getAttestationsForBlock(state, cache),
eth1Proposal.deposits, eth1Proposal.deposits,
exits, exits,
if slot.epoch < node.dag.cfg.ALTAIR_FORK_EPOCH: if slot.epoch < node.dag.cfg.ALTAIR_FORK_EPOCH:
@ -507,7 +507,7 @@ proc proposeBlock(node: BeaconNode,
let let
fork = node.dag.forkAtEpoch(slot.epoch) fork = node.dag.forkAtEpoch(slot.epoch)
genesis_validators_root = genesis_validators_root =
getStateField(node.dag.headState.data, genesis_validators_root) getStateField(node.dag.headState, genesis_validators_root)
randao = randao =
block: block:
let res = await validator.genRandaoReveal(fork, genesis_validators_root, let res = await validator.genRandaoReveal(fork, genesis_validators_root,
@ -639,7 +639,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
committees_per_slot = get_committee_count_per_slot(epochRef) committees_per_slot = get_committee_count_per_slot(epochRef)
fork = node.dag.forkAtEpoch(slot.epoch) fork = node.dag.forkAtEpoch(slot.epoch)
genesis_validators_root = genesis_validators_root =
getStateField(node.dag.headState.data, genesis_validators_root) getStateField(node.dag.headState, genesis_validators_root)
for committee_index in get_committee_indices(committees_per_slot): for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee(epochRef, slot, committee_index) let committee = get_beacon_committee(epochRef, slot, committee_index)
@ -731,7 +731,7 @@ proc handleSyncCommitteeMessages(node: BeaconNode, head: BlockRef, slot: Slot) =
for subcommitteeIdx in SyncSubcommitteeIndex: for subcommitteeIdx in SyncSubcommitteeIndex:
for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx): for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
let validator = node.getAttachedValidator( let validator = node.getAttachedValidator(
getStateField(node.dag.headState.data, validators), valIdx) getStateField(node.dag.headState, validators), valIdx)
if isNil(validator) or validator.index.isNone(): if isNil(validator) or validator.index.isNone():
continue continue
asyncSpawn createAndSendSyncCommitteeMessage(node, slot, validator, asyncSpawn createAndSendSyncCommitteeMessage(node, slot, validator,
@ -787,7 +787,7 @@ proc handleSyncCommitteeContributions(node: BeaconNode,
# to avoid the repeated offset calculations # to avoid the repeated offset calculations
for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx): for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
let validator = node.getAttachedValidator( let validator = node.getAttachedValidator(
getStateField(node.dag.headState.data, validators), valIdx) getStateField(node.dag.headState, validators), valIdx)
if validator == nil: if validator == nil:
continue continue
@ -904,7 +904,7 @@ proc sendAggregatedAttestations(
fork = node.dag.forkAtEpoch(slot.epoch) fork = node.dag.forkAtEpoch(slot.epoch)
genesis_validators_root = genesis_validators_root =
getStateField(node.dag.headState.data, genesis_validators_root) getStateField(node.dag.headState, genesis_validators_root)
committees_per_slot = get_committee_count_per_slot(epochRef) committees_per_slot = get_committee_count_per_slot(epochRef)
var var
@ -986,14 +986,14 @@ proc updateValidatorMetrics*(node: BeaconNode) =
if v.index.isNone(): if v.index.isNone():
0.Gwei 0.Gwei
elif v.index.get().uint64 >= elif v.index.get().uint64 >=
getStateField(node.dag.headState.data, balances).lenu64: getStateField(node.dag.headState, balances).lenu64:
debug "Cannot get validator balance, index out of bounds", debug "Cannot get validator balance, index out of bounds",
pubkey = shortLog(v.pubkey), index = v.index.get(), pubkey = shortLog(v.pubkey), index = v.index.get(),
balances = getStateField(node.dag.headState.data, balances).len, balances = getStateField(node.dag.headState, balances).len,
stateRoot = getStateRoot(node.dag.headState.data) stateRoot = getStateRoot(node.dag.headState)
0.Gwei 0.Gwei
else: else:
getStateField(node.dag.headState.data, balances).asSeq()[v.index.get()] getStateField(node.dag.headState, balances).asSeq()[v.index.get()]
if i < 64: if i < 64:
attached_validator_balance.set( attached_validator_balance.set(
@ -1300,7 +1300,7 @@ proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
let let
genesis_validators_root = genesis_validators_root =
getStateField(node.dag.headState.data, genesis_validators_root) getStateField(node.dag.headState, genesis_validators_root)
head = node.dag.head head = node.dag.head
# Getting the slot signature is expensive but cached - in "normal" cases we'll # Getting the slot signature is expensive but cached - in "normal" cases we'll

View File

@ -252,17 +252,17 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
(ref bellatrix.HashedBeaconState)()) (ref bellatrix.HashedBeaconState)())
withTimer(timers[tLoadState]): withTimer(timers[tLoadState]):
doAssert dag.updateStateData( doAssert dag.updateState(
stateData[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache) stateData[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
template processBlocks(blocks: auto) = template processBlocks(blocks: auto) =
for b in blocks.mitems(): for b in blocks.mitems():
if shouldShutDown: quit QuitSuccess if shouldShutDown: quit QuitSuccess
while getStateField(stateData[].data, slot) < b.message.slot: while getStateField(stateData[], slot) < b.message.slot:
let isEpoch = (getStateField(stateData[].data, slot) + 1).is_epoch() let isEpoch = (getStateField(stateData[], slot) + 1).is_epoch()
withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]): withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
process_slots( process_slots(
dag.cfg, stateData[].data, getStateField(stateData[].data, slot) + 1, cache, dag.cfg, stateData[], getStateField(stateData[], slot) + 1, cache,
info, {}).expect("Slot processing can't fail with correct inputs") info, {}).expect("Slot processing can't fail with correct inputs")
var start = Moment.now() var start = Moment.now()
@ -270,7 +270,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
if conf.resetCache: if conf.resetCache:
cache = StateCache() cache = StateCache()
let res = state_transition_block( let res = state_transition_block(
dag.cfg, stateData[].data, b, cache, {}, noRollback) dag.cfg, stateData[], b, cache, {}, noRollback)
if res.isErr(): if res.isErr():
dump("./", b) dump("./", b)
echo "State transition failed (!) ", res.error() echo "State transition failed (!) ", res.error()
@ -281,7 +281,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
withTimer(timers[tDbStore]): withTimer(timers[tDbStore]):
dbBenchmark.putBlock(b) dbBenchmark.putBlock(b)
withState(stateData[].data): withState(stateData[]):
if state.data.slot.is_epoch and conf.storeStates: if state.data.slot.is_epoch and conf.storeStates:
if state.data.slot.epoch < 2: if state.data.slot.epoch < 2:
dbBenchmark.putState(state.root, state.data) dbBenchmark.putState(state.root, state.data)
@ -416,7 +416,7 @@ proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
let tmpState = assignClone(dag.headState) let tmpState = assignClone(dag.headState)
dag.withUpdatedState(tmpState[], blckRef.atSlot(Slot(conf.slot))) do: dag.withUpdatedState(tmpState[], blckRef.atSlot(Slot(conf.slot))) do:
echo "Writing state..." echo "Writing state..."
withState(stateData.data): withState(state):
dump("./", state) dump("./", state)
do: raiseAssert "withUpdatedState failed" do: raiseAssert "withUpdatedState failed"
@ -462,7 +462,7 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
echo "Written all complete eras" echo "Written all complete eras"
break break
let name = withState(dag.headState.data): eraFileName(cfg, state.data, era) let name = withState(dag.headState): eraFileName(cfg, state.data, era)
if isFile(name): if isFile(name):
echo "Skipping ", name, " (already exists)" echo "Skipping ", name, " (already exists)"
else: else:
@ -481,7 +481,7 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
withTimer(timers[tState]): withTimer(timers[tState]):
dag.withUpdatedState(tmpState[], canonical) do: dag.withUpdatedState(tmpState[], canonical) do:
withState(stateData.data): withState(state):
group.finish(e2, state.data).get() group.finish(e2, state.data).get()
do: raiseAssert "withUpdatedState failed" do: raiseAssert "withUpdatedState failed"
@ -580,7 +580,7 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
(start, ends) = dag.getSlotRange(conf.perfSlot, conf.perfSlots) (start, ends) = dag.getSlotRange(conf.perfSlot, conf.perfSlots)
blockRefs = dag.getBlockRange(start, ends) blockRefs = dag.getBlockRange(start, ends)
perfs = newSeq[ValidatorPerformance]( perfs = newSeq[ValidatorPerformance](
getStateField(dag.headState.data, validators).len()) getStateField(dag.headState, validators).len())
cache = StateCache() cache = StateCache()
info = ForkedEpochInfo() info = ForkedEpochInfo()
blck: phase0.TrustedSignedBeaconBlock blck: phase0.TrustedSignedBeaconBlock
@ -591,26 +591,26 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
blockRefs[^1].slot.epoch, " - ", blockRefs[0].slot.epoch blockRefs[^1].slot.epoch, " - ", blockRefs[0].slot.epoch
let state = newClone(dag.headState) let state = newClone(dag.headState)
doAssert dag.updateStateData( doAssert dag.updateState(
state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache) state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
proc processEpoch() = proc processEpoch() =
let let
prev_epoch_target_slot = prev_epoch_target_slot =
state[].data.get_previous_epoch().start_slot() state[].get_previous_epoch().start_slot()
penultimate_epoch_end_slot = penultimate_epoch_end_slot =
if prev_epoch_target_slot == 0: Slot(0) if prev_epoch_target_slot == 0: Slot(0)
else: prev_epoch_target_slot - 1 else: prev_epoch_target_slot - 1
first_slot_empty = first_slot_empty =
state[].data.get_block_root_at_slot(prev_epoch_target_slot) == state[].get_block_root_at_slot(prev_epoch_target_slot) ==
state[].data.get_block_root_at_slot(penultimate_epoch_end_slot) state[].get_block_root_at_slot(penultimate_epoch_end_slot)
let first_slot_attesters = block: let first_slot_attesters = block:
let committees_per_slot = state[].data.get_committee_count_per_slot( let committees_per_slot = state[].get_committee_count_per_slot(
prev_epoch_target_slot.epoch, cache) prev_epoch_target_slot.epoch, cache)
var indices = HashSet[ValidatorIndex]() var indices = HashSet[ValidatorIndex]()
for committee_index in get_committee_indices(committees_per_slot): for committee_index in get_committee_indices(committees_per_slot):
for validator_index in state[].data.get_beacon_committee( for validator_index in state[].get_beacon_committee(
prev_epoch_target_slot, committee_index, cache): prev_epoch_target_slot, committee_index, cache):
indices.incl(validator_index) indices.incl(validator_index)
indices indices
@ -654,32 +654,32 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
blck = db.getBlock( blck = db.getBlock(
blockRefs[blockRefs.len - bi - 1].root, blockRefs[blockRefs.len - bi - 1].root,
phase0.TrustedSignedBeaconBlock).get() phase0.TrustedSignedBeaconBlock).get()
while getStateField(state[].data, slot) < blck.message.slot: while getStateField(state[], slot) < blck.message.slot:
let let
nextSlot = getStateField(state[].data, slot) + 1 nextSlot = getStateField(state[], slot) + 1
flags = flags =
if nextSlot == blck.message.slot: {skipLastStateRootCalculation} if nextSlot == blck.message.slot: {skipLastStateRootCalculation}
else: {} else: {}
process_slots( process_slots(
dag.cfg, state[].data, nextSlot, cache, info, flags).expect( dag.cfg, state[], nextSlot, cache, info, flags).expect(
"Slot processing can't fail with correct inputs") "Slot processing can't fail with correct inputs")
if getStateField(state[].data, slot).is_epoch(): if getStateField(state[], slot).is_epoch():
processEpoch() processEpoch()
let res = state_transition_block( let res = state_transition_block(
dag.cfg, state[].data, blck, cache, {}, noRollback) dag.cfg, state[], blck, cache, {}, noRollback)
if res.isErr: if res.isErr:
echo "State transition failed (!) ", res.error() echo "State transition failed (!) ", res.error()
quit 1 quit 1
# Capture rewards of empty slots as well # Capture rewards of empty slots as well
while getStateField(state[].data, slot) < ends: while getStateField(state[], slot) < ends:
process_slots( process_slots(
dag.cfg, state[].data, getStateField(state[].data, slot) + 1, cache, dag.cfg, state[], getStateField(state[], slot) + 1, cache,
info, {}).expect("Slot processing can't fail with correct inputs") info, {}).expect("Slot processing can't fail with correct inputs")
if getStateField(state[].data, slot).is_epoch(): if getStateField(state[], slot).is_epoch():
processEpoch() processEpoch()
echo "validator_index,attestation_hits,attestation_misses,head_attestation_hits,head_attestation_misses,target_attestation_hits,target_attestation_misses,delay_avg,first_slot_head_attester_when_first_slot_empty,first_slot_head_attester_when_first_slot_not_empty" echo "validator_index,attestation_hits,attestation_misses,head_attestation_hits,head_attestation_misses,target_attestation_hits,target_attestation_misses,delay_avg,first_slot_head_attester_when_first_slot_empty,first_slot_head_attester_when_first_slot_not_empty"
@ -865,34 +865,34 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
var cache = StateCache() var cache = StateCache()
let slot = if startSlot > 0: startSlot - 1 else: 0.Slot let slot = if startSlot > 0: startSlot - 1 else: 0.Slot
if blockRefs.len > 0: if blockRefs.len > 0:
discard dag.updateStateData(tmpState[], blockRefs[^1].atSlot(slot), false, cache) discard dag.updateState(tmpState[], blockRefs[^1].atSlot(slot), false, cache)
else: else:
discard dag.updateStateData(tmpState[], dag.head.atSlot(slot), false, cache) discard dag.updateState(tmpState[], dag.head.atSlot(slot), false, cache)
let savedValidatorsCount = outDb.getDbValidatorsCount let savedValidatorsCount = outDb.getDbValidatorsCount
var validatorsCount = getStateField(tmpState[].data, validators).len var validatorsCount = getStateField(tmpState[], validators).len
outDb.insertValidators(tmpState[].data, savedValidatorsCount, validatorsCount) outDb.insertValidators(tmpState[], savedValidatorsCount, validatorsCount)
var previousEpochBalances: seq[uint64] var previousEpochBalances: seq[uint64]
collectBalances(previousEpochBalances, tmpState[].data) collectBalances(previousEpochBalances, tmpState[])
var forkedInfo = ForkedEpochInfo() var forkedInfo = ForkedEpochInfo()
var rewardsAndPenalties: seq[RewardsAndPenalties] var rewardsAndPenalties: seq[RewardsAndPenalties]
rewardsAndPenalties.setLen(validatorsCount) rewardsAndPenalties.setLen(validatorsCount)
var auxiliaryState: AuxiliaryState var auxiliaryState: AuxiliaryState
auxiliaryState.copyParticipationFlags(tmpState[].data) auxiliaryState.copyParticipationFlags(tmpState[])
var aggregator = ValidatorDbAggregator.init( var aggregator = ValidatorDbAggregator.init(
aggregatedFilesOutputDir, conf.resolution, endEpoch) aggregatedFilesOutputDir, conf.resolution, endEpoch)
proc processEpoch() = proc processEpoch() =
let epoch = getStateField(tmpState[].data, slot).epoch let epoch = getStateField(tmpState[], slot).epoch
info "Processing epoch ...", epoch = epoch info "Processing epoch ...", epoch = epoch
var csvLines = newStringOfCap(1000000) var csvLines = newStringOfCap(1000000)
withState(tmpState[].data): withState(tmpState[]):
withEpochInfo(forkedInfo): withEpochInfo(forkedInfo):
doAssert state.data.balances.len == info.validators.len doAssert state.data.balances.len == info.validators.len
doAssert state.data.balances.len == previousEpochBalances.len doAssert state.data.balances.len == previousEpochBalances.len
@ -929,21 +929,21 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
aggregator.advanceEpochs(epoch, shouldShutDown) aggregator.advanceEpochs(epoch, shouldShutDown)
if shouldShutDown: quit QuitSuccess if shouldShutDown: quit QuitSuccess
collectBalances(previousEpochBalances, tmpState[].data) collectBalances(previousEpochBalances, tmpState[])
proc processSlots(ends: Slot, endsFlags: UpdateFlags) = proc processSlots(ends: Slot, endsFlags: UpdateFlags) =
var currentSlot = getStateField(tmpState[].data, slot) var currentSlot = getStateField(tmpState[], slot)
while currentSlot < ends: while currentSlot < ends:
let nextSlot = currentSlot + 1 let nextSlot = currentSlot + 1
let flags = if nextSlot == ends: endsFlags else: {} let flags = if nextSlot == ends: endsFlags else: {}
if nextSlot.isEpoch: if nextSlot.isEpoch:
withState(tmpState[].data): withState(tmpState[]):
var stateData = newClone(state.data) var stateData = newClone(state.data)
rewardsAndPenalties.collectEpochRewardsAndPenalties( rewardsAndPenalties.collectEpochRewardsAndPenalties(
stateData[], cache, cfg, flags) stateData[], cache, cfg, flags)
let res = process_slots(cfg, tmpState[].data, nextSlot, cache, forkedInfo, flags) let res = process_slots(cfg, tmpState[], nextSlot, cache, forkedInfo, flags)
doAssert res.isOk, "Slot processing can't fail with correct inputs" doAssert res.isOk, "Slot processing can't fail with correct inputs"
currentSlot = nextSlot currentSlot = nextSlot
@ -952,7 +952,7 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
processEpoch() processEpoch()
rewardsAndPenalties.setLen(0) rewardsAndPenalties.setLen(0)
rewardsAndPenalties.setLen(validatorsCount) rewardsAndPenalties.setLen(validatorsCount)
auxiliaryState.copyParticipationFlags(tmpState[].data) auxiliaryState.copyParticipationFlags(tmpState[])
clear cache clear cache
for bi in 0 ..< blockRefs.len: for bi in 0 ..< blockRefs.len:
@ -961,15 +961,15 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
processSlots(blck.message.slot, {skipLastStateRootCalculation}) processSlots(blck.message.slot, {skipLastStateRootCalculation})
rewardsAndPenalties.collectBlockRewardsAndPenalties( rewardsAndPenalties.collectBlockRewardsAndPenalties(
tmpState[].data, forkedBlock, auxiliaryState, cache, cfg) tmpState[], forkedBlock, auxiliaryState, cache, cfg)
let res = state_transition_block( let res = state_transition_block(
cfg, tmpState[].data, blck, cache, {}, noRollback) cfg, tmpState[], blck, cache, {}, noRollback)
if res.isErr: if res.isErr:
fatal "State transition failed (!)" fatal "State transition failed (!)"
quit QuitFailure quit QuitFailure
let newValidatorsCount = getStateField(tmpState[].data, validators).len let newValidatorsCount = getStateField(tmpState[], validators).len
if newValidatorsCount > validatorsCount: if newValidatorsCount > validatorsCount:
# Resize the structures in case a new validator has appeared after # Resize the structures in case a new validator has appeared after
# the state_transition_block procedure call ... # the state_transition_block procedure call ...
@ -977,7 +977,7 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
previousEpochBalances.setLen(newValidatorsCount) previousEpochBalances.setLen(newValidatorsCount)
# ... and add the new validators to the database. # ... and add the new validators to the database.
outDb.insertValidators( outDb.insertValidators(
tmpState[].data, validatorsCount, newValidatorsCount) tmpState[], validatorsCount, newValidatorsCount)
validatorsCount = newValidatorsCount validatorsCount = newValidatorsCount
# Capture rewards of empty slots as well, including the epoch that got # Capture rewards of empty slots as well, including the epoch that got

View File

@ -114,20 +114,20 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
dag.withUpdatedState(tmpState[], attestationHead) do: dag.withUpdatedState(tmpState[], attestationHead) do:
let committees_per_slot = let committees_per_slot =
get_committee_count_per_slot(stateData.data, slot.epoch, cache) get_committee_count_per_slot(state, slot.epoch, cache)
for committee_index in get_committee_indices(committees_per_slot): for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee( let committee = get_beacon_committee(
stateData.data, slot, committee_index, cache) state, slot, committee_index, cache)
for index_in_committee, validator_index in committee: for index_in_committee, validator_index in committee:
if rand(r, 1.0) <= attesterRatio: if rand(r, 1.0) <= attesterRatio:
let let
data = makeAttestationData( data = makeAttestationData(
stateData.data, slot, committee_index, blck.root) state, slot, committee_index, blck.root)
sig = sig =
get_attestation_signature(getStateField(stateData.data, fork), get_attestation_signature(getStateField(state, fork),
getStateField(stateData.data, genesis_validators_root), getStateField(state, genesis_validators_root),
data, MockPrivKeys[validator_index]) data, MockPrivKeys[validator_index])
var aggregation_bits = CommitteeValidatorsBits.init(committee.len) var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
aggregation_bits.setBit index_in_committee aggregation_bits.setBit index_in_committee
@ -237,14 +237,14 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
signedContributionAndProof, res.get()[0]) signedContributionAndProof, res.get()[0])
proc getNewBlock[T]( proc getNewBlock[T](
stateData: var StateData, slot: Slot, cache: var StateCache): T = state: var ForkedHashedBeaconState, slot: Slot, cache: var StateCache): T =
let let
finalizedEpochRef = dag.getFinalizedEpochRef() finalizedEpochRef = dag.getFinalizedEpochRef()
proposerIdx = get_beacon_proposer_index( proposerIdx = get_beacon_proposer_index(
stateData.data, cache, getStateField(stateData.data, slot)).get() state, cache, getStateField(state, slot)).get()
privKey = MockPrivKeys[proposerIdx] privKey = MockPrivKeys[proposerIdx]
eth1ProposalData = eth1Chain.getBlockProposalData( eth1ProposalData = eth1Chain.getBlockProposalData(
stateData.data, state,
finalizedEpochRef.eth1_data, finalizedEpochRef.eth1_data,
finalizedEpochRef.eth1_deposit_index) finalizedEpochRef.eth1_deposit_index)
sync_aggregate = sync_aggregate =
@ -256,11 +256,11 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
static: doAssert false static: doAssert false
hashedState = hashedState =
when T is phase0.SignedBeaconBlock: when T is phase0.SignedBeaconBlock:
addr stateData.data.phase0Data addr state.phase0Data
elif T is altair.SignedBeaconBlock: elif T is altair.SignedBeaconBlock:
addr stateData.data.altairData addr state.altairData
elif T is bellatrix.SignedBeaconBlock: elif T is bellatrix.SignedBeaconBlock:
addr stateData.data.bellatrixData addr state.bellatrixData
else: else:
static: doAssert false static: doAssert false
message = makeBeaconBlock( message = makeBeaconBlock(
@ -268,12 +268,12 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
hashedState[], hashedState[],
proposerIdx, proposerIdx,
privKey.genRandaoReveal( privKey.genRandaoReveal(
getStateField(stateData.data, fork), getStateField(state, fork),
getStateField(stateData.data, genesis_validators_root), getStateField(state, genesis_validators_root),
slot).toValidatorSig(), slot).toValidatorSig(),
eth1ProposalData.vote, eth1ProposalData.vote,
default(GraffitiBytes), default(GraffitiBytes),
attPool.getAttestationsForBlock(stateData.data, cache), attPool.getAttestationsForBlock(state, cache),
eth1ProposalData.deposits, eth1ProposalData.deposits,
BeaconBlockExits(), BeaconBlockExits(),
sync_aggregate, sync_aggregate,
@ -292,8 +292,8 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
# Careful, state no longer valid after here because of the await.. # Careful, state no longer valid after here because of the await..
newBlock.signature = withTimerRet(timers[tSignBlock]): newBlock.signature = withTimerRet(timers[tSignBlock]):
get_block_signature( get_block_signature(
getStateField(stateData.data, fork), getStateField(state, fork),
getStateField(stateData.data, genesis_validators_root), getStateField(state, genesis_validators_root),
newBlock.message.slot, newBlock.message.slot,
blockRoot, privKey).toValidatorSig() blockRoot, privKey).toValidatorSig()
@ -305,7 +305,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do: dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do:
let let
newBlock = getNewBlock[phase0.SignedBeaconBlock](stateData, slot, cache) newBlock = getNewBlock[phase0.SignedBeaconBlock](state, slot, cache)
added = dag.addHeadBlock(verifier, newBlock) do ( added = dag.addHeadBlock(verifier, newBlock) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef): epochRef: EpochRef):
@ -313,7 +313,6 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
attPool.addForkChoice( attPool.addForkChoice(
epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time) epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)
blck() = added[]
dag.updateHead(added[], quarantine[]) dag.updateHead(added[], quarantine[])
if dag.needStateCachesAndForkChoicePruning(): if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG() dag.pruneStateCachesDAG()
@ -327,7 +326,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do: dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do:
let let
newBlock = getNewBlock[altair.SignedBeaconBlock](stateData, slot, cache) newBlock = getNewBlock[altair.SignedBeaconBlock](state, slot, cache)
added = dag.addHeadBlock(verifier, newBlock) do ( added = dag.addHeadBlock(verifier, newBlock) do (
blckRef: BlockRef, signedBlock: altair.TrustedSignedBeaconBlock, blckRef: BlockRef, signedBlock: altair.TrustedSignedBeaconBlock,
epochRef: EpochRef): epochRef: EpochRef):
@ -335,7 +334,6 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
attPool.addForkChoice( attPool.addForkChoice(
epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time) epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)
blck() = added[]
dag.updateHead(added[], quarantine[]) dag.updateHead(added[], quarantine[])
if dag.needStateCachesAndForkChoicePruning(): if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG() dag.pruneStateCachesDAG()
@ -349,7 +347,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do: dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do:
let let
newBlock = getNewBlock[bellatrix.SignedBeaconBlock](stateData, slot, cache) newBlock = getNewBlock[bellatrix.SignedBeaconBlock](state, slot, cache)
added = dag.addHeadBlock(verifier, newBlock) do ( added = dag.addHeadBlock(verifier, newBlock) do (
blckRef: BlockRef, signedBlock: bellatrix.TrustedSignedBeaconBlock, blckRef: BlockRef, signedBlock: bellatrix.TrustedSignedBeaconBlock,
epochRef: EpochRef): epochRef: EpochRef):
@ -357,7 +355,6 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
attPool.addForkChoice( attPool.addForkChoice(
epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time) epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)
blck() = added[]
dag.updateHead(added[], quarantine[]) dag.updateHead(added[], quarantine[])
if dag.needStateCachesAndForkChoicePruning(): if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG() dag.pruneStateCachesDAG()
@ -420,7 +417,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
# TODO if attestation pool was smarter, it would include older attestations # TODO if attestation pool was smarter, it would include older attestations
# too! # too!
verifyConsensus(dag.headState.data, attesterRatio * blockRatio) verifyConsensus(dag.headState, attesterRatio * blockRatio)
if t == tEpoch: if t == tEpoch:
echo &". slot: {shortLog(slot)} ", echo &". slot: {shortLog(slot)} ",
@ -432,9 +429,9 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
if replay: if replay:
withTimer(timers[tReplay]): withTimer(timers[tReplay]):
var cache = StateCache() var cache = StateCache()
doAssert dag.updateStateData( doAssert dag.updateState(
replayState[], dag.head.atSlot(Slot(slots)), false, cache) replayState[], dag.head.atSlot(Slot(slots)), false, cache)
echo "Done!" echo "Done!"
printTimers(dag.headState.data, attesters, true, timers) printTimers(dag.headState, attesters, true, timers)

View File

@ -66,7 +66,7 @@ cli do(validatorsDir: string, secretsDir: string,
warn "Unkownn validator", pubkey warn "Unkownn validator", pubkey
var var
blockRoot = withState(state[]): state.latest_block_root() blockRoot = withState(state[]): state.latest_block_root
cache: StateCache cache: StateCache
info: ForkedEpochInfo info: ForkedEpochInfo
aggregates: seq[Attestation] aggregates: seq[Attestation]

View File

@ -65,7 +65,7 @@ proc block_for_next_slot(
let attestations = let attestations =
if withAttestations: if withAttestations:
let block_root = withState(forked): state.latest_block_root() let block_root = withState(forked): state.latest_block_root
makeFullAttestations(forked, block_root, state.slot, cache) makeFullAttestations(forked, block_root, state.slot, cache)
else: else:
@[] @[]

View File

@ -162,12 +162,12 @@ proc stepOnBlock(
dag: ChainDagRef, dag: ChainDagRef,
fkChoice: ref ForkChoice, fkChoice: ref ForkChoice,
verifier: var BatchVerifier, verifier: var BatchVerifier,
state: var StateData, state: var ForkedHashedBeaconState,
stateCache: var StateCache, stateCache: var StateCache,
signedBlock: ForkySignedBeaconBlock, signedBlock: ForkySignedBeaconBlock,
time: BeaconTime): Result[BlockRef, BlockError] = time: BeaconTime): Result[BlockRef, BlockError] =
# 1. Move state to proper slot. # 1. Move state to proper slot.
doAssert dag.updateStateData( doAssert dag.updateState(
state, state,
dag.head.atSlot(time.slotOrZero), dag.head.atSlot(time.slotOrZero),
save = false, save = false,

View File

@ -73,15 +73,16 @@ suite "Attestation pool processing" & preset():
# Slot 0 is a finalized slot - won't be making attestations for it.. # Slot 0 is a finalized slot - won't be making attestations for it..
check: check:
process_slots( process_slots(
dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, info, dag.cfg, state[], getStateField(state[], slot) + 1, cache, info,
{}).isOk() {}).isOk()
test "Can add and retrieve simple attestations" & preset(): test "Can add and retrieve simple attestations" & preset():
let let
# Create an attestation for slot 1! # Create an attestation for slot 1!
bc0 = get_beacon_committee( bc0 = get_beacon_committee(
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache) state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
attestation = makeAttestation(state[].data, state.blck.root, bc0[0], cache) attestation = makeAttestation(
state[], state[].latest_block_root, bc0[0], cache)
pool[].addAttestation( pool[].addAttestation(
attestation, @[bc0[0]], attestation.loadSig, attestation, @[bc0[0]], attestation.loadSig,
@ -104,11 +105,11 @@ suite "Attestation pool processing" & preset():
none(Slot), some(CommitteeIndex(attestation.data.index + 1)))) == [] none(Slot), some(CommitteeIndex(attestation.data.index + 1)))) == []
process_slots( process_slots(
defaultRuntimeConfig, state.data, defaultRuntimeConfig, state[],
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
info, {}).isOk() info, {}).isOk()
let attestations = pool[].getAttestationsForBlock(state.data, cache) let attestations = pool[].getAttestationsForBlock(state[], cache)
check: check:
attestations.len == 1 attestations.len == 1
@ -116,40 +117,40 @@ suite "Attestation pool processing" & preset():
let let
root1 = addTestBlock( root1 = addTestBlock(
state.data, cache, attestations = attestations, state[], cache, attestations = attestations,
nextSlot = false).phase0Data.root nextSlot = false).phase0Data.root
bc1 = get_beacon_committee( bc1 = get_beacon_committee(
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache) state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
att1 = makeAttestation(state[].data, root1, bc1[0], cache) att1 = makeAttestation(state[], root1, bc1[0], cache)
check: check:
withState(state.data): state.latest_block_root == root1 withState(state[]): state.latest_block_root == root1
process_slots( process_slots(
defaultRuntimeConfig, state.data, defaultRuntimeConfig, state[],
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
info, {}).isOk() info, {}).isOk()
withState(state.data): state.latest_block_root == root1 withState(state[]): state.latest_block_root == root1
check: check:
# shouldn't include already-included attestations # shouldn't include already-included attestations
pool[].getAttestationsForBlock(state.data, cache) == [] pool[].getAttestationsForBlock(state[], cache) == []
pool[].addAttestation( pool[].addAttestation(
att1, @[bc1[0]], att1.loadSig, att1.data.slot.start_beacon_time) att1, @[bc1[0]], att1.loadSig, att1.data.slot.start_beacon_time)
check: check:
# but new ones should go in # but new ones should go in
pool[].getAttestationsForBlock(state.data, cache).len() == 1 pool[].getAttestationsForBlock(state[], cache).len() == 1
let let
att2 = makeAttestation(state[].data, root1, bc1[1], cache) att2 = makeAttestation(state[], root1, bc1[1], cache)
pool[].addAttestation( pool[].addAttestation(
att2, @[bc1[1]], att2.loadSig, att2.data.slot.start_beacon_time) att2, @[bc1[1]], att2.loadSig, att2.data.slot.start_beacon_time)
let let
combined = pool[].getAttestationsForBlock(state.data, cache) combined = pool[].getAttestationsForBlock(state[], cache)
check: check:
# New attestations should be combined with old attestations # New attestations should be combined with old attestations
@ -162,18 +163,18 @@ suite "Attestation pool processing" & preset():
check: check:
# readding the combined attestation shouldn't have an effect # readding the combined attestation shouldn't have an effect
pool[].getAttestationsForBlock(state.data, cache).len() == 1 pool[].getAttestationsForBlock(state[], cache).len() == 1
let let
# Someone votes for a different root # Someone votes for a different root
att3 = makeAttestation(state[].data, Eth2Digest(), bc1[2], cache) att3 = makeAttestation(state[], Eth2Digest(), bc1[2], cache)
pool[].addAttestation( pool[].addAttestation(
att3, @[bc1[2]], att3.loadSig, att3.data.slot.start_beacon_time) att3, @[bc1[2]], att3.loadSig, att3.data.slot.start_beacon_time)
check: check:
# We should now get both attestations for the block, but the aggregate # We should now get both attestations for the block, but the aggregate
# should be the one with the most votes # should be the one with the most votes
pool[].getAttestationsForBlock(state.data, cache).len() == 2 pool[].getAttestationsForBlock(state[], cache).len() == 2
pool[].getAggregatedAttestation(2.Slot, 0.CommitteeIndex). pool[].getAggregatedAttestation(2.Slot, 0.CommitteeIndex).
get().aggregation_bits.countOnes() == 2 get().aggregation_bits.countOnes() == 2
pool[].getAggregatedAttestation(2.Slot, hash_tree_root(att2.data)). pool[].getAggregatedAttestation(2.Slot, hash_tree_root(att2.data)).
@ -181,7 +182,7 @@ suite "Attestation pool processing" & preset():
let let
# Someone votes for a different root # Someone votes for a different root
att4 = makeAttestation(state[].data, Eth2Digest(), bc1[2], cache) att4 = makeAttestation(state[], Eth2Digest(), bc1[2], cache)
pool[].addAttestation( pool[].addAttestation(
att4, @[bc1[2]], att3.loadSig, att3.data.slot.start_beacon_time) att4, @[bc1[2]], att3.loadSig, att3.data.slot.start_beacon_time)
@ -189,14 +190,18 @@ suite "Attestation pool processing" & preset():
let let
# Create an attestation for slot 1! # Create an attestation for slot 1!
bc0 = get_beacon_committee( bc0 = get_beacon_committee(
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache) state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
var var
att0 = makeAttestation(state[].data, state.blck.root, bc0[0], cache) att0 = makeAttestation(
state[], state[].latest_block_root, bc0[0], cache)
att0x = att0 att0x = att0
att1 = makeAttestation(state[].data, state.blck.root, bc0[1], cache) att1 = makeAttestation(
att2 = makeAttestation(state[].data, state.blck.root, bc0[2], cache) state[], state[].latest_block_root, bc0[1], cache)
att3 = makeAttestation(state[].data, state.blck.root, bc0[3], cache) att2 = makeAttestation(
state[], state[].latest_block_root, bc0[2], cache)
att3 = makeAttestation(
state[], state[].latest_block_root, bc0[3], cache)
# Both attestations include member 2 but neither is a subset of the other # Both attestations include member 2 but neither is a subset of the other
att0.combine(att2) att0.combine(att2)
@ -212,13 +217,13 @@ suite "Attestation pool processing" & preset():
check: check:
process_slots( process_slots(
defaultRuntimeConfig, state.data, defaultRuntimeConfig, state[],
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache, getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
info, {}).isOk() info, {}).isOk()
check: check:
pool[].covers(att0.data, att0.aggregation_bits) pool[].covers(att0.data, att0.aggregation_bits)
pool[].getAttestationsForBlock(state.data, cache).len() == 2 pool[].getAttestationsForBlock(state[], cache).len() == 2
# Can get either aggregate here, random! # Can get either aggregate here, random!
pool[].getAggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome() pool[].getAggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome()
@ -227,7 +232,7 @@ suite "Attestation pool processing" & preset():
att3, @[bc0[3]], att3.loadSig, att3.data.slot.start_beacon_time) att3, @[bc0[3]], att3.loadSig, att3.data.slot.start_beacon_time)
block: block:
let attestations = pool[].getAttestationsForBlock(state.data, cache) let attestations = pool[].getAttestationsForBlock(state[], cache)
check: check:
attestations.len() == 2 attestations.len() == 2
attestations[0].aggregation_bits.countOnes() == 3 attestations[0].aggregation_bits.countOnes() == 3
@ -240,7 +245,7 @@ suite "Attestation pool processing" & preset():
att0x, @[bc0[0]], att0x.loadSig, att0x.data.slot.start_beacon_time) att0x, @[bc0[0]], att0x.loadSig, att0x.data.slot.start_beacon_time)
block: block:
let attestations = pool[].getAttestationsForBlock(state.data, cache) let attestations = pool[].getAttestationsForBlock(state[], cache)
check: check:
attestations.len() == 1 attestations.len() == 1
attestations[0].aggregation_bits.countOnes() == 4 attestations[0].aggregation_bits.countOnes() == 4
@ -253,46 +258,48 @@ suite "Attestation pool processing" & preset():
root.data[0..<8] = toBytesBE(i.uint64) root.data[0..<8] = toBytesBE(i.uint64)
let let
bc0 = get_beacon_committee( bc0 = get_beacon_committee(
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache) state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
for j in 0..<bc0.len(): for j in 0..<bc0.len():
root.data[8..<16] = toBytesBE(j.uint64) root.data[8..<16] = toBytesBE(j.uint64)
let att = makeAttestation(state[].data, root, bc0[j], cache) let att = makeAttestation(state[], root, bc0[j], cache)
pool[].addAttestation( pool[].addAttestation(
att, @[bc0[j]], att.loadSig, att.data.slot.start_beacon_time) att, @[bc0[j]], att.loadSig, att.data.slot.start_beacon_time)
inc attestations inc attestations
check: check:
process_slots( process_slots(
defaultRuntimeConfig, state.data, defaultRuntimeConfig, state[],
getStateField(state.data, slot) + 1, cache, info, {}).isOk() getStateField(state[], slot) + 1, cache, info, {}).isOk()
doAssert attestations.uint64 > MAX_ATTESTATIONS, doAssert attestations.uint64 > MAX_ATTESTATIONS,
"6*SLOTS_PER_EPOCH validators > 128 mainnet MAX_ATTESTATIONS" "6*SLOTS_PER_EPOCH validators > 128 mainnet MAX_ATTESTATIONS"
check: check:
# Fill block with attestations # Fill block with attestations
pool[].getAttestationsForBlock(state.data, cache).lenu64() == pool[].getAttestationsForBlock(state[], cache).lenu64() ==
MAX_ATTESTATIONS MAX_ATTESTATIONS
pool[].getAggregatedAttestation( pool[].getAggregatedAttestation(
getStateField(state.data, slot) - 1, 0.CommitteeIndex).isSome() getStateField(state[], slot) - 1, 0.CommitteeIndex).isSome()
test "Attestations may arrive in any order" & preset(): test "Attestations may arrive in any order" & preset():
var cache = StateCache() var cache = StateCache()
let let
# Create an attestation for slot 1! # Create an attestation for slot 1!
bc0 = get_beacon_committee( bc0 = get_beacon_committee(
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache) state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
attestation0 = makeAttestation(state[].data, state.blck.root, bc0[0], cache) attestation0 = makeAttestation(
state[], state[].latest_block_root, bc0[0], cache)
check: check:
process_slots( process_slots(
defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1, defaultRuntimeConfig, state[], getStateField(state[], slot) + 1,
cache, info, {}).isOk() cache, info, {}).isOk()
let let
bc1 = get_beacon_committee(state[].data, bc1 = get_beacon_committee(state[],
getStateField(state.data, slot), 0.CommitteeIndex, cache) getStateField(state[], slot), 0.CommitteeIndex, cache)
attestation1 = makeAttestation(state[].data, state.blck.root, bc1[0], cache) attestation1 = makeAttestation(
state[], state[].latest_block_root, bc1[0], cache)
# test reverse order # test reverse order
pool[].addAttestation( pool[].addAttestation(
@ -302,7 +309,7 @@ suite "Attestation pool processing" & preset():
attestation0, @[bc0[0]], attestation0.loadSig, attestation0, @[bc0[0]], attestation0.loadSig,
attestation0.data.slot.start_beacon_time) attestation0.data.slot.start_beacon_time)
let attestations = pool[].getAttestationsForBlock(state.data, cache) let attestations = pool[].getAttestationsForBlock(state[], cache)
check: check:
attestations.len == 1 attestations.len == 1
@ -312,11 +319,11 @@ suite "Attestation pool processing" & preset():
let let
# Create an attestation for slot 1! # Create an attestation for slot 1!
bc0 = get_beacon_committee( bc0 = get_beacon_committee(
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache) state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
attestation0 = attestation0 =
makeAttestation(state[].data, state.blck.root, bc0[0], cache) makeAttestation(state[], state[].latest_block_root, bc0[0], cache)
attestation1 = attestation1 =
makeAttestation(state[].data, state.blck.root, bc0[1], cache) makeAttestation(state[], state[].latest_block_root, bc0[1], cache)
pool[].addAttestation( pool[].addAttestation(
attestation0, @[bc0[0]], attestation0.loadSig, attestation0, @[bc0[0]], attestation0.loadSig,
@ -327,10 +334,10 @@ suite "Attestation pool processing" & preset():
check: check:
process_slots( process_slots(
defaultRuntimeConfig, state.data, defaultRuntimeConfig, state[],
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk() MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk()
let attestations = pool[].getAttestationsForBlock(state.data, cache) let attestations = pool[].getAttestationsForBlock(state[], cache)
check: check:
attestations.len == 1 attestations.len == 1
@ -341,11 +348,11 @@ suite "Attestation pool processing" & preset():
var var
# Create an attestation for slot 1! # Create an attestation for slot 1!
bc0 = get_beacon_committee( bc0 = get_beacon_committee(
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache) state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
attestation0 = attestation0 = makeAttestation(
makeAttestation(state[].data, state.blck.root, bc0[0], cache) state[], state[].latest_block_root, bc0[0], cache)
attestation1 = attestation1 = makeAttestation(
makeAttestation(state[].data, state.blck.root, bc0[1], cache) state[], state[].latest_block_root, bc0[1], cache)
attestation0.combine(attestation1) attestation0.combine(attestation1)
@ -358,10 +365,10 @@ suite "Attestation pool processing" & preset():
check: check:
process_slots( process_slots(
defaultRuntimeConfig, state.data, defaultRuntimeConfig, state[],
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk() MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk()
let attestations = pool[].getAttestationsForBlock(state.data, cache) let attestations = pool[].getAttestationsForBlock(state[], cache)
check: check:
attestations.len == 1 attestations.len == 1
@ -370,12 +377,12 @@ suite "Attestation pool processing" & preset():
var cache = StateCache() var cache = StateCache()
var var
# Create an attestation for slot 1! # Create an attestation for slot 1!
bc0 = get_beacon_committee(state[].data, bc0 = get_beacon_committee(state[],
getStateField(state.data, slot), 0.CommitteeIndex, cache) getStateField(state[], slot), 0.CommitteeIndex, cache)
attestation0 = attestation0 = makeAttestation(
makeAttestation(state[].data, state.blck.root, bc0[0], cache) state[], state[].latest_block_root, bc0[0], cache)
attestation1 = attestation1 = makeAttestation(
makeAttestation(state[].data, state.blck.root, bc0[1], cache) state[], state[].latest_block_root, bc0[1], cache)
attestation0.combine(attestation1) attestation0.combine(attestation1)
@ -388,10 +395,10 @@ suite "Attestation pool processing" & preset():
check: check:
process_slots( process_slots(
defaultRuntimeConfig, state.data, defaultRuntimeConfig, state[],
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk() MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk()
let attestations = pool[].getAttestationsForBlock(state.data, cache) let attestations = pool[].getAttestationsForBlock(state[], cache)
check: check:
attestations.len == 1 attestations.len == 1
@ -399,7 +406,7 @@ suite "Attestation pool processing" & preset():
test "Fork choice returns latest block with no attestations": test "Fork choice returns latest block with no attestations":
var cache = StateCache() var cache = StateCache()
let let
b1 = addTestBlock(state.data, cache).phase0Data b1 = addTestBlock(state[], cache).phase0Data
b1Add = dag.addHeadBlock(verifier, b1) do ( b1Add = dag.addHeadBlock(verifier, b1) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef): epochRef: EpochRef):
@ -412,7 +419,7 @@ suite "Attestation pool processing" & preset():
head == b1Add[] head == b1Add[]
let let
b2 = addTestBlock(state.data, cache).phase0Data b2 = addTestBlock(state[], cache).phase0Data
b2Add = dag.addHeadBlock(verifier, b2) do ( b2Add = dag.addHeadBlock(verifier, b2) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef): epochRef: EpochRef):
@ -428,7 +435,7 @@ suite "Attestation pool processing" & preset():
test "Fork choice returns block with attestation": test "Fork choice returns block with attestation":
var cache = StateCache() var cache = StateCache()
let let
b10 = makeTestBlock(state.data, cache).phase0Data b10 = makeTestBlock(state[], cache).phase0Data
b10Add = dag.addHeadBlock(verifier, b10) do ( b10Add = dag.addHeadBlock(verifier, b10) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef): epochRef: EpochRef):
@ -442,7 +449,7 @@ suite "Attestation pool processing" & preset():
head == b10Add[] head == b10Add[]
let let
b11 = makeTestBlock(state.data, cache, b11 = makeTestBlock(state[], cache,
graffiti = GraffitiBytes [1'u8, 0, 0, 0 ,0 ,0 ,0 ,0 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] graffiti = GraffitiBytes [1'u8, 0, 0, 0 ,0 ,0 ,0 ,0 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
).phase0Data ).phase0Data
b11Add = dag.addHeadBlock(verifier, b11) do ( b11Add = dag.addHeadBlock(verifier, b11) do (
@ -453,9 +460,9 @@ suite "Attestation pool processing" & preset():
epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time) epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)
bc1 = get_beacon_committee( bc1 = get_beacon_committee(
state[].data, getStateField(state.data, slot) - 1, 1.CommitteeIndex, state[], getStateField(state[], slot) - 1, 1.CommitteeIndex,
cache) cache)
attestation0 = makeAttestation(state[].data, b10.root, bc1[0], cache) attestation0 = makeAttestation(state[], b10.root, bc1[0], cache)
pool[].addAttestation( pool[].addAttestation(
attestation0, @[bc1[0]], attestation0.loadSig, attestation0, @[bc1[0]], attestation0.loadSig,
@ -468,8 +475,8 @@ suite "Attestation pool processing" & preset():
head2 == b10Add[] head2 == b10Add[]
let let
attestation1 = makeAttestation(state[].data, b11.root, bc1[1], cache) attestation1 = makeAttestation(state[], b11.root, bc1[1], cache)
attestation2 = makeAttestation(state[].data, b11.root, bc1[2], cache) attestation2 = makeAttestation(state[], b11.root, bc1[2], cache)
pool[].addAttestation( pool[].addAttestation(
attestation1, @[bc1[1]], attestation1.loadSig, attestation1, @[bc1[1]], attestation1.loadSig,
attestation1.data.slot.start_beacon_time) attestation1.data.slot.start_beacon_time)
@ -494,7 +501,7 @@ suite "Attestation pool processing" & preset():
test "Trying to add a block twice tags the second as an error": test "Trying to add a block twice tags the second as an error":
var cache = StateCache() var cache = StateCache()
let let
b10 = makeTestBlock(state.data, cache).phase0Data b10 = makeTestBlock(state[], cache).phase0Data
b10Add = dag.addHeadBlock(verifier, b10) do ( b10Add = dag.addHeadBlock(verifier, b10) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef): epochRef: EpochRef):
@ -525,7 +532,7 @@ suite "Attestation pool processing" & preset():
dag.updateFlags.incl {skipBLSValidation} dag.updateFlags.incl {skipBLSValidation}
var cache = StateCache() var cache = StateCache()
let let
b10 = addTestBlock(state.data, cache).phase0Data b10 = addTestBlock(state[], cache).phase0Data
b10Add = dag.addHeadBlock(verifier, b10) do ( b10Add = dag.addHeadBlock(verifier, b10) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef): epochRef: EpochRef):
@ -547,10 +554,10 @@ suite "Attestation pool processing" & preset():
for epoch in 0 ..< 5: for epoch in 0 ..< 5:
let start_slot = start_slot(Epoch epoch) let start_slot = start_slot(Epoch epoch)
let committees_per_slot = let committees_per_slot =
get_committee_count_per_slot(state[].data, Epoch epoch, cache) get_committee_count_per_slot(state[], Epoch epoch, cache)
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH: for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
let new_block = addTestBlock( let new_block = addTestBlock(
state.data, cache, attestations = attestations).phase0Data state[], cache, attestations = attestations).phase0Data
let blockRef = dag.addHeadBlock(verifier, new_block) do ( let blockRef = dag.addHeadBlock(verifier, new_block) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
@ -567,7 +574,7 @@ suite "Attestation pool processing" & preset():
attestations.setlen(0) attestations.setlen(0)
for committee_index in get_committee_indices(committees_per_slot): for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee( let committee = get_beacon_committee(
state[].data, getStateField(state.data, slot), committee_index, state[], getStateField(state[], slot), committee_index,
cache) cache)
# Create a bitfield filled with the given count per attestation, # Create a bitfield filled with the given count per attestation,
@ -578,8 +585,7 @@ suite "Attestation pool processing" & preset():
attestations.add Attestation( attestations.add Attestation(
aggregation_bits: aggregation_bits, aggregation_bits: aggregation_bits,
data: makeAttestationData( data: makeAttestationData(state[], getStateField(state[], slot),
state[].data, getStateField(state.data, slot),
committee_index, blockRef.get().root) committee_index, blockRef.get().root)
# signature: ValidatorSig() # signature: ValidatorSig()
) )

View File

@ -68,7 +68,7 @@ proc getTestStates(stateFork: BeaconStateFork): auto =
db = makeTestDB(SLOTS_PER_EPOCH) db = makeTestDB(SLOTS_PER_EPOCH)
validatorMonitor = newClone(ValidatorMonitor.init()) validatorMonitor = newClone(ValidatorMonitor.init())
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
var testStates = getTestStates(dag.headState.data, stateFork) var testStates = getTestStates(dag.headState, stateFork)
# Ensure transitions beyond just adding validators and increasing slots # Ensure transitions beyond just adding validators and increasing slots
sort(testStates) do (x, y: ref ForkedHashedBeaconState) -> int: sort(testStates) do (x, y: ref ForkedHashedBeaconState) -> int:
@ -338,7 +338,7 @@ suite "Beacon chain DB" & preset():
let restoreAddr = addr dag.headState let restoreAddr = addr dag.headState
func restore() = func restore() =
assign(state[], restoreAddr[].data) assign(state[], restoreAddr[])
check: check:
state[].phase0Data.data.slot == 10.Slot state[].phase0Data.data.slot == 10.Slot
@ -361,7 +361,7 @@ suite "Beacon chain DB" & preset():
let restoreAddr = addr dag.headState let restoreAddr = addr dag.headState
func restore() = func restore() =
assign(state[], restoreAddr[].data) assign(state[], restoreAddr[])
check: check:
state[].altairData.data.slot == 10.Slot state[].altairData.data.slot == 10.Slot
@ -387,7 +387,7 @@ suite "Beacon chain DB" & preset():
let restoreAddr = addr dag.headState let restoreAddr = addr dag.headState
func restore() = func restore() =
assign(state[], restoreAddr[].data) assign(state[], restoreAddr[])
check: check:
state[].bellatrixData.data.slot == 10.Slot state[].bellatrixData.data.slot == 10.Slot

View File

@ -34,7 +34,7 @@ suite "Block processor" & preset():
quarantine = newClone(Quarantine.init()) quarantine = newClone(Quarantine.init())
attestationPool = newClone(AttestationPool.init(dag, quarantine)) attestationPool = newClone(AttestationPool.init(dag, quarantine))
consensusManager = ConsensusManager.new(dag, attestationPool, quarantine) consensusManager = ConsensusManager.new(dag, attestationPool, quarantine)
state = newClone(dag.headState.data) state = newClone(dag.headState)
cache = StateCache() cache = StateCache()
b1 = addTestBlock(state[], cache).phase0Data b1 = addTestBlock(state[], cache).phase0Data
b2 = addTestBlock(state[], cache).phase0Data b2 = addTestBlock(state[], cache).phase0Data
@ -92,7 +92,7 @@ suite "Block processor" & preset():
check: check:
# ensure we loaded the correct head state # ensure we loaded the correct head state
dag2.head.root == b2.root dag2.head.root == b2.root
getStateRoot(dag2.headState.data) == b2.message.state_root getStateRoot(dag2.headState) == b2.message.state_root
dag2.getBlockRef(b1.root).isSome() dag2.getBlockRef(b1.root).isSome()
dag2.getBlockRef(b2.root).isSome() dag2.getBlockRef(b2.root).isSome()
dag2.heads.len == 1 dag2.heads.len == 1

View File

@ -63,7 +63,7 @@ suite "Block pool processing" & preset():
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
verifier = BatchVerifier(rng: keys.newRng(), taskpool: Taskpool.new()) verifier = BatchVerifier(rng: keys.newRng(), taskpool: Taskpool.new())
quarantine = Quarantine.init() quarantine = Quarantine.init()
state = newClone(dag.headState.data) state = newClone(dag.headState)
cache = StateCache() cache = StateCache()
info = ForkedEpochInfo() info = ForkedEpochInfo()
att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache) att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
@ -97,7 +97,7 @@ suite "Block pool processing" & preset():
b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback) b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback)
b2Get = dag.getForkedBlock(b2.root) b2Get = dag.getForkedBlock(b2.root)
er = dag.findEpochRef(b1Add[], b1Add[].slot.epoch) er = dag.findEpochRef(b1Add[], b1Add[].slot.epoch)
validators = getStateField(dag.headState.data, validators).lenu64() validators = getStateField(dag.headState, validators).lenu64()
check: check:
b2Get.isSome() b2Get.isSome()
@ -185,8 +185,8 @@ suite "Block pool processing" & preset():
db.getStateRoot(stateCheckpoint.blck.root, stateCheckpoint.slot).isErr() db.getStateRoot(stateCheckpoint.blck.root, stateCheckpoint.slot).isErr()
# this is required for the test to work - it's not a "public" # this is required for the test to work - it's not a "public"
# post-condition of getEpochRef # post-condition of getEpochRef
getStateField(dag.epochRefState.data, slot) == nextEpochSlot getStateField(dag.epochRefState, slot) == nextEpochSlot
assign(state[], dag.epochRefState.data) assign(state[], dag.epochRefState)
let let
bnext = addTestBlock(state[], cache).phase0Data bnext = addTestBlock(state[], cache).phase0Data
@ -214,9 +214,9 @@ suite "Block pool processing" & preset():
check: check:
dag.head == b1Add[] dag.head == b1Add[]
getStateField(dag.headState.data, slot) == b1Add[].slot getStateField(dag.headState, slot) == b1Add[].slot
test "updateStateData sanity" & preset(): test "updateState sanity" & preset():
let let
b1Add = dag.addHeadBlock(verifier, b1, nilPhase0Callback) b1Add = dag.addHeadBlock(verifier, b1, nilPhase0Callback)
b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback) b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback)
@ -229,39 +229,39 @@ suite "Block pool processing" & preset():
# move to specific block # move to specific block
var cache = StateCache() var cache = StateCache()
check: check:
dag.updateStateData(tmpState[], bs1, false, cache) dag.updateState(tmpState[], bs1, false, cache)
tmpState.blck == b1Add[] tmpState[].latest_block_root == b1Add[].root
getStateField(tmpState.data, slot) == bs1.slot getStateField(tmpState[], slot) == bs1.slot
# Skip slots # Skip slots
check: check:
dag.updateStateData(tmpState[], bs1_3, false, cache) # skip slots dag.updateState(tmpState[], bs1_3, false, cache) # skip slots
tmpState.blck == b1Add[] tmpState[].latest_block_root == b1Add[].root
getStateField(tmpState.data, slot) == bs1_3.slot getStateField(tmpState[], slot) == bs1_3.slot
# Move back slots, but not blocks # Move back slots, but not blocks
check: check:
dag.updateStateData(tmpState[], bs1_3.parent(), false, cache) dag.updateState(tmpState[], bs1_3.parent(), false, cache)
tmpState.blck == b1Add[] tmpState[].latest_block_root == b1Add[].root
getStateField(tmpState.data, slot) == bs1_3.parent().slot getStateField(tmpState[], slot) == bs1_3.parent().slot
# Move to different block and slot # Move to different block and slot
check: check:
dag.updateStateData(tmpState[], bs2_3, false, cache) dag.updateState(tmpState[], bs2_3, false, cache)
tmpState.blck == b2Add[] tmpState[].latest_block_root == b2Add[].root
getStateField(tmpState.data, slot) == bs2_3.slot getStateField(tmpState[], slot) == bs2_3.slot
# Move back slot and block # Move back slot and block
check: check:
dag.updateStateData(tmpState[], bs1, false, cache) dag.updateState(tmpState[], bs1, false, cache)
tmpState.blck == b1Add[] tmpState[].latest_block_root == b1Add[].root
getStateField(tmpState.data, slot) == bs1.slot getStateField(tmpState[], slot) == bs1.slot
# Move back to genesis # Move back to genesis
check: check:
dag.updateStateData(tmpState[], bs1.parent(), false, cache) dag.updateState(tmpState[], bs1.parent(), false, cache)
tmpState.blck == b1Add[].parent tmpState[].latest_block_root == b1Add[].parent.root
getStateField(tmpState.data, slot) == bs1.parent.slot getStateField(tmpState[], slot) == bs1.parent.slot
when declared(GC_fullCollect): # i386 test machines seem to run low.. when declared(GC_fullCollect): # i386 test machines seem to run low..
GC_fullCollect() GC_fullCollect()
@ -278,7 +278,7 @@ suite "Block pool altair processing" & preset():
dag = init(ChainDAGRef, cfg, db, validatorMonitor, {}) dag = init(ChainDAGRef, cfg, db, validatorMonitor, {})
verifier = BatchVerifier(rng: keys.newRng(), taskpool: Taskpool.new()) verifier = BatchVerifier(rng: keys.newRng(), taskpool: Taskpool.new())
quarantine = Quarantine.init() quarantine = Quarantine.init()
state = newClone(dag.headState.data) state = newClone(dag.headState)
cache = StateCache() cache = StateCache()
info = ForkedEpochInfo() info = ForkedEpochInfo()
@ -359,8 +359,8 @@ suite "chain DAG finalization tests" & preset():
test "prune heads on finalization" & preset(): test "prune heads on finalization" & preset():
# Create a fork that will not be taken # Create a fork that will not be taken
var var
blck = makeTestBlock(dag.headState.data, cache).phase0Data blck = makeTestBlock(dag.headState, cache).phase0Data
tmpState = assignClone(dag.headState.data) tmpState = assignClone(dag.headState)
check: check:
process_slots( process_slots(
defaultRuntimeConfig, tmpState[], defaultRuntimeConfig, tmpState[],
@ -372,7 +372,7 @@ suite "chain DAG finalization tests" & preset():
let status = dag.addHeadBlock(verifier, blck, nilPhase0Callback) let status = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
check: status.isOk() check: status.isOk()
assign(tmpState[], dag.headState.data) assign(tmpState[], dag.headState)
# skip slots so we can test gappy getBlockAtSlot # skip slots so we can test gappy getBlockAtSlot
check process_slots( check process_slots(
@ -410,7 +410,7 @@ suite "chain DAG finalization tests" & preset():
dag.containsForkBlock(dag.finalizedHead.blck.root) dag.containsForkBlock(dag.finalizedHead.blck.root)
check: check:
dag.db.immutableValidators.len() == getStateField(dag.headState.data, validators).len() dag.db.immutableValidators.len() == getStateField(dag.headState, validators).len()
let let
finalER = dag.getEpochRef( finalER = dag.getEpochRef(
@ -428,11 +428,11 @@ suite "chain DAG finalization tests" & preset():
block: block:
let tmpStateData = assignClone(dag.headState) let tmpStateData = assignClone(dag.headState)
# Check that cached data is available after updateStateData - since we # Check that cached data is available after updateState - since we
# just processed the head the relevant epochrefs should not have been # just processed the head the relevant epochrefs should not have been
# evicted yet # evicted yet
cache = StateCache() cache = StateCache()
check: updateStateData( check: updateState(
dag, tmpStateData[], dag.head.atSlot(dag.head.slot), false, cache) dag, tmpStateData[], dag.head.atSlot(dag.head.slot), false, cache)
check: check:
@ -467,15 +467,15 @@ suite "chain DAG finalization tests" & preset():
dag2.head.root == dag.head.root dag2.head.root == dag.head.root
dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
dag2.finalizedHead.slot == dag.finalizedHead.slot dag2.finalizedHead.slot == dag.finalizedHead.slot
getStateRoot(dag2.headState.data) == getStateRoot(dag.headState.data) getStateRoot(dag2.headState) == getStateRoot(dag.headState)
test "orphaned epoch block" & preset(): test "orphaned epoch block" & preset():
let prestate = (ref ForkedHashedBeaconState)(kind: BeaconStateFork.Phase0) let prestate = (ref ForkedHashedBeaconState)(kind: BeaconStateFork.Phase0)
for i in 0 ..< SLOTS_PER_EPOCH: for i in 0 ..< SLOTS_PER_EPOCH:
if i == SLOTS_PER_EPOCH - 1: if i == SLOTS_PER_EPOCH - 1:
assign(prestate[], dag.headState.data) assign(prestate[], dag.headState)
let blck = makeTestBlock(dag.headState.data, cache).phase0Data let blck = makeTestBlock(dag.headState, cache).phase0Data
let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback) let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
check: added.isOk() check: added.isOk()
dag.updateHead(added[], quarantine) dag.updateHead(added[], quarantine)
@ -508,7 +508,7 @@ suite "chain DAG finalization tests" & preset():
test "init with gaps" & preset(): test "init with gaps" & preset():
for blck in makeTestBlocks( for blck in makeTestBlocks(
dag.headState.data, cache, int(SLOTS_PER_EPOCH * 6 - 2), dag.headState, cache, int(SLOTS_PER_EPOCH * 6 - 2),
true): true):
let added = dag.addHeadBlock(verifier, blck.phase0Data, nilPhase0Callback) let added = dag.addHeadBlock(verifier, blck.phase0Data, nilPhase0Callback)
check: added.isOk() check: added.isOk()
@ -518,13 +518,13 @@ suite "chain DAG finalization tests" & preset():
# Advance past epoch so that the epoch transition is gapped # Advance past epoch so that the epoch transition is gapped
check: check:
process_slots( process_slots(
defaultRuntimeConfig, dag.headState.data, Slot(SLOTS_PER_EPOCH * 6 + 2), defaultRuntimeConfig, dag.headState, Slot(SLOTS_PER_EPOCH * 6 + 2),
cache, info, {}).isOk() cache, info, {}).isOk()
let blck = makeTestBlock( let blck = makeTestBlock(
dag.headState.data, cache, dag.headState, cache,
attestations = makeFullAttestations( attestations = makeFullAttestations(
dag.headState.data, dag.head.root, getStateField(dag.headState.data, slot), dag.headState, dag.head.root, getStateField(dag.headState, slot),
cache, {})).phase0Data cache, {})).phase0Data
let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback) let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
@ -540,11 +540,11 @@ suite "chain DAG finalization tests" & preset():
while cur.slot >= dag.finalizedHead.slot: while cur.slot >= dag.finalizedHead.slot:
assign(tmpStateData[], dag.headState) assign(tmpStateData[], dag.headState)
check: check:
dag.updateStateData(tmpStateData[], cur.atSlot(cur.slot), false, cache) dag.updateState(tmpStateData[], cur.atSlot(cur.slot), false, cache)
dag.getForkedBlock(cur.bid).get().phase0Data.message.state_root == dag.getForkedBlock(cur.bid).get().phase0Data.message.state_root ==
getStateRoot(tmpStateData[].data) getStateRoot(tmpStateData[])
getStateRoot(tmpStateData[].data) == hash_tree_root( getStateRoot(tmpStateData[]) == hash_tree_root(
tmpStateData[].data.phase0Data.data) tmpStateData[].phase0Data.data)
cur = cur.parent cur = cur.parent
let let
@ -557,7 +557,7 @@ suite "chain DAG finalization tests" & preset():
dag2.head.root == dag.head.root dag2.head.root == dag.head.root
dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
dag2.finalizedHead.slot == dag.finalizedHead.slot dag2.finalizedHead.slot == dag.finalizedHead.slot
getStateRoot(dag2.headState.data) == getStateRoot(dag.headState.data) getStateRoot(dag2.headState) == getStateRoot(dag.headState)
suite "Old database versions" & preset(): suite "Old database versions" & preset():
setup: setup:
@ -580,7 +580,7 @@ suite "Old database versions" & preset():
# preInit a database to a v1.0.12 state # preInit a database to a v1.0.12 state
db.putStateRoot( db.putStateRoot(
genState[].latest_block_root(), genState[].data.slot, genState[].root) genState[].latest_block_root, genState[].data.slot, genState[].root)
db.putStateV0(genState[].root, genState[].data) db.putStateV0(genState[].root, genState[].data)
db.putBlockV0(genBlock) db.putBlockV0(genBlock)
@ -591,7 +591,7 @@ suite "Old database versions" & preset():
var var
validatorMonitor = newClone(ValidatorMonitor.init()) validatorMonitor = newClone(ValidatorMonitor.init())
dag = init(ChainDAGRef, defaultRuntimeConfig, db,validatorMonitor, {}) dag = init(ChainDAGRef, defaultRuntimeConfig, db,validatorMonitor, {})
state = newClone(dag.headState.data) state = newClone(dag.headState)
cache = StateCache() cache = StateCache()
att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache) att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
b1 = addTestBlock(state[], cache, attestations = att0).phase0Data b1 = addTestBlock(state[], cache, attestations = att0).phase0Data
@ -617,7 +617,7 @@ suite "Diverging hardforks":
quarantine = newClone(Quarantine.init()) quarantine = newClone(Quarantine.init())
cache = StateCache() cache = StateCache()
info = ForkedEpochInfo() info = ForkedEpochInfo()
tmpState = assignClone(dag.headState.data) tmpState = assignClone(dag.headState)
test "Tail block only in common": test "Tail block only in common":
check: check:

View File

@ -34,7 +34,7 @@ suite "Exit pool testing suite":
pool[].addMessage(msg) pool[].addMessage(msg)
check: pool[].isSeen(msg) check: pool[].isSeen(msg)
withState(dag.headState.data): withState(dag.headState):
check: check:
pool[].getBeaconBlockExits(state.data).proposer_slashings.lenu64 == pool[].getBeaconBlockExits(state.data).proposer_slashings.lenu64 ==
min(i + 1, MAX_PROPOSER_SLASHINGS) min(i + 1, MAX_PROPOSER_SLASHINGS)
@ -54,7 +54,7 @@ suite "Exit pool testing suite":
pool[].addMessage(msg) pool[].addMessage(msg)
check: pool[].isSeen(msg) check: pool[].isSeen(msg)
withState(dag.headState.data): withState(dag.headState):
check: check:
pool[].getBeaconBlockExits(state.data).attester_slashings.lenu64 == pool[].getBeaconBlockExits(state.data).attester_slashings.lenu64 ==
min(i + 1, MAX_ATTESTER_SLASHINGS) min(i + 1, MAX_ATTESTER_SLASHINGS)
@ -70,7 +70,7 @@ suite "Exit pool testing suite":
pool[].addMessage(msg) pool[].addMessage(msg)
check: pool[].isSeen(msg) check: pool[].isSeen(msg)
withState(dag.headState.data): withState(dag.headState):
check: check:
pool[].getBeaconBlockExits(state.data).voluntary_exits.lenu64 == pool[].getBeaconBlockExits(state.data).voluntary_exits.lenu64 ==
min(i + 1, MAX_VOLUNTARY_EXITS) min(i + 1, MAX_VOLUNTARY_EXITS)

View File

@ -51,17 +51,17 @@ suite "Gossip validation " & preset():
# Slot 0 is a finalized slot - won't be making attestations for it.. # Slot 0 is a finalized slot - won't be making attestations for it..
check: check:
process_slots( process_slots(
defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1, defaultRuntimeConfig, state[], getStateField(state[], slot) + 1,
cache, info, {}).isOk() cache, info, {}).isOk()
test "Empty committee when no committee for slot": test "Empty committee when no committee for slot":
template committee(idx: uint64): untyped = template committee(idx: uint64): untyped =
get_beacon_committee( get_beacon_committee(
dag.headState.data, dag.head.slot, idx.CommitteeIndex, cache) dag.headState, dag.head.slot, idx.CommitteeIndex, cache)
template committeeLen(idx: uint64): untyped = template committeeLen(idx: uint64): untyped =
get_beacon_committee_len( get_beacon_committee_len(
dag.headState.data, dag.head.slot, idx.CommitteeIndex, cache) dag.headState, dag.head.slot, idx.CommitteeIndex, cache)
check: check:
committee(0).len > 0 committee(0).len > 0
@ -75,7 +75,7 @@ suite "Gossip validation " & preset():
var var
cache: StateCache cache: StateCache
for blck in makeTestBlocks( for blck in makeTestBlocks(
dag.headState.data, cache, int(SLOTS_PER_EPOCH * 5), false): dag.headState, cache, int(SLOTS_PER_EPOCH * 5), false):
let added = dag.addHeadBlock(verifier, blck.phase0Data) do ( let added = dag.addHeadBlock(verifier, blck.phase0Data) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef): epochRef: EpochRef):
@ -90,15 +90,15 @@ suite "Gossip validation " & preset():
var var
# Create attestations for slot 1 # Create attestations for slot 1
beacon_committee = get_beacon_committee( beacon_committee = get_beacon_committee(
dag.headState.data, dag.head.slot, 0.CommitteeIndex, cache) dag.headState, dag.head.slot, 0.CommitteeIndex, cache)
att_1_0 = makeAttestation( att_1_0 = makeAttestation(
dag.headState.data, dag.head.root, beacon_committee[0], cache) dag.headState, dag.head.root, beacon_committee[0], cache)
att_1_1 = makeAttestation( att_1_1 = makeAttestation(
dag.headState.data, dag.head.root, beacon_committee[1], cache) dag.headState, dag.head.root, beacon_committee[1], cache)
committees_per_slot = committees_per_slot =
get_committee_count_per_slot(dag.headState.data, get_committee_count_per_slot(
att_1_0.data.slot.epoch, cache) dag.headState, att_1_0.data.slot.epoch, cache)
subnet = compute_subnet_for_attestation( subnet = compute_subnet_for_attestation(
committees_per_slot, committees_per_slot,
@ -194,7 +194,7 @@ suite "Gossip validation - Extra": # Not based on preset config
cfg, makeTestDB(num_validators), validatorMonitor, {}) cfg, makeTestDB(num_validators), validatorMonitor, {})
var cache = StateCache() var cache = StateCache()
for blck in makeTestBlocks( for blck in makeTestBlocks(
dag.headState.data, cache, int(SLOTS_PER_EPOCH), false, cfg = cfg): dag.headState, cache, int(SLOTS_PER_EPOCH), false, cfg = cfg):
let added = let added =
case blck.kind case blck.kind
of BeaconBlockFork.Phase0: of BeaconBlockFork.Phase0:
@ -209,7 +209,7 @@ suite "Gossip validation - Extra": # Not based on preset config
check: added.isOk() check: added.isOk()
dag.updateHead(added[], quarantine[]) dag.updateHead(added[], quarantine[])
dag dag
state = assignClone(dag.headState.data.altairData) state = assignClone(dag.headState.altairData)
slot = state[].data.slot slot = state[].data.slot
subcommitteeIdx = 0.SyncSubcommitteeIndex subcommitteeIdx = 0.SyncSubcommitteeIndex

View File

@ -38,7 +38,7 @@ suite "Light client" & preset():
var cache: StateCache var cache: StateCache
const maxAttestedSlotsPerPeriod = 3 * SLOTS_PER_EPOCH const maxAttestedSlotsPerPeriod = 3 * SLOTS_PER_EPOCH
while true: while true:
var slot = getStateField(dag.headState.data, slot) var slot = getStateField(dag.headState, slot)
doAssert targetSlot >= slot doAssert targetSlot >= slot
if targetSlot == slot: break if targetSlot == slot: break
@ -51,13 +51,13 @@ suite "Light client" & preset():
checkpointSlot = periodSlot - maxAttestedSlotsPerPeriod checkpointSlot = periodSlot - maxAttestedSlotsPerPeriod
if targetSlot > checkpointSlot and checkpointSlot > dag.head.slot: if targetSlot > checkpointSlot and checkpointSlot > dag.head.slot:
var info: ForkedEpochInfo var info: ForkedEpochInfo
doAssert process_slots(cfg, dag.headState.data, checkpointSlot, doAssert process_slots(cfg, dag.headState, checkpointSlot,
cache, info, flags = {}).isOk() cache, info, flags = {}).isOk()
slot = checkpointSlot slot = checkpointSlot
# Create blocks for final few epochs # Create blocks for final few epochs
let blocks = min(targetSlot - slot, maxAttestedSlotsPerPeriod) let blocks = min(targetSlot - slot, maxAttestedSlotsPerPeriod)
for blck in makeTestBlocks(dag.headState.data, cache, blocks.int, for blck in makeTestBlocks(dag.headState, cache, blocks.int,
attested, syncCommitteeRatio, cfg): attested, syncCommitteeRatio, cfg):
let added = let added =
case blck.kind case blck.kind
@ -88,21 +88,21 @@ suite "Light client" & preset():
test "Pre-Altair": test "Pre-Altair":
# Genesis # Genesis
check: check:
dag.headState.data.kind == BeaconStateFork.Phase0 dag.headState.kind == BeaconStateFork.Phase0
dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
dag.getLatestLightClientUpdate.isNone dag.getLatestLightClientUpdate.isNone
# Advance to last slot before Altair # Advance to last slot before Altair
dag.advanceToSlot(altairStartSlot - 1, verifier, quarantine[]) dag.advanceToSlot(altairStartSlot - 1, verifier, quarantine[])
check: check:
dag.headState.data.kind == BeaconStateFork.Phase0 dag.headState.kind == BeaconStateFork.Phase0
dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
dag.getLatestLightClientUpdate.isNone dag.getLatestLightClientUpdate.isNone
# Advance to Altair # Advance to Altair
dag.advanceToSlot(altairStartSlot, verifier, quarantine[]) dag.advanceToSlot(altairStartSlot, verifier, quarantine[])
check: check:
dag.headState.data.kind == BeaconStateFork.Altair dag.headState.kind == BeaconStateFork.Altair
dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
dag.getLatestLightClientUpdate.isNone dag.getLatestLightClientUpdate.isNone
@ -113,7 +113,7 @@ suite "Light client" & preset():
# Track trusted checkpoint for light client # Track trusted checkpoint for light client
let let
genesis_validators_root = dag.genesisValidatorsRoot genesis_validators_root = dag.genesisValidatorsRoot
trusted_block_root = dag.headState.blck.root trusted_block_root = dag.head.root
# Advance to target slot # Advance to target slot
const const
@ -121,7 +121,7 @@ suite "Light client" & preset():
periodEpoch = headPeriod.start_epoch periodEpoch = headPeriod.start_epoch
headSlot = (periodEpoch + 2).start_slot + 5 headSlot = (periodEpoch + 2).start_slot + 5
dag.advanceToSlot(headSlot, verifier, quarantine[]) dag.advanceToSlot(headSlot, verifier, quarantine[])
let currentSlot = getStateField(dag.headState.data, slot) let currentSlot = getStateField(dag.headState, slot)
# Initialize light client store # Initialize light client store
let bootstrap = dag.getLightClientBootstrap(trusted_block_root) let bootstrap = dag.getLightClientBootstrap(trusted_block_root)
@ -158,7 +158,7 @@ suite "Light client" & preset():
store, latestUpdate.get, currentSlot, cfg, genesis_validators_root) store, latestUpdate.get, currentSlot, cfg, genesis_validators_root)
check: check:
latestUpdate.isSome latestUpdate.isSome
latestUpdate.get.attested_header.slot == dag.headState.blck.parent.slot latestUpdate.get.attested_header.slot == dag.head.parent.slot
res.isOk res.isOk
store.finalized_header == latestUpdate.get.finalized_header store.finalized_header == latestUpdate.get.finalized_header
store.optimistic_header == latestUpdate.get.attested_header store.optimistic_header == latestUpdate.get.attested_header
@ -171,7 +171,7 @@ suite "Light client" & preset():
skip skip
return return
let genesisState = assignClone dag.headState.data let genesisState = assignClone dag.headState
# Advance to target slot for checkpoint # Advance to target slot for checkpoint
let finalizedSlot = let finalizedSlot =
@ -182,7 +182,7 @@ suite "Light client" & preset():
let cpDb = BeaconChainDB.new("", inMemory = true) let cpDb = BeaconChainDB.new("", inMemory = true)
ChainDAGRef.preInit( ChainDAGRef.preInit(
cpDB, genesisState[], cpDB, genesisState[],
dag.headState.data, dag.getForkedBlock(dag.head.bid).get) dag.headState, dag.getForkedBlock(dag.head.bid).get)
let cpDag = ChainDAGRef.init( let cpDag = ChainDAGRef.init(
cfg, cpDb, validatorMonitor, {}, cfg, cpDb, validatorMonitor, {},
serveLightClientData = true, serveLightClientData = true,

View File

@ -53,17 +53,17 @@ suite "Beacon state" & preset():
info: ForkedEpochInfo info: ForkedEpochInfo
check: # Works for genesis block check: # Works for genesis block
state[].phase0Data.latest_block_root() == genBlock.root state[].phase0Data.latest_block_root == genBlock.root
process_slots(cfg, state[], Slot 1, cache, info, {}).isOk() process_slots(cfg, state[], Slot 1, cache, info, {}).isOk()
state[].phase0Data.latest_block_root() == genBlock.root state[].phase0Data.latest_block_root == genBlock.root
let blck = addTestBlock( let blck = addTestBlock(
state[], cache, nextSlot = false, flags = {skipBlsValidation}).phase0Data state[], cache, nextSlot = false, flags = {skipBlsValidation}).phase0Data
check: # Works for random blocks check: # Works for random blocks
state[].phase0Data.latest_block_root() == blck.root state[].phase0Data.latest_block_root == blck.root
process_slots(cfg, state[], Slot 2, cache, info, {}).isOk() process_slots(cfg, state[], Slot 2, cache, info, {}).isOk()
state[].phase0Data.latest_block_root() == blck.root state[].phase0Data.latest_block_root == blck.root
test "get_beacon_proposer_index": test "get_beacon_proposer_index":
var var

View File

@ -27,7 +27,7 @@ suite "state diff tests" & preset():
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {}) dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
test "random slot differences" & preset(): test "random slot differences" & preset():
let testStates = getTestStates(dag.headState.data, BeaconStateFork.Altair) let testStates = getTestStates(dag.headState, BeaconStateFork.Altair)
for i in 0 ..< testStates.len: for i in 0 ..< testStates.len:
for j in (i+1) ..< testStates.len: for j in (i+1) ..< testStates.len:

View File

@ -402,7 +402,7 @@ iterator makeTestBlocks*(
state = assignClone(state) state = assignClone(state)
for _ in 0..<blocks: for _ in 0..<blocks:
let let
parent_root = withState(state[]): state.latest_block_root() parent_root = withState(state[]): state.latest_block_root
attestations = attestations =
if attested: if attested:
makeFullAttestations( makeFullAttestations(