remove StateData (#3507)
One more step on the journey to reduce `BlockRef` usage across the codebase. This change removes `StateData`, whose job was to keep track of which block was last assigned to a state. Those duties have been taken over by `latest_block_root`, a fairly recent addition that computes the block root from the state data itself, at a small cost that should be insignificant. The change is 99% mechanical.
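For context, this works because a beacon state already records the header of the block that produced it. The following is a minimal sketch of the idea behind a `latest_block_root`-style accessor — the types and names are simplified stand-ins for illustration, not the actual nimbus-eth2 definitions:

```nim
type
  Root = array[32, byte]                 # stand-in for Eth2Digest
  BeaconBlockHeader = object
    slot: uint64
    parentRoot, stateRoot, bodyRoot: Root
  MiniHashedState = object               # hypothetical, simplified state
    latestBlockHeader: BeaconBlockHeader
    root: Root                           # hash_tree_root of the state itself

func hashTreeRoot(h: BeaconBlockHeader): Root =
  ## Placeholder for SSZ hash_tree_root.
  discard

func latestBlockRoot(state: MiniHashedState): Root =
  ## The header stored in a state carries an empty `state_root` until the next
  ## block fills it in, so patch it with the state's own root before hashing -
  ## the result is the root of the block that produced this state.
  var header = state.latestBlockHeader
  if header.stateRoot == default(Root):
    header.stateRoot = state.root
  hashTreeRoot(header)
```

With such an accessor available, callers that previously read the block out of `StateData` can derive the block root from the state alone, which is what makes the mostly mechanical replacement in the diff below possible.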
Parent: 6d1d31dd01
Commit: c64bf045f3
@@ -67,7 +67,7 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 + getBlockRef returns none for missing blocks OK
 + loading tail block works [Preset: mainnet] OK
 + updateHead updates head and headState [Preset: mainnet] OK
-+ updateStateData sanity [Preset: mainnet] OK
++ updateState sanity [Preset: mainnet] OK
 ```
 OK: 6/6 Fail: 0/6 Skip: 0/6
 ## Block processor [Preset: mainnet]

@@ -664,7 +664,7 @@ proc putState*(db: BeaconChainDB, key: Eth2Digest, value: ForkyBeaconState) =
 proc putState*(db: BeaconChainDB, state: ForkyHashedBeaconState) =
   db.withManyWrites:
-    db.putStateRoot(state.latest_block_root(), state.data.slot, state.root)
+    db.putStateRoot(state.latest_block_root, state.data.slot, state.root)
     db.putState(state.root, state.data)

 # For testing rollback

@@ -149,9 +149,8 @@ proc init*(T: type AttestationPool, dag: ChainDAGRef,
   info "Fork choice initialized",
     justified_epoch = getStateField(
-      dag.headState.data, current_justified_checkpoint).epoch,
-    finalized_epoch = getStateField(
-      dag.headState.data, finalized_checkpoint).epoch,
+      dag.headState, current_justified_checkpoint).epoch,
+    finalized_epoch = getStateField(dag.headState, finalized_checkpoint).epoch,
     finalized_root = shortLog(dag.finalizedHead.blck.root)

   T(

@@ -10,7 +10,7 @@
 import
   chronicles,
   stew/[assign2, results],
-  ../spec/[forks, signatures, signatures_batch, state_transition],
+  ../spec/[beaconstate, forks, signatures, signatures_batch, state_transition],
   "."/[block_dag, blockchain_dag, blockchain_dag_light_client]

 export results, signatures_batch, block_dag, blockchain_dag

@@ -27,16 +27,15 @@ logScope:
 proc addResolvedHeadBlock(
     dag: ChainDAGRef,
-    state: var StateData,
+    state: var ForkedHashedBeaconState,
     trustedBlock: ForkyTrustedSignedBeaconBlock,
     parent: BlockRef, cache: var StateCache,
     onBlockAdded: OnPhase0BlockAdded | OnAltairBlockAdded | OnBellatrixBlockAdded,
     stateDataDur, sigVerifyDur, stateVerifyDur: Duration
     ): BlockRef =
-  doAssert getStateField(state.data, slot) == trustedBlock.message.slot,
-    "state must match block"
-  doAssert state.blck.root == trustedBlock.message.parent_root,
-    "the StateData passed into the addResolved function not yet updated!"
+  doAssert state.matches_block_slot(
+      trustedBlock.root, trustedBlock.message.slot),
+    "Given state must have the new block applied"

   let
     blockRoot = trustedBlock.root

@@ -63,17 +62,16 @@ proc addResolvedHeadBlock(
-  # Up to here, state.data was referring to the new state after the block had
-  # been applied but the `blck` field was still set to the parent
-  state.blck = blockRef
+  dag.clearanceBlck = blockRef

   # Regardless of the chain we're on, the deposits come in the same order so
   # as soon as we import a block, we'll also update the shared public key
   # cache

-  dag.updateValidatorKeys(getStateField(state.data, validators).asSeq())
+  dag.updateValidatorKeys(getStateField(state, validators).asSeq())

   # Getting epochRef with the state will potentially create a new EpochRef
   let
-    epochRef = dag.getEpochRef(state, cache)
+    epochRef = dag.getEpochRef(state, blockRef, cache)
     epochRefTick = Moment.now()

   debug "Block resolved",

@@ -101,15 +99,12 @@ proc checkStateTransition(
     cache: var StateCache): Result[void, BlockError] =
   ## Ensure block can be applied on a state
   func restore(v: var ForkedHashedBeaconState) =
-    # TODO address this ugly workaround - there should probably be a
-    # `state_transition` that takes a `StateData` instead and updates
-    # the block as well
-    doAssert v.addr == addr dag.clearanceState.data
     assign(dag.clearanceState, dag.headState)

   let res = state_transition_block(
-    dag.cfg, dag.clearanceState.data, signedBlock,
+    dag.cfg, dag.clearanceState, signedBlock,
     cache, dag.updateFlags, restore)

   if res.isErr():
     info "Invalid block",
       blockRoot = shortLog(signedBlock.root),

@@ -127,16 +122,15 @@ proc advanceClearanceState*(dag: ChainDAGRef) =
   # epoch transition ahead of time.
   # Notably, we use the clearance state here because that's where the block will
   # first be seen - later, this state will be copied to the head state!
-  if dag.clearanceState.blck.slot == getStateField(dag.clearanceState.data, slot):
-    let next =
-      dag.clearanceState.blck.atSlot(dag.clearanceState.blck.slot + 1)
+  if dag.clearanceBlck.slot == getStateField(dag.clearanceState, slot):
+    let next = dag.clearanceBlck.atSlot(dag.clearanceBlck.slot + 1)

     let startTick = Moment.now()
     var cache = StateCache()
-    if not updateStateData(dag, dag.clearanceState, next, true, cache):
+    if not updateState(dag, dag.clearanceState, next, true, cache):
       # The next head update will likely fail - something is very wrong here
       error "Cannot advance to next slot, database corrupt?",
-        clearance = shortLog(dag.clearanceState.blck),
+        clearance = shortLog(dag.clearanceBlck),
         next = shortLog(next)
     else:
       debug "Prepared clearance state for next block",

@@ -222,7 +216,7 @@ proc addHeadBlock*(
   # by the time a new block reaches this point, the parent block will already
   # have "established" itself in the network to some degree at least.
   var cache = StateCache()
-  if not updateStateData(
+  if not updateState(
       dag, dag.clearanceState, parent.atSlot(signedBlock.message.slot), true,
       cache):
     # We should never end up here - the parent must be a block no older than and

@@ -230,8 +224,9 @@ proc addHeadBlock*(
     # load its corresponding state
     error "Unable to load clearance state for parent block, database corrupt?",
       parent = shortLog(parent.atSlot(signedBlock.message.slot)),
-      clearance = shortLog(dag.clearanceState.blck)
+      clearanceBlock = shortLog(dag.clearanceBlck)
     return err(BlockError.MissingParent)
+  dag.clearanceBlck = parent

   let stateDataTick = Moment.now()

@@ -241,7 +236,7 @@ proc addHeadBlock*(
   var sigs: seq[SignatureSet]
   if (let e = sigs.collectSignatureSets(
       signedBlock, dag.db.immutableValidators,
-      dag.clearanceState.data, cache); e.isErr()):
+      dag.clearanceState, cache); e.isErr()):
     # A PublicKey or Signature isn't on the BLS12-381 curve
     info "Unable to load signature sets",
       err = e.error()

@@ -354,7 +349,7 @@ proc addBackfillBlock*(

   if not verify_block_signature(
       dag.forkAtEpoch(blck.slot.epoch),
-      getStateField(dag.headState.data, genesis_validators_root),
+      getStateField(dag.headState, genesis_validators_root),
       blck.slot,
       signedBlock.root,
       proposerKey.get(),

@@ -140,6 +140,9 @@ type
     ## go - the tail block is unique in that its parent is set to `nil`, even
     ## in the case where an earlier genesis block exists.

+    head*: BlockRef
+      ## The most recently known head, as chosen by fork choice
+
     backfill*: BeaconBlockSummary
       ## The backfill points to the oldest block with an unbroken ancestry from
       ## dag.tail - when backfilling, we'll move backwards in time starting

@@ -162,17 +165,19 @@ type
     # -----------------------------------
     # Rewinder - Mutable state processing

-    headState*: StateData
+    headState*: ForkedHashedBeaconState
       ## State given by the head block - must only be updated in `updateHead` -
       ## always matches dag.head

-    epochRefState*: StateData
+    epochRefState*: ForkedHashedBeaconState
       ## State used to produce epochRef instances - must only be used in
       ## `getEpochRef`

-    clearanceState*: StateData
+    clearanceState*: ForkedHashedBeaconState
       ## Cached state used during block clearance - must only be used in
       ## clearance module
+    clearanceBlck*: BlockRef
+      ## The latest block that was applied to the clearance state

     updateFlags*: UpdateFlags

@@ -249,12 +254,6 @@ type
     # balances, as used in fork choice
     effective_balances_bytes*: seq[byte]

-  StateData* = object
-    data*: ForkedHashedBeaconState
-
-    blck*: BlockRef
-      ## The block associated with the state found in data
-
   # TODO when Nim 1.2 support is dropped, make these generic. 1.2 generates
   # invalid C code, which gcc refuses to compile. Example test case:
   # type

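In simplified terms, the type changes above amount to dropping the wrapper and passing the block alongside the state only where it is still needed. A rough sketch of the before/after shape, with hypothetical stand-in types and illustrative proc names rather than the real definitions:

```nim
type
  Root = array[32, byte]
  BlockRef = ref object
    root: Root
    slot: uint64
  ForkedState = object        # stand-in for ForkedHashedBeaconState
    slot: uint64

  # Before: the wrapper removed by this commit - the state and the block it
  # was last advanced to travelled together
  StateData = object
    data: ForkedState
    blck: BlockRef

# Before: callers took the bundle and read `state.blck` implicitly
proc getEpochRefOld(state: StateData) =
  discard

# After: the bare state travels alone; callers that still need the block get
# it as an explicit parameter (or recompute it via latest_block_root), and the
# DAG tracks fields such as `clearanceBlck` separately
proc getEpochRefNew(state: ForkedState, blck: BlockRef) =
  discard
```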
@@ -56,31 +56,32 @@ proc putBlock*(
     dag: ChainDAGRef, signedBlock: ForkyTrustedSignedBeaconBlock) =
   dag.db.putBlock(signedBlock)

-proc updateStateData*(
-    dag: ChainDAGRef, state: var StateData, bs: BlockSlot, save: bool,
+proc updateState*(
+    dag: ChainDAGRef, state: var ForkedHashedBeaconState, bs: BlockSlot, save: bool,
     cache: var StateCache): bool {.gcsafe.}

 template withStateVars*(
-    stateDataInternal: var StateData, body: untyped): untyped =
+    stateInternal: var ForkedHashedBeaconState, body: untyped): untyped =
   ## Inject a few more descriptive names for the members of `stateData` -
   ## the stateData instance may get mutated through these names as well
-  template stateData(): StateData {.inject, used.} = stateDataInternal
+  template state(): ForkedHashedBeaconState {.inject, used.} = stateInternal
   template stateRoot(): Eth2Digest {.inject, used.} =
-    getStateRoot(stateDataInternal.data)
-  template blck(): BlockRef {.inject, used.} = stateDataInternal.blck
+    getStateRoot(stateInternal)

   body

 template withUpdatedState*(
-    dag: ChainDAGRef, stateData: var StateData, blockSlot: BlockSlot,
-    okBody: untyped, failureBody: untyped): untyped =
+    dag: ChainDAGRef, state: var ForkedHashedBeaconState,
+    blockSlot: BlockSlot, okBody: untyped, failureBody: untyped): untyped =
   ## Helper template that updates stateData to a particular BlockSlot - usage of
   ## stateData is unsafe outside of block, or across `await` boundaries

   block:
     var cache {.inject.} = StateCache()
-    if updateStateData(dag, stateData, blockSlot, false, cache):
-      withStateVars(stateData):
+    if updateState(dag, state, blockSlot, false, cache):
+      template blck(): BlockRef {.inject, used.} = blockSlot.blck
+
+      withStateVars(state):
         okBody
     else:
       failureBody

@@ -133,33 +134,31 @@ func validatorKey*(
   validatorKey(epochRef.dag, index)

 func init*(
-    T: type EpochRef, dag: ChainDAGRef, state: StateData,
-    cache: var StateCache): T =
+    T: type EpochRef, dag: ChainDAGRef, state: ForkedHashedBeaconState,
+    blck: BlockRef, cache: var StateCache): T =
   let
-    epoch = state.data.get_current_epoch()
-    proposer_dependent_root = withState(state.data):
-      state.proposer_dependent_root
-    attester_dependent_root = withState(state.data):
-      state.attester_dependent_root
+    epoch = state.get_current_epoch()
+    proposer_dependent_root = withState(state): state.proposer_dependent_root
+    attester_dependent_root = withState(state): state.attester_dependent_root
     epochRef = EpochRef(
       dag: dag, # This gives access to the validator pubkeys through an EpochRef
-      key: state.blck.epochAncestor(epoch),
-      eth1_data: getStateField(state.data, eth1_data),
-      eth1_deposit_index: getStateField(state.data, eth1_deposit_index),
+      key: blck.epochAncestor(epoch),
+      eth1_data: getStateField(state, eth1_data),
+      eth1_deposit_index: getStateField(state, eth1_deposit_index),
       current_justified_checkpoint:
-        getStateField(state.data, current_justified_checkpoint),
-      finalized_checkpoint: getStateField(state.data, finalized_checkpoint),
+        getStateField(state, current_justified_checkpoint),
+      finalized_checkpoint: getStateField(state, finalized_checkpoint),
       proposer_dependent_root: proposer_dependent_root,
       shuffled_active_validator_indices:
-        cache.get_shuffled_active_validator_indices(state.data, epoch),
+        cache.get_shuffled_active_validator_indices(state, epoch),
       attester_dependent_root: attester_dependent_root,
       merge_transition_complete:
-        case state.data.kind:
+        case state.kind:
         of BeaconStateFork.Phase0: false
         of BeaconStateFork.Altair: false
         of BeaconStateFork.Bellatrix:
           # https://github.com/ethereum/consensus-specs/blob/v1.1.7/specs/merge/beacon-chain.md#is_merge_transition_complete
-          state.data.bellatrixData.data.latest_execution_payload_header !=
+          state.bellatrixData.data.latest_execution_payload_header !=
             ExecutionPayloadHeader()
     )
     epochStart = epoch.start_slot()

@@ -168,7 +167,7 @@ func init*(

   for i in 0'u64..<SLOTS_PER_EPOCH:
     epochRef.beacon_proposers[i] = get_beacon_proposer_index(
-      state.data, cache, epochStart + i)
+      state, cache, epochStart + i)

   # When fork choice runs, it will need the effective balance of the justified
   # checkpoint - we pre-load the balances here to avoid rewinding the justified

@@ -182,9 +181,8 @@ func init*(

   epochRef.effective_balances_bytes =
     snappyEncode(SSZ.encode(
-      List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT](get_effective_balances(
-        getStateField(state.data, validators).asSeq,
-        epoch))))
+      List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT](
+        get_effective_balances(getStateField(state, validators).asSeq, epoch))))

   epochRef

@@ -386,9 +384,9 @@ proc getState(

   true

-proc getStateData(
-    db: BeaconChainDB, cfg: RuntimeConfig, state: var StateData, bs: BlockSlot,
-    rollback: RollbackProc): bool =
+proc getState(
+    db: BeaconChainDB, cfg: RuntimeConfig, state: var ForkedHashedBeaconState,
+    bs: BlockSlot, rollback: RollbackProc): bool =
   if not bs.isStateCheckpoint():
     return false

@@ -396,11 +394,9 @@ proc getStateData(
   if not root.isSome():
     return false

-  if not db.getState(cfg, bs.slot, root.get(), state.data, rollback):
+  if not db.getState(cfg, bs.slot, root.get(), state, rollback):
     return false

-  state.blck = bs.blck
-
   true

 proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest):

@@ -422,7 +418,7 @@ proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest):
 proc getBlock*(
     dag: ChainDAGRef, bid: BlockId,
     T: type ForkyTrustedSignedBeaconBlock): Opt[T] =
-  withState(dag.headState.data):
+  withState(dag.headState):
     dag.db.getBlock(bid.root, T)

 proc getBlockSSZ*(dag: ChainDAGRef, bid: BlockId, bytes: var seq[byte]): bool =

@@ -453,14 +449,15 @@ proc getForkedBlock*(
     # In case we didn't have a summary - should be rare, but ..
     dag.db.getForkedBlock(root)

-proc updateBeaconMetrics(state: StateData, cache: var StateCache) =
+proc updateBeaconMetrics(
+    state: ForkedHashedBeaconState, bid: BlockId, cache: var StateCache) =
   # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
   # both non-negative, so difference can't overflow or underflow int64

-  beacon_head_root.set(state.blck.root.toGaugeValue)
-  beacon_head_slot.set(state.blck.slot.toGaugeValue)
+  beacon_head_root.set(bid.root.toGaugeValue)
+  beacon_head_slot.set(bid.slot.toGaugeValue)

-  withState(state.data):
+  withState(state):
     beacon_pending_deposits.set(
       (state.data.eth1_data.deposit_count -
         state.data.eth1_deposit_index).toGaugeValue)

@@ -662,6 +659,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
     validatorMonitor: validatorMonitor,
     genesis: genesisRef,
     tail: tailRef,
+    head: headRef,
     backfill: backfill,
     finalizedHead: tailRef.atSlot(),
     lastPrunePoint: tailRef.atSlot(),

@@ -669,6 +667,8 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
     # head state
     heads: @[headRef],

+    clearanceBlck: headRef,
+
     # The only allowed flag right now is verifyFinalization, as the others all
     # allow skipping some validation.
     updateFlags: {verifyFinalization} * updateFlags,

@@ -690,10 +690,10 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
   # Now that we have a head block, we need to find the most recent state that
   # we have saved in the database
   while cur.blck != nil and
-      not getStateData(db, cfg, dag.headState, cur, noRollback):
+      not getState(db, cfg, dag.headState, cur, noRollback):
     cur = cur.parentOrSlot()

-  if dag.headState.blck == nil:
+  if cur.blck == nil:
     fatal "No state found in head history, database corrupt?",
       genesisRef, tailRef, headRef
     # TODO Potentially we could recover from here instead of crashing - what

@@ -701,11 +701,11 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
     quit 1

   let
-    configFork = case dag.headState.data.kind
+    configFork = case dag.headState.kind
       of BeaconStateFork.Phase0: genesisFork(cfg)
       of BeaconStateFork.Altair: altairFork(cfg)
       of BeaconStateFork.Bellatrix: bellatrixFork(cfg)
-    statefork = getStateField(dag.headState.data, fork)
+    statefork = getStateField(dag.headState, fork)

   if stateFork != configFork:
     error "State from database does not match network, check --network parameter",

@@ -716,8 +716,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
   assign(dag.epochRefState, dag.headState)

   dag.forkDigests = newClone ForkDigests.init(
-    cfg,
-    getStateField(dag.headState.data, genesis_validators_root))
+    cfg, getStateField(dag.headState, genesis_validators_root))

   let forkVersions =
     [cfg.GENESIS_FORK_VERSION, cfg.ALTAIR_FORK_VERSION,

@@ -732,7 +731,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
   # The state we loaded into `headState` is the last state we saved, which may
   # come from earlier than the head block
   var cache: StateCache
-  if not dag.updateStateData(dag.headState, headRef.atSlot(), false, cache):
+  if not dag.updateState(dag.headState, headRef.atSlot(), false, cache):
     fatal "Unable to load head state, database corrupt?",
       head = shortLog(headRef)

@@ -741,17 +740,17 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
   # Clearance most likely happens from head - assign it after rewinding head
   assign(dag.clearanceState, dag.headState)

-  withState(dag.headState.data):
+  withState(dag.headState):
     dag.validatorMonitor[].registerState(state.data)

-  updateBeaconMetrics(dag.headState, cache)
+  updateBeaconMetrics(dag.headState, dag.head.bid, cache)

   # The tail block is "implicitly" finalized as it was given either as a
   # checkpoint block, or is the genesis, thus we use it as a lower bound when
   # computing the finalized head
   let
     finalized_checkpoint =
-      getStateField(dag.headState.data, finalized_checkpoint)
+      getStateField(dag.headState, finalized_checkpoint)
     finalizedSlot = max(finalized_checkpoint.epoch.start_slot(), tailRef.slot)

   block: # Set up finalizedHead -> head

@@ -785,10 +784,10 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,

   # Fill validator key cache in case we're loading an old database that doesn't
   # have a cache
-  dag.updateValidatorKeys(getStateField(dag.headState.data, validators).asSeq())
+  dag.updateValidatorKeys(getStateField(dag.headState, validators).asSeq())
   dag.updateFinalizedBlocks()

-  withState(dag.headState.data):
+  withState(dag.headState):
     when stateFork >= BeaconStateFork.Altair:
       dag.headSyncCommittees = state.data.get_sync_committee_cache(cache)

@@ -807,19 +806,19 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
   dag

 template genesisValidatorsRoot*(dag: ChainDAGRef): Eth2Digest =
-  getStateField(dag.headState.data, genesis_validators_root)
+  getStateField(dag.headState, genesis_validators_root)

 func getEpochRef*(
-    dag: ChainDAGRef, state: StateData, cache: var StateCache): EpochRef =
+    dag: ChainDAGRef, state: ForkedHashedBeaconState, blck: BlockRef,
+    cache: var StateCache): EpochRef =
   ## Get a cached `EpochRef` or construct one based on the given state - always
   ## returns an EpochRef instance
   let
-    blck = state.blck
-    epoch = state.data.get_current_epoch()
+    epoch = state.get_current_epoch()

   var epochRef = dag.findEpochRef(blck, epoch)
   if epochRef.isErr:
-    let res = EpochRef.init(dag, state, cache)
+    let res = EpochRef.init(dag, state, blck, cache)

     if epoch >= dag.finalizedHead.slot.epoch():
       # Only cache epoch information for unfinalized blocks - earlier states

@@ -877,9 +876,9 @@ proc getEpochRef*(
   if isNil(ancestor.blck): # past the tail
     return err()

-  dag.withUpdatedState(
-      dag.epochRefState, ancestor.blck.atEpochStart(ancestor.epoch)) do:
-    ok(dag.getEpochRef(stateData, cache))
+  let epochBlck = ancestor.blck.atEpochStart(ancestor.epoch)
+  dag.withUpdatedState(dag.epochRefState, epochBlck) do:
+    ok(dag.getEpochRef(state, blck, cache))
   do:
     err()

@@ -904,7 +903,8 @@ func forkDigestAtEpoch*(dag: ChainDAGRef, epoch: Epoch): ForkDigest =
   of BeaconStateFork.Altair: dag.forkDigests.altair
   of BeaconStateFork.Phase0: dag.forkDigests.phase0

-proc getState(dag: ChainDAGRef, state: var StateData, bs: BlockSlot): bool =
+proc getState(
+    dag: ChainDAGRef, state: var ForkedHashedBeaconState, bs: BlockSlot): bool =
   ## Load a state from the database given a block and a slot - this will first
   ## lookup the state root in the state root table then load the corresponding
   ## state, if it exists

@@ -915,32 +915,32 @@ proc getState(dag: ChainDAGRef, state: var StateData, bs: BlockSlot): bool =
     else:
       unsafeAddr dag.headState

-  let v = addr state.data
+  let v = addr state
   func restore() =
-    assign(v[], restoreAddr[].data)
+    assign(v[], restoreAddr[])

-  getStateData(dag.db, dag.cfg, state, bs, restore)
+  getState(dag.db, dag.cfg, state, bs, restore)

-proc putState(dag: ChainDAGRef, state: StateData) =
+proc putState(dag: ChainDAGRef, state: ForkedHashedBeaconState, blck: BlockRef) =
   # Store a state and its root
   logScope:
-    blck = shortLog(state.blck)
-    stateSlot = shortLog(getStateField(state.data, slot))
-    stateRoot = shortLog(getStateRoot(state.data))
+    blck = shortLog(blck)
+    stateSlot = shortLog(getStateField(state, slot))
+    stateRoot = shortLog(getStateRoot(state))

-  if not isStateCheckpoint(state.blck.atSlot(getStateField(state.data, slot))):
+  if not isStateCheckpoint(blck.atSlot(getStateField(state, slot))):
     return

   # Don't consider legacy tables here, they are slow to read so we'll want to
   # rewrite things in the new table anyway.
-  if dag.db.containsState(getStateRoot(state.data), legacy = false):
+  if dag.db.containsState(getStateRoot(state), legacy = false):
     return

   let startTick = Moment.now()
   # Ideally we would save the state and the root lookup cache in a single
   # transaction to prevent database inconsistencies, but the state loading code
   # is resilient against one or the other going missing
-  withState(state.data):
+  withState(state):
     dag.db.putState(state)

   debug "Stored state", putStateDur = Moment.now() - startTick

@@ -1005,29 +1005,29 @@ proc getBlockRange*(
   o # Return the index of the first non-nil item in the output

 proc advanceSlots(
-    dag: ChainDAGRef, state: var StateData, slot: Slot, save: bool,
-    cache: var StateCache, info: var ForkedEpochInfo) =
+    dag: ChainDAGRef, state: var ForkedHashedBeaconState, blck: BlockRef,
+    slot: Slot, save: bool, cache: var StateCache, info: var ForkedEpochInfo) =
   # Given a state, advance it zero or more slots by applying empty slot
   # processing - the state must be positions at a slot before or equal to the
   # target
-  doAssert getStateField(state.data, slot) <= slot
+  doAssert getStateField(state, slot) <= slot

-  while getStateField(state.data, slot) < slot:
-    let preEpoch = getStateField(state.data, slot).epoch
-    loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
+  while getStateField(state, slot) < slot:
+    let preEpoch = getStateField(state, slot).epoch
+    loadStateCache(dag, cache, blck, getStateField(state, slot).epoch)

     process_slots(
-      dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, info,
+      dag.cfg, state, getStateField(state, slot) + 1, cache, info,
       dag.updateFlags).expect("process_slots shouldn't fail when state slot is correct")
     if save:
-      dag.putState(state)
+      dag.putState(state, blck)

       # The reward information in the state transition is computed for epoch
       # transitions - when transitioning into epoch N, the activities in epoch
       # N-2 are translated into balance updates, and this is what we capture
       # in the monitor. This may be inaccurate during a deep reorg (>1 epoch)
       # which is an acceptable tradeoff for monitoring.
-      withState(state.data):
+      withState(state):
         let postEpoch = state.data.slot.epoch
         if preEpoch != postEpoch:
           dag.validatorMonitor[].registerEpochInfo(postEpoch, info, state.data)

@@ -1057,27 +1057,25 @@ proc applyBlock(

 proc applyBlock(
     dag: ChainDAGRef,
-    state: var StateData, blck: BlockRef,
+    state: var ForkedHashedBeaconState, blck: BlockRef,
     cache: var StateCache, info: var ForkedEpochInfo) =
   # Apply a single block to the state - the state must be positioned at the
   # parent of the block with a slot lower than the one of the block being
   # applied
-  doAssert state.blck == blck.parent
+  doAssert state.matches_block(blck.parent.root)

-  loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
+  loadStateCache(dag, cache, blck.parent, getStateField(state, slot).epoch)

-  dag.applyBlock(state.data, blck.bid, cache, info).expect(
+  dag.applyBlock(state, blck.bid, cache, info).expect(
     "Blocks from database must not fail to apply")

-  state.blck = blck
-
-proc updateStateData*(
-    dag: ChainDAGRef, state: var StateData, bs: BlockSlot, save: bool,
-    cache: var StateCache): bool =
+proc updateState*(
+    dag: ChainDAGRef, state: var ForkedHashedBeaconState, bs: BlockSlot,
+    save: bool, cache: var StateCache): bool =
   ## Rewind or advance state such that it matches the given block and slot -
   ## this may include replaying from an earlier snapshot if blck is on a
   ## different branch or has advanced to a higher slot number than slot
-  ## If `bs.slot` is higher than `bs.blck.slot`, `updateStateData` will fill in
+  ## If `bs.slot` is higher than `bs.blck.slot`, `updateState` will fill in
   ## with empty/non-block slots

   # First, see if we're already at the requested block. If we are, also check

@@ -1093,21 +1091,26 @@ proc updateStateData*(

   let
     startTick = Moment.now()
-    current {.used.} = state.blck.atSlot(getStateField(state.data, slot))
+    current {.used.} = withState(state):
+      BlockSlotId.init(
+        BlockId(
+          root: state.latest_block_root,
+          slot: state.data.latest_block_header.slot),
+        state.data.slot)

   var
     ancestors: seq[BlockRef]
     found = false

-  template exactMatch(state: StateData, bs: BlockSlot): bool =
+  template exactMatch(state: ForkedHashedBeaconState, bs: BlockSlot): bool =
     # The block is the same and we're at an early enough slot - the state can
     # be used to arrive at the desired blockslot
-    state.blck == bs.blck and getStateField(state.data, slot) == bs.slot
+    state.matches_block_slot(bs.blck.root, bs.slot)

-  template canAdvance(state: StateData, bs: BlockSlot): bool =
+  template canAdvance(state: ForkedHashedBeaconState, bs: BlockSlot): bool =
     # The block is the same and we're at an early enough slot - the state can
     # be used to arrive at the desired blockslot
-    state.blck == bs.blck and getStateField(state.data, slot) <= bs.slot
+    state.can_advance_slots(bs.blck.root, bs.slot)

   # Fast path: check all caches for an exact match - this is faster than
   # advancing a state where there's epoch processing to do, by a wide margin -

@@ -1212,8 +1215,13 @@ proc updateStateData*(
   # Starting state has been assigned, either from memory or database
   let
     assignTick = Moment.now()
-    ancestor {.used.} = state.blck.atSlot(getStateField(state.data, slot))
-    ancestorRoot {.used.} = getStateRoot(state.data)
+    ancestor {.used.} = withState(state):
+      BlockSlotId.init(
+        BlockId(
+          root: state.latest_block_root,
+          slot: state.data.latest_block_header.slot),
+        state.data.slot)
+    ancestorRoot {.used.} = getStateRoot(state)

   var info: ForkedEpochInfo
   # Time to replay all the blocks between then and now

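The `exactMatch`/`canAdvance` rewrites above lean on `matches_block_slot` and `can_advance_slots` from `../spec/beaconstate`. A rough sketch of the semantics implied by the replaced comparisons, again with simplified stand-in types rather than the real forked-state definitions:

```nim
type
  Root = array[32, byte]
  MiniState = object
    slot: uint64
    latestBlockRoot: Root   # as recovered by a latest_block_root-style helper

func matches_block_slot(state: MiniState, blockRoot: Root, slot: uint64): bool =
  ## Exact match: the state was produced by `blockRoot` and already sits at `slot`.
  state.latestBlockRoot == blockRoot and state.slot == slot

func can_advance_slots(state: MiniState, blockRoot: Root, slot: uint64): bool =
  ## The state was produced by `blockRoot` and is at or before `slot`, so the
  ## target can be reached with empty-slot processing alone.
  state.latestBlockRoot == blockRoot and state.slot <= slot
```

This is what lets `updateState` decide whether a cached state is reusable without consulting a `BlockRef` stored next to it.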
@@ -1225,10 +1233,10 @@ proc updateStateData*(
     dag.applyBlock(state, ancestors[i], cache, info)

   # ...and make sure to process empty slots as requested
-  dag.advanceSlots(state, bs.slot, save, cache, info)
+  dag.advanceSlots(state, bs.blck, bs.slot, save, cache, info)

   # ...and make sure to load the state cache, if it exists
-  loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
+  loadStateCache(dag, cache, bs.blck, getStateField(state, slot).epoch)

   let
     assignDur = assignTick - startTick

@@ -1241,36 +1249,36 @@ proc updateStateData*(
     # time might need tuning
     info "State replayed",
       blocks = ancestors.len,
-      slots = getStateField(state.data, slot) - ancestor.slot,
+      slots = getStateField(state, slot) - ancestor.slot,
       current = shortLog(current),
       ancestor = shortLog(ancestor),
       target = shortLog(bs),
       ancestorStateRoot = shortLog(ancestorRoot),
-      targetStateRoot = shortLog(getStateRoot(state.data)),
+      targetStateRoot = shortLog(getStateRoot(state)),
       found,
       assignDur,
       replayDur
   elif ancestors.len > 0:
     debug "State replayed",
       blocks = ancestors.len,
-      slots = getStateField(state.data, slot) - ancestor.slot,
+      slots = getStateField(state, slot) - ancestor.slot,
       current = shortLog(current),
       ancestor = shortLog(ancestor),
       target = shortLog(bs),
       ancestorStateRoot = shortLog(ancestorRoot),
-      targetStateRoot = shortLog(getStateRoot(state.data)),
+      targetStateRoot = shortLog(getStateRoot(state)),
       found,
       assignDur,
       replayDur
   else: # Normal case!
     trace "State advanced",
       blocks = ancestors.len,
-      slots = getStateField(state.data, slot) - ancestor.slot,
+      slots = getStateField(state, slot) - ancestor.slot,
       current = shortLog(current),
       ancestor = shortLog(ancestor),
       target = shortLog(bs),
       ancestorStateRoot = shortLog(ancestorRoot),
-      targetStateRoot = shortLog(getStateRoot(state.data)),
+      targetStateRoot = shortLog(getStateRoot(state)),
       found,
       assignDur,
       replayDur

@@ -1354,7 +1362,7 @@ iterator syncSubcommitteePairs*(

 func syncCommitteeParticipants*(dag: ChainDAGRef,
                                 slot: Slot): seq[ValidatorIndex] =
-  withState(dag.headState.data):
+  withState(dag.headState):
     when stateFork >= BeaconStateFork.Altair:
       let
         period = sync_committee_period(slot)

@@ -1384,7 +1392,7 @@ func getSubcommitteePositions*(
     slot: Slot,
     subcommitteeIdx: SyncSubcommitteeIndex,
     validatorIdx: uint64): seq[uint64] =
-  withState(dag.headState.data):
+  withState(dag.headState):
     when stateFork >= BeaconStateFork.Altair:
       let
         period = sync_committee_period(slot)

@@ -1481,12 +1489,12 @@ proc updateHead*(

   let
     lastHead = dag.head
-    lastHeadStateRoot = getStateRoot(dag.headState.data)
+    lastHeadStateRoot = getStateRoot(dag.headState)

   # Start off by making sure we have the right state - updateStateData will try
   # to use existing in-memory states to make this smooth
   var cache: StateCache
-  if not updateStateData(
+  if not updateState(
       dag, dag.headState, newHead.atSlot(), false, cache):
     # Advancing the head state should never fail, given that the tail is
     # implicitly finalised, the head is an ancestor of the tail and we always

@@ -1495,18 +1503,19 @@ proc updateHead*(
     fatal "Unable to load head state during head update, database corrupt?",
       lastHead = shortLog(lastHead)
     quit 1
+  dag.head = newHead

   dag.db.putHeadBlock(newHead.root)

-  updateBeaconMetrics(dag.headState, cache)
+  updateBeaconMetrics(dag.headState, dag.head.bid, cache)

-  withState(dag.headState.data):
+  withState(dag.headState):
     when stateFork >= BeaconStateFork.Altair:
       dag.headSyncCommittees = state.data.get_sync_committee_cache(cache)

   let
     finalized_checkpoint =
-      getStateField(dag.headState.data, finalized_checkpoint)
+      getStateField(dag.headState, finalized_checkpoint)
     finalizedSlot = max(finalized_checkpoint.epoch.start_slot(), dag.tail.slot)
     finalizedHead = newHead.atSlot(finalizedSlot)

@@ -1521,19 +1530,18 @@ proc updateHead*(
     notice "Updated head block with chain reorg",
       lastHead = shortLog(lastHead),
       headParent = shortLog(newHead.parent),
-      stateRoot = shortLog(getStateRoot(dag.headState.data)),
-      headBlock = shortLog(dag.headState.blck),
-      stateSlot = shortLog(getStateField(dag.headState.data, slot)),
+      stateRoot = shortLog(getStateRoot(dag.headState)),
+      headBlock = shortLog(dag.head),
+      stateSlot = shortLog(getStateField(dag.headState, slot)),
       justified = shortLog(getStateField(
-        dag.headState.data, current_justified_checkpoint)),
-      finalized = shortLog(getStateField(
-        dag.headState.data, finalized_checkpoint))
+        dag.headState, current_justified_checkpoint)),
+      finalized = shortLog(getStateField(dag.headState, finalized_checkpoint))

     if not(isNil(dag.onReorgHappened)):
       let data = ReorgInfoObject.init(dag.head.slot, uint64(ancestorDepth),
                                       lastHead.root, newHead.root,
                                       lastHeadStateRoot,
-                                      getStateRoot(dag.headState.data))
+                                      getStateRoot(dag.headState))
       dag.onReorgHappened(data)

   # A reasonable criterion for "reorganizations of the chain"

@@ -1543,27 +1551,25 @@ proc updateHead*(
     beacon_reorgs_total.inc()
   else:
     debug "Updated head block",
-      head = shortLog(dag.headState.blck),
-      stateRoot = shortLog(getStateRoot(dag.headState.data)),
+      head = shortLog(dag.head),
+      stateRoot = shortLog(getStateRoot(dag.headState)),
       justified = shortLog(getStateField(
-        dag.headState.data, current_justified_checkpoint)),
-      finalized = shortLog(getStateField(
-        dag.headState.data, finalized_checkpoint))
+        dag.headState, current_justified_checkpoint)),
+      finalized = shortLog(getStateField(dag.headState, finalized_checkpoint))

     if not(isNil(dag.onHeadChanged)):
       let
         currentEpoch = epoch(newHead.slot)
-        depRoot = withState(dag.headState.data): state.proposer_dependent_root
-        prevDepRoot =
-          withState(dag.headState.data): state.attester_dependent_root
+        depRoot = withState(dag.headState): state.proposer_dependent_root
+        prevDepRoot = withState(dag.headState): state.attester_dependent_root
         epochTransition = (finalizedHead != dag.finalizedHead)
       let data = HeadChangeInfoObject.init(dag.head.slot, dag.head.root,
-                                           getStateRoot(dag.headState.data),
+                                           getStateRoot(dag.headState),
                                            epochTransition, depRoot,
                                            prevDepRoot)
       dag.onHeadChanged(data)

-  withState(dag.headState.data):
+  withState(dag.headState):
     # Every time the head changes, the "canonical" view of balances and other
     # state-related metrics change - notify the validator monitor.
     # Doing this update during head update ensures there's a reasonable number

@@ -1572,12 +1578,11 @@ proc updateHead*(

   if finalizedHead != dag.finalizedHead:
     debug "Reached new finalization checkpoint",
-      head = shortLog(dag.headState.blck),
-      stateRoot = shortLog(getStateRoot(dag.headState.data)),
+      head = shortLog(dag.head),
+      stateRoot = shortLog(getStateRoot(dag.headState)),
       justified = shortLog(getStateField(
-        dag.headState.data, current_justified_checkpoint)),
-      finalized = shortLog(getStateField(
-        dag.headState.data, finalized_checkpoint))
+        dag.headState, current_justified_checkpoint)),
+      finalized = shortLog(getStateField(dag.headState, finalized_checkpoint))

     block:
       # Update `dag.finalizedBlocks` with all newly finalized blocks (those

@@ -1609,10 +1614,9 @@ proc updateHead*(
     # Send notification about new finalization point via callback.
     if not(isNil(dag.onFinHappened)):
       let stateRoot =
-        if dag.finalizedHead.slot == dag.head.slot:
-          getStateRoot(dag.headState.data)
+        if dag.finalizedHead.slot == dag.head.slot: getStateRoot(dag.headState)
         elif dag.finalizedHead.slot + SLOTS_PER_HISTORICAL_ROOT > dag.head.slot:
-          getStateField(dag.headState.data, state_roots).data[
+          getStateField(dag.headState, state_roots).data[
             int(dag.finalizedHead.slot mod SLOTS_PER_HISTORICAL_ROOT)]
         else:
           Eth2Digest() # The thing that finalized was >8192 blocks old?

@@ -42,7 +42,7 @@ func computeEarliestLightClientSlot*(dag: ChainDAGRef): Slot =
     minSupportedSlot = max(
       dag.cfg.ALTAIR_FORK_EPOCH.start_slot,
       dag.lightClientCache.importTailSlot)
-    currentSlot = getStateField(dag.headState.data, slot)
+    currentSlot = getStateField(dag.headState, slot)
   if currentSlot < minSupportedSlot:
     return minSupportedSlot

@@ -61,7 +61,7 @@ func computeEarliestLightClientSlot*(dag: ChainDAGRef): Slot =

 proc currentSyncCommitteeForPeriod(
     dag: ChainDAGRef,
-    tmpState: var StateData,
+    tmpState: var ForkedHashedBeaconState,
     period: SyncCommitteePeriod): SyncCommittee =
   ## Fetch a `SyncCommittee` for a given sync committee period.
   ## For non-finalized periods, follow the chain as selected by fork choice.

@@ -74,7 +74,7 @@ proc currentSyncCommitteeForPeriod(
     # data for the period
     bs = dag.getBlockAtSlot(syncCommitteeSlot).expect("TODO")
   dag.withUpdatedState(tmpState, bs) do:
-    withState(stateData.data):
+    withState(state):
       when stateFork >= BeaconStateFork.Altair:
         state.data.current_sync_committee
       else: raiseAssert "Unreachable"

@@ -90,7 +90,7 @@ template syncCommitteeRoot(

 proc syncCommitteeRootForPeriod(
     dag: ChainDAGRef,
-    tmpState: var StateData,
+    tmpState: var ForkedHashedBeaconState,
     period: SyncCommitteePeriod): Eth2Digest =
   ## Compute a root to uniquely identify `current_sync_committee` and
   ## `next_sync_committee` for a given sync committee period.

@@ -102,7 +102,7 @@ proc syncCommitteeRootForPeriod(
     syncCommitteeSlot = max(periodStartSlot, earliestSlot)
     bs = dag.getBlockAtSlot(syncCommitteeSlot).expect("TODO")
   dag.withUpdatedState(tmpState, bs) do:
-    withState(stateData.data):
+    withState(state):
       when stateFork >= BeaconStateFork.Altair:
         state.syncCommitteeRoot
       else: raiseAssert "Unreachable"

@@ -391,7 +391,7 @@ proc createLightClientUpdates(

 proc processNewBlockForLightClient*(
     dag: ChainDAGRef,
-    state: StateData,
+    state: ForkedHashedBeaconState,
     signedBlock: ForkyTrustedSignedBeaconBlock,
     parent: BlockRef) =
   ## Update light client data with information from a new block.

@@ -401,11 +401,11 @@ proc processNewBlockForLightClient*(
     return

   when signedBlock is bellatrix.TrustedSignedBeaconBlock:
-    dag.cacheLightClientData(state.data.bellatrixData, signedBlock)
-    dag.createLightClientUpdates(state.data.bellatrixData, signedBlock, parent)
+    dag.cacheLightClientData(state.bellatrixData, signedBlock)
+    dag.createLightClientUpdates(state.bellatrixData, signedBlock, parent)
   elif signedBlock is altair.TrustedSignedBeaconBlock:
-    dag.cacheLightClientData(state.data.altairData, signedBlock)
-    dag.createLightClientUpdates(state.data.altairData, signedBlock, parent)
+    dag.cacheLightClientData(state.altairData, signedBlock)
+    dag.createLightClientUpdates(state.altairData, signedBlock, parent)
   elif signedBlock is phase0.TrustedSignedBeaconBlock:
     discard
   else:

@@ -428,7 +428,7 @@ proc processHeadChangeForLightClient*(dag: ChainDAGRef) =
       let key = (period, dag.syncCommitteeRootForPeriod(tmpState[], period))
       dag.lightClientCache.bestUpdates[period] =
         dag.lightClientCache.pendingBestUpdates.getOrDefault(key)
-  withState(dag.headState.data):
+  withState(dag.headState):
     when stateFork >= BeaconStateFork.Altair:
       let key = (headPeriod, state.syncCommitteeRoot)
       dag.lightClientCache.bestUpdates[headPeriod] =

@@ -586,7 +586,7 @@ proc initBestLightClientUpdateForPeriod(
   let
     finalizedEpoch = block:
       dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do:
-        withState(stateData.data):
+        withState(state):
           when stateFork >= BeaconStateFork.Altair:
             state.data.finalized_checkpoint.epoch
           else: raiseAssert "Unreachable"

@@ -607,7 +607,7 @@ proc initBestLightClientUpdateForPeriod(
   # Fill data from attested block
   dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do:
     let bdata = dag.getForkedBlock(blck.bid).get
-    withStateAndBlck(stateData.data, bdata):
+    withStateAndBlck(state, bdata):
       when stateFork >= BeaconStateFork.Altair:
         update.attested_header =
           blck.toBeaconBlockHeader

@@ -629,7 +629,7 @@ proc initBestLightClientUpdateForPeriod(
   # Fill data from finalized block
   dag.withUpdatedState(tmpState[], finalizedBlck.atSlot) do:
     let bdata = dag.getForkedBlock(blck.bid).get
-    withStateAndBlck(stateData.data, bdata):
+    withStateAndBlck(state, bdata):
       when stateFork >= BeaconStateFork.Altair:
         update.next_sync_committee =
           state.data.next_sync_committee

@@ -643,7 +643,7 @@ proc initBestLightClientUpdateForPeriod(
   # Fill data from attested block
   dag.withUpdatedState(tmpState[], bestNonFinalizedRef.parent.atSlot) do:
     let bdata = dag.getForkedBlock(blck.bid).get
-    withStateAndBlck(stateData.data, bdata):
+    withStateAndBlck(state, bdata):
       when stateFork >= BeaconStateFork.Altair:
         update.attested_header =
           blck.toBeaconBlockHeader

@@ -705,10 +705,10 @@ proc initLightClientBootstrapForPeriod(
         blck.slot >= lowSlot and blck.slot <= highSlot and
         not dag.lightClientCache.bootstrap.hasKey(blck.slot):
       var cachedBootstrap {.noinit.}: CachedLightClientBootstrap
-      doAssert dag.updateStateData(
+      doAssert dag.updateState(
         tmpState[], blck.atSlot, save = false, tmpCache)
       withStateVars(tmpState[]):
-        withState(stateData.data):
+        withState(state):
           when stateFork >= BeaconStateFork.Altair:
             state.data.build_proof(
               altair.CURRENT_SYNC_COMMITTEE_INDEX,

@@ -756,11 +756,11 @@ proc initLightClientCache*(dag: ChainDAGRef) =
     cpIndex = 0
   for i in countdown(blocksBetween.high, blocksBetween.low):
     blockRef = blocksBetween[i]
-    doAssert dag.updateStateData(
-      dag.headState, blockRef.atSlot(blockRef.slot), save = false, cache)
+    doAssert dag.updateState(
+      dag.headState, blockRef.atSlot(), save = false, cache)
     withStateVars(dag.headState):
-      let bdata = dag.getForkedBlock(blck.bid).get
-      withStateAndBlck(stateData.data, bdata):
+      let bdata = dag.getForkedBlock(blockRef.bid).get
+      withStateAndBlck(state, bdata):
        when stateFork >= BeaconStateFork.Altair:
          # Cache data for `LightClientUpdate` of descendant blocks
          dag.cacheLightClientData(state, blck, isNew = false)

@@ -791,11 +791,11 @@ proc initLightClientCache*(dag: ChainDAGRef) =
         dag.getBlockAtSlot(checkpoint.epoch.start_slot).expect("TODO").blck
     if cpRef != nil and cpRef.slot >= earliestSlot:
       assert cpRef.bid.root == checkpoint.root
-      doAssert dag.updateStateData(
+      doAssert dag.updateState(
         tmpState[], cpRef.atSlot, save = false, tmpCache)
       withStateVars(tmpState[]):
-        let bdata = dag.getForkedBlock(blck.bid).get
-        withStateAndBlck(stateData.data, bdata):
+        let bdata = dag.getForkedBlock(cpRef.bid).get
+        withStateAndBlck(state, bdata):
          when stateFork >= BeaconStateFork.Altair:
            dag.cacheLightClientData(state, blck, isNew = false)
          else: raiseAssert "Unreachable"

@@ -880,7 +880,7 @@ proc getLightClientBootstrap*(
     if dag.importLightClientData == ImportLightClientData.OnDemand:
       var tmpState = assignClone(dag.headState)
       dag.withUpdatedState(tmpState[], dag.getBlockAtSlot(slot).expect("TODO")) do:
-        withState(stateData.data):
+        withState(state):
          when stateFork >= BeaconStateFork.Altair:
            state.data.build_proof(
              altair.CURRENT_SYNC_COMMITTEE_INDEX,

@@ -191,7 +191,7 @@ proc storeBlock*(
         vm[].registerAttestationInBlock(attestation.data, validator_index,
           trustedBlock.message)

-    withState(dag[].clearanceState.data):
+    withState(dag[].clearanceState):
       when stateFork >= BeaconStateFork.Altair and
           Trusted isnot phase0.TrustedSignedBeaconBlock: # altair+
         for i in trustedBlock.message.body.sync_aggregate.sync_committee_bits.oneIndices():

@@ -233,7 +233,7 @@ template validateBeaconBlockBellatrix(
   # to the slot -- i.e. execution_payload.timestamp ==
   # compute_timestamp_at_slot(state, block.slot).
   let timestampAtSlot =
-    withState(dag.headState.data):
+    withState(dag.headState):
       compute_timestamp_at_slot(state.data, signed_beacon_block.message.slot)
   if not (signed_beacon_block.message.body.execution_payload.timestamp ==
       timestampAtSlot):

@@ -340,8 +340,7 @@ proc validateBeaconBlock*(
   # compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) ==
   # store.finalized_checkpoint.root
   let
-    finalized_checkpoint = getStateField(
-      dag.headState.data, finalized_checkpoint)
+    finalized_checkpoint = getStateField(dag.headState, finalized_checkpoint)
     ancestor = get_ancestor(parent, finalized_checkpoint.epoch.start_slot)

   if ancestor.isNil:

@@ -378,7 +377,7 @@ proc validateBeaconBlock*(
   # with respect to the proposer_index pubkey.
   if not verify_block_signature(
       dag.forkAtEpoch(signed_beacon_block.message.slot.epoch),
-      getStateField(dag.headState.data, genesis_validators_root),
+      getStateField(dag.headState, genesis_validators_root),
       signed_beacon_block.message.slot,
       signed_beacon_block.root,
       dag.validatorKey(proposer.get()).get(),

@@ -497,7 +496,7 @@ proc validateAttestation*(
   let
     fork = pool.dag.forkAtEpoch(attestation.data.slot.epoch)
     genesis_validators_root =
-      getStateField(pool.dag.headState.data, genesis_validators_root)
+      getStateField(pool.dag.headState, genesis_validators_root)
     attesting_index = get_attesting_indices_one(
       epochRef, slot, committee_index, attestation.aggregation_bits)

@@ -691,7 +690,7 @@ proc validateAggregate*(
   let
     fork = pool.dag.forkAtEpoch(aggregate.data.slot.epoch)
     genesis_validators_root =
-      getStateField(pool.dag.headState.data, genesis_validators_root)
+      getStateField(pool.dag.headState, genesis_validators_root)
     attesting_indices = get_attesting_indices(
       epochRef, slot, committee_index, aggregate.aggregation_bits)

@@ -777,7 +776,7 @@ proc validateAttesterSlashing*(
   # [REJECT] All of the conditions within process_attester_slashing pass
   # validation.
   let attester_slashing_validity =
-    check_attester_slashing(pool.dag.headState.data, attester_slashing, {})
+    check_attester_slashing(pool.dag.headState, attester_slashing, {})
   if attester_slashing_validity.isErr:
     return err((ValidationResult.Reject, attester_slashing_validity.error))

@@ -800,7 +799,7 @@ proc validateProposerSlashing*(

   # [REJECT] All of the conditions within process_proposer_slashing pass validation.
   let proposer_slashing_validity =
-    check_proposer_slashing(pool.dag.headState.data, proposer_slashing, {})
+    check_proposer_slashing(pool.dag.headState, proposer_slashing, {})
   if proposer_slashing_validity.isErr:
     return err((ValidationResult.Reject, proposer_slashing_validity.error))

@@ -813,7 +812,7 @@ proc validateVoluntaryExit*(
   # [IGNORE] The voluntary exit is the first valid voluntary exit received for
   # the validator with index signed_voluntary_exit.message.validator_index.
   if signed_voluntary_exit.message.validator_index >=
-      getStateField(pool.dag.headState.data, validators).lenu64:
+      getStateField(pool.dag.headState, validators).lenu64:
     return errIgnore("VoluntaryExit: validator index too high")

   # Given that getStateField(pool.dag.headState, validators) is a seq,

@@ -826,7 +825,7 @@ proc validateVoluntaryExit*(
   # validation.
   let voluntary_exit_validity =
     check_voluntary_exit(
-      pool.dag.cfg, pool.dag.headState.data, signed_voluntary_exit, {})
+      pool.dag.cfg, pool.dag.headState, signed_voluntary_exit, {})
   if voluntary_exit_validity.isErr:
     return err((ValidationResult.Reject, voluntary_exit_validity.error))

@@ -395,7 +395,7 @@ proc init*(T: type BeaconNode,
       importLightClientData = config.importLightClientData)
     quarantine = newClone(Quarantine.init())
     databaseGenesisValidatorsRoot =
-      getStateField(dag.headState.data, genesis_validators_root)
+      getStateField(dag.headState, genesis_validators_root)

   if genesisStateContents.len != 0:
     let

@@ -408,8 +408,7 @@ proc init*(T: type BeaconNode,
       dataDir = config.dataDir
     quit 1

-  let beaconClock = BeaconClock.init(
-    getStateField(dag.headState.data, genesis_time))
+  let beaconClock = BeaconClock.init(getStateField(dag.headState, genesis_time))

   if config.weakSubjectivityCheckpoint.isSome:
     let

@@ -417,14 +416,14 @@ proc init*(T: type BeaconNode,
       isCheckpointStale = not is_within_weak_subjectivity_period(
         cfg,
         currentSlot,
-        dag.headState.data,
+        dag.headState,
         config.weakSubjectivityCheckpoint.get)

     if isCheckpointStale:
       error "Weak subjectivity checkpoint is stale",
         currentSlot,
         checkpoint = config.weakSubjectivityCheckpoint.get,
-        headStateSlot = getStateField(dag.headState.data, slot)
+        headStateSlot = getStateField(dag.headState, slot)
       quit 1

   if eth1Monitor.isNil and config.web3Urls.len > 0:

@@ -498,7 +497,7 @@ proc init*(T: type BeaconNode,
     getBeaconTime = beaconClock.getBeaconTimeFn()
     network = createEth2Node(
       rng, config, netKeys, cfg, dag.forkDigests, getBeaconTime,
-      getStateField(dag.headState.data, genesis_validators_root))
+      getStateField(dag.headState, genesis_validators_root))
     attestationPool = newClone(
       AttestationPool.init(
         dag, quarantine, onAttestationReceived, config.proposerBoosting))

@@ -534,7 +533,7 @@ proc init*(T: type BeaconNode,
   let
     slashingProtectionDB =
       SlashingProtectionDB.init(
-        getStateField(dag.headState.data, genesis_validators_root),
+        getStateField(dag.headState, genesis_validators_root),
         config.validatorsDir(), SlashingDbName)
     validatorPool = newClone(ValidatorPool.init(slashingProtectionDB))

@@ -795,7 +794,7 @@ proc addAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Sl
   # replaced as usual by trackSyncCommitteeTopics, which runs at slot end.
   let
     syncCommittee =
-      withState(node.dag.headState.data):
+      withState(node.dag.headState):
         when stateFork >= BeaconStateFork.Altair:
           state.data.current_sync_committee
         else:

@@ -839,7 +838,7 @@ proc trackCurrentSyncCommitteeTopics(node: BeaconNode, slot: Slot) =
   # for epoch alignment.
   let
     syncCommittee =
-      withState(node.dag.headState.data):
+      withState(node.dag.headState):
         when stateFork >= BeaconStateFork.Altair:
           state.data.current_sync_committee
         else:

@@ -895,7 +894,7 @@ proc trackNextSyncCommitteeTopics(node: BeaconNode, slot: Slot) =

   let
     syncCommittee =
-      withState(node.dag.headState.data):
+      withState(node.dag.headState):
        when stateFork >= BeaconStateFork.Altair:
          state.data.next_sync_committee
        else:

@@ -989,7 +988,7 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
   # it might also happen on a sufficiently fast restart

   # We "know" the actions for the current and the next epoch
-  withState(node.dag.headState.data):
+  withState(node.dag.headState):
     if node.actionTracker.needsUpdate(state, slot.epoch):
       let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect(
         "Getting head EpochRef should never fail")

@@ -1069,7 +1068,7 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
   # Update upcoming actions - we do this every slot in case a reorg happens
   let head = node.dag.head
   if node.isSynced(head):
-    withState(node.dag.headState.data):
+    withState(node.dag.headState):
      if node.actionTracker.needsUpdate(state, slot.epoch + 1):
        let epochRef = node.dag.getEpochRef(head, slot.epoch + 1, false).expect(
          "Getting head EpochRef should never fail")

@ -1158,7 +1157,7 @@ proc onSlotStart(
|
|||
peers = len(node.network.peerPool),
|
||||
head = shortLog(node.dag.head),
|
||||
finalized = shortLog(getStateField(
|
||||
node.dag.headState.data, finalized_checkpoint)),
|
||||
node.dag.headState, finalized_checkpoint)),
|
||||
delay = shortLog(delay)
|
||||
|
||||
# Check before any re-scheduling of onSlotStart()
|
||||
|
@ -1466,9 +1465,9 @@ proc start*(node: BeaconNode) {.raises: [Defect, CatchableError].} =
|
|||
node.beaconClock.now() - finalizedHead.slot.start_beacon_time(),
|
||||
head = shortLog(head),
|
||||
justified = shortLog(getStateField(
|
||||
node.dag.headState.data, current_justified_checkpoint)),
|
||||
node.dag.headState, current_justified_checkpoint)),
|
||||
finalized = shortLog(getStateField(
|
||||
node.dag.headState.data, finalized_checkpoint)),
|
||||
node.dag.headState, finalized_checkpoint)),
|
||||
finalizedHead = shortLog(finalizedHead),
|
||||
SLOTS_PER_EPOCH,
|
||||
SECONDS_PER_SLOT,
|
||||
|
@ -1519,7 +1518,7 @@ when not defined(windows):
|
|||
proc dataResolver(expr: string): string {.raises: [Defect].} =
|
||||
template justified: untyped = node.dag.head.atEpochStart(
|
||||
getStateField(
|
||||
node.dag.headState.data, current_justified_checkpoint).epoch)
|
||||
node.dag.headState, current_justified_checkpoint).epoch)
|
||||
# TODO:
|
||||
# We should introduce a general API for resolving dot expressions
|
||||
# such as `db.latest_block.slot` or `metrics.connected_peers`.
|
||||
|
|
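The hunks above and below all apply one repeated substitution: readers that used to go through `dag.headState.data` (and through the old `stateData` helpers) now pass the `ForkedHashedBeaconState` in `dag.headState` directly to `getStateField`, `getStateRoot`, `withState` and friends. A minimal before/after sketch of that pattern; hedged, `node` is assumed to be a `BeaconNode` as in the surrounding procs, and the lines are illustrative rather than an excerpt:

```
# Before: the head state sat behind a wrapper, so fields were read via `.data`:
#   getStateField(node.dag.headState.data, genesis_time)
# After: headState itself is the ForkedHashedBeaconState:
let genesisTime = getStateField(node.dag.headState, genesis_time)
```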
|
@ -105,9 +105,9 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
router.api(MethodGet, "/eth/v1/beacon/genesis") do () -> RestApiResponse:
|
||||
return RestApiResponse.jsonResponse(
|
||||
(
|
||||
genesis_time: getStateField(node.dag.headState.data, genesis_time),
|
||||
genesis_time: getStateField(node.dag.headState, genesis_time),
|
||||
genesis_validators_root:
|
||||
getStateField(node.dag.headState.data, genesis_validators_root),
|
||||
getStateField(node.dag.headState, genesis_validators_root),
|
||||
genesis_fork_version: node.dag.cfg.GENESIS_FORK_VERSION
|
||||
)
|
||||
)
|
||||
|
@ -150,12 +150,9 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
node.withStateForBlockSlot(bslot):
|
||||
return RestApiResponse.jsonResponse(
|
||||
(
|
||||
previous_version:
|
||||
getStateField(stateData.data, fork).previous_version,
|
||||
current_version:
|
||||
getStateField(stateData.data, fork).current_version,
|
||||
epoch:
|
||||
getStateField(stateData.data, fork).epoch
|
||||
previous_version: getStateField(state, fork).previous_version,
|
||||
current_version: getStateField(state, fork).current_version,
|
||||
epoch: getStateField(state, fork).epoch
|
||||
)
|
||||
)
|
||||
return RestApiResponse.jsonError(Http404, StateNotFoundError)
|
||||
|
@ -180,10 +177,10 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
return RestApiResponse.jsonResponse(
|
||||
(
|
||||
previous_justified:
|
||||
getStateField(stateData.data, previous_justified_checkpoint),
|
||||
getStateField(state, previous_justified_checkpoint),
|
||||
current_justified:
|
||||
getStateField(stateData.data, current_justified_checkpoint),
|
||||
finalized: getStateField(stateData.data, finalized_checkpoint)
|
||||
getStateField(state, current_justified_checkpoint),
|
||||
finalized: getStateField(state, finalized_checkpoint)
|
||||
)
|
||||
)
|
||||
return RestApiResponse.jsonError(Http404, StateNotFoundError)
|
||||
|
@ -228,8 +225,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
|
||||
node.withStateForBlockSlot(bslot):
|
||||
let
|
||||
current_epoch = getStateField(stateData.data, slot).epoch()
|
||||
validatorsCount = lenu64(getStateField(stateData.data, validators))
|
||||
current_epoch = getStateField(state, slot).epoch()
|
||||
validatorsCount = lenu64(getStateField(state, validators))
|
||||
|
||||
let indices =
|
||||
block:
|
||||
|
@ -259,7 +256,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
indexset.incl(vindex)
|
||||
|
||||
if len(keyset) > 0:
|
||||
let optIndices = keysToIndices(node.restKeysCache, stateData.data,
|
||||
let optIndices = keysToIndices(node.restKeysCache, state,
|
||||
keyset.toSeq())
|
||||
# Remove all the duplicates.
|
||||
for item in optIndices:
|
||||
|
@ -277,10 +274,10 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
# return empty response.
|
||||
if len(validatorIds) == 0:
|
||||
# There are no indices, so we are going to filter all the validators.
|
||||
for index, validator in getStateField(stateData.data,
|
||||
for index, validator in getStateField(state,
|
||||
validators).pairs():
|
||||
let
|
||||
balance = getStateField(stateData.data, balances).asSeq()[index]
|
||||
balance = getStateField(state, balances).asSeq()[index]
|
||||
status =
|
||||
block:
|
||||
let sres = validator.getStatus(current_epoch)
|
||||
|
@ -295,8 +292,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
else:
|
||||
for index in indices:
|
||||
let
|
||||
validator = getStateField(stateData.data, validators).asSeq()[index]
|
||||
balance = getStateField(stateData.data, balances).asSeq()[index]
|
||||
validator = getStateField(state, validators).asSeq()[index]
|
||||
balance = getStateField(state, balances).asSeq()[index]
|
||||
status =
|
||||
block:
|
||||
let sres = validator.getStatus(current_epoch)
|
||||
|
@ -333,15 +330,15 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
|
||||
node.withStateForBlockSlot(bslot):
|
||||
let
|
||||
current_epoch = getStateField(stateData.data, slot).epoch()
|
||||
validatorsCount = lenu64(getStateField(stateData.data, validators))
|
||||
current_epoch = getStateField(state, slot).epoch()
|
||||
validatorsCount = lenu64(getStateField(state, validators))
|
||||
|
||||
let vindex =
|
||||
block:
|
||||
let vid = validator_id.get()
|
||||
case vid.kind
|
||||
of ValidatorQueryKind.Key:
|
||||
let optIndices = keysToIndices(node.restKeysCache, stateData.data,
|
||||
let optIndices = keysToIndices(node.restKeysCache, state,
|
||||
[vid.key])
|
||||
if optIndices[0].isNone():
|
||||
return RestApiResponse.jsonError(Http404, ValidatorNotFoundError)
|
||||
|
@ -362,8 +359,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
index
|
||||
|
||||
let
|
||||
validator = getStateField(stateData.data, validators).asSeq()[vindex]
|
||||
balance = getStateField(stateData.data, balances).asSeq()[vindex]
|
||||
validator = getStateField(state, validators).asSeq()[vindex]
|
||||
balance = getStateField(state, balances).asSeq()[vindex]
|
||||
status =
|
||||
block:
|
||||
let sres = validator.getStatus(current_epoch)
|
||||
|
@ -405,7 +402,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
ires
|
||||
|
||||
node.withStateForBlockSlot(bslot):
|
||||
let validatorsCount = lenu64(getStateField(stateData.data, validators))
|
||||
let validatorsCount = lenu64(getStateField(state, validators))
|
||||
|
||||
let indices =
|
||||
block:
|
||||
|
@ -434,7 +431,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
indexset.incl(vindex)
|
||||
|
||||
if len(keyset) > 0:
|
||||
let optIndices = keysToIndices(node.restKeysCache, stateData.data,
|
||||
let optIndices = keysToIndices(node.restKeysCache, state,
|
||||
keyset.toSeq())
|
||||
# Remove all the duplicates.
|
||||
for item in optIndices:
|
||||
|
@ -453,13 +450,12 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
if len(validatorIds) == 0:
|
||||
# There are no indices, so we are going to return balances of all
|
||||
# known validators.
|
||||
for index, balance in getStateField(stateData.data,
|
||||
balances).pairs():
|
||||
for index, balance in getStateField(state, balances).asSeq.pairs():
|
||||
res.add(RestValidatorBalance.init(ValidatorIndex(index),
|
||||
balance))
|
||||
else:
|
||||
for index in indices:
|
||||
let balance = getStateField(stateData.data, balances).asSeq()[index]
|
||||
let balance = getStateField(state, balances).asSeq()[index]
|
||||
res.add(RestValidatorBalance.init(index, balance))
|
||||
res
|
||||
return RestApiResponse.jsonResponse(response)
|
||||
|
@ -544,15 +540,14 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
node.withStateForBlockSlot(bslot):
|
||||
proc getCommittee(slot: Slot,
|
||||
index: CommitteeIndex): RestBeaconStatesCommittees =
|
||||
let validators = get_beacon_committee(stateData.data, slot, index,
|
||||
cache)
|
||||
let validators = get_beacon_committee(state, slot, index, cache)
|
||||
RestBeaconStatesCommittees(index: index, slot: slot,
|
||||
validators: validators)
|
||||
|
||||
proc forSlot(slot: Slot, cindex: Option[CommitteeIndex],
|
||||
res: var seq[RestBeaconStatesCommittees]) =
|
||||
let committees_per_slot = get_committee_count_per_slot(
|
||||
stateData.data, slot.epoch, cache)
|
||||
state, slot.epoch, cache)
|
||||
|
||||
if cindex.isNone:
|
||||
for committee_index in get_committee_indices(committees_per_slot):
|
||||
|
@ -566,7 +561,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
var res: seq[RestBeaconStatesCommittees]
|
||||
let qepoch =
|
||||
if vepoch.isNone:
|
||||
epoch(getStateField(stateData.data, slot))
|
||||
epoch(getStateField(state, slot))
|
||||
else:
|
||||
vepoch.get()
|
||||
|
||||
|
@ -617,7 +612,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
node.withStateForBlockSlot(bslot):
|
||||
let keys =
|
||||
block:
|
||||
let res = syncCommitteeParticipants(stateData().data, qepoch)
|
||||
let res = syncCommitteeParticipants(state, qepoch)
|
||||
if res.isErr():
|
||||
return RestApiResponse.jsonError(Http400,
|
||||
$res.error())
|
||||
|
@ -630,8 +625,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
let indices =
|
||||
block:
|
||||
var res: seq[ValidatorIndex]
|
||||
let optIndices = keysToIndices(node.restKeysCache, stateData().data,
|
||||
keys)
|
||||
let optIndices = keysToIndices(node.restKeysCache, state, keys)
|
||||
# Remove all the duplicates.
|
||||
for item in optIndices:
|
||||
if item.isNone():
|
||||
|
|
|
@ -39,12 +39,12 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
res.get()
|
||||
node.withStateForBlockSlot(bslot):
|
||||
return
|
||||
case stateData.data.kind
|
||||
case state.kind
|
||||
of BeaconStateFork.Phase0:
|
||||
if contentType == sszMediaType:
|
||||
RestApiResponse.sszResponse(stateData.data.phase0Data.data)
|
||||
RestApiResponse.sszResponse(state.phase0Data.data)
|
||||
elif contentType == jsonMediaType:
|
||||
RestApiResponse.jsonResponse(stateData.data.phase0Data.data)
|
||||
RestApiResponse.jsonResponse(state.phase0Data.data)
|
||||
else:
|
||||
RestApiResponse.jsonError(Http500, InvalidAcceptError)
|
||||
of BeaconStateFork.Altair, BeaconStateFork.Bellatrix:
|
||||
|
@ -75,9 +75,9 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
node.withStateForBlockSlot(bslot):
|
||||
return
|
||||
if contentType == jsonMediaType:
|
||||
RestApiResponse.jsonResponsePlain(stateData.data)
|
||||
RestApiResponse.jsonResponsePlain(state)
|
||||
elif contentType == sszMediaType:
|
||||
withState(stateData.data):
|
||||
withState(state):
|
||||
RestApiResponse.sszResponse(state.data)
|
||||
else:
|
||||
RestApiResponse.jsonError(Http500, InvalidAcceptError)
|
||||
|
|
|
@ -118,9 +118,9 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
router.api(MethodGet, "/nimbus/v1/chain/head") do() -> RestApiResponse:
|
||||
let
|
||||
head = node.dag.head
|
||||
finalized = getStateField(node.dag.headState.data, finalized_checkpoint)
|
||||
finalized = getStateField(node.dag.headState, finalized_checkpoint)
|
||||
justified =
|
||||
getStateField(node.dag.headState.data, current_justified_checkpoint)
|
||||
getStateField(node.dag.headState, current_justified_checkpoint)
|
||||
return RestApiResponse.jsonResponse(
|
||||
(
|
||||
head_slot: head.slot,
|
||||
|
@ -232,7 +232,7 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
let proposalState = assignClone(node.dag.headState)
|
||||
node.dag.withUpdatedState(proposalState[], head.atSlot(wallSlot)) do:
|
||||
return RestApiResponse.jsonResponse(
|
||||
node.getBlockProposalEth1Data(stateData.data))
|
||||
node.getBlockProposalEth1Data(state))
|
||||
do:
|
||||
return RestApiResponse.jsonError(Http400, PrunedStateError)
|
||||
|
||||
|
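Handlers that need a state other than the current head keep the same shape after the rename: clone the head state, let `withUpdatedState` move the clone to the requested block and slot, and use the injected `state` variable in the first branch, with the second branch covering the pruned/unavailable case. A hedged sketch of such a call site, assuming `node`, `head` and `wallSlot` from the surrounding scope:

```
let workState = assignClone(node.dag.headState)
node.dag.withUpdatedState(workState[], head.atSlot(wallSlot)) do:
  # `state` is the ForkedHashedBeaconState advanced to head.atSlot(wallSlot)
  let proposalSlot = getStateField(state, slot)
  discard proposalSlot # use the state here, e.g. to build a proposal
do:
  discard # state for the requested block/slot is not available
```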
|
|
@ -70,8 +70,8 @@ proc getBlockSlot*(node: BeaconNode,
|
|||
else:
|
||||
err("State for given slot not found, history not available?")
|
||||
of StateQueryKind.Root:
|
||||
if stateIdent.root == getStateRoot(node.dag.headState.data):
|
||||
ok(node.dag.headState.blck.atSlot())
|
||||
if stateIdent.root == getStateRoot(node.dag.headState):
|
||||
ok(node.dag.head.atSlot())
|
||||
else:
|
||||
# We don't have a state root -> BlockSlot mapping
|
||||
err("State for given root not found")
|
||||
|
@ -85,7 +85,7 @@ proc getBlockSlot*(node: BeaconNode,
|
|||
ok(node.dag.finalizedHead)
|
||||
of StateIdentType.Justified:
|
||||
ok(node.dag.head.atEpochStart(getStateField(
|
||||
node.dag.headState.data, current_justified_checkpoint).epoch))
|
||||
node.dag.headState, current_justified_checkpoint).epoch))
|
||||
|
||||
proc getBlockId*(node: BeaconNode, id: BlockIdent): Opt[BlockId] =
|
||||
case id.kind
|
||||
|
@ -140,8 +140,8 @@ template withStateForBlockSlot*(nodeParam: BeaconNode,
|
|||
node = nodeParam
|
||||
blockSlot = blockSlotParam
|
||||
|
||||
template isState(state: StateData): bool =
|
||||
state.blck.atSlot(getStateField(state.data, slot)) == blockSlot
|
||||
template isState(state: ForkedHashedBeaconState): bool =
|
||||
state.matches_block_slot(blockSlot.blck.root, blockSlot.slot)
|
||||
|
||||
var cache {.inject, used.}: StateCache
|
||||
|
||||
|
@ -175,7 +175,7 @@ template withStateForBlockSlot*(nodeParam: BeaconNode,
|
|||
else:
|
||||
assignClone(node.dag.headState)
|
||||
|
||||
if node.dag.updateStateData(stateToAdvance[], blockSlot, false, cache):
|
||||
if node.dag.updateState(stateToAdvance[], blockSlot, false, cache):
|
||||
if cachedState == nil and node.stateTtlCache != nil:
|
||||
# This was not a cached state, we can cache it now
|
||||
node.stateTtlCache.add(stateToAdvance)
|
||||
|
|
|
@ -225,7 +225,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
headSyncPeriod = sync_committee_period(headEpoch)
|
||||
|
||||
if qSyncPeriod == headSyncPeriod:
|
||||
let res = withState(node.dag.headState.data):
|
||||
let res = withState(node.dag.headState):
|
||||
when stateFork >= BeaconStateFork.Altair:
|
||||
produceResponse(indexList,
|
||||
state.data.current_sync_committee.pubkeys.data,
|
||||
|
@ -234,7 +234,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
emptyResponse()
|
||||
return RestApiResponse.jsonResponse(res)
|
||||
elif qSyncPeriod == (headSyncPeriod + 1):
|
||||
let res = withState(node.dag.headState.data):
|
||||
let res = withState(node.dag.headState):
|
||||
when stateFork >= BeaconStateFork.Altair:
|
||||
produceResponse(indexList,
|
||||
state.data.next_sync_committee.pubkeys.data,
|
||||
|
@ -264,7 +264,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
return RestApiResponse.jsonError(Http404, StateNotFoundError)
|
||||
|
||||
node.withStateForBlockSlot(bs):
|
||||
let res = withState(stateData().data):
|
||||
let res = withState(state):
|
||||
when stateFork >= BeaconStateFork.Altair:
|
||||
produceResponse(indexList,
|
||||
state.data.current_sync_committee.pubkeys.data,
|
||||
|
@ -531,7 +531,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
return RestApiResponse.jsonError(Http400,
|
||||
InvalidCommitteeIndexValueError)
|
||||
if uint64(request.validator_index) >=
|
||||
lenu64(getStateField(node.dag.headState.data, validators)):
|
||||
lenu64(getStateField(node.dag.headState, validators)):
|
||||
return RestApiResponse.jsonError(Http400,
|
||||
InvalidValidatorIndexValueError)
|
||||
if wallSlot > request.slot + 1:
|
||||
|
@ -555,7 +555,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
request.is_aggregator)
|
||||
|
||||
let validator_pubkey = getStateField(
|
||||
node.dag.headState.data, validators).asSeq()[request.validator_index].pubkey
|
||||
node.dag.headState, validators).asSeq()[request.validator_index].pubkey
|
||||
|
||||
node.validatorMonitor[].addAutoMonitor(
|
||||
validator_pubkey, ValidatorIndex(request.validator_index))
|
||||
|
@ -583,11 +583,11 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
|
|||
return RestApiResponse.jsonError(Http400,
|
||||
EpochFromTheIncorrectForkError)
|
||||
if uint64(item.validator_index) >=
|
||||
lenu64(getStateField(node.dag.headState.data, validators)):
|
||||
lenu64(getStateField(node.dag.headState, validators)):
|
||||
return RestApiResponse.jsonError(Http400,
|
||||
InvalidValidatorIndexValueError)
|
||||
let validator_pubkey = getStateField(
|
||||
node.dag.headState.data, validators).asSeq()[item.validator_index].pubkey
|
||||
node.dag.headState, validators).asSeq()[item.validator_index].pubkey
|
||||
|
||||
node.syncCommitteeMsgPool.syncCommitteeSubscriptions[validator_pubkey] =
|
||||
item.until_epoch
|
||||
|
|
|
@ -175,9 +175,9 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
raises: [Defect, CatchableError].} =
|
||||
rpcServer.rpc("get_v1_beacon_genesis") do () -> RpcBeaconGenesis:
|
||||
return (
|
||||
genesis_time: getStateField(node.dag.headState.data, genesis_time),
|
||||
genesis_time: getStateField(node.dag.headState, genesis_time),
|
||||
genesis_validators_root:
|
||||
getStateField(node.dag.headState.data, genesis_validators_root),
|
||||
getStateField(node.dag.headState, genesis_validators_root),
|
||||
genesis_fork_version: node.dag.cfg.GENESIS_FORK_VERSION
|
||||
)
|
||||
|
||||
|
@ -187,23 +187,23 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
|
||||
rpcServer.rpc("get_v1_beacon_states_fork") do (stateId: string) -> Fork:
|
||||
withStateForStateId(stateId):
|
||||
return getStateField(stateData.data, fork)
|
||||
return getStateField(state, fork)
|
||||
|
||||
rpcServer.rpc("get_v1_beacon_states_finality_checkpoints") do (
|
||||
stateId: string) -> RpcBeaconStatesFinalityCheckpoints:
|
||||
withStateForStateId(stateId):
|
||||
return (previous_justified:
|
||||
getStateField(stateData.data, previous_justified_checkpoint),
|
||||
getStateField(state, previous_justified_checkpoint),
|
||||
current_justified:
|
||||
getStateField(stateData.data, current_justified_checkpoint),
|
||||
finalized: getStateField(stateData.data, finalized_checkpoint))
|
||||
getStateField(state, current_justified_checkpoint),
|
||||
finalized: getStateField(state, finalized_checkpoint))
|
||||
|
||||
rpcServer.rpc("get_v1_beacon_states_stateId_validators") do (
|
||||
stateId: string, validatorIds: Option[seq[string]],
|
||||
status: Option[seq[string]]) -> seq[RpcBeaconStatesValidators]:
|
||||
var vquery: ValidatorQuery
|
||||
var squery: StatusQuery
|
||||
let current_epoch = getStateField(node.dag.headState.data, slot).epoch
|
||||
let current_epoch = getStateField(node.dag.headState, slot).epoch
|
||||
|
||||
template statusCheck(status, statusQuery, vstatus, current_epoch): bool =
|
||||
if status.isNone():
|
||||
|
@ -230,7 +230,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
vquery = vqres.get()
|
||||
|
||||
if validatorIds.isNone():
|
||||
for index, validator in getStateField(stateData.data, validators).pairs():
|
||||
for index, validator in getStateField(state, validators).pairs():
|
||||
let sres = validator.getStatus(current_epoch)
|
||||
if sres.isOk:
|
||||
let vstatus = sres.get()
|
||||
|
@ -240,11 +240,11 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
res.add((validator: validator,
|
||||
index: uint64(index),
|
||||
status: vstatus,
|
||||
balance: getStateField(stateData.data, balances).asSeq()[index]))
|
||||
balance: getStateField(state, balances).asSeq()[index]))
|
||||
else:
|
||||
for index in vquery.ids:
|
||||
if index < lenu64(getStateField(stateData.data, validators)):
|
||||
let validator = getStateField(stateData.data, validators).asSeq()[index]
|
||||
if index < lenu64(getStateField(state, validators)):
|
||||
let validator = getStateField(state, validators).asSeq()[index]
|
||||
let sres = validator.getStatus(current_epoch)
|
||||
if sres.isOk:
|
||||
let vstatus = sres.get()
|
||||
|
@ -255,9 +255,9 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
res.add((validator: validator,
|
||||
index: uint64(index),
|
||||
status: vstatus,
|
||||
balance: getStateField(stateData.data, balances).asSeq()[index]))
|
||||
balance: getStateField(state, balances).asSeq()[index]))
|
||||
|
||||
for index, validator in getStateField(stateData.data, validators).pairs():
|
||||
for index, validator in getStateField(state, validators).pairs():
|
||||
if validator.pubkey in vquery.keyset:
|
||||
let sres = validator.getStatus(current_epoch)
|
||||
if sres.isOk:
|
||||
|
@ -268,12 +268,12 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
res.add((validator: validator,
|
||||
index: uint64(index),
|
||||
status: vstatus,
|
||||
balance: getStateField(stateData.data, balances).asSeq()[index]))
|
||||
balance: getStateField(state, balances).asSeq()[index]))
|
||||
return res
|
||||
|
||||
rpcServer.rpc("get_v1_beacon_states_stateId_validators_validatorId") do (
|
||||
stateId: string, validatorId: string) -> RpcBeaconStatesValidators:
|
||||
let current_epoch = getStateField(node.dag.headState.data, slot).epoch
|
||||
let current_epoch = getStateField(node.dag.headState, slot).epoch
|
||||
let vqres = createIdQuery([validatorId])
|
||||
if vqres.isErr:
|
||||
raise newException(CatchableError, $vqres.error)
|
||||
|
@ -282,23 +282,23 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
withStateForStateId(stateId):
|
||||
if len(vquery.ids) > 0:
|
||||
let index = vquery.ids[0]
|
||||
if index < lenu64(getStateField(stateData.data, validators)):
|
||||
let validator = getStateField(stateData.data, validators).asSeq()[index]
|
||||
if index < lenu64(getStateField(state, validators)):
|
||||
let validator = getStateField(state, validators).asSeq()[index]
|
||||
let sres = validator.getStatus(current_epoch)
|
||||
if sres.isOk:
|
||||
return (validator: validator, index: uint64(index),
|
||||
status: sres.get(),
|
||||
balance: getStateField(stateData.data, balances).asSeq()[index])
|
||||
balance: getStateField(state, balances).asSeq()[index])
|
||||
else:
|
||||
raise newException(CatchableError, "Incorrect validator's state")
|
||||
else:
|
||||
for index, validator in getStateField(stateData.data, validators).pairs():
|
||||
for index, validator in getStateField(state, validators).pairs():
|
||||
if validator.pubkey in vquery.keyset:
|
||||
let sres = validator.getStatus(current_epoch)
|
||||
if sres.isOk:
|
||||
return (validator: validator, index: uint64(index),
|
||||
status: sres.get(),
|
||||
balance: getStateField(stateData.data, balances).asSeq()[index])
|
||||
balance: getStateField(state, balances).asSeq()[index])
|
||||
else:
|
||||
raise newException(CatchableError, "Incorrect validator's state")
|
||||
|
||||
|
@ -308,7 +308,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
var res: seq[RpcBalance]
|
||||
withStateForStateId(stateId):
|
||||
if validatorsId.isNone():
|
||||
for index, value in getStateField(stateData.data, balances).pairs():
|
||||
for index, value in getStateField(state, balances).pairs():
|
||||
let balance = (index: uint64(index), balance: value)
|
||||
res.add(balance)
|
||||
else:
|
||||
|
@ -318,17 +318,17 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
|
||||
var vquery = vqres.get()
|
||||
for index in vquery.ids:
|
||||
if index < lenu64(getStateField(stateData.data, validators)):
|
||||
let validator = getStateField(stateData.data, validators).asSeq()[index]
|
||||
if index < lenu64(getStateField(state, validators)):
|
||||
let validator = getStateField(state, validators).asSeq()[index]
|
||||
vquery.keyset.excl(validator.pubkey)
|
||||
let balance = (index: uint64(index),
|
||||
balance: getStateField(stateData.data, balances).asSeq()[index])
|
||||
balance: getStateField(state, balances).asSeq()[index])
|
||||
res.add(balance)
|
||||
|
||||
for index, validator in getStateField(stateData.data, validators).pairs():
|
||||
for index, validator in getStateField(state, validators).pairs():
|
||||
if validator.pubkey in vquery.keyset:
|
||||
let balance = (index: uint64(index),
|
||||
balance: getStateField(stateData.data, balances).asSeq()[index])
|
||||
balance: getStateField(state, balances).asSeq()[index])
|
||||
res.add(balance)
|
||||
return res
|
||||
|
||||
|
@ -339,12 +339,12 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
proc getCommittee(slot: Slot,
|
||||
index: CommitteeIndex): RpcBeaconStatesCommittees =
|
||||
let vals = get_beacon_committee(
|
||||
stateData.data, slot, index, cache).mapIt(it.uint64)
|
||||
state, slot, index, cache).mapIt(it.uint64)
|
||||
return (index: index.uint64, slot: slot.uint64, validators: vals)
|
||||
|
||||
proc forSlot(slot: Slot, res: var seq[RpcBeaconStatesCommittees]) =
|
||||
let committees_per_slot =
|
||||
get_committee_count_per_slot(stateData.data, slot.epoch, cache)
|
||||
get_committee_count_per_slot(state, slot.epoch, cache)
|
||||
|
||||
if index.isNone:
|
||||
for committee_index in get_committee_indices(committees_per_slot):
|
||||
|
@ -359,7 +359,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
|
||||
let qepoch =
|
||||
if epoch.isNone:
|
||||
epoch(getStateField(stateData.data, slot))
|
||||
epoch(getStateField(state, slot))
|
||||
else:
|
||||
Epoch(epoch.get())
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ type
|
|||
proc installConfigApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
||||
raises: [Defect, CatchableError].} =
|
||||
rpcServer.rpc("get_v1_config_fork_schedule") do () -> seq[Fork]:
|
||||
return @[getStateField(node.dag.headState.data, fork)]
|
||||
return @[getStateField(node.dag.headState, fork)]
|
||||
|
||||
rpcServer.rpc("get_v1_config_spec") do () -> JsonNode:
|
||||
return %*{
|
||||
|
|
|
@ -26,8 +26,8 @@ proc installDebugApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
rpcServer.rpc("get_v1_debug_beacon_states_stateId") do (
|
||||
stateId: string) -> phase0.BeaconState:
|
||||
withStateForStateId(stateId):
|
||||
if stateData.data.kind == BeaconStateFork.Phase0:
|
||||
return stateData.data.phase0Data.data
|
||||
if state.kind == BeaconStateFork.Phase0:
|
||||
return state.phase0Data.data
|
||||
else:
|
||||
raiseNoAltairSupport()
|
||||
|
||||
|
|
|
@ -47,9 +47,9 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
rpcServer.rpc("getChainHead") do () -> JsonNode:
|
||||
let
|
||||
head = node.dag.head
|
||||
finalized = getStateField(node.dag.headState.data, finalized_checkpoint)
|
||||
finalized = getStateField(node.dag.headState, finalized_checkpoint)
|
||||
justified =
|
||||
getStateField(node.dag.headState.data, current_justified_checkpoint)
|
||||
getStateField(node.dag.headState, current_justified_checkpoint)
|
||||
return %* {
|
||||
"head_slot": head.slot,
|
||||
"head_block_root": head.root.data.toHex(),
|
||||
|
@ -109,7 +109,7 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
|
||||
let proposalState = assignClone(node.dag.headState)
|
||||
node.dag.withUpdatedState(proposalState[], head.atSlot(wallSlot)):
|
||||
return node.getBlockProposalEth1Data(stateData.data)
|
||||
return node.getBlockProposalEth1Data(state)
|
||||
do:
|
||||
raise (ref CatchableError)(msg: "Trying to access pruned state")
|
||||
|
||||
|
|
|
@ -26,8 +26,8 @@ template withStateForStateId*(stateId: string, body: untyped): untyped =
|
|||
let
|
||||
bs = node.stateIdToBlockSlot(stateId)
|
||||
|
||||
template isState(state: StateData): bool =
|
||||
state.blck.atSlot(getStateField(state.data, slot)) == bs
|
||||
template isState(state: ForkedHashedBeaconState): bool =
|
||||
state.matches_block_slot(bs.blck.root, bs.slot)
|
||||
|
||||
if isState(node.dag.headState):
|
||||
withStateVars(node.dag.headState):
|
||||
|
@ -94,12 +94,12 @@ proc stateIdToBlockSlot*(node: BeaconNode, stateId: string): BlockSlot {.raises:
|
|||
node.dag.finalizedHead
|
||||
of "justified":
|
||||
node.dag.head.atEpochStart(
|
||||
getStateField(node.dag.headState.data, current_justified_checkpoint).epoch)
|
||||
getStateField(node.dag.headState, current_justified_checkpoint).epoch)
|
||||
else:
|
||||
if stateId.startsWith("0x"):
|
||||
let stateRoot = parseRoot(stateId)
|
||||
if stateRoot == getStateRoot(node.dag.headState.data):
|
||||
node.dag.headState.blck.atSlot()
|
||||
if stateRoot == getStateRoot(node.dag.headState):
|
||||
node.dag.head.atSlot()
|
||||
else:
|
||||
# We don't have a state root -> BlockSlot mapping
|
||||
raise (ref ValueError)(msg: "State not found")
|
||||
|
|
|
@ -146,8 +146,8 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
|
|||
"Slot requested not in current or next wall-slot epoch")
|
||||
|
||||
if not verify_slot_signature(
|
||||
getStateField(node.dag.headState.data, fork),
|
||||
getStateField(node.dag.headState.data, genesis_validators_root),
|
||||
getStateField(node.dag.headState, fork),
|
||||
getStateField(node.dag.headState, genesis_validators_root),
|
||||
slot, validator_pubkey, slot_signature):
|
||||
raise newException(CatchableError,
|
||||
"Invalid slot signature")
|
||||
|
|
|
@ -8,11 +8,12 @@
|
|||
import
|
||||
chronos,
|
||||
chronicles,
|
||||
../spec/beaconstate,
|
||||
../consensus_object_pools/block_pools_types
|
||||
|
||||
type
|
||||
CacheEntry = ref object
|
||||
state: ref StateData
|
||||
state: ref ForkedHashedBeaconState
|
||||
lastUsed: Moment
|
||||
|
||||
# This is ref object because we need to capture it by
|
||||
|
@ -49,7 +50,7 @@ proc scheduleEntryExpiration(cache: StateTtlCache,
|
|||
|
||||
discard setTimer(Moment.now + cache.ttl, removeElement)
|
||||
|
||||
proc add*(cache: StateTtlCache, state: ref StateData) =
|
||||
proc add*(cache: StateTtlCache, state: ref ForkedHashedBeaconState) =
|
||||
var
|
||||
now = Moment.now
|
||||
lruTime = now
|
||||
|
@ -69,7 +70,8 @@ proc add*(cache: StateTtlCache, state: ref StateData) =
|
|||
|
||||
cache.scheduleEntryExpiration(index)
|
||||
|
||||
proc getClosestState*(cache: StateTtlCache, bs: BlockSlot): ref StateData =
|
||||
proc getClosestState*(
|
||||
cache: StateTtlCache, bs: BlockSlot): ref ForkedHashedBeaconState =
|
||||
var
|
||||
bestSlotDifference = Slot.high
|
||||
index = -1
|
||||
|
@ -78,7 +80,7 @@ proc getClosestState*(cache: StateTtlCache, bs: BlockSlot): ref StateData =
|
|||
if cache.entries[i] == nil:
|
||||
continue
|
||||
|
||||
let stateSlot = getStateField(cache.entries[i].state.data, slot)
|
||||
let stateSlot = getStateField(cache.entries[i][].state[], slot)
|
||||
if stateSlot > bs.slot:
|
||||
# We can use only states that can be advanced forward in time.
|
||||
continue
|
||||
|
@ -92,7 +94,7 @@ proc getClosestState*(cache: StateTtlCache, bs: BlockSlot): ref StateData =
|
|||
for j in 0 ..< slotDifference:
|
||||
cur = cur.parentOrSlot
|
||||
|
||||
if cur.blck != cache.entries[i].state.blck:
|
||||
if not cache.entries[i].state[].matches_block(cur.blck.root):
|
||||
# The cached state and the requested BlockSlot are at different branches
|
||||
# of history.
|
||||
continue
|
||||
|
|
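With the wrapper gone, the state cache can no longer compare a tracked `blck` field to decide whether a cached entry lies on the history leading to the requested `BlockSlot`; instead it rewinds the request to the cached state's slot and asks the state itself via `matches_block`. A hedged sketch of that check factored into a standalone helper (the proc name is illustrative; the called helpers are the ones used in this diff):

```
proc onSameBranch(cached: ForkedHashedBeaconState, bs: BlockSlot): bool =
  # Only states at or before the requested slot can be advanced forward.
  if getStateField(cached, slot) > bs.slot:
    return false
  # Walk the requested BlockSlot back to the slot of the cached state ...
  var cur = bs
  while cur.slot > getStateField(cached, slot):
    cur = cur.parentOrSlot
  # ... then check that the last block applied to the cached state matches.
  cached.matches_block(cur.blck.root)
```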
|
@ -945,6 +945,9 @@ func latest_block_root*(state: ForkyBeaconState, state_root: Eth2Digest): Eth2Di
|
|||
func latest_block_root*(state: ForkyHashedBeaconState): Eth2Digest =
|
||||
latest_block_root(state.data, state.root)
|
||||
|
||||
func latest_block_root*(state: ForkedHashedBeaconState): Eth2Digest =
|
||||
withState(state): latest_block_root(state)
|
||||
|
||||
func get_sync_committee_cache*(
|
||||
state: altair.BeaconState | bellatrix.BeaconState, cache: var StateCache):
|
||||
SyncCommitteeCache =
|
||||
|
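The `ForkedHashedBeaconState` overload added here means the root of the most recently applied block can always be derived from the state itself, which is what the block-production template further down relies on when it fills in `parent_root`. A hedged one-line sketch, assuming `someState` is a `ForkedHashedBeaconState`:

```
# The parent root for a block built on `someState` is simply the root of the
# last block applied to it; no separate bookkeeping is required.
let parentRoot = someState.latest_block_root
```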
@ -1001,3 +1004,30 @@ func proposer_dependent_root*(state: ForkyHashedBeaconState): Eth2Digest =
|
|||
func attester_dependent_root*(state: ForkyHashedBeaconState): Eth2Digest =
|
||||
let epoch = state.data.slot.epoch
|
||||
state.dependent_root(if epoch == Epoch(0): epoch else: epoch - 1)
|
||||
|
||||
func matches_block*(
|
||||
state: ForkyHashedBeaconState, block_root: Eth2Digest): bool =
|
||||
## Return true iff the latest block applied to this state matches the given
|
||||
## `block_root`
|
||||
block_root == state.latest_block_root
|
||||
func matches_block*(
|
||||
state: ForkedHashedBeaconState, block_root: Eth2Digest): bool =
|
||||
withState(state): state.matches_block(block_root)
|
||||
|
||||
func matches_block_slot*(
|
||||
state: ForkyHashedBeaconState, block_root: Eth2Digest, slot: Slot): bool =
|
||||
## Return true iff the latest block applied to this state matches the given
|
||||
## `block_root` and the state slot has been advanced to the given slot
|
||||
slot == state.data.slot and block_root == state.latest_block_root
|
||||
func matches_block_slot*(
|
||||
state: ForkedHashedBeaconState, block_root: Eth2Digest, slot: Slot): bool =
|
||||
withState(state): state.matches_block_slot(block_root, slot)
|
||||
|
||||
func can_advance_slots*(
|
||||
state: ForkyHashedBeaconState, block_root: Eth2Digest, target_slot: Slot): bool =
|
||||
## Return true iff we can reach the given block/slot combination simply by
|
||||
## advancing slots
|
||||
target_slot >= state.data.slot and block_root == state.latest_block_root
|
||||
func can_advance_slots*(
|
||||
state: ForkedHashedBeaconState, block_root: Eth2Digest, target_slot: Slot): bool =
|
||||
withState(state): state.can_advance_slots(block_root, target_slot)
|
||||
|
|
|
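These three predicates are what the rest of the change uses to ask "is this state at the block/slot I need, or can it get there by slot processing alone?" without a tracked `blck` field, as seen in the `isState` templates and the state cache above. A hedged sketch of the typical dispatch, with `someState` and `bs` (a `BlockSlot`) as placeholders:

```
if someState.matches_block_slot(bs.blck.root, bs.slot):
  discard # exact match: the state can be used as-is
elif someState.can_advance_slots(bs.blck.root, bs.slot):
  discard # same history, earlier slot: process_slots can bring it forward
else:
  discard # different history: a different starting state is needed
```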
@ -305,7 +305,7 @@ template partialBeaconBlock(
|
|||
phase0.BeaconBlock(
|
||||
slot: state.data.slot,
|
||||
proposer_index: proposer_index.uint64,
|
||||
parent_root: state.latest_block_root(),
|
||||
parent_root: state.latest_block_root,
|
||||
body: phase0.BeaconBlockBody(
|
||||
randao_reveal: randao_reveal,
|
||||
eth1_data: eth1data,
|
||||
|
@ -369,7 +369,7 @@ template partialBeaconBlock(
|
|||
altair.BeaconBlock(
|
||||
slot: state.data.slot,
|
||||
proposer_index: proposer_index.uint64,
|
||||
parent_root: state.latest_block_root(),
|
||||
parent_root: state.latest_block_root,
|
||||
body: altair.BeaconBlockBody(
|
||||
randao_reveal: randao_reveal,
|
||||
eth1_data: eth1data,
|
||||
|
@ -434,7 +434,7 @@ template partialBeaconBlock(
|
|||
bellatrix.BeaconBlock(
|
||||
slot: state.data.slot,
|
||||
proposer_index: proposer_index.uint64,
|
||||
parent_root: state.latest_block_root(),
|
||||
parent_root: state.latest_block_root,
|
||||
body: bellatrix.BeaconBlockBody(
|
||||
randao_reveal: randao_reveal,
|
||||
eth1_data: eth1data,
|
||||
|
|
|
@ -111,13 +111,13 @@ proc addRemoteValidator(pool: var ValidatorPool, validators: auto,
|
|||
|
||||
proc addLocalValidators*(node: BeaconNode,
|
||||
validators: openArray[KeystoreData]) =
|
||||
withState(node.dag.headState.data):
|
||||
withState(node.dag.headState):
|
||||
for item in validators:
|
||||
node.addLocalValidator(state.data.validators.asSeq(), item)
|
||||
|
||||
proc addRemoteValidators*(node: BeaconNode,
|
||||
validators: openArray[KeystoreData]) =
|
||||
withState(node.dag.headState.data):
|
||||
withState(node.dag.headState):
|
||||
for item in validators:
|
||||
node.attachedValidators[].addRemoteValidator(
|
||||
state.data.validators.asSeq(), item)
|
||||
|
@ -253,7 +253,7 @@ proc sendSyncCommitteeMessage*(
|
|||
proc sendSyncCommitteeMessages*(node: BeaconNode,
|
||||
msgs: seq[SyncCommitteeMessage]
|
||||
): Future[seq[SendResult]] {.async.} =
|
||||
return withState(node.dag.headState.data):
|
||||
return withState(node.dag.headState):
|
||||
when stateFork >= BeaconStateFork.Altair:
|
||||
var statuses = newSeq[Option[SendResult]](len(msgs))
|
||||
|
||||
|
@ -448,26 +448,26 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
|
|||
var info: ForkedEpochInfo
|
||||
|
||||
process_slots(
|
||||
node.dag.cfg, stateData.data, slot, cache, info,
|
||||
node.dag.cfg, state, slot, cache, info,
|
||||
{skipLastStateRootCalculation}).expect("advancing 1 slot should not fail")
|
||||
|
||||
let
|
||||
eth1Proposal = node.getBlockProposalEth1Data(stateData.data)
|
||||
eth1Proposal = node.getBlockProposalEth1Data(state)
|
||||
|
||||
if eth1Proposal.hasMissingDeposits:
|
||||
warn "Eth1 deposits not available. Skipping block proposal", slot
|
||||
return ForkedBlockResult.err("Eth1 deposits not available")
|
||||
|
||||
let exits = withState(stateData.data):
|
||||
let exits = withState(state):
|
||||
node.exitPool[].getBeaconBlockExits(state.data)
|
||||
let res = makeBeaconBlock(
|
||||
node.dag.cfg,
|
||||
stateData.data,
|
||||
state,
|
||||
validator_index,
|
||||
randao_reveal,
|
||||
eth1Proposal.vote,
|
||||
graffiti,
|
||||
node.attestationPool[].getAttestationsForBlock(stateData.data, cache),
|
||||
node.attestationPool[].getAttestationsForBlock(state, cache),
|
||||
eth1Proposal.deposits,
|
||||
exits,
|
||||
if slot.epoch < node.dag.cfg.ALTAIR_FORK_EPOCH:
|
||||
|
@ -507,7 +507,7 @@ proc proposeBlock(node: BeaconNode,
|
|||
let
|
||||
fork = node.dag.forkAtEpoch(slot.epoch)
|
||||
genesis_validators_root =
|
||||
getStateField(node.dag.headState.data, genesis_validators_root)
|
||||
getStateField(node.dag.headState, genesis_validators_root)
|
||||
randao =
|
||||
block:
|
||||
let res = await validator.genRandaoReveal(fork, genesis_validators_root,
|
||||
|
@ -639,7 +639,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
|
|||
committees_per_slot = get_committee_count_per_slot(epochRef)
|
||||
fork = node.dag.forkAtEpoch(slot.epoch)
|
||||
genesis_validators_root =
|
||||
getStateField(node.dag.headState.data, genesis_validators_root)
|
||||
getStateField(node.dag.headState, genesis_validators_root)
|
||||
|
||||
for committee_index in get_committee_indices(committees_per_slot):
|
||||
let committee = get_beacon_committee(epochRef, slot, committee_index)
|
||||
|
@ -731,7 +731,7 @@ proc handleSyncCommitteeMessages(node: BeaconNode, head: BlockRef, slot: Slot) =
|
|||
for subcommitteeIdx in SyncSubcommitteeIndex:
|
||||
for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
|
||||
let validator = node.getAttachedValidator(
|
||||
getStateField(node.dag.headState.data, validators), valIdx)
|
||||
getStateField(node.dag.headState, validators), valIdx)
|
||||
if isNil(validator) or validator.index.isNone():
|
||||
continue
|
||||
asyncSpawn createAndSendSyncCommitteeMessage(node, slot, validator,
|
||||
|
@ -787,7 +787,7 @@ proc handleSyncCommitteeContributions(node: BeaconNode,
|
|||
# to avoid the repeated offset calculations
|
||||
for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
|
||||
let validator = node.getAttachedValidator(
|
||||
getStateField(node.dag.headState.data, validators), valIdx)
|
||||
getStateField(node.dag.headState, validators), valIdx)
|
||||
if validator == nil:
|
||||
continue
|
||||
|
||||
|
@ -904,7 +904,7 @@ proc sendAggregatedAttestations(
|
|||
|
||||
fork = node.dag.forkAtEpoch(slot.epoch)
|
||||
genesis_validators_root =
|
||||
getStateField(node.dag.headState.data, genesis_validators_root)
|
||||
getStateField(node.dag.headState, genesis_validators_root)
|
||||
committees_per_slot = get_committee_count_per_slot(epochRef)
|
||||
|
||||
var
|
||||
|
@ -986,14 +986,14 @@ proc updateValidatorMetrics*(node: BeaconNode) =
|
|||
if v.index.isNone():
|
||||
0.Gwei
|
||||
elif v.index.get().uint64 >=
|
||||
getStateField(node.dag.headState.data, balances).lenu64:
|
||||
getStateField(node.dag.headState, balances).lenu64:
|
||||
debug "Cannot get validator balance, index out of bounds",
|
||||
pubkey = shortLog(v.pubkey), index = v.index.get(),
|
||||
balances = getStateField(node.dag.headState.data, balances).len,
|
||||
stateRoot = getStateRoot(node.dag.headState.data)
|
||||
balances = getStateField(node.dag.headState, balances).len,
|
||||
stateRoot = getStateRoot(node.dag.headState)
|
||||
0.Gwei
|
||||
else:
|
||||
getStateField(node.dag.headState.data, balances).asSeq()[v.index.get()]
|
||||
getStateField(node.dag.headState, balances).asSeq()[v.index.get()]
|
||||
|
||||
if i < 64:
|
||||
attached_validator_balance.set(
|
||||
|
@ -1300,7 +1300,7 @@ proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
|
|||
|
||||
let
|
||||
genesis_validators_root =
|
||||
getStateField(node.dag.headState.data, genesis_validators_root)
|
||||
getStateField(node.dag.headState, genesis_validators_root)
|
||||
head = node.dag.head
|
||||
|
||||
# Getting the slot signature is expensive but cached - in "normal" cases we'll
|
||||
|
|
|
@ -252,17 +252,17 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
|
|||
(ref bellatrix.HashedBeaconState)())
|
||||
|
||||
withTimer(timers[tLoadState]):
|
||||
doAssert dag.updateStateData(
|
||||
doAssert dag.updateState(
|
||||
stateData[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
|
||||
|
||||
template processBlocks(blocks: auto) =
|
||||
for b in blocks.mitems():
|
||||
if shouldShutDown: quit QuitSuccess
|
||||
while getStateField(stateData[].data, slot) < b.message.slot:
|
||||
let isEpoch = (getStateField(stateData[].data, slot) + 1).is_epoch()
|
||||
while getStateField(stateData[], slot) < b.message.slot:
|
||||
let isEpoch = (getStateField(stateData[], slot) + 1).is_epoch()
|
||||
withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
|
||||
process_slots(
|
||||
dag.cfg, stateData[].data, getStateField(stateData[].data, slot) + 1, cache,
|
||||
dag.cfg, stateData[], getStateField(stateData[], slot) + 1, cache,
|
||||
info, {}).expect("Slot processing can't fail with correct inputs")
|
||||
|
||||
var start = Moment.now()
|
||||
|
@ -270,7 +270,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
|
|||
if conf.resetCache:
|
||||
cache = StateCache()
|
||||
let res = state_transition_block(
|
||||
dag.cfg, stateData[].data, b, cache, {}, noRollback)
|
||||
dag.cfg, stateData[], b, cache, {}, noRollback)
|
||||
if res.isErr():
|
||||
dump("./", b)
|
||||
echo "State transition failed (!) ", res.error()
|
||||
|
@ -281,7 +281,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
|
|||
withTimer(timers[tDbStore]):
|
||||
dbBenchmark.putBlock(b)
|
||||
|
||||
withState(stateData[].data):
|
||||
withState(stateData[]):
|
||||
if state.data.slot.is_epoch and conf.storeStates:
|
||||
if state.data.slot.epoch < 2:
|
||||
dbBenchmark.putState(state.root, state.data)
|
||||
|
@ -416,7 +416,7 @@ proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
|
|||
let tmpState = assignClone(dag.headState)
|
||||
dag.withUpdatedState(tmpState[], blckRef.atSlot(Slot(conf.slot))) do:
|
||||
echo "Writing state..."
|
||||
withState(stateData.data):
|
||||
withState(state):
|
||||
dump("./", state)
|
||||
do: raiseAssert "withUpdatedState failed"
|
||||
|
||||
|
@ -462,7 +462,7 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
|
|||
echo "Written all complete eras"
|
||||
break
|
||||
|
||||
let name = withState(dag.headState.data): eraFileName(cfg, state.data, era)
|
||||
let name = withState(dag.headState): eraFileName(cfg, state.data, era)
|
||||
if isFile(name):
|
||||
echo "Skipping ", name, " (already exists)"
|
||||
else:
|
||||
|
@ -481,7 +481,7 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
|
|||
|
||||
withTimer(timers[tState]):
|
||||
dag.withUpdatedState(tmpState[], canonical) do:
|
||||
withState(stateData.data):
|
||||
withState(state):
|
||||
group.finish(e2, state.data).get()
|
||||
do: raiseAssert "withUpdatedState failed"
|
||||
|
||||
|
@ -580,7 +580,7 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
|
|||
(start, ends) = dag.getSlotRange(conf.perfSlot, conf.perfSlots)
|
||||
blockRefs = dag.getBlockRange(start, ends)
|
||||
perfs = newSeq[ValidatorPerformance](
|
||||
getStateField(dag.headState.data, validators).len())
|
||||
getStateField(dag.headState, validators).len())
|
||||
cache = StateCache()
|
||||
info = ForkedEpochInfo()
|
||||
blck: phase0.TrustedSignedBeaconBlock
|
||||
|
@ -591,26 +591,26 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
|
|||
blockRefs[^1].slot.epoch, " - ", blockRefs[0].slot.epoch
|
||||
|
||||
let state = newClone(dag.headState)
|
||||
doAssert dag.updateStateData(
|
||||
doAssert dag.updateState(
|
||||
state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
|
||||
|
||||
proc processEpoch() =
|
||||
let
|
||||
prev_epoch_target_slot =
|
||||
state[].data.get_previous_epoch().start_slot()
|
||||
state[].get_previous_epoch().start_slot()
|
||||
penultimate_epoch_end_slot =
|
||||
if prev_epoch_target_slot == 0: Slot(0)
|
||||
else: prev_epoch_target_slot - 1
|
||||
first_slot_empty =
|
||||
state[].data.get_block_root_at_slot(prev_epoch_target_slot) ==
|
||||
state[].data.get_block_root_at_slot(penultimate_epoch_end_slot)
|
||||
state[].get_block_root_at_slot(prev_epoch_target_slot) ==
|
||||
state[].get_block_root_at_slot(penultimate_epoch_end_slot)
|
||||
|
||||
let first_slot_attesters = block:
|
||||
let committees_per_slot = state[].data.get_committee_count_per_slot(
|
||||
let committees_per_slot = state[].get_committee_count_per_slot(
|
||||
prev_epoch_target_slot.epoch, cache)
|
||||
var indices = HashSet[ValidatorIndex]()
|
||||
for committee_index in get_committee_indices(committees_per_slot):
|
||||
for validator_index in state[].data.get_beacon_committee(
|
||||
for validator_index in state[].get_beacon_committee(
|
||||
prev_epoch_target_slot, committee_index, cache):
|
||||
indices.incl(validator_index)
|
||||
indices
|
||||
|
@ -654,32 +654,32 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
|
|||
blck = db.getBlock(
|
||||
blockRefs[blockRefs.len - bi - 1].root,
|
||||
phase0.TrustedSignedBeaconBlock).get()
|
||||
while getStateField(state[].data, slot) < blck.message.slot:
|
||||
while getStateField(state[], slot) < blck.message.slot:
|
||||
let
|
||||
nextSlot = getStateField(state[].data, slot) + 1
|
||||
nextSlot = getStateField(state[], slot) + 1
|
||||
flags =
|
||||
if nextSlot == blck.message.slot: {skipLastStateRootCalculation}
|
||||
else: {}
|
||||
process_slots(
|
||||
dag.cfg, state[].data, nextSlot, cache, info, flags).expect(
|
||||
dag.cfg, state[], nextSlot, cache, info, flags).expect(
|
||||
"Slot processing can't fail with correct inputs")
|
||||
|
||||
if getStateField(state[].data, slot).is_epoch():
|
||||
if getStateField(state[], slot).is_epoch():
|
||||
processEpoch()
|
||||
|
||||
let res = state_transition_block(
|
||||
dag.cfg, state[].data, blck, cache, {}, noRollback)
|
||||
dag.cfg, state[], blck, cache, {}, noRollback)
|
||||
if res.isErr:
|
||||
echo "State transition failed (!) ", res.error()
|
||||
quit 1
|
||||
|
||||
# Capture rewards of empty slots as well
|
||||
while getStateField(state[].data, slot) < ends:
|
||||
while getStateField(state[], slot) < ends:
|
||||
process_slots(
|
||||
dag.cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
|
||||
dag.cfg, state[], getStateField(state[], slot) + 1, cache,
|
||||
info, {}).expect("Slot processing can't fail with correct inputs")
|
||||
|
||||
if getStateField(state[].data, slot).is_epoch():
|
||||
if getStateField(state[], slot).is_epoch():
|
||||
processEpoch()
|
||||
|
||||
echo "validator_index,attestation_hits,attestation_misses,head_attestation_hits,head_attestation_misses,target_attestation_hits,target_attestation_misses,delay_avg,first_slot_head_attester_when_first_slot_empty,first_slot_head_attester_when_first_slot_not_empty"
|
||||
|
@ -865,34 +865,34 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
|
|||
var cache = StateCache()
|
||||
let slot = if startSlot > 0: startSlot - 1 else: 0.Slot
|
||||
if blockRefs.len > 0:
|
||||
discard dag.updateStateData(tmpState[], blockRefs[^1].atSlot(slot), false, cache)
|
||||
discard dag.updateState(tmpState[], blockRefs[^1].atSlot(slot), false, cache)
|
||||
else:
|
||||
discard dag.updateStateData(tmpState[], dag.head.atSlot(slot), false, cache)
|
||||
discard dag.updateState(tmpState[], dag.head.atSlot(slot), false, cache)
|
||||
|
||||
let savedValidatorsCount = outDb.getDbValidatorsCount
|
||||
var validatorsCount = getStateField(tmpState[].data, validators).len
|
||||
outDb.insertValidators(tmpState[].data, savedValidatorsCount, validatorsCount)
|
||||
var validatorsCount = getStateField(tmpState[], validators).len
|
||||
outDb.insertValidators(tmpState[], savedValidatorsCount, validatorsCount)
|
||||
|
||||
var previousEpochBalances: seq[uint64]
|
||||
collectBalances(previousEpochBalances, tmpState[].data)
|
||||
collectBalances(previousEpochBalances, tmpState[])
|
||||
|
||||
var forkedInfo = ForkedEpochInfo()
|
||||
var rewardsAndPenalties: seq[RewardsAndPenalties]
|
||||
rewardsAndPenalties.setLen(validatorsCount)
|
||||
|
||||
var auxiliaryState: AuxiliaryState
|
||||
auxiliaryState.copyParticipationFlags(tmpState[].data)
|
||||
auxiliaryState.copyParticipationFlags(tmpState[])
|
||||
|
||||
var aggregator = ValidatorDbAggregator.init(
|
||||
aggregatedFilesOutputDir, conf.resolution, endEpoch)
|
||||
|
||||
proc processEpoch() =
|
||||
let epoch = getStateField(tmpState[].data, slot).epoch
|
||||
let epoch = getStateField(tmpState[], slot).epoch
|
||||
info "Processing epoch ...", epoch = epoch
|
||||
|
||||
var csvLines = newStringOfCap(1000000)
|
||||
|
||||
withState(tmpState[].data):
|
||||
withState(tmpState[]):
|
||||
withEpochInfo(forkedInfo):
|
||||
doAssert state.data.balances.len == info.validators.len
|
||||
doAssert state.data.balances.len == previousEpochBalances.len
|
||||
|
@ -929,21 +929,21 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
|
|||
aggregator.advanceEpochs(epoch, shouldShutDown)
|
||||
|
||||
if shouldShutDown: quit QuitSuccess
|
||||
collectBalances(previousEpochBalances, tmpState[].data)
|
||||
collectBalances(previousEpochBalances, tmpState[])
|
||||
|
||||
proc processSlots(ends: Slot, endsFlags: UpdateFlags) =
|
||||
var currentSlot = getStateField(tmpState[].data, slot)
|
||||
var currentSlot = getStateField(tmpState[], slot)
|
||||
while currentSlot < ends:
|
||||
let nextSlot = currentSlot + 1
|
||||
let flags = if nextSlot == ends: endsFlags else: {}
|
||||
|
||||
if nextSlot.isEpoch:
|
||||
withState(tmpState[].data):
|
||||
withState(tmpState[]):
|
||||
var stateData = newClone(state.data)
|
||||
rewardsAndPenalties.collectEpochRewardsAndPenalties(
|
||||
stateData[], cache, cfg, flags)
|
||||
|
||||
let res = process_slots(cfg, tmpState[].data, nextSlot, cache, forkedInfo, flags)
|
||||
let res = process_slots(cfg, tmpState[], nextSlot, cache, forkedInfo, flags)
|
||||
doAssert res.isOk, "Slot processing can't fail with correct inputs"
|
||||
|
||||
currentSlot = nextSlot
|
||||
|
@ -952,7 +952,7 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
|
|||
processEpoch()
|
||||
rewardsAndPenalties.setLen(0)
|
||||
rewardsAndPenalties.setLen(validatorsCount)
|
||||
auxiliaryState.copyParticipationFlags(tmpState[].data)
|
||||
auxiliaryState.copyParticipationFlags(tmpState[])
|
||||
clear cache
|
||||
|
||||
for bi in 0 ..< blockRefs.len:
|
||||
|
@ -961,15 +961,15 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
|
|||
processSlots(blck.message.slot, {skipLastStateRootCalculation})
|
||||
|
||||
rewardsAndPenalties.collectBlockRewardsAndPenalties(
|
||||
tmpState[].data, forkedBlock, auxiliaryState, cache, cfg)
|
||||
tmpState[], forkedBlock, auxiliaryState, cache, cfg)
|
||||
|
||||
let res = state_transition_block(
|
||||
cfg, tmpState[].data, blck, cache, {}, noRollback)
|
||||
cfg, tmpState[], blck, cache, {}, noRollback)
|
||||
if res.isErr:
|
||||
fatal "State transition failed (!)"
|
||||
quit QuitFailure
|
||||
|
||||
let newValidatorsCount = getStateField(tmpState[].data, validators).len
|
||||
let newValidatorsCount = getStateField(tmpState[], validators).len
|
||||
if newValidatorsCount > validatorsCount:
|
||||
# Resize the structures in case a new validator has appeared after
|
||||
# the state_transition_block procedure call ...
|
||||
|
@ -977,7 +977,7 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
|
|||
previousEpochBalances.setLen(newValidatorsCount)
|
||||
# ... and add the new validators to the database.
|
||||
outDb.insertValidators(
|
||||
tmpState[].data, validatorsCount, newValidatorsCount)
|
||||
tmpState[], validatorsCount, newValidatorsCount)
|
||||
validatorsCount = newValidatorsCount
|
||||
|
||||
# Capture rewards of empty slots as well, including the epoch that got
|
||||
|
|
|
@ -114,20 +114,20 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
|
|||
|
||||
dag.withUpdatedState(tmpState[], attestationHead) do:
|
||||
let committees_per_slot =
|
||||
get_committee_count_per_slot(stateData.data, slot.epoch, cache)
|
||||
get_committee_count_per_slot(state, slot.epoch, cache)
|
||||
|
||||
for committee_index in get_committee_indices(committees_per_slot):
|
||||
let committee = get_beacon_committee(
|
||||
stateData.data, slot, committee_index, cache)
|
||||
state, slot, committee_index, cache)
|
||||
|
||||
for index_in_committee, validator_index in committee:
|
||||
if rand(r, 1.0) <= attesterRatio:
|
||||
let
|
||||
data = makeAttestationData(
|
||||
stateData.data, slot, committee_index, blck.root)
|
||||
state, slot, committee_index, blck.root)
|
||||
sig =
|
||||
get_attestation_signature(getStateField(stateData.data, fork),
|
||||
getStateField(stateData.data, genesis_validators_root),
|
||||
get_attestation_signature(getStateField(state, fork),
|
||||
getStateField(state, genesis_validators_root),
|
||||
data, MockPrivKeys[validator_index])
|
||||
var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
|
||||
aggregation_bits.setBit index_in_committee
|
||||
|
@@ -237,14 +237,14 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
  signedContributionAndProof, res.get()[0])

  proc getNewBlock[T](
- stateData: var StateData, slot: Slot, cache: var StateCache): T =
+ state: var ForkedHashedBeaconState, slot: Slot, cache: var StateCache): T =
  let
  finalizedEpochRef = dag.getFinalizedEpochRef()
  proposerIdx = get_beacon_proposer_index(
- stateData.data, cache, getStateField(stateData.data, slot)).get()
+ state, cache, getStateField(state, slot)).get()
  privKey = MockPrivKeys[proposerIdx]
  eth1ProposalData = eth1Chain.getBlockProposalData(
- stateData.data,
+ state,
  finalizedEpochRef.eth1_data,
  finalizedEpochRef.eth1_deposit_index)
  sync_aggregate =

@@ -256,11 +256,11 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
  static: doAssert false
  hashedState =
  when T is phase0.SignedBeaconBlock:
- addr stateData.data.phase0Data
+ addr state.phase0Data
  elif T is altair.SignedBeaconBlock:
- addr stateData.data.altairData
+ addr state.altairData
  elif T is bellatrix.SignedBeaconBlock:
- addr stateData.data.bellatrixData
+ addr state.bellatrixData
  else:
  static: doAssert false
  message = makeBeaconBlock(

@@ -268,12 +268,12 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
  hashedState[],
  proposerIdx,
  privKey.genRandaoReveal(
- getStateField(stateData.data, fork),
- getStateField(stateData.data, genesis_validators_root),
+ getStateField(state, fork),
+ getStateField(state, genesis_validators_root),
  slot).toValidatorSig(),
  eth1ProposalData.vote,
  default(GraffitiBytes),
- attPool.getAttestationsForBlock(stateData.data, cache),
+ attPool.getAttestationsForBlock(state, cache),
  eth1ProposalData.deposits,
  BeaconBlockExits(),
  sync_aggregate,

@@ -292,8 +292,8 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
  # Careful, state no longer valid after here because of the await..
  newBlock.signature = withTimerRet(timers[tSignBlock]):
  get_block_signature(
- getStateField(stateData.data, fork),
- getStateField(stateData.data, genesis_validators_root),
+ getStateField(state, fork),
+ getStateField(state, genesis_validators_root),
  newBlock.message.slot,
  blockRoot, privKey).toValidatorSig()

@@ -305,7 +305,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

  dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do:
  let
- newBlock = getNewBlock[phase0.SignedBeaconBlock](stateData, slot, cache)
+ newBlock = getNewBlock[phase0.SignedBeaconBlock](state, slot, cache)
  added = dag.addHeadBlock(verifier, newBlock) do (
  blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
  epochRef: EpochRef):

@@ -313,7 +313,6 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
  attPool.addForkChoice(
  epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)

- blck() = added[]
  dag.updateHead(added[], quarantine[])
  if dag.needStateCachesAndForkChoicePruning():
  dag.pruneStateCachesDAG()

@@ -327,7 +326,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

  dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do:
  let
- newBlock = getNewBlock[altair.SignedBeaconBlock](stateData, slot, cache)
+ newBlock = getNewBlock[altair.SignedBeaconBlock](state, slot, cache)
  added = dag.addHeadBlock(verifier, newBlock) do (
  blckRef: BlockRef, signedBlock: altair.TrustedSignedBeaconBlock,
  epochRef: EpochRef):

@@ -335,7 +334,6 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
  attPool.addForkChoice(
  epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)

- blck() = added[]
  dag.updateHead(added[], quarantine[])
  if dag.needStateCachesAndForkChoicePruning():
  dag.pruneStateCachesDAG()

@@ -349,7 +347,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

  dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do:
  let
- newBlock = getNewBlock[bellatrix.SignedBeaconBlock](stateData, slot, cache)
+ newBlock = getNewBlock[bellatrix.SignedBeaconBlock](state, slot, cache)
  added = dag.addHeadBlock(verifier, newBlock) do (
  blckRef: BlockRef, signedBlock: bellatrix.TrustedSignedBeaconBlock,
  epochRef: EpochRef):

@@ -357,7 +355,6 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
  attPool.addForkChoice(
  epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)

- blck() = added[]
  dag.updateHead(added[], quarantine[])
  if dag.needStateCachesAndForkChoicePruning():
  dag.pruneStateCachesDAG()

@@ -420,7 +417,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

  # TODO if attestation pool was smarter, it would include older attestations
  # too!
- verifyConsensus(dag.headState.data, attesterRatio * blockRatio)
+ verifyConsensus(dag.headState, attesterRatio * blockRatio)

  if t == tEpoch:
  echo &". slot: {shortLog(slot)} ",

@@ -432,9 +429,9 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
  if replay:
  withTimer(timers[tReplay]):
  var cache = StateCache()
- doAssert dag.updateStateData(
+ doAssert dag.updateState(
  replayState[], dag.head.atSlot(Slot(slots)), false, cache)

  echo "Done!"

- printTimers(dag.headState.data, attesters, true, timers)
+ printTimers(dag.headState, attesters, true, timers)
@@ -66,7 +66,7 @@ cli do(validatorsDir: string, secretsDir: string,
  warn "Unkownn validator", pubkey

  var
- blockRoot = withState(state[]): state.latest_block_root()
+ blockRoot = withState(state[]): state.latest_block_root
  cache: StateCache
  info: ForkedEpochInfo
  aggregates: seq[Attestation]
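`latest_block_root` is now read as a plain accessor, without call parentheses, inside `withState` blocks. A small usage sketch based only on the call site above (surrounding declarations assumed):

  var blockRoot = withState(state[]):
    # root of the block that was last applied to this state - the information
    # the removed StateData.blck field used to carry
    state.latest_block_root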
@@ -65,7 +65,7 @@ proc block_for_next_slot(

  let attestations =
  if withAttestations:
- let block_root = withState(forked): state.latest_block_root()
+ let block_root = withState(forked): state.latest_block_root
  makeFullAttestations(forked, block_root, state.slot, cache)
  else:
  @[]
@@ -162,12 +162,12 @@ proc stepOnBlock(
  dag: ChainDagRef,
  fkChoice: ref ForkChoice,
  verifier: var BatchVerifier,
- state: var StateData,
+ state: var ForkedHashedBeaconState,
  stateCache: var StateCache,
  signedBlock: ForkySignedBeaconBlock,
  time: BeaconTime): Result[BlockRef, BlockError] =
  # 1. Move state to proper slot.
- doAssert dag.updateStateData(
+ doAssert dag.updateState(
  state,
  dag.head.atSlot(time.slotOrZero),
  save = false,
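`updateStateData` is renamed to `updateState` and operates on a bare `ForkedHashedBeaconState`. A hedged sketch of the call shape as it appears in this hunk (the trailing `cache` argument is cut off by the hunk boundary and is assumed here):

  # Move `state` to the head block at the wall-clock slot; the call returns a
  # bool, which this test harness simply asserts on.
  doAssert dag.updateState(
    state, dag.head.atSlot(time.slotOrZero), save = false, cache)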
@@ -73,15 +73,16 @@ suite "Attestation pool processing" & preset():
  # Slot 0 is a finalized slot - won't be making attestations for it..
  check:
  process_slots(
- dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, info,
+ dag.cfg, state[], getStateField(state[], slot) + 1, cache, info,
  {}).isOk()

  test "Can add and retrieve simple attestations" & preset():
  let
  # Create an attestation for slot 1!
  bc0 = get_beacon_committee(
- state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
- attestation = makeAttestation(state[].data, state.blck.root, bc0[0], cache)
+ state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
+ attestation = makeAttestation(
+ state[], state[].latest_block_root, bc0[0], cache)

  pool[].addAttestation(
  attestation, @[bc0[0]], attestation.loadSig,

@@ -104,11 +105,11 @@ suite "Attestation pool processing" & preset():
  none(Slot), some(CommitteeIndex(attestation.data.index + 1)))) == []

  process_slots(
- defaultRuntimeConfig, state.data,
- getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
+ defaultRuntimeConfig, state[],
+ getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
  info, {}).isOk()

- let attestations = pool[].getAttestationsForBlock(state.data, cache)
+ let attestations = pool[].getAttestationsForBlock(state[], cache)

  check:
  attestations.len == 1

@@ -116,40 +117,40 @@ suite "Attestation pool processing" & preset():

  let
  root1 = addTestBlock(
- state.data, cache, attestations = attestations,
+ state[], cache, attestations = attestations,
  nextSlot = false).phase0Data.root
  bc1 = get_beacon_committee(
- state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
- att1 = makeAttestation(state[].data, root1, bc1[0], cache)
+ state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
+ att1 = makeAttestation(state[], root1, bc1[0], cache)

  check:
- withState(state.data): state.latest_block_root == root1
+ withState(state[]): state.latest_block_root == root1

  process_slots(
- defaultRuntimeConfig, state.data,
- getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
+ defaultRuntimeConfig, state[],
+ getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
  info, {}).isOk()

- withState(state.data): state.latest_block_root == root1
+ withState(state[]): state.latest_block_root == root1

  check:
  # shouldn't include already-included attestations
- pool[].getAttestationsForBlock(state.data, cache) == []
+ pool[].getAttestationsForBlock(state[], cache) == []

  pool[].addAttestation(
  att1, @[bc1[0]], att1.loadSig, att1.data.slot.start_beacon_time)

  check:
  # but new ones should go in
- pool[].getAttestationsForBlock(state.data, cache).len() == 1
+ pool[].getAttestationsForBlock(state[], cache).len() == 1

  let
- att2 = makeAttestation(state[].data, root1, bc1[1], cache)
+ att2 = makeAttestation(state[], root1, bc1[1], cache)
  pool[].addAttestation(
  att2, @[bc1[1]], att2.loadSig, att2.data.slot.start_beacon_time)

  let
- combined = pool[].getAttestationsForBlock(state.data, cache)
+ combined = pool[].getAttestationsForBlock(state[], cache)

  check:
  # New attestations should be combined with old attestations
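In the attestation-pool tests, the target block for an attestation is no longer taken from the removed `state.blck` field but recomputed from the state. A sketch of the recurring pattern (helpers as used above):

  let
    bc0 = get_beacon_committee(
      state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
    # the state itself knows which block it was derived from
    attestation = makeAttestation(
      state[], state[].latest_block_root, bc0[0], cache)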
@@ -162,18 +163,18 @@ suite "Attestation pool processing" & preset():

  check:
  # readding the combined attestation shouldn't have an effect
- pool[].getAttestationsForBlock(state.data, cache).len() == 1
+ pool[].getAttestationsForBlock(state[], cache).len() == 1

  let
  # Someone votes for a different root
- att3 = makeAttestation(state[].data, Eth2Digest(), bc1[2], cache)
+ att3 = makeAttestation(state[], Eth2Digest(), bc1[2], cache)
  pool[].addAttestation(
  att3, @[bc1[2]], att3.loadSig, att3.data.slot.start_beacon_time)

  check:
  # We should now get both attestations for the block, but the aggregate
  # should be the one with the most votes
- pool[].getAttestationsForBlock(state.data, cache).len() == 2
+ pool[].getAttestationsForBlock(state[], cache).len() == 2
  pool[].getAggregatedAttestation(2.Slot, 0.CommitteeIndex).
  get().aggregation_bits.countOnes() == 2
  pool[].getAggregatedAttestation(2.Slot, hash_tree_root(att2.data)).

@@ -181,7 +182,7 @@ suite "Attestation pool processing" & preset():

  let
  # Someone votes for a different root
- att4 = makeAttestation(state[].data, Eth2Digest(), bc1[2], cache)
+ att4 = makeAttestation(state[], Eth2Digest(), bc1[2], cache)
  pool[].addAttestation(
  att4, @[bc1[2]], att3.loadSig, att3.data.slot.start_beacon_time)

@@ -189,14 +190,18 @@ suite "Attestation pool processing" & preset():
  let
  # Create an attestation for slot 1!
  bc0 = get_beacon_committee(
- state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
+ state[], getStateField(state[], slot), 0.CommitteeIndex, cache)

  var
- att0 = makeAttestation(state[].data, state.blck.root, bc0[0], cache)
+ att0 = makeAttestation(
+ state[], state[].latest_block_root, bc0[0], cache)
  att0x = att0
- att1 = makeAttestation(state[].data, state.blck.root, bc0[1], cache)
- att2 = makeAttestation(state[].data, state.blck.root, bc0[2], cache)
- att3 = makeAttestation(state[].data, state.blck.root, bc0[3], cache)
+ att1 = makeAttestation(
+ state[], state[].latest_block_root, bc0[1], cache)
+ att2 = makeAttestation(
+ state[], state[].latest_block_root, bc0[2], cache)
+ att3 = makeAttestation(
+ state[], state[].latest_block_root, bc0[3], cache)

  # Both attestations include member 2 but neither is a subset of the other
  att0.combine(att2)

@@ -212,13 +217,13 @@ suite "Attestation pool processing" & preset():

  check:
  process_slots(
- defaultRuntimeConfig, state.data,
- getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
+ defaultRuntimeConfig, state[],
+ getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
  info, {}).isOk()

  check:
  pool[].covers(att0.data, att0.aggregation_bits)
- pool[].getAttestationsForBlock(state.data, cache).len() == 2
+ pool[].getAttestationsForBlock(state[], cache).len() == 2
  # Can get either aggregate here, random!
  pool[].getAggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome()

@@ -227,7 +232,7 @@ suite "Attestation pool processing" & preset():
  att3, @[bc0[3]], att3.loadSig, att3.data.slot.start_beacon_time)

  block:
- let attestations = pool[].getAttestationsForBlock(state.data, cache)
+ let attestations = pool[].getAttestationsForBlock(state[], cache)
  check:
  attestations.len() == 2
  attestations[0].aggregation_bits.countOnes() == 3

@@ -240,7 +245,7 @@ suite "Attestation pool processing" & preset():
  att0x, @[bc0[0]], att0x.loadSig, att0x.data.slot.start_beacon_time)

  block:
- let attestations = pool[].getAttestationsForBlock(state.data, cache)
+ let attestations = pool[].getAttestationsForBlock(state[], cache)
  check:
  attestations.len() == 1
  attestations[0].aggregation_bits.countOnes() == 4

@@ -253,46 +258,48 @@ suite "Attestation pool processing" & preset():
  root.data[0..<8] = toBytesBE(i.uint64)
  let
  bc0 = get_beacon_committee(
- state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
+ state[], getStateField(state[], slot), 0.CommitteeIndex, cache)

  for j in 0..<bc0.len():
  root.data[8..<16] = toBytesBE(j.uint64)
- let att = makeAttestation(state[].data, root, bc0[j], cache)
+ let att = makeAttestation(state[], root, bc0[j], cache)
  pool[].addAttestation(
  att, @[bc0[j]], att.loadSig, att.data.slot.start_beacon_time)
  inc attestations

  check:
  process_slots(
- defaultRuntimeConfig, state.data,
- getStateField(state.data, slot) + 1, cache, info, {}).isOk()
+ defaultRuntimeConfig, state[],
+ getStateField(state[], slot) + 1, cache, info, {}).isOk()

  doAssert attestations.uint64 > MAX_ATTESTATIONS,
  "6*SLOTS_PER_EPOCH validators > 128 mainnet MAX_ATTESTATIONS"
  check:
  # Fill block with attestations
- pool[].getAttestationsForBlock(state.data, cache).lenu64() ==
+ pool[].getAttestationsForBlock(state[], cache).lenu64() ==
  MAX_ATTESTATIONS
  pool[].getAggregatedAttestation(
- getStateField(state.data, slot) - 1, 0.CommitteeIndex).isSome()
+ getStateField(state[], slot) - 1, 0.CommitteeIndex).isSome()

  test "Attestations may arrive in any order" & preset():
  var cache = StateCache()
  let
  # Create an attestation for slot 1!
  bc0 = get_beacon_committee(
- state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
- attestation0 = makeAttestation(state[].data, state.blck.root, bc0[0], cache)
+ state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
+ attestation0 = makeAttestation(
+ state[], state[].latest_block_root, bc0[0], cache)

  check:
  process_slots(
- defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1,
+ defaultRuntimeConfig, state[], getStateField(state[], slot) + 1,
  cache, info, {}).isOk()

  let
- bc1 = get_beacon_committee(state[].data,
- getStateField(state.data, slot), 0.CommitteeIndex, cache)
- attestation1 = makeAttestation(state[].data, state.blck.root, bc1[0], cache)
+ bc1 = get_beacon_committee(state[],
+ getStateField(state[], slot), 0.CommitteeIndex, cache)
+ attestation1 = makeAttestation(
+ state[], state[].latest_block_root, bc1[0], cache)

  # test reverse order
  pool[].addAttestation(
@@ -302,7 +309,7 @@ suite "Attestation pool processing" & preset():
  attestation0, @[bc0[0]], attestation0.loadSig,
  attestation0.data.slot.start_beacon_time)

- let attestations = pool[].getAttestationsForBlock(state.data, cache)
+ let attestations = pool[].getAttestationsForBlock(state[], cache)

  check:
  attestations.len == 1

@@ -312,11 +319,11 @@ suite "Attestation pool processing" & preset():
  let
  # Create an attestation for slot 1!
  bc0 = get_beacon_committee(
- state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
+ state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
  attestation0 =
- makeAttestation(state[].data, state.blck.root, bc0[0], cache)
+ makeAttestation(state[], state[].latest_block_root, bc0[0], cache)
  attestation1 =
- makeAttestation(state[].data, state.blck.root, bc0[1], cache)
+ makeAttestation(state[], state[].latest_block_root, bc0[1], cache)

  pool[].addAttestation(
  attestation0, @[bc0[0]], attestation0.loadSig,

@@ -327,10 +334,10 @@ suite "Attestation pool processing" & preset():

  check:
  process_slots(
- defaultRuntimeConfig, state.data,
+ defaultRuntimeConfig, state[],
  MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk()

- let attestations = pool[].getAttestationsForBlock(state.data, cache)
+ let attestations = pool[].getAttestationsForBlock(state[], cache)

  check:
  attestations.len == 1

@@ -341,11 +348,11 @@ suite "Attestation pool processing" & preset():
  var
  # Create an attestation for slot 1!
  bc0 = get_beacon_committee(
- state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
- attestation0 =
- makeAttestation(state[].data, state.blck.root, bc0[0], cache)
- attestation1 =
- makeAttestation(state[].data, state.blck.root, bc0[1], cache)
+ state[], getStateField(state[], slot), 0.CommitteeIndex, cache)
+ attestation0 = makeAttestation(
+ state[], state[].latest_block_root, bc0[0], cache)
+ attestation1 = makeAttestation(
+ state[], state[].latest_block_root, bc0[1], cache)

  attestation0.combine(attestation1)

@@ -358,10 +365,10 @@ suite "Attestation pool processing" & preset():

  check:
  process_slots(
- defaultRuntimeConfig, state.data,
+ defaultRuntimeConfig, state[],
  MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk()

- let attestations = pool[].getAttestationsForBlock(state.data, cache)
+ let attestations = pool[].getAttestationsForBlock(state[], cache)

  check:
  attestations.len == 1

@@ -370,12 +377,12 @@ suite "Attestation pool processing" & preset():
  var cache = StateCache()
  var
  # Create an attestation for slot 1!
- bc0 = get_beacon_committee(state[].data,
- getStateField(state.data, slot), 0.CommitteeIndex, cache)
- attestation0 =
- makeAttestation(state[].data, state.blck.root, bc0[0], cache)
- attestation1 =
- makeAttestation(state[].data, state.blck.root, bc0[1], cache)
+ bc0 = get_beacon_committee(state[],
+ getStateField(state[], slot), 0.CommitteeIndex, cache)
+ attestation0 = makeAttestation(
+ state[], state[].latest_block_root, bc0[0], cache)
+ attestation1 = makeAttestation(
+ state[], state[].latest_block_root, bc0[1], cache)

  attestation0.combine(attestation1)

@@ -388,10 +395,10 @@ suite "Attestation pool processing" & preset():

  check:
  process_slots(
- defaultRuntimeConfig, state.data,
+ defaultRuntimeConfig, state[],
  MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {}).isOk()

- let attestations = pool[].getAttestationsForBlock(state.data, cache)
+ let attestations = pool[].getAttestationsForBlock(state[], cache)

  check:
  attestations.len == 1

@@ -399,7 +406,7 @@ suite "Attestation pool processing" & preset():
  test "Fork choice returns latest block with no attestations":
  var cache = StateCache()
  let
- b1 = addTestBlock(state.data, cache).phase0Data
+ b1 = addTestBlock(state[], cache).phase0Data
  b1Add = dag.addHeadBlock(verifier, b1) do (
  blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
  epochRef: EpochRef):

@@ -412,7 +419,7 @@ suite "Attestation pool processing" & preset():
  head == b1Add[]

  let
- b2 = addTestBlock(state.data, cache).phase0Data
+ b2 = addTestBlock(state[], cache).phase0Data
  b2Add = dag.addHeadBlock(verifier, b2) do (
  blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
  epochRef: EpochRef):

@@ -428,7 +435,7 @@ suite "Attestation pool processing" & preset():
  test "Fork choice returns block with attestation":
  var cache = StateCache()
  let
- b10 = makeTestBlock(state.data, cache).phase0Data
+ b10 = makeTestBlock(state[], cache).phase0Data
  b10Add = dag.addHeadBlock(verifier, b10) do (
  blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
  epochRef: EpochRef):

@@ -442,7 +449,7 @@ suite "Attestation pool processing" & preset():
  head == b10Add[]

  let
- b11 = makeTestBlock(state.data, cache,
+ b11 = makeTestBlock(state[], cache,
  graffiti = GraffitiBytes [1'u8, 0, 0, 0 ,0 ,0 ,0 ,0 ,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
  ).phase0Data
  b11Add = dag.addHeadBlock(verifier, b11) do (

@@ -453,9 +460,9 @@ suite "Attestation pool processing" & preset():
  epochRef, blckRef, signedBlock.message, blckRef.slot.start_beacon_time)

  bc1 = get_beacon_committee(
- state[].data, getStateField(state.data, slot) - 1, 1.CommitteeIndex,
+ state[], getStateField(state[], slot) - 1, 1.CommitteeIndex,
  cache)
- attestation0 = makeAttestation(state[].data, b10.root, bc1[0], cache)
+ attestation0 = makeAttestation(state[], b10.root, bc1[0], cache)

  pool[].addAttestation(
  attestation0, @[bc1[0]], attestation0.loadSig,

@@ -468,8 +475,8 @@ suite "Attestation pool processing" & preset():
  head2 == b10Add[]

  let
- attestation1 = makeAttestation(state[].data, b11.root, bc1[1], cache)
- attestation2 = makeAttestation(state[].data, b11.root, bc1[2], cache)
+ attestation1 = makeAttestation(state[], b11.root, bc1[1], cache)
+ attestation2 = makeAttestation(state[], b11.root, bc1[2], cache)
  pool[].addAttestation(
  attestation1, @[bc1[1]], attestation1.loadSig,
  attestation1.data.slot.start_beacon_time)

@@ -494,7 +501,7 @@ suite "Attestation pool processing" & preset():
  test "Trying to add a block twice tags the second as an error":
  var cache = StateCache()
  let
- b10 = makeTestBlock(state.data, cache).phase0Data
+ b10 = makeTestBlock(state[], cache).phase0Data
  b10Add = dag.addHeadBlock(verifier, b10) do (
  blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
  epochRef: EpochRef):

@@ -525,7 +532,7 @@ suite "Attestation pool processing" & preset():
  dag.updateFlags.incl {skipBLSValidation}
  var cache = StateCache()
  let
- b10 = addTestBlock(state.data, cache).phase0Data
+ b10 = addTestBlock(state[], cache).phase0Data
  b10Add = dag.addHeadBlock(verifier, b10) do (
  blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
  epochRef: EpochRef):

@@ -547,10 +554,10 @@ suite "Attestation pool processing" & preset():
  for epoch in 0 ..< 5:
  let start_slot = start_slot(Epoch epoch)
  let committees_per_slot =
- get_committee_count_per_slot(state[].data, Epoch epoch, cache)
+ get_committee_count_per_slot(state[], Epoch epoch, cache)
  for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
  let new_block = addTestBlock(
- state.data, cache, attestations = attestations).phase0Data
+ state[], cache, attestations = attestations).phase0Data

  let blockRef = dag.addHeadBlock(verifier, new_block) do (
  blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,

@@ -567,7 +574,7 @@ suite "Attestation pool processing" & preset():
  attestations.setlen(0)
  for committee_index in get_committee_indices(committees_per_slot):
  let committee = get_beacon_committee(
- state[].data, getStateField(state.data, slot), committee_index,
+ state[], getStateField(state[], slot), committee_index,
  cache)

  # Create a bitfield filled with the given count per attestation,

@@ -578,8 +585,7 @@ suite "Attestation pool processing" & preset():

  attestations.add Attestation(
  aggregation_bits: aggregation_bits,
- data: makeAttestationData(
- state[].data, getStateField(state.data, slot),
+ data: makeAttestationData(state[], getStateField(state[], slot),
  committee_index, blockRef.get().root)
  # signature: ValidatorSig()
  )
@@ -68,7 +68,7 @@ proc getTestStates(stateFork: BeaconStateFork): auto =
  db = makeTestDB(SLOTS_PER_EPOCH)
  validatorMonitor = newClone(ValidatorMonitor.init())
  dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
- var testStates = getTestStates(dag.headState.data, stateFork)
+ var testStates = getTestStates(dag.headState, stateFork)

  # Ensure transitions beyond just adding validators and increasing slots
  sort(testStates) do (x, y: ref ForkedHashedBeaconState) -> int:

@@ -338,7 +338,7 @@ suite "Beacon chain DB" & preset():
  let restoreAddr = addr dag.headState

  func restore() =
- assign(state[], restoreAddr[].data)
+ assign(state[], restoreAddr[])

  check:
  state[].phase0Data.data.slot == 10.Slot

@@ -361,7 +361,7 @@ suite "Beacon chain DB" & preset():
  let restoreAddr = addr dag.headState

  func restore() =
- assign(state[], restoreAddr[].data)
+ assign(state[], restoreAddr[])

  check:
  state[].altairData.data.slot == 10.Slot

@@ -387,7 +387,7 @@ suite "Beacon chain DB" & preset():
  let restoreAddr = addr dag.headState

  func restore() =
- assign(state[], restoreAddr[].data)
+ assign(state[], restoreAddr[])

  check:
  state[].bellatrixData.data.slot == 10.Slot
@@ -34,7 +34,7 @@ suite "Block processor" & preset():
  quarantine = newClone(Quarantine.init())
  attestationPool = newClone(AttestationPool.init(dag, quarantine))
  consensusManager = ConsensusManager.new(dag, attestationPool, quarantine)
- state = newClone(dag.headState.data)
+ state = newClone(dag.headState)
  cache = StateCache()
  b1 = addTestBlock(state[], cache).phase0Data
  b2 = addTestBlock(state[], cache).phase0Data

@@ -92,7 +92,7 @@ suite "Block processor" & preset():
  check:
  # ensure we loaded the correct head state
  dag2.head.root == b2.root
- getStateRoot(dag2.headState.data) == b2.message.state_root
+ getStateRoot(dag2.headState) == b2.message.state_root
  dag2.getBlockRef(b1.root).isSome()
  dag2.getBlockRef(b2.root).isSome()
  dag2.heads.len == 1
@@ -63,7 +63,7 @@ suite "Block pool processing" & preset():
  dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
  verifier = BatchVerifier(rng: keys.newRng(), taskpool: Taskpool.new())
  quarantine = Quarantine.init()
- state = newClone(dag.headState.data)
+ state = newClone(dag.headState)
  cache = StateCache()
  info = ForkedEpochInfo()
  att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)

@@ -97,7 +97,7 @@ suite "Block pool processing" & preset():
  b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback)
  b2Get = dag.getForkedBlock(b2.root)
  er = dag.findEpochRef(b1Add[], b1Add[].slot.epoch)
- validators = getStateField(dag.headState.data, validators).lenu64()
+ validators = getStateField(dag.headState, validators).lenu64()

  check:
  b2Get.isSome()

@@ -185,8 +185,8 @@ suite "Block pool processing" & preset():
  db.getStateRoot(stateCheckpoint.blck.root, stateCheckpoint.slot).isErr()
  # this is required for the test to work - it's not a "public"
  # post-condition of getEpochRef
- getStateField(dag.epochRefState.data, slot) == nextEpochSlot
- assign(state[], dag.epochRefState.data)
+ getStateField(dag.epochRefState, slot) == nextEpochSlot
+ assign(state[], dag.epochRefState)

  let
  bnext = addTestBlock(state[], cache).phase0Data

@@ -214,9 +214,9 @@ suite "Block pool processing" & preset():

  check:
  dag.head == b1Add[]
- getStateField(dag.headState.data, slot) == b1Add[].slot
+ getStateField(dag.headState, slot) == b1Add[].slot

- test "updateStateData sanity" & preset():
+ test "updateState sanity" & preset():
  let
  b1Add = dag.addHeadBlock(verifier, b1, nilPhase0Callback)
  b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback)

@@ -229,39 +229,39 @@ suite "Block pool processing" & preset():
  # move to specific block
  var cache = StateCache()
  check:
- dag.updateStateData(tmpState[], bs1, false, cache)
- tmpState.blck == b1Add[]
- getStateField(tmpState.data, slot) == bs1.slot
+ dag.updateState(tmpState[], bs1, false, cache)
+ tmpState[].latest_block_root == b1Add[].root
+ getStateField(tmpState[], slot) == bs1.slot

  # Skip slots
  check:
- dag.updateStateData(tmpState[], bs1_3, false, cache) # skip slots
- tmpState.blck == b1Add[]
- getStateField(tmpState.data, slot) == bs1_3.slot
+ dag.updateState(tmpState[], bs1_3, false, cache) # skip slots
+ tmpState[].latest_block_root == b1Add[].root
+ getStateField(tmpState[], slot) == bs1_3.slot

  # Move back slots, but not blocks
  check:
- dag.updateStateData(tmpState[], bs1_3.parent(), false, cache)
- tmpState.blck == b1Add[]
- getStateField(tmpState.data, slot) == bs1_3.parent().slot
+ dag.updateState(tmpState[], bs1_3.parent(), false, cache)
+ tmpState[].latest_block_root == b1Add[].root
+ getStateField(tmpState[], slot) == bs1_3.parent().slot

  # Move to different block and slot
  check:
- dag.updateStateData(tmpState[], bs2_3, false, cache)
- tmpState.blck == b2Add[]
- getStateField(tmpState.data, slot) == bs2_3.slot
+ dag.updateState(tmpState[], bs2_3, false, cache)
+ tmpState[].latest_block_root == b2Add[].root
+ getStateField(tmpState[], slot) == bs2_3.slot

  # Move back slot and block
  check:
- dag.updateStateData(tmpState[], bs1, false, cache)
- tmpState.blck == b1Add[]
- getStateField(tmpState.data, slot) == bs1.slot
+ dag.updateState(tmpState[], bs1, false, cache)
+ tmpState[].latest_block_root == b1Add[].root
+ getStateField(tmpState[], slot) == bs1.slot

  # Move back to genesis
  check:
- dag.updateStateData(tmpState[], bs1.parent(), false, cache)
- tmpState.blck == b1Add[].parent
- getStateField(tmpState.data, slot) == bs1.parent.slot
+ dag.updateState(tmpState[], bs1.parent(), false, cache)
+ tmpState[].latest_block_root == b1Add[].parent.root
+ getStateField(tmpState[], slot) == bs1.parent.slot

  when declared(GC_fullCollect): # i386 test machines seem to run low..
  GC_fullCollect()
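The renamed `updateState` sanity test now pins a rewound state to its block by comparing roots rather than `BlockRef`s. A condensed sketch of one of the checks above:

  check:
    dag.updateState(tmpState[], bs1, false, cache)
    # the originating block is recovered from the state, not tracked separately
    tmpState[].latest_block_root == b1Add[].root
    getStateField(tmpState[], slot) == bs1.slot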
@@ -278,7 +278,7 @@ suite "Block pool altair processing" & preset():
  dag = init(ChainDAGRef, cfg, db, validatorMonitor, {})
  verifier = BatchVerifier(rng: keys.newRng(), taskpool: Taskpool.new())
  quarantine = Quarantine.init()
- state = newClone(dag.headState.data)
+ state = newClone(dag.headState)
  cache = StateCache()
  info = ForkedEpochInfo()

@@ -359,8 +359,8 @@ suite "chain DAG finalization tests" & preset():
  test "prune heads on finalization" & preset():
  # Create a fork that will not be taken
  var
- blck = makeTestBlock(dag.headState.data, cache).phase0Data
- tmpState = assignClone(dag.headState.data)
+ blck = makeTestBlock(dag.headState, cache).phase0Data
+ tmpState = assignClone(dag.headState)
  check:
  process_slots(
  defaultRuntimeConfig, tmpState[],

@@ -372,7 +372,7 @@ suite "chain DAG finalization tests" & preset():
  let status = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
  check: status.isOk()

- assign(tmpState[], dag.headState.data)
+ assign(tmpState[], dag.headState)

  # skip slots so we can test gappy getBlockAtSlot
  check process_slots(

@@ -410,7 +410,7 @@ suite "chain DAG finalization tests" & preset():
  dag.containsForkBlock(dag.finalizedHead.blck.root)

  check:
- dag.db.immutableValidators.len() == getStateField(dag.headState.data, validators).len()
+ dag.db.immutableValidators.len() == getStateField(dag.headState, validators).len()

  let
  finalER = dag.getEpochRef(

@@ -428,11 +428,11 @@ suite "chain DAG finalization tests" & preset():
  block:
  let tmpStateData = assignClone(dag.headState)

- # Check that cached data is available after updateStateData - since we
+ # Check that cached data is available after updateState - since we
  # just processed the head the relevant epochrefs should not have been
  # evicted yet
  cache = StateCache()
- check: updateStateData(
+ check: updateState(
  dag, tmpStateData[], dag.head.atSlot(dag.head.slot), false, cache)

  check:

@@ -467,15 +467,15 @@ suite "chain DAG finalization tests" & preset():
  dag2.head.root == dag.head.root
  dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
  dag2.finalizedHead.slot == dag.finalizedHead.slot
- getStateRoot(dag2.headState.data) == getStateRoot(dag.headState.data)
+ getStateRoot(dag2.headState) == getStateRoot(dag.headState)

  test "orphaned epoch block" & preset():
  let prestate = (ref ForkedHashedBeaconState)(kind: BeaconStateFork.Phase0)
  for i in 0 ..< SLOTS_PER_EPOCH:
  if i == SLOTS_PER_EPOCH - 1:
- assign(prestate[], dag.headState.data)
+ assign(prestate[], dag.headState)

- let blck = makeTestBlock(dag.headState.data, cache).phase0Data
+ let blck = makeTestBlock(dag.headState, cache).phase0Data
  let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
  check: added.isOk()
  dag.updateHead(added[], quarantine)

@@ -508,7 +508,7 @@ suite "chain DAG finalization tests" & preset():

  test "init with gaps" & preset():
  for blck in makeTestBlocks(
- dag.headState.data, cache, int(SLOTS_PER_EPOCH * 6 - 2),
+ dag.headState, cache, int(SLOTS_PER_EPOCH * 6 - 2),
  true):
  let added = dag.addHeadBlock(verifier, blck.phase0Data, nilPhase0Callback)
  check: added.isOk()

@@ -518,13 +518,13 @@ suite "chain DAG finalization tests" & preset():
  # Advance past epoch so that the epoch transition is gapped
  check:
  process_slots(
- defaultRuntimeConfig, dag.headState.data, Slot(SLOTS_PER_EPOCH * 6 + 2),
+ defaultRuntimeConfig, dag.headState, Slot(SLOTS_PER_EPOCH * 6 + 2),
  cache, info, {}).isOk()

  let blck = makeTestBlock(
- dag.headState.data, cache,
+ dag.headState, cache,
  attestations = makeFullAttestations(
- dag.headState.data, dag.head.root, getStateField(dag.headState.data, slot),
+ dag.headState, dag.head.root, getStateField(dag.headState, slot),
  cache, {})).phase0Data

  let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback)

@@ -540,11 +540,11 @@ suite "chain DAG finalization tests" & preset():
  while cur.slot >= dag.finalizedHead.slot:
  assign(tmpStateData[], dag.headState)
  check:
- dag.updateStateData(tmpStateData[], cur.atSlot(cur.slot), false, cache)
+ dag.updateState(tmpStateData[], cur.atSlot(cur.slot), false, cache)
  dag.getForkedBlock(cur.bid).get().phase0Data.message.state_root ==
- getStateRoot(tmpStateData[].data)
- getStateRoot(tmpStateData[].data) == hash_tree_root(
- tmpStateData[].data.phase0Data.data)
+ getStateRoot(tmpStateData[])
+ getStateRoot(tmpStateData[]) == hash_tree_root(
+ tmpStateData[].phase0Data.data)
  cur = cur.parent

  let

@@ -557,7 +557,7 @@ suite "chain DAG finalization tests" & preset():
  dag2.head.root == dag.head.root
  dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
  dag2.finalizedHead.slot == dag.finalizedHead.slot
- getStateRoot(dag2.headState.data) == getStateRoot(dag.headState.data)
+ getStateRoot(dag2.headState) == getStateRoot(dag.headState)

  suite "Old database versions" & preset():
  setup:

@@ -580,7 +580,7 @@ suite "Old database versions" & preset():

  # preInit a database to a v1.0.12 state
  db.putStateRoot(
- genState[].latest_block_root(), genState[].data.slot, genState[].root)
+ genState[].latest_block_root, genState[].data.slot, genState[].root)
  db.putStateV0(genState[].root, genState[].data)

  db.putBlockV0(genBlock)

@@ -591,7 +591,7 @@ suite "Old database versions" & preset():
  var
  validatorMonitor = newClone(ValidatorMonitor.init())
  dag = init(ChainDAGRef, defaultRuntimeConfig, db,validatorMonitor, {})
- state = newClone(dag.headState.data)
+ state = newClone(dag.headState)
  cache = StateCache()
  att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
  b1 = addTestBlock(state[], cache, attestations = att0).phase0Data

@@ -617,7 +617,7 @@ suite "Diverging hardforks":
  quarantine = newClone(Quarantine.init())
  cache = StateCache()
  info = ForkedEpochInfo()
- tmpState = assignClone(dag.headState.data)
+ tmpState = assignClone(dag.headState)

  test "Tail block only in common":
  check:
@@ -34,7 +34,7 @@ suite "Exit pool testing suite":

  pool[].addMessage(msg)
  check: pool[].isSeen(msg)
- withState(dag.headState.data):
+ withState(dag.headState):
  check:
  pool[].getBeaconBlockExits(state.data).proposer_slashings.lenu64 ==
  min(i + 1, MAX_PROPOSER_SLASHINGS)

@@ -54,7 +54,7 @@ suite "Exit pool testing suite":

  pool[].addMessage(msg)
  check: pool[].isSeen(msg)
- withState(dag.headState.data):
+ withState(dag.headState):
  check:
  pool[].getBeaconBlockExits(state.data).attester_slashings.lenu64 ==
  min(i + 1, MAX_ATTESTER_SLASHINGS)

@@ -70,7 +70,7 @@ suite "Exit pool testing suite":

  pool[].addMessage(msg)
  check: pool[].isSeen(msg)
- withState(dag.headState.data):
+ withState(dag.headState):
  check:
  pool[].getBeaconBlockExits(state.data).voluntary_exits.lenu64 ==
  min(i + 1, MAX_VOLUNTARY_EXITS)
@@ -51,17 +51,17 @@ suite "Gossip validation " & preset():
  # Slot 0 is a finalized slot - won't be making attestations for it..
  check:
  process_slots(
- defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1,
+ defaultRuntimeConfig, state[], getStateField(state[], slot) + 1,
  cache, info, {}).isOk()

  test "Empty committee when no committee for slot":
  template committee(idx: uint64): untyped =
  get_beacon_committee(
- dag.headState.data, dag.head.slot, idx.CommitteeIndex, cache)
+ dag.headState, dag.head.slot, idx.CommitteeIndex, cache)

  template committeeLen(idx: uint64): untyped =
  get_beacon_committee_len(
- dag.headState.data, dag.head.slot, idx.CommitteeIndex, cache)
+ dag.headState, dag.head.slot, idx.CommitteeIndex, cache)

  check:
  committee(0).len > 0

@@ -75,7 +75,7 @@ suite "Gossip validation " & preset():
  var
  cache: StateCache
  for blck in makeTestBlocks(
- dag.headState.data, cache, int(SLOTS_PER_EPOCH * 5), false):
+ dag.headState, cache, int(SLOTS_PER_EPOCH * 5), false):
  let added = dag.addHeadBlock(verifier, blck.phase0Data) do (
  blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
  epochRef: EpochRef):

@@ -90,15 +90,15 @@ suite "Gossip validation " & preset():
  var
  # Create attestations for slot 1
  beacon_committee = get_beacon_committee(
- dag.headState.data, dag.head.slot, 0.CommitteeIndex, cache)
+ dag.headState, dag.head.slot, 0.CommitteeIndex, cache)
  att_1_0 = makeAttestation(
- dag.headState.data, dag.head.root, beacon_committee[0], cache)
+ dag.headState, dag.head.root, beacon_committee[0], cache)
  att_1_1 = makeAttestation(
- dag.headState.data, dag.head.root, beacon_committee[1], cache)
+ dag.headState, dag.head.root, beacon_committee[1], cache)

  committees_per_slot =
- get_committee_count_per_slot(dag.headState.data,
- att_1_0.data.slot.epoch, cache)
+ get_committee_count_per_slot(
+ dag.headState, att_1_0.data.slot.epoch, cache)

  subnet = compute_subnet_for_attestation(
  committees_per_slot,

@@ -194,7 +194,7 @@ suite "Gossip validation - Extra": # Not based on preset config
  cfg, makeTestDB(num_validators), validatorMonitor, {})
  var cache = StateCache()
  for blck in makeTestBlocks(
- dag.headState.data, cache, int(SLOTS_PER_EPOCH), false, cfg = cfg):
+ dag.headState, cache, int(SLOTS_PER_EPOCH), false, cfg = cfg):
  let added =
  case blck.kind
  of BeaconBlockFork.Phase0:

@@ -209,7 +209,7 @@ suite "Gossip validation - Extra": # Not based on preset config
  check: added.isOk()
  dag.updateHead(added[], quarantine[])
  dag
- state = assignClone(dag.headState.data.altairData)
+ state = assignClone(dag.headState.altairData)
  slot = state[].data.slot

  subcommitteeIdx = 0.SyncSubcommitteeIndex
@@ -38,7 +38,7 @@ suite "Light client" & preset():
  var cache: StateCache
  const maxAttestedSlotsPerPeriod = 3 * SLOTS_PER_EPOCH
  while true:
- var slot = getStateField(dag.headState.data, slot)
+ var slot = getStateField(dag.headState, slot)
  doAssert targetSlot >= slot
  if targetSlot == slot: break

@@ -51,13 +51,13 @@ suite "Light client" & preset():
  checkpointSlot = periodSlot - maxAttestedSlotsPerPeriod
  if targetSlot > checkpointSlot and checkpointSlot > dag.head.slot:
  var info: ForkedEpochInfo
- doAssert process_slots(cfg, dag.headState.data, checkpointSlot,
+ doAssert process_slots(cfg, dag.headState, checkpointSlot,
  cache, info, flags = {}).isOk()
  slot = checkpointSlot

  # Create blocks for final few epochs
  let blocks = min(targetSlot - slot, maxAttestedSlotsPerPeriod)
- for blck in makeTestBlocks(dag.headState.data, cache, blocks.int,
+ for blck in makeTestBlocks(dag.headState, cache, blocks.int,
  attested, syncCommitteeRatio, cfg):
  let added =
  case blck.kind

@@ -88,21 +88,21 @@ suite "Light client" & preset():
  test "Pre-Altair":
  # Genesis
  check:
- dag.headState.data.kind == BeaconStateFork.Phase0
+ dag.headState.kind == BeaconStateFork.Phase0
  dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
  dag.getLatestLightClientUpdate.isNone

  # Advance to last slot before Altair
  dag.advanceToSlot(altairStartSlot - 1, verifier, quarantine[])
  check:
- dag.headState.data.kind == BeaconStateFork.Phase0
+ dag.headState.kind == BeaconStateFork.Phase0
  dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
  dag.getLatestLightClientUpdate.isNone

  # Advance to Altair
  dag.advanceToSlot(altairStartSlot, verifier, quarantine[])
  check:
- dag.headState.data.kind == BeaconStateFork.Altair
+ dag.headState.kind == BeaconStateFork.Altair
  dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
  dag.getLatestLightClientUpdate.isNone

@@ -113,7 +113,7 @@ suite "Light client" & preset():
  # Track trusted checkpoint for light client
  let
  genesis_validators_root = dag.genesisValidatorsRoot
- trusted_block_root = dag.headState.blck.root
+ trusted_block_root = dag.head.root

  # Advance to target slot
  const

@@ -121,7 +121,7 @@ suite "Light client" & preset():
  periodEpoch = headPeriod.start_epoch
  headSlot = (periodEpoch + 2).start_slot + 5
  dag.advanceToSlot(headSlot, verifier, quarantine[])
- let currentSlot = getStateField(dag.headState.data, slot)
+ let currentSlot = getStateField(dag.headState, slot)

  # Initialize light client store
  let bootstrap = dag.getLightClientBootstrap(trusted_block_root)

@@ -158,7 +158,7 @@ suite "Light client" & preset():
  store, latestUpdate.get, currentSlot, cfg, genesis_validators_root)
  check:
  latestUpdate.isSome
- latestUpdate.get.attested_header.slot == dag.headState.blck.parent.slot
+ latestUpdate.get.attested_header.slot == dag.head.parent.slot
  res.isOk
  store.finalized_header == latestUpdate.get.finalized_header
  store.optimistic_header == latestUpdate.get.attested_header

@@ -171,7 +171,7 @@ suite "Light client" & preset():
  skip
  return

- let genesisState = assignClone dag.headState.data
+ let genesisState = assignClone dag.headState

  # Advance to target slot for checkpoint
  let finalizedSlot =

@@ -182,7 +182,7 @@ suite "Light client" & preset():
  let cpDb = BeaconChainDB.new("", inMemory = true)
  ChainDAGRef.preInit(
  cpDB, genesisState[],
- dag.headState.data, dag.getForkedBlock(dag.head.bid).get)
+ dag.headState, dag.getForkedBlock(dag.head.bid).get)
  let cpDag = ChainDAGRef.init(
  cfg, cpDb, validatorMonitor, {},
  serveLightClientData = true,
@@ -53,17 +53,17 @@ suite "Beacon state" & preset():
  info: ForkedEpochInfo

  check: # Works for genesis block
- state[].phase0Data.latest_block_root() == genBlock.root
+ state[].phase0Data.latest_block_root == genBlock.root
  process_slots(cfg, state[], Slot 1, cache, info, {}).isOk()
- state[].phase0Data.latest_block_root() == genBlock.root
+ state[].phase0Data.latest_block_root == genBlock.root

  let blck = addTestBlock(
  state[], cache, nextSlot = false, flags = {skipBlsValidation}).phase0Data

  check: # Works for random blocks
- state[].phase0Data.latest_block_root() == blck.root
+ state[].phase0Data.latest_block_root == blck.root
  process_slots(cfg, state[], Slot 2, cache, info, {}).isOk()
- state[].phase0Data.latest_block_root() == blck.root
+ state[].phase0Data.latest_block_root == blck.root

  test "get_beacon_proposer_index":
  var
@@ -27,7 +27,7 @@ suite "state diff tests" & preset():
  dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})

  test "random slot differences" & preset():
- let testStates = getTestStates(dag.headState.data, BeaconStateFork.Altair)
+ let testStates = getTestStates(dag.headState, BeaconStateFork.Altair)

  for i in 0 ..< testStates.len:
  for j in (i+1) ..< testStates.len:
@@ -402,7 +402,7 @@ iterator makeTestBlocks*(
  state = assignClone(state)
  for _ in 0..<blocks:
  let
- parent_root = withState(state[]): state.latest_block_root()
+ parent_root = withState(state[]): state.latest_block_root
  attestations =
  if attested:
  makeFullAttestations(
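The test-block iterator likewise derives each new block's parent root from the state it is about to extend. A sketch of the loop's core (block building and application elided):

  for _ in 0 ..< blocks:
    let parent_root = withState(state[]):
      # root of the block most recently applied to `state`
      state.latest_block_root
    # ... the next test block is then built on top of parent_root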