use ForkedHashedBeaconState in StateData (#2634)

* use ForkedHashedBeaconState in StateData (see the sketch at the end of this list)

* fix FAR_FUTURE_EPOCH -> slot overflow; almost always use assign()

* avoid stack allocation in maybeUpgradeStateToAltair()

* create and use dispatch functions for check_attester_slashing(), check_proposer_slashing(), and check_voluntary_exit()

* use getStateRoot() instead of various state.data.hbsPhase0.root

* remove withStateVars.hashedState(), which no longer works as a design

* introduce spec/datatypes/altair into beacon_chain_db

* fix inefficient codegen for getStateField(largeStateField)

* state_transition_slots() neither needs nor uses blocks or runtime presets

* combine process_slots(HBS)/state_transition_slots(HBS), which differ only in the last-slot hash_tree_root optimization

* getStateField(StateData, ...) was replaced by getStateField(ForkedHashedBeaconState, ...)

* fix rollback

* switch some state_transition(), process_slots, makeTestBlocks(), etc to use ForkedHashedBeaconState

* remove state_transition(phase0.HashedBeaconState)

* remove process_slots(phase0.HashedBeaconState)

* remove state_transition_block(phase0.HashedBeaconState)

* remove unused callWithBS(); separate case expression from if statement

* switch back from nested-ref-object construction to (ref Foo)(Bar())
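
A minimal, self-contained sketch of the fork-dispatch pattern this commit moves to. It uses toy stand-in types; the real ForkedHashedBeaconState, getStateField and getStateRoot live in spec/datatypes and spec/forkedbeaconstate_helpers, wrap the per-fork HashedBeaconState objects (hbsPhase0/hbsAltair) and cover far more fields, so the names and signatures below are simplifying assumptions rather than the actual implementation.

type
  BeaconStateFork = enum
    forkPhase0, forkAltair

  ToyHashedState = object
    root: string        # stand-in for the cached hash_tree_root of the state
    slot: uint64
    genesis_time: uint64

  ToyForkedHashedBeaconState = object
    case beaconStateFork: BeaconStateFork
    of forkPhase0:
      hbsPhase0: ToyHashedState
    of forkAltair:
      hbsAltair: ToyHashedState

template getStateField(x: ToyForkedHashedBeaconState, y: untyped): untyped =
  # Dispatch on the active fork; taking the address of the selected branch and
  # dereferencing avoids copying large fields (cf. "fix inefficient codegen
  # for getStateField(largeStateField)").
  (case x.beaconStateFork
  of forkPhase0: unsafeAddr x.hbsPhase0.y
  of forkAltair: unsafeAddr x.hbsAltair.y)[]

template getStateRoot(x: ToyForkedHashedBeaconState): untyped =
  getStateField(x, root)

when isMainModule:
  let state = ToyForkedHashedBeaconState(
    beaconStateFork: forkPhase0,
    hbsPhase0: ToyHashedState(root: "r0", slot: 32, genesis_time: 0))
  doAssert getStateField(state, slot) == 32
  doAssert getStateRoot(state) == "r0"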
tersec 2021-06-11 17:51:46 +00:00 committed by GitHub
parent f2588be9ab
commit 146fa48454
57 changed files with 1050 additions and 957 deletions


@ -13,7 +13,8 @@ import
serialization, chronicles, snappy,
eth/db/[kvstore, kvstore_sqlite3],
./networking/network_metadata, ./beacon_chain_db_immutable,
./spec/[crypto, datatypes, digest, state_transition],
./spec/[crypto, digest, state_transition],
./spec/datatypes/[phase0, altair],
./ssz/[ssz_serialization, merkleization],
./filepath
@ -159,14 +160,14 @@ func subkey[N: static int](kind: DbKeyKind, key: array[N, byte]):
result[0] = byte ord(kind)
result[1 .. ^1] = key
func subkey(kind: type BeaconState, key: Eth2Digest): auto =
func subkey(kind: type phase0.BeaconState, key: Eth2Digest): auto =
subkey(kHashToState, key.data)
func subkey(
kind: type BeaconStateNoImmutableValidators, key: Eth2Digest): auto =
subkey(kHashToStateOnlyMutableValidators, key.data)
func subkey(kind: type SignedBeaconBlock, key: Eth2Digest): auto =
func subkey(kind: type phase0.SignedBeaconBlock, key: Eth2Digest): auto =
subkey(kHashToBlock, key.data)
func subkey(kind: type BeaconBlockSummary, key: Eth2Digest): auto =
@ -459,7 +460,7 @@ proc close*(db: BeaconchainDB) =
db.db = nil
func toBeaconBlockSummary(v: SomeBeaconBlock): BeaconBlockSummary =
func toBeaconBlockSummary(v: SomeSomeBeaconBlock): BeaconBlockSummary =
BeaconBlockSummary(
slot: v.slot,
parent_root: v.parent_root,
@ -470,7 +471,7 @@ proc putBeaconBlockSummary(
# Summaries are too simple / small to compress, store them as plain SSZ
db.summaries.putSSZ(root.data, value)
proc putBlock*(db: BeaconChainDB, value: TrustedSignedBeaconBlock) =
proc putBlock*(db: BeaconChainDB, value: phase0.TrustedSignedBeaconBlock) =
db.blocks.putSnappySSZ(value.root.data, value)
db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())
@ -485,13 +486,13 @@ proc updateImmutableValidators*(
db.immutableValidatorsDb.add immutableValidator
db.immutableValidators.add immutableValidator
proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
proc putState*(db: BeaconChainDB, key: Eth2Digest, value: phase0.BeaconState) =
db.updateImmutableValidators(value.validators.asSeq())
db.statesNoVal.putSnappySSZ(
key.data,
isomorphicCast[BeaconStateNoImmutableValidators](value))
proc putState*(db: BeaconChainDB, value: BeaconState) =
proc putState*(db: BeaconChainDB, value: phase0.BeaconState) =
db.putState(hash_tree_root(value), value)
func stateRootKey(root: Eth2Digest, slot: Slot): array[40, byte] =
@ -535,18 +536,20 @@ proc putEth2FinalizedTo*(db: BeaconChainDB,
eth1Checkpoint: DepositContractSnapshot) =
db.keyValues.putSnappySSZ(subkey(kDepositsFinalizedByEth2), eth1Checkpoint)
proc getBlock(db: BeaconChainDBV0, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
proc getBlock(db: BeaconChainDBV0, key: Eth2Digest): Opt[phase0.TrustedSignedBeaconBlock] =
# We only store blocks that we trust in the database
result.ok(TrustedSignedBeaconBlock())
if db.backend.getSnappySSZ(subkey(SignedBeaconBlock, key), result.get) != GetResult.found:
result.ok(default(phase0.TrustedSignedBeaconBlock))
if db.backend.getSnappySSZ(
subkey(phase0.SignedBeaconBlock, key), result.get) != GetResult.found:
result.err()
else:
# set root after deserializing (so it doesn't get zeroed)
result.get().root = key
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
proc getBlock*(db: BeaconChainDB, key: Eth2Digest):
Opt[phase0.TrustedSignedBeaconBlock] =
# We only store blocks that we trust in the database
result.ok(TrustedSignedBeaconBlock())
result.ok(default(phase0.TrustedSignedBeaconBlock))
if db.blocks.getSnappySSZ(key.data, result.get) != GetResult.found:
result = db.v0.getBlock(key)
else:
@ -555,7 +558,7 @@ proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock
proc getStateOnlyMutableValidators(
immutableValidators: openArray[ImmutableValidatorData2],
store: KvStoreRef, key: openArray[byte], output: var BeaconState,
store: KvStoreRef, key: openArray[byte], output: var phase0.BeaconState,
rollback: RollbackProc): bool =
## Load state into `output` - BeaconState is large so we want to avoid
## re-allocating it if possible
@ -598,7 +601,7 @@ proc getStateOnlyMutableValidators(
proc getState(
db: BeaconChainDBV0,
immutableValidators: openArray[ImmutableValidatorData2],
key: Eth2Digest, output: var BeaconState,
key: Eth2Digest, output: var phase0.BeaconState,
rollback: RollbackProc): bool =
# Nimbus 1.0 reads and writes genesis BeaconState to `backend`
# Nimbus 1.1 writes a genesis BeaconStateNoImmutableValidators to `backend` and
@ -615,7 +618,7 @@ proc getState(
subkey(BeaconStateNoImmutableValidators, key), output, rollback):
return true
case db.backend.getSnappySSZ(subkey(BeaconState, key), output)
case db.backend.getSnappySSZ(subkey(phase0.BeaconState, key), output)
of GetResult.found:
true
of GetResult.notFound:
@ -625,7 +628,7 @@ proc getState(
false
proc getState*(
db: BeaconChainDB, key: Eth2Digest, output: var BeaconState,
db: BeaconChainDB, key: Eth2Digest, output: var phase0.BeaconState,
rollback: RollbackProc): bool =
## Load state into `output` - BeaconState is large so we want to avoid
## re-allocating it if possible
@ -692,7 +695,7 @@ proc getEth2FinalizedTo*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
if r != found: return db.v0.getEth2FinalizedTo()
proc containsBlock*(db: BeaconChainDBV0, key: Eth2Digest): bool =
db.backend.contains(subkey(SignedBeaconBlock, key)).expectDb()
db.backend.contains(subkey(phase0.SignedBeaconBlock, key)).expectDb()
proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
db.blocks.contains(key.data).expectDb() or db.v0.containsBlock(key)
@ -701,25 +704,25 @@ proc containsState*(db: BeaconChainDBV0, key: Eth2Digest): bool =
let sk = subkey(BeaconStateNoImmutableValidators, key)
db.stateStore.contains(sk).expectDb() or
db.backend.contains(sk).expectDb() or
db.backend.contains(subkey(BeaconState, key)).expectDb()
db.backend.contains(subkey(phase0.BeaconState, key)).expectDb()
proc containsState*(db: BeaconChainDB, key: Eth2Digest, legacy: bool = true): bool =
db.statesNoVal.contains(key.data).expectDb or
(legacy and db.v0.containsState(key))
iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
TrustedSignedBeaconBlock =
phase0.TrustedSignedBeaconBlock =
## Load a chain of ancestors for blck - returns a list of blocks with the
## oldest block last (blck will be at result[0]).
##
## The search will go on until the ancestor cannot be found.
var
res: TrustedSignedBeaconBlock
res: phase0.TrustedSignedBeaconBlock
root = root
while db.blocks.getSnappySSZ(root.data, res) == GetResult.found or
db.v0.backend.getSnappySSZ(
subkey(SignedBeaconBlock, root), res) == GetResult.found:
subkey(phase0.SignedBeaconBlock, root), res) == GetResult.found:
res.root = root
yield res
root = res.message.parent_root
@ -757,7 +760,7 @@ iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
var
summaries = db.loadSummaries()
res: RootedSummary
blck: TrustedSignedBeaconBlock
blck: phase0.TrustedSignedBeaconBlock
newSummaries: seq[RootedSummary]
res.root = root
@ -790,7 +793,8 @@ iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
do: # Summary was not found in summary table, look elsewhere
if db.v0.backend.getSnappySSZ(subkey(BeaconBlockSummary, res.root), res.summary) == GetResult.found:
yield res
elif db.v0.backend.getSnappySSZ(subkey(SignedBeaconBlock, res.root), blck) == GetResult.found:
elif db.v0.backend.getSnappySSZ(
subkey(phase0.SignedBeaconBlock, res.root), blck) == GetResult.found:
res.summary = blck.message.toBeaconBlockSummary()
yield res
else:


@ -14,9 +14,11 @@ import
metrics,
chronicles, stew/byteutils, json_serialization/std/sets as jsonSets,
# Internal
../spec/[beaconstate, datatypes, crypto, digest],
../spec/[
beaconstate, datatypes, crypto, digest, forkedbeaconstate_helpers,
validator],
../ssz/merkleization,
"."/[spec_cache, blockchain_dag, block_quarantine, statedata_helpers],
"."/[spec_cache, blockchain_dag, block_quarantine],
".."/[beacon_clock, beacon_node_types, extras],
../fork_choice/fork_choice
@ -83,9 +85,9 @@ proc init*(T: type AttestationPool, dag: ChainDAGRef, quarantine: QuarantineRef)
info "Fork choice initialized",
justified_epoch = getStateField(
dag.headState, current_justified_checkpoint).epoch,
dag.headState.data, current_justified_checkpoint).epoch,
finalized_epoch = getStateField(
dag.headState, finalized_checkpoint).epoch,
dag.headState.data, finalized_checkpoint).epoch,
finalized_root = shortlog(dag.finalizedHead.blck.root)
T(
@ -370,16 +372,16 @@ func add(
do:
attCache[key] = aggregation_bits
func init(T: type AttestationCache, state: StateData): T =
func init(T: type AttestationCache, state: HashedBeaconState): T =
# Load attestations that are scheduled for being given rewards for
for i in 0..<getStateField(state, previous_epoch_attestations).len():
for i in 0..<state.data.previous_epoch_attestations.len():
result.add(
getStateField(state, previous_epoch_attestations)[i].data,
getStateField(state, previous_epoch_attestations)[i].aggregation_bits)
for i in 0..<getStateField(state, current_epoch_attestations).len():
state.data.previous_epoch_attestations[i].data,
state.data.previous_epoch_attestations[i].aggregation_bits)
for i in 0..<state.data.current_epoch_attestations.len():
result.add(
getStateField(state, current_epoch_attestations)[i].data,
getStateField(state, current_epoch_attestations)[i].aggregation_bits)
state.data.current_epoch_attestations[i].data,
state.data.current_epoch_attestations[i].aggregation_bits)
proc score(
attCache: var AttestationCache, data: AttestationData,
@ -403,12 +405,12 @@ proc score(
bitsScore
proc getAttestationsForBlock*(pool: var AttestationPool,
state: StateData,
state: HashedBeaconState,
cache: var StateCache): seq[Attestation] =
## Retrieve attestations that may be added to a new block at the slot of the
## given state
## https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#attestations
let newBlockSlot = getStateField(state, slot).uint64
let newBlockSlot = state.data.slot.uint64
if newBlockSlot < MIN_ATTESTATION_INCLUSION_DELAY:
return # Too close to genesis
@ -448,7 +450,7 @@ proc getAttestationsForBlock*(pool: var AttestationPool,
# attestation to - there might have been a fork between when we first
# saw the attestation and the time that we added it
if not check_attestation(
state.data.data, attestation, {skipBlsValidation}, cache).isOk():
state.data, attestation, {skipBlsValidation}, cache).isOk():
continue
let score = attCache.score(
@ -472,10 +474,10 @@ proc getAttestationsForBlock*(pool: var AttestationPool,
#
# A possible improvement here would be to use a maximum cover algorithm.
var
prevEpoch = state.get_previous_epoch()
prevEpoch = state.data.get_previous_epoch()
prevEpochSpace =
getStateField(state, previous_epoch_attestations).maxLen -
getStateField(state, previous_epoch_attestations).len()
state.data.previous_epoch_attestations.maxLen -
state.data.previous_epoch_attestations.len()
var res: seq[Attestation]
let totalCandidates = candidates.len()


@ -13,7 +13,9 @@ import
stew/[assign2, results],
eth/keys,
../extras, ../beacon_clock,
../spec/[crypto, datatypes, digest, helpers, signatures, signatures_batch, state_transition],
../spec/[
crypto, datatypes, digest, forkedbeaconstate_helpers, helpers, signatures,
signatures_batch, state_transition],
./block_pools_types, ./blockchain_dag, ./block_quarantine
from libp2p/protocols/pubsub/pubsub import ValidationResult
@ -80,7 +82,7 @@ proc addResolvedBlock(
stateVerifyDur: Duration
) =
# TODO move quarantine processing out of here
doAssert getStateField(state, slot) == trustedBlock.message.slot,
doAssert getStateField(state.data, slot) == trustedBlock.message.slot,
"state must match block"
doAssert state.blck.root == trustedBlock.message.parent_root,
"the StateData passed into the addResolved function not yet updated!"
@ -116,7 +118,7 @@ proc addResolvedBlock(
# as soon as we import a block, we'll also update the shared public key
# cache
dag.updateValidatorKeys(getStateField(state, validators).asSeq())
dag.updateValidatorKeys(getStateField(state.data, validators).asSeq())
# Getting epochRef with the state will potentially create a new EpochRef
let
@ -134,7 +136,7 @@ proc addResolvedBlock(
# Notify others of the new block before processing the quarantine, such that
# notifications for parents happens before those of the children
if onBlockAdded != nil:
onBlockAdded(blockRef, trustedBlock, epochRef, state.data)
onBlockAdded(blockRef, trustedBlock, epochRef, state.data.hbsPhase0)
# Now that we have the new block, we should see if any of the previously
# unresolved blocks magically become resolved
@ -161,7 +163,7 @@ proc checkStateTransition(
dag: ChainDAGRef, signedBlock: SomeSignedBeaconBlock,
cache: var StateCache): (ValidationResult, BlockError) =
## Ensure block can be applied on a state
func restore(v: var HashedBeaconState) =
func restore(v: var ForkedHashedBeaconState) =
# TODO address this ugly workaround - there should probably be a
# `state_transition` that takes a `StateData` instead and updates
# the block as well
@ -172,9 +174,12 @@ proc checkStateTransition(
blck = shortLog(signedBlock.message)
blockRoot = shortLog(signedBlock.root)
# TODO this won't transition because FAR_FUTURE_SLOT, so it's
# fine, for now, but in general, blockchain_dag.addBlock must
# match the transition here.
if not state_transition_block(
dag.runtimePreset, dag.clearanceState.data, signedBlock,
cache, dag.updateFlags, restore):
cache, dag.updateFlags, restore, FAR_FUTURE_SLOT):
info "Invalid block"
return (ValidationResult.Reject, Invalid)
@ -187,7 +192,7 @@ proc advanceClearanceState*(dag: ChainDagRef) =
# epoch transition ahead of time.
# Notably, we use the clearance state here because that's where the block will
# first be seen - later, this state will be copied to the head state!
if dag.clearanceState.blck.slot == getStateField(dag.clearanceState, slot):
if dag.clearanceState.blck.slot == getStateField(dag.clearanceState.data, slot):
let next =
dag.clearanceState.blck.atSlot(dag.clearanceState.blck.slot + 1)
@ -247,10 +252,9 @@ proc addRawBlockKnownParent(
# First, batch-verify all signatures in block
if skipBLSValidation notin dag.updateFlags:
# TODO: remove skipBLSValidation
var sigs: seq[SignatureSet]
if sigs.collectSignatureSets(
signedBlock, dag.db.immutableValidators, dag.clearanceState, cache).isErr():
signedBlock, dag.db.immutableValidators, dag.clearanceState.data, cache).isErr():
# A PublicKey or Signature isn't on the BLS12-381 curve
return err((ValidationResult.Reject, Invalid))
if not quarantine.batchVerify(sigs):


@ -14,7 +14,8 @@ import
stew/[endians2], chronicles,
eth/keys,
# Internals
../spec/[datatypes, crypto, digest, signatures_batch],
../spec/[
datatypes, crypto, digest, signatures_batch, forkedbeaconstate_helpers],
../beacon_chain_db, ../extras
export sets, tables
@ -182,12 +183,31 @@ type
# balances, as used in fork choice
effective_balances_bytes*: seq[byte]
BlockRef* = ref object
## Node in object graph guaranteed to lead back to tail block, and to have
## a corresponding entry in database.
## Block graph should form a tree - in particular, there are no cycles.
root*: Eth2Digest ##\
## Root that can be used to retrieve block data from database
parent*: BlockRef ##\
## Not nil, except for the tail
slot*: Slot # could calculate this by walking to root, but..
BlockData* = object
## Body and graph in one
data*: TrustedSignedBeaconBlock # We trust all blocks we have a ref for
refs*: BlockRef
StateData* = object
data*: ForkedHashedBeaconState
blck*: BlockRef ##\
## The block associated with the state found in data
BlockSlot* = object
## Unique identifier for a particular fork and time in the block chain -
## normally, there's a block for every slot, but in the case a block is not
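
The call-site translations the new StateData implies, collected from the hunks in this commit (a summary, not exhaustive):

#   getStateField(state, slot)                  -> getStateField(state.data, slot)
#   state.data.root                             -> getStateRoot(state.data)
#   state.data.data  (raw phase0 BeaconState)   -> state.data.hbsPhase0.data
#   check_attester_slashing(state.data.data, x) -> check_attester_slashing(state.data, x)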


@ -14,10 +14,10 @@ import
../ssz/[ssz_serialization, merkleization], ../beacon_chain_db, ../extras,
../spec/[
crypto, digest, helpers, validator, state_transition,
beaconstate],
beaconstate, forkedbeaconstate_helpers],
../spec/datatypes/[phase0, altair],
../beacon_clock,
"."/[block_pools_types, block_quarantine, statedata_helpers]
"."/[block_pools_types, block_quarantine]
export block_pools_types, helpers, phase0
@ -57,10 +57,8 @@ template withStateVars*(
## Inject a few more descriptive names for the members of `stateData` -
## the stateData instance may get mutated through these names as well
template stateData(): StateData {.inject, used.} = stateDataInternal
template hashedState(): HashedBeaconState {.inject, used.} =
stateDataInternal.data
template stateRoot(): Eth2Digest {.inject, used.} =
stateDataInternal.data.root
getStateRoot(stateDataInternal.data)
template blck(): BlockRef {.inject, used.} = stateDataInternal.blck
template root(): Eth2Digest {.inject, used.} = stateDataInternal.data.root
@ -140,20 +138,20 @@ func init*(
T: type EpochRef, dag: ChainDAGRef, state: StateData,
cache: var StateCache): T =
let
epoch = state.get_current_epoch()
epoch = state.data.get_current_epoch()
epochRef = EpochRef(
dag: dag, # This gives access to the validator pubkeys through an EpochRef
key: state.blck.epochAncestor(epoch),
eth1_data: getStateField(state, eth1_data),
eth1_deposit_index: getStateField(state, eth1_deposit_index),
eth1_data: getStateField(state.data, eth1_data),
eth1_deposit_index: getStateField(state.data, eth1_deposit_index),
current_justified_checkpoint:
getStateField(state, current_justified_checkpoint),
finalized_checkpoint: getStateField(state, finalized_checkpoint),
getStateField(state.data, current_justified_checkpoint),
finalized_checkpoint: getStateField(state.data, finalized_checkpoint),
shuffled_active_validator_indices:
cache.get_shuffled_active_validator_indices(state, epoch))
cache.get_shuffled_active_validator_indices(state.data, epoch))
for i in 0'u64..<SLOTS_PER_EPOCH:
epochRef.beacon_proposers[i] = get_beacon_proposer_index(
state.data.data, cache, epoch.compute_start_slot_at_epoch() + i)
state.data, cache, epoch.compute_start_slot_at_epoch() + i)
# When fork choice runs, it will need the effective balance of the justified
# checkpoint - we pre-load the balances here to avoid rewinding the justified
@ -168,7 +166,8 @@ func init*(
epochRef.effective_balances_bytes =
snappyEncode(SSZ.encode(
List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT](get_effective_balances(
getStateField(state, validators).asSeq, get_current_epoch(state)))))
getStateField(state.data, validators).asSeq,
get_current_epoch(state.data)))))
epochRef
@ -391,8 +390,8 @@ proc init*(T: type ChainDAGRef,
if cur.isStateCheckpoint():
let root = db.getStateRoot(cur.blck.root, cur.slot)
if root.isSome():
if db.getState(root.get(), tmpState.data.data, noRollback):
tmpState.data.root = root.get()
if db.getState(root.get(), tmpState.data.hbsPhase0.data, noRollback):
tmpState.data.hbsPhase0.root = root.get()
tmpState.blck = cur.blck
break
@ -431,7 +430,7 @@ proc init*(T: type ChainDAGRef,
# When we start from a snapshot state, the `finalized_checkpoint` in the
# snapshot will point to an even older state, but we trust the tail state
# (the snapshot) to be finalized, hence the `max` expression below.
let finalizedEpoch = max(getStateField(dag.headState, finalized_checkpoint).epoch,
let finalizedEpoch = max(getStateField(dag.headState.data, finalized_checkpoint).epoch,
tailRef.slot.epoch)
dag.finalizedHead = headRef.atEpochStart(finalizedEpoch)
@ -452,7 +451,7 @@ func getEpochRef*(
dag: ChainDAGRef, state: StateData, cache: var StateCache): EpochRef =
let
blck = state.blck
epoch = state.get_current_epoch()
epoch = state.data.get_current_epoch()
var epochRef = dag.findEpochRef(blck, epoch)
if epochRef == nil:
@ -509,13 +508,13 @@ proc getState(
unsafeAddr dag.headState
func restore(v: var phase0.BeaconState) =
assign(v, restoreAddr[].data.data)
assign(v, restoreAddr[].data.hbsPhase0.data)
if not dag.db.getState(stateRoot, state.data.data, restore):
if not dag.db.getState(stateRoot, state.data.hbsPhase0.data, restore):
return false
state.blck = blck
state.data.root = stateRoot
state.data.hbsPhase0.root = stateRoot
true
@ -543,25 +542,25 @@ proc putState(dag: ChainDAGRef, state: var StateData) =
# Store a state and its root
logScope:
blck = shortLog(state.blck)
stateSlot = shortLog(getStateField(state, slot))
stateRoot = shortLog(state.data.root)
stateSlot = shortLog(getStateField(state.data, slot))
stateRoot = shortLog(getStateRoot(state.data))
if not isStateCheckpoint(state.blck.atSlot(getStateField(state, slot))):
if not isStateCheckpoint(state.blck.atSlot(getStateField(state.data, slot))):
return
# Don't consider legacy tables here, they are slow to read so we'll want to
# rewrite things in the new database anyway.
if dag.db.containsState(state.data.root, legacy = false):
if dag.db.containsState(getStateRoot(state.data), legacy = false):
return
let startTick = Moment.now()
# Ideally we would save the state and the root lookup cache in a single
# transaction to prevent database inconsistencies, but the state loading code
# is resilient against one or the other going missing
dag.db.putState(state.data.root, state.data.data)
dag.db.putState(getStateRoot(state.data), state.data.hbsPhase0.data)
dag.db.putStateRoot(
state.blck.root, getStateField(state, slot), state.data.root)
state.blck.root, getStateField(state.data, slot), getStateRoot(state.data))
debug "Stored state", putStateDur = Moment.now() - startTick
@ -659,13 +658,13 @@ proc advanceSlots(
# Given a state, advance it zero or more slots by applying empty slot
# processing - the state must be positioned at a slot before or equal to the
# target
doAssert getStateField(state, slot) <= slot
while getStateField(state, slot) < slot:
loadStateCache(dag, cache, state.blck, getStateField(state, slot).epoch)
doAssert getStateField(state.data, slot) <= slot
while getStateField(state.data, slot) < slot:
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
doAssert process_slots(
state.data, getStateField(state, slot) + 1, cache, rewards,
dag.updateFlags),
state.data, getStateField(state.data, slot) + 1, cache, rewards,
dag.updateFlags, FAR_FUTURE_SLOT),
"process_slots shouldn't fail when state slot is correct"
if save:
dag.putState(state)
@ -680,11 +679,12 @@ proc applyBlock(
doAssert state.blck == blck.refs.parent
var statePtr = unsafeAddr state # safe because `restore` is locally scoped
func restore(v: var phase0.HashedBeaconState) =
func restore(v: var ForkedHashedBeaconState) =
doAssert (addr(statePtr.data) == addr v)
# TODO the block_clearance version uses assign() here
statePtr[] = dag.headState
loadStateCache(dag, cache, state.blck, getStateField(state, slot).epoch)
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
let ok = state_transition(
dag.runtimePreset, state.data, blck.data,
@ -718,12 +718,12 @@ proc updateStateData*(
template exactMatch(state: StateData, bs: BlockSlot): bool =
# The block is the same and we're at an early enough slot - the state can
# be used to arrive at the desired blockslot
state.blck == bs.blck and getStateField(state, slot) == bs.slot
state.blck == bs.blck and getStateField(state.data, slot) == bs.slot
template canAdvance(state: StateData, bs: BlockSlot): bool =
# The block is the same and we're at an early enough slot - the state can
# be used to arrive at the desired blockslot
state.blck == bs.blck and getStateField(state, slot) <= bs.slot
state.blck == bs.blck and getStateField(state.data, slot) <= bs.slot
# Fast path: check all caches for an exact match - this is faster than
# advancing a state where there's epoch processing to do, by a wide margin -
@ -781,7 +781,7 @@ proc updateStateData*(
if not found:
debug "UpdateStateData cache miss",
bs, stateBlock = state.blck, stateSlot = getStateField(state, slot)
bs, stateBlock = state.blck, stateSlot = getStateField(state.data, slot)
# Either the state is too new or was created by applying a different block.
# We'll now resort to loading the state from the database then reapplying
@ -817,8 +817,8 @@ proc updateStateData*(
# Starting state has been assigned, either from memory or database
let
assignTick = Moment.now()
startSlot {.used.} = getStateField(state, slot) # used in logs below
startRoot {.used.} = state.data.root
startSlot {.used.} = getStateField(state.data, slot) # used in logs below
startRoot {.used.} = getStateRoot(state.data)
var rewards: RewardInfo
# Time to replay all the blocks between then and now
for i in countdown(ancestors.len - 1, 0):
@ -834,7 +834,7 @@ proc updateStateData*(
dag.advanceSlots(state, bs.slot, save, cache, rewards)
# ...and make sure to load the state cache, if it exists
loadStateCache(dag, cache, state.blck, getStateField(state, slot).epoch)
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
let
assignDur = assignTick - startTick
@ -842,9 +842,9 @@ proc updateStateData*(
logScope:
blocks = ancestors.len
slots = getStateField(state, slot) - startSlot
stateRoot = shortLog(state.data.root)
stateSlot = getStateField(state, slot)
slots = getStateField(state.data, slot) - startSlot
stateRoot = shortLog(getStateRoot(state.data))
stateSlot = getStateField(state.data, slot)
startRoot = shortLog(startRoot)
startSlot
blck = shortLog(bs)
@ -993,7 +993,7 @@ proc updateHead*(
let
finalizedHead = newHead.atEpochStart(
getStateField(dag.headState, finalized_checkpoint).epoch)
getStateField(dag.headState.data, finalized_checkpoint).epoch)
doAssert (not finalizedHead.blck.isNil),
"Block graph should always lead to a finalized block"
@ -1002,33 +1002,34 @@ proc updateHead*(
notice "Updated head block with chain reorg",
lastHead = shortLog(lastHead),
headParent = shortLog(newHead.parent),
stateRoot = shortLog(dag.headState.data.root),
stateRoot = shortLog(getStateRoot(dag.headState.data)),
headBlock = shortLog(dag.headState.blck),
stateSlot = shortLog(getStateField(dag.headState, slot)),
justified =
shortLog(getStateField(dag.headState, current_justified_checkpoint)),
finalized = shortLog(getStateField(dag.headState, finalized_checkpoint))
stateSlot = shortLog(getStateField(dag.headState.data, slot)),
justified = shortLog(getStateField(
dag.headState.data, current_justified_checkpoint)),
finalized = shortLog(getStateField(
dag.headState.data, finalized_checkpoint))
# A reasonable criterion for "reorganizations of the chain"
quarantine.clearQuarantine()
beacon_reorgs_total.inc()
else:
debug "Updated head block",
stateRoot = shortLog(dag.headState.data.root),
stateRoot = shortLog(getStateRoot(dag.headState.data)),
headBlock = shortLog(dag.headState.blck),
stateSlot = shortLog(getStateField(dag.headState, slot)),
stateSlot = shortLog(getStateField(dag.headState.data, slot)),
justified = shortLog(getStateField(
dag.headState, current_justified_checkpoint)),
dag.headState.data, current_justified_checkpoint)),
finalized = shortLog(getStateField(
dag.headState, finalized_checkpoint))
dag.headState.data, finalized_checkpoint))
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
# both non-negative, so difference can't overflow or underflow int64
beacon_pending_deposits.set(
getStateField(dag.headState, eth1_data).deposit_count.toGaugeValue -
getStateField(dag.headState, eth1_deposit_index).toGaugeValue)
getStateField(dag.headState.data, eth1_data).deposit_count.toGaugeValue -
getStateField(dag.headState.data, eth1_deposit_index).toGaugeValue)
beacon_processed_deposits_total.set(
getStateField(dag.headState, eth1_deposit_index).toGaugeValue)
getStateField(dag.headState.data, eth1_deposit_index).toGaugeValue)
beacon_head_root.set newHead.root.toGaugeValue
beacon_head_slot.set newHead.slot.toGaugeValue
@ -1039,16 +1040,16 @@ proc updateHead*(
# updating them until a block confirms the change
beacon_current_justified_epoch.set(
getStateField(
dag.headState, current_justified_checkpoint).epoch.toGaugeValue)
dag.headState.data, current_justified_checkpoint).epoch.toGaugeValue)
beacon_current_justified_root.set(
getStateField(
dag.headState, current_justified_checkpoint).root.toGaugeValue)
dag.headState.data, current_justified_checkpoint).root.toGaugeValue)
beacon_previous_justified_epoch.set(
getStateField(
dag.headState, previous_justified_checkpoint).epoch.toGaugeValue)
dag.headState.data, previous_justified_checkpoint).epoch.toGaugeValue)
beacon_previous_justified_root.set(
getStateField(
dag.headState, previous_justified_checkpoint).root.toGaugeValue)
dag.headState.data, previous_justified_checkpoint).root.toGaugeValue)
let epochRef = getEpochRef(dag, newHead, newHead.slot.epoch)
beacon_active_validators.set(
@ -1061,10 +1062,10 @@ proc updateHead*(
dag.finalizedHead = finalizedHead
beacon_finalized_epoch.set(
getStateField(dag.headState, finalized_checkpoint).epoch.toGaugeValue)
beacon_finalized_root.set(
getStateField(dag.headState, finalized_checkpoint).root.toGaugeValue)
beacon_finalized_epoch.set(getStateField(
dag.headState.data, finalized_checkpoint).epoch.toGaugeValue)
beacon_finalized_root.set(getStateField(
dag.headState.data, finalized_checkpoint).root.toGaugeValue)
# Pruning the block dag is required every time the finalized head changes
# in order to clear out blocks that are no longer viable and should


@ -13,7 +13,7 @@ import
# Status libraries
chronicles,
# Internal
../spec/[crypto, datatypes, helpers],
../spec/[crypto, datatypes, forkedbeaconstate_helpers, helpers],
"."/[blockchain_dag, block_quarantine],
../beacon_node_types
@ -111,7 +111,7 @@ func getExitMessagesForBlock[T](
if allIt(
getValidatorIndices(exit_message),
getStateField(pool.dag.headState, validators)[it].exit_epoch !=
getStateField(pool.dag.headState.data, validators)[it].exit_epoch !=
FAR_FUTURE_EPOCH):
# A beacon block exit message already targeted all these validators
continue


@ -1,61 +0,0 @@
# beacon_chain
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
import
../spec/[beaconstate, datatypes, digest, helpers, presets, validator]
# State-related functionality based on StateData instead of BeaconState
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_current_epoch
func get_current_epoch*(stateData: StateData): Epoch =
## Return the current epoch.
getStateField(stateData, slot).epoch
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_previous_epoch
func get_previous_epoch*(stateData: StateData): Epoch =
## Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
let current_epoch = get_current_epoch(stateData)
if current_epoch == GENESIS_EPOCH:
GENESIS_EPOCH
else:
current_epoch - 1
# Dispatch functions
func get_beacon_committee*(
state: StateData, slot: Slot, index: CommitteeIndex,
cache: var StateCache): seq[ValidatorIndex] =
# This one is used by tests/, ncli/, and a couple of places in RPC
# TODO use the iterator version alone, to remove the risk of using
# diverging get_beacon_committee() in tests and beacon_chain/ by a
# wrapper approach (e.g., toSeq). This is a perf tradeoff for test
# correctness/consistency.
get_beacon_committee(state.data.data, slot, index, cache)
func get_committee_count_per_slot*(state: StateData,
epoch: Epoch,
cache: var StateCache): uint64 =
# Return the number of committees at ``epoch``.
get_committee_count_per_slot(state.data.data, epoch, cache)
template hash_tree_root*(stateData: StateData): Eth2Digest =
# Dispatch here based on type/fork of state. Since StateData is a ref object
# type, if Nim chooses the wrong overload, it will simply fail to compile.
stateData.data.root
func get_shuffled_active_validator_indices*(
cache: var StateCache, state: StateData, epoch: Epoch):
var seq[ValidatorIndex] =
cache.get_shuffled_active_validator_indices(state.data.data, epoch)
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_block_root_at_slot
func get_block_root_at_slot*(state: StateData,
slot: Slot): Eth2Digest =
## Return the block root at a recent ``slot``.
get_block_root_at_slot(state.data.data, slot)


@ -15,7 +15,7 @@ import
web3, web3/ethtypes as web3Types, web3/ethhexstrings, eth/common/eth_types,
eth/async_utils, stew/byteutils,
# Local modules:
../spec/[datatypes, digest, crypto, helpers],
../spec/[datatypes, digest, crypto, forkedbeaconstate_helpers, helpers],
../networking/network_metadata,
../consensus_object_pools/block_pools_types,
../ssz,
@ -281,15 +281,16 @@ template toGaugeValue(x: Quantity): int64 =
# "Invalid configuration: GENESIS_DELAY is set too low"
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#get_eth1_data
func compute_time_at_slot(state: StateData, slot: Slot): uint64 =
getStateField(state, genesis_time) + slot * SECONDS_PER_SLOT
func compute_time_at_slot(genesis_time: uint64, slot: Slot): uint64 =
genesis_time + slot * SECONDS_PER_SLOT
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#get_eth1_data
func voting_period_start_time*(state: StateData): uint64 =
func voting_period_start_time*(state: ForkedHashedBeaconState): uint64 =
let eth1_voting_period_start_slot =
getStateField(state, slot) - getStateField(state, slot) mod
SLOTS_PER_ETH1_VOTING_PERIOD.uint64
compute_time_at_slot(state, eth1_voting_period_start_slot)
compute_time_at_slot(
getStateField(state, genesis_time), eth1_voting_period_start_slot)
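
A toy, self-contained sanity check of the arithmetic above, not part of this diff (mainnet-style constants and plain uint64 signatures are assumed; the real functions take a ForkedHashedBeaconState and Slot):

const
  SECONDS_PER_SLOT = 12'u64                 # assumed mainnet value
  SLOTS_PER_ETH1_VOTING_PERIOD = 2048'u64   # assumed: 64 epochs * 32 slots

func toyTimeAtSlot(genesis_time, slot: uint64): uint64 =
  genesis_time + slot * SECONDS_PER_SLOT

func toyVotingPeriodStartTime(genesis_time, slot: uint64): uint64 =
  let start_slot = slot - slot mod SLOTS_PER_ETH1_VOTING_PERIOD
  toyTimeAtSlot(genesis_time, start_slot)

when isMainModule:
  # slot 5000 falls in the voting period that started at slot 4096
  doAssert toyVotingPeriodStartTime(0, 5000) == 49152  # 4096 * 12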
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#get_eth1_data
func is_candidate_block(preset: RuntimePreset,
@ -696,7 +697,7 @@ template trackFinalizedState*(m: Eth1Monitor,
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#get_eth1_data
proc getBlockProposalData*(chain: var Eth1Chain,
state: StateData,
state: ForkedHashedBeaconState,
finalizedEth1Data: Eth1Data,
finalizedStateDepositIndex: uint64): BlockProposalEth1Data =
let
@ -764,7 +765,7 @@ proc getBlockProposalData*(chain: var Eth1Chain,
result.hasMissingDeposits = true
template getBlockProposalData*(m: Eth1Monitor,
state: StateData,
state: ForkedHashedBeaconState,
finalizedEth1Data: Eth1Data,
finalizedStateDepositIndex: uint64): BlockProposalEth1Data =
getBlockProposalData(m.eth1Chain, state, finalizedEth1Data, finalizedStateDepositIndex)


@ -30,5 +30,9 @@ type
slotProcessed ##\
## Allow blocks to be applied to states with the same slot number as the
## block which is what happens when `process_block` is called separately
skipLastStateRootCalculation ##\
## When process_slots() is being called as part of a state_transition(),
## the hash_tree_root() from the block will fill in the state.root so it
## should skip calculating that last state root.
UpdateFlags* = set[UpdateFlag]
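
A toy illustration of what skipLastStateRootCalculation saves, not part of this diff: when the block's hash_tree_root will fill in the state root anyway, the last state-root calculation of the slot loop is skipped (stand-in types and a simplified loop; the real process_slots() is only sketched here).

type
  ToyUpdateFlag = enum
    skipLastStateRootCalculation
  ToyUpdateFlags = set[ToyUpdateFlag]

var htrCalls = 0

proc toyHashTreeRoot(slot: uint64): string =
  # stand-in for hash_tree_root(state); counting calls shows the saving
  inc htrCalls
  "htr@" & $slot

proc toyProcessSlots(stateSlot: var uint64, stateRoot: var string,
                     targetSlot: uint64, flags: ToyUpdateFlags) =
  while stateSlot < targetSlot:
    inc stateSlot
    if stateSlot < targetSlot or skipLastStateRootCalculation notin flags:
      stateRoot = toyHashTreeRoot(stateSlot)
    # else: the caller fills stateRoot from the block, so computing it here
    # would be wasted work

when isMainModule:
  var (slot, root) = (0'u64, "genesis")
  toyProcessSlots(slot, root, 3, {skipLastStateRootCalculation})
  doAssert slot == 3 and htrCalls == 2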


@ -16,7 +16,8 @@ import
# Internals
../spec/[
beaconstate, state_transition_block,
datatypes, crypto, digest, helpers, network, signatures],
datatypes, crypto, digest, forkedbeaconstate_helpers, helpers, network,
signatures],
../consensus_object_pools/[
spec_cache, blockchain_dag, block_quarantine, spec_cache,
attestation_pool, exit_pool
@ -249,9 +250,9 @@ proc validateAttestation*(
"validateAttestation: number of aggregation bits and committee size mismatch")))
let
fork = getStateField(pool.dag.headState, fork)
fork = getStateField(pool.dag.headState.data, fork)
genesis_validators_root =
getStateField(pool.dag.headState, genesis_validators_root)
getStateField(pool.dag.headState.data, genesis_validators_root)
attesting_index = get_attesting_indices_one(
epochRef, attestation.data, attestation.aggregation_bits)
@ -428,9 +429,9 @@ proc validateAggregate*(
# 3. [REJECT] The signature of aggregate is valid.
let
fork = getStateField(pool.dag.headState, fork)
fork = getStateField(pool.dag.headState.data, fork)
genesis_validators_root =
getStateField(pool.dag.headState, genesis_validators_root)
getStateField(pool.dag.headState.data, genesis_validators_root)
let deferredCrypto = batchCrypto
.scheduleAggregateChecks(
@ -593,7 +594,8 @@ proc isValidBeaconBlock*(
# compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) ==
# store.finalized_checkpoint.root
let
finalized_checkpoint = getStateField(dag.headState, finalized_checkpoint)
finalized_checkpoint = getStateField(
dag.headState.data, finalized_checkpoint)
ancestor = get_ancestor(
parent_ref, compute_start_slot_at_epoch(finalized_checkpoint.epoch))
@ -626,8 +628,8 @@ proc isValidBeaconBlock*(
# [REJECT] The proposer signature, signed_beacon_block.signature, is valid
# with respect to the proposer_index pubkey.
if not verify_block_signature(
getStateField(dag.headState, fork),
getStateField(dag.headState, genesis_validators_root),
getStateField(dag.headState.data, fork),
getStateField(dag.headState.data, genesis_validators_root),
signed_beacon_block.message.slot,
signed_beacon_block.message,
dag.validatorKey(proposer.get()).get(),
@ -666,8 +668,7 @@ proc validateAttesterSlashing*(
# [REJECT] All of the conditions within process_attester_slashing pass
# validation.
let attester_slashing_validity =
check_attester_slashing(
pool.dag.headState.data.data, attester_slashing, {})
check_attester_slashing(pool.dag.headState.data, attester_slashing, {})
if attester_slashing_validity.isErr:
return err((ValidationResult.Reject, attester_slashing_validity.error))
@ -696,8 +697,7 @@ proc validateProposerSlashing*(
# [REJECT] All of the conditions within process_proposer_slashing pass validation.
let proposer_slashing_validity =
check_proposer_slashing(
pool.dag.headState.data.data, proposer_slashing, {})
check_proposer_slashing(pool.dag.headState.data, proposer_slashing, {})
if proposer_slashing_validity.isErr:
return err((ValidationResult.Reject, proposer_slashing_validity.error))
@ -715,7 +715,7 @@ proc validateVoluntaryExit*(
# [IGNORE] The voluntary exit is the first valid voluntary exit received for
# the validator with index signed_voluntary_exit.message.validator_index.
if signed_voluntary_exit.message.validator_index >=
getStateField(pool.dag.headState, validators).lenu64:
getStateField(pool.dag.headState.data, validators).lenu64:
return err((ValidationResult.Ignore, cstring(
"validateVoluntaryExit: validator index too high")))
@ -730,8 +730,7 @@ proc validateVoluntaryExit*(
# [REJECT] All of the conditions within process_voluntary_exit pass
# validation.
let voluntary_exit_validity =
check_voluntary_exit(
pool.dag.headState.data.data, signed_voluntary_exit, {})
check_voluntary_exit(pool.dag.headState.data, signed_voluntary_exit, {})
if voluntary_exit_validity.isErr:
return err((ValidationResult.Reject, voluntary_exit_validity.error))


@ -40,8 +40,9 @@ import
./rpc/[beacon_api, config_api, debug_api, event_api, nimbus_api, node_api,
validator_api],
./spec/[
datatypes, digest, crypto, beaconstate, eth2_apis/beacon_rpc_client,
helpers, network, presets, weak_subjectivity, signatures],
datatypes, digest, crypto, forkedbeaconstate_helpers, beaconstate,
eth2_apis/beacon_rpc_client, helpers, network, presets, weak_subjectivity,
signatures],
./consensus_object_pools/[
blockchain_dag, block_quarantine, block_clearance, block_pools_types,
attestation_pool, exit_pool, spec_cache],
@ -245,10 +246,10 @@ proc init*(T: type BeaconNode,
else: {}
dag = ChainDAGRef.init(runtimePreset, db, chainDagFlags)
beaconClock =
BeaconClock.init(getStateField(dag.headState, genesis_time))
BeaconClock.init(getStateField(dag.headState.data, genesis_time))
quarantine = QuarantineRef.init(rng)
databaseGenesisValidatorsRoot =
getStateField(dag.headState, genesis_validators_root)
getStateField(dag.headState.data, genesis_validators_root)
if genesisStateContents.len != 0:
let
@ -266,14 +267,14 @@ proc init*(T: type BeaconNode,
currentSlot = beaconClock.now.slotOrZero
isCheckpointStale = not is_within_weak_subjectivity_period(
currentSlot,
dag.headState,
dag.headState.data,
config.weakSubjectivityCheckpoint.get)
if isCheckpointStale:
error "Weak subjectivity checkpoint is stale",
currentSlot,
checkpoint = config.weakSubjectivityCheckpoint.get,
headStateSlot = getStateField(dag.headState, slot)
headStateSlot = getStateField(dag.headState.data, slot)
quit 1
if checkpointState != nil:
@ -314,8 +315,8 @@ proc init*(T: type BeaconNode,
nickname = if config.nodeName == "auto": shortForm(netKeys)
else: config.nodeName
enrForkId = getENRForkID(
getStateField(dag.headState, fork),
getStateField(dag.headState, genesis_validators_root))
getStateField(dag.headState.data, fork),
getStateField(dag.headState.data, genesis_validators_root))
topicBeaconBlocks = getBeaconBlocksTopic(enrForkId.fork_digest)
topicAggregateAndProofs = getAggregateAndProofsTopic(enrForkId.fork_digest)
network = createEth2Node(rng, config, netKeys, enrForkId)
@ -337,7 +338,7 @@ proc init*(T: type BeaconNode,
let
slashingProtectionDB =
SlashingProtectionDB.init(
getStateField(dag.headState, genesis_validators_root),
getStateField(dag.headState.data, genesis_validators_root),
config.validatorsDir(), SlashingDbName)
validatorPool = newClone(ValidatorPool.init(slashingProtectionDB))
@ -442,9 +443,9 @@ func toBitArray(stabilitySubnets: auto): BitArray[ATTESTATION_SUBNET_COUNT] =
proc getAttachedValidators(node: BeaconNode):
Table[ValidatorIndex, AttachedValidator] =
for validatorIndex in 0 ..<
getStateField(node.dag.headState, validators).len:
getStateField(node.dag.headState.data, validators).len:
let attachedValidator = node.getAttachedValidator(
getStateField(node.dag.headState, validators),
getStateField(node.dag.headState.data, validators),
validatorIndex.ValidatorIndex)
if attachedValidator.isNil:
continue
@ -476,9 +477,9 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
is_aggregator(
committeeLen,
await attachedValidators[it.ValidatorIndex].getSlotSig(
getStateField(node.dag.headState, fork),
getStateField(node.dag.headState.data, fork),
getStateField(
node.dag.headState, genesis_validators_root), slot)))
node.dag.headState.data, genesis_validators_root), slot)))
node.attestationSubnets.lastCalculatedEpoch = epoch
node.attestationSubnets.attestingSlots[epoch mod 2] = 0
@ -557,10 +558,10 @@ proc cycleAttestationSubnetsPerEpoch(
# wallSlot, it would have to look more than MIN_SEED_LOOKAHEAD epochs
# ahead to compute the shuffling determining the beacon committees.
static: doAssert MIN_SEED_LOOKAHEAD == 1
if getStateField(node.dag.headState, slot).epoch != wallSlot.epoch:
if getStateField(node.dag.headState.data, slot).epoch != wallSlot.epoch:
debug "Requested attestation subnets too far in advance",
wallSlot,
stateSlot = getStateField(node.dag.headState, slot)
stateSlot = getStateField(node.dag.headState.data, slot)
return prevStabilitySubnets
# This works so long as at least one block in an epoch provides a basis for
@ -1365,7 +1366,8 @@ proc initStatusBar(node: BeaconNode) {.raises: [Defect, ValueError].} =
proc dataResolver(expr: string): string {.raises: [Defect].} =
template justified: untyped = node.dag.head.atEpochStart(
getStateField(node.dag.headState, current_justified_checkpoint).epoch)
getStateField(
node.dag.headState.data, current_justified_checkpoint).epoch)
# TODO:
# We should introduce a general API for resolving dot expressions
# such as `db.latest_block.slot` or `metrics.connected_peers`.


@ -17,8 +17,8 @@ import
../networking/eth2_network,
../validators/validator_duties,
../gossip_processing/gossip_validation,
../consensus_object_pools/[blockchain_dag, statedata_helpers],
../spec/[crypto, digest, datatypes, network],
../consensus_object_pools/blockchain_dag,
../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers, network],
../spec/eth2_apis/callsigs_types,
../ssz/merkleization,
./rpc_utils, ./eth2_json_rpc_serialization
@ -182,9 +182,9 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
raises: [Exception].} = # TODO fix json-rpc
rpcServer.rpc("get_v1_beacon_genesis") do () -> BeaconGenesisTuple:
return (
genesis_time: getStateField(node.dag.headState, genesis_time),
genesis_time: getStateField(node.dag.headState.data, genesis_time),
genesis_validators_root:
getStateField(node.dag.headState, genesis_validators_root),
getStateField(node.dag.headState.data, genesis_validators_root),
genesis_fork_version: node.runtimePreset.GENESIS_FORK_VERSION
)
@ -194,23 +194,23 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
rpcServer.rpc("get_v1_beacon_states_fork") do (stateId: string) -> Fork:
withStateForStateId(stateId):
return getStateField(stateData, fork)
return getStateField(stateData.data, fork)
rpcServer.rpc("get_v1_beacon_states_finality_checkpoints") do (
stateId: string) -> BeaconStatesFinalityCheckpointsTuple:
withStateForStateId(stateId):
return (previous_justified:
getStateField(stateData, previous_justified_checkpoint),
getStateField(stateData.data, previous_justified_checkpoint),
current_justified:
getStateField(stateData, current_justified_checkpoint),
finalized: getStateField(stateData, finalized_checkpoint))
getStateField(stateData.data, current_justified_checkpoint),
finalized: getStateField(stateData.data, finalized_checkpoint))
rpcServer.rpc("get_v1_beacon_states_stateId_validators") do (
stateId: string, validatorIds: Option[seq[string]],
status: Option[seq[string]]) -> seq[BeaconStatesValidatorsTuple]:
var vquery: ValidatorQuery
var squery: StatusQuery
let current_epoch = getStateField(node.dag.headState, slot).epoch
let current_epoch = getStateField(node.dag.headState.data, slot).epoch
template statusCheck(status, statusQuery, vstatus, current_epoch): bool =
if status.isNone():
@ -237,7 +237,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
vquery = vqres.get()
if validatorIds.isNone():
for index, validator in getStateField(stateData, validators).pairs():
for index, validator in getStateField(stateData.data, validators).pairs():
let sres = validator.getStatus(current_epoch)
if sres.isOk:
let vstatus = sres.get()
@ -247,11 +247,11 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
res.add((validator: validator,
index: uint64(index),
status: vstatus,
balance: getStateField(stateData, balances)[index]))
balance: getStateField(stateData.data, balances)[index]))
else:
for index in vquery.ids:
if index < lenu64(getStateField(stateData, validators)):
let validator = getStateField(stateData, validators)[index]
if index < lenu64(getStateField(stateData.data, validators)):
let validator = getStateField(stateData.data, validators)[index]
let sres = validator.getStatus(current_epoch)
if sres.isOk:
let vstatus = sres.get()
@ -262,9 +262,9 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
res.add((validator: validator,
index: uint64(index),
status: vstatus,
balance: getStateField(stateData, balances)[index]))
balance: getStateField(stateData.data, balances)[index]))
for index, validator in getStateField(stateData, validators).pairs():
for index, validator in getStateField(stateData.data, validators).pairs():
if validator.pubkey in vquery.keyset:
let sres = validator.getStatus(current_epoch)
if sres.isOk:
@ -275,12 +275,12 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
res.add((validator: validator,
index: uint64(index),
status: vstatus,
balance: getStateField(stateData, balances)[index]))
balance: getStateField(stateData.data, balances)[index]))
return res
rpcServer.rpc("get_v1_beacon_states_stateId_validators_validatorId") do (
stateId: string, validatorId: string) -> BeaconStatesValidatorsTuple:
let current_epoch = getStateField(node.dag.headState, slot).epoch
let current_epoch = getStateField(node.dag.headState.data, slot).epoch
let vqres = createIdQuery([validatorId])
if vqres.isErr:
raise newException(CatchableError, vqres.error)
@ -289,23 +289,23 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
withStateForStateId(stateId):
if len(vquery.ids) > 0:
let index = vquery.ids[0]
if index < lenu64(getStateField(stateData, validators)):
let validator = getStateField(stateData, validators)[index]
if index < lenu64(getStateField(stateData.data, validators)):
let validator = getStateField(stateData.data, validators)[index]
let sres = validator.getStatus(current_epoch)
if sres.isOk:
return (validator: validator, index: uint64(index),
status: sres.get(),
balance: getStateField(stateData, balances)[index])
balance: getStateField(stateData.data, balances)[index])
else:
raise newException(CatchableError, "Incorrect validator's state")
else:
for index, validator in getStateField(stateData, validators).pairs():
for index, validator in getStateField(stateData.data, validators).pairs():
if validator.pubkey in vquery.keyset:
let sres = validator.getStatus(current_epoch)
if sres.isOk:
return (validator: validator, index: uint64(index),
status: sres.get(),
balance: getStateField(stateData, balances)[index])
balance: getStateField(stateData.data, balances)[index])
else:
raise newException(CatchableError, "Incorrect validator's state")
@ -315,7 +315,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
var res: seq[BalanceTuple]
withStateForStateId(stateId):
if validatorsId.isNone():
for index, value in getStateField(stateData, balances).pairs():
for index, value in getStateField(stateData.data, balances).pairs():
let balance = (index: uint64(index), balance: value)
res.add(balance)
else:
@ -325,17 +325,17 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
var vquery = vqres.get()
for index in vquery.ids:
if index < lenu64(getStateField(stateData, validators)):
let validator = getStateField(stateData, validators)[index]
if index < lenu64(getStateField(stateData.data, validators)):
let validator = getStateField(stateData.data, validators)[index]
vquery.keyset.excl(validator.pubkey)
let balance = (index: uint64(index),
balance: getStateField(stateData, balances)[index])
balance: getStateField(stateData.data, balances)[index])
res.add(balance)
for index, validator in getStateField(stateData, validators).pairs():
for index, validator in getStateField(stateData.data, validators).pairs():
if validator.pubkey in vquery.keyset:
let balance = (index: uint64(index),
balance: getStateField(stateData, balances)[index])
balance: getStateField(stateData.data, balances)[index])
res.add(balance)
return res
@ -346,12 +346,12 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
proc getCommittee(slot: Slot,
index: CommitteeIndex): BeaconStatesCommitteesTuple =
let vals = get_beacon_committee(
stateData, slot, index, cache).mapIt(it.uint64)
stateData.data, slot, index, cache).mapIt(it.uint64)
return (index: index.uint64, slot: slot.uint64, validators: vals)
proc forSlot(slot: Slot, res: var seq[BeaconStatesCommitteesTuple]) =
let committees_per_slot =
get_committee_count_per_slot(stateData, slot.epoch, cache)
get_committee_count_per_slot(stateData.data, slot.epoch, cache)
if index.isNone:
for committee_index in 0'u64..<committees_per_slot:
@ -364,7 +364,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
let qepoch =
if epoch.isNone:
compute_epoch_at_slot(getStateField(stateData, slot))
compute_epoch_at_slot(getStateField(stateData.data, slot))
else:
Epoch(epoch.get())


@ -9,10 +9,10 @@ import
chronicles,
nimcrypto/utils as ncrutils,
../beacon_node_common, ../networking/eth2_network,
../consensus_object_pools/[blockchain_dag, exit_pool, statedata_helpers],
../consensus_object_pools/[blockchain_dag, exit_pool],
../gossip_processing/gossip_validation,
../validators/validator_duties,
../spec/[crypto, digest, datatypes, network],
../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers, network],
../ssz/merkleization,
./eth2_json_rest_serialization, ./rest_utils
@ -127,9 +127,9 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet, "/api/eth/v1/beacon/genesis") do () -> RestApiResponse:
return RestApiResponse.jsonResponse(
(
genesis_time: getStateField(node.dag.headState, genesis_time),
genesis_time: getStateField(node.dag.headState.data, genesis_time),
genesis_validators_root:
getStateField(node.dag.headState, genesis_validators_root),
getStateField(node.dag.headState.data, genesis_validators_root),
genesis_fork_version: node.runtimePreset.GENESIS_FORK_VERSION
)
)
@ -167,9 +167,9 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
node.withStateForBlockSlot(bslot):
return RestApiResponse.jsonResponse(
(
previous_version: getStateField(stateData, fork).previous_version,
current_version: getStateField(stateData, fork).current_version,
epoch: getStateField(stateData, fork).epoch
previous_version: getStateField(stateData.data, fork).previous_version,
current_version: getStateField(stateData.data, fork).current_version,
epoch: getStateField(stateData.data, fork).epoch
)
)
return RestApiResponse.jsonError(Http500, InternalServerError)
@ -192,10 +192,10 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonResponse(
(
previous_justified:
getStateField(stateData, previous_justified_checkpoint),
getStateField(stateData.data, previous_justified_checkpoint),
current_justified:
getStateField(stateData, current_justified_checkpoint),
finalized: getStateField(stateData, finalized_checkpoint)
getStateField(stateData.data, current_justified_checkpoint),
finalized: getStateField(stateData.data, finalized_checkpoint)
)
)
return RestApiResponse.jsonError(Http500, InternalServerError)
@ -268,9 +268,9 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
(res1, res2)
node.withStateForBlockSlot(bslot):
let current_epoch = get_current_epoch(node.dag.headState)
let current_epoch = get_current_epoch(node.dag.headState.data)
var res: seq[RestValidator]
for index, validator in getStateField(stateData, validators).pairs():
for index, validator in getStateField(stateData.data, validators).pairs():
let includeFlag =
(len(keySet) == 0) and (len(indexSet) == 0) or
(len(indexSet) > 0 and (ValidatorIndex(index) in indexSet)) or
@ -283,7 +283,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
res.add(RestValidator(
index: ValidatorIndex(index),
balance:
Base10.toString(getStateField(stateData, balances)[index]),
Base10.toString(getStateField(stateData.data, balances)[index]),
status: toString(vstatus),
validator: validator
))
@ -309,11 +309,11 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http400, InvalidValidatorIdValueError,
$validator_id.error())
node.withStateForBlockSlot(bslot):
let current_epoch = get_current_epoch(node.dag.headState)
let current_epoch = get_current_epoch(node.dag.headState.data)
let vid = validator_id.get()
case vid.kind
of ValidatorQueryKind.Key:
for index, validator in getStateField(stateData, validators).pairs():
for index, validator in getStateField(stateData.data, validators).pairs():
if validator.pubkey == vid.key:
let sres = validator.getStatus(current_epoch)
if sres.isOk():
@ -321,7 +321,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
(
index: ValidatorIndex(index),
balance:
Base10.toString(getStateField(stateData, balances)[index]),
Base10.toString(getStateField(stateData.data, balances)[index]),
status: toString(sres.get()),
validator: validator
)
@ -344,15 +344,15 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
UnsupportedValidatorIndexValueError)
vres.get()
if uint64(vindex) >= uint64(len(getStateField(stateData, validators))):
if uint64(vindex) >= uint64(len(getStateField(stateData.data, validators))):
return RestApiResponse.jsonError(Http404, ValidatorNotFoundError)
let validator = getStateField(stateData, validators)[vindex]
let validator = getStateField(stateData.data, validators)[vindex]
let sres = validator.getStatus(current_epoch)
if sres.isOk():
return RestApiResponse.jsonResponse(
(
index: vindex,
balance: Base10.toString(getStateField(stateData, balances)[vindex]),
balance: Base10.toString(getStateField(stateData.data, balances)[vindex]),
status: toString(sres.get()),
validator: validator
)
@ -416,9 +416,9 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
res2.incl(vitem)
(res1, res2)
node.withStateForBlockSlot(bslot):
let current_epoch = get_current_epoch(node.dag.headState)
let current_epoch = get_current_epoch(node.dag.headState.data)
var res: seq[RestValidatorBalance]
for index, validator in getStateField(stateData, validators).pairs():
for index, validator in getStateField(stateData.data, validators).pairs():
let includeFlag =
(len(keySet) == 0) and (len(indexSet) == 0) or
(len(indexSet) > 0 and (ValidatorIndex(index) in indexSet)) or
@ -430,7 +430,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
res.add(RestValidatorBalance(
index: ValidatorIndex(index),
balance:
Base10.toString(getStateField(stateData, balances)[index]),
Base10.toString(getStateField(stateData.data, balances)[index]),
))
return RestApiResponse.jsonResponse(res)
@ -485,7 +485,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
node.withStateForBlockSlot(bslot):
proc getCommittee(slot: Slot,
index: CommitteeIndex): RestBeaconStatesCommittees =
let validators = get_beacon_committee(stateData, slot, index,
let validators = get_beacon_committee(stateData.data, slot, index,
cache).mapIt(it)
RestBeaconStatesCommittees(index: index, slot: slot,
validators: validators)
@ -493,7 +493,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
proc forSlot(slot: Slot, cindex: Option[CommitteeIndex],
res: var seq[RestBeaconStatesCommittees]) =
let committees_per_slot =
get_committee_count_per_slot(stateData, Epoch(slot), cache)
get_committee_count_per_slot(stateData.data, Epoch(slot), cache)
if cindex.isNone:
for committee_index in 0'u64 ..< committees_per_slot:
@ -506,7 +506,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
var res: seq[RestBeaconStatesCommittees]
let qepoch =
if vepoch.isNone:
compute_epoch_at_slot(getStateField(stateData, slot))
compute_epoch_at_slot(getStateField(stateData.data, slot))
else:
vepoch.get()

View File

@ -14,7 +14,7 @@ import
nimcrypto/utils as ncrutils,
../beacon_node_common,
../eth1/eth1_monitor,
../spec/[datatypes, digest, presets]
../spec/[datatypes, digest, forkedbeaconstate_helpers, presets]
logScope: topics = "configapi"
@ -33,7 +33,7 @@ func getDepositAddress(node: BeaconNode): string =
proc installConfigApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
raises: [Exception].} = # TODO fix json-rpc
rpcServer.rpc("get_v1_config_fork_schedule") do () -> seq[Fork]:
return @[getStateField(node.dag.headState, fork)]
return @[getStateField(node.dag.headState.data, fork)]
rpcServer.rpc("get_v1_config_spec") do () -> JsonNode:
return %*{

View File

@ -11,7 +11,7 @@ import
chronicles,
nimcrypto/utils as ncrutils,
../beacon_node_common, ../eth1/eth1_monitor,
../spec/[datatypes, digest, presets],
../spec/[datatypes, digest, forkedbeaconstate_helpers, presets],
./eth2_json_rest_serialization, ./rest_utils
logScope: topics = "rest_config"
@ -28,7 +28,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
# TODO: Implementation needs a fix once the forks infrastructure is
# established.
return RestApiResponse.jsonResponse(
[getStateField(node.dag.headState, fork)]
[getStateField(node.dag.headState.data, fork)]
)
router.api(MethodGet,

View File

@ -26,7 +26,7 @@ proc installDebugApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
rpcServer.rpc("get_v1_debug_beacon_states_stateId") do (
stateId: string) -> BeaconState:
withStateForStateId(stateId):
return stateData.data.data
return stateData.data.hbsPhase0.data
rpcServer.rpc("get_v1_debug_beacon_heads") do () -> seq[tuple[root: Eth2Digest, slot: Slot]]:
return node.dag.heads.mapIt((it.root, it.slot))

View File

@ -23,7 +23,7 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
$bres.error())
bres.get()
node.withStateForBlockSlot(bslot):
return RestApiResponse.jsonResponse(stateData.data.data)
return RestApiResponse.jsonResponse(stateData.data.hbsPhase0.data)
return RestApiResponse.jsonError(Http500, InternalServerError)
router.api(MethodGet,

View File

@ -18,7 +18,7 @@ import
".."/[
beacon_node_common, nimbus_binary_common, networking/eth2_network,
eth1/eth1_monitor, validators/validator_duties],
../spec/[digest, datatypes, presets]
../spec/[digest, datatypes, forkedbeaconstate_helpers, presets]
logScope: topics = "nimbusapi"
@ -43,9 +43,9 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
rpcServer.rpc("getChainHead") do () -> JsonNode:
let
head = node.dag.head
finalized = getStateField(node.dag.headState, finalized_checkpoint)
finalized = getStateField(node.dag.headState.data, finalized_checkpoint)
justified =
getStateField(node.dag.headState, current_justified_checkpoint)
getStateField(node.dag.headState.data, current_justified_checkpoint)
return %* {
"head_slot": head.slot,
"head_block_root": head.root.data.toHex(),
@ -105,7 +105,7 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
let proposalState = assignClone(node.dag.headState)
node.dag.withState(proposalState[], head.atSlot(wallSlot)):
return node.getBlockProposalEth1Data(stateData)
return node.getBlockProposalEth1Data(stateData.data)
rpcServer.rpc("debug_getChronosFutures") do () -> seq[FutureInfo]:
when defined(chronosFutureTracking):

View File

@ -14,6 +14,7 @@ import
./eth2_json_rest_serialization, ./rest_utils,
../eth1/eth1_monitor,
../validators/validator_duties,
../spec/forkedbeaconstate_helpers,
../beacon_node_common, ../nimbus_binary_common
logScope: topics = "rest_nimbusapi"
@ -112,9 +113,9 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
router.api(MethodGet, "/api/nimbus/v1/chain/head") do() -> RestApiResponse:
let
head = node.dag.head
finalized = getStateField(node.dag.headState, finalized_checkpoint)
finalized = getStateField(node.dag.headState.data, finalized_checkpoint)
justified =
getStateField(node.dag.headState, current_justified_checkpoint)
getStateField(node.dag.headState.data, current_justified_checkpoint)
return RestApiResponse.jsonResponse(
(
head_slot: head.slot,
@ -205,7 +206,7 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
let proposalState = assignClone(node.dag.headState)
node.dag.withState(proposalState[], head.atSlot(wallSlot)):
return RestApiResponse.jsonResponse(
node.getBlockProposalEth1Data(stateData))
node.getBlockProposalEth1Data(stateData.data))
router.api(MethodGet, "/api/nimbus/v1/debug/chronos/futures") do (
) -> RestApiResponse:

View File

@ -4,7 +4,7 @@ import presto,
faststreams/[outputs],
serialization, json_serialization,
nimcrypto/utils as ncrutils,
../spec/[crypto, digest, datatypes],
../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers],
../beacon_node_common,
../consensus_object_pools/[block_pools_types, blockchain_dag]
export blockchain_dag, presto
@ -508,8 +508,8 @@ proc getBlockSlot*(node: BeaconNode,
of StateIdentType.Finalized:
ok(node.dag.finalizedHead)
of StateIdentType.Justified:
ok(node.dag.head.atEpochStart(
getStateField(node.dag.headState, current_justified_checkpoint).epoch))
ok(node.dag.head.atEpochStart(getStateField(
node.dag.headState.data, current_justified_checkpoint).epoch))
proc getBlockDataFromBlockIdent*(node: BeaconNode,
id: BlockIdent): Result[BlockData, cstring] =
@ -537,7 +537,7 @@ proc getBlockDataFromBlockIdent*(node: BeaconNode,
template withStateForBlockSlot*(node: BeaconNode,
blockSlot: BlockSlot, body: untyped): untyped =
template isState(state: StateData): bool =
state.blck.atSlot(getStateField(state, slot)) == blockSlot
state.blck.atSlot(getStateField(state.data, slot)) == blockSlot
if isState(node.dag.headState):
withStateVars(node.dag.headState):

View File

@ -12,7 +12,7 @@ import
stew/byteutils,
../beacon_node_common, ../validators/validator_duties,
../consensus_object_pools/[block_pools_types, blockchain_dag],
../spec/[datatypes, digest, helpers]
../spec/[datatypes, digest, forkedbeaconstate_helpers, helpers]
export blockchain_dag
@ -21,7 +21,7 @@ template withStateForStateId*(stateId: string, body: untyped): untyped =
bs = node.stateIdToBlockSlot(stateId)
template isState(state: StateData): bool =
state.blck.atSlot(getStateField(state, slot)) == bs
state.blck.atSlot(getStateField(state.data, slot)) == bs
if isState(node.dag.headState):
withStateVars(node.dag.headState):
@ -75,7 +75,7 @@ proc stateIdToBlockSlot*(node: BeaconNode, stateId: string): BlockSlot {.raises:
node.dag.finalizedHead
of "justified":
node.dag.head.atEpochStart(
getStateField(node.dag.headState, current_justified_checkpoint).epoch)
getStateField(node.dag.headState.data, current_justified_checkpoint).epoch)
else:
if stateId.startsWith("0x"):
let blckRoot = parseRoot(stateId)

View File

@ -17,7 +17,7 @@ import
chronicles,
# Local modules
../spec/[datatypes, digest, crypto, helpers, network, signatures],
../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers, helpers, network, signatures],
../spec/eth2_apis/callsigs_types,
../consensus_object_pools/[blockchain_dag, spec_cache, attestation_pool], ../ssz/merkleization,
../beacon_node_common, ../beacon_node_types,
@ -141,8 +141,8 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
"Slot requested not in current or next wall-slot epoch")
if not verify_slot_signature(
getStateField(node.dag.headState, fork),
getStateField(node.dag.headState, genesis_validators_root),
getStateField(node.dag.headState.data, fork),
getStateField(node.dag.headState.data, genesis_validators_root),
slot, validator_pubkey, slot_signature):
raise newException(CatchableError,
"Invalid slot signature")

View File

@ -12,7 +12,7 @@ import
../consensus_object_pools/[blockchain_dag, spec_cache, attestation_pool],
../gossip_processing/gossip_validation,
../validators/validator_duties,
../spec/[crypto, digest, datatypes, network],
../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers, network],
../ssz/merkleization,
./eth2_json_rest_serialization, ./rest_utils
@ -342,10 +342,10 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
block:
let idx = request.validator_index
if uint64(idx) >=
lenu64(getStateField(node.dag.headState, validators)):
lenu64(getStateField(node.dag.headState.data, validators)):
return RestApiResponse.jsonError(Http400,
InvalidValidatorIndexValueError)
getStateField(node.dag.headState, validators)[idx].pubkey
getStateField(node.dag.headState.data, validators)[idx].pubkey
let wallSlot = node.beaconClock.now.slotOrZero
if wallSlot > request.slot + 1:

View File

@ -513,11 +513,14 @@ proc is_valid_indexed_attestation*(
ok()
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_attesting_indices
iterator get_attesting_indices*(state: SomeBeaconState,
func get_attesting_indices*(state: SomeBeaconState,
data: AttestationData,
bits: CommitteeValidatorsBits,
cache: var StateCache): ValidatorIndex =
cache: var StateCache): seq[ValidatorIndex] =
## Return the set of attesting indices corresponding to ``data`` and ``bits``.
var res: seq[ValidatorIndex]
# Can't be an iterator due to https://github.com/nim-lang/Nim/issues/18188
if bits.lenu64 != get_beacon_committee_len(
state, data.slot, data.index.CommitteeIndex, cache):
trace "get_attesting_indices: inconsistent aggregation and committee length"
@ -526,9 +529,11 @@ iterator get_attesting_indices*(state: SomeBeaconState,
for index in get_beacon_committee(
state, data.slot, data.index.CommitteeIndex, cache):
if bits[i]:
yield index
res.add index
inc i
res
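
Since the spec helper is now a func returning a seq rather than an iterator, call sites simply collect the result. A minimal sketch, assuming `state`, `attestation` and `cache` are already in scope (illustrative names only, not part of this change):

# Hedged sketch: the func form returns all attesting indices at once,
# where the former iterator had to be consumed in a for loop.
let attesting = get_attesting_indices(
  state, attestation.data, attestation.aggregation_bits, cache)
for validator_index in attesting:
  # e.g. accumulate balances or pubkeys per index here
  discard validator_index
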
proc is_valid_indexed_attestation*(
state: SomeBeaconState, attestation: SomeAttestation, flags: UpdateFlags,
cache: var StateCache): Result[void, cstring] =
@ -702,6 +707,10 @@ proc process_attestation*(
# In the spec, attestation validation is mixed with state mutation, so here
# we've split it into two functions so that the validation logic can be
# reused when looking for suitable blocks to include in attestations.
#
# TODO this should be two separate functions, but
# https://github.com/nim-lang/Nim/issues/18202 means that calling it from
# process_operations() in state_transition_block fails when split that way.
let proposer_index = get_beacon_proposer_index(state, cache)
if proposer_index.isNone:
@ -709,10 +718,6 @@ proc process_attestation*(
? check_attestation(state, attestation, flags, cache)
# TODO this should be split between two functions, but causes type errors
# in state_transition_block.process_operations()
# TODO investigate and, if real, file Nim bug
# For phase0
template addPendingAttestation(attestations: typed) =
# The genericSeqAssign generated by the compiler to copy the attestation

View File

@ -255,8 +255,7 @@ type
# phase 0 version of symbols; anywhere which specially handles it will
# have to do so itself.
SomeBeaconState* = BeaconState | phase0.BeaconState
SomeHashedBeaconState* = HashedBeaconState | phase0.HashedBeaconState # probably not useful long-term,
## since process_slots will need to be StateData
SomeHashedBeaconState* = HashedBeaconState | phase0.HashedBeaconState
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object

View File

@ -922,9 +922,6 @@ proc readValue*(r: var JsonReader, T: type GraffitiBytes): T
except ValueError as err:
r.raiseUnexpectedValue err.msg
template getStateField*(stateData, fieldName: untyped): untyped =
stateData.data.data.fieldName
proc load*(
validators: openArray[ImmutableValidatorData2],
index: ValidatorIndex | uint64): Option[CookedPubKey] =

View File

@ -97,25 +97,6 @@ type
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
BlockRef* = ref object
## Node in object graph guaranteed to lead back to tail block, and to have
## a corresponding entry in database.
## Block graph should form a tree - in particular, there are no cycles.
root*: Eth2Digest ##\
## Root that can be used to retrieve block data from database
parent*: BlockRef ##\
## Not nil, except for the tail
slot*: Slot # could calculate this by walking to root, but..
StateData* = object
data*: HashedBeaconState
blck*: BlockRef ##\
## The block associated with the state found in data
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose

View File

@ -0,0 +1,171 @@
# beacon_chain
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
import
std/macros,
chronicles,
stew/[assign2, results],
../extras,
../spec/[
beaconstate, digest, helpers, presets, state_transition_block, validator],
./datatypes/[phase0, altair]
type
BeaconStateFork* = enum
forkPhase0,
forkAltair
ForkedHashedBeaconState* = object
case beaconStateFork*: BeaconStateFork
of forkPhase0: hbsPhase0*: phase0.HashedBeaconState
of forkAltair: hbsAltair*: altair.HashedBeaconState
# State-related functionality based on ForkedHashedBeaconState instead of BeaconState
# Dispatch functions
func assign*(tgt: var ForkedHashedBeaconState, src: ForkedHashedBeaconState) =
if tgt.beaconStateFork == src.beaconStateFork:
if tgt.beaconStateFork == forkPhase0:
assign(tgt.hbsPhase0, src.hbsPhase0)
elif tgt.beaconStateFork == forkAltair:
assign(tgt.hbsAltair, src.hbsAltair)
else:
doAssert false
else:
# Ensure case object and discriminator get updated simultaneously, even
# with nimOldCaseObjects. This is infrequent.
tgt = src
macro getStateField*(s, y: untyped): untyped =
result = quote do:
(if `s`.beaconStateFork == forkPhase0:
unsafeAddr (`s`.hbsPhase0.data.`y`) else:
unsafeAddr (`s`.hbsAltair.data.`y`))[]
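
The macro expands to a branch over the active fork and dereferences the address of the requested field, so one call site serves both the phase0 and the Altair state. A minimal usage sketch, with `forkedState` as an illustrative ForkedHashedBeaconState variable:

# Hedged sketch: field reads are fork-agnostic at the call site.
let
  currentSlot = getStateField(forkedState, slot)
  currentFork = getStateField(forkedState, fork)
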
template getStateRoot*(x: ForkedHashedBeaconState): Eth2Digest =
case x.beaconStateFork:
of forkPhase0: x.hbsPhase0.root
of forkAltair: x.hbsAltair.root
template hash_tree_root*(x: ForkedHashedBeaconState): Eth2Digest =
case x.beaconStateFork:
of forkPhase0: hash_tree_root(x.hbsPhase0.data)
of forkAltair: hash_tree_root(x.hbsAltair.data)
func get_beacon_committee*(
state: ForkedHashedBeaconState, slot: Slot, index: CommitteeIndex,
cache: var StateCache): seq[ValidatorIndex] =
# This one is used by tests/, ncli/, and a couple of places in RPC
# TODO use the iterator version alone (e.g., wrapped via toSeq) to remove
# the risk of tests and beacon_chain/ using diverging get_beacon_committee()
# implementations. This trades some performance for test
# correctness/consistency.
case state.beaconStateFork:
of forkPhase0: get_beacon_committee(state.hbsPhase0.data, slot, index, cache)
of forkAltair: get_beacon_committee(state.hbsAltair.data, slot, index, cache)
func get_committee_count_per_slot*(state: ForkedHashedBeaconState,
epoch: Epoch,
cache: var StateCache): uint64 =
## Return the number of committees at ``epoch``.
case state.beaconStateFork:
of forkPhase0: get_committee_count_per_slot(state.hbsPhase0.data, epoch, cache)
of forkAltair: get_committee_count_per_slot(state.hbsAltair.data, epoch, cache)
func get_beacon_proposer_index*(state: ForkedHashedBeaconState,
cache: var StateCache, slot: Slot):
Option[ValidatorIndex] =
case state.beaconStateFork:
of forkPhase0: get_beacon_proposer_index(state.hbsPhase0.data, cache, slot)
of forkAltair: get_beacon_proposer_index(state.hbsAltair.data, cache, slot)
func get_shuffled_active_validator_indices*(
cache: var StateCache, state: ForkedHashedBeaconState, epoch: Epoch):
seq[ValidatorIndex] =
case state.beaconStateFork:
of forkPhase0:
cache.get_shuffled_active_validator_indices(state.hbsPhase0.data, epoch)
of forkAltair:
cache.get_shuffled_active_validator_indices(state.hbsAltair.data, epoch)
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_block_root_at_slot
func get_block_root_at_slot*(state: ForkedHashedBeaconState,
slot: Slot): Eth2Digest =
## Return the block root at a recent ``slot``.
case state.beaconStateFork:
of forkPhase0: get_block_root_at_slot(state.hbsPhase0.data, slot)
of forkAltair: get_block_root_at_slot(state.hbsAltair.data, slot)
proc get_attesting_indices*(state: ForkedHashedBeaconState;
data: AttestationData;
bits: CommitteeValidatorsBits;
cache: var StateCache): seq[ValidatorIndex] =
# TODO when https://github.com/nim-lang/Nim/issues/18188 is fixed, use an
# iterator
var idxBuf: seq[ValidatorIndex]
doAssert state.beaconStateFork == forkPhase0
for vidx in state.hbsPhase0.data.get_attesting_indices(data, bits, cache):
idxBuf.add vidx
if true: return idxBuf
if state.beaconStateFork == forkPhase0:
for vidx in state.hbsPhase0.data.get_attesting_indices(data, bits, cache):
idxBuf.add vidx
elif state.beaconStateFork == forkAltair:
for vidx in state.hbsAltair.data.get_attesting_indices(data, bits, cache):
idxBuf.add vidx
else:
doAssert false
idxBuf
proc check_attester_slashing*(
state: var ForkedHashedBeaconState; attester_slashing: SomeAttesterSlashing;
flags: UpdateFlags): Result[seq[ValidatorIndex], cstring] =
case state.beaconStateFork:
of forkPhase0:
check_attester_slashing(state.hbsPhase0.data, attester_slashing, flags)
of forkAltair:
check_attester_slashing(state.hbsAltair.data, attester_slashing, flags)
proc check_proposer_slashing*(
state: var ForkedHashedBeaconState; proposer_slashing: SomeProposerSlashing;
flags: UpdateFlags): Result[void, cstring] =
case state.beaconStateFork:
of forkPhase0:
check_proposer_slashing(state.hbsPhase0.data, proposer_slashing, flags)
of forkAltair:
check_proposer_slashing(state.hbsAltair.data, proposer_slashing, flags)
proc check_voluntary_exit*(
state: ForkedHashedBeaconState; signed_voluntary_exit: SomeSignedVoluntaryExit;
flags: UpdateFlags): Result[void, cstring] =
case state.beaconStateFork:
of forkPhase0:
check_voluntary_exit(state.hbsPhase0.data, signed_voluntary_exit, flags)
of forkAltair:
check_voluntary_exit(state.hbsAltair.data, signed_voluntary_exit, flags)
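
These dispatchers let callers that only hold a ForkedHashedBeaconState, such as exit-pool gossip validation, reuse the per-fork spec checks unchanged. A rough sketch, assuming `dag` and `exit` are in scope (illustrative only):

# Hedged sketch: validate a voluntary exit against whichever fork the
# head state currently uses.
let res = check_voluntary_exit(dag.headState.data, exit, {})
if res.isErr:
  debug "Voluntary exit failed validation", error = res.error
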
# Derived utilities
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_current_epoch
func get_current_epoch*(stateData: ForkedHashedBeaconState): Epoch =
## Return the current epoch.
getStateField(stateData, slot).epoch
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_previous_epoch
func get_previous_epoch*(stateData: ForkedHashedBeaconState): Epoch =
## Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
let current_epoch = get_current_epoch(stateData)
if current_epoch == GENESIS_EPOCH:
GENESIS_EPOCH
else:
current_epoch - 1

View File

@ -13,7 +13,9 @@ import
stew/[byteutils, results],
# Internal
../ssz/merkleization,
"."/[crypto, datatypes, helpers, presets, beaconstate, digest]
"."/[
crypto, datatypes, helpers, presets, beaconstate, digest,
forkedbeaconstate_helpers]
# Otherwise, error.
import chronicles
@ -99,7 +101,7 @@ proc addIndexedAttestation(
sigs: var seq[SignatureSet],
attestation: IndexedAttestation,
validatorKeys: auto,
state: StateData,
state: ForkedHashedBeaconState,
): Result[void, cstring] =
## Add an indexed attestation for batched BLS verification
## purposes
@ -124,12 +126,13 @@ proc addAttestation(
sigs: var seq[SignatureSet],
attestation: Attestation,
validatorKeys: auto,
state: StateData,
state: ForkedHashedBeaconState,
cache: var StateCache
): Result[void, cstring] =
var inited = false
var attestersAgg{.noInit.}: AggregatePublicKey
for valIndex in state.data.data.get_attesting_indices(
for valIndex in state.get_attesting_indices(
attestation.data,
attestation.aggregation_bits,
cache
@ -251,7 +254,7 @@ proc collectSignatureSets*(
sigs: var seq[SignatureSet],
signed_block: SignedBeaconBlock,
validatorKeys: auto,
state: StateData,
state: ForkedHashedBeaconState,
cache: var StateCache): Result[void, cstring] =
## Collect all signatures in a single signed block.
## This includes

View File

@ -46,7 +46,7 @@ import
stew/results,
../extras, ../ssz/merkleization, metrics,
./datatypes/[phase0, altair], ./crypto, ./digest, ./helpers, ./signatures, ./validator, ./beaconstate,
./state_transition_block, ./state_transition_epoch,
./state_transition_block, ./state_transition_epoch, forkedbeaconstate_helpers,
../../nbench/bench_lab
# TODO why need anything except the first two?
@ -163,174 +163,73 @@ proc advance_slot(
state.slot += 1
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
proc process_slots*(state: var SomeHashedBeaconState, slot: Slot,
cache: var StateCache, rewards: var RewardInfo,
flags: UpdateFlags = {}): bool {.nbench.} =
## Process one or more slot transitions without blocks - if the slot transition
## passes an epoch boundary, epoch processing will run and `rewards` will be
## updated, else it will be cleared
if not (state.data.slot < slot):
if slotProcessed notin flags or state.data.slot != slot:
notice(
"Unusual request for a slot in the past",
state_root = shortLog(state.root),
current_slot = state.data.slot,
target_slot = slot
)
return false
# Catch up to the target slot
while state.data.slot < slot:
advance_slot(state.data, state.root, flags, cache, rewards)
# The root must be updated on every slot update, or the next `process_slot`
# will be incorrect
state.root = hash_tree_root(state.data)
# https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.6/specs/altair/fork.md#upgrading-the-state
# says to put upgrading here too, TODO. It may not work in state replay
# otherwise, since updateStateData() uses this function.
true
func noRollback*(state: var phase0.HashedBeaconState) =
trace "Skipping rollback of broken state"
type
BeaconStateFork* = enum
forkPhase0,
forkAltair
ForkedHashedBeaconState* = object
case beaconStateFork*: BeaconStateFork
of forkPhase0: hbsPhase0*: phase0.HashedBeaconState
of forkAltair: hbsAltair*: altair.HashedBeaconState
# Dispatch functions
template getStateField*(x: ForkedHashedBeaconState, y: untyped): untyped =
case x.beaconStateFork:
of forkPhase0: x.hbsPhase0.data.y
of forkAltair: x.hbsAltair.data.y
template getStateField*(x: var ForkedHashedBeaconState, y: untyped): untyped =
case x.beaconStateFork:
of forkPhase0: x.hbsPhase0.data.y
of forkAltair: x.hbsAltair.data.y
template getStateRoot*(x: ForkedHashedBeaconState): Eth2Digest =
case x.beaconStateFork:
of forkPhase0: x.hbsPhase0.root
of forkAltair: x.hbsAltair.root
template hash_tree_root*(x: ForkedHashedBeaconState): Eth2Digest =
case x.beaconStateFork:
of forkPhase0: hash_tree_root(x.hbsPhase0.data)
of forkAltair: hash_tree_root(x.hbsAltair.data)
template callWithBS*(op: untyped, y: ForkedHashedBeaconState): untyped =
let bs {.inject.} =
case y.beaconStateFork:
of forkPhase0: y.hbsPhase0.data
of forkAltair: y.hbsAltair.data
op
proc maybeUpgradeStateToAltair(
state: var ForkedHashedBeaconState, altairForkSlot: Slot) =
# https://github.com/ethereum/eth2.0-specs/blob/v1.1.0-alpha.6/specs/altair/fork.md#upgrading-the-state
# Both state_transition_slots() and state_transition_block() call this, so
# only run it once by checking for existing fork.
# Both process_slots() and state_transition_block() call this, so only run it
# once by checking for existing fork.
if getStateField(state, slot) == altairForkSlot and
state.beaconStateFork == forkPhase0:
var newState = upgrade_to_altair(state.hbsPhase0.data)
state = ForkedHashedBeaconState(
state = (ref ForkedHashedBeaconState)(
beaconStateFork: forkAltair,
hbsAltair: altair.HashedBeaconState(
root: hash_tree_root(newState[]), data: newState[]))
root: hash_tree_root(newState[]), data: newState[]))[]
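
Building the replacement case object behind a `ref` and dereferencing it on assignment keeps the full-size temporary on the heap instead of the stack, while still updating the discriminator and payload together. A toy illustration of the pattern (the `Big` type is invented for the example):

type Big = object
  payload: array[1024, byte]

var dst: Big
# Heap-allocate the temporary via (ref Big)(), then copy it into place on
# assignment; no second full-size Big is created on the stack.
dst = (ref Big)()[]
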
proc state_transition_slots(
preset: RuntimePreset,
state: var ForkedHashedBeaconState,
signedBlock: phase0.SignedBeaconBlock | phase0.SigVerifiedSignedBeaconBlock | phase0.TrustedSignedBeaconBlock | altair.SignedBeaconBlock,
proc process_slots*(
state: var ForkedHashedBeaconState, slot: Slot,
cache: var StateCache, rewards: var RewardInfo, flags: UpdateFlags,
altairForkSlot: Slot): bool {.nbench.} =
let slot = signedBlock.message.slot
if not (getStateField(state, slot) < slot):
if slotProcessed notin flags or getStateField(state, slot) != slot:
notice "Unusual request for a slot in the past",
state_root = shortLog(getStateRoot(state)),
current_slot = getStateField(state, slot),
target_slot = slot
return false
# Update the state so its slot matches that of the block
while getStateField(state, slot) < slot:
case state.beaconStateFork:
of forkPhase0:
advance_slot(
state.hbsPhase0.data, state.hbsPhase0.root, flags, cache, rewards)
state.hbsPhase0.data, getStateRoot(state), flags, cache, rewards)
if state.hbsPhase0.data.slot < slot:
# Don't update state root for the slot of the block
state.hbsPhase0.root = hash_tree_root(state.hbsPhase0.data)
if skipLastStateRootCalculation notin flags or
getStateField(state, slot) < slot:
# Don't update state root for the slot of the block if going to process
# block after
state.hbsPhase0.root = hash_tree_root(state)
of forkAltair:
advance_slot(
state.hbsAltair.data, state.hbsAltair.root, flags, cache, rewards)
if getStateField(state, slot) < slot:
# Don't update state root for the slot of the block
state.hbsAltair.root = hash_tree_root(state.hbsAltair.data)
if skipLastStateRootCalculation notin flags or
getStateField(state, slot) < slot:
# Don't update state root for the slot of the block if going to process
# block after
state.hbsAltair.root = hash_tree_root(state)
maybeUpgradeStateToAltair(state, altairForkSlot)
true
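
A minimal sketch of the new signature, assuming `forkedState: var ForkedHashedBeaconState` is in scope and no Altair transition is scheduled (hence FAR_FUTURE_SLOT):

var
  cache = StateCache()
  rewards = RewardInfo()
# Advance a single slot; empty flags keep the per-slot state root update,
# and FAR_FUTURE_SLOT disables the Altair upgrade check.
doAssert process_slots(
  forkedState, getStateField(forkedState, slot) + 1, cache, rewards, {},
  FAR_FUTURE_SLOT)
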
proc state_transition_slots(
proc state_transition_block_aux(
preset: RuntimePreset,
state: var SomeHashedBeaconState,
signedBlock: phase0.SignedBeaconBlock | phase0.SigVerifiedSignedBeaconBlock |
phase0.TrustedSignedBeaconBlock | altair.SignedBeaconBlock,
cache: var StateCache, rewards: var RewardInfo, flags: UpdateFlags):
bool {.nbench.} =
# TODO remove when the HashedBeaconState state_transition is removed; it's
# to avoid requiring a wrapped/memory-copied version
let slot = signedBlock.message.slot
if not (state.data.slot < slot):
if slotProcessed notin flags or state.data.slot != slot:
notice "State must precede block",
state_root = shortLog(state.root),
current_slot = state.data.slot,
blck = shortLog(signedBlock)
return false
# Update the state so its slot matches that of the block
while state.data.slot < slot:
advance_slot(state.data, state.root, flags, cache, rewards)
if state.data.slot < slot:
# Don't update state root for the slot of the block
state.root = hash_tree_root(state.data)
true
proc state_transition_block*(
preset: RuntimePreset,
state: var SomeHashedBeaconState,
signedBlock: phase0.SignedBeaconBlock | phase0.SigVerifiedSignedBeaconBlock |
phase0.TrustedSignedBeaconBlock | altair.SignedBeaconBlock,
cache: var StateCache, flags: UpdateFlags, rollback: RollbackHashedProc):
bool {.nbench.} =
## `rollback` is called if the transition fails and the given state has been
## partially changed. If a temporary state was given to `state_transition`,
## it is safe to use `noRollback` and leave it broken, else the state
## object should be rolled back to a consistent state. If the transition fails
## before the state has been updated, `rollback` will not be called.
cache: var StateCache, flags: UpdateFlags): bool {.nbench.} =
# Block updates - these happen when there's a new block being suggested
# by the block proposer. Every actor in the network will update its state
# according to the contents of this block - but first they will validate
# that the block is sane.
doAssert not rollback.isNil, "use noRollback if it's ok to mess up state"
if not (skipBLSValidation in flags or
verify_block_signature(state.data, signedBlock)):
when not (state is altair.HashedBeaconState):
# TODO re-enable in Altair
rollback(state)
return false
trace "state_transition: processing block, signature passed",
@ -346,16 +245,10 @@ proc state_transition_block*(
eth1_deposit_index = state.data.eth1_deposit_index,
deposit_root = shortLog(state.data.eth1_data.deposit_root),
error = res.error
when not (state is altair.HashedBeaconState):
# TODO re-enable in Altair
rollback(state)
return false
if not (skipStateRootValidation in flags or
verifyStateRoot(state.data, signedBlock.message)):
when not (state is altair.HashedBeaconState):
# TODO re-enable in Altair
rollback(state)
return false
# only blocks currently being produced have an empty state root - we use a
@ -366,6 +259,13 @@ proc state_transition_block*(
true
type
RollbackForkedHashedProc* =
proc(state: var ForkedHashedBeaconState) {.gcsafe, raises: [Defect].}
func noRollback*(state: var ForkedHashedBeaconState) =
trace "Skipping rollback of broken state"
proc state_transition_block*(
preset: RuntimePreset,
state: var ForkedHashedBeaconState,
@ -373,21 +273,28 @@ proc state_transition_block*(
phase0.TrustedSignedBeaconBlock |
altair.SignedBeaconBlock | altair.SigVerifiedSignedBeaconBlock,
cache: var StateCache, flags: UpdateFlags,
rollback: RollbackHashedProc, altairForkSlot: Slot): bool {.nbench.} =
rollback: RollbackForkedHashedProc, altairForkSlot: Slot): bool {.nbench.} =
## `rollback` is called if the transition fails and the given state has been
## partially changed. If a temporary state was given to `state_transition`,
## it is safe to use `noRollback` and leave it broken, else the state
## object should be rolled back to a consistent state. If the transition fails
## before the state has been updated, `rollback` will not be called.
doAssert not rollback.isNil, "use noRollback if it's ok to mess up state"
# Ensure state_transition_block()-only callers trigger this
maybeUpgradeStateToAltair(state, altairForkSlot)
case state.beaconStateFork:
of forkPhase0: state_transition_block(
preset, state.hbsPhase0, signedBlock, cache, flags, rollback)
of forkAltair: state_transition_block(
preset, state.hbsAltair, signedBlock, cache, flags, rollback)
let success = case state.beaconStateFork:
of forkPhase0: state_transition_block_aux(
preset, state.hbsPhase0, signedBlock, cache, flags)
of forkAltair: state_transition_block_aux(
preset, state.hbsAltair, signedBlock, cache, flags)
if not success:
rollback(state)
return false
true
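
A rough sketch of wiring a rollback for the forked variant, assuming `restoreState` holds a clone taken before applying an untrusted block and that `preset`, `forkedState`, `signedBlock`, `cache` and `altairForkSlot` are in scope (all names illustrative):

proc restore(v: var ForkedHashedBeaconState) =
  # Put back the pre-block copy if block application fails midway.
  assign(v, restoreState[])

if not state_transition_block(
    preset, forkedState, signedBlock, cache, {}, restore, altairForkSlot):
  notice "Block application failed, state rolled back"
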
proc state_transition*(
preset: RuntimePreset,
@ -395,8 +302,8 @@ proc state_transition*(
signedBlock: phase0.SignedBeaconBlock | phase0.SigVerifiedSignedBeaconBlock |
phase0.TrustedSignedBeaconBlock | altair.SignedBeaconBlock,
cache: var StateCache, rewards: var RewardInfo, flags: UpdateFlags,
rollback: RollbackHashedProc,
altairForkEpoch: Epoch = FAR_FUTURE_EPOCH): bool {.nbench.} =
rollback: RollbackForkedHashedProc,
altairForkSlot: Slot = FAR_FUTURE_SLOT): bool {.nbench.} =
## Apply a block to the state, advancing the slot counter as necessary. The
## given state must be of a lower slot, or, in case the `slotProcessed` flag
## is set, can be the slot state of the same slot as the block (where the
@ -412,46 +319,12 @@ proc state_transition*(
## it is safe to use `noRollback` and leave it broken, else the state
## object should be rolled back to a consistent state. If the transition fails
## before the state has been updated, `rollback` will not be called.
let slot = signedBlock.message.slot
if not (getStateField(state, slot) < slot):
if slotProcessed notin flags or getStateField(state, slot) != slot:
notice "State must precede block",
state_root = shortLog(getStateRoot(state)),
current_slot = getStateField(state, slot),
blck = shortLog(signedBlock)
return false
if not state_transition_slots(
preset, state, signedBlock, cache, rewards, flags,
altairForkEpoch.compute_start_slot_at_epoch):
if not process_slots(
state, signedBlock.message.slot, cache, rewards,
flags + {skipLastStateRootCalculation}, altairForkSlot):
return false
state_transition_block(
preset, state, signedBlock, cache, flags, rollback,
altairForkEpoch.compute_start_slot_at_epoch)
proc state_transition*(
preset: RuntimePreset,
state: var SomeHashedBeaconState,
signedBlock: phase0.SignedBeaconBlock | phase0.SigVerifiedSignedBeaconBlock |
phase0.TrustedSignedBeaconBlock | altair.SignedBeaconBlock,
cache: var StateCache, rewards: var RewardInfo, flags: UpdateFlags,
rollback: RollbackHashedProc): bool =
# Does not follow hard forks; suitable only where that's irrelevant.
# TODO remove when callers gone
let slot = signedBlock.message.slot
if not (state.data.slot < slot):
if slotProcessed notin flags or state.data.slot != slot:
notice "State must precede block",
state_root = shortLog(state.root),
current_slot = state.data.slot,
blck = shortLog(signedBlock)
return false
if not state_transition_slots(
preset, state, signedBlock, cache, rewards, flags):
return false
state_transition_block(
preset, state, signedBlock, cache, flags, rollback)
preset, state, signedBlock, cache, flags, rollback, altairForkSlot)
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#preparing-for-a-beaconblock
proc makeBeaconBlock*(

View File

@ -8,17 +8,16 @@
{.push raises: [Defect].}
import
./datatypes, ./digest, ./helpers,
../consensus_object_pools/statedata_helpers
./datatypes, ./digest, ./forkedbeaconstate_helpers, ./helpers
const
SAFETY_DECAY* = 10'u64
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/weak-subjectivity.md#calculating-the-weak-subjectivity-period
func compute_weak_subjectivity_period(state: StateData): uint64 =
func compute_weak_subjectivity_period(state: ForkedHashedBeaconState): uint64 =
var weak_subjectivity_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
let validator_count =
get_active_validator_indices_len(state.data.data, get_current_epoch(state))
get_active_validator_indices_len(state.hbsPhase0.data, get_current_epoch(state))
if validator_count >= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT:
weak_subjectivity_period += SAFETY_DECAY * CHURN_LIMIT_QUOTIENT div (2 * 100)
else:
@ -27,7 +26,7 @@ func compute_weak_subjectivity_period(state: StateData): uint64 =
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/weak-subjectivity.md#checking-for-stale-weak-subjectivity-checkpoint
func is_within_weak_subjectivity_period*(current_slot: Slot,
ws_state: StateData,
ws_state: ForkedHashedBeaconState,
ws_checkpoint: Checkpoint): bool =
# Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint
doAssert getStateField(ws_state, latest_block_header).state_root ==

View File

@ -10,7 +10,8 @@
import
os, strformat, chronicles,
./ssz_serialization,
../spec/[crypto, datatypes, digest]
../spec/[crypto, datatypes, digest],
../consensus_object_pools/block_pools_types
# Dump errors are generally not fatal where used currently - the code calling
# these functions, like most code, is not exception safe

View File

@ -10,7 +10,7 @@
import
options, tables, sets, macros,
chronicles, chronos, stew/ranges/bitranges, libp2p/switch,
../spec/[datatypes, network, crypto, digest],
../spec/[crypto, datatypes, digest, forkedbeaconstate_helpers, network],
../beacon_node_types,
../networking/eth2_network,
../consensus_object_pools/blockchain_dag
@ -85,9 +85,9 @@ proc getCurrentStatus*(state: BeaconSyncNetworkState): StatusMsg {.gcsafe.} =
StatusMsg(
forkDigest: state.forkDigest,
finalizedRoot:
getStateField(dag.headState, finalized_checkpoint).root,
getStateField(dag.headState.data, finalized_checkpoint).root,
finalizedEpoch:
getStateField(dag.headState, finalized_checkpoint).epoch,
getStateField(dag.headState.data, finalized_checkpoint).epoch,
headRoot: headBlock.root,
headSlot: headBlock.slot)

View File

@ -21,7 +21,8 @@ import
# Local modules
../spec/[
datatypes, digest, crypto, helpers, network, signatures, state_transition],
datatypes, digest, crypto, forkedbeaconstate_helpers, helpers, network,
signatures, state_transition],
../conf, ../beacon_clock,
../consensus_object_pools/[
spec_cache, blockchain_dag, block_clearance,
@ -56,7 +57,7 @@ logScope: topics = "beacval"
proc findValidator(validators: auto, pubKey: ValidatorPubKey):
Option[ValidatorIndex] =
let idx = validators.asSeq.findIt(it.pubKey == pubKey)
let idx = validators.findIt(it.pubKey == pubKey)
if idx == -1:
# We allow adding a validator even if its key is not in the state registry:
# it might be that the deposit for this validator has not yet been processed
@ -66,16 +67,17 @@ proc findValidator(validators: auto, pubKey: ValidatorPubKey):
some(idx.ValidatorIndex)
proc addLocalValidator(node: BeaconNode,
stateData: StateData,
validators: openArray[Validator],
privKey: ValidatorPrivKey) =
let pubKey = privKey.toPubKey()
node.attachedValidators[].addLocalValidator(
pubKey, privKey,
findValidator(getStateField(stateData, validators), pubKey.toPubKey()))
findValidator(validators, pubKey.toPubKey()))
proc addLocalValidators*(node: BeaconNode) =
for validatorKey in node.config.validatorKeys:
node.addLocalValidator node.dag.headState, validatorKey
node.addLocalValidator(
getStateField(node.dag.headState.data, validators).asSeq, validatorKey)
proc addRemoteValidators*(node: BeaconNode) {.raises: [Defect, OSError, IOError].} =
# load all the validators from the child process - loop until `end`
@ -85,7 +87,7 @@ proc addRemoteValidators*(node: BeaconNode) {.raises: [Defect, OSError, IOError]
let
key = ValidatorPubKey.fromHex(line).get()
index = findValidator(
getStateField(node.dag.headState, validators), key)
getStateField(node.dag.headState.data, validators).asSeq, key)
pk = key.load()
if pk.isSome():
let v = AttachedValidator(pubKey: pk.get(),
@ -254,19 +256,20 @@ proc createAndSendAttestation(node: BeaconNode,
notice "Error sending attestation", err = exc.msg
proc getBlockProposalEth1Data*(node: BeaconNode,
stateData: StateData): BlockProposalEth1Data =
state: ForkedHashedBeaconState):
BlockProposalEth1Data =
if node.eth1Monitor.isNil:
var pendingDepositsCount =
getStateField(stateData, eth1_data).deposit_count -
getStateField(stateData, eth1_deposit_index)
getStateField(state, eth1_data).deposit_count -
getStateField(state, eth1_deposit_index)
if pendingDepositsCount > 0:
result.hasMissingDeposits = true
else:
result.vote = getStateField(stateData, eth1_data)
result.vote = getStateField(state, eth1_data)
else:
let finalizedEpochRef = node.dag.getFinalizedEpochRef()
result = node.eth1Monitor.getBlockProposalData(
stateData, finalizedEpochRef.eth1_data,
state, finalizedEpochRef.eth1_data,
finalizedEpochRef.eth1_deposit_index)
func getOpaqueTransaction(s: string): OpaqueTransaction =
@ -302,7 +305,7 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
node.dag.withState(proposalState[], head.atSlot(slot)):
let
eth1Proposal = node.getBlockProposalEth1Data(stateData)
eth1Proposal = node.getBlockProposalEth1Data(stateData.data)
poolPtr = unsafeAddr node.dag # safe because restore is short-lived
if eth1Proposal.hasMissingDeposits:
@ -318,13 +321,14 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
return makeBeaconBlock(
node.runtimePreset,
hashedState,
stateData.data.hbsPhase0,
validator_index,
head.root,
randao_reveal,
eth1Proposal.vote,
graffiti,
node.attestationPool[].getAttestationsForBlock(stateData, cache),
node.attestationPool[].getAttestationsForBlock(
stateData.data.hbsPhase0, cache),
eth1Proposal.deposits,
node.exitPool[].getProposerSlashingsForBlock(),
node.exitPool[].getAttesterSlashingsForBlock(),
@ -381,9 +385,9 @@ proc proposeBlock(node: BeaconNode,
return head
let
fork = getStateField(node.dag.headState, fork)
fork = getStateField(node.dag.headState.data, fork)
genesis_validators_root =
getStateField(node.dag.headState, genesis_validators_root)
getStateField(node.dag.headState.data, genesis_validators_root)
randao = await validator.genRandaoReveal(
fork, genesis_validators_root, slot)
message = await makeBeaconBlockForHeadAndSlot(
@ -455,9 +459,9 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
epochRef = node.dag.getEpochRef(
attestationHead.blck, slot.compute_epoch_at_slot())
committees_per_slot = get_committee_count_per_slot(epochRef)
fork = getStateField(node.dag.headState, fork)
fork = getStateField(node.dag.headState.data, fork)
genesis_validators_root =
getStateField(node.dag.headState, genesis_validators_root)
getStateField(node.dag.headState.data, genesis_validators_root)
for committee_index in get_committee_indices(epochRef):
let committee = get_beacon_committee(epochRef, slot, committee_index)
@ -528,9 +532,9 @@ proc broadcastAggregatedAttestations(
let
epochRef = node.dag.getEpochRef(aggregationHead, aggregationSlot.epoch)
fork = getStateField(node.dag.headState, fork)
fork = getStateField(node.dag.headState.data, fork)
genesis_validators_root =
getStateField(node.dag.headState, genesis_validators_root)
getStateField(node.dag.headState.data, genesis_validators_root)
committees_per_slot = get_committee_count_per_slot(epochRef)
var
@ -589,14 +593,14 @@ proc updateValidatorMetrics*(node: BeaconNode) =
if v.index.isNone():
0.Gwei
elif v.index.get().uint64 >=
getStateField(node.dag.headState, balances).lenu64:
getStateField(node.dag.headState.data, balances).lenu64:
debug "Cannot get validator balance, index out of bounds",
pubkey = shortLog(v.pubkey), index = v.index.get(),
balances = getStateField(node.dag.headState, balances).len,
stateRoot = node.dag.headState.data.root
balances = getStateField(node.dag.headState.data, balances).len,
stateRoot = getStateRoot(node.dag.headState.data)
0.Gwei
else:
getStateField(node.dag.headState, balances)[v.index.get()]
getStateField(node.dag.headState.data, balances)[v.index.get()]
if i < 64:
attached_validator_balance.set(

View File

@ -12,8 +12,8 @@ import
confutils/defs, serialization, chronicles,
# Beacon-chain
../beacon_chain/spec/[
datatypes, crypto, helpers, beaconstate, helpers,
state_transition_block, state_transition_epoch, state_transition],
beaconstate, crypto, datatypes, forkedbeaconstate_helpers, helpers,
state_transition, state_transition_block],
../beacon_chain/extras,
../beacon_chain/ssz/[merkleization, ssz_serialization],
../tests/official/fixtures_utils
@ -149,10 +149,11 @@ proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, ski
rewards = RewardInfo()
echo "Running: ", prePath
let state = (ref HashedBeaconState)(
data: parseSSZ(prePath, BeaconState)
let state = (ref ForkedHashedBeaconState)(
hbsPhase0: HashedBeaconState(data: parseSSZ(prePath, BeaconState)),
beaconStateFork: forkPhase0
)
state.root = hash_tree_root(state.data)
state.hbsPhase0.root = hash_tree_root(state[])
for i in 0 ..< blocksQty:
let blockPath = dir / blocksPrefix & $i & ".ssz"
@ -173,13 +174,15 @@ proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
let prePath = dir / preState & ".ssz"
echo "Running: ", prePath
let state = (ref HashedBeaconState)(
data: parseSSZ(prePath, BeaconState)
)
state.root = hash_tree_root(state.data)
let state = (ref ForkedHashedBeaconState)(
hbsPhase0: HashedBeaconState(data: parseSSZ(prePath, BeaconState)),
beaconStateFork: forkPhase0)
state.hbsPhase0.root = hash_tree_root(state[])
# Shouldn't necessarily assert, because nbench can run the test suite
discard process_slots(state[], state.data.slot + numSlots, cache, rewards)
discard process_slots(
state[], getStateField(state[], slot) + numSlots, cache, rewards, {},
FAR_FUTURE_SLOT)
template processEpochScenarioImpl(
dir, preState: string,

View File

@ -3,7 +3,8 @@ import
confutils, chronicles, json_serialization,
stew/byteutils,
../research/simutils,
../beacon_chain/spec/[crypto, datatypes, digest, helpers, state_transition],
../beacon_chain/spec/[
crypto, datatypes, digest, forkedbeaconstate_helpers, helpers, state_transition],
../beacon_chain/extras,
../beacon_chain/networking/network_metadata,
../beacon_chain/ssz/[merkleization, ssz_serialization]
@ -75,13 +76,15 @@ type
proc doTransition(conf: NcliConf) =
let
stateY = (ref HashedBeaconState)(
data: SSZ.loadFile(conf.preState, BeaconState),
stateY = (ref ForkedHashedBeaconState)(
hbsPhase0: HashedBeaconState(
data: SSZ.loadFile(conf.preState, BeaconState)),
beaconStateFork: forkPhase0
)
blckX = SSZ.loadFile(conf.blck, SignedBeaconBlock)
flags = if not conf.verifyStateRoot: {skipStateRootValidation} else: {}
stateY.root = hash_tree_root(stateY.data)
stateY.hbsPhase0.root = hash_tree_root(stateY[])
var
cache = StateCache()
@ -91,7 +94,7 @@ proc doTransition(conf: NcliConf) =
error "State transition failed"
quit 1
else:
SSZ.saveFile(conf.postState, stateY.data)
SSZ.saveFile(conf.postState, stateY.hbsPhase0.data)
proc doSlots(conf: NcliConf) =
type
@ -103,22 +106,26 @@ proc doSlots(conf: NcliConf) =
var timers: array[Timers, RunningStat]
let
stateY = withTimerRet(timers[tLoadState]): (ref HashedBeaconState)(
data: SSZ.loadFile(conf.preState2, BeaconState),
stateY = withTimerRet(timers[tLoadState]): (ref ForkedHashedBeaconState)(
hbsPhase0: HashedBeaconState(
data: SSZ.loadFile(conf.preState2, BeaconState)),
beaconStateFork: forkPhase0
)
stateY.root = hash_tree_root(stateY.data)
stateY.hbsPhase0.root = hash_tree_root(stateY[])
var
cache = StateCache()
rewards = RewardInfo()
for i in 0'u64..<conf.slot:
let isEpoch = (stateY[].data.slot + 1).isEpoch
let isEpoch = (getStateField(stateY[], slot) + 1).isEpoch
withTimer(timers[if isEpoch: tApplyEpochSlot else: tApplySlot]):
doAssert process_slots(stateY[], stateY[].data.slot + 1, cache, rewards)
doAssert process_slots(
stateY[], getStateField(stateY[], slot) + 1, cache, rewards, {},
FAR_FUTURE_SLOT)
withTimer(timers[tSaveState]):
SSZ.saveFile(conf.postState, stateY.data)
SSZ.saveFile(conf.postState, stateY.hbsPhase0.data)
printTimers(false, timers)

View File

@ -3,9 +3,10 @@ import
chronicles, confutils, stew/byteutils, eth/db/kvstore_sqlite3,
../beacon_chain/networking/network_metadata,
../beacon_chain/[beacon_chain_db, extras],
../beacon_chain/consensus_object_pools/[blockchain_dag, statedata_helpers],
../beacon_chain/spec/[crypto, datatypes, digest, helpers, state_transition,
state_transition_epoch, presets],
../beacon_chain/consensus_object_pools/blockchain_dag,
../beacon_chain/spec/[crypto, datatypes, digest, forkedbeaconstate_helpers,
helpers, state_transition, state_transition_epoch,
presets],
../beacon_chain/ssz, ../beacon_chain/ssz/sszdump,
../research/simutils, ./e2store
@ -196,11 +197,12 @@ proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
for b in blocks.mitems():
while getStateField(state[], slot) < b.message.slot:
let isEpoch = (getStateField(state[], slot) + 1).isEpoch()
while getStateField(state[].data, slot) < b.message.slot:
let isEpoch = (getStateField(state[].data, slot) + 1).isEpoch()
withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
let ok = process_slots(
state[].data, getStateField(state[], slot) + 1, cache, rewards, {})
state[].data, getStateField(state[].data, slot) + 1, cache, rewards,
{}, FAR_FUTURE_SLOT)
doAssert ok, "Slot processing can't fail with correct inputs"
var start = Moment.now()
@ -208,7 +210,7 @@ proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
if conf.resetCache:
cache = StateCache()
if not state_transition_block(
runtimePreset, state[].data, b, cache, {}, noRollback):
runtimePreset, state[].data, b, cache, {}, noRollback, FAR_FUTURE_SLOT):
dump("./", b)
echo "State transition failed (!)"
quit 1
@ -218,20 +220,20 @@ proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
withTimer(timers[tDbStore]):
dbBenchmark.putBlock(b)
if getStateField(state[], slot).isEpoch and conf.storeStates:
if getStateField(state[], slot).epoch < 2:
dbBenchmark.putState(state[].data.root, state[].data.data)
if getStateField(state[].data, slot).isEpoch and conf.storeStates:
if getStateField(state[].data, slot).epoch < 2:
dbBenchmark.putState(getStateRoot(state[].data), state[].data.hbsPhase0.data)
dbBenchmark.checkpoint()
else:
withTimer(timers[tDbStore]):
dbBenchmark.putState(state[].data.root, state[].data.data)
dbBenchmark.putState(getStateRoot(state[].data), state[].data.hbsPhase0.data)
dbBenchmark.checkpoint()
withTimer(timers[tDbLoad]):
doAssert dbBenchmark.getState(state[].data.root, loadedState[], noRollback)
doAssert dbBenchmark.getState(getStateRoot(state[].data), loadedState[], noRollback)
if getStateField(state[], slot).epoch mod 16 == 0:
doAssert hash_tree_root(state[]) == hash_tree_root(loadedState[])
if getStateField(state[].data, slot).epoch mod 16 == 0:
doAssert hash_tree_root(state[].data) == hash_tree_root(loadedState[])
printTimers(false, timers)
@ -368,7 +370,7 @@ proc cmdRewindState(conf: DbConf, preset: RuntimePreset) =
let tmpState = assignClone(dag.headState)
dag.withState(tmpState[], blckRef.atSlot(Slot(conf.slot))):
echo "Writing state..."
dump("./", hashedState, blck)
dump("./", stateData.data.hbsPhase0, blck)
proc atCanonicalSlot(blck: BlockRef, slot: Slot): BlockSlot =
if slot == 0:
@ -406,7 +408,7 @@ proc cmdExportEra(conf: DbConf, preset: RuntimePreset) =
defer: e2s.close()
dag.withState(tmpState[], canonical):
e2s.appendRecord(stateData.data.data).get()
e2s.appendRecord(stateData.data.hbsPhase0.data).get()
var
ancestors: seq[BlockRef]
@ -455,7 +457,7 @@ proc cmdValidatorPerf(conf: DbConf, runtimePreset: RuntimePreset) =
(start, ends) = dag.getSlotRange(conf.perfSlot, conf.perfSlots)
blockRefs = dag.getBlockRange(start, ends)
perfs = newSeq[ValidatorPerformance](
getStateField(dag.headState, validators).len())
getStateField(dag.headState.data, validators).len())
cache = StateCache()
rewards = RewardInfo()
blck: TrustedSignedBeaconBlock
@ -472,20 +474,20 @@ proc cmdValidatorPerf(conf: DbConf, runtimePreset: RuntimePreset) =
proc processEpoch() =
let
prev_epoch_target_slot =
state[].get_previous_epoch().compute_start_slot_at_epoch()
state[].data.get_previous_epoch().compute_start_slot_at_epoch()
penultimate_epoch_end_slot =
if prev_epoch_target_slot == 0: Slot(0)
else: prev_epoch_target_slot - 1
first_slot_empty =
state[].get_block_root_at_slot(prev_epoch_target_slot) ==
state[].get_block_root_at_slot(penultimate_epoch_end_slot)
state[].data.get_block_root_at_slot(prev_epoch_target_slot) ==
state[].data.get_block_root_at_slot(penultimate_epoch_end_slot)
let first_slot_attesters = block:
let committee_count = state[].get_committee_count_per_slot(
let committee_count = state[].data.get_committee_count_per_slot(
prev_epoch_target_slot.epoch, cache)
var indices = HashSet[ValidatorIndex]()
for committee_index in 0..<committee_count:
for validator_index in state[].get_beacon_committee(
for validator_index in state[].data.get_beacon_committee(
prev_epoch_target_slot, committee_index.CommitteeIndex, cache):
indices.incl(validator_index)
indices
@ -521,26 +523,28 @@ proc cmdValidatorPerf(conf: DbConf, runtimePreset: RuntimePreset) =
for bi in 0..<blockRefs.len:
blck = db.getBlock(blockRefs[blockRefs.len - bi - 1].root).get()
while getStateField(state[], slot) < blck.message.slot:
while getStateField(state[].data, slot) < blck.message.slot:
let ok = process_slots(
state[].data, getStateField(state[], slot) + 1, cache, rewards, {})
state[].data, getStateField(state[].data, slot) + 1, cache, rewards,
{}, FAR_FUTURE_SLOT)
doAssert ok, "Slot processing can't fail with correct inputs"
if getStateField(state[], slot).isEpoch():
if getStateField(state[].data, slot).isEpoch():
processEpoch()
if not state_transition_block(
runtimePreset, state[].data, blck, cache, {}, noRollback):
runtimePreset, state[].data, blck, cache, {}, noRollback, FAR_FUTURE_SLOT):
echo "State transition failed (!)"
quit 1
# Capture rewards of empty slots as well
while getStateField(state[], slot) < ends:
while getStateField(state[].data, slot) < ends:
let ok = process_slots(
state[].data, getStateField(state[], slot) + 1, cache, rewards, {})
state[].data, getStateField(state[].data, slot) + 1, cache, rewards, {},
FAR_FUTURE_SLOT)
doAssert ok, "Slot processing can't fail with correct inputs"
if getStateField(state[], slot).isEpoch():
if getStateField(state[].data, slot).isEpoch():
processEpoch()
echo "validator_index,attestation_hits,attestation_misses,head_attestation_hits,head_attestation_misses,target_attestation_hits,target_attestation_misses,delay_avg,first_slot_head_attester_when_first_slot_empty,first_slot_head_attester_when_first_slot_not_empty"
@ -666,11 +670,11 @@ proc cmdValidatorDb(conf: DbConf, runtimePreset: RuntimePreset) =
outDb.exec("BEGIN TRANSACTION;").expect("DB")
for i in vals..<getStateField(dag.headState, validators).len():
for i in vals..<getStateField(dag.headState.data, validators).len():
insertValidator.exec((
i,
getStateField(dag.headState, validators).data[i].pubkey.toRaw(),
getStateField(dag.headState, validators).data[i].withdrawal_credentials.data)).expect("DB")
getStateField(dag.headState.data, validators).data[i].pubkey.toRaw(),
getStateField(dag.headState.data, validators).data[i].withdrawal_credentials.data)).expect("DB")
outDb.exec("COMMIT;").expect("DB")
@ -702,10 +706,10 @@ proc cmdValidatorDb(conf: DbConf, runtimePreset: RuntimePreset) =
false, cache)
proc processEpoch() =
echo getStateField(state[], slot).epoch
echo getStateField(state[].data, slot).epoch
outDb.exec("BEGIN TRANSACTION;").expect("DB")
insertEpochInfo.exec(
(getStateField(state[], slot).epoch.int64,
(getStateField(state[].data, slot).epoch.int64,
rewards.total_balances.current_epoch_raw.int64,
rewards.total_balances.previous_epoch_raw.int64,
rewards.total_balances.current_epoch_attesters_raw.int64,
@ -737,7 +741,7 @@ proc cmdValidatorDb(conf: DbConf, runtimePreset: RuntimePreset) =
delay.isSome() and delay.get() == 1):
insertValidatorInfo.exec(
(index.int64,
getStateField(state[], slot).epoch.int64,
getStateField(state[].data, slot).epoch.int64,
status.delta.rewards.int64,
status.delta.penalties.int64,
int64(source_attester), # Source delta
@ -748,27 +752,29 @@ proc cmdValidatorDb(conf: DbConf, runtimePreset: RuntimePreset) =
for bi in 0..<blockRefs.len:
blck = db.getBlock(blockRefs[blockRefs.len - bi - 1].root).get()
while getStateField(state[], slot) < blck.message.slot:
while getStateField(state[].data, slot) < blck.message.slot:
let ok = process_slots(
state[].data, getStateField(state[], slot) + 1, cache, rewards, {})
state[].data, getStateField(state[].data, slot) + 1, cache, rewards,
{}, FAR_FUTURE_SLOT)
doAssert ok, "Slot processing can't fail with correct inputs"
if getStateField(state[], slot).isEpoch():
if getStateField(state[].data, slot).isEpoch():
processEpoch()
if not state_transition_block(
runtimePreset, state[].data, blck, cache, {}, noRollback):
runtimePreset, state[].data, blck, cache, {}, noRollback, FAR_FUTURE_SLOT):
echo "State transition failed (!)"
quit 1
# Capture rewards of empty slots as well, including the epoch that got
# finalized
while getStateField(state[], slot) <= ends:
while getStateField(state[].data, slot) <= ends:
let ok = process_slots(
state[].data, getStateField(state[], slot) + 1, cache, rewards, {})
state[].data, getStateField(state[].data, slot) + 1, cache, rewards, {},
FAR_FUTURE_SLOT)
doAssert ok, "Slot processing can't fail with correct inputs"
if getStateField(state[], slot).isEpoch():
if getStateField(state[].data, slot).isEpoch():
processEpoch()
when isMainModule:
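A note on the replay loops above: process_slots (and state_transition_block) now take an explicit update-flags set plus a trailing slot value; these replays pass an empty set and FAR_FUTURE_SLOT, while the fork-transition tests further down pass the Altair fork's start slot instead. A minimal sketch of one replay iteration, reusing the names from this diff (state, blck, cache, rewards, processEpoch), all assumed to be in scope:

# Sketch only - mirrors the loop above.
while getStateField(state[].data, slot) < blck.message.slot:
  let ok = process_slots(
    state[].data, getStateField(state[].data, slot) + 1, cache, rewards,
    {},              # no extra update flags for a plain replay
    FAR_FUTURE_SLOT) # no fork-upgrade slot during this replay
  doAssert ok, "Slot processing can't fail with correct inputs"
  if getStateField(state[].data, slot).isEpoch():
    processEpoch()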
View File
@ -4,8 +4,9 @@
import
stew/ptrops, stew/ranges/ptr_arith, chronicles,
../beacon_chain/extras,
../beacon_chain/spec/[crypto, datatypes, digest, validator, beaconstate,
state_transition_block, state_transition, presets],
../beacon_chain/spec/[
beaconstate, crypto, datatypes, digest, forkedbeaconstate_helpers, presets,
validator, state_transition, state_transition_block],
../beacon_chain/ssz/[merkleization, ssz_serialization]
type
@ -110,17 +111,19 @@ proc nfuzz_block(input: openArray[byte], xoutput: ptr byte,
# and requiring HashedBeaconState (yet). So to keep consistent, puts wrapper
# only in one function.
proc state_transition(
preset: RuntimePreset, data: auto, blck: auto, flags: auto, rollback: RollbackHashedProc):
auto =
preset: RuntimePreset, data: auto, blck: auto, flags: auto,
rollback: RollbackForkedHashedProc): auto =
var
hashedState =
HashedBeaconState(data: data.state, root: hash_tree_root(data.state))
fhState = (ref ForkedHashedBeaconState)(
hbsPhase0: HashedBeaconState(
data: data.state, root: hash_tree_root(data.state)),
beaconStateFork: forkPhase0)
cache = StateCache()
rewards = RewardInfo()
result =
state_transition(
preset, hashedState, blck, cache, rewards, flags, rollback)
data.state = hashedState.data
preset, fhState[], blck, cache, rewards, flags, rollback)
data.state = fhState.hbsPhase0.data
decodeAndProcess(BlockInput):
state_transition(defaultRuntimePreset, data, data.beaconBlock, flags, noRollback)
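The wrapper above also shows the lift pattern this diff applies wherever only a bare phase0 BeaconState is available: wrap it in a ForkedHashedBeaconState together with its hash_tree_root and tag it as forkPhase0. A minimal sketch, with someState as an illustrative stand-in for any phase0 state:

# Sketch only: lift a phase0 state into the forked wrapper and read it back out.
var someState: phase0.BeaconState   # illustrative input
let fhState = (ref ForkedHashedBeaconState)(
  hbsPhase0: phase0.HashedBeaconState(
    data: someState,                   # the bare phase0 state
    root: hash_tree_root(someState)),  # keep its current root alongside it
  beaconStateFork: forkPhase0)
# ... run the forked state_transition on fhState[] ...
someState = fhState.hbsPhase0.data     # extract the (possibly updated) state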
View File
@ -19,15 +19,15 @@ import
options, random, tables, os,
confutils, chronicles, eth/db/kvstore_sqlite3,
eth/keys,
../tests/[testblockutil],
../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, presets,
helpers, validator, signatures, state_transition],
../tests/testblockutil,
../beacon_chain/spec/[beaconstate, crypto, datatypes, digest,
forkedbeaconstate_helpers, presets,
helpers, signatures, state_transition],
../beacon_chain/[beacon_node_types, beacon_chain_db, extras],
../beacon_chain/eth1/eth1_monitor,
../beacon_chain/validators/validator_pool,
../beacon_chain/consensus_object_pools/[blockchain_dag, block_quarantine,
block_clearance, attestation_pool,
statedata_helpers],
block_clearance, attestation_pool],
../beacon_chain/ssz/[merkleization, ssz_serialization],
./simutils
@ -97,22 +97,22 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
dag.withState(tmpState[], attestationHead):
let committees_per_slot =
get_committee_count_per_slot(stateData, slot.epoch, cache)
get_committee_count_per_slot(stateData.data, slot.epoch, cache)
for committee_index in 0'u64..<committees_per_slot:
let committee = get_beacon_committee(
stateData, slot, committee_index.CommitteeIndex, cache)
stateData.data, slot, committee_index.CommitteeIndex, cache)
for index_in_committee, validatorIdx in committee:
if rand(r, 1.0) <= attesterRatio:
let
data = makeAttestationData(
stateData, slot, committee_index.CommitteeIndex, blck.root)
stateData.data, slot, committee_index.CommitteeIndex, blck.root)
sig =
get_attestation_signature(getStateField(stateData, fork),
getStateField(stateData, genesis_validators_root),
get_attestation_signature(getStateField(stateData.data, fork),
getStateField(stateData.data, genesis_validators_root),
data, hackPrivKey(
getStateField(stateData, validators)[validatorIdx]))
getStateField(stateData.data, validators)[validatorIdx]))
var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
aggregation_bits.setBit index_in_committee
@ -134,25 +134,25 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
let
finalizedEpochRef = dag.getFinalizedEpochRef()
proposerIdx = get_beacon_proposer_index(
stateData.data.data, cache).get()
stateData.data, cache, getStateField(stateData.data, slot)).get()
privKey = hackPrivKey(
getStateField(stateData, validators)[proposerIdx])
getStateField(stateData.data, validators)[proposerIdx])
eth1ProposalData = eth1Chain.getBlockProposalData(
stateData,
stateData.data,
finalizedEpochRef.eth1_data,
finalizedEpochRef.eth1_deposit_index)
message = makeBeaconBlock(
runtimePreset,
hashedState,
stateData.data.hbsPhase0,
proposerIdx,
head.root,
privKey.genRandaoReveal(
getStateField(stateData, fork),
getStateField(stateData, genesis_validators_root),
getStateField(stateData.data, fork),
getStateField(stateData.data, genesis_validators_root),
slot).toValidatorSig(),
eth1ProposalData.vote,
default(GraffitiBytes),
attPool.getAttestationsForBlock(stateData, cache),
attPool.getAttestationsForTestBlock(stateData, cache),
eth1ProposalData.deposits,
@[],
@[],
@ -172,8 +172,8 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
# Careful, state no longer valid after here because of the await..
newBlock.signature = withTimerRet(timers[tSignBlock]):
get_block_signature(
getStateField(stateData, fork),
getStateField(stateData, genesis_validators_root),
getStateField(stateData.data, fork),
getStateField(stateData.data, genesis_validators_root),
newBlock.message.slot,
blockRoot, privKey).toValidatorSig()
@ -235,7 +235,7 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
# TODO if attestation pool was smarter, it would include older attestations
# too!
verifyConsensus(dag.headState, attesterRatio * blockRatio)
verifyConsensus(dag.headState.data, attesterRatio * blockRatio)
if t == tEpoch:
echo &". slot: {shortLog(slot)} ",
@ -252,4 +252,4 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
echo "Done!"
printTimers(dag.headState, attesters, true, timers)
printTimers(dag.headState.data, attesters, true, timers)
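Throughout these call sites, getStateField(stateData, field) becomes getStateField(stateData.data, field), so the accessor now takes the ForkedHashedBeaconState itself, and direct .root reads give way to getStateRoot(state). Below is a hypothetical shape such an accessor could take; the actual template in forkedbeaconstate_helpers may well differ, and (as later hunks show) the real one also works as an assignment target. Only the field and enum names (hbsPhase0, hbsAltair, forkPhase0, forkAltair) are taken from this diff:

# Hypothetical sketch, not the real helper from forkedbeaconstate_helpers.
template getStateField(x: ForkedHashedBeaconState, field: untyped): untyped =
  (case x.beaconStateFork
   of forkPhase0: x.hbsPhase0.data.field
   of forkAltair: x.hbsAltair.data.field)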
View File
@ -3,9 +3,10 @@ import
../tests/testblockutil,
../beacon_chain/[extras, beacon_chain_db],
../beacon_chain/ssz/[merkleization, ssz_serialization],
../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers, presets],
../beacon_chain/consensus_object_pools/[
blockchain_dag, block_pools_types, statedata_helpers],
../beacon_chain/spec/[
beaconstate, crypto, datatypes, digest, forkedbeaconstate_helpers,
helpers, presets],
../beacon_chain/consensus_object_pools/[blockchain_dag, block_pools_types],
../beacon_chain/eth1/eth1_monitor
template withTimer*(stats: var RunningStat, body: untyped) =
@ -43,7 +44,7 @@ func verifyConsensus*(state: BeaconState, attesterRatio: auto) =
if current_epoch >= 4:
doAssert state.finalized_checkpoint.epoch + 2 >= current_epoch
func verifyConsensus*(state: StateData, attesterRatio: auto) =
func verifyConsensus*(state: ForkedHashedBeaconState, attesterRatio: auto) =
if attesterRatio < 0.63:
doAssert getStateField(state, current_justified_checkpoint).epoch == 0
doAssert getStateField(state, finalized_checkpoint).epoch == 0
@ -144,7 +145,7 @@ proc printTimers*[Timers: enum](
printTimers(validate, timers)
proc printTimers*[Timers: enum](
state: StateData, attesters: RunningStat, validate: bool,
state: ForkedHashedBeaconState, attesters: RunningStat, validate: bool,
timers: array[Timers, RunningStat]) =
echo "Validators: ", getStateField(state, validators).len, ", epoch length: ", SLOTS_PER_EPOCH
echo "Validators per attestation (mean): ", attesters.mean
View File
@ -13,7 +13,8 @@ import
strformat,
options, sequtils, random, tables,
../tests/testblockutil,
../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
../beacon_chain/spec/[
beaconstate, crypto, datatypes, digest, forkedbeaconstate_helpers, helpers],
../beacon_chain/extras,
../beacon_chain/ssz/[merkleization, ssz_serialization],
./simutils
@ -42,15 +43,16 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
validate = true):
let
flags = if validate: {} else: {skipBlsValidation}
(state, _) = loadGenesis(validators, validate)
genesisBlock = get_initial_beacon_block(state.data)
(hashedState, _) = loadGenesis(validators, validate)
genesisBlock = get_initial_beacon_block(hashedState.data)
state = (ref ForkedHashedBeaconState)(
hbsPhase0: hashedState[], beaconStateFork: forkPhase0)
echo "Starting simulation..."
var
attestations = initTable[Slot, seq[Attestation]]()
latest_block_root = hash_tree_root(genesisBlock.message)
blockrefs = @[BlockRef(root: latest_block_root, slot: 0.Slot)]
timers: array[Timers, RunningStat]
attesters: RunningStat
r = initRand(1)
@ -59,16 +61,16 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
proc maybeWrite(last: bool) =
if write_last_json:
if state[].data.slot mod json_interval.uint64 == 0:
if getStateField(state[], slot) mod json_interval.uint64 == 0:
write(stdout, ":")
else:
write(stdout, ".")
if last:
writeJson("state.json", state[])
writeJson("state.json", state[].hbsPhase0)
else:
if state[].data.slot mod json_interval.uint64 == 0:
writeJson(jsonName(prefix, state[].data.slot), state[].data)
if getStateField(state[], slot) mod json_interval.uint64 == 0:
writeJson(jsonName(prefix, getStateField(state[], slot)), state[].hbsPhase0.data)
write(stdout, ":")
else:
write(stdout, ".")
@ -79,10 +81,10 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
for i in 0..<slots:
maybeWrite(false)
verifyConsensus(state[].data, attesterRatio)
verifyConsensus(state[].hbsPhase0.data, attesterRatio)
let
attestations_idx = state[].data.slot
attestations_idx = getStateField(state[], slot)
blockAttestations = attestations.getOrDefault(attestations_idx)
attestations.del attestations_idx
@ -90,8 +92,8 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
SLOTS_PER_EPOCH + MIN_ATTESTATION_INCLUSION_DELAY
let t =
if (state[].data.slot > GENESIS_SLOT and
(state[].data.slot + 1).isEpoch): tEpoch
if (getStateField(state[], slot) > GENESIS_SLOT and
(getStateField(state[], slot) + 1).isEpoch): tEpoch
else: tBlock
withTimer(timers[t]):
@ -107,20 +109,15 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
# work for every slot - we'll randomize it deterministically to give
# some variation
let
target_slot = state[].data.slot + MIN_ATTESTATION_INCLUSION_DELAY - 1
target_slot = getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY - 1
committees_per_slot =
get_committee_count_per_slot(state[].data, target_slot.epoch, cache)
blockrefs.add BlockRef(
root: latest_block_root, parent: blockrefs[^1], slot: target_slot)
get_committee_count_per_slot(state[], target_slot.epoch, cache)
let
scass = withTimerRet(timers[tShuffle]):
mapIt(
0 ..< committees_per_slot.int,
get_beacon_committee(state[].data, target_slot, it.CommitteeIndex, cache))
stateData = (ref StateData)(data: state[], blck: blockrefs[^1])
get_beacon_committee(state[], target_slot, it.CommitteeIndex, cache))
for i, scas in scass:
var
@ -135,13 +132,13 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
if (rand(r, high(int)).float * attesterRatio).int <= high(int):
if first:
attestation =
makeAttestation(stateData[], latest_block_root, scas, target_slot,
makeAttestation(state[], latest_block_root, scas, target_slot,
i.CommitteeIndex, v, cache, flags)
agg.init(attestation.signature.load.get())
first = false
else:
let att2 =
makeAttestation(stateData[], latest_block_root, scas, target_slot,
makeAttestation(state[], latest_block_root, scas, target_slot,
i.CommitteeIndex, v, cache, flags)
if not att2.aggregation_bits.overlaps(attestation.aggregation_bits):
attestation.aggregation_bits.incl(att2.aggregation_bits)
@ -164,13 +161,13 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
flushFile(stdout)
if (state[].data.slot).isEpoch:
echo &" slot: {shortLog(state[].data.slot)} ",
&"epoch: {shortLog(state[].data.get_current_epoch())}"
if getStateField(state[], slot).isEpoch:
echo &" slot: {shortLog(getStateField(state[], slot))} ",
&"epoch: {shortLog(state[].get_current_epoch())}"
maybeWrite(true) # catch that last state as well..
echo "Done!"
printTimers(state[].data, attesters, validate, timers)
printTimers(state[].hbsPhase0.data, attesters, validate, timers)
View File
@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -10,21 +10,24 @@
import
# Specs
../../beacon_chain/spec/[datatypes, state_transition]
../../beacon_chain/spec/[
datatypes, forkedbeaconstate_helpers, state_transition]
proc nextEpoch*(state: var HashedBeaconState) =
proc nextEpoch*(state: var ForkedHashedBeaconState) =
## Transition to the start of the next epoch
var
cache = StateCache()
rewards = RewardInfo()
let slot =
state.data.slot + SLOTS_PER_EPOCH - (state.data.slot mod SLOTS_PER_EPOCH)
doAssert process_slots(state, slot, cache, rewards)
getStateField(state, slot) + SLOTS_PER_EPOCH -
(getStateField(state, slot) mod SLOTS_PER_EPOCH)
doAssert process_slots(state, slot, cache, rewards, {}, FAR_FUTURE_SLOT)
proc nextSlot*(state: var HashedBeaconState) =
proc nextSlot*(state: var ForkedHashedBeaconState) =
## Transition to the next slot
var
cache = StateCache()
rewards = RewardInfo()
doAssert process_slots(state, state.data.slot + 1, cache, rewards)
doAssert process_slots(
state, getStateField(state, slot) + 1, cache, rewards, {}, FAR_FUTURE_SLOT)
View File
@ -11,7 +11,8 @@ import
# Standard library
os, sequtils, chronicles,
# Beacon chain internals
../../../beacon_chain/spec/[crypto, state_transition, presets],
../../../beacon_chain/spec/[
crypto, forkedbeaconstate_helpers, presets, state_transition],
../../../beacon_chain/spec/datatypes/altair,
../../../beacon_chain/ssz,
# Test utilities
@ -38,8 +39,8 @@ proc runTest(testName, testDir, unitTestName: string) =
test prefix & testName & " - " & unitTestName & preset():
var
preState = newClone(parseTest(testPath/"pre.ssz_snappy", SSZ, BeaconState))
hashedPreState = (ref HashedBeaconState)(
data: preState[], root: hash_tree_root(preState[]))
fhPreState = (ref ForkedHashedBeaconState)(hbsAltair: altair.HashedBeaconState(
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkAltair)
cache = StateCache()
rewards = RewardInfo()
@ -51,12 +52,12 @@ proc runTest(testName, testDir, unitTestName: string) =
if hasPostState:
let success = state_transition(
defaultRuntimePreset, hashedPreState[], blck, cache, rewards, flags = {},
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
noRollback)
doAssert success, "Failure when applying block " & $i
else:
let success = state_transition(
defaultRuntimePreset, hashedPreState[], blck, cache, rewards, flags = {},
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
noRollback)
doAssert (i + 1 < numBlocks) or not success,
"We didn't expect these invalid blocks to be processed"
@ -64,8 +65,8 @@ proc runTest(testName, testDir, unitTestName: string) =
if hasPostState:
let postState = newClone(parseTest(testPath/"post.ssz_snappy", SSZ, BeaconState))
when false:
reportDiff(hashedPreState.data, postState)
doAssert hashedPreState.root == postState[].hash_tree_root()
reportDiff(fhPreState.hbsAltair.data, postState)
doAssert getStateRoot(fhPreState[]) == postState[].hash_tree_root()
`testImpl _ blck _ testName`()
View File
@ -12,7 +12,7 @@ import
# Standard library
os, strutils,
# Beacon chain internals
../../../beacon_chain/spec/state_transition,
../../../beacon_chain/spec/[forkedbeaconstate_helpers, state_transition],
../../../beacon_chain/spec/datatypes/altair,
# Test utilities
../../testutil,
@ -30,18 +30,21 @@ proc runTest(identifier: string) =
test "Slots - " & identifier:
var
preState = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, BeaconState))
hashedPreState = (ref HashedBeaconState)(
data: preState[], root: hash_tree_root(preState[]))
fhPreState = (ref ForkedHashedBeaconState)(
hbsAltair: altair.HashedBeaconState(
data: preState[], root: hash_tree_root(preState[])),
beaconStateFork: forkAltair)
cache = StateCache()
rewards: RewardInfo
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, BeaconState))
check:
process_slots(
hashedPreState[], hashedPreState.data.slot + num_slots, cache, rewards)
fhPreState[], getStateField(fhPreState[], slot) + num_slots, cache,
rewards, {}, FAR_FUTURE_SLOT)
hashedPreState.root == postState[].hash_tree_root()
let newPreState = newClone(hashedPreState.data)
getStateRoot(fhPreState[]) == postState[].hash_tree_root()
let newPreState = newClone(fhPreState.hbsAltair.data)
reportDiff(newPreState, postState)
`testImpl _ slots _ identifier`()
View File
@ -14,7 +14,8 @@ import
# Status internal
faststreams, streams,
# Beacon chain internals
../../../beacon_chain/spec/[crypto, state_transition, presets],
../../../beacon_chain/spec/[
crypto, state_transition, presets, forkedbeaconstate_helpers, helpers],
../../../beacon_chain/spec/datatypes/[phase0, altair],
../../../beacon_chain/[extras, ssz],
# Test utilities
@ -48,7 +49,7 @@ proc runTest(testName, testDir, unitTestName: string) =
test testName & " - " & unitTestName & preset():
var
preState = newClone(parseTest(testPath/"pre.ssz_snappy", SSZ, phase0.BeaconState))
sdPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
cache = StateCache()
rewards = RewardInfo()
@ -62,25 +63,25 @@ proc runTest(testName, testDir, unitTestName: string) =
let blck = parseTest(testPath/"blocks_" & $i & ".ssz_snappy", SSZ, phase0.SignedBeaconBlock)
let success = state_transition(
defaultRuntimePreset, sdPreState[], blck,
defaultRuntimePreset, fhPreState[], blck,
cache, rewards,
flags = {skipStateRootValidation}, noRollback,
transitionEpoch.fork_epoch.Epoch)
transitionEpoch.fork_epoch.Epoch.compute_start_slot_at_epoch)
doAssert success, "Failure when applying block " & $i
else:
let blck = parseTest(testPath/"blocks_" & $i & ".ssz_snappy", SSZ, altair.SignedBeaconBlock)
let success = state_transition(
defaultRuntimePreset, sdPreState[], blck,
defaultRuntimePreset, fhPreState[], blck,
cache, rewards,
flags = {skipStateRootValidation}, noRollback,
transitionEpoch.fork_epoch.Epoch)
transitionEpoch.fork_epoch.Epoch.compute_start_slot_at_epoch)
doAssert success, "Failure when applying block " & $i
let postState = newClone(parseTest(testPath/"post.ssz_snappy", SSZ, altair.BeaconState))
when false:
reportDiff(sdPreState.data, postState)
doAssert getStateRoot(sdPreState[]) == postState[].hash_tree_root()
reportDiff(fhPreState.data, postState)
doAssert getStateRoot(fhPreState[]) == postState[].hash_tree_root()
`testImpl _ blck _ testName`()
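One detail in the hunk above: the fork cutoff handed to state_transition is now a slot, obtained with compute_start_slot_at_epoch from the scheduled fork epoch, rather than the raw Epoch value. A sketch of the call, where altairForkSlot is an illustrative local name and every other identifier comes from the diff:

# Sketch: apply one transition-test block, upgrading at the fork's first slot.
let
  altairForkSlot =
    transitionEpoch.fork_epoch.Epoch.compute_start_slot_at_epoch
  success = state_transition(
    defaultRuntimePreset, fhPreState[], blck, cache, rewards,
    flags = {skipStateRootValidation}, noRollback, altairForkSlot)
doAssert success, "Failure when applying block " & $i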
View File
@ -11,7 +11,8 @@ import
# Standard library
os, sequtils, chronicles,
# Beacon chain internals
../../../beacon_chain/spec/[crypto, state_transition, presets],
../../../beacon_chain/spec/[
crypto, forkedbeaconstate_helpers, presets, state_transition],
../../../beacon_chain/spec/datatypes/phase0,
../../../beacon_chain/ssz,
# Test utilities
@ -38,8 +39,8 @@ proc runTest(testName, testDir, unitTestName: string) =
test prefix & testName & " - " & unitTestName & preset():
var
preState = newClone(parseTest(testPath/"pre.ssz_snappy", SSZ, BeaconState))
hashedPreState = (ref HashedBeaconState)(
data: preState[], root: hash_tree_root(preState[]))
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
cache = StateCache()
rewards = RewardInfo()
@ -51,12 +52,12 @@ proc runTest(testName, testDir, unitTestName: string) =
if hasPostState:
let success = state_transition(
defaultRuntimePreset, hashedPreState[], blck, cache, rewards, flags = {},
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
noRollback)
doAssert success, "Failure when applying block " & $i
else:
let success = state_transition(
defaultRuntimePreset, hashedPreState[], blck, cache, rewards, flags = {},
defaultRuntimePreset, fhPreState[], blck, cache, rewards, flags = {},
noRollback)
doAssert (i + 1 < numBlocks) or not success,
"We didn't expect these invalid blocks to be processed"
@ -64,8 +65,8 @@ proc runTest(testName, testDir, unitTestName: string) =
if hasPostState:
let postState = newClone(parseTest(testPath/"post.ssz_snappy", SSZ, BeaconState))
when false:
reportDiff(hashedPreState.data, postState)
doAssert hashedPreState.root == postState[].hash_tree_root()
reportDiff(fhPreState.hbsPhase0.data, postState)
doAssert getStateRoot(fhPreState[]) == postState[].hash_tree_root()
`testImpl _ blck _ testName`()
View File
@ -12,7 +12,7 @@ import
# Standard library
os, strutils,
# Beacon chain internals
../../../beacon_chain/spec/state_transition,
../../../beacon_chain/spec/[forkedbeaconstate_helpers, state_transition],
../../../beacon_chain/spec/datatypes/phase0,
# Test utilities
../../testutil,
@ -30,18 +30,19 @@ proc runTest(identifier: string) =
test "Slots - " & identifier:
var
preState = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, BeaconState))
hashedPreState = (ref HashedBeaconState)(
data: preState[], root: hash_tree_root(preState[]))
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
cache = StateCache()
rewards: RewardInfo
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, BeaconState))
check:
process_slots(
hashedPreState[], hashedPreState.data.slot + num_slots, cache, rewards)
fhPreState[], getStateField(fhPreState[], slot) + num_slots, cache,
rewards, {}, FAR_FUTURE_SLOT)
hashedPreState.root == postState[].hash_tree_root()
let newPreState = newClone(hashedPreState.data)
getStateRoot(fhPreState[]) == postState[].hash_tree_root()
let newPreState = newClone(fhPreState.hbsPhase0.data)
reportDiff(newPreState, postState)
`testImpl _ slots _ identifier`()
View File
@ -14,7 +14,8 @@
import
stew/results,
# Specs
../../beacon_chain/spec/[beaconstate, datatypes, helpers],
../../beacon_chain/spec/[
beaconstate, datatypes, forkedbeaconstate_helpers, helpers],
# Mock helpers
../mocking/[mock_genesis, mock_attestations, mock_state],
../testutil
@ -22,8 +23,10 @@ import
suite "[Unit - Spec - Block processing] Attestations " & preset():
const NumValidators = uint64(8) * SLOTS_PER_EPOCH
let genesisState = newClone(initGenesisState(NumValidators))
doAssert genesisState.data.validators.lenu64 == NumValidators
let genesisState = (ref ForkedHashedBeaconState)(
hbsPhase0: initGenesisState(NumValidators), beaconStateFork: forkPhase0)
doAssert getStateField(genesisState[], validators).lenu64 == NumValidators
template valid_attestation(name: string, body: untyped): untyped {.dirty.}=
# Process a valid attestation
@ -41,30 +44,34 @@ suite "[Unit - Spec - Block processing] Attestations " & preset():
# Params for sanity checks
# ----------------------------------------
let
current_epoch_count = state.data.current_epoch_attestations.len
previous_epoch_count = state.data.previous_epoch_attestations.len
current_epoch_count =
state.hbsPhase0.data.current_epoch_attestations.len
previous_epoch_count =
state.hbsPhase0.data.previous_epoch_attestations.len
# State transition
# ----------------------------------------
var cache = StateCache()
check process_attestation(
state.data, attestation, flags = {}, cache
state.hbsPhase0.data, attestation, flags = {}, cache
).isOk
# Check that the attestation was processed
if attestation.data.target.epoch == get_current_epoch(state.data):
check(state.data.current_epoch_attestations.len == current_epoch_count + 1)
if attestation.data.target.epoch == get_current_epoch(state[]):
check(state.hbsPhase0.data.current_epoch_attestations.len ==
current_epoch_count + 1)
else:
check(state.data.previous_epoch_attestations.len == previous_epoch_count + 1)
check(state.hbsPhase0.data.previous_epoch_attestations.len ==
previous_epoch_count + 1)
valid_attestation("Valid attestation"):
let attestation = mockAttestation(state.data)
state.data.slot += MIN_ATTESTATION_INCLUSION_DELAY
let attestation = mockAttestation(state.hbsPhase0.data)
getStateField(state[], slot) += MIN_ATTESTATION_INCLUSION_DELAY
valid_attestation("Valid attestation from previous epoch"):
nextSlot(state[])
let attestation = mockAttestation(state.data)
state.data.slot = Slot(SLOTS_PER_EPOCH - 1)
let attestation = mockAttestation(state.hbsPhase0.data)
getStateField(state[], slot) = Slot(SLOTS_PER_EPOCH - 1)
nextEpoch(state[])
# TODO: regression BLS V0.10.1
View File
@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -8,25 +8,27 @@
import
# Specs
../../beacon_chain/spec/[
datatypes, state_transition_epoch, state_transition]
datatypes, forkedbeaconstate_helpers, state_transition,
state_transition_epoch]
proc processSlotsUntilEndCurrentEpoch(state: var HashedBeaconState) =
proc processSlotsUntilEndCurrentEpoch(state: var ForkedHashedBeaconState) =
# Process all slots until the end of the last slot of the current epoch
var
cache = StateCache()
rewards = RewardInfo()
let slot =
state.data.slot + SLOTS_PER_EPOCH - (state.data.slot mod SLOTS_PER_EPOCH)
getStateField(state, slot) + SLOTS_PER_EPOCH -
(getStateField(state, slot) mod SLOTS_PER_EPOCH)
# Transition to slot before the epoch state transition
discard process_slots(state, slot - 1, cache, rewards)
discard process_slots(state, slot - 1, cache, rewards, {}, FAR_FUTURE_SLOT)
# For the last slot of the epoch,
# only process_slot without process_epoch
# (see process_slots()) - state.root is invalid after here!
process_slot(state.data, state.root)
process_slot(state.hbsPhase0.data, getStateRoot(state))
proc transitionEpochUntilJustificationFinalization*(state: var HashedBeaconState) =
proc transitionEpochUntilJustificationFinalization*(state: var ForkedHashedBeaconState) =
# Process slots and do the epoch transition until crosslinks
processSlotsUntilEndCurrentEpoch(state)
@ -34,7 +36,7 @@ proc transitionEpochUntilJustificationFinalization*(state: var HashedBeaconState
cache = StateCache()
rewards = RewardInfo()
rewards.init(state.data)
rewards.process_attestations(state.data, cache)
rewards.init(state.hbsPhase0.data)
rewards.process_attestations(state.hbsPhase0.data, cache)
process_justification_and_finalization(
state.data, rewards.total_balances)
state.hbsPhase0.data, rewards.total_balances)
View File
@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -12,9 +12,9 @@ import
# Vendored packages
stew/bitops2,
# Specs
../../beacon_chain/spec/datatypes,
../../beacon_chain/spec/[datatypes, forkedbeaconstate_helpers],
# Test helpers
../mocking/[mock_genesis],
../mocking/mock_genesis,
./epoch_utils,
./justification_finalization_helpers,
../testutil
@ -23,10 +23,11 @@ import
# (source) https://github.com/protolambda/eth2-docs#justification-and-finalization
# for a visualization of finalization rules
proc finalizeOn234(state: var HashedBeaconState, epoch: Epoch, sufficient_support: bool) =
proc finalizeOn234(
state: var ForkedHashedBeaconState, epoch: Epoch, sufficient_support: bool) =
## Check finalization on rule 1 "234"
doAssert epoch > 4
state.data.slot = Slot((epoch * SLOTS_PER_EPOCH) - 1) # Skip ahead to just before epoch
getStateField(state, slot) = Slot((epoch * SLOTS_PER_EPOCH) - 1) # Skip ahead to just before epoch
# 43210 -- epochs ago
# 3210x -- justification bitfields indices
@ -34,22 +35,22 @@ proc finalizeOn234(state: var HashedBeaconState, epoch: Epoch, sufficient_suppor
# checkpoints for epochs ago
let (c1, c2, c3, c4, _) = getCheckpoints(epoch)
putCheckpointsInBlockRoots(state.data, [c1, c2, c3, c4])
putCheckpointsInBlockRoots(state.hbsPhase0.data, [c1, c2, c3, c4])
# Save for final checks
let old_finalized = state.data.finalized_checkpoint
let old_finalized = getStateField(state, finalized_checkpoint)
# Mock the state
state.data.previous_justified_checkpoint = c4
state.data.current_justified_checkpoint = c3
state.data.justification_bits = 0'u8 # Bitvector of length 4
getStateField(state, previous_justified_checkpoint) = c4
getStateField(state, current_justified_checkpoint) = c3
getStateField(state, justification_bits) = 0'u8 # Bitvector of length 4
# mock 3rd and 4th latest epochs as justified
# indices are pre-shift
state.data.justification_bits.setBit 1
state.data.justification_bits.setBit 2
getStateField(state, justification_bits).setBit 1
getStateField(state, justification_bits).setBit 2
# mock the 2nd latest epoch as justifiable, with 4th as the source
addMockAttestations(
state.data,
state.hbsPhase0.data,
epoch = epoch - 2,
source = c4,
target = c2,
@ -60,18 +61,18 @@ proc finalizeOn234(state: var HashedBeaconState, epoch: Epoch, sufficient_suppor
transitionEpochUntilJustificationFinalization(state)
# Checks
doAssert state.data.previous_justified_checkpoint == c3 # changed to old current
doAssert getStateField(state, previous_justified_checkpoint) == c3 # changed to old current
if sufficient_support:
doAssert state.data.current_justified_checkpoint == c2 # changed to second latest
doAssert state.data.finalized_checkpoint == c4 # finalized old previous justified epoch
doAssert getStateField(state, current_justified_checkpoint) == c2 # changed to second latest
doAssert getStateField(state, finalized_checkpoint) == c4 # finalized old previous justified epoch
else:
doAssert state.data.current_justified_checkpoint == c3 # still old current
doAssert state.data.finalized_checkpoint == old_finalized # no new finalized checkpoint
doAssert getStateField(state, current_justified_checkpoint) == c3 # still old current
doAssert getStateField(state, finalized_checkpoint) == old_finalized # no new finalized checkpoint
proc finalizeOn23(state: var HashedBeaconState, epoch: Epoch, sufficient_support: bool) =
proc finalizeOn23(state: var ForkedHashedBeaconState, epoch: Epoch, sufficient_support: bool) =
## Check finalization on rule 2 "23"
doAssert epoch > 3
state.data.slot = Slot((epoch * SLOTS_PER_EPOCH) - 1) # Skip ahead to just before epoch
getStateField(state, slot) = Slot((epoch * SLOTS_PER_EPOCH) - 1) # Skip ahead to just before epoch
# 43210 -- epochs ago
# 210xx -- justification bitfields indices preshift
@ -80,21 +81,21 @@ proc finalizeOn23(state: var HashedBeaconState, epoch: Epoch, sufficient_support
# checkpoints for epochs ago
let (c1, c2, c3, _, _) = getCheckpoints(epoch)
putCheckpointsInBlockRoots(state.data, [c1, c2, c3])
putCheckpointsInBlockRoots(state.hbsPhase0.data, [c1, c2, c3])
# Save for final checks
let old_finalized = state.data.finalized_checkpoint
let old_finalized = getStateField(state, finalized_checkpoint)
# Mock the state
state.data.previous_justified_checkpoint = c3
state.data.current_justified_checkpoint = c3
state.data.justification_bits = 0'u8 # Bitvector of length 4
getStateField(state, previous_justified_checkpoint) = c3
getStateField(state, current_justified_checkpoint) = c3
getStateField(state, justification_bits) = 0'u8 # Bitvector of length 4
# mock 3rd as justified
# indices are pre-shift
state.data.justification_bits.setBit 1
getStateField(state, justification_bits).setBit 1
# mock the 2nd latest epoch as justifiable, with 3rd as the source
addMockAttestations(
state.data,
state.hbsPhase0.data,
epoch = epoch - 2,
source = c3,
target = c2,
@ -105,18 +106,18 @@ proc finalizeOn23(state: var HashedBeaconState, epoch: Epoch, sufficient_support
transitionEpochUntilJustificationFinalization(state)
# Checks
doAssert state.data.previous_justified_checkpoint == c3 # changed to old current
doAssert getStateField(state, previous_justified_checkpoint) == c3 # changed to old current
if sufficient_support:
doAssert state.data.current_justified_checkpoint == c2 # changed to second latest
doAssert state.data.finalized_checkpoint == c3 # finalized old previous justified epoch
doAssert getStateField(state, current_justified_checkpoint) == c2 # changed to second latest
doAssert getStateField(state, finalized_checkpoint) == c3 # finalized old previous justified epoch
else:
doAssert state.data.current_justified_checkpoint == c3 # still old current
doAssert state.data.finalized_checkpoint == old_finalized # no new finalized checkpoint
doAssert getStateField(state, current_justified_checkpoint) == c3 # still old current
doAssert getStateField(state, finalized_checkpoint) == old_finalized # no new finalized checkpoint
proc finalizeOn123(state: var HashedBeaconState, epoch: Epoch, sufficient_support: bool) =
proc finalizeOn123(state: var ForkedHashedBeaconState, epoch: Epoch, sufficient_support: bool) =
## Check finalization on rule 3 "123"
doAssert epoch > 5
state.data.slot = Slot((epoch * SLOTS_PER_EPOCH) - 1) # Skip ahead to just before epoch
getStateField(state, slot) = Slot((epoch * SLOTS_PER_EPOCH) - 1) # Skip ahead to just before epoch
# 43210 -- epochs ago
# 210xx -- justification bitfields indices preshift
@ -125,21 +126,21 @@ proc finalizeOn123(state: var HashedBeaconState, epoch: Epoch, sufficient_suppor
# checkpoints for epochs ago
let (c1, c2, c3, c4, c5) = getCheckpoints(epoch)
putCheckpointsInBlockRoots(state.data, [c1, c2, c3, c4, c5])
putCheckpointsInBlockRoots(state.hbsPhase0.data, [c1, c2, c3, c4, c5])
# Save for final checks
let old_finalized = state.data.finalized_checkpoint
let old_finalized = getStateField(state, finalized_checkpoint)
# Mock the state
state.data.previous_justified_checkpoint = c5
state.data.current_justified_checkpoint = c3
state.data.justification_bits = 0'u8 # Bitvector of length 4
getStateField(state, previous_justified_checkpoint) = c5
getStateField(state, current_justified_checkpoint) = c3
getStateField(state, justification_bits) = 0'u8 # Bitvector of length 4
# mock 3rd as justified
# indices are pre-shift
state.data.justification_bits.setBit 1
getStateField(state, justification_bits).setBit 1
# mock the 2nd latest epoch as justifiable, with 5th as the source
addMockAttestations(
state.data,
state.hbsPhase0.data,
epoch = epoch - 2,
source = c5,
target = c2,
@ -147,7 +148,7 @@ proc finalizeOn123(state: var HashedBeaconState, epoch: Epoch, sufficient_suppor
)
# mock the 1st latest epoch as justifiable with 3rd as source
addMockAttestations(
state.data,
state.hbsPhase0.data,
epoch = epoch - 1,
source = c3,
target = c1,
@ -158,18 +159,18 @@ proc finalizeOn123(state: var HashedBeaconState, epoch: Epoch, sufficient_suppor
transitionEpochUntilJustificationFinalization(state)
# Checks
doAssert state.data.previous_justified_checkpoint == c3 # changed to old current
doAssert getStateField(state, previous_justified_checkpoint) == c3 # changed to old current
if sufficient_support:
doAssert state.data.current_justified_checkpoint == c1 # changed to second latest
doAssert state.data.finalized_checkpoint == c3 # finalized old previous justified epoch
doAssert getStateField(state, current_justified_checkpoint) == c1 # changed to second latest
doAssert getStateField(state, finalized_checkpoint) == c3 # finalized old previous justified epoch
else:
doAssert state.data.current_justified_checkpoint == c3 # still old current
doAssert state.data.finalized_checkpoint == old_finalized # no new finalized checkpoint
doAssert getStateField(state, current_justified_checkpoint) == c3 # still old current
doAssert getStateField(state, finalized_checkpoint) == old_finalized # no new finalized checkpoint
proc finalizeOn12(state: var HashedBeaconState, epoch: Epoch, sufficient_support: bool) =
proc finalizeOn12(state: var ForkedHashedBeaconState, epoch: Epoch, sufficient_support: bool) =
## Check finalization on rule 4 "12"
doAssert epoch > 2
state.data.slot = Slot((epoch * SLOTS_PER_EPOCH) - 1) # Skip ahead to just before epoch
getStateField(state, slot) = Slot((epoch * SLOTS_PER_EPOCH) - 1) # Skip ahead to just before epoch
# 43210 -- epochs ago
# 210xx -- justification bitfields indices preshift
@ -178,21 +179,21 @@ proc finalizeOn12(state: var HashedBeaconState, epoch: Epoch, sufficient_support
# checkpoints for epochs ago
let (c1, c2, _, _, _) = getCheckpoints(epoch)
putCheckpointsInBlockRoots(state.data, [c1, c2])
putCheckpointsInBlockRoots(state.hbsPhase0.data, [c1, c2])
# Save for final checks
let old_finalized = state.data.finalized_checkpoint
let old_finalized = getStateField(state, finalized_checkpoint)
# Mock the state
state.data.previous_justified_checkpoint = c2
state.data.current_justified_checkpoint = c2
state.data.justification_bits = 0'u8 # Bitvector of length 4
getStateField(state, previous_justified_checkpoint) = c2
getStateField(state, current_justified_checkpoint) = c2
getStateField(state, justification_bits) = 0'u8 # Bitvector of length 4
# mock 3rd as justified
# indices are pre-shift
state.data.justification_bits.setBit 0
getStateField(state, justification_bits).setBit 0
# mock the 2nd latest epoch as justifiable, with 3rd as the source
addMockAttestations(
state.data,
state.hbsPhase0.data,
epoch = epoch - 1,
source = c2,
target = c1,
@ -203,21 +204,22 @@ proc finalizeOn12(state: var HashedBeaconState, epoch: Epoch, sufficient_support
transitionEpochUntilJustificationFinalization(state)
# Checks
doAssert state.data.previous_justified_checkpoint == c2 # changed to old current
doAssert getStateField(state, previous_justified_checkpoint) == c2 # changed to old current
if sufficient_support:
doAssert state.data.current_justified_checkpoint == c1 # changed to second latest
doAssert state.data.finalized_checkpoint == c2 # finalized old previous justified epoch
doAssert getStateField(state, current_justified_checkpoint) == c1 # changed to second latest
doAssert getStateField(state, finalized_checkpoint) == c2 # finalized old previous justified epoch
else:
doAssert state.data.current_justified_checkpoint == c2 # still old current
doAssert state.data.finalized_checkpoint == old_finalized # no new finalized checkpoint
doAssert getStateField(state, current_justified_checkpoint) == c2 # still old current
doAssert getStateField(state, finalized_checkpoint) == old_finalized # no new finalized checkpoint
proc payload =
suite "[Unit - Spec - Epoch processing] Justification and Finalization " & preset():
echo " Finalization rules are detailed at https://github.com/protolambda/eth2-docs#justification-and-finalization"
const NumValidators = uint64(8) * SLOTS_PER_EPOCH
let genesisState = newClone(initGenesisState(NumValidators))
doAssert genesisState.data.validators.lenu64 == NumValidators
let genesisState = (ref ForkedHashedBeaconState)(
hbsPhase0: initGenesisState(NumValidators), beaconStateFork: forkPhase0)
doAssert getStateField(genesisState[], validators).lenu64 == NumValidators
setup:
var state = assignClone(genesisState[])
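For the epoch-processing suites above, the setup pattern is to build one forked genesis state up front and then give every test its own copy via assignClone, since the finalizeOn* helpers mutate fields in place through getStateField. A brief sketch using the names from the diff:

# Sketch of the per-suite setup pattern shown above.
let genesisState = (ref ForkedHashedBeaconState)(
  hbsPhase0: initGenesisState(NumValidators), beaconStateFork: forkPhase0)
doAssert getStateField(genesisState[], validators).lenu64 == NumValidators

# each test then works on a private copy, because the helpers assign through
# the accessor, e.g. getStateField(state, slot) = Slot((epoch * SLOTS_PER_EPOCH) - 1)
var state = assignClone(genesisState[])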
View File
@ -19,11 +19,10 @@ import
../beacon_chain/gossip_processing/[gossip_validation],
../beacon_chain/fork_choice/[fork_choice_types, fork_choice],
../beacon_chain/consensus_object_pools/[
block_quarantine, blockchain_dag, block_clearance, attestation_pool,
statedata_helpers],
block_quarantine, blockchain_dag, block_clearance, attestation_pool],
../beacon_chain/ssz/merkleization,
../beacon_chain/spec/[crypto, datatypes, digest, state_transition, helpers,
presets],
../beacon_chain/spec/[crypto, datatypes, digest, forkedbeaconstate_helpers,
state_transition, helpers, presets],
# Test utilities
./testutil, ./testdbutil, ./testblockutil
@ -69,14 +68,16 @@ suite "Attestation pool processing" & preset():
rewards: RewardInfo
# Slot 0 is a finalized slot - won't be making attestations for it..
check:
process_slots(state.data, getStateField(state, slot) + 1, cache, rewards)
process_slots(
state.data, getStateField(state.data, slot) + 1, cache, rewards, {},
FAR_FUTURE_SLOT)
test "Can add and retrieve simple attestations" & preset():
let
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
attestation = makeAttestation(state[], state.blck.root, bc0[0], cache)
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
attestation = makeAttestation(state[].data, state.blck.root, bc0[0], cache)
pool[].addAttestation(
attestation, @[bc0[0]], attestation.loadSig,
@ -100,10 +101,10 @@ suite "Attestation pool processing" & preset():
process_slots(
state.data,
getStateField(state, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards)
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards, {}, FAR_FUTURE_SLOT)
let attestations = pool[].getAttestationsForBlock(state[], cache)
let attestations = pool[].getAttestationsForTestBlock(state[], cache)
check:
attestations.len == 1
@ -114,33 +115,33 @@ suite "Attestation pool processing" & preset():
state.data, state.blck.root,
cache, attestations = attestations, nextSlot = false).root
bc1 = get_beacon_committee(
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
att1 = makeAttestation(state[], root1, bc1[0], cache)
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
att1 = makeAttestation(state[].data, root1, bc1[0], cache)
check:
process_slots(
state.data,
getStateField(state, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards)
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards, {}, FAR_FUTURE_SLOT)
check:
# shouldn't include already-included attestations
pool[].getAttestationsForBlock(state[], cache) == []
pool[].getAttestationsForTestBlock(state[], cache) == []
pool[].addAttestation(
att1, @[bc1[0]], att1.loadSig, att1.data.slot)
check:
# but new ones should go in
pool[].getAttestationsForBlock(state[], cache).len() == 1
pool[].getAttestationsForTestBlock(state[], cache).len() == 1
let
att2 = makeAttestation(state[], root1, bc1[1], cache)
att2 = makeAttestation(state[].data, root1, bc1[1], cache)
pool[].addAttestation(
att2, @[bc1[1]], att2.loadSig, att2.data.slot)
let
combined = pool[].getAttestationsForBlock(state[], cache)
combined = pool[].getAttestationsForTestBlock(state[], cache)
check:
# New attestations should be combined with old attestations
@ -152,18 +153,18 @@ suite "Attestation pool processing" & preset():
check:
# readding the combined attestation shouldn't have an effect
pool[].getAttestationsForBlock(state[], cache).len() == 1
pool[].getAttestationsForTestBlock(state[], cache).len() == 1
let
# Someone votes for a different root
att3 = makeAttestation(state[], Eth2Digest(), bc1[2], cache)
att3 = makeAttestation(state[].data, Eth2Digest(), bc1[2], cache)
pool[].addAttestation(
att3, @[bc1[2]], att3.loadSig, att3.data.slot)
check:
# We should now get both attestations for the block, but the aggregate
# should be the one with the most votes
pool[].getAttestationsForBlock(state[], cache).len() == 2
pool[].getAttestationsForTestBlock(state[], cache).len() == 2
pool[].getAggregatedAttestation(2.Slot, 0.CommitteeIndex).
get().aggregation_bits.countOnes() == 2
pool[].getAggregatedAttestation(2.Slot, hash_tree_root(att2.data)).
@ -171,7 +172,7 @@ suite "Attestation pool processing" & preset():
let
# Someone votes for a different root
att4 = makeAttestation(state[], Eth2Digest(), bc1[2], cache)
att4 = makeAttestation(state[].data, Eth2Digest(), bc1[2], cache)
pool[].addAttestation(
att4, @[bc1[2]], att3.loadSig, att3.data.slot)
@ -179,14 +180,14 @@ suite "Attestation pool processing" & preset():
let
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
var
att0 = makeAttestation(state[], state.blck.root, bc0[0], cache)
att0 = makeAttestation(state[].data, state.blck.root, bc0[0], cache)
att0x = att0
att1 = makeAttestation(state[], state.blck.root, bc0[1], cache)
att2 = makeAttestation(state[], state.blck.root, bc0[2], cache)
att3 = makeAttestation(state[], state.blck.root, bc0[3], cache)
att1 = makeAttestation(state[].data, state.blck.root, bc0[1], cache)
att2 = makeAttestation(state[].data, state.blck.root, bc0[2], cache)
att3 = makeAttestation(state[].data, state.blck.root, bc0[3], cache)
# Both attestations include member 2 but neither is a subset of the other
att0.combine(att2)
@ -198,11 +199,11 @@ suite "Attestation pool processing" & preset():
check:
process_slots(
state.data,
getStateField(state, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards)
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards, {}, FAR_FUTURE_SLOT)
check:
pool[].getAttestationsForBlock(state[], cache).len() == 2
pool[].getAttestationsForTestBlock(state[], cache).len() == 2
# Can get either aggregate here, random!
pool[].getAggregatedAttestation(1.Slot, 0.CommitteeIndex).isSome()
@ -210,7 +211,7 @@ suite "Attestation pool processing" & preset():
pool[].addAttestation(att3, @[bc0[3]], att3.loadSig, att3.data.slot)
block:
let attestations = pool[].getAttestationsForBlock(state[], cache)
let attestations = pool[].getAttestationsForTestBlock(state[], cache)
check:
attestations.len() == 2
attestations[0].aggregation_bits.countOnes() == 3
@ -222,7 +223,7 @@ suite "Attestation pool processing" & preset():
pool[].addAttestation(att0x, @[bc0[0]], att0x.loadSig, att0x.data.slot)
block:
let attestations = pool[].getAttestationsForBlock(state[], cache)
let attestations = pool[].getAttestationsForTestBlock(state[], cache)
check:
attestations.len() == 1
attestations[0].aggregation_bits.countOnes() == 4
@ -235,42 +236,44 @@ suite "Attestation pool processing" & preset():
root.data[0..<8] = toBytesBE(i.uint64)
let
bc0 = get_beacon_committee(
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
for j in 0..<bc0.len():
root.data[8..<16] = toBytesBE(j.uint64)
var att = makeAttestation(state[], root, bc0[j], cache)
var att = makeAttestation(state[].data, root, bc0[j], cache)
pool[].addAttestation(att, @[bc0[j]], att.loadSig, att.data.slot)
inc attestations
check:
process_slots(state.data, getStateField(state, slot) + 1, cache,
rewards)
process_slots(state.data, getStateField(state.data, slot) + 1, cache,
rewards, {}, FAR_FUTURE_SLOT)
doAssert attestations.uint64 > MAX_ATTESTATIONS,
"6*SLOTS_PER_EPOCH validators > 128 mainnet MAX_ATTESTATIONS"
check:
# Fill block with attestations
pool[].getAttestationsForBlock(state[], cache).lenu64() ==
pool[].getAttestationsForTestBlock(state[], cache).lenu64() ==
MAX_ATTESTATIONS
pool[].getAggregatedAttestation(
getStateField(state, slot) - 1, 0.CommitteeIndex).isSome()
getStateField(state.data, slot) - 1, 0.CommitteeIndex).isSome()
test "Attestations may arrive in any order" & preset():
var cache = StateCache()
let
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
attestation0 = makeAttestation(state[], state.blck.root, bc0[0], cache)
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
attestation0 = makeAttestation(state[].data, state.blck.root, bc0[0], cache)
check:
process_slots(state.data, getStateField(state, slot) + 1, cache, rewards)
process_slots(
state.data, getStateField(state.data, slot) + 1, cache, rewards, {},
FAR_FUTURE_SLOT)
let
bc1 = get_beacon_committee(state[],
getStateField(state, slot), 0.CommitteeIndex, cache)
attestation1 = makeAttestation(state[], state.blck.root, bc1[0], cache)
bc1 = get_beacon_committee(state[].data,
getStateField(state.data, slot), 0.CommitteeIndex, cache)
attestation1 = makeAttestation(state[].data, state.blck.root, bc1[0], cache)
# test reverse order
pool[].addAttestation(
@ -279,9 +282,10 @@ suite "Attestation pool processing" & preset():
attestation0, @[bc0[0]], attestation0.loadSig, attestation0.data.slot)
discard process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards, {},
FAR_FUTURE_SLOT)
let attestations = pool[].getAttestationsForBlock(state[], cache)
let attestations = pool[].getAttestationsForTestBlock(state[], cache)
check:
attestations.len == 1
@ -291,9 +295,11 @@ suite "Attestation pool processing" & preset():
let
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
attestation0 = makeAttestation(state[], state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(state[], state.blck.root, bc0[1], cache)
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
attestation0 =
makeAttestation(state[].data, state.blck.root, bc0[0], cache)
attestation1 =
makeAttestation(state[].data, state.blck.root, bc0[1], cache)
pool[].addAttestation(
attestation0, @[bc0[0]], attestation0.loadSig, attestation0.data.slot)
@ -302,9 +308,10 @@ suite "Attestation pool processing" & preset():
check:
process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards,
{}, FAR_FUTURE_SLOT)
let attestations = pool[].getAttestationsForBlock(state[], cache)
let attestations = pool[].getAttestationsForTestBlock(state[], cache)
check:
attestations.len == 1
@ -315,9 +322,11 @@ suite "Attestation pool processing" & preset():
var
# Create an attestation for slot 1!
bc0 = get_beacon_committee(
state[], getStateField(state, slot), 0.CommitteeIndex, cache)
attestation0 = makeAttestation(state[], state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(state[], state.blck.root, bc0[1], cache)
state[].data, getStateField(state.data, slot), 0.CommitteeIndex, cache)
attestation0 =
makeAttestation(state[].data, state.blck.root, bc0[0], cache)
attestation1 =
makeAttestation(state[].data, state.blck.root, bc0[1], cache)
attestation0.combine(attestation1)
@ -328,9 +337,10 @@ suite "Attestation pool processing" & preset():
check:
process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards,
{}, FAR_FUTURE_SLOT)
let attestations = pool[].getAttestationsForBlock(state[], cache)
let attestations = pool[].getAttestationsForTestBlock(state[], cache)
check:
attestations.len == 1
@ -339,10 +349,12 @@ suite "Attestation pool processing" & preset():
var cache = StateCache()
var
# Create an attestation for slot 1!
bc0 = get_beacon_committee(state[],
getStateField(state, slot), 0.CommitteeIndex, cache)
attestation0 = makeAttestation(state[], state.blck.root, bc0[0], cache)
attestation1 = makeAttestation(state[], state.blck.root, bc0[1], cache)
bc0 = get_beacon_committee(state[].data,
getStateField(state.data, slot), 0.CommitteeIndex, cache)
attestation0 =
makeAttestation(state[].data, state.blck.root, bc0[0], cache)
attestation1 =
makeAttestation(state[].data, state.blck.root, bc0[1], cache)
attestation0.combine(attestation1)
@ -353,9 +365,10 @@ suite "Attestation pool processing" & preset():
check:
process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards,
{}, FAR_FUTURE_SLOT)
let attestations = pool[].getAttestationsForBlock(state[], cache)
let attestations = pool[].getAttestationsForTestBlock(state[], cache)
check:
attestations.len == 1
@ -414,8 +427,9 @@ suite "Attestation pool processing" & preset():
pool[].addForkChoice(epochRef, blckRef, signedBlock.message, blckRef.slot)
bc1 = get_beacon_committee(
state[], getStateField(state, slot) - 1, 1.CommitteeIndex, cache)
attestation0 = makeAttestation(state[], b10.root, bc1[0], cache)
state[].data, getStateField(state.data, slot) - 1, 1.CommitteeIndex,
cache)
attestation0 = makeAttestation(state[].data, b10.root, bc1[0], cache)
pool[].addAttestation(
attestation0, @[bc1[0]], attestation0.loadSig, attestation0.data.slot)
@ -427,8 +441,8 @@ suite "Attestation pool processing" & preset():
head2 == b10Add[]
let
attestation1 = makeAttestation(state[], b11.root, bc1[1], cache)
attestation2 = makeAttestation(state[], b11.root, bc1[2], cache)
attestation1 = makeAttestation(state[].data, b11.root, bc1[1], cache)
attestation2 = makeAttestation(state[].data, b11.root, bc1[2], cache)
pool[].addAttestation(
attestation1, @[bc1[1]], attestation1.loadSig, attestation1.data.slot)
@ -503,7 +517,7 @@ suite "Attestation pool processing" & preset():
for epoch in 0 ..< 5:
let start_slot = compute_start_slot_at_epoch(Epoch epoch)
let committees_per_slot =
get_committee_count_per_slot(state[], Epoch epoch, cache)
get_committee_count_per_slot(state[].data, Epoch epoch, cache)
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
let new_block = addTestBlock(
state.data, block_root, cache, attestations = attestations)
@ -523,7 +537,7 @@ suite "Attestation pool processing" & preset():
attestations.setlen(0)
for index in 0'u64 ..< committees_per_slot:
let committee = get_beacon_committee(
state[], getStateField(state, slot), index.CommitteeIndex,
state[].data, getStateField(state.data, slot), index.CommitteeIndex,
cache)
# Create a bitfield filled with the given count per attestation,
@ -535,7 +549,7 @@ suite "Attestation pool processing" & preset():
attestations.add Attestation(
aggregation_bits: aggregation_bits,
data: makeAttestationData(
state[], getStateField(state, slot),
state[].data, getStateField(state.data, slot),
index.CommitteeIndex, blockroot)
# signature: ValidatorSig()
)
View File
@ -12,7 +12,8 @@ import
unittest2,
../beacon_chain/[beacon_chain_db, extras, interop, ssz],
../beacon_chain/spec/[
beaconstate, datatypes, digest, crypto, state_transition, presets],
beaconstate, crypto, datatypes, digest, forkedbeaconstate_helpers, presets,
state_transition],
../beacon_chain/consensus_object_pools/blockchain_dag,
eth/db/kvstore,
# test utilies
@ -73,12 +74,12 @@ suite "Beacon chain DB" & preset():
testStates = getTestStates(dag.headState.data)
# Ensure transitions beyond just adding validators and increasing slots
sort(testStates) do (x, y: ref HashedBeaconState) -> int:
cmp($x.root, $y.root)
sort(testStates) do (x, y: ref ForkedHashedBeaconState) -> int:
cmp($getStateRoot(x[]), $getStateRoot(y[]))
for state in testStates:
db.putState(state[].data)
let root = hash_tree_root(state[].data)
db.putState(state[].hbsPhase0.data)
let root = hash_tree_root(state[])
check:
db.containsState(root)
@ -98,12 +99,12 @@ suite "Beacon chain DB" & preset():
var testStates = getTestStates(dag.headState.data)
# Ensure transitions beyond just adding validators and increasing slots
sort(testStates) do (x, y: ref HashedBeaconState) -> int:
cmp($x.root, $y.root)
sort(testStates) do (x, y: ref ForkedHashedBeaconState) -> int:
cmp($getStateRoot(x[]), $getStateRoot(y[]))
for state in testStates:
db.putState(state[].data)
let root = hash_tree_root(state[].data)
db.putState(state[].hbsPhase0.data)
let root = hash_tree_root(state[])
check:
db.getState(root, stateBuffer[], noRollback)
View File
@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -12,11 +12,13 @@ import
unittest2,
stew/assign2,
eth/keys,
../beacon_chain/spec/[datatypes, digest, helpers, state_transition, presets],
../beacon_chain/spec/[
datatypes, digest, forkedbeaconstate_helpers, helpers, state_transition,
presets],
../beacon_chain/beacon_node_types,
../beacon_chain/[beacon_chain_db, ssz],
../beacon_chain/consensus_object_pools/[
blockchain_dag, block_quarantine, block_clearance, statedata_helpers],
blockchain_dag, block_quarantine, block_clearance],
./testutil, ./testdbutil, ./testblockutil
when isMainModule:
@ -155,7 +157,7 @@ suite "Block pool processing" & preset():
b2Add = dag.addRawBlock(quarantine, b2, nil)
b2Get = dag.get(b2.root)
er = dag.findEpochRef(b1Add[], b1Add[].slot.epoch)
validators = getStateField(dag.headState, validators).lenu64()
validators = getStateField(dag.headState.data, validators).lenu64()
check:
b2Get.isSome()
@ -175,7 +177,9 @@ suite "Block pool processing" & preset():
# Skip one slot to get a gap
check:
process_slots(state[], state.data.slot + 1, cache, rewards)
process_slots(
state[], getStateField(state[], slot) + 1, cache, rewards, {},
FAR_FUTURE_SLOT)
let
b4 = addTestBlock(state[], b2.root, cache)
@ -263,7 +267,7 @@ suite "Block pool processing" & preset():
check:
# ensure we loaded the correct head state
dag2.head.root == b2.root
hash_tree_root(dag2.headState) == b2.message.state_root
hash_tree_root(dag2.headState.data) == b2.message.state_root
dag2.get(b1.root).isSome()
dag2.get(b2.root).isSome()
dag2.heads.len == 1
@ -287,7 +291,7 @@ suite "Block pool processing" & preset():
check:
dag.head == b1Add[]
getStateField(dag.headState, slot) == b1Add[].slot
getStateField(dag.headState.data, slot) == b1Add[].slot
test "updateStateData sanity" & preset():
let
@ -305,38 +309,38 @@ suite "Block pool processing" & preset():
check:
tmpState.blck == b1Add[]
getStateField(tmpState, slot) == bs1.slot
getStateField(tmpState.data, slot) == bs1.slot
# Skip slots
dag.updateStateData(tmpState[], bs1_3, false, cache) # skip slots
check:
tmpState.blck == b1Add[]
getStateField(tmpState, slot) == bs1_3.slot
getStateField(tmpState.data, slot) == bs1_3.slot
# Move back slots, but not blocks
dag.updateStateData(tmpState[], bs1_3.parent(), false, cache)
check:
tmpState.blck == b1Add[]
getStateField(tmpState, slot) == bs1_3.parent().slot
getStateField(tmpState.data, slot) == bs1_3.parent().slot
# Move to different block and slot
dag.updateStateData(tmpState[], bs2_3, false, cache)
check:
tmpState.blck == b2Add[]
getStateField(tmpState, slot) == bs2_3.slot
getStateField(tmpState.data, slot) == bs2_3.slot
# Move back slot and block
dag.updateStateData(tmpState[], bs1, false, cache)
check:
tmpState.blck == b1Add[]
getStateField(tmpState, slot) == bs1.slot
getStateField(tmpState.data, slot) == bs1.slot
# Move back to genesis
dag.updateStateData(tmpState[], bs1.parent(), false, cache)
check:
tmpState.blck == b1Add[].parent
getStateField(tmpState, slot) == bs1.parent.slot
getStateField(tmpState.data, slot) == bs1.parent.slot
suite "chain DAG finalization tests" & preset():
setup:
@ -354,8 +358,8 @@ suite "chain DAG finalization tests" & preset():
tmpState = assignClone(dag.headState.data)
check:
process_slots(
tmpState[], tmpState.data.slot + (5 * SLOTS_PER_EPOCH).uint64,
cache, rewards)
tmpState[], getStateField(tmpState[], slot) + (5 * SLOTS_PER_EPOCH).uint64,
cache, rewards, {}, FAR_FUTURE_SLOT)
let lateBlock = addTestBlock(tmpState[], dag.head.root, cache)
block:
@ -373,7 +377,7 @@ suite "chain DAG finalization tests" & preset():
blck = addTestBlock(
tmpState[], dag.head.root, cache,
attestations = makeFullAttestations(
tmpState[], dag.head.root, tmpState.data.slot, cache, {}))
tmpState[], dag.head.root, getStateField(tmpState[], slot), cache, {}))
let added = dag.addRawBlock(quarantine, blck, nil)
check: added.isOk()
dag.updateHead(added[], quarantine)
@ -383,7 +387,7 @@ suite "chain DAG finalization tests" & preset():
dag.heads.len() == 1
check:
dag.db.immutableValidators.len() == getStateField(dag.headState, validators).len()
dag.db.immutableValidators.len() == getStateField(dag.headState.data, validators).len()
let
finalER = dag.findEpochRef(dag.finalizedHead.blck, dag.finalizedHead.slot.epoch)
@ -436,16 +440,15 @@ suite "chain DAG finalization tests" & preset():
dag2.head.root == dag.head.root
dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
dag2.finalizedHead.slot == dag.finalizedHead.slot
hash_tree_root(dag2.headState) == hash_tree_root(dag.headState)
hash_tree_root(dag2.headState.data) == hash_tree_root(dag.headState.data)
test "orphaned epoch block" & preset():
var prestate = (ref HashedBeaconState)()
var prestate = (ref ForkedHashedBeaconState)(beaconStateFork: forkPhase0)
for i in 0 ..< SLOTS_PER_EPOCH:
if i == SLOTS_PER_EPOCH - 1:
assign(prestate[], dag.headState.data)
let blck = makeTestBlock(
dag.headState.data, dag.head.root, cache)
let blck = makeTestBlock(dag.headState.data, dag.head.root, cache)
let added = dag.addRawBlock(quarantine, blck, nil)
check: added.isOk()
dag.updateHead(added[], quarantine)
@ -457,11 +460,12 @@ suite "chain DAG finalization tests" & preset():
# The loop creates multiple branches, which StateCache isn't suitable for
cache = StateCache()
doAssert process_slots(prestate[], prestate[].data.slot + 1, cache, rewards)
doAssert process_slots(
prestate[], getStateField(prestate[], slot) + 1, cache, rewards, {},
FAR_FUTURE_SLOT)
# create another block, orphaning the head
let blck = makeTestBlock(
prestate[], dag.head.parent.root, cache)
let blck = makeTestBlock(prestate[], dag.head.parent.root, cache)
# Add block, but don't update head
let added = dag.addRawBlock(quarantine, blck, nil)
@ -486,12 +490,13 @@ suite "chain DAG finalization tests" & preset():
# Advance past epoch so that the epoch transition is gapped
check:
process_slots(
dag.headState.data, Slot(SLOTS_PER_EPOCH * 6 + 2), cache, rewards)
dag.headState.data, Slot(SLOTS_PER_EPOCH * 6 + 2), cache, rewards, {},
FAR_FUTURE_SLOT)
var blck = makeTestBlock(
dag.headState.data, dag.head.root, cache,
attestations = makeFullAttestations(
dag.headState, dag.head.root, getStateField(dag.headState, slot),
dag.headState.data, dag.head.root, getStateField(dag.headState.data, slot),
cache, {}))
let added = dag.addRawBlock(quarantine, blck, nil)
@ -508,9 +513,8 @@ suite "chain DAG finalization tests" & preset():
assign(tmpStateData[], dag.headState)
dag.updateStateData(tmpStateData[], cur.atSlot(cur.slot), false, cache)
check:
dag.get(cur).data.message.state_root ==
tmpStateData[].data.root
tmpStateData[].data.root == hash_tree_root(tmpSTateData[])
dag.get(cur).data.message.state_root == getStateRoot(tmpStateData[].data)
getStateRoot(tmpStateData[].data) == hash_tree_root(tmpStateData[].data)
cur = cur.parent
let
@ -522,4 +526,4 @@ suite "chain DAG finalization tests" & preset():
dag2.head.root == dag.head.root
dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
dag2.finalizedHead.slot == dag.finalizedHead.slot
hash_tree_root(dag2.headState) == hash_tree_root(dag.headState)
hash_tree_root(dag2.headState.data) == hash_tree_root(dag.headState.data)
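Nearly every check in this file reads fields with `getStateField(dag.headState.data, ...)` instead of touching a concrete state type. One way such an accessor could be written as a dispatching template is sketched below; the `unsafeAddr` indirection (returning an lvalue so callers could also assign through it) and the Altair branch are assumptions, not code from this patch:

template getStateField*(x: ForkedHashedBeaconState, field: untyped): untyped =
  # the case expression picks the active fork's inner BeaconState;
  # dereferencing the address keeps the result usable as an lvalue
  (case x.beaconStateFork
   of forkPhase0: unsafeAddr x.hbsPhase0.data.field
   of forkAltair: unsafeAddr x.hbsAltair.data.field)[]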

@ -17,11 +17,10 @@ import
../beacon_chain/gossip_processing/[gossip_validation, batch_validation],
../beacon_chain/fork_choice/[fork_choice_types, fork_choice],
../beacon_chain/consensus_object_pools/[
block_quarantine, blockchain_dag, block_clearance, attestation_pool,
statedata_helpers],
block_quarantine, blockchain_dag, block_clearance, attestation_pool],
../beacon_chain/ssz/merkleization,
../beacon_chain/spec/[crypto, datatypes, digest, state_transition, helpers,
presets, network],
../beacon_chain/spec/[crypto, datatypes, digest, forkedbeaconstate_helpers,
state_transition, helpers, presets, network],
# Test utilities
./testutil, ./testdbutil, ./testblockutil
@ -43,7 +42,9 @@ suite "Gossip validation " & preset():
batchCrypto = BatchCrypto.new(keys.newRng(), eager = proc(): bool = false)
# Slot 0 is a finalized slot - won't be making attestations for it..
check:
process_slots(state.data, getStateField(state, slot) + 1, cache, rewards)
process_slots(
state.data, getStateField(state.data, slot) + 1, cache, rewards, {},
FAR_FUTURE_SLOT)
test "Validation sanity":
# TODO: refactor tests to avoid skipping BLS validation
@ -67,14 +68,14 @@ suite "Gossip validation " & preset():
var
# Create attestations for slot 1
beacon_committee = get_beacon_committee(
dag.headState, dag.head.slot, 0.CommitteeIndex, cache)
dag.headState.data, dag.head.slot, 0.CommitteeIndex, cache)
att_1_0 = makeAttestation(
dag.headState, dag.head.root, beacon_committee[0], cache)
dag.headState.data, dag.head.root, beacon_committee[0], cache)
att_1_1 = makeAttestation(
dag.headState, dag.head.root, beacon_committee[1], cache)
dag.headState.data, dag.head.root, beacon_committee[1], cache)
committees_per_slot =
get_committee_count_per_slot(dag.headState,
get_committee_count_per_slot(dag.headState.data,
att_1_0.data.slot.epoch, cache)
subnet = compute_subnet_for_attestation(

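Both this file and the block-pool tests above now call `process_slots` with six positional arguments, ending in `{}` and `FAR_FUTURE_SLOT`. Inferred purely from those call sites, the updated signature is presumably along these lines (prototype only; parameter names are guesses, and the trailing slot looks like a fork-transition bound, with `FAR_FUTURE_SLOT` keeping the test states on phase0):

# inferred from calls such as
#   process_slots(state.data, getStateField(state.data, slot) + 1, cache, rewards, {}, FAR_FUTURE_SLOT)
proc process_slots*(
    state: var ForkedHashedBeaconState, slot: Slot,
    cache: var StateCache, rewards: var RewardInfo,
    flags: UpdateFlags, altairForkSlot: Slot): bool   # `altairForkSlot` name assumed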
@ -11,7 +11,8 @@ import
options, sequtils,
unittest2,
./testutil, ./testdbutil, ./teststateutil,
../beacon_chain/spec/[datatypes, digest, helpers, presets],
../beacon_chain/spec/[
datatypes, digest, forkedbeaconstate_helpers, helpers, presets],
../beacon_chain/[beacon_node_types, statediff],
../beacon_chain/ssz,
../beacon_chain/consensus_object_pools/[blockchain_dag, block_quarantine]
@ -30,19 +31,22 @@ suite "state diff tests" & preset():
for i in 0 ..< testStates.len:
for j in (i+1) ..< testStates.len:
doAssert testStates[i].data.slot < testStates[j].data.slot
if testStates[i].data.slot + SLOTS_PER_EPOCH != testStates[j].data.slot:
doAssert getStateField(testStates[i][], slot) <
getStateField(testStates[j][], slot)
if getStateField(testStates[i][], slot) + SLOTS_PER_EPOCH != getStateField(testStates[j][], slot):
continue
var tmpStateApplyBase = assignClone(testStates[i].data)
let diff = diffStates(testStates[i].data, testStates[j].data)
var tmpStateApplyBase = assignClone(testStates[i].hbsPhase0.data)
let diff = diffStates(
testStates[i].hbsPhase0.data, testStates[j].hbsPhase0.data)
# Immutable parts of validators stored separately, so aren't part of
# the state diff. Synthesize required portion here for testing.
applyDiff(
tmpStateApplyBase[],
mapIt(testStates[j].data.validators.asSeq[
testStates[i].data.validators.len ..
testStates[j].data.validators.len - 1],
mapIt(
getStateField(testStates[j][], validators).asSeq[
getStateField(testStates[i][], validators).len ..
getStateField(testStates[j][], validators).len - 1],
it.getImmutableValidatorData),
diff)
check hash_tree_root(testStates[j].data) ==
check hash_tree_root(testStates[j][]) ==
hash_tree_root(tmpStateApplyBase[])

@ -8,12 +8,12 @@
import
chronicles,
options, stew/endians2,
../beacon_chain/extras,
../beacon_chain/[beacon_node_types, extras],
../beacon_chain/validators/validator_pool,
../beacon_chain/ssz/merkleization,
../beacon_chain/spec/[crypto, datatypes, digest, presets, helpers, validator,
signatures, state_transition],
../beacon_chain/consensus_object_pools/statedata_helpers
../beacon_chain/spec/[crypto, datatypes, digest, presets, helpers,
signatures, state_transition, forkedbeaconstate_helpers],
../beacon_chain/consensus_object_pools/attestation_pool
func makeFakeValidatorPrivKey(i: int): ValidatorPrivKey =
# 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,
@ -76,7 +76,7 @@ func signBlock(
)
proc addTestBlock*(
state: var HashedBeaconState,
state: var ForkedHashedBeaconState,
parent_root: Eth2Digest,
cache: var StateCache,
eth1_data = Eth1Data(),
@ -88,30 +88,34 @@ proc addTestBlock*(
# Create and add a block to state - state will advance by one slot!
if nextSlot:
var rewards: RewardInfo
doAssert process_slots(state, state.data.slot + 1, cache, rewards, flags)
doAssert process_slots(
state, getStateField(state, slot) + 1, cache, rewards, flags,
FAR_FUTURE_SLOT)
let
proposer_index = get_beacon_proposer_index(state.data, cache)
privKey = hackPrivKey(state.data.validators[proposer_index.get])
proposer_index = get_beacon_proposer_index(
state, cache, getStateField(state, slot))
privKey = hackPrivKey(getStateField(state, validators)[proposer_index.get])
randao_reveal =
if skipBlsValidation notin flags:
privKey.genRandaoReveal(
state.data.fork, state.data.genesis_validators_root, state.data.slot).
toValidatorSig()
getStateField(state, fork),
getStateField(state, genesis_validators_root),
getStateField(state, slot)).toValidatorSig()
else:
ValidatorSig()
let
message = makeBeaconBlock(
defaultRuntimePreset,
state,
state.hbsPhase0,
proposer_index.get(),
parent_root,
randao_reveal,
# Keep deposit counts internally consistent.
Eth1Data(
deposit_root: eth1_data.deposit_root,
deposit_count: state.data.eth1_deposit_index + deposits.lenu64,
deposit_count: getStateField(state, eth1_deposit_index) + deposits.lenu64,
block_hash: eth1_data.block_hash),
graffiti,
attestations,
@ -127,13 +131,14 @@ proc addTestBlock*(
let
new_block = signBlock(
state.data.fork,
state.data.genesis_validators_root, message.get(), privKey, flags)
getStateField(state, fork),
getStateField(state, genesis_validators_root), message.get(), privKey,
flags)
new_block
proc makeTestBlock*(
state: HashedBeaconState,
state: ForkedHashedBeaconState,
parent_root: Eth2Digest,
cache: var StateCache,
eth1_data = Eth1Data(),
@ -150,7 +155,7 @@ proc makeTestBlock*(
graffiti)
func makeAttestationData*(
state: StateData, slot: Slot, committee_index: CommitteeIndex,
state: ForkedHashedBeaconState, slot: Slot, committee_index: CommitteeIndex,
beacon_block_root: Eth2Digest): AttestationData =
## Create an attestation / vote for the block `beacon_block_root` using the
## data in `state` to fill in the rest of the fields.
@ -181,7 +186,7 @@ func makeAttestationData*(
)
func makeAttestation*(
state: StateData, beacon_block_root: Eth2Digest,
state: ForkedHashedBeaconState, beacon_block_root: Eth2Digest,
committee: seq[ValidatorIndex], slot: Slot, index: CommitteeIndex,
validator_index: ValidatorIndex, cache: var StateCache,
flags: UpdateFlags = {}): Attestation =
@ -216,7 +221,7 @@ func makeAttestation*(
)
func find_beacon_committee*(
state: StateData, validator_index: ValidatorIndex,
state: ForkedHashedBeaconState, validator_index: ValidatorIndex,
cache: var StateCache): auto =
let epoch = compute_epoch_at_slot(getStateField(state, slot))
for epoch_committee_index in 0'u64 ..< get_committee_count_per_slot(
@ -231,7 +236,7 @@ func find_beacon_committee*(
doAssert false
func makeAttestation*(
state: StateData, beacon_block_root: Eth2Digest,
state: ForkedHashedBeaconState, beacon_block_root: Eth2Digest,
validator_index: ValidatorIndex, cache: var StateCache): Attestation =
let (committee, slot, index) =
find_beacon_committee(state, validator_index, cache)
@ -239,7 +244,7 @@ func makeAttestation*(
validator_index, cache)
func makeFullAttestations*(
state: StateData, beacon_block_root: Eth2Digest, slot: Slot,
state: ForkedHashedBeaconState, beacon_block_root: Eth2Digest, slot: Slot,
cache: var StateCache,
flags: UpdateFlags = {}): seq[Attestation] =
# Create attestations in which the full committee participates for each shard
@ -284,10 +289,12 @@ func makeFullAttestations*(
state: HashedBeaconState, beacon_block_root: Eth2Digest, slot: Slot,
cache: var StateCache,
flags: UpdateFlags = {}): seq[Attestation] =
# TODO this only supports phase 0 currently. Either expand that to
# Altair here or use the ForkedHashedBeaconState version only
makeFullAttestations(
(ref StateData)(data: state, blck: BlockRef(
root: beacon_block_root, slot: slot))[], beacon_block_root, slot, cache,
flags)
(ref ForkedHashedBeaconState)(
beaconStateFork: forkPhase0, hbsPhase0: state)[],
beacon_block_root, slot, cache, flags)
iterator makeTestBlocks*(
state: HashedBeaconState,
@ -296,11 +303,13 @@ iterator makeTestBlocks*(
blocks: int,
attested: bool): SignedBeaconBlock =
var
state = assignClone(state)
# TODO replace wrapper with more native usage
state = (ref ForkedHashedBeaconState)(
hbsPhase0: state, beaconStateFork: forkPhase0)
parent_root = parent_root
for _ in 0..<blocks:
let attestations = if attested:
makeFullAttestations(state[], parent_root, state[].data.slot, cache)
makeFullAttestations(state[], parent_root, getStateField(state[], slot), cache)
else:
@[]
@ -308,3 +317,13 @@ iterator makeTestBlocks*(
state[], parent_root, cache, attestations = attestations)
yield blck
parent_root = blck.root
iterator makeTestBlocks*(state: ForkedHashedBeaconState; parent_root: Eth2Digest;
cache: var StateCache; blocks: int; attested: bool): SignedBeaconBlock =
for blck in makeTestBlocks(state.hbsPhase0, parent_root, cache, blocks, attested):
yield blck
proc getAttestationsforTestBlock*(
pool: var AttestationPool, stateData: StateData, cache: var StateCache):
seq[Attestation] =
pool.getAttestationsForBlock(stateData.data.hbsPhase0, cache)
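The compatibility overloads above wrap a bare phase0 state into the forked type on the fly. A hypothetical usage of that wrapper pattern (`hashedState`, a phase0.HashedBeaconState, and `parentRoot` are stand-in names, not identifiers from this patch):

var
  cache = StateCache()
  forked = (ref ForkedHashedBeaconState)(
    beaconStateFork: forkPhase0, hbsPhase0: hashedState)
# addTestBlock advances the wrapped state by one slot and returns the signed block
let blck = addTestBlock(forked[], parentRoot, cache)
doAssert blck.message.slot == getStateField(forked[], slot)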

@ -12,8 +12,9 @@ import
./mocking/mock_deposits,
./helpers/math_helpers,
../beacon_chain/ssz/merkleization,
../beacon_chain/spec/[beaconstate, crypto, datatypes, presets,
helpers, state_transition]
../beacon_chain/spec/[
beaconstate, crypto, datatypes, forkedbeaconstate_helpers, helpers,
presets, state_transition]
proc valid_deposit(state: var BeaconState) =
const deposit_amount = MAX_EFFECTIVE_BALANCE
@ -40,8 +41,8 @@ proc valid_deposit(state: var BeaconState) =
EFFECTIVE_BALANCE_INCREMENT
)
proc getTestStates*(initialState: HashedBeaconState):
seq[ref HashedBeaconState] =
proc getTestStates*(initialState: ForkedHashedBeaconState):
seq[ref ForkedHashedBeaconState] =
# Randomly generated slot numbers, with a jump to around
# SLOTS_PER_HISTORICAL_ROOT to force wraparound of those
# slot-based mod/increment fields.
@ -64,9 +65,10 @@ proc getTestStates*(initialState: HashedBeaconState):
for i, epoch in stateEpochs:
let slot = epoch.Epoch.compute_start_slot_at_epoch
if tmpState.data.slot < slot:
doAssert process_slots(tmpState[], slot, cache, rewards)
if getStateField(tmpState[], slot) < slot:
doAssert process_slots(
tmpState[], slot, cache, rewards, {}, FAR_FUTURE_SLOT)
if i mod 3 == 0:
valid_deposit(tmpState.data)
doAssert tmpState.data.slot == slot
valid_deposit(tmpState.hbsPhase0.data)
doAssert getStateField(tmpState[], slot) == slot
result.add assignClone(tmpState[])
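A hedged usage sketch of the updated `getTestStates`, mirroring how the DB tests above consume its result (`db` and `dag` are the handles those tests set up):

var testStates = getTestStates(dag.headState.data)   # seq[ref ForkedHashedBeaconState]
for state in testStates:
  # only phase0 states are produced here, so storing the inner phase0
  # representation, as the DB test does, is sufficient
  db.putState(state[].hbsPhase0.data)
  doAssert db.containsState(hash_tree_root(state[]))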