more `withState` `state` -> `forkyState` (#4104)

tersec 2022-09-10 06:12:07 +00:00 committed by GitHub
parent 1d620f0123
commit 19bf460a3b
14 changed files with 43 additions and 34 deletions
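
For context: `withState` evaluates its body against the fork-specific variant of a forked beacon state and injects a symbol bound to that variant; this commit renames more call sites to use the injected `forkyState` name instead of the older `state`, which helps tell the fork-specific binding apart from ordinary `state` variables. A minimal sketch of the resulting call-site pattern, mirroring the first hunk below (all identifiers are taken from the diff; this is illustrative, not the `withState` template definition itself):

# Sketch only: `withState` runs the branch matching the state's current fork
# and injects that fork's concrete state as `forkyState`; `stateFork` selects
# fork-gated `when` branches at compile time.
let unrealized = withState(dag.headState):
  when stateFork >= BeaconStateFork.Altair:
    forkyState.data.compute_unrealized_finality()
  else:
    # per the diff, the pre-Altair overload takes an explicit StateCache
    var cache: StateCache
    forkyState.data.compute_unrealized_finality(cache)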

View File

@@ -145,10 +145,10 @@ proc init*(T: type AttestationPool, dag: ChainDAGRef,
if enableTestFeatures in dag.updateFlags and blckRef == dag.head:
unrealized = withState(dag.headState):
when stateFork >= BeaconStateFork.Altair:
- state.data.compute_unrealized_finality()
+ forkyState.data.compute_unrealized_finality()
else:
var cache: StateCache
- state.data.compute_unrealized_finality(cache)
+ forkyState.data.compute_unrealized_finality(cache)
withBlck(blck):
forkChoice.process_block(
dag, epochRef, blckRef, unrealized, blck.message,

View File

@@ -105,9 +105,9 @@ proc addResolvedHeadBlock(
if enableTestFeatures in dag.updateFlags:
unrealized = withState(state):
when stateFork >= BeaconStateFork.Altair:
- state.data.compute_unrealized_finality()
+ forkyState.data.compute_unrealized_finality()
else:
- state.data.compute_unrealized_finality(cache)
+ forkyState.data.compute_unrealized_finality(cache)
onBlockAdded(blockRef, trustedBlock, epochRef, unrealized)
if not(isNil(dag.onBlockAdded)):
dag.onBlockAdded(ForkedTrustedSignedBeaconBlock.init(trustedBlock))

View File

@@ -965,7 +965,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
cfg, getStateField(dag.headState, genesis_validators_root))
withState(dag.headState):
- dag.validatorMonitor[].registerState(state.data)
+ dag.validatorMonitor[].registerState(forkyState.data)
updateBeaconMetrics(dag.headState, dag.head.bid, cache)
@@ -1024,7 +1024,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
withState(dag.headState):
when stateFork >= BeaconStateFork.Altair:
- dag.headSyncCommittees = state.data.get_sync_committee_cache(cache)
+ dag.headSyncCommittees = forkyState.data.get_sync_committee_cache(cache)
info "Block DAG initialized",
head = shortLog(dag.head),
@@ -1044,7 +1044,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
withState(dag.headState):
when stateFork >= BeaconStateFork.Bellatrix:
template executionPayloadHeader(): auto =
- state().data.latest_execution_payload_header
+ forkyState().data.latest_execution_payload_header
const emptyExecutionPayloadHeader =
default(type(executionPayloadHeader))
if executionPayloadHeader != emptyExecutionPayloadHeader:

View File

@@ -343,9 +343,9 @@ proc runProposalForkchoiceUpdated*(
# opportunistically, so mismatches are fine if not too frequent.
let
timestamp = withState(self.dag.headState):
- compute_timestamp_at_slot(state.data, nextWallSlot)
+ compute_timestamp_at_slot(forkyState.data, nextWallSlot)
randomData = withState(self.dag.headState):
- get_randao_mix(state.data, get_current_epoch(state.data)).data
+ get_randao_mix(forkyState.data, get_current_epoch(forkyState.data)).data
feeRecipient = self.getFeeRecipient(
nextProposer, validatorIndex, nextWallSlot.epoch)
beaconHead = self.attestationPool[].getBeaconHead(self.dag.head)

View File

@@ -241,7 +241,7 @@ proc storeBlock*(
for i in trustedBlock.message.body.sync_aggregate.sync_committee_bits.oneIndices():
vm[].registerSyncAggregateInBlock(
trustedBlock.message.slot, trustedBlock.root,
- state.data.current_sync_committee.pubkeys.data[i])
+ forkyState.data.current_sync_committee.pubkeys.data[i])
self.dumpBlock(signedBlock, blck)

View File

@@ -204,7 +204,8 @@ template validateBeaconBlockBellatrix(
# compute_timestamp_at_slot(state, block.slot).
let timestampAtSlot =
withState(dag.headState):
- compute_timestamp_at_slot(state.data, signed_beacon_block.message.slot)
+ compute_timestamp_at_slot(
+ forkyState.data, signed_beacon_block.message.slot)
if not (signed_beacon_block.message.body.execution_payload.timestamp ==
timestampAtSlot):
quarantine[].addUnviable(signed_beacon_block.root)

View File

@@ -398,9 +398,9 @@ proc collectSignatureSets*(
else:
let
current_sync_committee =
- state.data.get_sync_committee_cache(cache).current_sync_committee
- previous_slot = max(state.data.slot, Slot(1)) - 1
- beacon_block_root = get_block_root_at_slot(state.data, previous_slot)
+ forkyState.data.get_sync_committee_cache(cache).current_sync_committee
+ previous_slot = max(forkyState.data.slot, Slot(1)) - 1
+ beacon_block_root = get_block_root_at_slot(forkyState.data, previous_slot)
pubkey = ? aggregateAttesters(
current_sync_committee,
signed_block.message.body.sync_aggregate.sync_committee_bits,

View File

@@ -276,7 +276,7 @@ proc routeSyncCommitteeMessages*(
var statuses = newSeq[Option[SendResult]](len(msgs))
let
- curPeriod = sync_committee_period(state.data.slot)
+ curPeriod = sync_committee_period(forkyState.data.slot)
nextPeriod = curPeriod + 1
let (keysCur, keysNxt) =
@@ -285,7 +285,7 @@
var resNxt: Table[uint64, int]
for index, msg in msgs:
- if msg.validator_index < lenu64(state.data.validators):
+ if msg.validator_index < lenu64(forkyState.data.validators):
let msgPeriod = sync_committee_period(msg.slot + 1)
if msgPeriod == curPeriod:
resCur[msg.validator_index] = index

View File

@@ -125,14 +125,15 @@ proc addLocalValidators*(node: BeaconNode,
let slot = node.currentSlot()
withState(node.dag.headState):
for item in validators:
- node.addLocalValidator(state.data.validators.asSeq(), item, slot)
+ node.addLocalValidator(forkyState.data.validators.asSeq(), item, slot)
proc addRemoteValidators*(node: BeaconNode,
validators: openArray[KeystoreData]) =
let slot = node.currentSlot()
withState(node.dag.headState):
for item in validators:
- let index = findValidator(state.data.validators.asSeq(), item.pubkey)
+ let index = findValidator(
+ forkyState.data.validators.asSeq(), item.pubkey)
node.attachedValidators[].addRemoteValidator(index, item, slot)
proc addValidators*(node: BeaconNode) =
@@ -505,7 +506,7 @@ proc makeBeaconBlockForHeadAndSlot*(
let
exits = withState(state):
- node.exitPool[].getBeaconBlockExits(node.dag.cfg, state.data)
+ node.exitPool[].getBeaconBlockExits(node.dag.cfg, forkyState.data)
effectiveExecutionPayload =
if executionPayload.isSome:
executionPayload.get

View File

@@ -118,7 +118,7 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
attestation = Attestation(
aggregation_bits: CommitteeValidatorsBits.init(committee.len),
data: makeAttestationData(
- state.data, slot, committee_index, latest_block_root),
+ forkyState.data, slot, committee_index, latest_block_root),
)
first = true
@@ -132,8 +132,8 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
if attestation.aggregation_bits.countOnes() > 0:
if validate:
attestation.signature = makeAttestationSig(
- state.data.fork, genesis_validators_root, attestation.data,
- committee, attestation.aggregation_bits)
+ forkyState.data.fork, genesis_validators_root,
+ attestation.data, committee, attestation.aggregation_bits)
# add the attestation if any of the validators attested, as given
# by the randomness. We have to delay when the attestation is

View File

@@ -41,7 +41,7 @@ proc runTest(path: string, fork: BeaconStateFork) =
withState(state[]):
var computedProof = newSeq[Eth2Digest](log2trunc(proof.leaf_index))
- build_proof(state.data, proof.leaf_index, computedProof).get
+ build_proof(forkyState.data, proof.leaf_index, computedProof).get
check:
computedProof == proof.branch.mapIt(Eth2Digest.fromHex(it))
@@ -50,7 +50,7 @@ proc runTest(path: string, fork: BeaconStateFork) =
computedProof,
log2trunc(proof.leaf_index),
get_subtree_index(proof.leaf_index),
- state.root)
+ forkyState.root)
suite "EF - Light client - Single merkle proof" & preset():
const presetPath = SszTestsDir/const_preset

View File

@@ -125,14 +125,14 @@ suite "Attestation pool processing" & preset():
att1 = makeAttestation(state[], root1, bc1[0], cache)
check:
- withState(state[]): state.latest_block_root == root1
+ withState(state[]): forkyState.latest_block_root == root1
process_slots(
defaultRuntimeConfig, state[],
getStateField(state[], slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
info, {}).isOk()
- withState(state[]): state.latest_block_root == root1
+ withState(state[]): forkyState.latest_block_root == root1
check:
# shouldn't include already-included attestations

View File

@@ -85,9 +85,11 @@ suite "Exit pool testing suite":
check: pool[].isSeen(msg)
withState(dag.headState):
check:
- pool[].getBeaconBlockExits(cfg, state.data).proposer_slashings.lenu64 ==
+ pool[].getBeaconBlockExits(
+ cfg, forkyState.data).proposer_slashings.lenu64 ==
min(i + 1, MAX_PROPOSER_SLASHINGS)
- pool[].getBeaconBlockExits(cfg, state.data).proposer_slashings.len == 0
+ pool[].getBeaconBlockExits(
+ cfg, forkyState.data).proposer_slashings.len == 0
test "addExitMessage/getAttesterSlashingMessage":
for i in 0'u64 .. MAX_ATTESTER_SLASHINGS + 5:
@@ -106,9 +108,11 @@ suite "Exit pool testing suite":
check: pool[].isSeen(msg)
withState(dag.headState):
check:
- pool[].getBeaconBlockExits(cfg, state.data).attester_slashings.lenu64 ==
+ pool[].getBeaconBlockExits(
+ cfg, forkyState.data).attester_slashings.lenu64 ==
min(i + 1, MAX_ATTESTER_SLASHINGS)
- pool[].getBeaconBlockExits(cfg, state.data).attester_slashings.len == 0
+ pool[].getBeaconBlockExits(
+ cfg, forkyState.data).attester_slashings.len == 0
test "addExitMessage/getVoluntaryExitMessage":
# Need to advance state or it will not accept voluntary exits
@@ -135,9 +139,11 @@ suite "Exit pool testing suite":
withState(dag.headState):
check:
- pool[].getBeaconBlockExits(cfg, state.data).voluntary_exits.lenu64 ==
+ pool[].getBeaconBlockExits(
+ cfg, forkyState.data).voluntary_exits.lenu64 ==
min(i + 1, MAX_VOLUNTARY_EXITS)
- pool[].getBeaconBlockExits(cfg, state.data).voluntary_exits.len == 0
+ pool[].getBeaconBlockExits(
+ cfg, forkyState.data).voluntary_exits.len == 0
test "pre-pre-fork voluntary exit":
var
@@ -163,4 +169,5 @@ suite "Exit pool testing suite":
check:
# Message signed with a (fork-2) domain can no longer be added as that
# fork is not present in the BeaconState and thus fails transition
- pool[].getBeaconBlockExits(cfg, state.data).voluntary_exits.lenu64 == 0
+ pool[].getBeaconBlockExits(
+ cfg, forkyState.data).voluntary_exits.lenu64 == 0

View File

@@ -81,7 +81,7 @@ proc getTestStates*(
if i mod 3 == 0:
withState(tmpState[]):
- valid_deposit(state)
+ valid_deposit(forkyState)
doAssert getStateField(tmpState[], slot) == slot
if tmpState[].kind == stateFork: