reward accounting for altair+ (#2981)

Similar to the existing `RewardInfo`, this PR adds the infrastructure
needed to export epoch processing information from altair+. Because
accounting is done somewhat differently, the PR uses a fork-specific
object to extract the information in order to make the cost on the spec
side low.

* RewardInfo -> EpochInfo, ForkedEpochInfo
* use array for computing new sync committee
* avoid repeated total active balance computations in block processing
* simplify proposer index check
* simplify epoch transition tests
* pre-compute base increment and reuse in epoch processing, and a few
other small optimizations

This PR introduces the type and does the heavy lifting in terms of
refactoring - the tools that use the accounting will need separate PRs
(as well as refinements to the exported information)
This commit is contained in:
Jacek Sieka 2021-10-13 16:24:36 +02:00 committed by GitHub
parent 2eb9a608a4
commit f90b2b8b1f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
43 changed files with 677 additions and 546 deletions

View File

@ -755,7 +755,7 @@ proc get*(dag: ChainDAGRef, root: Eth2Digest): Option[BlockData] =
proc advanceSlots(
dag: ChainDAGRef, state: var StateData, slot: Slot, save: bool,
cache: var StateCache, rewards: var RewardInfo) =
cache: var StateCache, info: var ForkedEpochInfo) =
# Given a state, advance it zero or more slots by applying empty slot
# processing - the state must be positions at a slot before or equal to the
# target
@ -764,7 +764,7 @@ proc advanceSlots(
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
doAssert process_slots(
dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, rewards,
dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, info,
dag.updateFlags),
"process_slots shouldn't fail when state slot is correct"
if save:
@ -773,7 +773,7 @@ proc advanceSlots(
proc applyBlock(
dag: ChainDAGRef,
state: var StateData, blck: BlockData, flags: UpdateFlags,
cache: var StateCache, rewards: var RewardInfo): bool =
cache: var StateCache, info: var ForkedEpochInfo): bool =
# Apply a single block to the state - the state must be positioned at the
# parent of the block with a slot lower than the one of the block being
# applied
@ -793,15 +793,15 @@ proc applyBlock(
of BeaconBlockFork.Phase0:
state_transition(
dag.cfg, state.data, blck.data.phase0Block,
cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
cache, info, flags + dag.updateFlags + {slotProcessed}, restore)
of BeaconBlockFork.Altair:
state_transition(
dag.cfg, state.data, blck.data.altairBlock,
cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
cache, info, flags + dag.updateFlags + {slotProcessed}, restore)
of BeaconBlockFork.Merge:
state_transition(
dag.cfg, state.data, blck.data.mergeBlock,
cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
cache, info, flags + dag.updateFlags + {slotProcessed}, restore)
if ok:
state.blck = blck.refs
@ -932,7 +932,7 @@ proc updateStateData*(
assignTick = Moment.now()
startSlot {.used.} = getStateField(state.data, slot) # used in logs below
startRoot {.used.} = getStateRoot(state.data)
var rewards: RewardInfo
var info: ForkedEpochInfo
# Time to replay all the blocks between then and now
for i in countdown(ancestors.len - 1, 0):
# Because the ancestors are in the database, there's no need to persist them
@ -940,11 +940,11 @@ proc updateStateData*(
# database, we can skip certain checks that have already been performed
# before adding the block to the database.
let ok =
dag.applyBlock(state, dag.get(ancestors[i]), {}, cache, rewards)
dag.applyBlock(state, dag.get(ancestors[i]), {}, cache, info)
doAssert ok, "Blocks in database should never fail to apply.."
# ...and make sure to process empty slots as requested
dag.advanceSlots(state, bs.slot, save, cache, rewards)
dag.advanceSlots(state, bs.slot, save, cache, info)
# ...and make sure to load the state cache, if it exists
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)

View File

@ -535,10 +535,13 @@ func get_total_active_balance*(state: SomeBeaconState, cache: var StateCache): G
state, cache.get_shuffled_active_validator_indices(state, epoch))
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_base_reward_per_increment
func get_base_reward_per_increment_sqrt*(
total_active_balance_sqrt: uint64): Gwei =
EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR div total_active_balance_sqrt
func get_base_reward_per_increment*(
state: altair.BeaconState | merge.BeaconState, cache: var StateCache): Gwei =
EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR div
integer_squareroot(get_total_active_balance(state, cache))
total_active_balance: Gwei): Gwei =
get_base_reward_per_increment_sqrt(integer_squareroot(total_active_balance))
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_base_reward
func get_base_reward(
@ -658,13 +661,12 @@ proc process_attestation*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_next_sync_committee_indices
func get_next_sync_committee_indices(state: altair.BeaconState | merge.BeaconState):
seq[ValidatorIndex] =
func get_next_sync_committee_keys(state: altair.BeaconState | merge.BeaconState):
array[SYNC_COMMITTEE_SIZE, ValidatorPubKey] =
## Return the sequence of sync committee indices (which may include
## duplicate indices) for the next sync committee, given a ``state`` at a
## sync committee period boundary.
# TODO this size is known statically, so return array[] if possible
let epoch = get_current_epoch(state) + 1
const MAX_RANDOM_BYTE = 255
@ -674,10 +676,11 @@ func get_next_sync_committee_indices(state: altair.BeaconState | merge.BeaconSta
seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
var
i = 0'u64
sync_committee_indices: seq[ValidatorIndex]
index = 0
res: array[SYNC_COMMITTEE_SIZE, ValidatorPubKey]
hash_buffer: array[40, byte]
hash_buffer[0..31] = seed.data
while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
while index < SYNC_COMMITTEE_SIZE:
hash_buffer[32..39] = uint_to_bytes8(uint64(i div 32))
let
shuffled_index = compute_shuffled_index(uint64(i mod active_validator_count), active_validator_count, seed)
@ -685,22 +688,17 @@ func get_next_sync_committee_indices(state: altair.BeaconState | merge.BeaconSta
random_byte = eth2digest(hash_buffer).data[i mod 32]
effective_balance = state.validators[candidate_index].effective_balance
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
sync_committee_indices.add candidate_index
res[index] = state.validators[candidate_index].pubkey
inc index
i += 1'u64
sync_committee_indices
res
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_next_sync_committee
proc get_next_sync_committee*(state: altair.BeaconState | merge.BeaconState):
SyncCommittee =
## Return the *next* sync committee for a given ``state``.
let indices = get_next_sync_committee_indices(state)
# TODO not robust
doAssert indices.len == SYNC_COMMITTEE_SIZE
var res: SyncCommittee
for i, index in indices:
res.pubkeys.data[i] = state.validators[index].pubkey
res.pubkeys.resetCache()
res.pubkeys.data = get_next_sync_committee_keys(state)
# see signatures_batch, TODO shouldn't be here
# Deposit processing ensures all keys are valid

View File

@ -240,6 +240,26 @@ type
current_sync_committee*: SyncCommittee # [New in Altair]
next_sync_committee*: SyncCommittee # [New in Altair]
UnslashedParticipatingBalances* = object
previous_epoch*: array[PARTICIPATION_FLAG_WEIGHTS.len, Gwei]
current_epoch_TIMELY_TARGET*: Gwei
current_epoch*: Gwei # aka total_active_balance
ParticipationFlag* {.pure.} = enum
timelySourceAttester
timelyTargetAttester
timelyHeadAttester
eligible
ParticipationInfo* = object
flags*: set[ParticipationFlag]
delta*: RewardDelta
EpochInfo* = object
## Information about the outcome of epoch processing
validators*: seq[ParticipationInfo]
balances*: UnslashedParticipatingBalances
# TODO Careful, not nil analysis is broken / incomplete and the semantics will
# likely change in future versions of the language:
# https://github.com/nim-lang/RFCs/issues/250
@ -501,3 +521,7 @@ chronicles.formatIt SyncCommitteeMessage: shortLog(it)
template hash*(x: LightClientUpdate): Hash =
hash(x.header)
func clear*(info: var EpochInfo) =
info.validators.setLen(0)
info.balances = UnslashedParticipatingBalances()

View File

@ -547,10 +547,6 @@ type
# time of attestation.
previous_epoch_head_attesters_raw*: Gwei
RewardInfo* = object
statuses*: seq[RewardStatus]
total_balances*: TotalBalances
func getImmutableValidatorData*(validator: Validator): ImmutableValidatorData2 =
let cookedKey = validator.pubkey.load() # Loading the pubkey is slow!
doAssert cookedKey.isSome,

View File

@ -242,8 +242,17 @@ type
SomeBeaconBlock* = BeaconBlock | SigVerifiedBeaconBlock | TrustedBeaconBlock
SomeBeaconBlockBody* = BeaconBlockBody | SigVerifiedBeaconBlockBody | TrustedBeaconBlockBody
EpochInfo* = object
## Information about the outcome of epoch processing
statuses*: seq[RewardStatus]
total_balances*: TotalBalances
chronicles.formatIt BeaconBlock: it.shortLog
func clear*(info: var EpochInfo) =
info.statuses.setLen(0)
info.total_balances = TotalBalances()
Json.useCustomSerialization(BeaconState.justification_bits):
read:
let s = reader.readValue(string)

View File

@ -68,6 +68,19 @@ type
of BeaconBlockFork.Merge:
mergeBlock*: merge.TrustedSignedBeaconBlock
EpochInfoFork* {.pure.} = enum
Phase0
Altair
ForkedEpochInfo* = object
case kind*: EpochInfoFork
of EpochInfoFork.Phase0:
phase0Info*: phase0.EpochInfo
of EpochInfoFork.Altair:
altairInfo*: altair.EpochInfo
ForkyEpochInfo* = phase0.EpochInfo | altair.EpochInfo
ForkDigests* = object
phase0*: ForkDigest
altair*: ForkDigest
@ -133,6 +146,11 @@ template init*(T: type ForkedTrustedSignedBeaconBlock, blck: altair.TrustedSigne
template init*(T: type ForkedTrustedSignedBeaconBlock, blck: merge.TrustedSignedBeaconBlock): T =
T(kind: BeaconBlockFork.Merge, mergeBlock: blck)
template init*(T: type ForkedEpochInfo, info: phase0.EpochInfo): T =
T(kind: EpochInfoFork.Phase0, phase0Info: info)
template init*(T: type ForkedEpochInfo, info: altair.EpochInfo): T =
T(kind: EpochInfoFork.Altair, altairInfo: info)
# State-related functionality based on ForkedHashedBeaconState instead of HashedBeaconState
template withState*(x: ForkedHashedBeaconState, body: untyped): untyped =
@ -150,6 +168,28 @@ template withState*(x: ForkedHashedBeaconState, body: untyped): untyped =
template state: untyped {.inject.} = x.hbsPhase0
body
template withEpochInfo*(x: ForkedEpochInfo, body: untyped): untyped =
case x.kind
of EpochInfoFork.Phase0:
template info: untyped {.inject.} = x.phase0Info
body
of EpochInfoFork.Altair:
template info: untyped {.inject.} = x.altairInfo
body
template withEpochInfo*(
state: phase0.BeaconState, x: var ForkedEpochInfo, body: untyped): untyped =
x.kind = EpochInfoFork.Phase0
template info: untyped {.inject.} = x.phase0Info
body
template withEpochInfo*(
state: altair.BeaconState | merge.BeaconState, x: var ForkedEpochInfo,
body: untyped): untyped =
x.kind = EpochInfoFork.Altair
template info: untyped {.inject.} = x.altairInfo
body
# Dispatch functions
func assign*(tgt: var ForkedHashedBeaconState, src: ForkedHashedBeaconState) =
if tgt.beaconStateFork == src.beaconStateFork:

View File

@ -165,19 +165,18 @@ func clear_epoch_from_cache(cache: var StateCache, epoch: Epoch) =
proc advance_slot(
cfg: RuntimeConfig,
state: var SomeBeaconState, previous_slot_state_root: Eth2Digest,
flags: UpdateFlags, cache: var StateCache, rewards: var RewardInfo) {.nbench.} =
flags: UpdateFlags, cache: var StateCache, info: var ForkyEpochInfo) {.nbench.} =
# Do the per-slot and potentially the per-epoch processing, then bump the
# slot number - we've now arrived at the slot state on top of which a block
# optionally can be applied.
process_slot(state, previous_slot_state_root)
rewards.statuses.setLen(0)
rewards.total_balances = TotalBalances()
info.clear()
let is_epoch_transition = (state.slot + 1).isEpoch
if is_epoch_transition:
# Note: Genesis epoch = 0, no need to test if before Genesis
process_epoch(cfg, state, flags, cache, rewards)
process_epoch(cfg, state, flags, cache, info)
clear_epoch_from_cache(cache, (state.slot + 1).compute_epoch_at_slot)
state.slot += 1
@ -222,7 +221,7 @@ proc maybeUpgradeState*(
proc process_slots*(
cfg: RuntimeConfig, state: var ForkedHashedBeaconState, slot: Slot,
cache: var StateCache, rewards: var RewardInfo, flags: UpdateFlags): bool {.nbench.} =
cache: var StateCache, info: var ForkedEpochInfo, flags: UpdateFlags): bool {.nbench.} =
if not (getStateField(state, slot) < slot):
if slotProcessed notin flags or getStateField(state, slot) != slot:
notice "Unusual request for a slot in the past",
@ -234,8 +233,9 @@ proc process_slots*(
# Update the state so its slot matches that of the block
while getStateField(state, slot) < slot:
withState(state):
withEpochInfo(state.data, info):
advance_slot(
cfg, state.data, state.root, flags, cache, rewards)
cfg, state.data, state.root, flags, cache, info)
if skipLastStateRootCalculation notin flags or
state.data.slot < slot:
@ -334,7 +334,7 @@ proc state_transition*(
phase0.TrustedSignedBeaconBlock | altair.SignedBeaconBlock |
altair.TrustedSignedBeaconBlock | merge.TrustedSignedBeaconBlock |
merge.SignedBeaconBlock,
cache: var StateCache, rewards: var RewardInfo, flags: UpdateFlags,
cache: var StateCache, info: var ForkedEpochInfo, flags: UpdateFlags,
rollback: RollbackForkedHashedProc): bool {.nbench.} =
## Apply a block to the state, advancing the slot counter as necessary. The
## given state must be of a lower slot, or, in case the `slotProcessed` flag
@ -352,7 +352,7 @@ proc state_transition*(
## object should be rolled back to a consistent state. If the transition fails
## before the state has been updated, `rollback` will not be called.
if not process_slots(
cfg, state, signedBlock.message.slot, cache, rewards,
cfg, state, signedBlock.message.slot, cache, info,
flags + {skipLastStateRootCalculation}):
return false
state_transition_block(

View File

@ -380,19 +380,14 @@ proc process_voluntary_exit*(
proc process_operations(cfg: RuntimeConfig,
state: var SomeBeaconState,
body: SomeSomeBeaconBlockBody,
base_reward_per_increment: Gwei,
flags: UpdateFlags,
cache: var StateCache): Result[void, cstring] {.nbench.} =
# Verify that outstanding deposits are processed up to the maximum number of
# deposits
template base_reward_per_increment(state: phase0.BeaconState): Gwei = 0.Gwei
template base_reward_per_increment(
state: altair.BeaconState | merge.BeaconState): Gwei =
get_base_reward_per_increment(state, cache)
let
req_deposits = min(MAX_DEPOSITS,
state.eth1_data.deposit_count - state.eth1_deposit_index)
generalized_base_reward_per_increment = base_reward_per_increment(state)
if state.eth1_data.deposit_count < state.eth1_deposit_index or
body.deposits.lenu64 != req_deposits:
@ -403,7 +398,7 @@ proc process_operations(cfg: RuntimeConfig,
for op in body.attester_slashings:
? process_attester_slashing(cfg, state, op, flags, cache)
for op in body.attestations:
? process_attestation(state, op, flags, generalized_base_reward_per_increment, cache)
? process_attestation(state, op, flags, base_reward_per_increment, cache)
for op in body.deposits:
? process_deposit(cfg, state, op, flags)
for op in body.voluntary_exits:
@ -413,7 +408,8 @@ proc process_operations(cfg: RuntimeConfig,
# https://github.com/ethereum/consensus-specs/blob/v1.1.0-alpha.6/specs/altair/beacon-chain.md#sync-committee-processing
proc process_sync_aggregate*(
state: var (altair.BeaconState | merge.BeaconState), aggregate: SyncAggregate, cache: var StateCache):
state: var (altair.BeaconState | merge.BeaconState),
aggregate: SyncAggregate, total_active_balance: Gwei, cache: var StateCache):
Result[void, cstring] {.nbench.} =
# Verify sync committee aggregate signature signing over the previous slot
# block root
@ -441,11 +437,20 @@ proc process_sync_aggregate*(
# Compute participant and proposer rewards
let
total_active_increments = get_total_active_balance(state, cache) div EFFECTIVE_BALANCE_INCREMENT
total_base_rewards = get_base_reward_per_increment(state, cache) * total_active_increments
max_participant_rewards = total_base_rewards * SYNC_REWARD_WEIGHT div WEIGHT_DENOMINATOR div SLOTS_PER_EPOCH
total_active_increments =
total_active_balance div EFFECTIVE_BALANCE_INCREMENT
total_base_rewards =
get_base_reward_per_increment(total_active_balance) * total_active_increments
max_participant_rewards =
total_base_rewards * SYNC_REWARD_WEIGHT div WEIGHT_DENOMINATOR div SLOTS_PER_EPOCH
participant_reward = max_participant_rewards div SYNC_COMMITTEE_SIZE
proposer_reward = participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
proposer_reward =
participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
proposer_index = get_beacon_proposer_index(state, cache)
if proposer_index.isNone:
# We're processing a block, so this can't happen, in theory (!)
return err("process_sync_aggregate: no proposer")
# Apply participant and proposer rewards
@ -462,8 +467,6 @@ proc process_sync_aggregate*(
for i in 0 ..< min(
state.current_sync_committee.pubkeys.len,
aggregate.sync_committee_bits.len):
let proposer_index = get_beacon_proposer_index(state, cache)
if proposer_index.isSome:
let participant_index =
pubkeyIndices.getOrDefault(state.current_sync_committee.pubkeys[i])
if aggregate.sync_committee_bits[i]:
@ -471,8 +474,6 @@ proc process_sync_aggregate*(
increase_balance(state, proposer_index.get, proposer_reward)
else:
decrease_balance(state, participant_index, participant_reward)
else:
warn "process_sync_aggregate: get_beacon_proposer_index failed"
ok()
@ -561,7 +562,7 @@ proc process_block*(
? process_block_header(state, blck, flags, cache)
? process_randao(state, blck.body, flags, cache)
? process_eth1_data(state, blck.body)
? process_operations(cfg, state, blck.body, flags, cache)
? process_operations(cfg, state, blck.body, 0.Gwei, flags, cache)
ok()
@ -593,8 +594,16 @@ proc process_block*(
? process_block_header(state, blck, flags, cache)
? process_randao(state, blck.body, flags, cache)
? process_eth1_data(state, blck.body)
? process_operations(cfg, state, blck.body, flags, cache)
? process_sync_aggregate(state, blck.body.sync_aggregate, cache) # [New in Altair]
let
total_active_balance = get_total_active_balance(state, cache)
base_reward_per_increment =
get_base_reward_per_increment(total_active_balance)
? process_operations(
cfg, state, blck.body, base_reward_per_increment, flags, cache)
? process_sync_aggregate(
state, blck.body.sync_aggregate, total_active_balance, cache) # [New in Altair]
ok()
@ -617,8 +626,15 @@ proc process_block*(
func(_: ExecutionPayload): bool = true)
? process_randao(state, blck.body, flags, cache)
? process_eth1_data(state, blck.body)
? process_operations(cfg, state, blck.body, flags, cache)
? process_sync_aggregate(state, blck.body.sync_aggregate, cache)
let
total_active_balance = get_total_active_balance(state, cache)
base_reward_per_increment =
get_base_reward_per_increment(total_active_balance)
? process_operations(
cfg, state, blck.body, base_reward_per_increment, flags, cache)
? process_sync_aggregate(
state, blck.body.sync_aggregate, total_active_balance, cache)
ok()

View File

@ -52,9 +52,9 @@ template previous_epoch_target_attesters*(v: TotalBalances): Gwei =
template previous_epoch_head_attesters*(v: TotalBalances): Gwei =
max(EFFECTIVE_BALANCE_INCREMENT, v.previous_epoch_head_attesters_raw)
func init*(rewards: var RewardInfo, state: SomeBeaconState) =
rewards.total_balances = TotalBalances()
rewards.statuses.setLen(state.validators.len)
func init*(info: var phase0.EpochInfo, state: phase0.BeaconState) =
info.total_balances = TotalBalances()
info.statuses.setLen(state.validators.len)
for i in 0..<state.validators.len:
let v = unsafeAddr state.validators[i]
@ -66,13 +66,13 @@ func init*(rewards: var RewardInfo, state: SomeBeaconState) =
flags.incl RewardFlags.canWithdrawInCurrentEpoch
if v[].is_active_validator(state.get_current_epoch()):
rewards.total_balances.current_epoch_raw += v[].effective_balance
info.total_balances.current_epoch_raw += v[].effective_balance
if v[].is_active_validator(state.get_previous_epoch()):
flags.incl RewardFlags.isActiveInPreviousEpoch
rewards.total_balances.previous_epoch_raw += v[].effective_balance
info.total_balances.previous_epoch_raw += v[].effective_balance
rewards.statuses[i] = RewardStatus(
info.statuses[i] = RewardStatus(
current_epoch_effective_balance: v[].effective_balance,
flags: flags,
)
@ -82,7 +82,7 @@ func add(a: var RewardDelta, b: RewardDelta) =
a.penalties += b.penalties
func process_attestation(
self: var RewardInfo, state: phase0.BeaconState, a: PendingAttestation,
info: var phase0.EpochInfo, state: phase0.BeaconState, a: PendingAttestation,
cache: var StateCache) =
# Collect information about the attestation
var
@ -110,12 +110,12 @@ func process_attestation(
# Update the cache for all participants
for validator_index in get_attesting_indices(
state, a.data, a.aggregation_bits, cache):
template v(): untyped = self.statuses[validator_index]
template v(): untyped = info.statuses[validator_index]
v.flags = v.flags + flags
if is_previous_epoch_attester.isSome:
if v.isPreviousEpochAttester.isSome:
if v.is_previous_epoch_attester.isSome:
if is_previous_epoch_attester.get().delay <
v.is_previous_epoch_attester.get().delay:
v.is_previous_epoch_attester = is_previous_epoch_attester
@ -123,47 +123,51 @@ func process_attestation(
v.is_previous_epoch_attester = is_previous_epoch_attester
func process_attestations*(
self: var RewardInfo, state: phase0.BeaconState, cache: var StateCache) =
info: var phase0.EpochInfo, state: phase0.BeaconState, cache: var StateCache) =
# Walk state attestations and update the status information
for a in state.previous_epoch_attestations:
process_attestation(self, state, a, cache)
process_attestation(info, state, a, cache)
for a in state.current_epoch_attestations:
process_attestation(self, state, a, cache)
process_attestation(info, state, a, cache)
for idx, v in self.statuses:
for idx, v in info.statuses:
if v.flags.contains RewardFlags.isSlashed:
continue
let validator_balance = state.validators[idx].effective_balance
if v.flags.contains RewardFlags.isCurrentEpochAttester:
self.total_balances.current_epoch_attesters_raw += validator_balance
info.total_balances.current_epoch_attesters_raw += validator_balance
if v.flags.contains RewardFlags.isCurrentEpochTargetAttester:
self.total_balances.current_epoch_target_attesters_raw += validator_balance
info.total_balances.current_epoch_target_attesters_raw += validator_balance
if v.is_previous_epoch_attester.isSome():
self.total_balances.previous_epoch_attesters_raw += validator_balance
info.total_balances.previous_epoch_attesters_raw += validator_balance
if v.flags.contains RewardFlags.isPreviousEpochTargetAttester:
self.total_balances.previous_epoch_target_attesters_raw += validator_balance
info.total_balances.previous_epoch_target_attesters_raw += validator_balance
if v.flags.contains RewardFlags.isPreviousEpochHeadAttester:
self.total_balances.previous_epoch_head_attesters_raw += validator_balance
info.total_balances.previous_epoch_head_attesters_raw += validator_balance
# https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/phase0/beacon-chain.md#helpers
# get_eligible_validator_indices
func is_eligible_validator*(validator: RewardStatus): bool =
validator.flags.contains(RewardFlags.isActiveInPreviousEpoch) or
(validator.flags.contains(RewardFlags.isSlashed) and not
(validator.flags.contains RewardFlags.canWithdrawInCurrentEpoch))
func is_eligible_validator*(validator: Validator, previous_epoch: Epoch): bool =
is_active_validator(validator, previous_epoch) or
(validator.slashed and previous_epoch + 1 < validator.withdrawable_epoch)
func is_eligible_validator*(validator: ParticipationInfo): bool =
validator.flags.contains(ParticipationFlag.eligible)
# Spec
# --------------------------------------------------------
type
UnslashedParticipatingBalances = object
previous_epoch: array[PARTICIPATION_FLAG_WEIGHTS.len, Gwei]
current_epoch_TIMELY_TARGET: Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_unslashed_participating_indices
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#get_total_balance
func get_unslashed_participating_balances*(state: altair.BeaconState | merge.BeaconState):
@ -174,17 +178,25 @@ func get_unslashed_participating_balances*(state: altair.BeaconState | merge.Bea
var res: UnslashedParticipatingBalances
for validator_index in 0'u64 ..< state.validators.lenu64:
let
is_active_current_epoch = is_active_validator(
state.validators[validator_index], current_epoch)
validator_effective_balance =
state.validators[validator_index].effective_balance
if is_active_current_epoch:
# Active balance counted also for slashed validators
res.current_epoch += validator_effective_balance
if state.validators[validator_index].slashed:
continue
let
is_active_previous_epoch = is_active_validator(
state.validators[validator_index], previous_epoch)
is_active_current_epoch = is_active_validator(
state.validators[validator_index], current_epoch)
previous_epoch_participation =
state.previous_epoch_participation[validator_index]
validator_effective_balance =
state.validators[validator_index].effective_balance
if is_active_previous_epoch:
for flag_index in 0 ..< PARTICIPATION_FLAG_WEIGHTS.len:
if has_flag(previous_epoch_participation, flag_index):
@ -203,6 +215,8 @@ func get_unslashed_participating_balances*(state: altair.BeaconState | merge.Bea
res.current_epoch_TIMELY_TARGET =
max(EFFECTIVE_BALANCE_INCREMENT, res.current_epoch_TIMELY_TARGET)
res.current_epoch = max(EFFECTIVE_BALANCE_INCREMENT, res.current_epoch)
res
func is_unslashed_participating_index(
@ -414,8 +428,7 @@ proc weigh_justification_and_finalization(state: var (altair.BeaconState | merge
checkpoint = shortLog(state.finalized_checkpoint)
proc process_justification_and_finalization*(state: var (altair.BeaconState | merge.BeaconState),
total_active_balance: Gwei,
unslashed_participating_balances: UnslashedParticipatingBalances,
balances: UnslashedParticipatingBalances,
flags: UpdateFlags = {}) {.nbench.} =
# Initial FFG checkpoint values have a `0x00` stub for `root`.
# Skip FFG updates in the first two epochs to avoid corner cases that might
@ -428,9 +441,9 @@ proc process_justification_and_finalization*(state: var (altair.BeaconState | me
# historical reasons.
# https://github.com/ethereum/consensus-specs/blob/v1.1.0-beta.2/specs/phase0/beacon-chain.md#justification-and-finalization
weigh_justification_and_finalization(
state, total_active_balance,
unslashed_participating_balances.previous_epoch[TIMELY_TARGET_FLAG_INDEX],
unslashed_participating_balances.current_epoch_TIMELY_TARGET, flags)
state, balances.current_epoch,
balances.previous_epoch[TIMELY_TARGET_FLAG_INDEX],
balances.current_epoch_TIMELY_TARGET, flags)
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#helpers
func get_base_reward_sqrt*(state: phase0.BeaconState, index: ValidatorIndex,
@ -555,18 +568,18 @@ func get_inactivity_penalty_delta*(validator: RewardStatus,
delta
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#get_attestation_deltas
func get_attestation_deltas(state: phase0.BeaconState, rewards: var RewardInfo) =
func get_attestation_deltas(state: phase0.BeaconState, info: var phase0.EpochInfo) =
## Update rewards with attestation reward/penalty deltas for each validator.
let
finality_delay = get_finality_delay(state)
total_balance = rewards.total_balances.current_epoch
total_balance = info.total_balances.current_epoch
total_balance_sqrt = integer_squareroot(total_balance)
# Filter out ineligible validators. All sub-functions of the spec do this
# except for `get_inclusion_delay_deltas`. It's safe to do so here because
# any validator that is in the unslashed indices of the matching source
# attestations is active, and therefore eligible.
for index, validator in rewards.statuses.mpairs():
for index, validator in info.statuses.mpairs():
if not is_eligible_validator(validator):
continue
@ -576,11 +589,11 @@ func get_attestation_deltas(state: phase0.BeaconState, rewards: var RewardInfo)
let
source_delta = get_source_delta(
validator, base_reward, rewards.total_balances, finality_delay)
validator, base_reward, info.total_balances, finality_delay)
target_delta = get_target_delta(
validator, base_reward, rewards.total_balances, finality_delay)
validator, base_reward, info.total_balances, finality_delay)
head_delta = get_head_delta(
validator, base_reward, rewards.total_balances, finality_delay)
validator, base_reward, info.total_balances, finality_delay)
(inclusion_delay_delta, proposer_delta) =
get_inclusion_delay_delta(validator, base_reward)
inactivity_delta = get_inactivity_penalty_delta(
@ -594,67 +607,66 @@ func get_attestation_deltas(state: phase0.BeaconState, rewards: var RewardInfo)
if proposer_delta.isSome:
let proposer_index = proposer_delta.get()[0]
if proposer_index < rewards.statuses.lenu64:
rewards.statuses[proposer_index].delta.add(
if proposer_index < info.statuses.lenu64:
info.statuses[proposer_index].delta.add(
proposer_delta.get()[1])
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_base_reward_per_increment
func get_base_reward_per_increment(
state: altair.BeaconState | merge.BeaconState, total_active_balance_sqrt: uint64): Gwei =
EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR div total_active_balance_sqrt
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_base_reward
func get_base_reward(
func get_base_reward_increment(
state: altair.BeaconState | merge.BeaconState, index: ValidatorIndex,
total_active_balance_sqrt: uint64): Gwei =
base_reward_per_increment: Gwei): Gwei =
## Return the base reward for the validator defined by ``index`` with respect
## to the current ``state``.
let increments =
state.validators[index].effective_balance div EFFECTIVE_BALANCE_INCREMENT
increments * get_base_reward_per_increment(state, total_active_balance_sqrt)
increments * base_reward_per_increment
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_flag_index_deltas
iterator get_flag_index_deltas(
state: altair.BeaconState | merge.BeaconState, flag_index: int, total_active_balance: Gwei,
total_active_balance_sqrt: uint64,
unslashed_participating_balances: UnslashedParticipatingBalances):
(ValidatorIndex, Gwei, Gwei) =
state: altair.BeaconState | merge.BeaconState, flag_index: int,
base_reward_per_increment: Gwei,
info: altair.EpochInfo):
(ValidatorIndex, RewardDelta) =
## Return the deltas for a given ``flag_index`` by scanning through the
## participation flags.
let
previous_epoch = get_previous_epoch(state)
weight = PARTICIPATION_FLAG_WEIGHTS[flag_index].uint64 # safe
unslashed_participating_balance =
unslashed_participating_balances.previous_epoch[flag_index]
info.balances.previous_epoch[flag_index]
unslashed_participating_increments =
unslashed_participating_balance div EFFECTIVE_BALANCE_INCREMENT
active_increments = total_active_balance div EFFECTIVE_BALANCE_INCREMENT
active_increments =
info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT
for index in 0 ..< state.validators.len:
# TODO Obviously not great
let v = state.validators[index]
if not (is_active_validator(v, previous_epoch) or
(v.slashed and previous_epoch + 1 < v.withdrawable_epoch)):
if not is_eligible_validator(info.validators[index]):
continue
template vidx: ValidatorIndex = index.ValidatorIndex
let base_reward = get_base_reward(state, vidx, total_active_balance_sqrt)
let base_reward = get_base_reward_increment(state, vidx, base_reward_per_increment)
yield
if is_unslashed_participating_index(
state, flag_index, previous_epoch, vidx):
if not is_in_inactivity_leak(state):
let reward_numerator =
base_reward * weight * unslashed_participating_increments
(vidx, reward_numerator div (active_increments * WEIGHT_DENOMINATOR), 0.Gwei)
(vidx, RewardDelta(
rewards: reward_numerator div (active_increments * WEIGHT_DENOMINATOR),
penalties: 0.Gwei))
else:
(vidx, 0.Gwei, 0.Gwei)
(vidx, RewardDelta(rewards: 0.Gwei, penalties: 0.Gwei))
elif flag_index != TIMELY_HEAD_FLAG_INDEX:
(vidx, 0.Gwei, base_reward * weight div WEIGHT_DENOMINATOR)
(vidx, RewardDelta(
rewards: 0.Gwei,
penalties: base_reward * weight div WEIGHT_DENOMINATOR))
else:
(vidx, 0.Gwei, 0.Gwei)
(vidx, RewardDelta(rewards: 0.Gwei, penalties: 0.Gwei))
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas
iterator get_inactivity_penalty_deltas(cfg: RuntimeConfig, state: altair.BeaconState | merge.BeaconState):
iterator get_inactivity_penalty_deltas(
cfg: RuntimeConfig, state: altair.BeaconState | merge.BeaconState,
info: altair.EpochInfo):
(ValidatorIndex, Gwei) =
## Return the inactivity penalty deltas by considering timely target
## participation flags and inactivity scores.
@ -664,10 +676,7 @@ iterator get_inactivity_penalty_deltas(cfg: RuntimeConfig, state: altair.BeaconS
previous_epoch = get_previous_epoch(state)
for index in 0 ..< state.validators.len:
# get_eligible_validator_indices()
let v = state.validators[index]
if not (is_active_validator(v, previous_epoch) or
(v.slashed and previous_epoch + 1 < v.withdrawable_epoch)):
if not is_eligible_validator(info.validators[index]):
continue
template vidx: untyped = index.ValidatorIndex
@ -678,24 +687,24 @@ iterator get_inactivity_penalty_deltas(cfg: RuntimeConfig, state: altair.BeaconS
state.inactivity_scores[index]
yield (vidx, Gwei(penalty_numerator div penalty_denominator))
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#rewards-and-penalties
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#rewards-and-penalties-1
func process_rewards_and_penalties(
state: var phase0.BeaconState, rewards: var RewardInfo) {.nbench.} =
state: var phase0.BeaconState, info: var phase0.EpochInfo) {.nbench.} =
# No rewards are applied at the end of `GENESIS_EPOCH` because rewards are
# for work done in the previous epoch
doAssert rewards.statuses.len == state.validators.len
doAssert info.statuses.len == state.validators.len
if get_current_epoch(state) == GENESIS_EPOCH:
return
get_attestation_deltas(state, rewards)
get_attestation_deltas(state, info)
# Here almost all balances are updated (assuming most validators are active) -
# clearing the cache becomes a bottleneck if done item by item because of the
# recursive nature of cache clearing - instead, we clear the whole cache then
# update the raw list directly
state.balances.clearCache()
for idx, v in rewards.statuses:
for idx, v in info.statuses:
var balance = state.balances.asSeq()[idx]
increase_balance(balance, v.delta.rewards)
decrease_balance(balance, v.delta.penalties)
@ -704,39 +713,35 @@ func process_rewards_and_penalties(
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#rewards-and-penalties
func process_rewards_and_penalties(
cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState),
total_active_balance: Gwei,
unslashed_participating_balances: UnslashedParticipatingBalances)
info: var altair.EpochInfo)
{.nbench.} =
if get_current_epoch(state) == GENESIS_EPOCH:
return
# TODO assess relevance of missing phase0 optimizations
# TODO probably both of these aren't necessary, but need to verify
# commutativity & associativity. Probably, since active validators
# get ejected at 16 Gwei, either it avoids over or underflow there
# or doesn't receive rewards or penalties so both are 0. But start
# with this.
var
rewards = newSeq[Gwei](state.validators.len)
penalties = newSeq[Gwei](state.validators.len)
let total_active_balance_sqrt = integer_squareroot(total_active_balance)
let
total_active_balance = info.balances.current_epoch
base_reward_per_increment = get_base_reward_per_increment(
total_active_balance)
doAssert state.validators.len() == info.validators.len()
for flag_index in 0 ..< PARTICIPATION_FLAG_WEIGHTS.len:
for validator_index, reward, penalty in get_flag_index_deltas(
state, flag_index, total_active_balance, total_active_balance_sqrt,
unslashed_participating_balances):
rewards[validator_index] += reward
penalties[validator_index] += penalty
for validator_index, delta in get_flag_index_deltas(
state, flag_index, base_reward_per_increment, info):
info.validators[validator_index].delta.add(delta)
for validator_index, penalty in get_inactivity_penalty_deltas(cfg, state):
penalties[validator_index] += penalty
for validator_index, penalty in get_inactivity_penalty_deltas(
cfg, state, info):
info.validators[validator_index].delta.penalties += penalty
# Here almost all balances are updated (assuming most validators are active) -
# clearing the cache becomes a bottleneck if done item by item because of the
# recursive nature of cache clearing - instead, we clear the whole cache then
# update the raw list directly
state.balances.clearCache()
for index in 0 ..< len(state.validators):
var balance = state.balances.asSeq()[index]
increase_balance(balance, rewards[index])
decrease_balance(balance, penalties[index])
increase_balance(balance, info.validators[index].delta.rewards)
decrease_balance(balance, info.validators[index].delta.penalties)
state.balances.asSeq()[index] = balance
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#registry-updates
@ -905,21 +910,20 @@ proc process_sync_committee_updates*(state: var (altair.BeaconState | merge.Beac
state.next_sync_committee = get_next_sync_committee(state)
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#inactivity-scores
func process_inactivity_updates*(cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState)) =
func process_inactivity_updates*(
cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState),
info: altair.EpochInfo) =
# Score updates based on previous epoch participation, skip genesis epoch
if get_current_epoch(state) == GENESIS_EPOCH:
return
# TODO actually implement get_eligible_validator_indices() as an iterator
let
previous_epoch = get_previous_epoch(state) # get_eligible_validator_indices()
not_in_inactivity_leak = not is_in_inactivity_leak(state)
state.inactivity_scores.clearCache()
for index in 0'u64 ..< state.validators.lenu64:
# get_eligible_validator_indices()
let v = state.validators.asSeq()[index]
if not (is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)):
if not is_eligible_validator(info.validators[index]):
continue
# Increase the inactivity score of inactive validators
@ -940,16 +944,15 @@ func process_inactivity_updates*(cfg: RuntimeConfig, state: var (altair.BeaconSt
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#epoch-processing
proc process_epoch*(
cfg: RuntimeConfig, state: var phase0.BeaconState, flags: UpdateFlags,
cache: var StateCache, rewards: var RewardInfo) {.nbench.} =
cache: var StateCache, info: var phase0.EpochInfo) {.nbench.} =
let currentEpoch = get_current_epoch(state)
trace "process_epoch",
current_epoch = currentEpoch
init(rewards, state)
rewards.process_attestations(state, cache)
init(info, state)
info.process_attestations(state, cache)
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#justification-and-finalization
process_justification_and_finalization(
state, rewards.total_balances, flags)
process_justification_and_finalization(state, info.total_balances, flags)
# state.slot hasn't been incremented yet.
if verifyFinalization in flags and currentEpoch >= 2:
@ -962,13 +965,13 @@ proc process_epoch*(
doAssert state.finalized_checkpoint.epoch + 3 >= currentEpoch
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#rewards-and-penalties-1
process_rewards_and_penalties(state, rewards)
process_rewards_and_penalties(state, info)
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#registry-updates
process_registry_updates(cfg, state, cache)
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#slashings
process_slashings(state, rewards.total_balances.current_epoch)
process_slashings(state, info.total_balances.current_epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#eth1-data-votes-updates
process_eth1_data_reset(state)
@ -988,26 +991,40 @@ proc process_epoch*(
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#participation-records-rotation
process_participation_record_updates(state)
func init*(
info: var altair.EpochInfo,
state: altair.BeaconState | merge.BeaconState) =
# init participation, overwriting the full structure
info.balances = get_unslashed_participating_balances(state)
info.validators.setLen(state.validators.len())
let previous_epoch = get_previous_epoch(state)
for index in 0..<state.validators.len():
var flags: set[ParticipationFlag]
if is_eligible_validator(state.validators[index], previous_epoch):
flags.incl ParticipationFlag.eligible
info.validators[index] = ParticipationInfo(
flags: flags
)
func init*(
T: type altair.EpochInfo, state: altair.BeaconState | merge.BeaconState): T =
init(result, state)
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#epoch-processing
proc process_epoch*(
cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState),
flags: UpdateFlags, cache: var StateCache, rewards: var RewardInfo)
flags: UpdateFlags, cache: var StateCache, info: var altair.EpochInfo)
{.nbench.} =
let currentEpoch = get_current_epoch(state)
trace "process_epoch",
current_epoch = currentEpoch
init(rewards, state)
when false:
rewards.process_attestations(state, cache)
let
total_active_balance = state.get_total_active_balance(cache)
unslashed_participating_balances =
state.get_unslashed_participating_balances()
info.init(state)
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#justification-and-finalization
process_justification_and_finalization(
state, total_active_balance, unslashed_participating_balances, flags)
process_justification_and_finalization(state, info.balances, flags)
# state.slot hasn't been incremented yet.
if verifyFinalization in flags and currentEpoch >= 2:
@ -1019,17 +1036,16 @@ proc process_epoch*(
# the finalization rules triggered.
doAssert state.finalized_checkpoint.epoch + 3 >= currentEpoch
process_inactivity_updates(cfg, state) # [New in Altair]
process_inactivity_updates(cfg, state, info) # [New in Altair]
# https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/phase0/beacon-chain.md#rewards-and-penalties-1
process_rewards_and_penalties(
cfg, state, total_active_balance, unslashed_participating_balances)
process_rewards_and_penalties(cfg, state, info)
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#registry-updates
process_registry_updates(cfg, state, cache)
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#slashings
process_slashings(state, total_active_balance)
process_slashings(state, info.balances.current_epoch)
process_eth1_data_reset(state)

View File

@ -145,7 +145,7 @@ proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, ski
let prePath = dir / preState & ".ssz"
var
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
echo "Running: ", prePath
let state = (ref ForkedHashedBeaconState)(
@ -162,14 +162,14 @@ proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, ski
let flags = if skipBLS: {skipBlsValidation}
else: {}
let success = state_transition(
defaultRuntimeConfig, state[], signedBlock, cache, rewards, flags,
defaultRuntimeConfig, state[], signedBlock, cache, info, flags,
noRollback)
echo "State transition status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"
proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
var
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
let prePath = dir / preState & ".ssz"
echo "Running: ", prePath
@ -182,7 +182,7 @@ proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
# Shouldn't necessarily assert, because nbench can run test suite
discard process_slots(
defaultRuntimeConfig, state[], getStateField(state[], slot) + numSlots,
cache, rewards, {})
cache, info, {})
template processEpochScenarioImpl(
dir, preState: string,
@ -217,6 +217,22 @@ proc process_deposit(state: var phase0.BeaconState;
flags: UpdateFlags = {}): Result[void, cstring] =
process_deposit(defaultRuntimeConfig, state, deposit, flags)
proc bench_process_justification_and_finalization(state: var phase0.BeaconState) =
var
cache: StateCache
info: phase0.EpochInfo
info.init(state)
info.process_attestations(state, cache)
process_justification_and_finalization(state, info.total_balances)
func bench_process_slashings(state: var phase0.BeaconState) =
var
cache: StateCache
info: phase0.EpochInfo
info.init(state)
info.process_attestations(state, cache)
process_slashings(state, info.total_balances.current_epoch)
template processBlockScenarioImpl(
dir, preState: string, skipBLS: bool,
transitionFn, paramName: untyped,
@ -258,13 +274,13 @@ template genProcessBlockScenario(name, transitionFn,
processBlockScenarioImpl(dir, preState, skipBLS, transitionFn, paramName, ref ConsensusObjectType)
genProcessEpochScenario(runProcessJustificationFinalization,
process_justification_and_finalization)
bench_process_justification_and_finalization)
genProcessEpochScenario(runProcessRegistryUpdates,
process_registry_updates)
genProcessEpochScenario(runProcessSlashings,
process_slashings)
bench_process_slashings)
genProcessBlockScenario(runProcessBlockHeader,
process_block_header,

View File

@ -93,9 +93,9 @@ proc doTransition(conf: NcliConf) =
var
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
if not state_transition(getRuntimeConfig(conf.eth2Network),
stateY[], blckX, cache, rewards, flags, noRollback):
stateY[], blckX, cache, info, flags, noRollback):
error "State transition failed"
quit 1
else:
@ -121,13 +121,13 @@ proc doSlots(conf: NcliConf) =
var
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
for i in 0'u64..<conf.slot:
let isEpoch = (getStateField(stateY[], slot) + 1).isEpoch
withTimer(timers[if isEpoch: tApplyEpochSlot else: tApplySlot]):
doAssert process_slots(
defaultRuntimeConfig, stateY[], getStateField(stateY[], slot) + 1,
cache, rewards, {})
cache, info, {})
withTimer(timers[tSaveState]):
saveSSZFile(conf.postState, stateY[])

View File

@ -189,7 +189,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
var
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
loadedState = new phase0.BeaconState
withTimer(timers[tLoadState]):
@ -202,7 +202,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
let ok = process_slots(
dag.cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
rewards, {})
info, {})
doAssert ok, "Slot processing can't fail with correct inputs"
var start = Moment.now()
@ -458,7 +458,7 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
perfs = newSeq[ValidatorPerformance](
getStateField(dag.headState.data, validators).len())
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
blck: phase0.TrustedSignedBeaconBlock
doAssert blockRefs.len() > 0, "Must select at least one block"
@ -470,7 +470,7 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
dag.updateStateData(
state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
func processEpoch() =
proc processEpoch() =
let
prev_epoch_target_slot =
state[].data.get_previous_epoch().compute_start_slot_at_epoch()
@ -490,8 +490,10 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
prev_epoch_target_slot, committee_index.CommitteeIndex, cache):
indices.incl(validator_index)
indices
for i, s in rewards.statuses.pairs():
case info.kind
of EpochInfoFork.Phase0:
template info: untyped = info.phase0Info
for i, s in info.statuses.pairs():
let perf = addr perfs[i]
if RewardFlags.isActiveInPreviousEpoch in s.flags:
if s.is_previous_epoch_attester.isSome():
@ -519,6 +521,8 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
else:
perf.attestation_misses += 1;
of EpochInfoFork.Altair:
echo "TODO altair"
for bi in 0..<blockRefs.len:
blck = db.getBlock(blockRefs[blockRefs.len - bi - 1].root).get()
@ -529,7 +533,7 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
if nextSlot == blck.message.slot: {skipLastStateRootCalculation}
else: {}
let ok = process_slots(
dag.cfg, state[].data, nextSlot, cache, rewards, flags)
dag.cfg, state[].data, nextSlot, cache, info, flags)
doAssert ok, "Slot processing can't fail with correct inputs"
if getStateField(state[].data, slot).isEpoch():
@ -544,7 +548,7 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
while getStateField(state[].data, slot) < ends:
let ok = process_slots(
dag.cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
rewards, {})
info, {})
doAssert ok, "Slot processing can't fail with correct inputs"
if getStateField(state[].data, slot).isEpoch():
@ -686,7 +690,7 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
var
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
blck: phase0.TrustedSignedBeaconBlock
let
@ -713,18 +717,21 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
if not inTxn:
outDb.exec("BEGIN TRANSACTION;").expect("DB")
inTxn = true
case info.kind
of EpochInfoFork.Phase0:
template info: untyped = info.phase0Info
insertEpochInfo.exec(
(getStateField(state[].data, slot).epoch.int64,
rewards.total_balances.current_epoch_raw.int64,
rewards.total_balances.previous_epoch_raw.int64,
rewards.total_balances.current_epoch_attesters_raw.int64,
rewards.total_balances.current_epoch_target_attesters_raw.int64,
rewards.total_balances.previous_epoch_attesters_raw.int64,
rewards.total_balances.previous_epoch_target_attesters_raw.int64,
rewards.total_balances.previous_epoch_head_attesters_raw.int64)
info.total_balances.current_epoch_raw.int64,
info.total_balances.previous_epoch_raw.int64,
info.total_balances.current_epoch_attesters_raw.int64,
info.total_balances.current_epoch_target_attesters_raw.int64,
info.total_balances.previous_epoch_attesters_raw.int64,
info.total_balances.previous_epoch_target_attesters_raw.int64,
info.total_balances.previous_epoch_head_attesters_raw.int64)
).expect("DB")
for index, status in rewards.statuses.pairs():
for index, status in info.statuses.pairs():
if not is_eligible_validator(status):
continue
let
@ -753,6 +760,9 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
int64(target_attester), # Target delta
int64(head_attester), # Head delta
delay)).expect("DB")
of EpochInfoFork.Altair:
echo "TODO altair support"
if getStateField(state[].data, slot).epoch.int64 mod 16 == 0:
inTxn = false
outDb.exec("COMMIT;").expect("DB")
@ -766,7 +776,7 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
if nextSlot == blck.message.slot: {skipLastStateRootCalculation}
else: {}
let ok = process_slots(cfg, state[].data, nextSlot, cache, rewards, flags)
let ok = process_slots(cfg, state[].data, nextSlot, cache, info, flags)
doAssert ok, "Slot processing can't fail with correct inputs"
if getStateField(state[].data, slot).isEpoch():
@ -782,7 +792,7 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
while getStateField(state[].data, slot) <= ends:
let ok = process_slots(
cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
rewards, {})
info, {})
doAssert ok, "Slot processing can't fail with correct inputs"
if getStateField(state[].data, slot).isEpoch():

View File

@ -118,10 +118,10 @@ proc nfuzz_block(input: openArray[byte], xoutput: ptr byte,
data: data.state, root: hash_tree_root(data.state)),
beaconStateFork: forkPhase0)
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
result =
state_transition(
cfg, fhState[], blck, cache, rewards, flags, rollback)
cfg, fhState[], blck, cache, info, flags, rollback)
data.state = fhState.hbsPhase0.data
decodeAndProcess(BlockInput):

View File

@ -7,6 +7,9 @@
{.used.}
import
chronicles
import
./test_fixture_fork,
./test_fixture_merkle_single_proof,

View File

@ -44,10 +44,13 @@ proc runTest(identifier: string) =
let
attestation = parseTest(
testDir/"attestation.ssz_snappy", SSZ, Attestation)
total_active_balance = get_total_active_balance(preState[], cache)
base_reward_per_increment =
get_base_reward_per_increment(total_active_balance)
done = process_attestation(
preState[], attestation, {},
get_base_reward_per_increment(preState[], cache),
cache)
preState[], attestation, {}, base_reward_per_increment, cache)
if existsFile(testDir/"post.ssz_snappy"):
let postState =

View File

@ -12,6 +12,7 @@ import
os,
# Utilities
stew/results,
chronicles,
# Beacon chain internals
../../../beacon_chain/spec/state_transition_block,
../../../beacon_chain/spec/datatypes/altair,

View File

@ -12,6 +12,7 @@ import
os,
# Utilities
stew/results,
chronicles,
# Beacon chain internals
../../../beacon_chain/spec/[beaconstate, presets, state_transition_block],
../../../beacon_chain/spec/datatypes/altair,

View File

@ -12,6 +12,7 @@ import
os,
# Utilities
stew/results,
chronicles,
# Beacon chain internals
../../../beacon_chain/spec/[beaconstate, state_transition_block],
../../../beacon_chain/spec/datatypes/altair,
@ -20,9 +21,6 @@ import
../fixtures_utils,
../../helpers/debug_state
when isMainModule:
import chronicles # or some random compile error happens...
const OpSyncAggregateDir = SszTestsDir/const_preset/"altair"/"operations"/"sync_aggregate"/"pyspec_tests"
proc runTest(dir, identifier: string) =
@ -45,8 +43,9 @@ proc runTest(dir, identifier: string) =
let
syncAggregate = parseTest(
testDir/"sync_aggregate.ssz_snappy", SSZ, SyncAggregate)
total_active_balance = get_total_active_balance(preState[], cache)
done = process_sync_aggregate(
preState[], syncAggregate, cache)
preState[], syncAggregate, total_active_balance, cache)
if existsFile(testDir/"post.ssz_snappy"):
let postState =

View File

@ -12,6 +12,7 @@ import
os,
# Utilities
stew/results,
chronicles,
# Beacon chain internals
../../../beacon_chain/spec/state_transition_block,
../../../beacon_chain/spec/datatypes/altair,

View File

@ -36,7 +36,7 @@ proc runTest(testName, testDir, unitTestName: string) =
fhPreState = (ref ForkedHashedBeaconState)(hbsAltair: altair.HashedBeaconState(
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkAltair)
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
# In test cases with more than 10 blocks the first 10 aren't 0-prefixed,
# so purely lexicographic sorting wouldn't sort properly.
@ -46,12 +46,12 @@ proc runTest(testName, testDir, unitTestName: string) =
if hasPostState:
let success = state_transition(
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
noRollback)
doAssert success, "Failure when applying block " & $i
else:
let success = state_transition(
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
noRollback)
doAssert (i + 1 < numBlocks) or not success,
"We didn't expect these invalid blocks to be processed"

View File

@ -35,13 +35,13 @@ proc runTest(identifier: string) =
data: preState[], root: hash_tree_root(preState[])),
beaconStateFork: forkAltair)
cache = StateCache()
rewards: RewardInfo
info: ForkedEpochInfo
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, altair.BeaconState))
check:
process_slots(
defaultRuntimeConfig, fhPreState[],
getStateField(fhPreState[], slot) + num_slots, cache, rewards, {})
getStateField(fhPreState[], slot) + num_slots, cache, info, {})
getStateRoot(fhPreState[]) == postState[].hash_tree_root()
let newPreState = newClone(fhPreState.hbsAltair.data)

View File

@ -11,6 +11,7 @@ import
# Standard library
os, strutils,
# Beacon chain internals
chronicles,
../../../beacon_chain/spec/[beaconstate, presets, state_transition_epoch],
../../../beacon_chain/spec/datatypes/altair,
# Test utilities
@ -19,9 +20,10 @@ import
../test_fixture_rewards,
../../helpers/debug_state
const RootDir = SszTestsDir/const_preset/"altair"/"epoch_processing"
template runSuite(
suiteDir, testName: string, transitionProc: untyped{ident},
useCache, useTAB, useUPB: static bool = false): untyped =
suiteDir, testName: string, transitionProc: untyped): untyped =
suite "Ethereum Foundation - Altair - Epoch Processing - " & testName & preset():
for testDir in walkDirRec(suiteDir, yieldFilter = {pcDir}, checkDir = true):
@ -29,47 +31,34 @@ template runSuite(
test testName & " - " & unitTestName & preset():
# BeaconState objects are stored on the heap to avoid stack overflow
type T = altair.BeaconState
var preState = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
var preState {.inject.} = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T))
var cache {.inject, used.} = StateCache()
template state: untyped {.inject, used.} = preState[]
template cfg: untyped {.inject, used.} = defaultRuntimeConfig
doAssert not (useCache and useTAB)
when useCache:
var cache = StateCache()
when compiles(transitionProc(defaultRuntimeConfig, preState[], cache)):
transitionProc(defaultRuntimeConfig, preState[], cache)
else:
transitionProc(preState[], cache)
elif useTAB and not useUPB:
var cache = StateCache()
let total_active_balance = preState[].get_total_active_balance(cache)
transitionProc(preState[], total_active_balance)
elif useTAB and useUPB:
var cache = StateCache()
let
total_active_balance = preState[].get_total_active_balance(cache)
unslashed_participating_balances =
preState[].get_unslashed_participating_balances()
transitionProc(
preState[], total_active_balance, unslashed_participating_balances)
else:
when compiles(transitionProc(preState[])):
transitionProc(preState[])
else:
transitionProc(defaultRuntimeConfig, preState[])
transitionProc
check:
hash_tree_root(preState[]) == hash_tree_root(postState[])
reportDiff(preState, postState)
# Justification & Finalization
# ---------------------------------------------------------------
const JustificationFinalizationDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"justification_and_finalization"/"pyspec_tests"
runSuite(JustificationFinalizationDir, "Justification & Finalization", process_justification_and_finalization, useCache = false, useTAB = true, useUPB = true)
const JustificationFinalizationDir = RootDir/"justification_and_finalization"/"pyspec_tests"
runSuite(JustificationFinalizationDir, "Justification & Finalization"):
let info = altair.EpochInfo.init(state)
process_justification_and_finalization(state, info.balances)
# Inactivity updates
# ---------------------------------------------------------------
const InactivityDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"inactivity_updates"/"pyspec_tests"
runSuite(InactivityDir, "Inactivity", process_inactivity_updates, useCache = false)
const InactivityDir = RootDir/"inactivity_updates"/"pyspec_tests"
runSuite(InactivityDir, "Inactivity"):
let info = altair.EpochInfo.init(state)
process_inactivity_updates(cfg, state, info)
# Rewards & Penalties
# ---------------------------------------------------------------
@ -79,53 +68,63 @@ runSuite(InactivityDir, "Inactivity", process_inactivity_updates, useCache = fal
# Registry updates
# ---------------------------------------------------------------
const RegistryUpdatesDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"registry_updates"/"pyspec_tests"
runSuite(RegistryUpdatesDir, "Registry updates", process_registry_updates, useCache = true)
const RegistryUpdatesDir = RootDir/"registry_updates"/"pyspec_tests"
runSuite(RegistryUpdatesDir, "Registry updates"):
process_registry_updates(cfg, state, cache)
# Slashings
# ---------------------------------------------------------------
const SlashingsDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"slashings"/"pyspec_tests"
runSuite(SlashingsDir, "Slashings", process_slashings, useCache = false, useTAB = true)
const SlashingsDir = RootDir/"slashings"/"pyspec_tests"
runSuite(SlashingsDir, "Slashings"):
let info = altair.EpochInfo.init(state)
process_slashings(state, info.balances.current_epoch)
# Eth1 data reset
# ---------------------------------------------------------------
const Eth1DataResetDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"eth1_data_reset/"/"pyspec_tests"
runSuite(Eth1DataResetDir, "Eth1 data reset", process_eth1_data_reset, useCache = false)
const Eth1DataResetDir = RootDir/"eth1_data_reset/"/"pyspec_tests"
runSuite(Eth1DataResetDir, "Eth1 data reset"):
process_eth1_data_reset(state)
# Effective balance updates
# ---------------------------------------------------------------
const EffectiveBalanceUpdatesDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"effective_balance_updates"/"pyspec_tests"
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates", process_effective_balance_updates, useCache = false)
const EffectiveBalanceUpdatesDir = RootDir/"effective_balance_updates"/"pyspec_tests"
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates"):
process_effective_balance_updates(state)
# Slashings reset
# ---------------------------------------------------------------
const SlashingsResetDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"slashings_reset"/"pyspec_tests"
runSuite(SlashingsResetDir, "Slashings reset", process_slashings_reset, useCache = false)
const SlashingsResetDir = RootDir/"slashings_reset"/"pyspec_tests"
runSuite(SlashingsResetDir, "Slashings reset"):
process_slashings_reset(state)
# RANDAO mixes reset
# ---------------------------------------------------------------
const RandaoMixesResetDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"randao_mixes_reset"/"pyspec_tests"
runSuite(RandaoMixesResetDir, "RANDAO mixes reset", process_randao_mixes_reset, useCache = false)
const RandaoMixesResetDir = RootDir/"randao_mixes_reset"/"pyspec_tests"
runSuite(RandaoMixesResetDir, "RANDAO mixes reset"):
process_randao_mixes_reset(state)
# Historical roots update
# ---------------------------------------------------------------
const HistoricalRootsUpdateDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"historical_roots_update"/"pyspec_tests"
runSuite(HistoricalRootsUpdateDir, "Historical roots update", process_historical_roots_update, useCache = false)
const HistoricalRootsUpdateDir = RootDir/"historical_roots_update"/"pyspec_tests"
runSuite(HistoricalRootsUpdateDir, "Historical roots update"):
process_historical_roots_update(state)
# Participation flag updates
# ---------------------------------------------------------------
const ParticipationFlagDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"participation_flag_updates"/"pyspec_tests"
runSuite(ParticipationFlagDir, "Participation flag updates", process_participation_flag_updates, useCache = false)
const ParticipationFlagDir = RootDir/"participation_flag_updates"/"pyspec_tests"
runSuite(ParticipationFlagDir, "Participation flag updates"):
process_participation_flag_updates(state)
# Sync committee updates
# ---------------------------------------------------------------
const SyncCommitteeDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"sync_committee_updates"/"pyspec_tests"
runSuite(SyncCommitteeDir, "Sync committee updates", process_sync_committee_updates, useCache = false)
const SyncCommitteeDir = RootDir/"sync_committee_updates"/"pyspec_tests"
runSuite(SyncCommitteeDir, "Sync committee updates"):
process_sync_committee_updates(state)

View File

@ -154,10 +154,10 @@ suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
# Forward to next sync committee period
var
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
doAssert process_slots(
cfg, forked[], Slot(SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD),
cache, rewards, flags = {})
cache, info, flags = {})
let
snapshot_period =
pre_snapshot.header.slot.epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD
@ -220,11 +220,10 @@ suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
# Change finality
var
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
blocks = newSeq[ForkedSignedBeaconBlock]()
doAssert process_slots(
cfg, forked[], Slot(SLOTS_PER_EPOCH * 2),
cache, rewards, flags = {})
cfg, forked[], Slot(SLOTS_PER_EPOCH * 2), cache, info, flags = {})
for epoch in 0 ..< 3:
for slot in 0 ..< SLOTS_PER_EPOCH:
blocks.add block_for_next_slot(cfg, forked[], cache,

View File

@ -12,6 +12,7 @@ import
# Standard library
os, sequtils,
# Status internal
chronicles,
faststreams, streams,
# Beacon chain internals
../../../beacon_chain/spec/[state_transition, forks, helpers],
@ -45,7 +46,7 @@ proc runTest(testName, testDir, unitTestName: string) =
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
cfg = defaultRuntimeConfig
cfg.ALTAIR_FORK_EPOCH = transitionEpoch.fork_epoch.Epoch
@ -58,16 +59,14 @@ proc runTest(testName, testDir, unitTestName: string) =
let blck = parseTest(testPath/"blocks_" & $i & ".ssz_snappy", SSZ, phase0.SignedBeaconBlock)
let success = state_transition(
cfg, fhPreState[], blck,
cache, rewards,
cfg, fhPreState[], blck, cache, info,
flags = {skipStateRootValidation}, noRollback)
doAssert success, "Failure when applying block " & $i
else:
let blck = parseTest(testPath/"blocks_" & $i & ".ssz_snappy", SSZ, altair.SignedBeaconBlock)
let success = state_transition(
cfg, fhPreState[], blck,
cache, rewards,
cfg, fhPreState[], blck, cache, info,
flags = {skipStateRootValidation}, noRollback)
doAssert success, "Failure when applying block " & $i

View File

@ -9,9 +9,8 @@ import
# Standard library
std/[os, strutils, typetraits],
# Internals
../../beacon_chain/spec/datatypes/[phase0, altair],
../../beacon_chain/spec/[
eth2_merkleization, eth2_ssz_serialization, state_transition_epoch],
eth2_merkleization, eth2_ssz_serialization],
# Status libs,
snappy,
stew/byteutils
@ -72,18 +71,3 @@ proc parseTest*(path: string, Format: typedesc[SSZ], T: typedesc): T =
stderr.write $Format & " load issue for file \"", path, "\"\n"
stderr.write err.formatMsg(path), "\n"
quit 1
proc process_justification_and_finalization*(state: var phase0.BeaconState) =
var cache = StateCache()
var rewards: RewardInfo
rewards.init(state)
rewards.process_attestations(state, cache)
process_justification_and_finalization(state, rewards.total_balances)
func process_slashings*(state: var phase0.BeaconState) =
var cache = StateCache()
var rewards: RewardInfo
rewards.init(state)
rewards.process_attestations(state, cache)
process_slashings(state, rewards.total_balances.current_epoch)

View File

@ -45,8 +45,8 @@ proc runTest(identifier: string) =
attestation = parseTest(
testDir/"attestation.ssz_snappy", SSZ, Attestation)
done = process_attestation(
preState[], attestation, {},
get_base_reward_per_increment(preState[], cache), cache)
preState[], attestation, {}, get_base_reward_per_increment(
get_total_active_balance(preState[], cache)), cache)
if existsFile(testDir/"post.ssz_snappy"):
let postState =

View File

@ -36,7 +36,7 @@ proc runTest(testName, testDir, unitTestName: string) =
fhPreState = (ref ForkedHashedBeaconState)(hbsMerge: merge.HashedBeaconState(
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkMerge)
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
# In test cases with more than 10 blocks the first 10 aren't 0-prefixed,
# so purely lexicographic sorting wouldn't sort properly.
@ -48,12 +48,12 @@ proc runTest(testName, testDir, unitTestName: string) =
if hasPostState:
let success = state_transition(
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
noRollback)
doAssert success, "Failure when applying block " & $i
else:
let success = state_transition(
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
noRollback)
doAssert (i + 1 < numBlocks) or not success,
"We didn't expect these invalid blocks to be processed"

View File

@ -35,13 +35,13 @@ proc runTest(identifier: string) =
data: preState[], root: hash_tree_root(preState[])),
beaconStateFork: forkMerge)
cache = StateCache()
rewards: RewardInfo
info = ForkedEpochInfo()
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, merge.BeaconState))
check:
process_slots(
defaultRuntimeConfig, fhPreState[],
getStateField(fhPreState[], slot) + num_slots, cache, rewards, {})
getStateField(fhPreState[], slot) + num_slots, cache, info, {})
getStateRoot(fhPreState[]) == postState[].hash_tree_root()
let newPreState = newClone(fhPreState.hbsMerge.data)

View File

@ -11,17 +11,19 @@ import
# Standard library
os, strutils,
# Beacon chain internals
chronicles,
../../../beacon_chain/spec/[beaconstate, presets, state_transition_epoch],
../../../beacon_chain/spec/datatypes/merge,
../../../beacon_chain/spec/datatypes/[altair, merge],
# Test utilities
../../testutil,
../fixtures_utils,
../test_fixture_rewards,
../../helpers/debug_state
const RootDir = SszTestsDir/const_preset/"merge"/"epoch_processing"
template runSuite(
suiteDir, testName: string, transitionProc: untyped{ident},
useCache, useTAB, useUPB: static bool = false): untyped =
suiteDir, testName: string, transitionProc: untyped): untyped =
suite "Ethereum Foundation - Merge - Epoch Processing - " & testName & preset():
for testDir in walkDirRec(suiteDir, yieldFilter = {pcDir}, checkDir = true):
@ -29,47 +31,34 @@ template runSuite(
test testName & " - " & unitTestName & preset():
# BeaconState objects are stored on the heap to avoid stack overflow
type T = merge.BeaconState
var preState = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
var preState {.inject.} = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T))
var cache {.inject, used.} = StateCache()
template state: untyped {.inject, used.} = preState[]
template cfg: untyped {.inject, used.} = defaultRuntimeConfig
doAssert not (useCache and useTAB)
when useCache:
var cache = StateCache()
when compiles(transitionProc(defaultRuntimeConfig, preState[], cache)):
transitionProc(defaultRuntimeConfig, preState[], cache)
else:
transitionProc(preState[], cache)
elif useTAB and not useUPB:
var cache = StateCache()
let total_active_balance = preState[].get_total_active_balance(cache)
transitionProc(preState[], total_active_balance)
elif useTAB and useUPB:
var cache = StateCache()
let
total_active_balance = preState[].get_total_active_balance(cache)
unslashed_participating_balances =
preState[].get_unslashed_participating_balances()
transitionProc(
preState[], total_active_balance, unslashed_participating_balances)
else:
when compiles(transitionProc(preState[])):
transitionProc(preState[])
else:
transitionProc(defaultRuntimeConfig, preState[])
transitionProc
check:
hash_tree_root(preState[]) == hash_tree_root(postState[])
reportDiff(preState, postState)
# Justification & Finalization
# ---------------------------------------------------------------
const JustificationFinalizationDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"justification_and_finalization"/"pyspec_tests"
runSuite(JustificationFinalizationDir, "Justification & Finalization", process_justification_and_finalization, useCache = false, useTAB = true, useUPB = true)
const JustificationFinalizationDir = RootDir/"justification_and_finalization"/"pyspec_tests"
runSuite(JustificationFinalizationDir, "Justification & Finalization"):
let info = altair.EpochInfo.init(state)
process_justification_and_finalization(state, info.balances)
# Inactivity updates
# ---------------------------------------------------------------
const InactivityDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"inactivity_updates"/"pyspec_tests"
runSuite(InactivityDir, "Inactivity", process_inactivity_updates, useCache = false)
const InactivityDir = RootDir/"inactivity_updates"/"pyspec_tests"
runSuite(InactivityDir, "Inactivity"):
let info = altair.EpochInfo.init(state)
process_inactivity_updates(cfg, state, info)
# Rewards & Penalties
# ---------------------------------------------------------------
@ -79,53 +68,63 @@ runSuite(InactivityDir, "Inactivity", process_inactivity_updates, useCache = fal
# Registry updates
# ---------------------------------------------------------------
const RegistryUpdatesDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"registry_updates"/"pyspec_tests"
runSuite(RegistryUpdatesDir, "Registry updates", process_registry_updates, useCache = true)
const RegistryUpdatesDir = RootDir/"registry_updates"/"pyspec_tests"
runSuite(RegistryUpdatesDir, "Registry updates"):
process_registry_updates(cfg, state, cache)
# Slashings
# ---------------------------------------------------------------
const SlashingsDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"slashings"/"pyspec_tests"
runSuite(SlashingsDir, "Slashings", process_slashings, useCache = false, useTAB = true)
const SlashingsDir = RootDir/"slashings"/"pyspec_tests"
runSuite(SlashingsDir, "Slashings"):
let info = altair.EpochInfo.init(state)
process_slashings(state, info.balances.current_epoch)
# Eth1 data reset
# ---------------------------------------------------------------
const Eth1DataResetDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"eth1_data_reset/"/"pyspec_tests"
runSuite(Eth1DataResetDir, "Eth1 data reset", process_eth1_data_reset, useCache = false)
const Eth1DataResetDir = RootDir/"eth1_data_reset/"/"pyspec_tests"
runSuite(Eth1DataResetDir, "Eth1 data reset"):
process_eth1_data_reset(state)
# Effective balance updates
# ---------------------------------------------------------------
const EffectiveBalanceUpdatesDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"effective_balance_updates"/"pyspec_tests"
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates", process_effective_balance_updates, useCache = false)
const EffectiveBalanceUpdatesDir = RootDir/"effective_balance_updates"/"pyspec_tests"
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates"):
process_effective_balance_updates(state)
# Slashings reset
# ---------------------------------------------------------------
const SlashingsResetDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"slashings_reset"/"pyspec_tests"
runSuite(SlashingsResetDir, "Slashings reset", process_slashings_reset, useCache = false)
const SlashingsResetDir = RootDir/"slashings_reset"/"pyspec_tests"
runSuite(SlashingsResetDir, "Slashings reset"):
process_slashings_reset(state)
# RANDAO mixes reset
# ---------------------------------------------------------------
const RandaoMixesResetDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"randao_mixes_reset"/"pyspec_tests"
runSuite(RandaoMixesResetDir, "RANDAO mixes reset", process_randao_mixes_reset, useCache = false)
const RandaoMixesResetDir = RootDir/"randao_mixes_reset"/"pyspec_tests"
runSuite(RandaoMixesResetDir, "RANDAO mixes reset"):
process_randao_mixes_reset(state)
# Historical roots update
# ---------------------------------------------------------------
const HistoricalRootsUpdateDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"historical_roots_update"/"pyspec_tests"
runSuite(HistoricalRootsUpdateDir, "Historical roots update", process_historical_roots_update, useCache = false)
const HistoricalRootsUpdateDir = RootDir/"historical_roots_update"/"pyspec_tests"
runSuite(HistoricalRootsUpdateDir, "Historical roots update"):
process_historical_roots_update(state)
# Participation flag updates
# ---------------------------------------------------------------
const ParticipationFlagDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"participation_flag_updates"/"pyspec_tests"
runSuite(ParticipationFlagDir, "Participation flag updates", process_participation_flag_updates, useCache = false)
const ParticipationFlagDir = RootDir/"participation_flag_updates"/"pyspec_tests"
runSuite(ParticipationFlagDir, "Participation flag updates"):
process_participation_flag_updates(state)
# Sync committee updates
# ---------------------------------------------------------------
const SyncCommitteeDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"sync_committee_updates"/"pyspec_tests"
runSuite(SyncCommitteeDir, "Sync committee updates", process_sync_committee_updates, useCache = false)
const SyncCommitteeDir = RootDir/"sync_committee_updates"/"pyspec_tests"
runSuite(SyncCommitteeDir, "Sync committee updates"):
process_sync_committee_updates(state)

View File

@ -11,6 +11,7 @@ import
# Standard library
os,
# Utilities
chronicles,
unittest2,
stew/results,
# Beacon chain internals

View File

@ -36,7 +36,7 @@ proc runTest(testName, testDir, unitTestName: string) =
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
# In test cases with more than 10 blocks the first 10 aren't 0-prefixed,
# so purely lexicographic sorting wouldn't sort properly.
@ -46,12 +46,12 @@ proc runTest(testName, testDir, unitTestName: string) =
if hasPostState:
let success = state_transition(
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
noRollback)
doAssert success, "Failure when applying block " & $i
else:
let success = state_transition(
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
noRollback)
doAssert (i + 1 < numBlocks) or not success,
"We didn't expect these invalid blocks to be processed"

View File

@ -32,14 +32,14 @@ proc runTest(identifier: string) =
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
cache = StateCache()
rewards: RewardInfo
info: ForkedEpochInfo
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, phase0.BeaconState))
check:
process_slots(
defaultRuntimeConfig,
fhPreState[], getStateField(fhPreState[], slot) + num_slots, cache,
rewards, {})
info, {})
getStateRoot(fhPreState[]) == postState[].hash_tree_root()
let newPreState = newClone(fhPreState.hbsPhase0.data)

View File

@ -11,6 +11,7 @@ import
# Standard library
os, strutils,
# Beacon chain internals
chronicles,
../../../beacon_chain/spec/state_transition_epoch,
../../../beacon_chain/spec/datatypes/phase0,
# Test utilities
@ -19,7 +20,9 @@ import
../test_fixture_rewards,
../../helpers/debug_state
template runSuite(suiteDir, testName: string, transitionProc: untyped{ident}, useCache: static bool): untyped =
const RootDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"
template runSuite(suiteDir, testName: string, transitionProc: untyped): untyped =
suite "Ethereum Foundation - Phase 0 - Epoch Processing - " & testName & preset():
for testDir in walkDirRec(suiteDir, yieldFilter = {pcDir}, checkDir = true):
@ -27,23 +30,28 @@ template runSuite(suiteDir, testName: string, transitionProc: untyped{ident}, us
test testName & " - " & unitTestName & preset():
# BeaconState objects are stored on the heap to avoid stack overflow
type T = phase0.BeaconState
var preState = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
var preState {.inject.} = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T))
var cache {.used.}: StateCache
when compiles(transitionProc(defaultRuntimeConfig, preState[], cache)):
transitionProc(defaultRuntimeConfig, preState[], cache)
elif compiles(transitionProc(preState[], cache)):
transitionProc(preState[], cache)
else:
transitionProc(preState[])
var cache {.inject, used.} = StateCache()
var info {.inject.}: EpochInfo
template state: untyped {.inject, used.} = preState[]
template cfg: untyped {.inject, used.} = defaultRuntimeConfig
init(info, preState[])
transitionProc
check:
hash_tree_root(preState[]) == hash_tree_root(postState[])
reportDiff(preState, postState)
# Justification & Finalization
# ---------------------------------------------------------------
const JustificationFinalizationDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"justification_and_finalization"/"pyspec_tests"
runSuite(JustificationFinalizationDir, "Justification & Finalization", process_justification_and_finalization, useCache = false)
const JustificationFinalizationDir = RootDir/"justification_and_finalization"/"pyspec_tests"
runSuite(JustificationFinalizationDir, "Justification & Finalization"):
info.process_attestations(state, cache)
process_justification_and_finalization(state, info.total_balances)
# Rewards & Penalties
# ---------------------------------------------------------------
@ -53,32 +61,41 @@ runSuite(JustificationFinalizationDir, "Justification & Finalization", process_
# Registry updates
# ---------------------------------------------------------------
const RegistryUpdatesDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"registry_updates"/"pyspec_tests"
runSuite(RegistryUpdatesDir, "Registry updates", process_registry_updates, useCache = true)
const RegistryUpdatesDir = RootDir/"registry_updates"/"pyspec_tests"
runSuite(RegistryUpdatesDir, "Registry updates"):
process_registry_updates(cfg, state, cache)
# Slashings
# ---------------------------------------------------------------
const SlashingsDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"slashings"/"pyspec_tests"
runSuite(SlashingsDir, "Slashings", process_slashings, useCache = false)
const SlashingsDir = RootDir/"slashings"/"pyspec_tests"
runSuite(SlashingsDir, "Slashings"):
info.process_attestations(state, cache)
process_slashings(state, info.total_balances.current_epoch)
# Final updates
# ---------------------------------------------------------------
const Eth1DataResetDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"eth1_data_reset/"/"pyspec_tests"
runSuite(Eth1DataResetDir, "Eth1 data reset", process_eth1_data_reset, useCache = false)
const Eth1DataResetDir = RootDir/"eth1_data_reset/"/"pyspec_tests"
runSuite(Eth1DataResetDir, "Eth1 data reset"):
process_eth1_data_reset(state)
const EffectiveBalanceUpdatesDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"effective_balance_updates"/"pyspec_tests"
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates", process_effective_balance_updates, useCache = false)
const EffectiveBalanceUpdatesDir = RootDir/"effective_balance_updates"/"pyspec_tests"
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates"):
process_effective_balance_updates(state)
const SlashingsResetDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"slashings_reset"/"pyspec_tests"
runSuite(SlashingsResetDir, "Slashings reset", process_slashings_reset, useCache = false)
const SlashingsResetDir = RootDir/"slashings_reset"/"pyspec_tests"
runSuite(SlashingsResetDir, "Slashings reset"):
process_slashings_reset(state)
const RandaoMixesResetDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"randao_mixes_reset"/"pyspec_tests"
runSuite(RandaoMixesResetDir, "RANDAO mixes reset", process_randao_mixes_reset, useCache = false)
const RandaoMixesResetDir = RootDir/"randao_mixes_reset"/"pyspec_tests"
runSuite(RandaoMixesResetDir, "RANDAO mixes reset"):
process_randao_mixes_reset(state)
const HistoricalRootsUpdateDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"historical_roots_update"/"pyspec_tests"
runSuite(HistoricalRootsUpdateDir, "Historical roots update", process_historical_roots_update, useCache = false)
const HistoricalRootsUpdateDir = RootDir/"historical_roots_update"/"pyspec_tests"
runSuite(HistoricalRootsUpdateDir, "Historical roots update"):
process_historical_roots_update(state)
const ParticipationRecordsDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"participation_record_updates"/"pyspec_tests"
runSuite(ParticipationRecordsDir, "Participation record updates", process_participation_record_updates, useCache = false)
const ParticipationRecordsDir = RootDir/"participation_record_updates"/"pyspec_tests"
runSuite(ParticipationRecordsDir, "Participation record updates"):
process_participation_record_updates(state)

View File

@ -13,8 +13,8 @@ import
# Utilities
stew/results,
# Beacon chain internals
../../beacon_chain/spec/datatypes/phase0,
../../beacon_chain/spec/[validator, helpers, state_transition_epoch],
../../beacon_chain/spec/datatypes/phase0,
# Test utilities
../testutil,
./fixtures_utils
@ -60,13 +60,13 @@ proc runTest(rewardsDir, identifier: string) =
parseTest(testDir/"inactivity_penalty_deltas.ssz_snappy", SSZ, Deltas)
var
rewards = RewardInfo()
info: phase0.EpochInfo
finality_delay = (state[].get_previous_epoch() - state[].finalized_checkpoint.epoch)
rewards.init(state[])
rewards.process_attestations(state[], cache)
info.init(state[])
info.process_attestations(state[], cache)
let
total_balance = rewards.total_balances.current_epoch
total_balance = info.total_balances.current_epoch
total_balance_sqrt = integer_squareroot(total_balance)
var
@ -76,7 +76,7 @@ proc runTest(rewardsDir, identifier: string) =
inclusionDelayDeltas2 = Deltas.init(state[].validators.len)
inactivityPenaltyDeltas2 = Deltas.init(state[].validators.len)
for index, validator in rewards.statuses.mpairs():
for index, validator in info.statuses.mpairs():
if not is_eligible_validator(validator):
continue
@ -85,11 +85,11 @@ proc runTest(rewardsDir, identifier: string) =
state[], index.ValidatorIndex, total_balance_sqrt)
sourceDeltas2.add(index, get_source_delta(
validator, base_reward, rewards.total_balances, finality_delay))
validator, base_reward, info.total_balances, finality_delay))
targetDeltas2.add(index, get_target_delta(
validator, base_reward, rewards.total_balances, finality_delay))
validator, base_reward, info.total_balances, finality_delay))
headDeltas2.add(index, get_head_delta(
validator, base_reward, rewards.total_balances, finality_delay))
validator, base_reward, info.total_balances, finality_delay))
let
(inclusion_delay_delta, proposer_delta) =

View File

@ -88,8 +88,8 @@ proc mockBlock*(
cache = StateCache()
tmpState = assignClone(state)
if getStateField(state, slot) != slot:
var rewards = RewardInfo()
doAssert process_slots(cfg, tmpState[], slot, cache, rewards, flags = {})
var info = ForkedEpochInfo()
doAssert process_slots(cfg, tmpState[], slot, cache, info, flags = {})
result.kind = case tmpState[].beaconStateFork
of forkPhase0: BeaconBlockFork.Phase0

View File

@ -17,17 +17,17 @@ proc nextEpoch*(state: var ForkedHashedBeaconState) =
## Transition to the start of the next epoch
var
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
let slot =
getStateField(state, slot) + SLOTS_PER_EPOCH -
(getStateField(state, slot) mod SLOTS_PER_EPOCH)
doAssert process_slots(defaultRuntimeConfig, state, slot, cache, rewards, {})
doAssert process_slots(defaultRuntimeConfig, state, slot, cache, info, {})
proc nextSlot*(state: var ForkedHashedBeaconState) =
## Transition to the next slot
var
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
doAssert process_slots(
defaultRuntimeConfig, state, getStateField(state, slot) + 1, cache, rewards, {})
defaultRuntimeConfig, state, getStateField(state, slot) + 1, cache, info, {})

View File

@ -9,19 +9,19 @@ import
# Specs
../../beacon_chain/spec/[
forks, presets, state_transition, state_transition_epoch],
../../beacon_chain/spec/datatypes/base
../../beacon_chain/spec/datatypes/phase0
proc processSlotsUntilEndCurrentEpoch(state: var ForkedHashedBeaconState) =
# Process all slots until the end of the last slot of the current epoch
var
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
let slot =
getStateField(state, slot) + SLOTS_PER_EPOCH -
(getStateField(state, slot) mod SLOTS_PER_EPOCH)
# Transition to slot before the epoch state transition
discard process_slots(defaultRuntimeConfig, state, slot - 1, cache, rewards, {})
discard process_slots(defaultRuntimeConfig, state, slot - 1, cache, info, {})
# For the last slot of the epoch,
# only process_slot without process_epoch
@ -34,9 +34,9 @@ proc transitionEpochUntilJustificationFinalization*(state: var ForkedHashedBeaco
var
cache = StateCache()
rewards = RewardInfo()
info: phase0.EpochInfo
rewards.init(state.hbsPhase0.data)
rewards.process_attestations(state.hbsPhase0.data, cache)
info.init(state.hbsPhase0.data)
info.process_attestations(state.hbsPhase0.data, cache)
process_justification_and_finalization(
state.hbsPhase0.data, rewards.total_balances)
state.hbsPhase0.data, info.total_balances)

View File

@ -65,11 +65,11 @@ suite "Attestation pool processing" & preset():
pool = newClone(AttestationPool.init(dag, quarantine))
state = newClone(dag.headState)
cache = StateCache()
rewards: RewardInfo
info = ForkedEpochInfo()
# Slot 0 is a finalized slot - won't be making attestations for it..
check:
process_slots(
dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, rewards,
dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, info,
{})
test "Can add and retrieve simple attestations" & preset():
@ -102,7 +102,7 @@ suite "Attestation pool processing" & preset():
process_slots(
defaultRuntimeConfig, state.data,
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards, {})
info, {})
let attestations = pool[].getAttestationsForBlock(state.data, cache)
@ -122,7 +122,7 @@ suite "Attestation pool processing" & preset():
process_slots(
defaultRuntimeConfig, state.data,
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards, {})
info, {})
check:
# shouldn't include already-included attestations
@ -200,7 +200,7 @@ suite "Attestation pool processing" & preset():
process_slots(
defaultRuntimeConfig, state.data,
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards, {})
info, {})
check:
pool[].getAttestationsForBlock(state.data, cache).len() == 2
@ -247,7 +247,7 @@ suite "Attestation pool processing" & preset():
check:
process_slots(
defaultRuntimeConfig, state.data,
getStateField(state.data, slot) + 1, cache, rewards, {})
getStateField(state.data, slot) + 1, cache, info, {})
doAssert attestations.uint64 > MAX_ATTESTATIONS,
"6*SLOTS_PER_EPOCH validators > 128 mainnet MAX_ATTESTATIONS"
@ -269,7 +269,7 @@ suite "Attestation pool processing" & preset():
check:
process_slots(
defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1,
cache, rewards, {})
cache, info, {})
let
bc1 = get_beacon_committee(state[].data,
@ -284,7 +284,7 @@ suite "Attestation pool processing" & preset():
discard process_slots(
defaultRuntimeConfig, state.data,
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards, {})
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {})
let attestations = pool[].getAttestationsForBlock(state.data, cache)
@ -310,7 +310,7 @@ suite "Attestation pool processing" & preset():
check:
process_slots(
defaultRuntimeConfig, state.data,
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards, {})
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {})
let attestations = pool[].getAttestationsForBlock(state.data, cache)
@ -339,7 +339,7 @@ suite "Attestation pool processing" & preset():
check:
process_slots(
defaultRuntimeConfig, state.data,
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards, {})
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {})
let attestations = pool[].getAttestationsForBlock(state.data, cache)
@ -367,7 +367,7 @@ suite "Attestation pool processing" & preset():
check:
process_slots(
defaultRuntimeConfig, state.data,
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards, {})
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {})
let attestations = pool[].getAttestationsForBlock(state.data, cache)

View File

@ -125,7 +125,7 @@ suite "Block pool processing" & preset():
nilPhase0Callback: OnPhase0BlockAdded
state = newClone(dag.headState.data)
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
b1 = addTestBlock(state[], dag.tail.root, cache, attestations = att0).phase0Block
b2 = addTestBlock(state[], b1.root, cache).phase0Block
@ -178,7 +178,7 @@ suite "Block pool processing" & preset():
check:
process_slots(
defaultRuntimeConfig, state[], getStateField(state[], slot) + 1, cache,
rewards, {})
info, {})
let
b4 = addTestBlock(state[], b2.root, cache).phase0Block
@ -352,7 +352,7 @@ suite "chain DAG finalization tests" & preset():
quarantine = QuarantineRef.init(keys.newRng(), taskpool)
nilPhase0Callback: OnPhase0BlockAdded
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
test "prune heads on finalization" & preset():
# Create a fork that will not be taken
@ -363,7 +363,7 @@ suite "chain DAG finalization tests" & preset():
process_slots(
defaultRuntimeConfig, tmpState[],
getStateField(tmpState[], slot) + (5 * SLOTS_PER_EPOCH).uint64,
cache, rewards, {})
cache, info, {})
let lateBlock = addTestBlock(tmpState[], dag.head.root, cache).phase0Block
block:
@ -466,7 +466,7 @@ suite "chain DAG finalization tests" & preset():
doAssert process_slots(
defaultRuntimeConfig, prestate[], getStateField(prestate[], slot) + 1,
cache, rewards, {})
cache, info, {})
# create another block, orphaning the head
let blck = makeTestBlock(prestate[], dag.head.parent.root, cache).phase0Block
@ -495,7 +495,7 @@ suite "chain DAG finalization tests" & preset():
check:
process_slots(
defaultRuntimeConfig, dag.headState.data, Slot(SLOTS_PER_EPOCH * 6 + 2),
cache, rewards, {})
cache, info, {})
var blck = makeTestBlock(
dag.headState.data, dag.head.root, cache,
@ -586,7 +586,7 @@ suite "Diverging hardforks":
nilPhase0Callback: OnPhase0BlockAdded
state = newClone(dag.headState.data)
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
blck = makeTestBlock(dag.headState.data, dag.head.root, cache)
tmpState = assignClone(dag.headState.data)
@ -595,7 +595,7 @@ suite "Diverging hardforks":
process_slots(
phase0RuntimeConfig, tmpState[],
getStateField(tmpState[], slot) + (3 * SLOTS_PER_EPOCH).uint64,
cache, rewards, {})
cache, info, {})
# Because the first block is after the Altair transition, the only block in
# common is the tail block
@ -614,7 +614,7 @@ suite "Diverging hardforks":
process_slots(
phase0RuntimeConfig, tmpState[],
getStateField(tmpState[], slot) + SLOTS_PER_EPOCH.uint64,
cache, rewards, {})
cache, info, {})
# There's a block in the shared-correct phase0 hardfork, before epoch 2
var
@ -626,7 +626,7 @@ suite "Diverging hardforks":
process_slots(
phase0RuntimeConfig, tmpState[],
getStateField(tmpState[], slot) + (3 * SLOTS_PER_EPOCH).uint64,
cache, rewards, {})
cache, info, {})
var
b2 = addTestBlock(tmpState[], b1.root, cache).phase0Block

View File

@@ -42,13 +42,13 @@ suite "Gossip validation " & preset():
pool = newClone(AttestationPool.init(dag, quarantine))
state = newClone(dag.headState)
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
batchCrypto = BatchCrypto.new(keys.newRng(), eager = proc(): bool = false, taskpool)
# Slot 0 is a finalized slot - won't be making attestations for it..
check:
process_slots(
defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1,
cache, rewards, {})
cache, info, {})
test "Any committee index is valid":
template committee(idx: uint64): untyped =

View File

@@ -11,7 +11,7 @@ import
# Status libraries
stew/bitops2,
# Beacon chain internals
../beacon_chain/spec/[helpers, state_transition],
../beacon_chain/spec/[forks, helpers, state_transition],
# Test utilities
./unittest2, mocking/mock_genesis
@@ -29,9 +29,9 @@ suite "Spec helpers":
var
forked = newClone(initGenesisState())
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
doAssert process_slots(defaultRuntimeConfig, forked[],
Slot(100), cache, rewards, flags = {})
Slot(100), cache, info, flags = {})
let
state = forked[].hbsPhase0.data

View File

@@ -84,9 +84,9 @@ proc addTestBlock*(
cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock =
# Create and add a block to state - state will advance by one slot!
if nextSlot:
var rewards: RewardInfo
var info = ForkedEpochInfo()
doAssert process_slots(
cfg, state, getStateField(state, slot) + 1, cache, rewards, flags)
cfg, state, getStateField(state, slot) + 1, cache, info, flags)
let
proposer_index = get_beacon_proposer_index(

View File

@@ -60,7 +60,7 @@ proc getTestStates*(
var
tmpState = assignClone(initialState)
cache = StateCache()
rewards = RewardInfo()
info = ForkedEpochInfo()
cfg = defaultRuntimeConfig
if stateFork in [forkAltair, forkMerge]:
@ -73,7 +73,7 @@ proc getTestStates*(
let slot = epoch.Epoch.compute_start_slot_at_epoch
if getStateField(tmpState[], slot) < slot:
doAssert process_slots(
cfg, tmpState[], slot, cache, rewards, {})
cfg, tmpState[], slot, cache, info, {})
if i mod 3 == 0:
withState(tmpState[]):