reward accounting for altair+ (#2981)
Similar to the existing `RewardInfo`, this PR adds the infrastructure needed to export epoch processing information from altair+. Because accounting is done somewhat differently, the PR uses a fork-specific object to extract the information in order to make the cost on the spec side low. * RewardInfo -> EpochInfo, ForkedEpochInfo * use array for computing new sync committee * avoid repeated total active balance computations in block processing * simplify proposer index check * simplify epoch transition tests * pre-compute base increment and reuse in epoch processing, and a few other small optimizations This PR introduces the type and does the heavy lifting in terms of refactoring - the tools that use the accounting will need separate PRs (as well as refinements to the exported information)
This commit is contained in:
parent
2eb9a608a4
commit
f90b2b8b1f
|
@ -755,7 +755,7 @@ proc get*(dag: ChainDAGRef, root: Eth2Digest): Option[BlockData] =
|
||||||
|
|
||||||
proc advanceSlots(
|
proc advanceSlots(
|
||||||
dag: ChainDAGRef, state: var StateData, slot: Slot, save: bool,
|
dag: ChainDAGRef, state: var StateData, slot: Slot, save: bool,
|
||||||
cache: var StateCache, rewards: var RewardInfo) =
|
cache: var StateCache, info: var ForkedEpochInfo) =
|
||||||
# Given a state, advance it zero or more slots by applying empty slot
|
# Given a state, advance it zero or more slots by applying empty slot
|
||||||
# processing - the state must be positions at a slot before or equal to the
|
# processing - the state must be positions at a slot before or equal to the
|
||||||
# target
|
# target
|
||||||
|
@ -764,7 +764,7 @@ proc advanceSlots(
|
||||||
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
|
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
|
||||||
|
|
||||||
doAssert process_slots(
|
doAssert process_slots(
|
||||||
dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, rewards,
|
dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, info,
|
||||||
dag.updateFlags),
|
dag.updateFlags),
|
||||||
"process_slots shouldn't fail when state slot is correct"
|
"process_slots shouldn't fail when state slot is correct"
|
||||||
if save:
|
if save:
|
||||||
|
@ -773,7 +773,7 @@ proc advanceSlots(
|
||||||
proc applyBlock(
|
proc applyBlock(
|
||||||
dag: ChainDAGRef,
|
dag: ChainDAGRef,
|
||||||
state: var StateData, blck: BlockData, flags: UpdateFlags,
|
state: var StateData, blck: BlockData, flags: UpdateFlags,
|
||||||
cache: var StateCache, rewards: var RewardInfo): bool =
|
cache: var StateCache, info: var ForkedEpochInfo): bool =
|
||||||
# Apply a single block to the state - the state must be positioned at the
|
# Apply a single block to the state - the state must be positioned at the
|
||||||
# parent of the block with a slot lower than the one of the block being
|
# parent of the block with a slot lower than the one of the block being
|
||||||
# applied
|
# applied
|
||||||
|
@ -793,15 +793,15 @@ proc applyBlock(
|
||||||
of BeaconBlockFork.Phase0:
|
of BeaconBlockFork.Phase0:
|
||||||
state_transition(
|
state_transition(
|
||||||
dag.cfg, state.data, blck.data.phase0Block,
|
dag.cfg, state.data, blck.data.phase0Block,
|
||||||
cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
|
cache, info, flags + dag.updateFlags + {slotProcessed}, restore)
|
||||||
of BeaconBlockFork.Altair:
|
of BeaconBlockFork.Altair:
|
||||||
state_transition(
|
state_transition(
|
||||||
dag.cfg, state.data, blck.data.altairBlock,
|
dag.cfg, state.data, blck.data.altairBlock,
|
||||||
cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
|
cache, info, flags + dag.updateFlags + {slotProcessed}, restore)
|
||||||
of BeaconBlockFork.Merge:
|
of BeaconBlockFork.Merge:
|
||||||
state_transition(
|
state_transition(
|
||||||
dag.cfg, state.data, blck.data.mergeBlock,
|
dag.cfg, state.data, blck.data.mergeBlock,
|
||||||
cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
|
cache, info, flags + dag.updateFlags + {slotProcessed}, restore)
|
||||||
if ok:
|
if ok:
|
||||||
state.blck = blck.refs
|
state.blck = blck.refs
|
||||||
|
|
||||||
|
@ -932,7 +932,7 @@ proc updateStateData*(
|
||||||
assignTick = Moment.now()
|
assignTick = Moment.now()
|
||||||
startSlot {.used.} = getStateField(state.data, slot) # used in logs below
|
startSlot {.used.} = getStateField(state.data, slot) # used in logs below
|
||||||
startRoot {.used.} = getStateRoot(state.data)
|
startRoot {.used.} = getStateRoot(state.data)
|
||||||
var rewards: RewardInfo
|
var info: ForkedEpochInfo
|
||||||
# Time to replay all the blocks between then and now
|
# Time to replay all the blocks between then and now
|
||||||
for i in countdown(ancestors.len - 1, 0):
|
for i in countdown(ancestors.len - 1, 0):
|
||||||
# Because the ancestors are in the database, there's no need to persist them
|
# Because the ancestors are in the database, there's no need to persist them
|
||||||
|
@ -940,11 +940,11 @@ proc updateStateData*(
|
||||||
# database, we can skip certain checks that have already been performed
|
# database, we can skip certain checks that have already been performed
|
||||||
# before adding the block to the database.
|
# before adding the block to the database.
|
||||||
let ok =
|
let ok =
|
||||||
dag.applyBlock(state, dag.get(ancestors[i]), {}, cache, rewards)
|
dag.applyBlock(state, dag.get(ancestors[i]), {}, cache, info)
|
||||||
doAssert ok, "Blocks in database should never fail to apply.."
|
doAssert ok, "Blocks in database should never fail to apply.."
|
||||||
|
|
||||||
# ...and make sure to process empty slots as requested
|
# ...and make sure to process empty slots as requested
|
||||||
dag.advanceSlots(state, bs.slot, save, cache, rewards)
|
dag.advanceSlots(state, bs.slot, save, cache, info)
|
||||||
|
|
||||||
# ...and make sure to load the state cache, if it exists
|
# ...and make sure to load the state cache, if it exists
|
||||||
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
|
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
|
||||||
|
|
|
@ -535,10 +535,13 @@ func get_total_active_balance*(state: SomeBeaconState, cache: var StateCache): G
|
||||||
state, cache.get_shuffled_active_validator_indices(state, epoch))
|
state, cache.get_shuffled_active_validator_indices(state, epoch))
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_base_reward_per_increment
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_base_reward_per_increment
|
||||||
|
func get_base_reward_per_increment_sqrt*(
|
||||||
|
total_active_balance_sqrt: uint64): Gwei =
|
||||||
|
EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR div total_active_balance_sqrt
|
||||||
|
|
||||||
func get_base_reward_per_increment*(
|
func get_base_reward_per_increment*(
|
||||||
state: altair.BeaconState | merge.BeaconState, cache: var StateCache): Gwei =
|
total_active_balance: Gwei): Gwei =
|
||||||
EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR div
|
get_base_reward_per_increment_sqrt(integer_squareroot(total_active_balance))
|
||||||
integer_squareroot(get_total_active_balance(state, cache))
|
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_base_reward
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_base_reward
|
||||||
func get_base_reward(
|
func get_base_reward(
|
||||||
|
@ -658,13 +661,12 @@ proc process_attestation*(
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_next_sync_committee_indices
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_next_sync_committee_indices
|
||||||
func get_next_sync_committee_indices(state: altair.BeaconState | merge.BeaconState):
|
func get_next_sync_committee_keys(state: altair.BeaconState | merge.BeaconState):
|
||||||
seq[ValidatorIndex] =
|
array[SYNC_COMMITTEE_SIZE, ValidatorPubKey] =
|
||||||
## Return the sequence of sync committee indices (which may include
|
## Return the sequence of sync committee indices (which may include
|
||||||
## duplicate indices) for the next sync committee, given a ``state`` at a
|
## duplicate indices) for the next sync committee, given a ``state`` at a
|
||||||
## sync committee period boundary.
|
## sync committee period boundary.
|
||||||
|
|
||||||
# TODO this size is known statically, so return array[] if possible
|
|
||||||
let epoch = get_current_epoch(state) + 1
|
let epoch = get_current_epoch(state) + 1
|
||||||
|
|
||||||
const MAX_RANDOM_BYTE = 255
|
const MAX_RANDOM_BYTE = 255
|
||||||
|
@ -674,10 +676,11 @@ func get_next_sync_committee_indices(state: altair.BeaconState | merge.BeaconSta
|
||||||
seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
|
seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
|
||||||
var
|
var
|
||||||
i = 0'u64
|
i = 0'u64
|
||||||
sync_committee_indices: seq[ValidatorIndex]
|
index = 0
|
||||||
|
res: array[SYNC_COMMITTEE_SIZE, ValidatorPubKey]
|
||||||
hash_buffer: array[40, byte]
|
hash_buffer: array[40, byte]
|
||||||
hash_buffer[0..31] = seed.data
|
hash_buffer[0..31] = seed.data
|
||||||
while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
|
while index < SYNC_COMMITTEE_SIZE:
|
||||||
hash_buffer[32..39] = uint_to_bytes8(uint64(i div 32))
|
hash_buffer[32..39] = uint_to_bytes8(uint64(i div 32))
|
||||||
let
|
let
|
||||||
shuffled_index = compute_shuffled_index(uint64(i mod active_validator_count), active_validator_count, seed)
|
shuffled_index = compute_shuffled_index(uint64(i mod active_validator_count), active_validator_count, seed)
|
||||||
|
@ -685,22 +688,17 @@ func get_next_sync_committee_indices(state: altair.BeaconState | merge.BeaconSta
|
||||||
random_byte = eth2digest(hash_buffer).data[i mod 32]
|
random_byte = eth2digest(hash_buffer).data[i mod 32]
|
||||||
effective_balance = state.validators[candidate_index].effective_balance
|
effective_balance = state.validators[candidate_index].effective_balance
|
||||||
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
|
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
|
||||||
sync_committee_indices.add candidate_index
|
res[index] = state.validators[candidate_index].pubkey
|
||||||
|
inc index
|
||||||
i += 1'u64
|
i += 1'u64
|
||||||
sync_committee_indices
|
res
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_next_sync_committee
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_next_sync_committee
|
||||||
proc get_next_sync_committee*(state: altair.BeaconState | merge.BeaconState):
|
proc get_next_sync_committee*(state: altair.BeaconState | merge.BeaconState):
|
||||||
SyncCommittee =
|
SyncCommittee =
|
||||||
## Return the *next* sync committee for a given ``state``.
|
## Return the *next* sync committee for a given ``state``.
|
||||||
let indices = get_next_sync_committee_indices(state)
|
|
||||||
# TODO not robust
|
|
||||||
doAssert indices.len == SYNC_COMMITTEE_SIZE
|
|
||||||
|
|
||||||
var res: SyncCommittee
|
var res: SyncCommittee
|
||||||
for i, index in indices:
|
res.pubkeys.data = get_next_sync_committee_keys(state)
|
||||||
res.pubkeys.data[i] = state.validators[index].pubkey
|
|
||||||
res.pubkeys.resetCache()
|
|
||||||
|
|
||||||
# see signatures_batch, TODO shouldn't be here
|
# see signatures_batch, TODO shouldn't be here
|
||||||
# Deposit processing ensures all keys are valid
|
# Deposit processing ensures all keys are valid
|
||||||
|
|
|
@ -240,6 +240,26 @@ type
|
||||||
current_sync_committee*: SyncCommittee # [New in Altair]
|
current_sync_committee*: SyncCommittee # [New in Altair]
|
||||||
next_sync_committee*: SyncCommittee # [New in Altair]
|
next_sync_committee*: SyncCommittee # [New in Altair]
|
||||||
|
|
||||||
|
UnslashedParticipatingBalances* = object
|
||||||
|
previous_epoch*: array[PARTICIPATION_FLAG_WEIGHTS.len, Gwei]
|
||||||
|
current_epoch_TIMELY_TARGET*: Gwei
|
||||||
|
current_epoch*: Gwei # aka total_active_balance
|
||||||
|
|
||||||
|
ParticipationFlag* {.pure.} = enum
|
||||||
|
timelySourceAttester
|
||||||
|
timelyTargetAttester
|
||||||
|
timelyHeadAttester
|
||||||
|
eligible
|
||||||
|
|
||||||
|
ParticipationInfo* = object
|
||||||
|
flags*: set[ParticipationFlag]
|
||||||
|
delta*: RewardDelta
|
||||||
|
|
||||||
|
EpochInfo* = object
|
||||||
|
## Information about the outcome of epoch processing
|
||||||
|
validators*: seq[ParticipationInfo]
|
||||||
|
balances*: UnslashedParticipatingBalances
|
||||||
|
|
||||||
# TODO Careful, not nil analysis is broken / incomplete and the semantics will
|
# TODO Careful, not nil analysis is broken / incomplete and the semantics will
|
||||||
# likely change in future versions of the language:
|
# likely change in future versions of the language:
|
||||||
# https://github.com/nim-lang/RFCs/issues/250
|
# https://github.com/nim-lang/RFCs/issues/250
|
||||||
|
@ -428,7 +448,7 @@ iterator allSyncCommittees*: SyncCommitteeIndex =
|
||||||
yield SyncCommitteeIndex(committeeIdx)
|
yield SyncCommitteeIndex(committeeIdx)
|
||||||
|
|
||||||
template validateSyncCommitteeIndexOr*(
|
template validateSyncCommitteeIndexOr*(
|
||||||
networkValParam: uint64,
|
networkValParam: uint64,
|
||||||
elseBody: untyped): SyncCommitteeIndex =
|
elseBody: untyped): SyncCommitteeIndex =
|
||||||
let networkVal = networkValParam
|
let networkVal = networkValParam
|
||||||
if networkVal < SYNC_COMMITTEE_SUBNET_COUNT:
|
if networkVal < SYNC_COMMITTEE_SUBNET_COUNT:
|
||||||
|
@ -501,3 +521,7 @@ chronicles.formatIt SyncCommitteeMessage: shortLog(it)
|
||||||
|
|
||||||
template hash*(x: LightClientUpdate): Hash =
|
template hash*(x: LightClientUpdate): Hash =
|
||||||
hash(x.header)
|
hash(x.header)
|
||||||
|
|
||||||
|
func clear*(info: var EpochInfo) =
|
||||||
|
info.validators.setLen(0)
|
||||||
|
info.balances = UnslashedParticipatingBalances()
|
|
@ -547,10 +547,6 @@ type
|
||||||
# time of attestation.
|
# time of attestation.
|
||||||
previous_epoch_head_attesters_raw*: Gwei
|
previous_epoch_head_attesters_raw*: Gwei
|
||||||
|
|
||||||
RewardInfo* = object
|
|
||||||
statuses*: seq[RewardStatus]
|
|
||||||
total_balances*: TotalBalances
|
|
||||||
|
|
||||||
func getImmutableValidatorData*(validator: Validator): ImmutableValidatorData2 =
|
func getImmutableValidatorData*(validator: Validator): ImmutableValidatorData2 =
|
||||||
let cookedKey = validator.pubkey.load() # Loading the pubkey is slow!
|
let cookedKey = validator.pubkey.load() # Loading the pubkey is slow!
|
||||||
doAssert cookedKey.isSome,
|
doAssert cookedKey.isSome,
|
||||||
|
|
|
@ -242,8 +242,17 @@ type
|
||||||
SomeBeaconBlock* = BeaconBlock | SigVerifiedBeaconBlock | TrustedBeaconBlock
|
SomeBeaconBlock* = BeaconBlock | SigVerifiedBeaconBlock | TrustedBeaconBlock
|
||||||
SomeBeaconBlockBody* = BeaconBlockBody | SigVerifiedBeaconBlockBody | TrustedBeaconBlockBody
|
SomeBeaconBlockBody* = BeaconBlockBody | SigVerifiedBeaconBlockBody | TrustedBeaconBlockBody
|
||||||
|
|
||||||
|
EpochInfo* = object
|
||||||
|
## Information about the outcome of epoch processing
|
||||||
|
statuses*: seq[RewardStatus]
|
||||||
|
total_balances*: TotalBalances
|
||||||
|
|
||||||
chronicles.formatIt BeaconBlock: it.shortLog
|
chronicles.formatIt BeaconBlock: it.shortLog
|
||||||
|
|
||||||
|
func clear*(info: var EpochInfo) =
|
||||||
|
info.statuses.setLen(0)
|
||||||
|
info.total_balances = TotalBalances()
|
||||||
|
|
||||||
Json.useCustomSerialization(BeaconState.justification_bits):
|
Json.useCustomSerialization(BeaconState.justification_bits):
|
||||||
read:
|
read:
|
||||||
let s = reader.readValue(string)
|
let s = reader.readValue(string)
|
||||||
|
|
|
@ -68,6 +68,19 @@ type
|
||||||
of BeaconBlockFork.Merge:
|
of BeaconBlockFork.Merge:
|
||||||
mergeBlock*: merge.TrustedSignedBeaconBlock
|
mergeBlock*: merge.TrustedSignedBeaconBlock
|
||||||
|
|
||||||
|
EpochInfoFork* {.pure.} = enum
|
||||||
|
Phase0
|
||||||
|
Altair
|
||||||
|
|
||||||
|
ForkedEpochInfo* = object
|
||||||
|
case kind*: EpochInfoFork
|
||||||
|
of EpochInfoFork.Phase0:
|
||||||
|
phase0Info*: phase0.EpochInfo
|
||||||
|
of EpochInfoFork.Altair:
|
||||||
|
altairInfo*: altair.EpochInfo
|
||||||
|
|
||||||
|
ForkyEpochInfo* = phase0.EpochInfo | altair.EpochInfo
|
||||||
|
|
||||||
ForkDigests* = object
|
ForkDigests* = object
|
||||||
phase0*: ForkDigest
|
phase0*: ForkDigest
|
||||||
altair*: ForkDigest
|
altair*: ForkDigest
|
||||||
|
@ -133,6 +146,11 @@ template init*(T: type ForkedTrustedSignedBeaconBlock, blck: altair.TrustedSigne
|
||||||
template init*(T: type ForkedTrustedSignedBeaconBlock, blck: merge.TrustedSignedBeaconBlock): T =
|
template init*(T: type ForkedTrustedSignedBeaconBlock, blck: merge.TrustedSignedBeaconBlock): T =
|
||||||
T(kind: BeaconBlockFork.Merge, mergeBlock: blck)
|
T(kind: BeaconBlockFork.Merge, mergeBlock: blck)
|
||||||
|
|
||||||
|
template init*(T: type ForkedEpochInfo, info: phase0.EpochInfo): T =
|
||||||
|
T(kind: EpochInfoFork.Phase0, phase0Info: info)
|
||||||
|
template init*(T: type ForkedEpochInfo, info: altair.EpochInfo): T =
|
||||||
|
T(kind: EpochInfoFork.Altair, altairInfo: info)
|
||||||
|
|
||||||
# State-related functionality based on ForkedHashedBeaconState instead of HashedBeaconState
|
# State-related functionality based on ForkedHashedBeaconState instead of HashedBeaconState
|
||||||
|
|
||||||
template withState*(x: ForkedHashedBeaconState, body: untyped): untyped =
|
template withState*(x: ForkedHashedBeaconState, body: untyped): untyped =
|
||||||
|
@ -150,6 +168,28 @@ template withState*(x: ForkedHashedBeaconState, body: untyped): untyped =
|
||||||
template state: untyped {.inject.} = x.hbsPhase0
|
template state: untyped {.inject.} = x.hbsPhase0
|
||||||
body
|
body
|
||||||
|
|
||||||
|
template withEpochInfo*(x: ForkedEpochInfo, body: untyped): untyped =
|
||||||
|
case x.kind
|
||||||
|
of EpochInfoFork.Phase0:
|
||||||
|
template info: untyped {.inject.} = x.phase0Info
|
||||||
|
body
|
||||||
|
of EpochInfoFork.Altair:
|
||||||
|
template info: untyped {.inject.} = x.altairInfo
|
||||||
|
body
|
||||||
|
|
||||||
|
template withEpochInfo*(
|
||||||
|
state: phase0.BeaconState, x: var ForkedEpochInfo, body: untyped): untyped =
|
||||||
|
x.kind = EpochInfoFork.Phase0
|
||||||
|
template info: untyped {.inject.} = x.phase0Info
|
||||||
|
body
|
||||||
|
|
||||||
|
template withEpochInfo*(
|
||||||
|
state: altair.BeaconState | merge.BeaconState, x: var ForkedEpochInfo,
|
||||||
|
body: untyped): untyped =
|
||||||
|
x.kind = EpochInfoFork.Altair
|
||||||
|
template info: untyped {.inject.} = x.altairInfo
|
||||||
|
body
|
||||||
|
|
||||||
# Dispatch functions
|
# Dispatch functions
|
||||||
func assign*(tgt: var ForkedHashedBeaconState, src: ForkedHashedBeaconState) =
|
func assign*(tgt: var ForkedHashedBeaconState, src: ForkedHashedBeaconState) =
|
||||||
if tgt.beaconStateFork == src.beaconStateFork:
|
if tgt.beaconStateFork == src.beaconStateFork:
|
||||||
|
|
|
@ -165,19 +165,18 @@ func clear_epoch_from_cache(cache: var StateCache, epoch: Epoch) =
|
||||||
proc advance_slot(
|
proc advance_slot(
|
||||||
cfg: RuntimeConfig,
|
cfg: RuntimeConfig,
|
||||||
state: var SomeBeaconState, previous_slot_state_root: Eth2Digest,
|
state: var SomeBeaconState, previous_slot_state_root: Eth2Digest,
|
||||||
flags: UpdateFlags, cache: var StateCache, rewards: var RewardInfo) {.nbench.} =
|
flags: UpdateFlags, cache: var StateCache, info: var ForkyEpochInfo) {.nbench.} =
|
||||||
# Do the per-slot and potentially the per-epoch processing, then bump the
|
# Do the per-slot and potentially the per-epoch processing, then bump the
|
||||||
# slot number - we've now arrived at the slot state on top of which a block
|
# slot number - we've now arrived at the slot state on top of which a block
|
||||||
# optionally can be applied.
|
# optionally can be applied.
|
||||||
process_slot(state, previous_slot_state_root)
|
process_slot(state, previous_slot_state_root)
|
||||||
|
|
||||||
rewards.statuses.setLen(0)
|
info.clear()
|
||||||
rewards.total_balances = TotalBalances()
|
|
||||||
|
|
||||||
let is_epoch_transition = (state.slot + 1).isEpoch
|
let is_epoch_transition = (state.slot + 1).isEpoch
|
||||||
if is_epoch_transition:
|
if is_epoch_transition:
|
||||||
# Note: Genesis epoch = 0, no need to test if before Genesis
|
# Note: Genesis epoch = 0, no need to test if before Genesis
|
||||||
process_epoch(cfg, state, flags, cache, rewards)
|
process_epoch(cfg, state, flags, cache, info)
|
||||||
clear_epoch_from_cache(cache, (state.slot + 1).compute_epoch_at_slot)
|
clear_epoch_from_cache(cache, (state.slot + 1).compute_epoch_at_slot)
|
||||||
|
|
||||||
state.slot += 1
|
state.slot += 1
|
||||||
|
@ -222,7 +221,7 @@ proc maybeUpgradeState*(
|
||||||
|
|
||||||
proc process_slots*(
|
proc process_slots*(
|
||||||
cfg: RuntimeConfig, state: var ForkedHashedBeaconState, slot: Slot,
|
cfg: RuntimeConfig, state: var ForkedHashedBeaconState, slot: Slot,
|
||||||
cache: var StateCache, rewards: var RewardInfo, flags: UpdateFlags): bool {.nbench.} =
|
cache: var StateCache, info: var ForkedEpochInfo, flags: UpdateFlags): bool {.nbench.} =
|
||||||
if not (getStateField(state, slot) < slot):
|
if not (getStateField(state, slot) < slot):
|
||||||
if slotProcessed notin flags or getStateField(state, slot) != slot:
|
if slotProcessed notin flags or getStateField(state, slot) != slot:
|
||||||
notice "Unusual request for a slot in the past",
|
notice "Unusual request for a slot in the past",
|
||||||
|
@ -234,8 +233,9 @@ proc process_slots*(
|
||||||
# Update the state so its slot matches that of the block
|
# Update the state so its slot matches that of the block
|
||||||
while getStateField(state, slot) < slot:
|
while getStateField(state, slot) < slot:
|
||||||
withState(state):
|
withState(state):
|
||||||
advance_slot(
|
withEpochInfo(state.data, info):
|
||||||
cfg, state.data, state.root, flags, cache, rewards)
|
advance_slot(
|
||||||
|
cfg, state.data, state.root, flags, cache, info)
|
||||||
|
|
||||||
if skipLastStateRootCalculation notin flags or
|
if skipLastStateRootCalculation notin flags or
|
||||||
state.data.slot < slot:
|
state.data.slot < slot:
|
||||||
|
@ -334,7 +334,7 @@ proc state_transition*(
|
||||||
phase0.TrustedSignedBeaconBlock | altair.SignedBeaconBlock |
|
phase0.TrustedSignedBeaconBlock | altair.SignedBeaconBlock |
|
||||||
altair.TrustedSignedBeaconBlock | merge.TrustedSignedBeaconBlock |
|
altair.TrustedSignedBeaconBlock | merge.TrustedSignedBeaconBlock |
|
||||||
merge.SignedBeaconBlock,
|
merge.SignedBeaconBlock,
|
||||||
cache: var StateCache, rewards: var RewardInfo, flags: UpdateFlags,
|
cache: var StateCache, info: var ForkedEpochInfo, flags: UpdateFlags,
|
||||||
rollback: RollbackForkedHashedProc): bool {.nbench.} =
|
rollback: RollbackForkedHashedProc): bool {.nbench.} =
|
||||||
## Apply a block to the state, advancing the slot counter as necessary. The
|
## Apply a block to the state, advancing the slot counter as necessary. The
|
||||||
## given state must be of a lower slot, or, in case the `slotProcessed` flag
|
## given state must be of a lower slot, or, in case the `slotProcessed` flag
|
||||||
|
@ -352,7 +352,7 @@ proc state_transition*(
|
||||||
## object should be rolled back to a consistent state. If the transition fails
|
## object should be rolled back to a consistent state. If the transition fails
|
||||||
## before the state has been updated, `rollback` will not be called.
|
## before the state has been updated, `rollback` will not be called.
|
||||||
if not process_slots(
|
if not process_slots(
|
||||||
cfg, state, signedBlock.message.slot, cache, rewards,
|
cfg, state, signedBlock.message.slot, cache, info,
|
||||||
flags + {skipLastStateRootCalculation}):
|
flags + {skipLastStateRootCalculation}):
|
||||||
return false
|
return false
|
||||||
state_transition_block(
|
state_transition_block(
|
||||||
|
@ -494,8 +494,8 @@ proc makeBeaconBlock*(
|
||||||
# To create a block, we'll first apply a partial block to the state, skipping
|
# To create a block, we'll first apply a partial block to the state, skipping
|
||||||
# some validations.
|
# some validations.
|
||||||
|
|
||||||
var blck = partialBeaconBlock(cfg, state, proposer_index, parent_root,
|
var blck = partialBeaconBlock(cfg, state, proposer_index, parent_root,
|
||||||
randao_reveal, eth1_data, graffiti, attestations, deposits,
|
randao_reveal, eth1_data, graffiti, attestations, deposits,
|
||||||
proposerSlashings, attesterSlashings, voluntaryExits,
|
proposerSlashings, attesterSlashings, voluntaryExits,
|
||||||
sync_aggregate, executionPayload)
|
sync_aggregate, executionPayload)
|
||||||
|
|
||||||
|
@ -619,11 +619,11 @@ proc makeBeaconBlock*(
|
||||||
template makeBeaconBlock(kind: untyped): Result[ForkedBeaconBlock, string] =
|
template makeBeaconBlock(kind: untyped): Result[ForkedBeaconBlock, string] =
|
||||||
# To create a block, we'll first apply a partial block to the state, skipping
|
# To create a block, we'll first apply a partial block to the state, skipping
|
||||||
# some validations.
|
# some validations.
|
||||||
|
|
||||||
var blck =
|
var blck =
|
||||||
ForkedBeaconBlock.init(
|
ForkedBeaconBlock.init(
|
||||||
partialBeaconBlock(cfg, state.`hbs kind`, proposer_index, parent_root,
|
partialBeaconBlock(cfg, state.`hbs kind`, proposer_index, parent_root,
|
||||||
randao_reveal, eth1_data, graffiti, attestations, deposits,
|
randao_reveal, eth1_data, graffiti, attestations, deposits,
|
||||||
proposerSlashings, attesterSlashings, voluntaryExits,
|
proposerSlashings, attesterSlashings, voluntaryExits,
|
||||||
sync_aggregate, executionPayload))
|
sync_aggregate, executionPayload))
|
||||||
|
|
||||||
|
|
|
@ -380,19 +380,14 @@ proc process_voluntary_exit*(
|
||||||
proc process_operations(cfg: RuntimeConfig,
|
proc process_operations(cfg: RuntimeConfig,
|
||||||
state: var SomeBeaconState,
|
state: var SomeBeaconState,
|
||||||
body: SomeSomeBeaconBlockBody,
|
body: SomeSomeBeaconBlockBody,
|
||||||
|
base_reward_per_increment: Gwei,
|
||||||
flags: UpdateFlags,
|
flags: UpdateFlags,
|
||||||
cache: var StateCache): Result[void, cstring] {.nbench.} =
|
cache: var StateCache): Result[void, cstring] {.nbench.} =
|
||||||
# Verify that outstanding deposits are processed up to the maximum number of
|
# Verify that outstanding deposits are processed up to the maximum number of
|
||||||
# deposits
|
# deposits
|
||||||
template base_reward_per_increment(state: phase0.BeaconState): Gwei = 0.Gwei
|
|
||||||
template base_reward_per_increment(
|
|
||||||
state: altair.BeaconState | merge.BeaconState): Gwei =
|
|
||||||
get_base_reward_per_increment(state, cache)
|
|
||||||
|
|
||||||
let
|
let
|
||||||
req_deposits = min(MAX_DEPOSITS,
|
req_deposits = min(MAX_DEPOSITS,
|
||||||
state.eth1_data.deposit_count - state.eth1_deposit_index)
|
state.eth1_data.deposit_count - state.eth1_deposit_index)
|
||||||
generalized_base_reward_per_increment = base_reward_per_increment(state)
|
|
||||||
|
|
||||||
if state.eth1_data.deposit_count < state.eth1_deposit_index or
|
if state.eth1_data.deposit_count < state.eth1_deposit_index or
|
||||||
body.deposits.lenu64 != req_deposits:
|
body.deposits.lenu64 != req_deposits:
|
||||||
|
@ -403,7 +398,7 @@ proc process_operations(cfg: RuntimeConfig,
|
||||||
for op in body.attester_slashings:
|
for op in body.attester_slashings:
|
||||||
? process_attester_slashing(cfg, state, op, flags, cache)
|
? process_attester_slashing(cfg, state, op, flags, cache)
|
||||||
for op in body.attestations:
|
for op in body.attestations:
|
||||||
? process_attestation(state, op, flags, generalized_base_reward_per_increment, cache)
|
? process_attestation(state, op, flags, base_reward_per_increment, cache)
|
||||||
for op in body.deposits:
|
for op in body.deposits:
|
||||||
? process_deposit(cfg, state, op, flags)
|
? process_deposit(cfg, state, op, flags)
|
||||||
for op in body.voluntary_exits:
|
for op in body.voluntary_exits:
|
||||||
|
@ -413,7 +408,8 @@ proc process_operations(cfg: RuntimeConfig,
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.0-alpha.6/specs/altair/beacon-chain.md#sync-committee-processing
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.0-alpha.6/specs/altair/beacon-chain.md#sync-committee-processing
|
||||||
proc process_sync_aggregate*(
|
proc process_sync_aggregate*(
|
||||||
state: var (altair.BeaconState | merge.BeaconState), aggregate: SyncAggregate, cache: var StateCache):
|
state: var (altair.BeaconState | merge.BeaconState),
|
||||||
|
aggregate: SyncAggregate, total_active_balance: Gwei, cache: var StateCache):
|
||||||
Result[void, cstring] {.nbench.} =
|
Result[void, cstring] {.nbench.} =
|
||||||
# Verify sync committee aggregate signature signing over the previous slot
|
# Verify sync committee aggregate signature signing over the previous slot
|
||||||
# block root
|
# block root
|
||||||
|
@ -441,11 +437,20 @@ proc process_sync_aggregate*(
|
||||||
|
|
||||||
# Compute participant and proposer rewards
|
# Compute participant and proposer rewards
|
||||||
let
|
let
|
||||||
total_active_increments = get_total_active_balance(state, cache) div EFFECTIVE_BALANCE_INCREMENT
|
total_active_increments =
|
||||||
total_base_rewards = get_base_reward_per_increment(state, cache) * total_active_increments
|
total_active_balance div EFFECTIVE_BALANCE_INCREMENT
|
||||||
max_participant_rewards = total_base_rewards * SYNC_REWARD_WEIGHT div WEIGHT_DENOMINATOR div SLOTS_PER_EPOCH
|
total_base_rewards =
|
||||||
|
get_base_reward_per_increment(total_active_balance) * total_active_increments
|
||||||
|
max_participant_rewards =
|
||||||
|
total_base_rewards * SYNC_REWARD_WEIGHT div WEIGHT_DENOMINATOR div SLOTS_PER_EPOCH
|
||||||
participant_reward = max_participant_rewards div SYNC_COMMITTEE_SIZE
|
participant_reward = max_participant_rewards div SYNC_COMMITTEE_SIZE
|
||||||
proposer_reward = participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
|
proposer_reward =
|
||||||
|
participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
|
||||||
|
proposer_index = get_beacon_proposer_index(state, cache)
|
||||||
|
|
||||||
|
if proposer_index.isNone:
|
||||||
|
# We're processing a block, so this can't happen, in theory (!)
|
||||||
|
return err("process_sync_aggregate: no proposer")
|
||||||
|
|
||||||
# Apply participant and proposer rewards
|
# Apply participant and proposer rewards
|
||||||
|
|
||||||
|
@ -460,19 +465,15 @@ proc process_sync_aggregate*(
|
||||||
|
|
||||||
# TODO could use a sequtils2 zipIt
|
# TODO could use a sequtils2 zipIt
|
||||||
for i in 0 ..< min(
|
for i in 0 ..< min(
|
||||||
state.current_sync_committee.pubkeys.len,
|
state.current_sync_committee.pubkeys.len,
|
||||||
aggregate.sync_committee_bits.len):
|
aggregate.sync_committee_bits.len):
|
||||||
let proposer_index = get_beacon_proposer_index(state, cache)
|
let participant_index =
|
||||||
if proposer_index.isSome:
|
pubkeyIndices.getOrDefault(state.current_sync_committee.pubkeys[i])
|
||||||
let participant_index =
|
if aggregate.sync_committee_bits[i]:
|
||||||
pubkeyIndices.getOrDefault(state.current_sync_committee.pubkeys[i])
|
increase_balance(state, participant_index, participant_reward)
|
||||||
if aggregate.sync_committee_bits[i]:
|
increase_balance(state, proposer_index.get, proposer_reward)
|
||||||
increase_balance(state, participant_index, participant_reward)
|
|
||||||
increase_balance(state, proposer_index.get, proposer_reward)
|
|
||||||
else:
|
|
||||||
decrease_balance(state, participant_index, participant_reward)
|
|
||||||
else:
|
else:
|
||||||
warn "process_sync_aggregate: get_beacon_proposer_index failed"
|
decrease_balance(state, participant_index, participant_reward)
|
||||||
|
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
|
@ -561,7 +562,7 @@ proc process_block*(
|
||||||
? process_block_header(state, blck, flags, cache)
|
? process_block_header(state, blck, flags, cache)
|
||||||
? process_randao(state, blck.body, flags, cache)
|
? process_randao(state, blck.body, flags, cache)
|
||||||
? process_eth1_data(state, blck.body)
|
? process_eth1_data(state, blck.body)
|
||||||
? process_operations(cfg, state, blck.body, flags, cache)
|
? process_operations(cfg, state, blck.body, 0.Gwei, flags, cache)
|
||||||
|
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
|
@ -593,8 +594,16 @@ proc process_block*(
|
||||||
? process_block_header(state, blck, flags, cache)
|
? process_block_header(state, blck, flags, cache)
|
||||||
? process_randao(state, blck.body, flags, cache)
|
? process_randao(state, blck.body, flags, cache)
|
||||||
? process_eth1_data(state, blck.body)
|
? process_eth1_data(state, blck.body)
|
||||||
? process_operations(cfg, state, blck.body, flags, cache)
|
|
||||||
? process_sync_aggregate(state, blck.body.sync_aggregate, cache) # [New in Altair]
|
let
|
||||||
|
total_active_balance = get_total_active_balance(state, cache)
|
||||||
|
base_reward_per_increment =
|
||||||
|
get_base_reward_per_increment(total_active_balance)
|
||||||
|
|
||||||
|
? process_operations(
|
||||||
|
cfg, state, blck.body, base_reward_per_increment, flags, cache)
|
||||||
|
? process_sync_aggregate(
|
||||||
|
state, blck.body.sync_aggregate, total_active_balance, cache) # [New in Altair]
|
||||||
|
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
|
@ -617,8 +626,15 @@ proc process_block*(
|
||||||
func(_: ExecutionPayload): bool = true)
|
func(_: ExecutionPayload): bool = true)
|
||||||
? process_randao(state, blck.body, flags, cache)
|
? process_randao(state, blck.body, flags, cache)
|
||||||
? process_eth1_data(state, blck.body)
|
? process_eth1_data(state, blck.body)
|
||||||
? process_operations(cfg, state, blck.body, flags, cache)
|
|
||||||
? process_sync_aggregate(state, blck.body.sync_aggregate, cache)
|
let
|
||||||
|
total_active_balance = get_total_active_balance(state, cache)
|
||||||
|
base_reward_per_increment =
|
||||||
|
get_base_reward_per_increment(total_active_balance)
|
||||||
|
? process_operations(
|
||||||
|
cfg, state, blck.body, base_reward_per_increment, flags, cache)
|
||||||
|
? process_sync_aggregate(
|
||||||
|
state, blck.body.sync_aggregate, total_active_balance, cache)
|
||||||
|
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
|
|
|
@ -52,9 +52,9 @@ template previous_epoch_target_attesters*(v: TotalBalances): Gwei =
|
||||||
template previous_epoch_head_attesters*(v: TotalBalances): Gwei =
|
template previous_epoch_head_attesters*(v: TotalBalances): Gwei =
|
||||||
max(EFFECTIVE_BALANCE_INCREMENT, v.previous_epoch_head_attesters_raw)
|
max(EFFECTIVE_BALANCE_INCREMENT, v.previous_epoch_head_attesters_raw)
|
||||||
|
|
||||||
func init*(rewards: var RewardInfo, state: SomeBeaconState) =
|
func init*(info: var phase0.EpochInfo, state: phase0.BeaconState) =
|
||||||
rewards.total_balances = TotalBalances()
|
info.total_balances = TotalBalances()
|
||||||
rewards.statuses.setLen(state.validators.len)
|
info.statuses.setLen(state.validators.len)
|
||||||
|
|
||||||
for i in 0..<state.validators.len:
|
for i in 0..<state.validators.len:
|
||||||
let v = unsafeAddr state.validators[i]
|
let v = unsafeAddr state.validators[i]
|
||||||
|
@ -66,13 +66,13 @@ func init*(rewards: var RewardInfo, state: SomeBeaconState) =
|
||||||
flags.incl RewardFlags.canWithdrawInCurrentEpoch
|
flags.incl RewardFlags.canWithdrawInCurrentEpoch
|
||||||
|
|
||||||
if v[].is_active_validator(state.get_current_epoch()):
|
if v[].is_active_validator(state.get_current_epoch()):
|
||||||
rewards.total_balances.current_epoch_raw += v[].effective_balance
|
info.total_balances.current_epoch_raw += v[].effective_balance
|
||||||
|
|
||||||
if v[].is_active_validator(state.get_previous_epoch()):
|
if v[].is_active_validator(state.get_previous_epoch()):
|
||||||
flags.incl RewardFlags.isActiveInPreviousEpoch
|
flags.incl RewardFlags.isActiveInPreviousEpoch
|
||||||
rewards.total_balances.previous_epoch_raw += v[].effective_balance
|
info.total_balances.previous_epoch_raw += v[].effective_balance
|
||||||
|
|
||||||
rewards.statuses[i] = RewardStatus(
|
info.statuses[i] = RewardStatus(
|
||||||
current_epoch_effective_balance: v[].effective_balance,
|
current_epoch_effective_balance: v[].effective_balance,
|
||||||
flags: flags,
|
flags: flags,
|
||||||
)
|
)
|
||||||
|
@ -82,7 +82,7 @@ func add(a: var RewardDelta, b: RewardDelta) =
|
||||||
a.penalties += b.penalties
|
a.penalties += b.penalties
|
||||||
|
|
||||||
func process_attestation(
|
func process_attestation(
|
||||||
self: var RewardInfo, state: phase0.BeaconState, a: PendingAttestation,
|
info: var phase0.EpochInfo, state: phase0.BeaconState, a: PendingAttestation,
|
||||||
cache: var StateCache) =
|
cache: var StateCache) =
|
||||||
# Collect information about the attestation
|
# Collect information about the attestation
|
||||||
var
|
var
|
||||||
|
@ -110,12 +110,12 @@ func process_attestation(
|
||||||
# Update the cache for all participants
|
# Update the cache for all participants
|
||||||
for validator_index in get_attesting_indices(
|
for validator_index in get_attesting_indices(
|
||||||
state, a.data, a.aggregation_bits, cache):
|
state, a.data, a.aggregation_bits, cache):
|
||||||
template v(): untyped = self.statuses[validator_index]
|
template v(): untyped = info.statuses[validator_index]
|
||||||
|
|
||||||
v.flags = v.flags + flags
|
v.flags = v.flags + flags
|
||||||
|
|
||||||
if is_previous_epoch_attester.isSome:
|
if is_previous_epoch_attester.isSome:
|
||||||
if v.isPreviousEpochAttester.isSome:
|
if v.is_previous_epoch_attester.isSome:
|
||||||
if is_previous_epoch_attester.get().delay <
|
if is_previous_epoch_attester.get().delay <
|
||||||
v.is_previous_epoch_attester.get().delay:
|
v.is_previous_epoch_attester.get().delay:
|
||||||
v.is_previous_epoch_attester = is_previous_epoch_attester
|
v.is_previous_epoch_attester = is_previous_epoch_attester
|
||||||
|
@ -123,47 +123,51 @@ func process_attestation(
|
||||||
v.is_previous_epoch_attester = is_previous_epoch_attester
|
v.is_previous_epoch_attester = is_previous_epoch_attester
|
||||||
|
|
||||||
func process_attestations*(
|
func process_attestations*(
|
||||||
self: var RewardInfo, state: phase0.BeaconState, cache: var StateCache) =
|
info: var phase0.EpochInfo, state: phase0.BeaconState, cache: var StateCache) =
|
||||||
# Walk state attestations and update the status information
|
# Walk state attestations and update the status information
|
||||||
for a in state.previous_epoch_attestations:
|
for a in state.previous_epoch_attestations:
|
||||||
process_attestation(self, state, a, cache)
|
process_attestation(info, state, a, cache)
|
||||||
for a in state.current_epoch_attestations:
|
for a in state.current_epoch_attestations:
|
||||||
process_attestation(self, state, a, cache)
|
process_attestation(info, state, a, cache)
|
||||||
|
|
||||||
for idx, v in self.statuses:
|
for idx, v in info.statuses:
|
||||||
if v.flags.contains RewardFlags.isSlashed:
|
if v.flags.contains RewardFlags.isSlashed:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
let validator_balance = state.validators[idx].effective_balance
|
let validator_balance = state.validators[idx].effective_balance
|
||||||
|
|
||||||
if v.flags.contains RewardFlags.isCurrentEpochAttester:
|
if v.flags.contains RewardFlags.isCurrentEpochAttester:
|
||||||
self.total_balances.current_epoch_attesters_raw += validator_balance
|
info.total_balances.current_epoch_attesters_raw += validator_balance
|
||||||
|
|
||||||
if v.flags.contains RewardFlags.isCurrentEpochTargetAttester:
|
if v.flags.contains RewardFlags.isCurrentEpochTargetAttester:
|
||||||
self.total_balances.current_epoch_target_attesters_raw += validator_balance
|
info.total_balances.current_epoch_target_attesters_raw += validator_balance
|
||||||
|
|
||||||
if v.is_previous_epoch_attester.isSome():
|
if v.is_previous_epoch_attester.isSome():
|
||||||
self.total_balances.previous_epoch_attesters_raw += validator_balance
|
info.total_balances.previous_epoch_attesters_raw += validator_balance
|
||||||
|
|
||||||
if v.flags.contains RewardFlags.isPreviousEpochTargetAttester:
|
if v.flags.contains RewardFlags.isPreviousEpochTargetAttester:
|
||||||
self.total_balances.previous_epoch_target_attesters_raw += validator_balance
|
info.total_balances.previous_epoch_target_attesters_raw += validator_balance
|
||||||
|
|
||||||
if v.flags.contains RewardFlags.isPreviousEpochHeadAttester:
|
if v.flags.contains RewardFlags.isPreviousEpochHeadAttester:
|
||||||
self.total_balances.previous_epoch_head_attesters_raw += validator_balance
|
info.total_balances.previous_epoch_head_attesters_raw += validator_balance
|
||||||
|
|
||||||
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/phase0/beacon-chain.md#helpers
|
||||||
|
# get_eligible_validator_indices
|
||||||
func is_eligible_validator*(validator: RewardStatus): bool =
|
func is_eligible_validator*(validator: RewardStatus): bool =
|
||||||
validator.flags.contains(RewardFlags.isActiveInPreviousEpoch) or
|
validator.flags.contains(RewardFlags.isActiveInPreviousEpoch) or
|
||||||
(validator.flags.contains(RewardFlags.isSlashed) and not
|
(validator.flags.contains(RewardFlags.isSlashed) and not
|
||||||
(validator.flags.contains RewardFlags.canWithdrawInCurrentEpoch))
|
(validator.flags.contains RewardFlags.canWithdrawInCurrentEpoch))
|
||||||
|
|
||||||
|
func is_eligible_validator*(validator: Validator, previous_epoch: Epoch): bool =
|
||||||
|
is_active_validator(validator, previous_epoch) or
|
||||||
|
(validator.slashed and previous_epoch + 1 < validator.withdrawable_epoch)
|
||||||
|
|
||||||
|
func is_eligible_validator*(validator: ParticipationInfo): bool =
|
||||||
|
validator.flags.contains(ParticipationFlag.eligible)
|
||||||
|
|
||||||
# Spec
|
# Spec
|
||||||
# --------------------------------------------------------
|
# --------------------------------------------------------
|
||||||
|
|
||||||
type
|
|
||||||
UnslashedParticipatingBalances = object
|
|
||||||
previous_epoch: array[PARTICIPATION_FLAG_WEIGHTS.len, Gwei]
|
|
||||||
current_epoch_TIMELY_TARGET: Gwei
|
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_unslashed_participating_indices
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_unslashed_participating_indices
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#get_total_balance
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#get_total_balance
|
||||||
func get_unslashed_participating_balances*(state: altair.BeaconState | merge.BeaconState):
|
func get_unslashed_participating_balances*(state: altair.BeaconState | merge.BeaconState):
|
||||||
|
@ -174,17 +178,25 @@ func get_unslashed_participating_balances*(state: altair.BeaconState | merge.Bea
|
||||||
var res: UnslashedParticipatingBalances
|
var res: UnslashedParticipatingBalances
|
||||||
|
|
||||||
for validator_index in 0'u64 ..< state.validators.lenu64:
|
for validator_index in 0'u64 ..< state.validators.lenu64:
|
||||||
|
let
|
||||||
|
is_active_current_epoch = is_active_validator(
|
||||||
|
state.validators[validator_index], current_epoch)
|
||||||
|
validator_effective_balance =
|
||||||
|
state.validators[validator_index].effective_balance
|
||||||
|
|
||||||
|
if is_active_current_epoch:
|
||||||
|
# Active balance counted also for slashed validators
|
||||||
|
res.current_epoch += validator_effective_balance
|
||||||
|
|
||||||
if state.validators[validator_index].slashed:
|
if state.validators[validator_index].slashed:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
let
|
let
|
||||||
is_active_previous_epoch = is_active_validator(
|
is_active_previous_epoch = is_active_validator(
|
||||||
state.validators[validator_index], previous_epoch)
|
state.validators[validator_index], previous_epoch)
|
||||||
is_active_current_epoch = is_active_validator(
|
|
||||||
state.validators[validator_index], current_epoch)
|
|
||||||
previous_epoch_participation =
|
previous_epoch_participation =
|
||||||
state.previous_epoch_participation[validator_index]
|
state.previous_epoch_participation[validator_index]
|
||||||
validator_effective_balance =
|
|
||||||
state.validators[validator_index].effective_balance
|
|
||||||
if is_active_previous_epoch:
|
if is_active_previous_epoch:
|
||||||
for flag_index in 0 ..< PARTICIPATION_FLAG_WEIGHTS.len:
|
for flag_index in 0 ..< PARTICIPATION_FLAG_WEIGHTS.len:
|
||||||
if has_flag(previous_epoch_participation, flag_index):
|
if has_flag(previous_epoch_participation, flag_index):
|
||||||
|
@ -203,6 +215,8 @@ func get_unslashed_participating_balances*(state: altair.BeaconState | merge.Bea
|
||||||
res.current_epoch_TIMELY_TARGET =
|
res.current_epoch_TIMELY_TARGET =
|
||||||
max(EFFECTIVE_BALANCE_INCREMENT, res.current_epoch_TIMELY_TARGET)
|
max(EFFECTIVE_BALANCE_INCREMENT, res.current_epoch_TIMELY_TARGET)
|
||||||
|
|
||||||
|
res.current_epoch = max(EFFECTIVE_BALANCE_INCREMENT, res.current_epoch)
|
||||||
|
|
||||||
res
|
res
|
||||||
|
|
||||||
func is_unslashed_participating_index(
|
func is_unslashed_participating_index(
|
||||||
|
@ -414,8 +428,7 @@ proc weigh_justification_and_finalization(state: var (altair.BeaconState | merge
|
||||||
checkpoint = shortLog(state.finalized_checkpoint)
|
checkpoint = shortLog(state.finalized_checkpoint)
|
||||||
|
|
||||||
proc process_justification_and_finalization*(state: var (altair.BeaconState | merge.BeaconState),
|
proc process_justification_and_finalization*(state: var (altair.BeaconState | merge.BeaconState),
|
||||||
total_active_balance: Gwei,
|
balances: UnslashedParticipatingBalances,
|
||||||
unslashed_participating_balances: UnslashedParticipatingBalances,
|
|
||||||
flags: UpdateFlags = {}) {.nbench.} =
|
flags: UpdateFlags = {}) {.nbench.} =
|
||||||
# Initial FFG checkpoint values have a `0x00` stub for `root`.
|
# Initial FFG checkpoint values have a `0x00` stub for `root`.
|
||||||
# Skip FFG updates in the first two epochs to avoid corner cases that might
|
# Skip FFG updates in the first two epochs to avoid corner cases that might
|
||||||
|
@ -428,9 +441,9 @@ proc process_justification_and_finalization*(state: var (altair.BeaconState | me
|
||||||
# historical reasons.
|
# historical reasons.
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.0-beta.2/specs/phase0/beacon-chain.md#justification-and-finalization
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.0-beta.2/specs/phase0/beacon-chain.md#justification-and-finalization
|
||||||
weigh_justification_and_finalization(
|
weigh_justification_and_finalization(
|
||||||
state, total_active_balance,
|
state, balances.current_epoch,
|
||||||
unslashed_participating_balances.previous_epoch[TIMELY_TARGET_FLAG_INDEX],
|
balances.previous_epoch[TIMELY_TARGET_FLAG_INDEX],
|
||||||
unslashed_participating_balances.current_epoch_TIMELY_TARGET, flags)
|
balances.current_epoch_TIMELY_TARGET, flags)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#helpers
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#helpers
|
||||||
func get_base_reward_sqrt*(state: phase0.BeaconState, index: ValidatorIndex,
|
func get_base_reward_sqrt*(state: phase0.BeaconState, index: ValidatorIndex,
|
||||||
|
@ -555,18 +568,18 @@ func get_inactivity_penalty_delta*(validator: RewardStatus,
|
||||||
delta
|
delta
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#get_attestation_deltas
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#get_attestation_deltas
|
||||||
func get_attestation_deltas(state: phase0.BeaconState, rewards: var RewardInfo) =
|
func get_attestation_deltas(state: phase0.BeaconState, info: var phase0.EpochInfo) =
|
||||||
## Update rewards with attestation reward/penalty deltas for each validator.
|
## Update rewards with attestation reward/penalty deltas for each validator.
|
||||||
|
|
||||||
let
|
let
|
||||||
finality_delay = get_finality_delay(state)
|
finality_delay = get_finality_delay(state)
|
||||||
total_balance = rewards.total_balances.current_epoch
|
total_balance = info.total_balances.current_epoch
|
||||||
total_balance_sqrt = integer_squareroot(total_balance)
|
total_balance_sqrt = integer_squareroot(total_balance)
|
||||||
# Filter out ineligible validators. All sub-functions of the spec do this
|
# Filter out ineligible validators. All sub-functions of the spec do this
|
||||||
# except for `get_inclusion_delay_deltas`. It's safe to do so here because
|
# except for `get_inclusion_delay_deltas`. It's safe to do so here because
|
||||||
# any validator that is in the unslashed indices of the matching source
|
# any validator that is in the unslashed indices of the matching source
|
||||||
# attestations is active, and therefore eligible.
|
# attestations is active, and therefore eligible.
|
||||||
for index, validator in rewards.statuses.mpairs():
|
for index, validator in info.statuses.mpairs():
|
||||||
if not is_eligible_validator(validator):
|
if not is_eligible_validator(validator):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
@ -576,11 +589,11 @@ func get_attestation_deltas(state: phase0.BeaconState, rewards: var RewardInfo)
|
||||||
|
|
||||||
let
|
let
|
||||||
source_delta = get_source_delta(
|
source_delta = get_source_delta(
|
||||||
validator, base_reward, rewards.total_balances, finality_delay)
|
validator, base_reward, info.total_balances, finality_delay)
|
||||||
target_delta = get_target_delta(
|
target_delta = get_target_delta(
|
||||||
validator, base_reward, rewards.total_balances, finality_delay)
|
validator, base_reward, info.total_balances, finality_delay)
|
||||||
head_delta = get_head_delta(
|
head_delta = get_head_delta(
|
||||||
validator, base_reward, rewards.total_balances, finality_delay)
|
validator, base_reward, info.total_balances, finality_delay)
|
||||||
(inclusion_delay_delta, proposer_delta) =
|
(inclusion_delay_delta, proposer_delta) =
|
||||||
get_inclusion_delay_delta(validator, base_reward)
|
get_inclusion_delay_delta(validator, base_reward)
|
||||||
inactivity_delta = get_inactivity_penalty_delta(
|
inactivity_delta = get_inactivity_penalty_delta(
|
||||||
|
@ -594,67 +607,66 @@ func get_attestation_deltas(state: phase0.BeaconState, rewards: var RewardInfo)
|
||||||
|
|
||||||
if proposer_delta.isSome:
|
if proposer_delta.isSome:
|
||||||
let proposer_index = proposer_delta.get()[0]
|
let proposer_index = proposer_delta.get()[0]
|
||||||
if proposer_index < rewards.statuses.lenu64:
|
if proposer_index < info.statuses.lenu64:
|
||||||
rewards.statuses[proposer_index].delta.add(
|
info.statuses[proposer_index].delta.add(
|
||||||
proposer_delta.get()[1])
|
proposer_delta.get()[1])
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_base_reward_per_increment
|
|
||||||
func get_base_reward_per_increment(
|
|
||||||
state: altair.BeaconState | merge.BeaconState, total_active_balance_sqrt: uint64): Gwei =
|
|
||||||
EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR div total_active_balance_sqrt
|
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_base_reward
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_base_reward
|
||||||
func get_base_reward(
|
func get_base_reward_increment(
|
||||||
state: altair.BeaconState | merge.BeaconState, index: ValidatorIndex,
|
state: altair.BeaconState | merge.BeaconState, index: ValidatorIndex,
|
||||||
total_active_balance_sqrt: uint64): Gwei =
|
base_reward_per_increment: Gwei): Gwei =
|
||||||
## Return the base reward for the validator defined by ``index`` with respect
|
## Return the base reward for the validator defined by ``index`` with respect
|
||||||
## to the current ``state``.
|
## to the current ``state``.
|
||||||
let increments =
|
let increments =
|
||||||
state.validators[index].effective_balance div EFFECTIVE_BALANCE_INCREMENT
|
state.validators[index].effective_balance div EFFECTIVE_BALANCE_INCREMENT
|
||||||
increments * get_base_reward_per_increment(state, total_active_balance_sqrt)
|
increments * base_reward_per_increment
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_flag_index_deltas
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#get_flag_index_deltas
|
||||||
iterator get_flag_index_deltas(
|
iterator get_flag_index_deltas(
|
||||||
state: altair.BeaconState | merge.BeaconState, flag_index: int, total_active_balance: Gwei,
|
state: altair.BeaconState | merge.BeaconState, flag_index: int,
|
||||||
total_active_balance_sqrt: uint64,
|
base_reward_per_increment: Gwei,
|
||||||
unslashed_participating_balances: UnslashedParticipatingBalances):
|
info: altair.EpochInfo):
|
||||||
(ValidatorIndex, Gwei, Gwei) =
|
(ValidatorIndex, RewardDelta) =
|
||||||
## Return the deltas for a given ``flag_index`` by scanning through the
|
## Return the deltas for a given ``flag_index`` by scanning through the
|
||||||
## participation flags.
|
## participation flags.
|
||||||
let
|
let
|
||||||
previous_epoch = get_previous_epoch(state)
|
previous_epoch = get_previous_epoch(state)
|
||||||
weight = PARTICIPATION_FLAG_WEIGHTS[flag_index].uint64 # safe
|
weight = PARTICIPATION_FLAG_WEIGHTS[flag_index].uint64 # safe
|
||||||
unslashed_participating_balance =
|
unslashed_participating_balance =
|
||||||
unslashed_participating_balances.previous_epoch[flag_index]
|
info.balances.previous_epoch[flag_index]
|
||||||
unslashed_participating_increments =
|
unslashed_participating_increments =
|
||||||
unslashed_participating_balance div EFFECTIVE_BALANCE_INCREMENT
|
unslashed_participating_balance div EFFECTIVE_BALANCE_INCREMENT
|
||||||
active_increments = total_active_balance div EFFECTIVE_BALANCE_INCREMENT
|
active_increments =
|
||||||
|
info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT
|
||||||
|
|
||||||
for index in 0 ..< state.validators.len:
|
for index in 0 ..< state.validators.len:
|
||||||
# TODO Obviously not great
|
if not is_eligible_validator(info.validators[index]):
|
||||||
let v = state.validators[index]
|
|
||||||
if not (is_active_validator(v, previous_epoch) or
|
|
||||||
(v.slashed and previous_epoch + 1 < v.withdrawable_epoch)):
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
template vidx: ValidatorIndex = index.ValidatorIndex
|
template vidx: ValidatorIndex = index.ValidatorIndex
|
||||||
let base_reward = get_base_reward(state, vidx, total_active_balance_sqrt)
|
let base_reward = get_base_reward_increment(state, vidx, base_reward_per_increment)
|
||||||
yield
|
yield
|
||||||
if is_unslashed_participating_index(
|
if is_unslashed_participating_index(
|
||||||
state, flag_index, previous_epoch, vidx):
|
state, flag_index, previous_epoch, vidx):
|
||||||
if not is_in_inactivity_leak(state):
|
if not is_in_inactivity_leak(state):
|
||||||
let reward_numerator =
|
let reward_numerator =
|
||||||
base_reward * weight * unslashed_participating_increments
|
base_reward * weight * unslashed_participating_increments
|
||||||
(vidx, reward_numerator div (active_increments * WEIGHT_DENOMINATOR), 0.Gwei)
|
(vidx, RewardDelta(
|
||||||
|
rewards: reward_numerator div (active_increments * WEIGHT_DENOMINATOR),
|
||||||
|
penalties: 0.Gwei))
|
||||||
else:
|
else:
|
||||||
(vidx, 0.Gwei, 0.Gwei)
|
(vidx, RewardDelta(rewards: 0.Gwei, penalties: 0.Gwei))
|
||||||
elif flag_index != TIMELY_HEAD_FLAG_INDEX:
|
elif flag_index != TIMELY_HEAD_FLAG_INDEX:
|
||||||
(vidx, 0.Gwei, base_reward * weight div WEIGHT_DENOMINATOR)
|
(vidx, RewardDelta(
|
||||||
|
rewards: 0.Gwei,
|
||||||
|
penalties: base_reward * weight div WEIGHT_DENOMINATOR))
|
||||||
else:
|
else:
|
||||||
(vidx, 0.Gwei, 0.Gwei)
|
(vidx, RewardDelta(rewards: 0.Gwei, penalties: 0.Gwei))
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas
|
||||||
iterator get_inactivity_penalty_deltas(cfg: RuntimeConfig, state: altair.BeaconState | merge.BeaconState):
|
iterator get_inactivity_penalty_deltas(
|
||||||
|
cfg: RuntimeConfig, state: altair.BeaconState | merge.BeaconState,
|
||||||
|
info: altair.EpochInfo):
|
||||||
(ValidatorIndex, Gwei) =
|
(ValidatorIndex, Gwei) =
|
||||||
## Return the inactivity penalty deltas by considering timely target
|
## Return the inactivity penalty deltas by considering timely target
|
||||||
## participation flags and inactivity scores.
|
## participation flags and inactivity scores.
|
||||||
|
@ -664,10 +676,7 @@ iterator get_inactivity_penalty_deltas(cfg: RuntimeConfig, state: altair.BeaconS
|
||||||
previous_epoch = get_previous_epoch(state)
|
previous_epoch = get_previous_epoch(state)
|
||||||
|
|
||||||
for index in 0 ..< state.validators.len:
|
for index in 0 ..< state.validators.len:
|
||||||
# get_eligible_validator_indices()
|
if not is_eligible_validator(info.validators[index]):
|
||||||
let v = state.validators[index]
|
|
||||||
if not (is_active_validator(v, previous_epoch) or
|
|
||||||
(v.slashed and previous_epoch + 1 < v.withdrawable_epoch)):
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
template vidx: untyped = index.ValidatorIndex
|
template vidx: untyped = index.ValidatorIndex
|
||||||
|
@ -678,24 +687,24 @@ iterator get_inactivity_penalty_deltas(cfg: RuntimeConfig, state: altair.BeaconS
|
||||||
state.inactivity_scores[index]
|
state.inactivity_scores[index]
|
||||||
yield (vidx, Gwei(penalty_numerator div penalty_denominator))
|
yield (vidx, Gwei(penalty_numerator div penalty_denominator))
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#rewards-and-penalties
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#rewards-and-penalties-1
|
||||||
func process_rewards_and_penalties(
|
func process_rewards_and_penalties(
|
||||||
state: var phase0.BeaconState, rewards: var RewardInfo) {.nbench.} =
|
state: var phase0.BeaconState, info: var phase0.EpochInfo) {.nbench.} =
|
||||||
# No rewards are applied at the end of `GENESIS_EPOCH` because rewards are
|
# No rewards are applied at the end of `GENESIS_EPOCH` because rewards are
|
||||||
# for work done in the previous epoch
|
# for work done in the previous epoch
|
||||||
doAssert rewards.statuses.len == state.validators.len
|
doAssert info.statuses.len == state.validators.len
|
||||||
|
|
||||||
if get_current_epoch(state) == GENESIS_EPOCH:
|
if get_current_epoch(state) == GENESIS_EPOCH:
|
||||||
return
|
return
|
||||||
|
|
||||||
get_attestation_deltas(state, rewards)
|
get_attestation_deltas(state, info)
|
||||||
|
|
||||||
# Here almost all balances are updated (assuming most validators are active) -
|
# Here almost all balances are updated (assuming most validators are active) -
|
||||||
# clearing the cache becomes a bottleneck if done item by item because of the
|
# clearing the cache becomes a bottleneck if done item by item because of the
|
||||||
# recursive nature of cache clearing - instead, we clear the whole cache then
|
# recursive nature of cache clearing - instead, we clear the whole cache then
|
||||||
# update the raw list directly
|
# update the raw list directly
|
||||||
state.balances.clearCache()
|
state.balances.clearCache()
|
||||||
for idx, v in rewards.statuses:
|
for idx, v in info.statuses:
|
||||||
var balance = state.balances.asSeq()[idx]
|
var balance = state.balances.asSeq()[idx]
|
||||||
increase_balance(balance, v.delta.rewards)
|
increase_balance(balance, v.delta.rewards)
|
||||||
decrease_balance(balance, v.delta.penalties)
|
decrease_balance(balance, v.delta.penalties)
|
||||||
|
@ -704,39 +713,35 @@ func process_rewards_and_penalties(
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#rewards-and-penalties
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#rewards-and-penalties
|
||||||
func process_rewards_and_penalties(
|
func process_rewards_and_penalties(
|
||||||
cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState),
|
cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState),
|
||||||
total_active_balance: Gwei,
|
info: var altair.EpochInfo)
|
||||||
unslashed_participating_balances: UnslashedParticipatingBalances)
|
|
||||||
{.nbench.} =
|
{.nbench.} =
|
||||||
if get_current_epoch(state) == GENESIS_EPOCH:
|
if get_current_epoch(state) == GENESIS_EPOCH:
|
||||||
return
|
return
|
||||||
|
|
||||||
# TODO assess relevance of missing phase0 optimizations
|
let
|
||||||
# TODO probably both of these aren't necessary, but need to verify
|
total_active_balance = info.balances.current_epoch
|
||||||
# commutativity & associativity. Probably, since active validators
|
base_reward_per_increment = get_base_reward_per_increment(
|
||||||
# get ejected at 16 Gwei, either it avoids over or underflow there
|
total_active_balance)
|
||||||
# or doesn't receive rewards or penalties so both are 0. But start
|
|
||||||
# with this.
|
|
||||||
var
|
|
||||||
rewards = newSeq[Gwei](state.validators.len)
|
|
||||||
penalties = newSeq[Gwei](state.validators.len)
|
|
||||||
|
|
||||||
let total_active_balance_sqrt = integer_squareroot(total_active_balance)
|
|
||||||
|
|
||||||
|
doAssert state.validators.len() == info.validators.len()
|
||||||
for flag_index in 0 ..< PARTICIPATION_FLAG_WEIGHTS.len:
|
for flag_index in 0 ..< PARTICIPATION_FLAG_WEIGHTS.len:
|
||||||
for validator_index, reward, penalty in get_flag_index_deltas(
|
for validator_index, delta in get_flag_index_deltas(
|
||||||
state, flag_index, total_active_balance, total_active_balance_sqrt,
|
state, flag_index, base_reward_per_increment, info):
|
||||||
unslashed_participating_balances):
|
info.validators[validator_index].delta.add(delta)
|
||||||
rewards[validator_index] += reward
|
|
||||||
penalties[validator_index] += penalty
|
|
||||||
|
|
||||||
for validator_index, penalty in get_inactivity_penalty_deltas(cfg, state):
|
for validator_index, penalty in get_inactivity_penalty_deltas(
|
||||||
penalties[validator_index] += penalty
|
cfg, state, info):
|
||||||
|
info.validators[validator_index].delta.penalties += penalty
|
||||||
|
|
||||||
|
# Here almost all balances are updated (assuming most validators are active) -
|
||||||
|
# clearing the cache becomes a bottleneck if done item by item because of the
|
||||||
|
# recursive nature of cache clearing - instead, we clear the whole cache then
|
||||||
|
# update the raw list directly
|
||||||
state.balances.clearCache()
|
state.balances.clearCache()
|
||||||
for index in 0 ..< len(state.validators):
|
for index in 0 ..< len(state.validators):
|
||||||
var balance = state.balances.asSeq()[index]
|
var balance = state.balances.asSeq()[index]
|
||||||
increase_balance(balance, rewards[index])
|
increase_balance(balance, info.validators[index].delta.rewards)
|
||||||
decrease_balance(balance, penalties[index])
|
decrease_balance(balance, info.validators[index].delta.penalties)
|
||||||
state.balances.asSeq()[index] = balance
|
state.balances.asSeq()[index] = balance
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#registry-updates
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#registry-updates
|
||||||
|
@ -905,21 +910,20 @@ proc process_sync_committee_updates*(state: var (altair.BeaconState | merge.Beac
|
||||||
state.next_sync_committee = get_next_sync_committee(state)
|
state.next_sync_committee = get_next_sync_committee(state)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#inactivity-scores
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#inactivity-scores
|
||||||
func process_inactivity_updates*(cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState)) =
|
func process_inactivity_updates*(
|
||||||
|
cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState),
|
||||||
|
info: altair.EpochInfo) =
|
||||||
# Score updates based on previous epoch participation, skip genesis epoch
|
# Score updates based on previous epoch participation, skip genesis epoch
|
||||||
if get_current_epoch(state) == GENESIS_EPOCH:
|
if get_current_epoch(state) == GENESIS_EPOCH:
|
||||||
return
|
return
|
||||||
|
|
||||||
# TODO actually implement get_eligible_validator_indices() as an iterator
|
|
||||||
let
|
let
|
||||||
previous_epoch = get_previous_epoch(state) # get_eligible_validator_indices()
|
previous_epoch = get_previous_epoch(state) # get_eligible_validator_indices()
|
||||||
not_in_inactivity_leak = not is_in_inactivity_leak(state)
|
not_in_inactivity_leak = not is_in_inactivity_leak(state)
|
||||||
|
|
||||||
state.inactivity_scores.clearCache()
|
state.inactivity_scores.clearCache()
|
||||||
for index in 0'u64 ..< state.validators.lenu64:
|
for index in 0'u64 ..< state.validators.lenu64:
|
||||||
# get_eligible_validator_indices()
|
if not is_eligible_validator(info.validators[index]):
|
||||||
let v = state.validators.asSeq()[index]
|
|
||||||
if not (is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)):
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Increase the inactivity score of inactive validators
|
# Increase the inactivity score of inactive validators
|
||||||
|
@ -940,16 +944,15 @@ func process_inactivity_updates*(cfg: RuntimeConfig, state: var (altair.BeaconSt
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#epoch-processing
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#epoch-processing
|
||||||
proc process_epoch*(
|
proc process_epoch*(
|
||||||
cfg: RuntimeConfig, state: var phase0.BeaconState, flags: UpdateFlags,
|
cfg: RuntimeConfig, state: var phase0.BeaconState, flags: UpdateFlags,
|
||||||
cache: var StateCache, rewards: var RewardInfo) {.nbench.} =
|
cache: var StateCache, info: var phase0.EpochInfo) {.nbench.} =
|
||||||
let currentEpoch = get_current_epoch(state)
|
let currentEpoch = get_current_epoch(state)
|
||||||
trace "process_epoch",
|
trace "process_epoch",
|
||||||
current_epoch = currentEpoch
|
current_epoch = currentEpoch
|
||||||
init(rewards, state)
|
init(info, state)
|
||||||
rewards.process_attestations(state, cache)
|
info.process_attestations(state, cache)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#justification-and-finalization
|
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#justification-and-finalization
|
||||||
process_justification_and_finalization(
|
process_justification_and_finalization(state, info.total_balances, flags)
|
||||||
state, rewards.total_balances, flags)
|
|
||||||
|
|
||||||
# state.slot hasn't been incremented yet.
|
# state.slot hasn't been incremented yet.
|
||||||
if verifyFinalization in flags and currentEpoch >= 2:
|
if verifyFinalization in flags and currentEpoch >= 2:
|
||||||
|
@ -962,13 +965,13 @@ proc process_epoch*(
|
||||||
doAssert state.finalized_checkpoint.epoch + 3 >= currentEpoch
|
doAssert state.finalized_checkpoint.epoch + 3 >= currentEpoch
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#rewards-and-penalties-1
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#rewards-and-penalties-1
|
||||||
process_rewards_and_penalties(state, rewards)
|
process_rewards_and_penalties(state, info)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#registry-updates
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#registry-updates
|
||||||
process_registry_updates(cfg, state, cache)
|
process_registry_updates(cfg, state, cache)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#slashings
|
||||||
process_slashings(state, rewards.total_balances.current_epoch)
|
process_slashings(state, info.total_balances.current_epoch)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#eth1-data-votes-updates
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#eth1-data-votes-updates
|
||||||
process_eth1_data_reset(state)
|
process_eth1_data_reset(state)
|
||||||
|
@ -988,26 +991,40 @@ proc process_epoch*(
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#participation-records-rotation
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#participation-records-rotation
|
||||||
process_participation_record_updates(state)
|
process_participation_record_updates(state)
|
||||||
|
|
||||||
|
func init*(
|
||||||
|
info: var altair.EpochInfo,
|
||||||
|
state: altair.BeaconState | merge.BeaconState) =
|
||||||
|
# init participation, overwriting the full structure
|
||||||
|
info.balances = get_unslashed_participating_balances(state)
|
||||||
|
info.validators.setLen(state.validators.len())
|
||||||
|
|
||||||
|
let previous_epoch = get_previous_epoch(state)
|
||||||
|
for index in 0..<state.validators.len():
|
||||||
|
var flags: set[ParticipationFlag]
|
||||||
|
if is_eligible_validator(state.validators[index], previous_epoch):
|
||||||
|
flags.incl ParticipationFlag.eligible
|
||||||
|
|
||||||
|
info.validators[index] = ParticipationInfo(
|
||||||
|
flags: flags
|
||||||
|
)
|
||||||
|
|
||||||
|
func init*(
|
||||||
|
T: type altair.EpochInfo, state: altair.BeaconState | merge.BeaconState): T =
|
||||||
|
init(result, state)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#epoch-processing
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/altair/beacon-chain.md#epoch-processing
|
||||||
proc process_epoch*(
|
proc process_epoch*(
|
||||||
cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState),
|
cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState),
|
||||||
flags: UpdateFlags, cache: var StateCache, rewards: var RewardInfo)
|
flags: UpdateFlags, cache: var StateCache, info: var altair.EpochInfo)
|
||||||
{.nbench.} =
|
{.nbench.} =
|
||||||
let currentEpoch = get_current_epoch(state)
|
let currentEpoch = get_current_epoch(state)
|
||||||
trace "process_epoch",
|
trace "process_epoch",
|
||||||
current_epoch = currentEpoch
|
current_epoch = currentEpoch
|
||||||
init(rewards, state)
|
|
||||||
when false:
|
|
||||||
rewards.process_attestations(state, cache)
|
|
||||||
|
|
||||||
let
|
info.init(state)
|
||||||
total_active_balance = state.get_total_active_balance(cache)
|
|
||||||
unslashed_participating_balances =
|
|
||||||
state.get_unslashed_participating_balances()
|
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#justification-and-finalization
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#justification-and-finalization
|
||||||
process_justification_and_finalization(
|
process_justification_and_finalization(state, info.balances, flags)
|
||||||
state, total_active_balance, unslashed_participating_balances, flags)
|
|
||||||
|
|
||||||
# state.slot hasn't been incremented yet.
|
# state.slot hasn't been incremented yet.
|
||||||
if verifyFinalization in flags and currentEpoch >= 2:
|
if verifyFinalization in flags and currentEpoch >= 2:
|
||||||
|
@ -1019,17 +1036,16 @@ proc process_epoch*(
|
||||||
# the finalization rules triggered.
|
# the finalization rules triggered.
|
||||||
doAssert state.finalized_checkpoint.epoch + 3 >= currentEpoch
|
doAssert state.finalized_checkpoint.epoch + 3 >= currentEpoch
|
||||||
|
|
||||||
process_inactivity_updates(cfg, state) # [New in Altair]
|
process_inactivity_updates(cfg, state, info) # [New in Altair]
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/phase0/beacon-chain.md#rewards-and-penalties-1
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/phase0/beacon-chain.md#rewards-and-penalties-1
|
||||||
process_rewards_and_penalties(
|
process_rewards_and_penalties(cfg, state, info)
|
||||||
cfg, state, total_active_balance, unslashed_participating_balances)
|
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#registry-updates
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.2/specs/phase0/beacon-chain.md#registry-updates
|
||||||
process_registry_updates(cfg, state, cache)
|
process_registry_updates(cfg, state, cache)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#slashings
|
||||||
process_slashings(state, total_active_balance)
|
process_slashings(state, info.balances.current_epoch)
|
||||||
|
|
||||||
process_eth1_data_reset(state)
|
process_eth1_data_reset(state)
|
||||||
|
|
||||||
|
|
|
@ -145,7 +145,7 @@ proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, ski
|
||||||
let prePath = dir / preState & ".ssz"
|
let prePath = dir / preState & ".ssz"
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
|
|
||||||
echo "Running: ", prePath
|
echo "Running: ", prePath
|
||||||
let state = (ref ForkedHashedBeaconState)(
|
let state = (ref ForkedHashedBeaconState)(
|
||||||
|
@ -162,14 +162,14 @@ proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, ski
|
||||||
let flags = if skipBLS: {skipBlsValidation}
|
let flags = if skipBLS: {skipBlsValidation}
|
||||||
else: {}
|
else: {}
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
defaultRuntimeConfig, state[], signedBlock, cache, rewards, flags,
|
defaultRuntimeConfig, state[], signedBlock, cache, info, flags,
|
||||||
noRollback)
|
noRollback)
|
||||||
echo "State transition status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"
|
echo "State transition status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"
|
||||||
|
|
||||||
proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
|
proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
let prePath = dir / preState & ".ssz"
|
let prePath = dir / preState & ".ssz"
|
||||||
|
|
||||||
echo "Running: ", prePath
|
echo "Running: ", prePath
|
||||||
|
@ -182,7 +182,7 @@ proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
|
||||||
# Shouldn't necessarily assert, because nbench can run test suite
|
# Shouldn't necessarily assert, because nbench can run test suite
|
||||||
discard process_slots(
|
discard process_slots(
|
||||||
defaultRuntimeConfig, state[], getStateField(state[], slot) + numSlots,
|
defaultRuntimeConfig, state[], getStateField(state[], slot) + numSlots,
|
||||||
cache, rewards, {})
|
cache, info, {})
|
||||||
|
|
||||||
template processEpochScenarioImpl(
|
template processEpochScenarioImpl(
|
||||||
dir, preState: string,
|
dir, preState: string,
|
||||||
|
@ -217,6 +217,22 @@ proc process_deposit(state: var phase0.BeaconState;
|
||||||
flags: UpdateFlags = {}): Result[void, cstring] =
|
flags: UpdateFlags = {}): Result[void, cstring] =
|
||||||
process_deposit(defaultRuntimeConfig, state, deposit, flags)
|
process_deposit(defaultRuntimeConfig, state, deposit, flags)
|
||||||
|
|
||||||
|
proc bench_process_justification_and_finalization(state: var phase0.BeaconState) =
|
||||||
|
var
|
||||||
|
cache: StateCache
|
||||||
|
info: phase0.EpochInfo
|
||||||
|
info.init(state)
|
||||||
|
info.process_attestations(state, cache)
|
||||||
|
process_justification_and_finalization(state, info.total_balances)
|
||||||
|
|
||||||
|
func bench_process_slashings(state: var phase0.BeaconState) =
|
||||||
|
var
|
||||||
|
cache: StateCache
|
||||||
|
info: phase0.EpochInfo
|
||||||
|
info.init(state)
|
||||||
|
info.process_attestations(state, cache)
|
||||||
|
process_slashings(state, info.total_balances.current_epoch)
|
||||||
|
|
||||||
template processBlockScenarioImpl(
|
template processBlockScenarioImpl(
|
||||||
dir, preState: string, skipBLS: bool,
|
dir, preState: string, skipBLS: bool,
|
||||||
transitionFn, paramName: untyped,
|
transitionFn, paramName: untyped,
|
||||||
|
@ -258,13 +274,13 @@ template genProcessBlockScenario(name, transitionFn,
|
||||||
processBlockScenarioImpl(dir, preState, skipBLS, transitionFn, paramName, ref ConsensusObjectType)
|
processBlockScenarioImpl(dir, preState, skipBLS, transitionFn, paramName, ref ConsensusObjectType)
|
||||||
|
|
||||||
genProcessEpochScenario(runProcessJustificationFinalization,
|
genProcessEpochScenario(runProcessJustificationFinalization,
|
||||||
process_justification_and_finalization)
|
bench_process_justification_and_finalization)
|
||||||
|
|
||||||
genProcessEpochScenario(runProcessRegistryUpdates,
|
genProcessEpochScenario(runProcessRegistryUpdates,
|
||||||
process_registry_updates)
|
process_registry_updates)
|
||||||
|
|
||||||
genProcessEpochScenario(runProcessSlashings,
|
genProcessEpochScenario(runProcessSlashings,
|
||||||
process_slashings)
|
bench_process_slashings)
|
||||||
|
|
||||||
genProcessBlockScenario(runProcessBlockHeader,
|
genProcessBlockScenario(runProcessBlockHeader,
|
||||||
process_block_header,
|
process_block_header,
|
||||||
|
|
|
@ -93,9 +93,9 @@ proc doTransition(conf: NcliConf) =
|
||||||
|
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
if not state_transition(getRuntimeConfig(conf.eth2Network),
|
if not state_transition(getRuntimeConfig(conf.eth2Network),
|
||||||
stateY[], blckX, cache, rewards, flags, noRollback):
|
stateY[], blckX, cache, info, flags, noRollback):
|
||||||
error "State transition failed"
|
error "State transition failed"
|
||||||
quit 1
|
quit 1
|
||||||
else:
|
else:
|
||||||
|
@ -121,13 +121,13 @@ proc doSlots(conf: NcliConf) =
|
||||||
|
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
for i in 0'u64..<conf.slot:
|
for i in 0'u64..<conf.slot:
|
||||||
let isEpoch = (getStateField(stateY[], slot) + 1).isEpoch
|
let isEpoch = (getStateField(stateY[], slot) + 1).isEpoch
|
||||||
withTimer(timers[if isEpoch: tApplyEpochSlot else: tApplySlot]):
|
withTimer(timers[if isEpoch: tApplyEpochSlot else: tApplySlot]):
|
||||||
doAssert process_slots(
|
doAssert process_slots(
|
||||||
defaultRuntimeConfig, stateY[], getStateField(stateY[], slot) + 1,
|
defaultRuntimeConfig, stateY[], getStateField(stateY[], slot) + 1,
|
||||||
cache, rewards, {})
|
cache, info, {})
|
||||||
|
|
||||||
withTimer(timers[tSaveState]):
|
withTimer(timers[tSaveState]):
|
||||||
saveSSZFile(conf.postState, stateY[])
|
saveSSZFile(conf.postState, stateY[])
|
||||||
|
|
158
ncli/ncli_db.nim
158
ncli/ncli_db.nim
|
@ -189,7 +189,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
|
||||||
|
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
loadedState = new phase0.BeaconState
|
loadedState = new phase0.BeaconState
|
||||||
|
|
||||||
withTimer(timers[tLoadState]):
|
withTimer(timers[tLoadState]):
|
||||||
|
@ -202,7 +202,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
|
||||||
withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
|
withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
|
||||||
let ok = process_slots(
|
let ok = process_slots(
|
||||||
dag.cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
|
dag.cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
|
||||||
rewards, {})
|
info, {})
|
||||||
doAssert ok, "Slot processing can't fail with correct inputs"
|
doAssert ok, "Slot processing can't fail with correct inputs"
|
||||||
|
|
||||||
var start = Moment.now()
|
var start = Moment.now()
|
||||||
|
@ -458,7 +458,7 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
|
||||||
perfs = newSeq[ValidatorPerformance](
|
perfs = newSeq[ValidatorPerformance](
|
||||||
getStateField(dag.headState.data, validators).len())
|
getStateField(dag.headState.data, validators).len())
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
blck: phase0.TrustedSignedBeaconBlock
|
blck: phase0.TrustedSignedBeaconBlock
|
||||||
|
|
||||||
doAssert blockRefs.len() > 0, "Must select at least one block"
|
doAssert blockRefs.len() > 0, "Must select at least one block"
|
||||||
|
@ -470,7 +470,7 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
|
||||||
dag.updateStateData(
|
dag.updateStateData(
|
||||||
state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
|
state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
|
||||||
|
|
||||||
func processEpoch() =
|
proc processEpoch() =
|
||||||
let
|
let
|
||||||
prev_epoch_target_slot =
|
prev_epoch_target_slot =
|
||||||
state[].data.get_previous_epoch().compute_start_slot_at_epoch()
|
state[].data.get_previous_epoch().compute_start_slot_at_epoch()
|
||||||
|
@ -490,35 +490,39 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
|
||||||
prev_epoch_target_slot, committee_index.CommitteeIndex, cache):
|
prev_epoch_target_slot, committee_index.CommitteeIndex, cache):
|
||||||
indices.incl(validator_index)
|
indices.incl(validator_index)
|
||||||
indices
|
indices
|
||||||
|
case info.kind
|
||||||
for i, s in rewards.statuses.pairs():
|
of EpochInfoFork.Phase0:
|
||||||
let perf = addr perfs[i]
|
template info: untyped = info.phase0Info
|
||||||
if RewardFlags.isActiveInPreviousEpoch in s.flags:
|
for i, s in info.statuses.pairs():
|
||||||
if s.is_previous_epoch_attester.isSome():
|
let perf = addr perfs[i]
|
||||||
perf.attestation_hits += 1;
|
if RewardFlags.isActiveInPreviousEpoch in s.flags:
|
||||||
|
|
||||||
if RewardFlags.isPreviousEpochHeadAttester in s.flags:
|
|
||||||
perf.head_attestation_hits += 1
|
|
||||||
else:
|
|
||||||
perf.head_attestation_misses += 1
|
|
||||||
|
|
||||||
if RewardFlags.isPreviousEpochTargetAttester in s.flags:
|
|
||||||
perf.target_attestation_hits += 1
|
|
||||||
else:
|
|
||||||
perf.target_attestation_misses += 1
|
|
||||||
|
|
||||||
if i.ValidatorIndex in first_slot_attesters:
|
|
||||||
if first_slot_empty:
|
|
||||||
perf.first_slot_head_attester_when_first_slot_empty += 1
|
|
||||||
else:
|
|
||||||
perf.first_slot_head_attester_when_first_slot_not_empty += 1
|
|
||||||
|
|
||||||
if s.is_previous_epoch_attester.isSome():
|
if s.is_previous_epoch_attester.isSome():
|
||||||
perf.delays.mgetOrPut(
|
perf.attestation_hits += 1;
|
||||||
s.is_previous_epoch_attester.get().delay, 0'u64) += 1
|
|
||||||
|
|
||||||
else:
|
if RewardFlags.isPreviousEpochHeadAttester in s.flags:
|
||||||
perf.attestation_misses += 1;
|
perf.head_attestation_hits += 1
|
||||||
|
else:
|
||||||
|
perf.head_attestation_misses += 1
|
||||||
|
|
||||||
|
if RewardFlags.isPreviousEpochTargetAttester in s.flags:
|
||||||
|
perf.target_attestation_hits += 1
|
||||||
|
else:
|
||||||
|
perf.target_attestation_misses += 1
|
||||||
|
|
||||||
|
if i.ValidatorIndex in first_slot_attesters:
|
||||||
|
if first_slot_empty:
|
||||||
|
perf.first_slot_head_attester_when_first_slot_empty += 1
|
||||||
|
else:
|
||||||
|
perf.first_slot_head_attester_when_first_slot_not_empty += 1
|
||||||
|
|
||||||
|
if s.is_previous_epoch_attester.isSome():
|
||||||
|
perf.delays.mgetOrPut(
|
||||||
|
s.is_previous_epoch_attester.get().delay, 0'u64) += 1
|
||||||
|
|
||||||
|
else:
|
||||||
|
perf.attestation_misses += 1;
|
||||||
|
of EpochInfoFork.Altair:
|
||||||
|
echo "TODO altair"
|
||||||
|
|
||||||
for bi in 0..<blockRefs.len:
|
for bi in 0..<blockRefs.len:
|
||||||
blck = db.getBlock(blockRefs[blockRefs.len - bi - 1].root).get()
|
blck = db.getBlock(blockRefs[blockRefs.len - bi - 1].root).get()
|
||||||
|
@ -529,7 +533,7 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
|
||||||
if nextSlot == blck.message.slot: {skipLastStateRootCalculation}
|
if nextSlot == blck.message.slot: {skipLastStateRootCalculation}
|
||||||
else: {}
|
else: {}
|
||||||
let ok = process_slots(
|
let ok = process_slots(
|
||||||
dag.cfg, state[].data, nextSlot, cache, rewards, flags)
|
dag.cfg, state[].data, nextSlot, cache, info, flags)
|
||||||
doAssert ok, "Slot processing can't fail with correct inputs"
|
doAssert ok, "Slot processing can't fail with correct inputs"
|
||||||
|
|
||||||
if getStateField(state[].data, slot).isEpoch():
|
if getStateField(state[].data, slot).isEpoch():
|
||||||
|
@ -544,7 +548,7 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
|
||||||
while getStateField(state[].data, slot) < ends:
|
while getStateField(state[].data, slot) < ends:
|
||||||
let ok = process_slots(
|
let ok = process_slots(
|
||||||
dag.cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
|
dag.cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
|
||||||
rewards, {})
|
info, {})
|
||||||
doAssert ok, "Slot processing can't fail with correct inputs"
|
doAssert ok, "Slot processing can't fail with correct inputs"
|
||||||
|
|
||||||
if getStateField(state[].data, slot).isEpoch():
|
if getStateField(state[].data, slot).isEpoch():
|
||||||
|
@ -686,7 +690,7 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
|
||||||
|
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
blck: phase0.TrustedSignedBeaconBlock
|
blck: phase0.TrustedSignedBeaconBlock
|
||||||
|
|
||||||
let
|
let
|
||||||
|
@ -713,46 +717,52 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
|
||||||
if not inTxn:
|
if not inTxn:
|
||||||
outDb.exec("BEGIN TRANSACTION;").expect("DB")
|
outDb.exec("BEGIN TRANSACTION;").expect("DB")
|
||||||
inTxn = true
|
inTxn = true
|
||||||
insertEpochInfo.exec(
|
case info.kind
|
||||||
(getStateField(state[].data, slot).epoch.int64,
|
of EpochInfoFork.Phase0:
|
||||||
rewards.total_balances.current_epoch_raw.int64,
|
template info: untyped = info.phase0Info
|
||||||
rewards.total_balances.previous_epoch_raw.int64,
|
insertEpochInfo.exec(
|
||||||
rewards.total_balances.current_epoch_attesters_raw.int64,
|
(getStateField(state[].data, slot).epoch.int64,
|
||||||
rewards.total_balances.current_epoch_target_attesters_raw.int64,
|
info.total_balances.current_epoch_raw.int64,
|
||||||
rewards.total_balances.previous_epoch_attesters_raw.int64,
|
info.total_balances.previous_epoch_raw.int64,
|
||||||
rewards.total_balances.previous_epoch_target_attesters_raw.int64,
|
info.total_balances.current_epoch_attesters_raw.int64,
|
||||||
rewards.total_balances.previous_epoch_head_attesters_raw.int64)
|
info.total_balances.current_epoch_target_attesters_raw.int64,
|
||||||
).expect("DB")
|
info.total_balances.previous_epoch_attesters_raw.int64,
|
||||||
|
info.total_balances.previous_epoch_target_attesters_raw.int64,
|
||||||
|
info.total_balances.previous_epoch_head_attesters_raw.int64)
|
||||||
|
).expect("DB")
|
||||||
|
|
||||||
for index, status in rewards.statuses.pairs():
|
for index, status in info.statuses.pairs():
|
||||||
if not is_eligible_validator(status):
|
if not is_eligible_validator(status):
|
||||||
continue
|
continue
|
||||||
let
|
let
|
||||||
notSlashed = (RewardFlags.isSlashed notin status.flags)
|
notSlashed = (RewardFlags.isSlashed notin status.flags)
|
||||||
source_attester =
|
source_attester =
|
||||||
notSlashed and status.is_previous_epoch_attester.isSome()
|
notSlashed and status.is_previous_epoch_attester.isSome()
|
||||||
target_attester =
|
target_attester =
|
||||||
notSlashed and RewardFlags.isPreviousEpochTargetAttester in status.flags
|
notSlashed and RewardFlags.isPreviousEpochTargetAttester in status.flags
|
||||||
head_attester =
|
head_attester =
|
||||||
notSlashed and RewardFlags.isPreviousEpochHeadAttester in status.flags
|
notSlashed and RewardFlags.isPreviousEpochHeadAttester in status.flags
|
||||||
delay =
|
delay =
|
||||||
if notSlashed and status.is_previous_epoch_attester.isSome():
|
if notSlashed and status.is_previous_epoch_attester.isSome():
|
||||||
some(int64(status.is_previous_epoch_attester.get().delay))
|
some(int64(status.is_previous_epoch_attester.get().delay))
|
||||||
else:
|
else:
|
||||||
none(int64)
|
none(int64)
|
||||||
|
|
||||||
|
if conf.perfect or not
|
||||||
|
(source_attester and target_attester and head_attester and
|
||||||
|
delay.isSome() and delay.get() == 1):
|
||||||
|
insertValidatorInfo.exec(
|
||||||
|
(index.int64,
|
||||||
|
getStateField(state[].data, slot).epoch.int64,
|
||||||
|
status.delta.rewards.int64,
|
||||||
|
status.delta.penalties.int64,
|
||||||
|
int64(source_attester), # Source delta
|
||||||
|
int64(target_attester), # Target delta
|
||||||
|
int64(head_attester), # Head delta
|
||||||
|
delay)).expect("DB")
|
||||||
|
of EpochInfoFork.Altair:
|
||||||
|
echo "TODO altair support"
|
||||||
|
|
||||||
if conf.perfect or not
|
|
||||||
(source_attester and target_attester and head_attester and
|
|
||||||
delay.isSome() and delay.get() == 1):
|
|
||||||
insertValidatorInfo.exec(
|
|
||||||
(index.int64,
|
|
||||||
getStateField(state[].data, slot).epoch.int64,
|
|
||||||
status.delta.rewards.int64,
|
|
||||||
status.delta.penalties.int64,
|
|
||||||
int64(source_attester), # Source delta
|
|
||||||
int64(target_attester), # Target delta
|
|
||||||
int64(head_attester), # Head delta
|
|
||||||
delay)).expect("DB")
|
|
||||||
if getStateField(state[].data, slot).epoch.int64 mod 16 == 0:
|
if getStateField(state[].data, slot).epoch.int64 mod 16 == 0:
|
||||||
inTxn = false
|
inTxn = false
|
||||||
outDb.exec("COMMIT;").expect("DB")
|
outDb.exec("COMMIT;").expect("DB")
|
||||||
|
@ -766,7 +776,7 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
|
||||||
if nextSlot == blck.message.slot: {skipLastStateRootCalculation}
|
if nextSlot == blck.message.slot: {skipLastStateRootCalculation}
|
||||||
else: {}
|
else: {}
|
||||||
|
|
||||||
let ok = process_slots(cfg, state[].data, nextSlot, cache, rewards, flags)
|
let ok = process_slots(cfg, state[].data, nextSlot, cache, info, flags)
|
||||||
doAssert ok, "Slot processing can't fail with correct inputs"
|
doAssert ok, "Slot processing can't fail with correct inputs"
|
||||||
|
|
||||||
if getStateField(state[].data, slot).isEpoch():
|
if getStateField(state[].data, slot).isEpoch():
|
||||||
|
@ -782,7 +792,7 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
|
||||||
while getStateField(state[].data, slot) <= ends:
|
while getStateField(state[].data, slot) <= ends:
|
||||||
let ok = process_slots(
|
let ok = process_slots(
|
||||||
cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
|
cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
|
||||||
rewards, {})
|
info, {})
|
||||||
doAssert ok, "Slot processing can't fail with correct inputs"
|
doAssert ok, "Slot processing can't fail with correct inputs"
|
||||||
|
|
||||||
if getStateField(state[].data, slot).isEpoch():
|
if getStateField(state[].data, slot).isEpoch():
|
||||||
|
|
|
@ -118,10 +118,10 @@ proc nfuzz_block(input: openArray[byte], xoutput: ptr byte,
|
||||||
data: data.state, root: hash_tree_root(data.state)),
|
data: data.state, root: hash_tree_root(data.state)),
|
||||||
beaconStateFork: forkPhase0)
|
beaconStateFork: forkPhase0)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
result =
|
result =
|
||||||
state_transition(
|
state_transition(
|
||||||
cfg, fhState[], blck, cache, rewards, flags, rollback)
|
cfg, fhState[], blck, cache, info, flags, rollback)
|
||||||
data.state = fhState.hbsPhase0.data
|
data.state = fhState.hbsPhase0.data
|
||||||
|
|
||||||
decodeAndProcess(BlockInput):
|
decodeAndProcess(BlockInput):
|
||||||
|
|
|
@ -7,6 +7,9 @@
|
||||||
|
|
||||||
{.used.}
|
{.used.}
|
||||||
|
|
||||||
|
import
|
||||||
|
chronicles
|
||||||
|
|
||||||
import
|
import
|
||||||
./test_fixture_fork,
|
./test_fixture_fork,
|
||||||
./test_fixture_merkle_single_proof,
|
./test_fixture_merkle_single_proof,
|
||||||
|
|
|
@ -44,10 +44,13 @@ proc runTest(identifier: string) =
|
||||||
let
|
let
|
||||||
attestation = parseTest(
|
attestation = parseTest(
|
||||||
testDir/"attestation.ssz_snappy", SSZ, Attestation)
|
testDir/"attestation.ssz_snappy", SSZ, Attestation)
|
||||||
|
|
||||||
|
total_active_balance = get_total_active_balance(preState[], cache)
|
||||||
|
base_reward_per_increment =
|
||||||
|
get_base_reward_per_increment(total_active_balance)
|
||||||
|
|
||||||
done = process_attestation(
|
done = process_attestation(
|
||||||
preState[], attestation, {},
|
preState[], attestation, {}, base_reward_per_increment, cache)
|
||||||
get_base_reward_per_increment(preState[], cache),
|
|
||||||
cache)
|
|
||||||
|
|
||||||
if existsFile(testDir/"post.ssz_snappy"):
|
if existsFile(testDir/"post.ssz_snappy"):
|
||||||
let postState =
|
let postState =
|
||||||
|
|
|
@ -12,6 +12,7 @@ import
|
||||||
os,
|
os,
|
||||||
# Utilities
|
# Utilities
|
||||||
stew/results,
|
stew/results,
|
||||||
|
chronicles,
|
||||||
# Beacon chain internals
|
# Beacon chain internals
|
||||||
../../../beacon_chain/spec/state_transition_block,
|
../../../beacon_chain/spec/state_transition_block,
|
||||||
../../../beacon_chain/spec/datatypes/altair,
|
../../../beacon_chain/spec/datatypes/altair,
|
||||||
|
|
|
@ -12,6 +12,7 @@ import
|
||||||
os,
|
os,
|
||||||
# Utilities
|
# Utilities
|
||||||
stew/results,
|
stew/results,
|
||||||
|
chronicles,
|
||||||
# Beacon chain internals
|
# Beacon chain internals
|
||||||
../../../beacon_chain/spec/[beaconstate, presets, state_transition_block],
|
../../../beacon_chain/spec/[beaconstate, presets, state_transition_block],
|
||||||
../../../beacon_chain/spec/datatypes/altair,
|
../../../beacon_chain/spec/datatypes/altair,
|
||||||
|
|
|
@ -12,6 +12,7 @@ import
|
||||||
os,
|
os,
|
||||||
# Utilities
|
# Utilities
|
||||||
stew/results,
|
stew/results,
|
||||||
|
chronicles,
|
||||||
# Beacon chain internals
|
# Beacon chain internals
|
||||||
../../../beacon_chain/spec/[beaconstate, state_transition_block],
|
../../../beacon_chain/spec/[beaconstate, state_transition_block],
|
||||||
../../../beacon_chain/spec/datatypes/altair,
|
../../../beacon_chain/spec/datatypes/altair,
|
||||||
|
@ -20,9 +21,6 @@ import
|
||||||
../fixtures_utils,
|
../fixtures_utils,
|
||||||
../../helpers/debug_state
|
../../helpers/debug_state
|
||||||
|
|
||||||
when isMainModule:
|
|
||||||
import chronicles # or some random compile error happens...
|
|
||||||
|
|
||||||
const OpSyncAggregateDir = SszTestsDir/const_preset/"altair"/"operations"/"sync_aggregate"/"pyspec_tests"
|
const OpSyncAggregateDir = SszTestsDir/const_preset/"altair"/"operations"/"sync_aggregate"/"pyspec_tests"
|
||||||
|
|
||||||
proc runTest(dir, identifier: string) =
|
proc runTest(dir, identifier: string) =
|
||||||
|
@ -45,8 +43,9 @@ proc runTest(dir, identifier: string) =
|
||||||
let
|
let
|
||||||
syncAggregate = parseTest(
|
syncAggregate = parseTest(
|
||||||
testDir/"sync_aggregate.ssz_snappy", SSZ, SyncAggregate)
|
testDir/"sync_aggregate.ssz_snappy", SSZ, SyncAggregate)
|
||||||
|
total_active_balance = get_total_active_balance(preState[], cache)
|
||||||
done = process_sync_aggregate(
|
done = process_sync_aggregate(
|
||||||
preState[], syncAggregate, cache)
|
preState[], syncAggregate, total_active_balance, cache)
|
||||||
|
|
||||||
if existsFile(testDir/"post.ssz_snappy"):
|
if existsFile(testDir/"post.ssz_snappy"):
|
||||||
let postState =
|
let postState =
|
||||||
|
|
|
@ -12,6 +12,7 @@ import
|
||||||
os,
|
os,
|
||||||
# Utilities
|
# Utilities
|
||||||
stew/results,
|
stew/results,
|
||||||
|
chronicles,
|
||||||
# Beacon chain internals
|
# Beacon chain internals
|
||||||
../../../beacon_chain/spec/state_transition_block,
|
../../../beacon_chain/spec/state_transition_block,
|
||||||
../../../beacon_chain/spec/datatypes/altair,
|
../../../beacon_chain/spec/datatypes/altair,
|
||||||
|
|
|
@ -36,7 +36,7 @@ proc runTest(testName, testDir, unitTestName: string) =
|
||||||
fhPreState = (ref ForkedHashedBeaconState)(hbsAltair: altair.HashedBeaconState(
|
fhPreState = (ref ForkedHashedBeaconState)(hbsAltair: altair.HashedBeaconState(
|
||||||
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkAltair)
|
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkAltair)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
|
|
||||||
# In test cases with more than 10 blocks the first 10 aren't 0-prefixed,
|
# In test cases with more than 10 blocks the first 10 aren't 0-prefixed,
|
||||||
# so purely lexicographic sorting wouldn't sort properly.
|
# so purely lexicographic sorting wouldn't sort properly.
|
||||||
|
@ -46,12 +46,12 @@ proc runTest(testName, testDir, unitTestName: string) =
|
||||||
|
|
||||||
if hasPostState:
|
if hasPostState:
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
|
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
|
||||||
noRollback)
|
noRollback)
|
||||||
doAssert success, "Failure when applying block " & $i
|
doAssert success, "Failure when applying block " & $i
|
||||||
else:
|
else:
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
|
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
|
||||||
noRollback)
|
noRollback)
|
||||||
doAssert (i + 1 < numBlocks) or not success,
|
doAssert (i + 1 < numBlocks) or not success,
|
||||||
"We didn't expect these invalid blocks to be processed"
|
"We didn't expect these invalid blocks to be processed"
|
||||||
|
|
|
@ -35,13 +35,13 @@ proc runTest(identifier: string) =
|
||||||
data: preState[], root: hash_tree_root(preState[])),
|
data: preState[], root: hash_tree_root(preState[])),
|
||||||
beaconStateFork: forkAltair)
|
beaconStateFork: forkAltair)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards: RewardInfo
|
info: ForkedEpochInfo
|
||||||
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, altair.BeaconState))
|
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, altair.BeaconState))
|
||||||
|
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, fhPreState[],
|
defaultRuntimeConfig, fhPreState[],
|
||||||
getStateField(fhPreState[], slot) + num_slots, cache, rewards, {})
|
getStateField(fhPreState[], slot) + num_slots, cache, info, {})
|
||||||
|
|
||||||
getStateRoot(fhPreState[]) == postState[].hash_tree_root()
|
getStateRoot(fhPreState[]) == postState[].hash_tree_root()
|
||||||
let newPreState = newClone(fhPreState.hbsAltair.data)
|
let newPreState = newClone(fhPreState.hbsAltair.data)
|
||||||
|
|
|
@ -11,6 +11,7 @@ import
|
||||||
# Standard library
|
# Standard library
|
||||||
os, strutils,
|
os, strutils,
|
||||||
# Beacon chain internals
|
# Beacon chain internals
|
||||||
|
chronicles,
|
||||||
../../../beacon_chain/spec/[beaconstate, presets, state_transition_epoch],
|
../../../beacon_chain/spec/[beaconstate, presets, state_transition_epoch],
|
||||||
../../../beacon_chain/spec/datatypes/altair,
|
../../../beacon_chain/spec/datatypes/altair,
|
||||||
# Test utilities
|
# Test utilities
|
||||||
|
@ -19,9 +20,10 @@ import
|
||||||
../test_fixture_rewards,
|
../test_fixture_rewards,
|
||||||
../../helpers/debug_state
|
../../helpers/debug_state
|
||||||
|
|
||||||
|
const RootDir = SszTestsDir/const_preset/"altair"/"epoch_processing"
|
||||||
|
|
||||||
template runSuite(
|
template runSuite(
|
||||||
suiteDir, testName: string, transitionProc: untyped{ident},
|
suiteDir, testName: string, transitionProc: untyped): untyped =
|
||||||
useCache, useTAB, useUPB: static bool = false): untyped =
|
|
||||||
suite "Ethereum Foundation - Altair - Epoch Processing - " & testName & preset():
|
suite "Ethereum Foundation - Altair - Epoch Processing - " & testName & preset():
|
||||||
for testDir in walkDirRec(suiteDir, yieldFilter = {pcDir}, checkDir = true):
|
for testDir in walkDirRec(suiteDir, yieldFilter = {pcDir}, checkDir = true):
|
||||||
|
|
||||||
|
@ -29,47 +31,34 @@ template runSuite(
|
||||||
test testName & " - " & unitTestName & preset():
|
test testName & " - " & unitTestName & preset():
|
||||||
# BeaconState objects are stored on the heap to avoid stack overflow
|
# BeaconState objects are stored on the heap to avoid stack overflow
|
||||||
type T = altair.BeaconState
|
type T = altair.BeaconState
|
||||||
var preState = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
|
var preState {.inject.} = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
|
||||||
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T))
|
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T))
|
||||||
|
var cache {.inject, used.} = StateCache()
|
||||||
|
template state: untyped {.inject, used.} = preState[]
|
||||||
|
template cfg: untyped {.inject, used.} = defaultRuntimeConfig
|
||||||
|
|
||||||
doAssert not (useCache and useTAB)
|
transitionProc
|
||||||
when useCache:
|
|
||||||
var cache = StateCache()
|
check:
|
||||||
when compiles(transitionProc(defaultRuntimeConfig, preState[], cache)):
|
hash_tree_root(preState[]) == hash_tree_root(postState[])
|
||||||
transitionProc(defaultRuntimeConfig, preState[], cache)
|
|
||||||
else:
|
|
||||||
transitionProc(preState[], cache)
|
|
||||||
elif useTAB and not useUPB:
|
|
||||||
var cache = StateCache()
|
|
||||||
let total_active_balance = preState[].get_total_active_balance(cache)
|
|
||||||
transitionProc(preState[], total_active_balance)
|
|
||||||
elif useTAB and useUPB:
|
|
||||||
var cache = StateCache()
|
|
||||||
let
|
|
||||||
total_active_balance = preState[].get_total_active_balance(cache)
|
|
||||||
unslashed_participating_balances =
|
|
||||||
preState[].get_unslashed_participating_balances()
|
|
||||||
transitionProc(
|
|
||||||
preState[], total_active_balance, unslashed_participating_balances)
|
|
||||||
else:
|
|
||||||
when compiles(transitionProc(preState[])):
|
|
||||||
transitionProc(preState[])
|
|
||||||
else:
|
|
||||||
transitionProc(defaultRuntimeConfig, preState[])
|
|
||||||
|
|
||||||
reportDiff(preState, postState)
|
reportDiff(preState, postState)
|
||||||
|
|
||||||
# Justification & Finalization
|
# Justification & Finalization
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const JustificationFinalizationDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"justification_and_finalization"/"pyspec_tests"
|
const JustificationFinalizationDir = RootDir/"justification_and_finalization"/"pyspec_tests"
|
||||||
runSuite(JustificationFinalizationDir, "Justification & Finalization", process_justification_and_finalization, useCache = false, useTAB = true, useUPB = true)
|
runSuite(JustificationFinalizationDir, "Justification & Finalization"):
|
||||||
|
let info = altair.EpochInfo.init(state)
|
||||||
|
process_justification_and_finalization(state, info.balances)
|
||||||
|
|
||||||
# Inactivity updates
|
# Inactivity updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const InactivityDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"inactivity_updates"/"pyspec_tests"
|
const InactivityDir = RootDir/"inactivity_updates"/"pyspec_tests"
|
||||||
runSuite(InactivityDir, "Inactivity", process_inactivity_updates, useCache = false)
|
runSuite(InactivityDir, "Inactivity"):
|
||||||
|
let info = altair.EpochInfo.init(state)
|
||||||
|
process_inactivity_updates(cfg, state, info)
|
||||||
|
|
||||||
# Rewards & Penalties
|
# Rewards & Penalties
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
@ -79,53 +68,63 @@ runSuite(InactivityDir, "Inactivity", process_inactivity_updates, useCache = fal
|
||||||
# Registry updates
|
# Registry updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const RegistryUpdatesDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"registry_updates"/"pyspec_tests"
|
const RegistryUpdatesDir = RootDir/"registry_updates"/"pyspec_tests"
|
||||||
runSuite(RegistryUpdatesDir, "Registry updates", process_registry_updates, useCache = true)
|
runSuite(RegistryUpdatesDir, "Registry updates"):
|
||||||
|
process_registry_updates(cfg, state, cache)
|
||||||
|
|
||||||
# Slashings
|
# Slashings
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const SlashingsDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"slashings"/"pyspec_tests"
|
const SlashingsDir = RootDir/"slashings"/"pyspec_tests"
|
||||||
runSuite(SlashingsDir, "Slashings", process_slashings, useCache = false, useTAB = true)
|
runSuite(SlashingsDir, "Slashings"):
|
||||||
|
let info = altair.EpochInfo.init(state)
|
||||||
|
process_slashings(state, info.balances.current_epoch)
|
||||||
|
|
||||||
# Eth1 data reset
|
# Eth1 data reset
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const Eth1DataResetDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"eth1_data_reset/"/"pyspec_tests"
|
const Eth1DataResetDir = RootDir/"eth1_data_reset/"/"pyspec_tests"
|
||||||
runSuite(Eth1DataResetDir, "Eth1 data reset", process_eth1_data_reset, useCache = false)
|
runSuite(Eth1DataResetDir, "Eth1 data reset"):
|
||||||
|
process_eth1_data_reset(state)
|
||||||
|
|
||||||
# Effective balance updates
|
# Effective balance updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const EffectiveBalanceUpdatesDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"effective_balance_updates"/"pyspec_tests"
|
const EffectiveBalanceUpdatesDir = RootDir/"effective_balance_updates"/"pyspec_tests"
|
||||||
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates", process_effective_balance_updates, useCache = false)
|
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates"):
|
||||||
|
process_effective_balance_updates(state)
|
||||||
|
|
||||||
# Slashings reset
|
# Slashings reset
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const SlashingsResetDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"slashings_reset"/"pyspec_tests"
|
const SlashingsResetDir = RootDir/"slashings_reset"/"pyspec_tests"
|
||||||
runSuite(SlashingsResetDir, "Slashings reset", process_slashings_reset, useCache = false)
|
runSuite(SlashingsResetDir, "Slashings reset"):
|
||||||
|
process_slashings_reset(state)
|
||||||
|
|
||||||
# RANDAO mixes reset
|
# RANDAO mixes reset
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const RandaoMixesResetDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"randao_mixes_reset"/"pyspec_tests"
|
const RandaoMixesResetDir = RootDir/"randao_mixes_reset"/"pyspec_tests"
|
||||||
runSuite(RandaoMixesResetDir, "RANDAO mixes reset", process_randao_mixes_reset, useCache = false)
|
runSuite(RandaoMixesResetDir, "RANDAO mixes reset"):
|
||||||
|
process_randao_mixes_reset(state)
|
||||||
|
|
||||||
# Historical roots update
|
# Historical roots update
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const HistoricalRootsUpdateDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"historical_roots_update"/"pyspec_tests"
|
const HistoricalRootsUpdateDir = RootDir/"historical_roots_update"/"pyspec_tests"
|
||||||
runSuite(HistoricalRootsUpdateDir, "Historical roots update", process_historical_roots_update, useCache = false)
|
runSuite(HistoricalRootsUpdateDir, "Historical roots update"):
|
||||||
|
process_historical_roots_update(state)
|
||||||
|
|
||||||
# Participation flag updates
|
# Participation flag updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const ParticipationFlagDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"participation_flag_updates"/"pyspec_tests"
|
const ParticipationFlagDir = RootDir/"participation_flag_updates"/"pyspec_tests"
|
||||||
runSuite(ParticipationFlagDir, "Participation flag updates", process_participation_flag_updates, useCache = false)
|
runSuite(ParticipationFlagDir, "Participation flag updates"):
|
||||||
|
process_participation_flag_updates(state)
|
||||||
|
|
||||||
# Sync committee updates
|
# Sync committee updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const SyncCommitteeDir = SszTestsDir/const_preset/"altair"/"epoch_processing"/"sync_committee_updates"/"pyspec_tests"
|
const SyncCommitteeDir = RootDir/"sync_committee_updates"/"pyspec_tests"
|
||||||
runSuite(SyncCommitteeDir, "Sync committee updates", process_sync_committee_updates, useCache = false)
|
runSuite(SyncCommitteeDir, "Sync committee updates"):
|
||||||
|
process_sync_committee_updates(state)
|
||||||
|
|
|
@ -7,7 +7,7 @@
|
||||||
|
|
||||||
{.used.}
|
{.used.}
|
||||||
|
|
||||||
import
|
import
|
||||||
# Standard library
|
# Standard library
|
||||||
std/[algorithm, sequtils, sets],
|
std/[algorithm, sequtils, sets],
|
||||||
# Status libraries
|
# Status libraries
|
||||||
|
@ -31,17 +31,17 @@ proc compute_aggregate_sync_committee_signature(
|
||||||
return ValidatorSig.infinity
|
return ValidatorSig.infinity
|
||||||
|
|
||||||
let
|
let
|
||||||
root =
|
root =
|
||||||
if block_root != ZERO_HASH: block_root
|
if block_root != ZERO_HASH: block_root
|
||||||
else: mockBlockForNextSlot(forked).altairBlock.message.parent_root
|
else: mockBlockForNextSlot(forked).altairBlock.message.parent_root
|
||||||
signing_root = sync_committee_msg_signing_root(
|
signing_root = sync_committee_msg_signing_root(
|
||||||
state.fork, state.slot.epoch, state.genesis_validators_root, root)
|
state.fork, state.slot.epoch, state.genesis_validators_root, root)
|
||||||
|
|
||||||
var
|
var
|
||||||
aggregateSig {.noInit.}: AggregateSignature
|
aggregateSig {.noInit.}: AggregateSignature
|
||||||
initialized = false
|
initialized = false
|
||||||
for validator_index in participants:
|
for validator_index in participants:
|
||||||
let
|
let
|
||||||
privkey = MockPrivKeys[validator_index]
|
privkey = MockPrivKeys[validator_index]
|
||||||
signature = blsSign(privkey, signing_root.data)
|
signature = blsSign(privkey, signing_root.data)
|
||||||
if not initialized:
|
if not initialized:
|
||||||
|
@ -69,7 +69,7 @@ proc block_for_next_slot(
|
||||||
makeFullAttestations(forked, parent_root, state.slot, cache)
|
makeFullAttestations(forked, parent_root, state.slot, cache)
|
||||||
else:
|
else:
|
||||||
@[]
|
@[]
|
||||||
|
|
||||||
addTestBlock(
|
addTestBlock(
|
||||||
forked, parent_root, cache, attestations = attestations, cfg = cfg)
|
forked, parent_root, cache, attestations = attestations, cfg = cfg)
|
||||||
|
|
||||||
|
@ -79,7 +79,7 @@ let full_sync_committee_bits = block:
|
||||||
res
|
res
|
||||||
|
|
||||||
suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
|
suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
|
||||||
let
|
let
|
||||||
cfg = block:
|
cfg = block:
|
||||||
var res = defaultRuntimeConfig
|
var res = defaultRuntimeConfig
|
||||||
res.ALTAIR_FORK_EPOCH = GENESIS_EPOCH
|
res.ALTAIR_FORK_EPOCH = GENESIS_EPOCH
|
||||||
|
@ -90,7 +90,7 @@ suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
|
||||||
test "process_light_client_update_not_updated":
|
test "process_light_client_update_not_updated":
|
||||||
var forked = assignClone(genesisState[])
|
var forked = assignClone(genesisState[])
|
||||||
template state: untyped {.inject.} = forked[].hbsAltair.data
|
template state: untyped {.inject.} = forked[].hbsAltair.data
|
||||||
|
|
||||||
let pre_snapshot = LightClientSnapshot(
|
let pre_snapshot = LightClientSnapshot(
|
||||||
current_sync_committee: state.current_sync_committee,
|
current_sync_committee: state.current_sync_committee,
|
||||||
next_sync_committee: state.next_sync_committee)
|
next_sync_committee: state.next_sync_committee)
|
||||||
|
@ -114,7 +114,7 @@ suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
|
||||||
sync_committee_bits = full_sync_committee_bits
|
sync_committee_bits = full_sync_committee_bits
|
||||||
sync_committee_signature = compute_aggregate_sync_committee_signature(
|
sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||||
forked[], committee)
|
forked[], committee)
|
||||||
var next_sync_committee_branch:
|
var next_sync_committee_branch:
|
||||||
array[log2trunc(NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
|
array[log2trunc(NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
|
||||||
|
|
||||||
# Ensure that finality checkpoint is genesis
|
# Ensure that finality checkpoint is genesis
|
||||||
|
@ -132,8 +132,8 @@ suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
|
||||||
sync_committee_bits: sync_committee_bits,
|
sync_committee_bits: sync_committee_bits,
|
||||||
sync_committee_signature: sync_committee_signature,
|
sync_committee_signature: sync_committee_signature,
|
||||||
fork_version: state.fork.current_version)
|
fork_version: state.fork.current_version)
|
||||||
|
|
||||||
check:
|
check:
|
||||||
process_light_client_update(
|
process_light_client_update(
|
||||||
store, update, state.slot, state.genesis_validators_root)
|
store, update, state.slot, state.genesis_validators_root)
|
||||||
|
|
||||||
|
@ -154,12 +154,12 @@ suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
|
||||||
# Forward to next sync committee period
|
# Forward to next sync committee period
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
doAssert process_slots(
|
doAssert process_slots(
|
||||||
cfg, forked[], Slot(SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD),
|
cfg, forked[], Slot(SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD),
|
||||||
cache, rewards, flags = {})
|
cache, info, flags = {})
|
||||||
let
|
let
|
||||||
snapshot_period =
|
snapshot_period =
|
||||||
pre_snapshot.header.slot.epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
pre_snapshot.header.slot.epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||||
update_period = state.slot.epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
update_period = state.slot.epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||||
check: snapshot_period + 1 == update_period
|
check: snapshot_period + 1 == update_period
|
||||||
|
@ -180,9 +180,9 @@ suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
|
||||||
sync_committee_bits = full_sync_committee_bits
|
sync_committee_bits = full_sync_committee_bits
|
||||||
sync_committee_signature = compute_aggregate_sync_committee_signature(
|
sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||||
forked[], committee, block_root = block_header.hash_tree_root())
|
forked[], committee, block_root = block_header.hash_tree_root())
|
||||||
|
|
||||||
# Sync committee is updated
|
# Sync committee is updated
|
||||||
var next_sync_committee_branch {.noinit.}:
|
var next_sync_committee_branch {.noinit.}:
|
||||||
array[log2trunc(NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
|
array[log2trunc(NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
|
||||||
build_proof(state, NEXT_SYNC_COMMITTEE_INDEX, next_sync_committee_branch)
|
build_proof(state, NEXT_SYNC_COMMITTEE_INDEX, next_sync_committee_branch)
|
||||||
# Finality is unchanged
|
# Finality is unchanged
|
||||||
|
@ -199,7 +199,7 @@ suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
|
||||||
sync_committee_signature: sync_committee_signature,
|
sync_committee_signature: sync_committee_signature,
|
||||||
fork_version: state.fork.current_version)
|
fork_version: state.fork.current_version)
|
||||||
|
|
||||||
check:
|
check:
|
||||||
process_light_client_update(
|
process_light_client_update(
|
||||||
store, update, state.slot, state.genesis_validators_root)
|
store, update, state.slot, state.genesis_validators_root)
|
||||||
|
|
||||||
|
@ -218,28 +218,27 @@ suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
|
||||||
var store = LightClientStore(snapshot: pre_snapshot)
|
var store = LightClientStore(snapshot: pre_snapshot)
|
||||||
|
|
||||||
# Change finality
|
# Change finality
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
blocks = newSeq[ForkedSignedBeaconBlock]()
|
blocks = newSeq[ForkedSignedBeaconBlock]()
|
||||||
doAssert process_slots(
|
doAssert process_slots(
|
||||||
cfg, forked[], Slot(SLOTS_PER_EPOCH * 2),
|
cfg, forked[], Slot(SLOTS_PER_EPOCH * 2), cache, info, flags = {})
|
||||||
cache, rewards, flags = {})
|
for epoch in 0 ..< 3:
|
||||||
for epoch in 0 ..< 3:
|
|
||||||
for slot in 0 ..< SLOTS_PER_EPOCH:
|
for slot in 0 ..< SLOTS_PER_EPOCH:
|
||||||
blocks.add block_for_next_slot(cfg, forked[], cache,
|
blocks.add block_for_next_slot(cfg, forked[], cache,
|
||||||
withAttestations = true)
|
withAttestations = true)
|
||||||
# Ensure that finality checkpoint has changed
|
# Ensure that finality checkpoint has changed
|
||||||
check: state.finalized_checkpoint.epoch == 3
|
check: state.finalized_checkpoint.epoch == 3
|
||||||
# Ensure that it's same period
|
# Ensure that it's same period
|
||||||
let
|
let
|
||||||
snapshot_period =
|
snapshot_period =
|
||||||
pre_snapshot.header.slot.epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
pre_snapshot.header.slot.epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||||
update_period = state.slot.epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
update_period = state.slot.epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||||
check: snapshot_period == update_period
|
check: snapshot_period == update_period
|
||||||
|
|
||||||
# Updated sync_committee and finality
|
# Updated sync_committee and finality
|
||||||
var next_sync_committee_branch:
|
var next_sync_committee_branch:
|
||||||
array[log2trunc(NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
|
array[log2trunc(NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
|
||||||
let
|
let
|
||||||
finalized_block = blocks[SLOTS_PER_EPOCH - 1].altairBlock
|
finalized_block = blocks[SLOTS_PER_EPOCH - 1].altairBlock
|
||||||
|
@ -249,17 +248,17 @@ suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
|
||||||
parent_root: finalized_block.message.parent_root,
|
parent_root: finalized_block.message.parent_root,
|
||||||
state_root: finalized_block.message.state_root,
|
state_root: finalized_block.message.state_root,
|
||||||
body_root: finalized_block.message.body.hash_tree_root())
|
body_root: finalized_block.message.body.hash_tree_root())
|
||||||
check:
|
check:
|
||||||
finalized_block_header.slot ==
|
finalized_block_header.slot ==
|
||||||
compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
|
compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
|
||||||
finalized_block_header.hash_tree_root() ==
|
finalized_block_header.hash_tree_root() ==
|
||||||
state.finalized_checkpoint.root
|
state.finalized_checkpoint.root
|
||||||
var finality_branch {.noinit.}:
|
var finality_branch {.noinit.}:
|
||||||
array[log2trunc(FINALIZED_ROOT_INDEX), Eth2Digest]
|
array[log2trunc(FINALIZED_ROOT_INDEX), Eth2Digest]
|
||||||
build_proof(state, FINALIZED_ROOT_INDEX, finality_branch)
|
build_proof(state, FINALIZED_ROOT_INDEX, finality_branch)
|
||||||
|
|
||||||
# Build block header
|
# Build block header
|
||||||
let
|
let
|
||||||
blck = mockBlock(forked[], state.slot, cfg = cfg).altairBlock.message
|
blck = mockBlock(forked[], state.slot, cfg = cfg).altairBlock.message
|
||||||
block_header = BeaconBlockHeader(
|
block_header = BeaconBlockHeader(
|
||||||
slot: blck.slot,
|
slot: blck.slot,
|
||||||
|
@ -286,7 +285,7 @@ suite "Ethereum Foundation - Altair - Unittests - Sync protocol" & preset():
|
||||||
sync_committee_signature: sync_committee_signature,
|
sync_committee_signature: sync_committee_signature,
|
||||||
fork_version: state.fork.current_version)
|
fork_version: state.fork.current_version)
|
||||||
|
|
||||||
check:
|
check:
|
||||||
process_light_client_update(
|
process_light_client_update(
|
||||||
store, update, state.slot, state.genesis_validators_root)
|
store, update, state.slot, state.genesis_validators_root)
|
||||||
|
|
||||||
|
|
|
@ -12,6 +12,7 @@ import
|
||||||
# Standard library
|
# Standard library
|
||||||
os, sequtils,
|
os, sequtils,
|
||||||
# Status internal
|
# Status internal
|
||||||
|
chronicles,
|
||||||
faststreams, streams,
|
faststreams, streams,
|
||||||
# Beacon chain internals
|
# Beacon chain internals
|
||||||
../../../beacon_chain/spec/[state_transition, forks, helpers],
|
../../../beacon_chain/spec/[state_transition, forks, helpers],
|
||||||
|
@ -45,7 +46,7 @@ proc runTest(testName, testDir, unitTestName: string) =
|
||||||
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
|
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
|
||||||
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
|
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
cfg = defaultRuntimeConfig
|
cfg = defaultRuntimeConfig
|
||||||
cfg.ALTAIR_FORK_EPOCH = transitionEpoch.fork_epoch.Epoch
|
cfg.ALTAIR_FORK_EPOCH = transitionEpoch.fork_epoch.Epoch
|
||||||
|
|
||||||
|
@ -58,16 +59,14 @@ proc runTest(testName, testDir, unitTestName: string) =
|
||||||
let blck = parseTest(testPath/"blocks_" & $i & ".ssz_snappy", SSZ, phase0.SignedBeaconBlock)
|
let blck = parseTest(testPath/"blocks_" & $i & ".ssz_snappy", SSZ, phase0.SignedBeaconBlock)
|
||||||
|
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
cfg, fhPreState[], blck,
|
cfg, fhPreState[], blck, cache, info,
|
||||||
cache, rewards,
|
|
||||||
flags = {skipStateRootValidation}, noRollback)
|
flags = {skipStateRootValidation}, noRollback)
|
||||||
doAssert success, "Failure when applying block " & $i
|
doAssert success, "Failure when applying block " & $i
|
||||||
else:
|
else:
|
||||||
let blck = parseTest(testPath/"blocks_" & $i & ".ssz_snappy", SSZ, altair.SignedBeaconBlock)
|
let blck = parseTest(testPath/"blocks_" & $i & ".ssz_snappy", SSZ, altair.SignedBeaconBlock)
|
||||||
|
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
cfg, fhPreState[], blck,
|
cfg, fhPreState[], blck, cache, info,
|
||||||
cache, rewards,
|
|
||||||
flags = {skipStateRootValidation}, noRollback)
|
flags = {skipStateRootValidation}, noRollback)
|
||||||
doAssert success, "Failure when applying block " & $i
|
doAssert success, "Failure when applying block " & $i
|
||||||
|
|
||||||
|
|
|
@ -9,9 +9,8 @@ import
|
||||||
# Standard library
|
# Standard library
|
||||||
std/[os, strutils, typetraits],
|
std/[os, strutils, typetraits],
|
||||||
# Internals
|
# Internals
|
||||||
../../beacon_chain/spec/datatypes/[phase0, altair],
|
|
||||||
../../beacon_chain/spec/[
|
../../beacon_chain/spec/[
|
||||||
eth2_merkleization, eth2_ssz_serialization, state_transition_epoch],
|
eth2_merkleization, eth2_ssz_serialization],
|
||||||
# Status libs,
|
# Status libs,
|
||||||
snappy,
|
snappy,
|
||||||
stew/byteutils
|
stew/byteutils
|
||||||
|
@ -72,18 +71,3 @@ proc parseTest*(path: string, Format: typedesc[SSZ], T: typedesc): T =
|
||||||
stderr.write $Format & " load issue for file \"", path, "\"\n"
|
stderr.write $Format & " load issue for file \"", path, "\"\n"
|
||||||
stderr.write err.formatMsg(path), "\n"
|
stderr.write err.formatMsg(path), "\n"
|
||||||
quit 1
|
quit 1
|
||||||
|
|
||||||
proc process_justification_and_finalization*(state: var phase0.BeaconState) =
|
|
||||||
var cache = StateCache()
|
|
||||||
|
|
||||||
var rewards: RewardInfo
|
|
||||||
rewards.init(state)
|
|
||||||
rewards.process_attestations(state, cache)
|
|
||||||
process_justification_and_finalization(state, rewards.total_balances)
|
|
||||||
|
|
||||||
func process_slashings*(state: var phase0.BeaconState) =
|
|
||||||
var cache = StateCache()
|
|
||||||
var rewards: RewardInfo
|
|
||||||
rewards.init(state)
|
|
||||||
rewards.process_attestations(state, cache)
|
|
||||||
process_slashings(state, rewards.total_balances.current_epoch)
|
|
||||||
|
|
|
@ -45,8 +45,8 @@ proc runTest(identifier: string) =
|
||||||
attestation = parseTest(
|
attestation = parseTest(
|
||||||
testDir/"attestation.ssz_snappy", SSZ, Attestation)
|
testDir/"attestation.ssz_snappy", SSZ, Attestation)
|
||||||
done = process_attestation(
|
done = process_attestation(
|
||||||
preState[], attestation, {},
|
preState[], attestation, {}, get_base_reward_per_increment(
|
||||||
get_base_reward_per_increment(preState[], cache), cache)
|
get_total_active_balance(preState[], cache)), cache)
|
||||||
|
|
||||||
if existsFile(testDir/"post.ssz_snappy"):
|
if existsFile(testDir/"post.ssz_snappy"):
|
||||||
let postState =
|
let postState =
|
||||||
|
|
|
@ -36,7 +36,7 @@ proc runTest(testName, testDir, unitTestName: string) =
|
||||||
fhPreState = (ref ForkedHashedBeaconState)(hbsMerge: merge.HashedBeaconState(
|
fhPreState = (ref ForkedHashedBeaconState)(hbsMerge: merge.HashedBeaconState(
|
||||||
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkMerge)
|
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkMerge)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
|
|
||||||
# In test cases with more than 10 blocks the first 10 aren't 0-prefixed,
|
# In test cases with more than 10 blocks the first 10 aren't 0-prefixed,
|
||||||
# so purely lexicographic sorting wouldn't sort properly.
|
# so purely lexicographic sorting wouldn't sort properly.
|
||||||
|
@ -48,12 +48,12 @@ proc runTest(testName, testDir, unitTestName: string) =
|
||||||
|
|
||||||
if hasPostState:
|
if hasPostState:
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
|
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
|
||||||
noRollback)
|
noRollback)
|
||||||
doAssert success, "Failure when applying block " & $i
|
doAssert success, "Failure when applying block " & $i
|
||||||
else:
|
else:
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
|
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
|
||||||
noRollback)
|
noRollback)
|
||||||
doAssert (i + 1 < numBlocks) or not success,
|
doAssert (i + 1 < numBlocks) or not success,
|
||||||
"We didn't expect these invalid blocks to be processed"
|
"We didn't expect these invalid blocks to be processed"
|
||||||
|
|
|
@ -35,13 +35,13 @@ proc runTest(identifier: string) =
|
||||||
data: preState[], root: hash_tree_root(preState[])),
|
data: preState[], root: hash_tree_root(preState[])),
|
||||||
beaconStateFork: forkMerge)
|
beaconStateFork: forkMerge)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards: RewardInfo
|
info = ForkedEpochInfo()
|
||||||
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, merge.BeaconState))
|
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, merge.BeaconState))
|
||||||
|
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, fhPreState[],
|
defaultRuntimeConfig, fhPreState[],
|
||||||
getStateField(fhPreState[], slot) + num_slots, cache, rewards, {})
|
getStateField(fhPreState[], slot) + num_slots, cache, info, {})
|
||||||
|
|
||||||
getStateRoot(fhPreState[]) == postState[].hash_tree_root()
|
getStateRoot(fhPreState[]) == postState[].hash_tree_root()
|
||||||
let newPreState = newClone(fhPreState.hbsMerge.data)
|
let newPreState = newClone(fhPreState.hbsMerge.data)
|
||||||
|
|
|
@ -11,17 +11,19 @@ import
|
||||||
# Standard library
|
# Standard library
|
||||||
os, strutils,
|
os, strutils,
|
||||||
# Beacon chain internals
|
# Beacon chain internals
|
||||||
|
chronicles,
|
||||||
../../../beacon_chain/spec/[beaconstate, presets, state_transition_epoch],
|
../../../beacon_chain/spec/[beaconstate, presets, state_transition_epoch],
|
||||||
../../../beacon_chain/spec/datatypes/merge,
|
../../../beacon_chain/spec/datatypes/[altair, merge],
|
||||||
# Test utilities
|
# Test utilities
|
||||||
../../testutil,
|
../../testutil,
|
||||||
../fixtures_utils,
|
../fixtures_utils,
|
||||||
../test_fixture_rewards,
|
../test_fixture_rewards,
|
||||||
../../helpers/debug_state
|
../../helpers/debug_state
|
||||||
|
|
||||||
|
const RootDir = SszTestsDir/const_preset/"merge"/"epoch_processing"
|
||||||
|
|
||||||
template runSuite(
|
template runSuite(
|
||||||
suiteDir, testName: string, transitionProc: untyped{ident},
|
suiteDir, testName: string, transitionProc: untyped): untyped =
|
||||||
useCache, useTAB, useUPB: static bool = false): untyped =
|
|
||||||
suite "Ethereum Foundation - Merge - Epoch Processing - " & testName & preset():
|
suite "Ethereum Foundation - Merge - Epoch Processing - " & testName & preset():
|
||||||
for testDir in walkDirRec(suiteDir, yieldFilter = {pcDir}, checkDir = true):
|
for testDir in walkDirRec(suiteDir, yieldFilter = {pcDir}, checkDir = true):
|
||||||
|
|
||||||
|
@ -29,47 +31,34 @@ template runSuite(
|
||||||
test testName & " - " & unitTestName & preset():
|
test testName & " - " & unitTestName & preset():
|
||||||
# BeaconState objects are stored on the heap to avoid stack overflow
|
# BeaconState objects are stored on the heap to avoid stack overflow
|
||||||
type T = merge.BeaconState
|
type T = merge.BeaconState
|
||||||
var preState = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
|
var preState {.inject.} = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
|
||||||
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T))
|
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T))
|
||||||
|
var cache {.inject, used.} = StateCache()
|
||||||
|
template state: untyped {.inject, used.} = preState[]
|
||||||
|
template cfg: untyped {.inject, used.} = defaultRuntimeConfig
|
||||||
|
|
||||||
doAssert not (useCache and useTAB)
|
transitionProc
|
||||||
when useCache:
|
|
||||||
var cache = StateCache()
|
check:
|
||||||
when compiles(transitionProc(defaultRuntimeConfig, preState[], cache)):
|
hash_tree_root(preState[]) == hash_tree_root(postState[])
|
||||||
transitionProc(defaultRuntimeConfig, preState[], cache)
|
|
||||||
else:
|
|
||||||
transitionProc(preState[], cache)
|
|
||||||
elif useTAB and not useUPB:
|
|
||||||
var cache = StateCache()
|
|
||||||
let total_active_balance = preState[].get_total_active_balance(cache)
|
|
||||||
transitionProc(preState[], total_active_balance)
|
|
||||||
elif useTAB and useUPB:
|
|
||||||
var cache = StateCache()
|
|
||||||
let
|
|
||||||
total_active_balance = preState[].get_total_active_balance(cache)
|
|
||||||
unslashed_participating_balances =
|
|
||||||
preState[].get_unslashed_participating_balances()
|
|
||||||
transitionProc(
|
|
||||||
preState[], total_active_balance, unslashed_participating_balances)
|
|
||||||
else:
|
|
||||||
when compiles(transitionProc(preState[])):
|
|
||||||
transitionProc(preState[])
|
|
||||||
else:
|
|
||||||
transitionProc(defaultRuntimeConfig, preState[])
|
|
||||||
|
|
||||||
reportDiff(preState, postState)
|
reportDiff(preState, postState)
|
||||||
|
|
||||||
# Justification & Finalization
|
# Justification & Finalization
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const JustificationFinalizationDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"justification_and_finalization"/"pyspec_tests"
|
const JustificationFinalizationDir = RootDir/"justification_and_finalization"/"pyspec_tests"
|
||||||
runSuite(JustificationFinalizationDir, "Justification & Finalization", process_justification_and_finalization, useCache = false, useTAB = true, useUPB = true)
|
runSuite(JustificationFinalizationDir, "Justification & Finalization"):
|
||||||
|
let info = altair.EpochInfo.init(state)
|
||||||
|
process_justification_and_finalization(state, info.balances)
|
||||||
|
|
||||||
# Inactivity updates
|
# Inactivity updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const InactivityDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"inactivity_updates"/"pyspec_tests"
|
const InactivityDir = RootDir/"inactivity_updates"/"pyspec_tests"
|
||||||
runSuite(InactivityDir, "Inactivity", process_inactivity_updates, useCache = false)
|
runSuite(InactivityDir, "Inactivity"):
|
||||||
|
let info = altair.EpochInfo.init(state)
|
||||||
|
process_inactivity_updates(cfg, state, info)
|
||||||
|
|
||||||
# Rewards & Penalties
|
# Rewards & Penalties
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
@ -79,53 +68,63 @@ runSuite(InactivityDir, "Inactivity", process_inactivity_updates, useCache = fal
|
||||||
# Registry updates
|
# Registry updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const RegistryUpdatesDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"registry_updates"/"pyspec_tests"
|
const RegistryUpdatesDir = RootDir/"registry_updates"/"pyspec_tests"
|
||||||
runSuite(RegistryUpdatesDir, "Registry updates", process_registry_updates, useCache = true)
|
runSuite(RegistryUpdatesDir, "Registry updates"):
|
||||||
|
process_registry_updates(cfg, state, cache)
|
||||||
|
|
||||||
# Slashings
|
# Slashings
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const SlashingsDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"slashings"/"pyspec_tests"
|
const SlashingsDir = RootDir/"slashings"/"pyspec_tests"
|
||||||
runSuite(SlashingsDir, "Slashings", process_slashings, useCache = false, useTAB = true)
|
runSuite(SlashingsDir, "Slashings"):
|
||||||
|
let info = altair.EpochInfo.init(state)
|
||||||
|
process_slashings(state, info.balances.current_epoch)
|
||||||
|
|
||||||
# Eth1 data reset
|
# Eth1 data reset
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const Eth1DataResetDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"eth1_data_reset/"/"pyspec_tests"
|
const Eth1DataResetDir = RootDir/"eth1_data_reset/"/"pyspec_tests"
|
||||||
runSuite(Eth1DataResetDir, "Eth1 data reset", process_eth1_data_reset, useCache = false)
|
runSuite(Eth1DataResetDir, "Eth1 data reset"):
|
||||||
|
process_eth1_data_reset(state)
|
||||||
|
|
||||||
# Effective balance updates
|
# Effective balance updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const EffectiveBalanceUpdatesDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"effective_balance_updates"/"pyspec_tests"
|
const EffectiveBalanceUpdatesDir = RootDir/"effective_balance_updates"/"pyspec_tests"
|
||||||
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates", process_effective_balance_updates, useCache = false)
|
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates"):
|
||||||
|
process_effective_balance_updates(state)
|
||||||
|
|
||||||
# Slashings reset
|
# Slashings reset
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const SlashingsResetDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"slashings_reset"/"pyspec_tests"
|
const SlashingsResetDir = RootDir/"slashings_reset"/"pyspec_tests"
|
||||||
runSuite(SlashingsResetDir, "Slashings reset", process_slashings_reset, useCache = false)
|
runSuite(SlashingsResetDir, "Slashings reset"):
|
||||||
|
process_slashings_reset(state)
|
||||||
|
|
||||||
# RANDAO mixes reset
|
# RANDAO mixes reset
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const RandaoMixesResetDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"randao_mixes_reset"/"pyspec_tests"
|
const RandaoMixesResetDir = RootDir/"randao_mixes_reset"/"pyspec_tests"
|
||||||
runSuite(RandaoMixesResetDir, "RANDAO mixes reset", process_randao_mixes_reset, useCache = false)
|
runSuite(RandaoMixesResetDir, "RANDAO mixes reset"):
|
||||||
|
process_randao_mixes_reset(state)
|
||||||
|
|
||||||
# Historical roots update
|
# Historical roots update
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const HistoricalRootsUpdateDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"historical_roots_update"/"pyspec_tests"
|
const HistoricalRootsUpdateDir = RootDir/"historical_roots_update"/"pyspec_tests"
|
||||||
runSuite(HistoricalRootsUpdateDir, "Historical roots update", process_historical_roots_update, useCache = false)
|
runSuite(HistoricalRootsUpdateDir, "Historical roots update"):
|
||||||
|
process_historical_roots_update(state)
|
||||||
|
|
||||||
# Participation flag updates
|
# Participation flag updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const ParticipationFlagDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"participation_flag_updates"/"pyspec_tests"
|
const ParticipationFlagDir = RootDir/"participation_flag_updates"/"pyspec_tests"
|
||||||
runSuite(ParticipationFlagDir, "Participation flag updates", process_participation_flag_updates, useCache = false)
|
runSuite(ParticipationFlagDir, "Participation flag updates"):
|
||||||
|
process_participation_flag_updates(state)
|
||||||
|
|
||||||
# Sync committee updates
|
# Sync committee updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const SyncCommitteeDir = SszTestsDir/const_preset/"merge"/"epoch_processing"/"sync_committee_updates"/"pyspec_tests"
|
const SyncCommitteeDir = RootDir/"sync_committee_updates"/"pyspec_tests"
|
||||||
runSuite(SyncCommitteeDir, "Sync committee updates", process_sync_committee_updates, useCache = false)
|
runSuite(SyncCommitteeDir, "Sync committee updates"):
|
||||||
|
process_sync_committee_updates(state)
|
||||||
|
|
|
@ -11,6 +11,7 @@ import
|
||||||
# Standard library
|
# Standard library
|
||||||
os,
|
os,
|
||||||
# Utilities
|
# Utilities
|
||||||
|
chronicles,
|
||||||
unittest2,
|
unittest2,
|
||||||
stew/results,
|
stew/results,
|
||||||
# Beacon chain internals
|
# Beacon chain internals
|
||||||
|
|
|
@ -36,7 +36,7 @@ proc runTest(testName, testDir, unitTestName: string) =
|
||||||
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
|
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
|
||||||
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
|
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
|
|
||||||
# In test cases with more than 10 blocks the first 10 aren't 0-prefixed,
|
# In test cases with more than 10 blocks the first 10 aren't 0-prefixed,
|
||||||
# so purely lexicographic sorting wouldn't sort properly.
|
# so purely lexicographic sorting wouldn't sort properly.
|
||||||
|
@ -46,12 +46,12 @@ proc runTest(testName, testDir, unitTestName: string) =
|
||||||
|
|
||||||
if hasPostState:
|
if hasPostState:
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
|
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
|
||||||
noRollback)
|
noRollback)
|
||||||
doAssert success, "Failure when applying block " & $i
|
doAssert success, "Failure when applying block " & $i
|
||||||
else:
|
else:
|
||||||
let success = state_transition(
|
let success = state_transition(
|
||||||
defaultRuntimeConfig, fhPreState[], blck, cache, rewards, flags = {},
|
defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
|
||||||
noRollback)
|
noRollback)
|
||||||
doAssert (i + 1 < numBlocks) or not success,
|
doAssert (i + 1 < numBlocks) or not success,
|
||||||
"We didn't expect these invalid blocks to be processed"
|
"We didn't expect these invalid blocks to be processed"
|
||||||
|
|
|
@ -32,14 +32,14 @@ proc runTest(identifier: string) =
|
||||||
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
|
fhPreState = (ref ForkedHashedBeaconState)(hbsPhase0: phase0.HashedBeaconState(
|
||||||
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
|
data: preState[], root: hash_tree_root(preState[])), beaconStateFork: forkPhase0)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards: RewardInfo
|
info: ForkedEpochInfo
|
||||||
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, phase0.BeaconState))
|
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, phase0.BeaconState))
|
||||||
|
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig,
|
defaultRuntimeConfig,
|
||||||
fhPreState[], getStateField(fhPreState[], slot) + num_slots, cache,
|
fhPreState[], getStateField(fhPreState[], slot) + num_slots, cache,
|
||||||
rewards, {})
|
info, {})
|
||||||
|
|
||||||
getStateRoot(fhPreState[]) == postState[].hash_tree_root()
|
getStateRoot(fhPreState[]) == postState[].hash_tree_root()
|
||||||
let newPreState = newClone(fhPreState.hbsPhase0.data)
|
let newPreState = newClone(fhPreState.hbsPhase0.data)
|
||||||
|
|
|
@ -11,6 +11,7 @@ import
|
||||||
# Standard library
|
# Standard library
|
||||||
os, strutils,
|
os, strutils,
|
||||||
# Beacon chain internals
|
# Beacon chain internals
|
||||||
|
chronicles,
|
||||||
../../../beacon_chain/spec/state_transition_epoch,
|
../../../beacon_chain/spec/state_transition_epoch,
|
||||||
../../../beacon_chain/spec/datatypes/phase0,
|
../../../beacon_chain/spec/datatypes/phase0,
|
||||||
# Test utilities
|
# Test utilities
|
||||||
|
@ -19,7 +20,9 @@ import
|
||||||
../test_fixture_rewards,
|
../test_fixture_rewards,
|
||||||
../../helpers/debug_state
|
../../helpers/debug_state
|
||||||
|
|
||||||
template runSuite(suiteDir, testName: string, transitionProc: untyped{ident}, useCache: static bool): untyped =
|
const RootDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"
|
||||||
|
|
||||||
|
template runSuite(suiteDir, testName: string, transitionProc: untyped): untyped =
|
||||||
suite "Ethereum Foundation - Phase 0 - Epoch Processing - " & testName & preset():
|
suite "Ethereum Foundation - Phase 0 - Epoch Processing - " & testName & preset():
|
||||||
for testDir in walkDirRec(suiteDir, yieldFilter = {pcDir}, checkDir = true):
|
for testDir in walkDirRec(suiteDir, yieldFilter = {pcDir}, checkDir = true):
|
||||||
|
|
||||||
|
@ -27,23 +30,28 @@ template runSuite(suiteDir, testName: string, transitionProc: untyped{ident}, us
|
||||||
test testName & " - " & unitTestName & preset():
|
test testName & " - " & unitTestName & preset():
|
||||||
# BeaconState objects are stored on the heap to avoid stack overflow
|
# BeaconState objects are stored on the heap to avoid stack overflow
|
||||||
type T = phase0.BeaconState
|
type T = phase0.BeaconState
|
||||||
var preState = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
|
var preState {.inject.} = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
|
||||||
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T))
|
let postState = newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T))
|
||||||
var cache {.used.}: StateCache
|
var cache {.inject, used.} = StateCache()
|
||||||
when compiles(transitionProc(defaultRuntimeConfig, preState[], cache)):
|
var info {.inject.}: EpochInfo
|
||||||
transitionProc(defaultRuntimeConfig, preState[], cache)
|
template state: untyped {.inject, used.} = preState[]
|
||||||
elif compiles(transitionProc(preState[], cache)):
|
template cfg: untyped {.inject, used.} = defaultRuntimeConfig
|
||||||
transitionProc(preState[], cache)
|
init(info, preState[])
|
||||||
else:
|
|
||||||
transitionProc(preState[])
|
transitionProc
|
||||||
|
|
||||||
|
check:
|
||||||
|
hash_tree_root(preState[]) == hash_tree_root(postState[])
|
||||||
|
|
||||||
reportDiff(preState, postState)
|
reportDiff(preState, postState)
|
||||||
|
|
||||||
# Justification & Finalization
|
# Justification & Finalization
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const JustificationFinalizationDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"justification_and_finalization"/"pyspec_tests"
|
const JustificationFinalizationDir = RootDir/"justification_and_finalization"/"pyspec_tests"
|
||||||
runSuite(JustificationFinalizationDir, "Justification & Finalization", process_justification_and_finalization, useCache = false)
|
runSuite(JustificationFinalizationDir, "Justification & Finalization"):
|
||||||
|
info.process_attestations(state, cache)
|
||||||
|
process_justification_and_finalization(state, info.total_balances)
|
||||||
|
|
||||||
# Rewards & Penalties
|
# Rewards & Penalties
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
@ -53,32 +61,41 @@ runSuite(JustificationFinalizationDir, "Justification & Finalization", process_
|
||||||
# Registry updates
|
# Registry updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const RegistryUpdatesDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"registry_updates"/"pyspec_tests"
|
const RegistryUpdatesDir = RootDir/"registry_updates"/"pyspec_tests"
|
||||||
runSuite(RegistryUpdatesDir, "Registry updates", process_registry_updates, useCache = true)
|
runSuite(RegistryUpdatesDir, "Registry updates"):
|
||||||
|
process_registry_updates(cfg, state, cache)
|
||||||
|
|
||||||
# Slashings
|
# Slashings
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const SlashingsDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"slashings"/"pyspec_tests"
|
const SlashingsDir = RootDir/"slashings"/"pyspec_tests"
|
||||||
runSuite(SlashingsDir, "Slashings", process_slashings, useCache = false)
|
runSuite(SlashingsDir, "Slashings"):
|
||||||
|
info.process_attestations(state, cache)
|
||||||
|
process_slashings(state, info.total_balances.current_epoch)
|
||||||
|
|
||||||
# Final updates
|
# Final updates
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
||||||
const Eth1DataResetDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"eth1_data_reset/"/"pyspec_tests"
|
const Eth1DataResetDir = RootDir/"eth1_data_reset/"/"pyspec_tests"
|
||||||
runSuite(Eth1DataResetDir, "Eth1 data reset", process_eth1_data_reset, useCache = false)
|
runSuite(Eth1DataResetDir, "Eth1 data reset"):
|
||||||
|
process_eth1_data_reset(state)
|
||||||
|
|
||||||
const EffectiveBalanceUpdatesDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"effective_balance_updates"/"pyspec_tests"
|
const EffectiveBalanceUpdatesDir = RootDir/"effective_balance_updates"/"pyspec_tests"
|
||||||
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates", process_effective_balance_updates, useCache = false)
|
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates"):
|
||||||
|
process_effective_balance_updates(state)
|
||||||
|
|
||||||
const SlashingsResetDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"slashings_reset"/"pyspec_tests"
|
const SlashingsResetDir = RootDir/"slashings_reset"/"pyspec_tests"
|
||||||
runSuite(SlashingsResetDir, "Slashings reset", process_slashings_reset, useCache = false)
|
runSuite(SlashingsResetDir, "Slashings reset"):
|
||||||
|
process_slashings_reset(state)
|
||||||
|
|
||||||
const RandaoMixesResetDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"randao_mixes_reset"/"pyspec_tests"
|
const RandaoMixesResetDir = RootDir/"randao_mixes_reset"/"pyspec_tests"
|
||||||
runSuite(RandaoMixesResetDir, "RANDAO mixes reset", process_randao_mixes_reset, useCache = false)
|
runSuite(RandaoMixesResetDir, "RANDAO mixes reset"):
|
||||||
|
process_randao_mixes_reset(state)
|
||||||
|
|
||||||
const HistoricalRootsUpdateDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"historical_roots_update"/"pyspec_tests"
|
const HistoricalRootsUpdateDir = RootDir/"historical_roots_update"/"pyspec_tests"
|
||||||
runSuite(HistoricalRootsUpdateDir, "Historical roots update", process_historical_roots_update, useCache = false)
|
runSuite(HistoricalRootsUpdateDir, "Historical roots update"):
|
||||||
|
process_historical_roots_update(state)
|
||||||
|
|
||||||
const ParticipationRecordsDir = SszTestsDir/const_preset/"phase0"/"epoch_processing"/"participation_record_updates"/"pyspec_tests"
|
const ParticipationRecordsDir = RootDir/"participation_record_updates"/"pyspec_tests"
|
||||||
runSuite(ParticipationRecordsDir, "Participation record updates", process_participation_record_updates, useCache = false)
|
runSuite(ParticipationRecordsDir, "Participation record updates"):
|
||||||
|
process_participation_record_updates(state)
|
||||||
|
|
|
@ -13,8 +13,8 @@ import
|
||||||
# Utilities
|
# Utilities
|
||||||
stew/results,
|
stew/results,
|
||||||
# Beacon chain internals
|
# Beacon chain internals
|
||||||
../../beacon_chain/spec/datatypes/phase0,
|
|
||||||
../../beacon_chain/spec/[validator, helpers, state_transition_epoch],
|
../../beacon_chain/spec/[validator, helpers, state_transition_epoch],
|
||||||
|
../../beacon_chain/spec/datatypes/phase0,
|
||||||
# Test utilities
|
# Test utilities
|
||||||
../testutil,
|
../testutil,
|
||||||
./fixtures_utils
|
./fixtures_utils
|
||||||
|
@ -60,13 +60,13 @@ proc runTest(rewardsDir, identifier: string) =
|
||||||
parseTest(testDir/"inactivity_penalty_deltas.ssz_snappy", SSZ, Deltas)
|
parseTest(testDir/"inactivity_penalty_deltas.ssz_snappy", SSZ, Deltas)
|
||||||
|
|
||||||
var
|
var
|
||||||
rewards = RewardInfo()
|
info: phase0.EpochInfo
|
||||||
finality_delay = (state[].get_previous_epoch() - state[].finalized_checkpoint.epoch)
|
finality_delay = (state[].get_previous_epoch() - state[].finalized_checkpoint.epoch)
|
||||||
|
|
||||||
rewards.init(state[])
|
info.init(state[])
|
||||||
rewards.process_attestations(state[], cache)
|
info.process_attestations(state[], cache)
|
||||||
let
|
let
|
||||||
total_balance = rewards.total_balances.current_epoch
|
total_balance = info.total_balances.current_epoch
|
||||||
total_balance_sqrt = integer_squareroot(total_balance)
|
total_balance_sqrt = integer_squareroot(total_balance)
|
||||||
|
|
||||||
var
|
var
|
||||||
|
@ -76,7 +76,7 @@ proc runTest(rewardsDir, identifier: string) =
|
||||||
inclusionDelayDeltas2 = Deltas.init(state[].validators.len)
|
inclusionDelayDeltas2 = Deltas.init(state[].validators.len)
|
||||||
inactivityPenaltyDeltas2 = Deltas.init(state[].validators.len)
|
inactivityPenaltyDeltas2 = Deltas.init(state[].validators.len)
|
||||||
|
|
||||||
for index, validator in rewards.statuses.mpairs():
|
for index, validator in info.statuses.mpairs():
|
||||||
if not is_eligible_validator(validator):
|
if not is_eligible_validator(validator):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
@ -85,11 +85,11 @@ proc runTest(rewardsDir, identifier: string) =
|
||||||
state[], index.ValidatorIndex, total_balance_sqrt)
|
state[], index.ValidatorIndex, total_balance_sqrt)
|
||||||
|
|
||||||
sourceDeltas2.add(index, get_source_delta(
|
sourceDeltas2.add(index, get_source_delta(
|
||||||
validator, base_reward, rewards.total_balances, finality_delay))
|
validator, base_reward, info.total_balances, finality_delay))
|
||||||
targetDeltas2.add(index, get_target_delta(
|
targetDeltas2.add(index, get_target_delta(
|
||||||
validator, base_reward, rewards.total_balances, finality_delay))
|
validator, base_reward, info.total_balances, finality_delay))
|
||||||
headDeltas2.add(index, get_head_delta(
|
headDeltas2.add(index, get_head_delta(
|
||||||
validator, base_reward, rewards.total_balances, finality_delay))
|
validator, base_reward, info.total_balances, finality_delay))
|
||||||
|
|
||||||
let
|
let
|
||||||
(inclusion_delay_delta, proposer_delta) =
|
(inclusion_delay_delta, proposer_delta) =
|
||||||
|
|
|
@ -88,8 +88,8 @@ proc mockBlock*(
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
tmpState = assignClone(state)
|
tmpState = assignClone(state)
|
||||||
if getStateField(state, slot) != slot:
|
if getStateField(state, slot) != slot:
|
||||||
var rewards = RewardInfo()
|
var info = ForkedEpochInfo()
|
||||||
doAssert process_slots(cfg, tmpState[], slot, cache, rewards, flags = {})
|
doAssert process_slots(cfg, tmpState[], slot, cache, info, flags = {})
|
||||||
|
|
||||||
result.kind = case tmpState[].beaconStateFork
|
result.kind = case tmpState[].beaconStateFork
|
||||||
of forkPhase0: BeaconBlockFork.Phase0
|
of forkPhase0: BeaconBlockFork.Phase0
|
||||||
|
|
|
@ -17,17 +17,17 @@ proc nextEpoch*(state: var ForkedHashedBeaconState) =
|
||||||
## Transition to the start of the next epoch
|
## Transition to the start of the next epoch
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
let slot =
|
let slot =
|
||||||
getStateField(state, slot) + SLOTS_PER_EPOCH -
|
getStateField(state, slot) + SLOTS_PER_EPOCH -
|
||||||
(getStateField(state, slot) mod SLOTS_PER_EPOCH)
|
(getStateField(state, slot) mod SLOTS_PER_EPOCH)
|
||||||
doAssert process_slots(defaultRuntimeConfig, state, slot, cache, rewards, {})
|
doAssert process_slots(defaultRuntimeConfig, state, slot, cache, info, {})
|
||||||
|
|
||||||
proc nextSlot*(state: var ForkedHashedBeaconState) =
|
proc nextSlot*(state: var ForkedHashedBeaconState) =
|
||||||
## Transition to the next slot
|
## Transition to the next slot
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
|
|
||||||
doAssert process_slots(
|
doAssert process_slots(
|
||||||
defaultRuntimeConfig, state, getStateField(state, slot) + 1, cache, rewards, {})
|
defaultRuntimeConfig, state, getStateField(state, slot) + 1, cache, info, {})
|
||||||
|
|
|
@ -9,19 +9,19 @@ import
|
||||||
# Specs
|
# Specs
|
||||||
../../beacon_chain/spec/[
|
../../beacon_chain/spec/[
|
||||||
forks, presets, state_transition, state_transition_epoch],
|
forks, presets, state_transition, state_transition_epoch],
|
||||||
../../beacon_chain/spec/datatypes/base
|
../../beacon_chain/spec/datatypes/phase0
|
||||||
|
|
||||||
proc processSlotsUntilEndCurrentEpoch(state: var ForkedHashedBeaconState) =
|
proc processSlotsUntilEndCurrentEpoch(state: var ForkedHashedBeaconState) =
|
||||||
# Process all slots until the end of the last slot of the current epoch
|
# Process all slots until the end of the last slot of the current epoch
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
let slot =
|
let slot =
|
||||||
getStateField(state, slot) + SLOTS_PER_EPOCH -
|
getStateField(state, slot) + SLOTS_PER_EPOCH -
|
||||||
(getStateField(state, slot) mod SLOTS_PER_EPOCH)
|
(getStateField(state, slot) mod SLOTS_PER_EPOCH)
|
||||||
|
|
||||||
# Transition to slot before the epoch state transition
|
# Transition to slot before the epoch state transition
|
||||||
discard process_slots(defaultRuntimeConfig, state, slot - 1, cache, rewards, {})
|
discard process_slots(defaultRuntimeConfig, state, slot - 1, cache, info, {})
|
||||||
|
|
||||||
# For the last slot of the epoch,
|
# For the last slot of the epoch,
|
||||||
# only process_slot without process_epoch
|
# only process_slot without process_epoch
|
||||||
|
@ -34,9 +34,9 @@ proc transitionEpochUntilJustificationFinalization*(state: var ForkedHashedBeaco
|
||||||
|
|
||||||
var
|
var
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info: phase0.EpochInfo
|
||||||
|
|
||||||
rewards.init(state.hbsPhase0.data)
|
info.init(state.hbsPhase0.data)
|
||||||
rewards.process_attestations(state.hbsPhase0.data, cache)
|
info.process_attestations(state.hbsPhase0.data, cache)
|
||||||
process_justification_and_finalization(
|
process_justification_and_finalization(
|
||||||
state.hbsPhase0.data, rewards.total_balances)
|
state.hbsPhase0.data, info.total_balances)
|
||||||
|
|
|
@ -65,11 +65,11 @@ suite "Attestation pool processing" & preset():
|
||||||
pool = newClone(AttestationPool.init(dag, quarantine))
|
pool = newClone(AttestationPool.init(dag, quarantine))
|
||||||
state = newClone(dag.headState)
|
state = newClone(dag.headState)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards: RewardInfo
|
info = ForkedEpochInfo()
|
||||||
# Slot 0 is a finalized slot - won't be making attestations for it..
|
# Slot 0 is a finalized slot - won't be making attestations for it..
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, rewards,
|
dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, info,
|
||||||
{})
|
{})
|
||||||
|
|
||||||
test "Can add and retrieve simple attestations" & preset():
|
test "Can add and retrieve simple attestations" & preset():
|
||||||
|
@ -102,7 +102,7 @@ suite "Attestation pool processing" & preset():
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, state.data,
|
defaultRuntimeConfig, state.data,
|
||||||
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
|
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
|
||||||
rewards, {})
|
info, {})
|
||||||
|
|
||||||
let attestations = pool[].getAttestationsForBlock(state.data, cache)
|
let attestations = pool[].getAttestationsForBlock(state.data, cache)
|
||||||
|
|
||||||
|
@ -122,7 +122,7 @@ suite "Attestation pool processing" & preset():
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, state.data,
|
defaultRuntimeConfig, state.data,
|
||||||
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
|
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
|
||||||
rewards, {})
|
info, {})
|
||||||
|
|
||||||
check:
|
check:
|
||||||
# shouldn't include already-included attestations
|
# shouldn't include already-included attestations
|
||||||
|
@ -200,7 +200,7 @@ suite "Attestation pool processing" & preset():
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, state.data,
|
defaultRuntimeConfig, state.data,
|
||||||
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
|
getStateField(state.data, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
|
||||||
rewards, {})
|
info, {})
|
||||||
|
|
||||||
check:
|
check:
|
||||||
pool[].getAttestationsForBlock(state.data, cache).len() == 2
|
pool[].getAttestationsForBlock(state.data, cache).len() == 2
|
||||||
|
@ -247,7 +247,7 @@ suite "Attestation pool processing" & preset():
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, state.data,
|
defaultRuntimeConfig, state.data,
|
||||||
getStateField(state.data, slot) + 1, cache, rewards, {})
|
getStateField(state.data, slot) + 1, cache, info, {})
|
||||||
|
|
||||||
doAssert attestations.uint64 > MAX_ATTESTATIONS,
|
doAssert attestations.uint64 > MAX_ATTESTATIONS,
|
||||||
"6*SLOTS_PER_EPOCH validators > 128 mainnet MAX_ATTESTATIONS"
|
"6*SLOTS_PER_EPOCH validators > 128 mainnet MAX_ATTESTATIONS"
|
||||||
|
@ -269,7 +269,7 @@ suite "Attestation pool processing" & preset():
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1,
|
defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1,
|
||||||
cache, rewards, {})
|
cache, info, {})
|
||||||
|
|
||||||
let
|
let
|
||||||
bc1 = get_beacon_committee(state[].data,
|
bc1 = get_beacon_committee(state[].data,
|
||||||
|
@ -284,7 +284,7 @@ suite "Attestation pool processing" & preset():
|
||||||
|
|
||||||
discard process_slots(
|
discard process_slots(
|
||||||
defaultRuntimeConfig, state.data,
|
defaultRuntimeConfig, state.data,
|
||||||
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards, {})
|
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {})
|
||||||
|
|
||||||
let attestations = pool[].getAttestationsForBlock(state.data, cache)
|
let attestations = pool[].getAttestationsForBlock(state.data, cache)
|
||||||
|
|
||||||
|
@ -310,7 +310,7 @@ suite "Attestation pool processing" & preset():
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, state.data,
|
defaultRuntimeConfig, state.data,
|
||||||
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards, {})
|
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {})
|
||||||
|
|
||||||
let attestations = pool[].getAttestationsForBlock(state.data, cache)
|
let attestations = pool[].getAttestationsForBlock(state.data, cache)
|
||||||
|
|
||||||
|
@ -339,7 +339,7 @@ suite "Attestation pool processing" & preset():
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, state.data,
|
defaultRuntimeConfig, state.data,
|
||||||
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards, {})
|
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {})
|
||||||
|
|
||||||
let attestations = pool[].getAttestationsForBlock(state.data, cache)
|
let attestations = pool[].getAttestationsForBlock(state.data, cache)
|
||||||
|
|
||||||
|
@ -367,7 +367,7 @@ suite "Attestation pool processing" & preset():
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, state.data,
|
defaultRuntimeConfig, state.data,
|
||||||
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards, {})
|
MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, info, {})
|
||||||
|
|
||||||
let attestations = pool[].getAttestationsForBlock(state.data, cache)
|
let attestations = pool[].getAttestationsForBlock(state.data, cache)
|
||||||
|
|
||||||
|
|
|
@ -125,7 +125,7 @@ suite "Block pool processing" & preset():
|
||||||
nilPhase0Callback: OnPhase0BlockAdded
|
nilPhase0Callback: OnPhase0BlockAdded
|
||||||
state = newClone(dag.headState.data)
|
state = newClone(dag.headState.data)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
|
att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
|
||||||
b1 = addTestBlock(state[], dag.tail.root, cache, attestations = att0).phase0Block
|
b1 = addTestBlock(state[], dag.tail.root, cache, attestations = att0).phase0Block
|
||||||
b2 = addTestBlock(state[], b1.root, cache).phase0Block
|
b2 = addTestBlock(state[], b1.root, cache).phase0Block
|
||||||
|
@ -178,7 +178,7 @@ suite "Block pool processing" & preset():
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, state[], getStateField(state[], slot) + 1, cache,
|
defaultRuntimeConfig, state[], getStateField(state[], slot) + 1, cache,
|
||||||
rewards, {})
|
info, {})
|
||||||
|
|
||||||
let
|
let
|
||||||
b4 = addTestBlock(state[], b2.root, cache).phase0Block
|
b4 = addTestBlock(state[], b2.root, cache).phase0Block
|
||||||
|
@ -352,7 +352,7 @@ suite "chain DAG finalization tests" & preset():
|
||||||
quarantine = QuarantineRef.init(keys.newRng(), taskpool)
|
quarantine = QuarantineRef.init(keys.newRng(), taskpool)
|
||||||
nilPhase0Callback: OnPhase0BlockAdded
|
nilPhase0Callback: OnPhase0BlockAdded
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
|
|
||||||
test "prune heads on finalization" & preset():
|
test "prune heads on finalization" & preset():
|
||||||
# Create a fork that will not be taken
|
# Create a fork that will not be taken
|
||||||
|
@ -363,7 +363,7 @@ suite "chain DAG finalization tests" & preset():
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, tmpState[],
|
defaultRuntimeConfig, tmpState[],
|
||||||
getStateField(tmpState[], slot) + (5 * SLOTS_PER_EPOCH).uint64,
|
getStateField(tmpState[], slot) + (5 * SLOTS_PER_EPOCH).uint64,
|
||||||
cache, rewards, {})
|
cache, info, {})
|
||||||
|
|
||||||
let lateBlock = addTestBlock(tmpState[], dag.head.root, cache).phase0Block
|
let lateBlock = addTestBlock(tmpState[], dag.head.root, cache).phase0Block
|
||||||
block:
|
block:
|
||||||
|
@ -466,7 +466,7 @@ suite "chain DAG finalization tests" & preset():
|
||||||
|
|
||||||
doAssert process_slots(
|
doAssert process_slots(
|
||||||
defaultRuntimeConfig, prestate[], getStateField(prestate[], slot) + 1,
|
defaultRuntimeConfig, prestate[], getStateField(prestate[], slot) + 1,
|
||||||
cache, rewards, {})
|
cache, info, {})
|
||||||
|
|
||||||
# create another block, orphaning the head
|
# create another block, orphaning the head
|
||||||
let blck = makeTestBlock(prestate[], dag.head.parent.root, cache).phase0Block
|
let blck = makeTestBlock(prestate[], dag.head.parent.root, cache).phase0Block
|
||||||
|
@ -495,7 +495,7 @@ suite "chain DAG finalization tests" & preset():
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, dag.headState.data, Slot(SLOTS_PER_EPOCH * 6 + 2),
|
defaultRuntimeConfig, dag.headState.data, Slot(SLOTS_PER_EPOCH * 6 + 2),
|
||||||
cache, rewards, {})
|
cache, info, {})
|
||||||
|
|
||||||
var blck = makeTestBlock(
|
var blck = makeTestBlock(
|
||||||
dag.headState.data, dag.head.root, cache,
|
dag.headState.data, dag.head.root, cache,
|
||||||
|
@ -586,7 +586,7 @@ suite "Diverging hardforks":
|
||||||
nilPhase0Callback: OnPhase0BlockAdded
|
nilPhase0Callback: OnPhase0BlockAdded
|
||||||
state = newClone(dag.headState.data)
|
state = newClone(dag.headState.data)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
blck = makeTestBlock(dag.headState.data, dag.head.root, cache)
|
blck = makeTestBlock(dag.headState.data, dag.head.root, cache)
|
||||||
tmpState = assignClone(dag.headState.data)
|
tmpState = assignClone(dag.headState.data)
|
||||||
|
|
||||||
|
@ -595,7 +595,7 @@ suite "Diverging hardforks":
|
||||||
process_slots(
|
process_slots(
|
||||||
phase0RuntimeConfig, tmpState[],
|
phase0RuntimeConfig, tmpState[],
|
||||||
getStateField(tmpState[], slot) + (3 * SLOTS_PER_EPOCH).uint64,
|
getStateField(tmpState[], slot) + (3 * SLOTS_PER_EPOCH).uint64,
|
||||||
cache, rewards, {})
|
cache, info, {})
|
||||||
|
|
||||||
# Because the first block is after the Altair transition, the only block in
|
# Because the first block is after the Altair transition, the only block in
|
||||||
# common is the tail block
|
# common is the tail block
|
||||||
|
@ -614,7 +614,7 @@ suite "Diverging hardforks":
|
||||||
process_slots(
|
process_slots(
|
||||||
phase0RuntimeConfig, tmpState[],
|
phase0RuntimeConfig, tmpState[],
|
||||||
getStateField(tmpState[], slot) + SLOTS_PER_EPOCH.uint64,
|
getStateField(tmpState[], slot) + SLOTS_PER_EPOCH.uint64,
|
||||||
cache, rewards, {})
|
cache, info, {})
|
||||||
|
|
||||||
# There's a block in the shared-correct phase0 hardfork, before epoch 2
|
# There's a block in the shared-correct phase0 hardfork, before epoch 2
|
||||||
var
|
var
|
||||||
|
@ -626,7 +626,7 @@ suite "Diverging hardforks":
|
||||||
process_slots(
|
process_slots(
|
||||||
phase0RuntimeConfig, tmpState[],
|
phase0RuntimeConfig, tmpState[],
|
||||||
getStateField(tmpState[], slot) + (3 * SLOTS_PER_EPOCH).uint64,
|
getStateField(tmpState[], slot) + (3 * SLOTS_PER_EPOCH).uint64,
|
||||||
cache, rewards, {})
|
cache, info, {})
|
||||||
|
|
||||||
var
|
var
|
||||||
b2 = addTestBlock(tmpState[], b1.root, cache).phase0Block
|
b2 = addTestBlock(tmpState[], b1.root, cache).phase0Block
|
||||||
|
|
|
@ -42,13 +42,13 @@ suite "Gossip validation " & preset():
|
||||||
pool = newClone(AttestationPool.init(dag, quarantine))
|
pool = newClone(AttestationPool.init(dag, quarantine))
|
||||||
state = newClone(dag.headState)
|
state = newClone(dag.headState)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
batchCrypto = BatchCrypto.new(keys.newRng(), eager = proc(): bool = false, taskpool)
|
batchCrypto = BatchCrypto.new(keys.newRng(), eager = proc(): bool = false, taskpool)
|
||||||
# Slot 0 is a finalized slot - won't be making attestations for it..
|
# Slot 0 is a finalized slot - won't be making attestations for it..
|
||||||
check:
|
check:
|
||||||
process_slots(
|
process_slots(
|
||||||
defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1,
|
defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1,
|
||||||
cache, rewards, {})
|
cache, info, {})
|
||||||
|
|
||||||
test "Any committee index is valid":
|
test "Any committee index is valid":
|
||||||
template committee(idx: uint64): untyped =
|
template committee(idx: uint64): untyped =
|
||||||
|
|
|
@ -11,7 +11,7 @@ import
|
||||||
# Status libraries
|
# Status libraries
|
||||||
stew/bitops2,
|
stew/bitops2,
|
||||||
# Beacon chain internals
|
# Beacon chain internals
|
||||||
../beacon_chain/spec/[helpers, state_transition],
|
../beacon_chain/spec/[forks, helpers, state_transition],
|
||||||
# Test utilities
|
# Test utilities
|
||||||
./unittest2, mocking/mock_genesis
|
./unittest2, mocking/mock_genesis
|
||||||
|
|
||||||
|
@ -29,9 +29,9 @@ suite "Spec helpers":
|
||||||
var
|
var
|
||||||
forked = newClone(initGenesisState())
|
forked = newClone(initGenesisState())
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
doAssert process_slots(defaultRuntimeConfig, forked[],
|
doAssert process_slots(defaultRuntimeConfig, forked[],
|
||||||
Slot(100), cache, rewards, flags = {})
|
Slot(100), cache, info, flags = {})
|
||||||
|
|
||||||
let
|
let
|
||||||
state = forked[].hbsPhase0.data
|
state = forked[].hbsPhase0.data
|
||||||
|
@ -46,10 +46,10 @@ suite "Spec helpers":
|
||||||
let depth = log2trunc(i)
|
let depth = log2trunc(i)
|
||||||
var proof = newSeq[Eth2Digest](depth)
|
var proof = newSeq[Eth2Digest](depth)
|
||||||
build_proof(state, i, proof)
|
build_proof(state, i, proof)
|
||||||
check: is_valid_merkle_branch(hash_tree_root(fieldVar), proof,
|
check: is_valid_merkle_branch(hash_tree_root(fieldVar), proof,
|
||||||
depth, get_subtree_index(i), root)
|
depth, get_subtree_index(i), root)
|
||||||
when fieldVar is object and not (fieldVar is Eth2Digest):
|
when fieldVar is object and not (fieldVar is Eth2Digest):
|
||||||
let
|
let
|
||||||
numChildLeaves = fieldVar.numLeaves
|
numChildLeaves = fieldVar.numLeaves
|
||||||
childDepth = log2trunc(numChildLeaves)
|
childDepth = log2trunc(numChildLeaves)
|
||||||
process(fieldVar, i shl childDepth)
|
process(fieldVar, i shl childDepth)
|
||||||
|
|
|
@ -15,7 +15,7 @@ import
|
||||||
type
|
type
|
||||||
MockPrivKeysT = object
|
MockPrivKeysT = object
|
||||||
MockPubKeysT = object
|
MockPubKeysT = object
|
||||||
const
|
const
|
||||||
MockPrivKeys* = MockPrivKeysT()
|
MockPrivKeys* = MockPrivKeysT()
|
||||||
MockPubKeys* = MockPubKeysT()
|
MockPubKeys* = MockPubKeysT()
|
||||||
|
|
||||||
|
@ -36,8 +36,8 @@ func makeFakeHash*(i: int): Eth2Digest =
|
||||||
copyMem(addr result.data[0], addr bytes[0], sizeof(bytes))
|
copyMem(addr result.data[0], addr bytes[0], sizeof(bytes))
|
||||||
|
|
||||||
func makeDeposit*(
|
func makeDeposit*(
|
||||||
i: int,
|
i: int,
|
||||||
flags: UpdateFlags = {},
|
flags: UpdateFlags = {},
|
||||||
cfg = defaultRuntimeConfig): DepositData =
|
cfg = defaultRuntimeConfig): DepositData =
|
||||||
let
|
let
|
||||||
privkey = MockPrivKeys[i.ValidatorIndex]
|
privkey = MockPrivKeys[i.ValidatorIndex]
|
||||||
|
@ -84,9 +84,9 @@ proc addTestBlock*(
|
||||||
cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock =
|
cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock =
|
||||||
# Create and add a block to state - state will advance by one slot!
|
# Create and add a block to state - state will advance by one slot!
|
||||||
if nextSlot:
|
if nextSlot:
|
||||||
var rewards: RewardInfo
|
var info = ForkedEpochInfo()
|
||||||
doAssert process_slots(
|
doAssert process_slots(
|
||||||
cfg, state, getStateField(state, slot) + 1, cache, rewards, flags)
|
cfg, state, getStateField(state, slot) + 1, cache, info, flags)
|
||||||
|
|
||||||
let
|
let
|
||||||
proposer_index = get_beacon_proposer_index(
|
proposer_index = get_beacon_proposer_index(
|
||||||
|
@ -101,7 +101,7 @@ proc addTestBlock*(
|
||||||
else:
|
else:
|
||||||
ValidatorSig()
|
ValidatorSig()
|
||||||
|
|
||||||
let
|
let
|
||||||
message = makeBeaconBlock(
|
message = makeBeaconBlock(
|
||||||
cfg,
|
cfg,
|
||||||
state,
|
state,
|
||||||
|
|
|
@ -60,7 +60,7 @@ proc getTestStates*(
|
||||||
var
|
var
|
||||||
tmpState = assignClone(initialState)
|
tmpState = assignClone(initialState)
|
||||||
cache = StateCache()
|
cache = StateCache()
|
||||||
rewards = RewardInfo()
|
info = ForkedEpochInfo()
|
||||||
cfg = defaultRuntimeConfig
|
cfg = defaultRuntimeConfig
|
||||||
|
|
||||||
if stateFork in [forkAltair, forkMerge]:
|
if stateFork in [forkAltair, forkMerge]:
|
||||||
|
@ -73,7 +73,7 @@ proc getTestStates*(
|
||||||
let slot = epoch.Epoch.compute_start_slot_at_epoch
|
let slot = epoch.Epoch.compute_start_slot_at_epoch
|
||||||
if getStateField(tmpState[], slot) < slot:
|
if getStateField(tmpState[], slot) < slot:
|
||||||
doAssert process_slots(
|
doAssert process_slots(
|
||||||
cfg, tmpState[], slot, cache, rewards, {})
|
cfg, tmpState[], slot, cache, info, {})
|
||||||
|
|
||||||
if i mod 3 == 0:
|
if i mod 3 == 0:
|
||||||
withState(tmpState[]):
|
withState(tmpState[]):
|
||||||
|
|
Loading…
Reference in New Issue