add attestation stats tool to ncli_db (#2539)
This also makes future efforts to provide metrics and logs for attestation efficiency easier.

* Export rewards from epoch transition
* Use less memory for reward calculation (bool -> set[enum], field alignment)
* Reuse reward memory when replaying, avoiding spike
* Allow replaying any range in ncli_db benchmark
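The `bool -> set[enum]` change is a plain memory-layout optimization. A minimal, self-contained Nim sketch of the idea (type and flag names here are illustrative, not the ones used in the diff): a set over a small enum is stored as a bit field, so several flags share one byte instead of one byte per `bool` field.

```nim
type
  Flag = enum
    isSlashed, canWithdraw, wasActivePrev

  StatusWithBools = object
    slashed, withdrawable, activePrev: bool   # one byte per bool

  StatusWithFlags = object
    flags: set[Flag]                          # one byte covers up to 8 flags

# Prints the struct sizes side by side, e.g. "3 vs 1".
echo sizeof(StatusWithBools), " vs ", sizeof(StatusWithFlags)
```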
This commit is contained in:
parent 5cd5da74c4
commit 646923c3dd
@ -165,8 +165,9 @@ proc addRawBlockCheckStateTransition(
|
|||
doAssert v.addr == addr dag.clearanceState.data
|
||||
assign(dag.clearanceState, dag.headState)
|
||||
|
||||
var rewards: RewardInfo
|
||||
if not state_transition(dag.runtimePreset, dag.clearanceState.data, signedBlock,
|
||||
cache, dag.updateFlags + {slotProcessed}, restore):
|
||||
cache, rewards, dag.updateFlags + {slotProcessed}, restore):
|
||||
info "Invalid block"
|
||||
|
||||
return (ValidationResult.Reject, Invalid)
|
||||
|
|
|
@ -690,14 +690,14 @@ proc get*(dag: ChainDAGRef, root: Eth2Digest): Option[BlockData] =
|
|||
|
||||
proc advanceSlots(
|
||||
dag: ChainDAGRef, state: var StateData, slot: Slot, save: bool,
|
||||
cache: var StateCache) =
|
||||
cache: var StateCache, rewards: var RewardInfo) =
|
||||
# Given a state, advance it zero or more slots by applying empty slot
|
||||
# processing - the state must be positioned at a slot before or equal to the
|
||||
# target
|
||||
doAssert getStateField(state, slot) <= slot
|
||||
while getStateField(state, slot) < slot:
|
||||
doAssert process_slots(
|
||||
state.data, getStateField(state, slot) + 1, cache,
|
||||
state.data, getStateField(state, slot) + 1, cache, rewards,
|
||||
dag.updateFlags),
|
||||
"process_slots shouldn't fail when state slot is correct"
|
||||
if save:
|
||||
|
@ -706,7 +706,7 @@ proc advanceSlots(
|
|||
proc applyBlock(
|
||||
dag: ChainDAGRef,
|
||||
state: var StateData, blck: BlockData, flags: UpdateFlags,
|
||||
cache: var StateCache): bool =
|
||||
cache: var StateCache, rewards: var RewardInfo): bool =
|
||||
# Apply a single block to the state - the state must be positioned at the
|
||||
# parent of the block with a slot lower than the one of the block being
|
||||
# applied
|
||||
|
@ -721,7 +721,7 @@ proc applyBlock(
|
|||
|
||||
let ok = state_transition(
|
||||
dag.runtimePreset, state.data, blck.data,
|
||||
cache, flags + dag.updateFlags + {slotProcessed}, restore)
|
||||
cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
|
||||
if ok:
|
||||
state.blck = blck.refs
|
||||
|
||||
|
@ -832,6 +832,7 @@ proc updateStateData*(
|
|||
let
|
||||
startSlot {.used.} = getStateField(state, slot) # used in logs below
|
||||
startRoot {.used.} = state.data.root
|
||||
var rewards: RewardInfo
|
||||
# Time to replay all the blocks between then and now
|
||||
for i in countdown(ancestors.len - 1, 0):
|
||||
# Because the ancestors are in the database, there's no need to persist them
|
||||
|
@ -839,13 +840,13 @@ proc updateStateData*(
|
|||
# database, we can skip certain checks that have already been performed
|
||||
# before adding the block to the database.
|
||||
let ok =
|
||||
dag.applyBlock(state, dag.get(ancestors[i]), {}, cache)
|
||||
dag.applyBlock(state, dag.get(ancestors[i]), {}, cache, rewards)
|
||||
doAssert ok, "Blocks in database should never fail to apply.."
|
||||
|
||||
loadStateCache(dag, cache, bs.blck, bs.slot.epoch)
|
||||
|
||||
# ...and make sure to process empty slots as requested
|
||||
dag.advanceSlots(state, bs.slot, save, cache)
|
||||
dag.advanceSlots(state, bs.slot, save, cache, rewards)
|
||||
|
||||
let diff = Moment.now() - startTime
|
||||
|
||||
|
|
|
@ -667,6 +667,89 @@ type
|
|||
DoppelgangerProtection* = object
|
||||
broadcastStartEpoch*: Epoch
|
||||
|
||||
type
|
||||
# Caches for computing justification, rewards and penalties - based on
|
||||
# implementation in Lighthouse:
|
||||
# https://github.com/sigp/lighthouse/blob/master/consensus/state_processing/src/per_epoch_processing/validator_statuses.rs
|
||||
RewardDelta* = object
|
||||
rewards*: Gwei
|
||||
penalties*: Gwei
|
||||
|
||||
InclusionInfo* = object
|
||||
# The distance between the attestation slot and the slot that attestation
|
||||
# was included in block.
|
||||
delay*: uint64
|
||||
# The index of the proposer at the slot where the attestation was included.
|
||||
proposer_index*: uint64
|
||||
|
||||
RewardFlags* {.pure.} = enum
|
||||
isSlashed
|
||||
canWithdrawInCurrentEpoch
|
||||
isActiveInPreviousEpoch
|
||||
isCurrentEpochAttester
|
||||
|
||||
# the validator's beacon block root attestation for the first slot
|
||||
# of the _current_ epoch matches the block root known to the state.
|
||||
isCurrentEpochTargetAttester
|
||||
|
||||
# Set if the validator's beacon block root attestation for the first slot of
|
||||
# the _previous_ epoch matches the block root known to the state.
|
||||
# Information used to reward the block producer of this validator's
|
||||
# earliest-included attestation.
|
||||
isPreviousEpochTargetAttester
|
||||
# True if the validator's beacon block root attestation in the _previous_
|
||||
# epoch at the attestation's slot (`attestation_data.slot`) matches the
|
||||
# block root known to the state.
|
||||
isPreviousEpochHeadAttester
|
||||
|
||||
RewardStatus* = object
|
||||
## Data detailing the status of a single validator with respect to the
|
||||
## reward processing
|
||||
|
||||
# The validator's effective balance in the _current_ epoch.
|
||||
current_epoch_effective_balance*: uint64
|
||||
|
||||
# True if the validator had an attestation included in the _previous_ epoch.
|
||||
is_previous_epoch_attester*: Option[InclusionInfo]
|
||||
|
||||
inclusion_info*: Option[InclusionInfo]
|
||||
|
||||
# Total rewards and penalties for this validator
|
||||
delta*: RewardDelta
|
||||
|
||||
flags*: set[RewardFlags]
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_total_balance
|
||||
TotalBalances* = object
|
||||
# The total effective balance of all active validators during the _current_
|
||||
# epoch.
|
||||
current_epoch_raw*: Gwei
|
||||
# The total effective balance of all active validators during the _previous_
|
||||
# epoch.
|
||||
previous_epoch_raw*: Gwei
|
||||
# The total effective balance of all validators who attested during the
|
||||
# _current_ epoch.
|
||||
current_epoch_attesters_raw*: Gwei
|
||||
# The total effective balance of all validators who attested during the
|
||||
# _current_ epoch and agreed with the state about the beacon block at the
|
||||
# first slot of the _current_ epoch.
|
||||
current_epoch_target_attesters_raw*: Gwei
|
||||
# The total effective balance of all validators who attested during the
|
||||
# _previous_ epoch.
|
||||
previous_epoch_attesters_raw*: Gwei
|
||||
# The total effective balance of all validators who attested during the
|
||||
# _previous_ epoch and agreed with the state about the beacon block at the
|
||||
# first slot of the _previous_ epoch.
|
||||
previous_epoch_target_attesters_raw*: Gwei
|
||||
# The total effective balance of all validators who attested during the
|
||||
# _previous_ epoch and agreed with the state about the beacon block at the
|
||||
# time of attestation.
|
||||
previous_epoch_head_attesters_raw*: Gwei
|
||||
|
||||
RewardInfo* = object
|
||||
statuses*: seq[RewardStatus]
|
||||
total_balances*: TotalBalances
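Hedged usage sketch: with the new signatures further down in this diff, a caller passes a `RewardInfo` into `process_slots`/`state_transition` and reads the per-validator statuses afterwards. The fragment below assumes an already-loaded `HashedBeaconState` named `state` and the surrounding spec-module imports, so it is not runnable standalone:

```nim
var
  cache = StateCache()
  rewards = RewardInfo()

# Advance one full epoch; on the epoch transition `rewards` is filled in.
if process_slots(state, state.data.slot + SLOTS_PER_EPOCH, cache, rewards):
  # After an epoch transition the statuses describe the previous epoch.
  for i, status in rewards.statuses:
    if RewardFlags.isPreviousEpochTargetAttester in status.flags:
      echo "validator ", i, " matched target, reward ", status.delta.rewards
```

The new `validatorPerf` command in `ncli_db` below does essentially this, accumulating hits and misses per validator.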
|
||||
|
||||
func getImmutableValidatorData*(validator: Validator): ImmutableValidatorData =
|
||||
ImmutableValidatorData(
|
||||
pubkey: validator.pubkey,
|
||||
|
|
|
@ -127,23 +127,30 @@ func clear_epoch_from_cache(cache: var StateCache, epoch: Epoch) =
|
|||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
|
||||
proc advance_slot(
|
||||
state: var BeaconState, previous_slot_state_root: Eth2Digest,
|
||||
flags: UpdateFlags, cache: var StateCache) {.nbench.} =
|
||||
flags: UpdateFlags, cache: var StateCache, rewards: var RewardInfo) {.nbench.} =
|
||||
# Do the per-slot and potentially the per-epoch processing, then bump the
|
||||
# slot number - we've now arrived at the slot state on top of which a block
|
||||
# optionally can be applied.
|
||||
process_slot(state, previous_slot_state_root)
|
||||
|
||||
rewards.statuses.setLen(0)
|
||||
rewards.total_balances = TotalBalances()
|
||||
|
||||
let is_epoch_transition = (state.slot + 1).isEpoch
|
||||
if is_epoch_transition:
|
||||
# Note: Genesis epoch = 0, no need to test if before Genesis
|
||||
process_epoch(state, flags, cache)
|
||||
process_epoch(state, flags, cache, rewards)
|
||||
clear_epoch_from_cache(cache, (state.slot + 1).compute_epoch_at_slot)
|
||||
|
||||
state.slot += 1
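`advance_slot` resets `rewards` with `setLen(0)` and a fresh `TotalBalances` rather than allocating a new object. A small self-contained Nim illustration of why that avoids the replay allocation spike mentioned in the commit message (the numbers are made up): shrinking a seq keeps its backing allocation, so the next epoch reuses it.

```nim
var statuses = newSeqOfCap[int](1024)
for epoch in 0 ..< 3:
  statuses.setLen(0)        # drop contents, keep capacity
  for i in 0 ..< 1000:
    statuses.add i          # refilled without a fresh allocation each epoch
echo statuses.len           # 1000
```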
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
|
||||
proc process_slots*(state: var HashedBeaconState, slot: Slot,
|
||||
cache: var StateCache, flags: UpdateFlags = {}): bool {.nbench.} =
|
||||
cache: var StateCache, rewards: var RewardInfo,
|
||||
flags: UpdateFlags = {}): bool {.nbench.} =
|
||||
## Process one or more slot transitions without blocks - if the slot transition
|
||||
## passes an epoch boundary, epoch processing will run and `rewards` will be
|
||||
## updated, else it will be cleared
|
||||
if not (state.data.slot < slot):
|
||||
if slotProcessed notin flags or state.data.slot != slot:
|
||||
notice(
|
||||
|
@ -156,7 +163,7 @@ proc process_slots*(state: var HashedBeaconState, slot: Slot,
|
|||
|
||||
# Catch up to the target slot
|
||||
while state.data.slot < slot:
|
||||
advance_slot(state.data, state.root, flags, cache)
|
||||
advance_slot(state.data, state.root, flags, cache, rewards)
|
||||
|
||||
# The root must be updated on every slot update, or the next `process_slot`
|
||||
# will be incorrect
|
||||
|
@ -170,7 +177,7 @@ proc noRollback*(state: var HashedBeaconState) =
|
|||
proc state_transition*(
|
||||
preset: RuntimePreset,
|
||||
state: var HashedBeaconState, signedBlock: SomeSignedBeaconBlock,
|
||||
cache: var StateCache, flags: UpdateFlags,
|
||||
cache: var StateCache, rewards: var RewardInfo, flags: UpdateFlags,
|
||||
rollback: RollbackHashedProc): bool {.nbench.} =
|
||||
## Apply a block to the state, advancing the slot counter as necessary. The
|
||||
## given state must be of a lower slot, or, in case the `slotProcessed` flag
|
||||
|
@ -200,7 +207,7 @@ proc state_transition*(
|
|||
|
||||
# Update the state so its slot matches that of the block
|
||||
while state.data.slot < slot:
|
||||
advance_slot(state.data, state.root, flags, cache)
|
||||
advance_slot(state.data, state.root, flags, cache, rewards)
|
||||
|
||||
if state.data.slot < slot:
|
||||
# Don't update state root for the slot of the block
|
||||
|
|
|
@ -32,86 +32,6 @@ import
|
|||
|
||||
logScope: topics = "consens"
|
||||
|
||||
type
|
||||
# Caches for computing justificiation, rewards and penalties - based on
|
||||
# implementation in Lighthouse:
|
||||
# https://github.com/sigp/lighthouse/blob/master/consensus/state_processing/src/per_epoch_processing/validator_statuses.rs
|
||||
Delta* = object
|
||||
rewards*: Gwei
|
||||
penalties*: Gwei
|
||||
|
||||
InclusionInfo = object
|
||||
# The distance between the attestation slot and the slot that attestation
|
||||
# was included in block.
|
||||
delay: uint64
|
||||
# The index of the proposer at the slot where the attestation was included.
|
||||
proposer_index: uint64
|
||||
|
||||
ValidatorStatus = object
|
||||
# True if the validator has been slashed, ever.
|
||||
is_slashed: bool
|
||||
# True if the validator can withdraw in the current epoch.
|
||||
is_withdrawable_in_current_epoch: bool
|
||||
# True if the validator was active in the state's _current_ epoch.
|
||||
is_active_in_current_epoch: bool
|
||||
# True if the validator was active in the state's _previous_ epoch.
|
||||
is_active_in_previous_epoch: bool
|
||||
# The validator's effective balance in the _current_ epoch.
|
||||
current_epoch_effective_balance: uint64
|
||||
|
||||
# True if the validator had an attestation included in the _current_ epoch.
|
||||
is_current_epoch_attester: bool
|
||||
# True if the validator's beacon block root attestation for the first slot
|
||||
# of the _current_ epoch matches the block root known to the state.
|
||||
is_current_epoch_target_attester: bool
|
||||
# True if the validator had an attestation included in the _previous_ epoch.
|
||||
is_previous_epoch_attester: Option[InclusionInfo]
|
||||
# Set if the validator's beacon block root attestation for the first slot of
|
||||
# the _previous_ epoch matches the block root known to the state.
|
||||
# Information used to reward the block producer of this validators
|
||||
# earliest-included attestation.
|
||||
is_previous_epoch_target_attester: bool
|
||||
# True if the validator's beacon block root attestation in the _previous_
|
||||
# epoch at the attestation's slot (`attestation_data.slot`) matches the
|
||||
# block root known to the state.
|
||||
is_previous_epoch_head_attester: bool
|
||||
|
||||
inclusion_info: Option[InclusionInfo]
|
||||
|
||||
# Total rewards and penalties for this validator
|
||||
delta: Delta
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_total_balance
|
||||
TotalBalances = object
|
||||
# The total effective balance of all active validators during the _current_
|
||||
# epoch.
|
||||
current_epoch_raw: Gwei
|
||||
# The total effective balance of all active validators during the _previous_
|
||||
# epoch.
|
||||
previous_epoch_raw: Gwei
|
||||
# The total effective balance of all validators who attested during the
|
||||
# _current_ epoch.
|
||||
current_epoch_attesters_raw: Gwei
|
||||
# The total effective balance of all validators who attested during the
|
||||
# _current_ epoch and agreed with the state about the beacon block at the
|
||||
# first slot of the _current_ epoch.
|
||||
current_epoch_target_attesters_raw: Gwei
|
||||
# The total effective balance of all validators who attested during the
|
||||
# _previous_ epoch.
|
||||
previous_epoch_attesters_raw: Gwei
|
||||
# The total effective balance of all validators who attested during the
|
||||
# _previous_ epoch and agreed with the state about the beacon block at the
|
||||
# first slot of the _previous_ epoch.
|
||||
previous_epoch_target_attesters_raw: Gwei
|
||||
# The total effective balance of all validators who attested during the
|
||||
# _previous_ epoch and agreed with the state about the beacon block at the
|
||||
# time of attestation.
|
||||
previous_epoch_head_attesters_raw: Gwei
|
||||
|
||||
ValidatorStatuses* = object
|
||||
statuses*: seq[ValidatorStatus]
|
||||
total_balances*: TotalBalances
|
||||
|
||||
# Accessors that implement the max condition in `get_total_balance`:
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_total_balance
|
||||
template current_epoch*(v: TotalBalances): Gwei =
|
||||
|
@ -129,42 +49,48 @@ template previous_epoch_target_attesters*(v: TotalBalances): Gwei =
|
|||
template previous_epoch_head_attesters*(v: TotalBalances): Gwei =
|
||||
max(EFFECTIVE_BALANCE_INCREMENT, v.previous_epoch_head_attesters_raw)
|
||||
|
||||
func init*(T: type ValidatorStatuses, state: BeaconState): T =
|
||||
result.statuses = newSeq[ValidatorStatus](state.validators.len)
|
||||
func init*(rewards: var RewardInfo, state: BeaconState) =
|
||||
rewards.total_balances = TotalBalances()
|
||||
rewards.statuses.setLen(state.validators.len)
|
||||
|
||||
for i in 0..<state.validators.len:
|
||||
let v = unsafeAddr state.validators[i]
|
||||
result.statuses[i].is_slashed = v[].slashed
|
||||
result.statuses[i].is_withdrawable_in_current_epoch =
|
||||
state.get_current_epoch() >= v[].withdrawable_epoch
|
||||
result.statuses[i].current_epoch_effective_balance = v[].effective_balance
|
||||
var flags: set[RewardFlags]
|
||||
|
||||
if v[].slashed:
|
||||
flags.incl(isSlashed)
|
||||
if state.get_current_epoch() >= v[].withdrawable_epoch:
|
||||
flags.incl canWithdrawInCurrentEpoch
|
||||
|
||||
if v[].is_active_validator(state.get_current_epoch()):
|
||||
result.statuses[i].is_active_in_current_epoch = true
|
||||
result.total_balances.current_epoch_raw += v[].effective_balance
|
||||
rewards.total_balances.current_epoch_raw += v[].effective_balance
|
||||
|
||||
if v[].is_active_validator(state.get_previous_epoch()):
|
||||
result.statuses[i].is_active_in_previous_epoch = true
|
||||
result.total_balances.previous_epoch_raw += v[].effective_balance
|
||||
flags.incl isActiveInPreviousEpoch
|
||||
rewards.total_balances.previous_epoch_raw += v[].effective_balance
|
||||
|
||||
func add(a: var Delta, b: Delta) =
|
||||
rewards.statuses[i] = RewardStatus(
|
||||
current_epoch_effective_balance: v[].effective_balance,
|
||||
flags: flags,
|
||||
)
|
||||
|
||||
func add(a: var RewardDelta, b: RewardDelta) =
|
||||
a.rewards += b.rewards
|
||||
a.penalties += b.penalties
|
||||
|
||||
func process_attestation(
|
||||
self: var ValidatorStatuses, state: BeaconState, a: PendingAttestation,
|
||||
self: var RewardInfo, state: BeaconState, a: PendingAttestation,
|
||||
cache: var StateCache) =
|
||||
# Collect information about the attestation
|
||||
var
|
||||
is_current_epoch_attester, is_current_epoch_target_attester: bool
|
||||
is_previous_epoch_target_attester: bool
|
||||
is_previous_epoch_head_attester: bool
|
||||
flags: set[RewardFlags]
|
||||
is_previous_epoch_attester: Option[InclusionInfo]
|
||||
|
||||
if a.data.target.epoch == state.get_current_epoch():
|
||||
is_current_epoch_attester = true
|
||||
flags.incl isCurrentEpochAttester
|
||||
|
||||
if a.data.target.root == get_block_root(state, state.get_current_epoch()):
|
||||
is_current_epoch_target_attester = true;
|
||||
flags.incl isCurrentEpochTargetAttester
|
||||
|
||||
elif a.data.target.epoch == state.get_previous_epoch():
|
||||
is_previous_epoch_attester = some(InclusionInfo(
|
||||
|
@ -173,37 +99,28 @@ func process_attestation(
|
|||
))
|
||||
|
||||
if a.data.target.root == get_block_root(state, state.get_previous_epoch()):
|
||||
is_previous_epoch_target_attester = true;
|
||||
flags.incl isPreviousEpochTargetAttester
|
||||
|
||||
if a.data.beacon_block_root == get_block_root_at_slot(state, a.data.slot):
|
||||
is_previous_epoch_head_attester = true
|
||||
flags.incl isPreviousEpochHeadAttester
|
||||
|
||||
# Update the cache for all participants
|
||||
for validator_index in get_attesting_indices(
|
||||
state, a.data, a.aggregation_bits, cache):
|
||||
template v(): untyped = self.statuses[validator_index]
|
||||
if is_current_epoch_attester:
|
||||
v.is_current_epoch_attester = true
|
||||
|
||||
if is_current_epoch_target_attester:
|
||||
v.is_current_epoch_target_attester = true
|
||||
v.flags = v.flags + flags
|
||||
|
||||
if is_previous_epoch_attester.isSome:
|
||||
if v.is_previous_epoch_attester.isSome:
|
||||
if v.isPreviousEpochAttester.isSome:
|
||||
if is_previous_epoch_attester.get().delay <
|
||||
v.is_previous_epoch_attester.get().delay:
|
||||
v.is_previous_epoch_attester = is_previous_epoch_attester
|
||||
else:
|
||||
v.is_previous_epoch_attester = is_previous_epoch_attester
|
||||
|
||||
if is_previous_epoch_target_attester:
|
||||
v.is_previous_epoch_target_attester = true
|
||||
|
||||
if is_previous_epoch_head_attester:
|
||||
v.is_previous_epoch_head_attester = true
|
||||
|
||||
func process_attestations*(
|
||||
self: var ValidatorStatuses, state: BeaconState, cache: var StateCache) =
|
||||
self: var RewardInfo, state: BeaconState, cache: var StateCache) =
|
||||
# Walk state attestations and update the status information
|
||||
for a in state.previous_epoch_attestations:
|
||||
process_attestation(self, state, a, cache)
|
||||
|
@ -211,29 +128,30 @@ func process_attestations*(
|
|||
process_attestation(self, state, a, cache)
|
||||
|
||||
for idx, v in self.statuses:
|
||||
if v.is_slashed:
|
||||
if isSlashed in v.flags:
|
||||
continue
|
||||
|
||||
let validator_balance = state.validators[idx].effective_balance
|
||||
|
||||
if v.is_current_epoch_attester:
|
||||
if isCurrentEpochAttester in v.flags:
|
||||
self.total_balances.current_epoch_attesters_raw += validator_balance
|
||||
|
||||
if v.is_current_epoch_target_attester:
|
||||
if isCurrentEpochTargetAttester in v.flags:
|
||||
self.total_balances.current_epoch_target_attesters_raw += validator_balance
|
||||
|
||||
if v.is_previous_epoch_attester.isSome():
|
||||
self.total_balances.previous_epoch_attesters_raw += validator_balance
|
||||
|
||||
if v.is_previous_epoch_target_attester:
|
||||
if isPreviousEpochTargetAttester in v.flags:
|
||||
self.total_balances.previous_epoch_target_attesters_raw += validator_balance
|
||||
|
||||
if v.is_previous_epoch_head_attester:
|
||||
if isPreviousEpochHeadAttester in v.flags:
|
||||
self.total_balances.previous_epoch_head_attesters_raw += validator_balance
|
||||
|
||||
func is_eligible_validator*(validator: ValidatorStatus): bool =
|
||||
validator.is_active_in_previous_epoch or
|
||||
(validator.is_slashed and (not validator.is_withdrawable_in_current_epoch))
|
||||
func is_eligible_validator*(validator: RewardStatus): bool =
|
||||
isActiveInPreviousEpoch in validator.flags or
|
||||
(isSlashed in validator.flags and
|
||||
(canWithdrawInCurrentEpoch notin validator.flags))
|
||||
|
||||
# Spec
|
||||
# --------------------------------------------------------
|
||||
|
@ -369,80 +287,83 @@ func get_attestation_component_delta(is_unslashed_attester: bool,
|
|||
attesting_balance: Gwei,
|
||||
total_balance: Gwei,
|
||||
base_reward: uint64,
|
||||
finality_delay: uint64): Delta =
|
||||
finality_delay: uint64): RewardDelta =
|
||||
# Helper with shared logic for use by get source, target, and head deltas
|
||||
# functions
|
||||
if is_unslashed_attester:
|
||||
if is_in_inactivity_leak(finality_delay):
|
||||
# Since full base reward will be canceled out by inactivity penalty deltas,
|
||||
# optimal participation receives full base reward compensation here.
|
||||
Delta(rewards: base_reward)
|
||||
RewardDelta(rewards: base_reward)
|
||||
else:
|
||||
let reward_numerator =
|
||||
base_reward * (attesting_balance div EFFECTIVE_BALANCE_INCREMENT)
|
||||
Delta(rewards:
|
||||
RewardDelta(rewards:
|
||||
reward_numerator div (total_balance div EFFECTIVE_BALANCE_INCREMENT))
|
||||
else:
|
||||
Delta(penalties: base_reward)
|
||||
RewardDelta(penalties: base_reward)
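For intuition, a worked example of the unslashed, non-leak branch above, using the real `EFFECTIVE_BALANCE_INCREMENT` of 1 ETH in Gwei but otherwise made-up numbers:

```nim
const increment = 1_000_000_000'u64            # 1 ETH in Gwei
let
  baseReward       = 20_000'u64
  attestingBalance = 96'u64 * 32'u64 * increment   # 96 of 100 validators attested
  totalBalance     = 100'u64 * 32'u64 * increment
  rewardNumerator  = baseReward * (attestingBalance div increment)
  reward           = rewardNumerator div (totalBalance div increment)
assert reward == 19_200   # base_reward scaled by the attesting share (96%)
```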
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#components-of-attestation-deltas
|
||||
func get_source_delta*(validator: ValidatorStatus,
|
||||
func get_source_delta*(validator: RewardStatus,
|
||||
base_reward: uint64,
|
||||
total_balances: TotalBalances,
|
||||
finality_delay: uint64): Delta =
|
||||
finality_delay: uint64): RewardDelta =
|
||||
## Return attester micro-rewards/penalties for source-vote for each validator.
|
||||
get_attestation_component_delta(
|
||||
validator.is_previous_epoch_attester.isSome() and (not validator.is_slashed),
|
||||
validator.is_previous_epoch_attester.isSome() and
|
||||
(isSlashed notin validator.flags),
|
||||
total_balances.previous_epoch_attesters,
|
||||
total_balances.current_epoch,
|
||||
base_reward,
|
||||
finality_delay)
|
||||
|
||||
func get_target_delta*(validator: ValidatorStatus,
|
||||
func get_target_delta*(validator: RewardStatus,
|
||||
base_reward: uint64,
|
||||
total_balances: TotalBalances,
|
||||
finality_delay: uint64): Delta =
|
||||
finality_delay: uint64): RewardDelta =
|
||||
## Return attester micro-rewards/penalties for target-vote for each validator.
|
||||
get_attestation_component_delta(
|
||||
validator.is_previous_epoch_target_attester and (not validator.is_slashed),
|
||||
isPreviousEpochTargetAttester in validator.flags and
|
||||
(isSlashed notin validator.flags),
|
||||
total_balances.previous_epoch_target_attesters,
|
||||
total_balances.current_epoch,
|
||||
base_reward,
|
||||
finality_delay)
|
||||
|
||||
func get_head_delta*(validator: ValidatorStatus,
|
||||
func get_head_delta*(validator: RewardStatus,
|
||||
base_reward: uint64,
|
||||
total_balances: TotalBalances,
|
||||
finality_delay: uint64): Delta =
|
||||
finality_delay: uint64): RewardDelta =
|
||||
## Return attester micro-rewards/penalties for head-vote for each validator.
|
||||
get_attestation_component_delta(
|
||||
validator.is_previous_epoch_head_attester and (not validator.is_slashed),
|
||||
isPreviousEpochHeadAttester in validator.flags and
|
||||
(isSlashed notin validator.flags),
|
||||
total_balances.previous_epoch_head_attesters,
|
||||
total_balances.current_epoch,
|
||||
base_reward,
|
||||
finality_delay)
|
||||
|
||||
func get_inclusion_delay_delta*(validator: ValidatorStatus,
|
||||
func get_inclusion_delay_delta*(validator: RewardStatus,
|
||||
base_reward: uint64):
|
||||
(Delta, Option[(uint64, Delta)]) =
|
||||
(RewardDelta, Option[(uint64, RewardDelta)]) =
|
||||
## Return proposer and inclusion delay micro-rewards/penalties for each validator.
|
||||
if validator.is_previous_epoch_attester.isSome() and (not validator.is_slashed):
|
||||
if validator.is_previous_epoch_attester.isSome() and (isSlashed notin validator.flags):
|
||||
let
|
||||
inclusion_info = validator.is_previous_epoch_attester.get()
|
||||
proposer_reward = get_proposer_reward(base_reward)
|
||||
proposer_delta = Delta(rewards: proposer_reward)
|
||||
proposer_delta = RewardDelta(rewards: proposer_reward)
|
||||
|
||||
let
|
||||
max_attester_reward = base_reward - proposer_reward
|
||||
delta = Delta(rewards: max_attester_reward div inclusion_info.delay)
|
||||
delta = RewardDelta(rewards: max_attester_reward div inclusion_info.delay)
|
||||
proposer_index = inclusion_info.proposer_index;
|
||||
return (delta, some((proposer_index, proposer_delta)))
|
||||
|
||||
func get_inactivity_penalty_delta*(validator: ValidatorStatus,
|
||||
func get_inactivity_penalty_delta*(validator: RewardStatus,
|
||||
base_reward: Gwei,
|
||||
finality_delay: uint64): Delta =
|
||||
finality_delay: uint64): RewardDelta =
|
||||
## Return inactivity reward/penalty deltas for each validator.
|
||||
var delta: Delta
|
||||
var delta: RewardDelta
|
||||
|
||||
if is_in_inactivity_leak(finality_delay):
|
||||
# If validator is performing optimally this cancels all rewards for a neutral balance
|
||||
|
@ -452,27 +373,27 @@ func get_inactivity_penalty_delta*(validator: ValidatorStatus,
|
|||
# Additionally, all validators whose FFG target didn't match are penalized extra
|
||||
# This condition is equivalent to this condition from the spec:
|
||||
# `index not in get_unslashed_attesting_indices(state, matching_target_attestations)`
|
||||
if validator.is_slashed or (not validator.is_previous_epoch_target_attester):
|
||||
delta.penalties +=
|
||||
validator.current_epoch_effective_balance * finality_delay div
|
||||
INACTIVITY_PENALTY_QUOTIENT
|
||||
if (isSlashed in validator.flags) or
|
||||
(isPreviousEpochTargetAttester notin validator.flags):
|
||||
delta.penalties +=
|
||||
validator.current_epoch_effective_balance * finality_delay div
|
||||
INACTIVITY_PENALTY_QUOTIENT
|
||||
|
||||
delta
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_attestation_deltas
|
||||
func get_attestation_deltas(
|
||||
state: BeaconState, validator_statuses: var ValidatorStatuses) =
|
||||
## Update validator_statuses with attestation reward/penalty deltas for each validator.
|
||||
func get_attestation_deltas(state: BeaconState, rewards: var RewardInfo) =
|
||||
## Update rewards with attestation reward/penalty deltas for each validator.
|
||||
|
||||
let
|
||||
finality_delay = get_finality_delay(state)
|
||||
total_balance = validator_statuses.total_balances.current_epoch
|
||||
total_balance = rewards.total_balances.current_epoch
|
||||
total_balance_sqrt = integer_squareroot(total_balance)
|
||||
# Filter out ineligible validators. All sub-functions of the spec do this
|
||||
# except for `get_inclusion_delay_deltas`. It's safe to do so here because
|
||||
# any validator that is in the unslashed indices of the matching source
|
||||
# attestations is active, and therefore eligible.
|
||||
for index, validator in validator_statuses.statuses.mpairs():
|
||||
for index, validator in rewards.statuses.mpairs():
|
||||
if not is_eligible_validator(validator):
|
||||
continue
|
||||
|
||||
|
@ -482,11 +403,11 @@ func get_attestation_deltas(
|
|||
|
||||
let
|
||||
source_delta = get_source_delta(
|
||||
validator, base_reward, validator_statuses.total_balances, finality_delay)
|
||||
validator, base_reward, rewards.total_balances, finality_delay)
|
||||
target_delta = get_target_delta(
|
||||
validator, base_reward, validator_statuses.total_balances, finality_delay)
|
||||
validator, base_reward, rewards.total_balances, finality_delay)
|
||||
head_delta = get_head_delta(
|
||||
validator, base_reward, validator_statuses.total_balances, finality_delay)
|
||||
validator, base_reward, rewards.total_balances, finality_delay)
|
||||
(inclusion_delay_delta, proposer_delta) =
|
||||
get_inclusion_delay_delta(validator, base_reward)
|
||||
inactivity_delta = get_inactivity_penalty_delta(
|
||||
|
@ -500,28 +421,28 @@ func get_attestation_deltas(
|
|||
|
||||
if proposer_delta.isSome:
|
||||
let proposer_index = proposer_delta.get()[0]
|
||||
if proposer_index < validator_statuses.statuses.lenu64:
|
||||
validator_statuses.statuses[proposer_index].delta.add(
|
||||
if proposer_index < rewards.statuses.lenu64:
|
||||
rewards.statuses[proposer_index].delta.add(
|
||||
proposer_delta.get()[1])
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#process_rewards_and_penalties
|
||||
func process_rewards_and_penalties(
|
||||
state: var BeaconState, validator_statuses: var ValidatorStatuses) {.nbench.} =
|
||||
state: var BeaconState, rewards: var RewardInfo) {.nbench.} =
|
||||
# No rewards are applied at the end of `GENESIS_EPOCH` because rewards are
|
||||
# for work done in the previous epoch
|
||||
doAssert validator_statuses.statuses.len == state.validators.len
|
||||
doAssert rewards.statuses.len == state.validators.len
|
||||
|
||||
if get_current_epoch(state) == GENESIS_EPOCH:
|
||||
return
|
||||
|
||||
get_attestation_deltas(state, validator_statuses)
|
||||
get_attestation_deltas(state, rewards)
|
||||
|
||||
# Here almost all balances are updated (assuming most validators are active) -
|
||||
# clearing the cache becomes a bottleneck if done item by item because of the
|
||||
# recursive nature of cache clearing - instead, we clear the whole cache then
|
||||
# update the raw list directly
|
||||
state.balances.clearCache()
|
||||
for idx, v in validator_statuses.statuses:
|
||||
for idx, v in rewards.statuses:
|
||||
increase_balance(state.balances.asSeq()[idx], v.delta.rewards)
|
||||
decrease_balance(state.balances.asSeq()[idx], v.delta.penalties)
|
||||
|
||||
|
@ -622,16 +543,17 @@ func process_final_updates*(state: var BeaconState) {.nbench.} =
|
|||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#epoch-processing
|
||||
proc process_epoch*(
|
||||
state: var BeaconState, flags: UpdateFlags, cache: var StateCache) {.nbench.} =
|
||||
state: var BeaconState, flags: UpdateFlags, cache: var StateCache,
|
||||
rewards: var RewardInfo) {.nbench.} =
|
||||
let currentEpoch = get_current_epoch(state)
|
||||
trace "process_epoch",
|
||||
current_epoch = currentEpoch
|
||||
var validator_statuses = ValidatorStatuses.init(state)
|
||||
validator_statuses.process_attestations(state, cache)
|
||||
init(rewards, state)
|
||||
rewards.process_attestations(state, cache)
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#justification-and-finalization
|
||||
process_justification_and_finalization(
|
||||
state, validator_statuses.total_balances, flags)
|
||||
state, rewards.total_balances, flags)
|
||||
|
||||
# state.slot hasn't been incremented yet.
|
||||
if verifyFinalization in flags and currentEpoch >= 2:
|
||||
|
@ -644,13 +566,13 @@ proc process_epoch*(
|
|||
doAssert state.finalized_checkpoint.epoch + 3 >= currentEpoch
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#rewards-and-penalties-1
|
||||
process_rewards_and_penalties(state, validator_statuses)
|
||||
process_rewards_and_penalties(state, rewards)
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#registry-updates
|
||||
process_registry_updates(state, cache)
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#slashings
|
||||
process_slashings(state, validator_statuses.total_balances.current_epoch)
|
||||
process_slashings(state, rewards.total_balances.current_epoch)
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#final-updates
|
||||
process_final_updates(state)
|
||||
|
|
|
@ -145,7 +145,9 @@ proc parseSSZ(path: string, T: typedesc): T =
|
|||
|
||||
proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, skipBLS: bool) =
|
||||
let prePath = dir / preState & ".ssz"
|
||||
var cache = StateCache()
|
||||
var
|
||||
cache = StateCache()
|
||||
rewards = RewardInfo()
|
||||
|
||||
echo "Running: ", prePath
|
||||
let state = (ref HashedBeaconState)(
|
||||
|
@ -161,11 +163,14 @@ proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, ski
|
|||
let flags = if skipBLS: {skipBlsValidation}
|
||||
else: {}
|
||||
let success = state_transition(
|
||||
defaultRuntimePreset, state[], signedBlock, cache, flags, noRollback)
|
||||
defaultRuntimePreset, state[], signedBlock, cache, rewards, flags,
|
||||
noRollback)
|
||||
echo "State transition status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"
|
||||
|
||||
proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
|
||||
var cache = StateCache()
|
||||
var
|
||||
cache = StateCache()
|
||||
rewards = RewardInfo()
|
||||
let prePath = dir / preState & ".ssz"
|
||||
|
||||
echo "Running: ", prePath
|
||||
|
@ -175,7 +180,7 @@ proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
|
|||
state.root = hash_tree_root(state.data)
|
||||
|
||||
# Shouldn't necessarily assert, because nbench can run test suite
|
||||
discard process_slots(state[], state.data.slot + numSlots, cache)
|
||||
discard process_slots(state[], state.data.slot + numSlots, cache, rewards)
|
||||
|
||||
template processEpochScenarioImpl(
|
||||
dir, preState: string,
|
||||
|
|
|
@ -83,9 +83,11 @@ proc doTransition(conf: NcliConf) =
|
|||
|
||||
stateY.root = hash_tree_root(stateY.data)
|
||||
|
||||
var cache = StateCache()
|
||||
var
|
||||
cache = StateCache()
|
||||
rewards = RewardInfo()
|
||||
if not state_transition(getRuntimePresetForNetwork(conf.eth2Network),
|
||||
stateY[], blckX, cache, flags, noRollback):
|
||||
stateY[], blckX, cache, rewards, flags, noRollback):
|
||||
error "State transition failed"
|
||||
quit 1
|
||||
else:
|
||||
|
@ -107,11 +109,13 @@ proc doSlots(conf: NcliConf) =
|
|||
|
||||
stateY.root = hash_tree_root(stateY.data)
|
||||
|
||||
var cache: StateCache
|
||||
var
|
||||
cache = StateCache()
|
||||
rewards = RewardInfo()
|
||||
for i in 0'u64..<conf.slot:
|
||||
let isEpoch = (stateY[].data.slot + 1).isEpoch
|
||||
withTimer(timers[if isEpoch: tApplyEpochSlot else: tApplySlot]):
|
||||
doAssert process_slots(stateY[], stateY[].data.slot + 1, cache)
|
||||
doAssert process_slots(stateY[], stateY[].data.slot + 1, cache, rewards)
|
||||
|
||||
withTimer(timers[tSaveState]):
|
||||
SSZ.saveFile(conf.postState, stateY.data)
|
||||
|
|
241 ncli/ncli_db.nim
|
@ -4,8 +4,8 @@ import
|
|||
../beacon_chain/networking/network_metadata,
|
||||
../beacon_chain/[beacon_chain_db, extras],
|
||||
../beacon_chain/consensus_object_pools/blockchain_dag,
|
||||
../beacon_chain/spec/[crypto, datatypes, digest, helpers,
|
||||
state_transition, presets],
|
||||
../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers,
|
||||
state_transition, presets, validator],
|
||||
../beacon_chain/ssz, ../beacon_chain/ssz/sszdump,
|
||||
../research/simutils, ./e2store
|
||||
|
||||
|
@ -27,6 +27,7 @@ type
|
|||
pruneDatabase
|
||||
rewindState
|
||||
exportEra
|
||||
validatorPerf
|
||||
|
||||
StateDbKind* {.pure.} = enum
|
||||
sql
|
||||
|
@ -55,9 +56,14 @@ type
|
|||
.}: DbCmd
|
||||
|
||||
of bench:
|
||||
slots* {.
|
||||
benchSlot* {.
|
||||
defaultValue: 0
|
||||
name: "start-slot"
|
||||
desc: "Starting slot, negative = backwards from head".}: int64
|
||||
benchSlots* {.
|
||||
defaultValue: 50000
|
||||
desc: "Number of slots to run benchmark for".}: uint64
|
||||
name: "slots"
|
||||
desc: "Number of slots to run benchmark for, 0 = all the way to head".}: uint64
|
||||
storeBlocks* {.
|
||||
defaultValue: false
|
||||
desc: "Store each read block back into a separate database".}: bool
|
||||
|
@ -109,6 +115,39 @@ type
|
|||
defaultValue: 1
|
||||
desc: "Number of eras to write".}: uint64
|
||||
|
||||
of validatorPerf:
|
||||
perfSlot* {.
|
||||
defaultValue: -128 * SLOTS_PER_EPOCH.int64
|
||||
name: "start-slot"
|
||||
desc: "Starting slot, negative = backwards from head".}: int64
|
||||
perfSlots* {.
|
||||
defaultValue: 0
|
||||
name: "slots"
|
||||
desc: "Number of slots to run benchmark for, 0 = all the way to head".}: uint64
|
||||
|
||||
proc getBlockRange(dag: ChainDAGRef, startSlot: int64, count: uint64): seq[BlockRef] =
|
||||
# Range of blocks in reverse order
|
||||
let
|
||||
start =
|
||||
if startSlot >= 0: Slot(startSlot)
|
||||
elif uint64(-startSlot) >= dag.head.slot: Slot(0)
|
||||
else: Slot(dag.head.slot - uint64(-startSlot))
|
||||
ends =
|
||||
if count == 0: dag.head.slot + 1
|
||||
else: start + count
|
||||
var
|
||||
blockRefs: seq[BlockRef]
|
||||
cur = dag.head
|
||||
|
||||
while cur != nil:
|
||||
if cur.slot < ends:
|
||||
if cur.slot < start or cur.slot == 0: # skip genesis
|
||||
break
|
||||
else:
|
||||
blockRefs.add cur
|
||||
cur = cur.parent
|
||||
blockRefs
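The negative start-slot convention used by `getBlockRange` (and by the `--start-slot` options above) can be stated as a small self-contained helper; the helper name and the sample values are illustrative only:

```nim
proc resolveStartSlot(startSlot: int64, headSlot: uint64): uint64 =
  # Non-negative values are absolute slots; negative values count back from head,
  # clamped at genesis.
  if startSlot >= 0: uint64(startSlot)
  elif uint64(-startSlot) >= headSlot: 0'u64
  else: headSlot - uint64(-startSlot)

assert resolveStartSlot(100, 1000) == 100
assert resolveStartSlot(-32, 1000) == 968
assert resolveStartSlot(-5000, 1000) == 0
```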
|
||||
|
||||
proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
|
||||
var timers: array[Timers, RunningStat]
|
||||
|
||||
|
@ -116,7 +155,6 @@ proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
|
|||
let
|
||||
db = BeaconChainDB.new(
|
||||
runtimePreset, conf.databaseDir.string,
|
||||
inMemory = false,
|
||||
fileStateStorage = conf.stateDbKind == StateDbKind.file)
|
||||
dbBenchmark = BeaconChainDB.new(runtimePreset, "benchmark")
|
||||
defer:
|
||||
|
@ -128,43 +166,37 @@ proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
|
|||
quit 1
|
||||
|
||||
echo "Initializing block pool..."
|
||||
let pool = withTimerRet(timers[tInit]):
|
||||
let dag = withTimerRet(timers[tInit]):
|
||||
ChainDAGRef.init(runtimePreset, db, {})
|
||||
|
||||
echo &"Loaded {pool.blocks.len} blocks, head slot {pool.head.slot}"
|
||||
|
||||
var
|
||||
blockRefs: seq[BlockRef]
|
||||
blockRefs = dag.getBlockRange(conf.benchSlot, conf.benchSlots)
|
||||
blocks: seq[TrustedSignedBeaconBlock]
|
||||
cur = pool.head
|
||||
|
||||
while cur != nil:
|
||||
blockRefs.add cur
|
||||
cur = cur.parent
|
||||
|
||||
for b in 1..<blockRefs.len: # Skip genesis block
|
||||
if blockRefs[blockRefs.len - b - 1].slot > conf.slots:
|
||||
break
|
||||
echo &"Loaded {dag.blocks.len} blocks, head slot {dag.head.slot}, selected {blockRefs.len} blocks"
|
||||
doAssert blockRefs.len() > 0, "Must select at least one block"
|
||||
|
||||
for b in 0..<blockRefs.len:
|
||||
withTimer(timers[tLoadBlock]):
|
||||
blocks.add db.getBlock(blockRefs[blockRefs.len - b - 1].root).get()
|
||||
|
||||
let state = (ref HashedBeaconState)(
|
||||
root: db.getBlock(blockRefs[^1].root).get().message.state_root
|
||||
)
|
||||
|
||||
withTimer(timers[tLoadState]):
|
||||
discard db.getState(state[].root, state[].data, noRollback)
|
||||
let state = newClone(dag.headState)
|
||||
|
||||
var
|
||||
cache = StateCache()
|
||||
rewards = RewardInfo()
|
||||
loadedState = new BeaconState
|
||||
|
||||
withTimer(timers[tLoadState]):
|
||||
dag.updateStateData(
|
||||
state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
|
||||
|
||||
for b in blocks.mitems():
|
||||
while state[].data.slot < b.message.slot:
|
||||
let isEpoch = state[].data.slot.epoch() != (state[].data.slot + 1).epoch
|
||||
while state[].data.data.slot < b.message.slot:
|
||||
let isEpoch = (state[].data.data.slot + 1).isEpoch()
|
||||
withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
|
||||
let ok = process_slots(state[], state[].data.slot + 1, cache, {})
|
||||
let ok = process_slots(
|
||||
state[].data, state[].data.data.slot + 1, cache, rewards, {})
|
||||
doAssert ok, "Slot processing can't fail with correct inputs"
|
||||
|
||||
var start = Moment.now()
|
||||
|
@ -172,7 +204,7 @@ proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
|
|||
if conf.resetCache:
|
||||
cache = StateCache()
|
||||
if not state_transition(
|
||||
runtimePreset, state[], b, cache, {slotProcessed}, noRollback):
|
||||
runtimePreset, state[].data, b, cache, rewards, {slotProcessed}, noRollback):
|
||||
dump("./", b)
|
||||
echo "State transition failed (!)"
|
||||
quit 1
|
||||
|
@ -182,20 +214,20 @@ proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
|
|||
withTimer(timers[tDbStore]):
|
||||
dbBenchmark.putBlock(b)
|
||||
|
||||
if state[].data.slot.isEpoch and conf.storeStates:
|
||||
if state[].data.slot.epoch < 2:
|
||||
dbBenchmark.putState(state[].root, state[].data)
|
||||
if state[].data.data.slot.isEpoch and conf.storeStates:
|
||||
if state[].data.data.slot.epoch < 2:
|
||||
dbBenchmark.putState(state[].data.root, state[].data.data)
|
||||
dbBenchmark.checkpoint()
|
||||
else:
|
||||
withTimer(timers[tDbStore]):
|
||||
dbBenchmark.putState(state[].root, state[].data)
|
||||
dbBenchmark.putState(state[].data.root, state[].data.data)
|
||||
dbBenchmark.checkpoint()
|
||||
|
||||
withTimer(timers[tDbLoad]):
|
||||
doAssert dbBenchmark.getState(state[].root, loadedState[], noRollback)
|
||||
doAssert dbBenchmark.getState(state[].data.root, loadedState[], noRollback)
|
||||
|
||||
if state[].data.slot.epoch mod 16 == 0:
|
||||
doAssert hash_tree_root(state[].data) == hash_tree_root(loadedState[])
|
||||
if state[].data.data.slot.epoch mod 16 == 0:
|
||||
doAssert hash_tree_root(state[].data.data) == hash_tree_root(loadedState[])
|
||||
|
||||
printTimers(false, timers)
|
||||
|
||||
|
@ -385,6 +417,145 @@ proc cmdExportEra(conf: DbConf, preset: RuntimePreset) =
|
|||
ancestor = ancestors[ancestors.len - 1 - i]
|
||||
e2s.appendRecord(db.getBlock(ancestor.root).get()).get()
|
||||
|
||||
type
|
||||
# Validator performance metrics tool based on
|
||||
# https://github.com/paulhauner/lighthouse/blob/etl/lcli/src/etl/validator_performance.rs
|
||||
# Credits to Paul Hauner
|
||||
ValidatorPerformance = object
|
||||
attestation_hits: uint64
|
||||
attestation_misses: uint64
|
||||
head_attestation_hits: uint64
|
||||
head_attestation_misses: uint64
|
||||
target_attestation_hits: uint64
|
||||
target_attestation_misses: uint64
|
||||
first_slot_head_attester_when_first_slot_empty: uint64
|
||||
first_slot_head_attester_when_first_slot_not_empty: uint64
|
||||
delays: Table[uint64, uint64]
|
||||
|
||||
proc cmdValidatorPerf(conf: DbConf, runtimePreset: RuntimePreset) =
|
||||
echo "Opening database..."
|
||||
let
|
||||
db = BeaconChainDB.new(
|
||||
runtimePreset, conf.databaseDir.string,
|
||||
fileStateStorage = conf.stateDbKind == StateDbKind.file)
|
||||
defer:
|
||||
db.close()
|
||||
|
||||
if not ChainDAGRef.isInitialized(db):
|
||||
echo "Database not initialized"
|
||||
quit 1
|
||||
|
||||
echo "# Initializing block pool..."
|
||||
let dag = ChainDAGRef.init(runtimePreset, db, {})
|
||||
|
||||
var
|
||||
blockRefs = dag.getBlockRange(conf.perfSlot, conf.perfSlots)
|
||||
perfs = newSeq[ValidatorPerformance](
|
||||
dag.headState.data.data.validators.len())
|
||||
cache = StateCache()
|
||||
rewards = RewardInfo()
|
||||
blck: TrustedSignedBeaconBlock
|
||||
|
||||
doAssert blockRefs.len() > 0, "Must select at least one block"
|
||||
|
||||
echo "# Analyzing performance for epochs ",
|
||||
blockRefs[^1].slot.epoch, " - ", blockRefs[0].slot.epoch
|
||||
|
||||
let state = newClone(dag.headState)
|
||||
dag.updateStateData(
|
||||
state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
|
||||
|
||||
proc processEpoch() =
|
||||
let
|
||||
prev_epoch_target_slot =
|
||||
state[].data.data.get_previous_epoch().compute_start_slot_at_epoch()
|
||||
penultimate_epoch_end_slot =
|
||||
if prev_epoch_target_slot == 0: Slot(0)
|
||||
else: prev_epoch_target_slot - 1
|
||||
first_slot_empty =
|
||||
state[].data.data.get_block_root_at_slot(prev_epoch_target_slot) ==
|
||||
state[].data.data.get_block_root_at_slot(penultimate_epoch_end_slot)
|
||||
|
||||
let first_slot_attesters = block:
|
||||
let committee_count = state[].data.data.get_committee_count_per_slot(
|
||||
prev_epoch_target_slot, cache)
|
||||
var indices = HashSet[ValidatorIndex]()
|
||||
for committee_index in 0..<committee_count:
|
||||
for validator_index in state[].data.data.get_beacon_committee(
|
||||
prev_epoch_target_slot, committee_index.CommitteeIndex, cache):
|
||||
indices.incl(validator_index)
|
||||
indices
|
||||
|
||||
for i, s in rewards.statuses.pairs():
|
||||
let perf = addr perfs[i]
|
||||
if RewardFlags.isActiveInPreviousEpoch in s.flags:
|
||||
if s.is_previous_epoch_attester.isSome():
|
||||
perf.attestation_hits += 1;
|
||||
|
||||
if RewardFlags.isPreviousEpochHeadAttester in s.flags:
|
||||
perf.head_attestation_hits += 1
|
||||
else:
|
||||
perf.head_attestation_misses += 1
|
||||
|
||||
if RewardFlags.isPreviousEpochTargetAttester in s.flags:
|
||||
perf.target_attestation_hits += 1
|
||||
else:
|
||||
perf.target_attestation_misses += 1
|
||||
|
||||
if i.ValidatorIndex in first_slot_attesters:
|
||||
if first_slot_empty:
|
||||
perf.first_slot_head_attester_when_first_slot_empty += 1
|
||||
else:
|
||||
perf.first_slot_head_attester_when_first_slot_not_empty += 1
|
||||
|
||||
if s.inclusion_info.isSome():
|
||||
perf.delays.mGetOrPut(s.inclusion_info.get().delay, 0'u64) += 1
|
||||
|
||||
else:
|
||||
perf.attestation_misses += 1;
|
||||
|
||||
|
||||
for bi in 0..<blockRefs.len:
|
||||
blck = db.getBlock(blockRefs[blockRefs.len - bi - 1].root).get()
|
||||
while state[].data.data.slot < blck.message.slot:
|
||||
let ok = process_slots(
|
||||
state[].data, state[].data.data.slot + 1, cache, rewards, {})
|
||||
doAssert ok, "Slot processing can't fail with correct inputs"
|
||||
|
||||
if state[].data.data.slot.isEpoch():
|
||||
processEpoch()
|
||||
|
||||
if not state_transition(
|
||||
runtimePreset, state[].data, blck, cache, rewards, {slotProcessed}, noRollback):
|
||||
echo "State transition failed (!)"
|
||||
quit 1
|
||||
|
||||
# Capture rewards from the epoch leading up to the last block
|
||||
let nextEpochStart = (blck.message.slot.epoch + 1).compute_start_slot_at_epoch
|
||||
doAssert process_slots(state[].data, nextEpochStart, cache, rewards, {})
|
||||
processEpoch()
|
||||
|
||||
echo "validator_index,attestation_hits,attestation_misses,head_attestation_hits,head_attestation_misses,target_attestation_hits,target_attestation_misses,delay_avg,first_slot_head_attester_when_first_slot_empty,first_slot_head_attester_when_first_slot_not_empty"
|
||||
|
||||
for (i, perf) in perfs.pairs:
|
||||
var
|
||||
count = 0'u64
|
||||
sum = 0'u64
|
||||
for delay, n in perf.delays:
|
||||
count += n
|
||||
sum += delay * n
|
||||
echo i,",",
|
||||
perf.attestation_hits,",",
|
||||
perf.attestation_misses,",",
|
||||
perf.head_attestation_hits,",",
|
||||
perf.head_attestation_misses,",",
|
||||
perf.target_attestation_hits,",",
|
||||
perf.target_attestation_misses,",",
|
||||
if count == 0: 0.0
|
||||
else: sum.float / count.float,",",
|
||||
perf.first_slot_head_attester_when_first_slot_empty,",",
|
||||
perf.first_slot_head_attester_when_first_slot_not_empty
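The `delay_avg` column produced above is a weighted mean over each validator's `delays` table, which maps inclusion delay to attestation count. An equivalent stand-alone helper (illustrative, not part of the diff):

```nim
import std/tables

proc averageDelay(delays: Table[uint64, uint64]): float =
  var count, total = 0'u64
  for delay, n in delays:
    count += n
    total += delay * n
  result = if count == 0: 0.0 else: total.float / count.float

var d = initTable[uint64, uint64]()
d[1'u64] = 10'u64   # ten attestations included one slot after their slot
d[2'u64] = 2'u64    # two attestations included two slots after
assert abs(averageDelay(d) - 14.0 / 12.0) < 1e-9
```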
|
||||
|
||||
when isMainModule:
|
||||
var
|
||||
conf = DbConf.load()
|
||||
|
@ -403,3 +574,5 @@ when isMainModule:
|
|||
cmdRewindState(conf, runtimePreset)
|
||||
of exportEra:
|
||||
cmdExportEra(conf, runtimePreset)
|
||||
of validatorPerf:
|
||||
cmdValidatorPerf(conf, runtimePreset)
|
||||
|
|
|
@ -116,8 +116,10 @@ proc nfuzz_block(input: openArray[byte], xoutput: ptr byte,
|
|||
hashedState =
|
||||
HashedBeaconState(data: data.state, root: hash_tree_root(data.state))
|
||||
cache = StateCache()
|
||||
rewards = RewardInfo()
|
||||
result =
|
||||
state_transition(preset, hashedState, blck, cache, flags, rollback)
|
||||
state_transition(
|
||||
preset, hashedState, blck, cache, rewards, flags, rollback)
|
||||
data.state = hashedState.data
|
||||
|
||||
decodeAndProcess(BlockInput):
|
||||
|
|
|
@ -14,12 +14,17 @@ import
|
|||
|
||||
proc nextEpoch*(state: var HashedBeaconState) =
|
||||
## Transition to the start of the next epoch
|
||||
var cache = StateCache()
|
||||
var
|
||||
cache = StateCache()
|
||||
rewards = RewardInfo()
|
||||
let slot =
|
||||
state.data.slot + SLOTS_PER_EPOCH - (state.data.slot mod SLOTS_PER_EPOCH)
|
||||
doAssert process_slots(state, slot, cache)
|
||||
doAssert process_slots(state, slot, cache, rewards)
|
||||
|
||||
proc nextSlot*(state: var HashedBeaconState) =
|
||||
## Transition to the next slot
|
||||
var cache = StateCache()
|
||||
doAssert process_slots(state, state.data.slot + 1, cache)
|
||||
var
|
||||
cache = StateCache()
|
||||
rewards = RewardInfo()
|
||||
|
||||
doAssert process_slots(state, state.data.slot + 1, cache, rewards)
|
||||
|
|
|
@ -68,12 +68,14 @@ proc sszDecodeEntireInput*(input: openArray[byte], Decoded: type): Decoded =
|
|||
proc process_justification_and_finalization*(state: var BeaconState) =
|
||||
var cache = StateCache()
|
||||
|
||||
var validator_statuses = ValidatorStatuses.init(state)
|
||||
validator_statuses.process_attestations(state, cache)
|
||||
process_justification_and_finalization(state, validator_statuses.total_balances)
|
||||
var rewards: RewardInfo
|
||||
rewards.init(state)
|
||||
rewards.process_attestations(state, cache)
|
||||
process_justification_and_finalization(state, rewards.total_balances)
|
||||
|
||||
proc process_slashings*(state: var BeaconState) =
|
||||
var cache = StateCache()
|
||||
var validator_statuses = ValidatorStatuses.init(state)
|
||||
validator_statuses.process_attestations(state, cache)
|
||||
process_slashings(state, validator_statuses.total_balances.current_epoch)
|
||||
var rewards: RewardInfo
|
||||
rewards.init(state)
|
||||
rewards.process_attestations(state, cache)
|
||||
process_slashings(state, rewards.total_balances.current_epoch)
|
||||
|
|
|
@ -30,7 +30,7 @@ type Deltas = object
|
|||
rewards: List[uint64, Limit VALIDATOR_REGISTRY_LIMIT]
|
||||
penalties: List[uint64, Limit VALIDATOR_REGISTRY_LIMIT]
|
||||
|
||||
func add(v: var Deltas, idx: int, delta: Delta) =
|
||||
func add(v: var Deltas, idx: int, delta: RewardDelta) =
|
||||
v.rewards[idx] += delta.rewards
|
||||
v.penalties[idx] += delta.penalties
|
||||
|
||||
|
@ -62,12 +62,13 @@ proc runTest(rewardsDir, identifier: string) =
|
|||
parseTest(testDir/"inactivity_penalty_deltas.ssz", SSZ, Deltas)
|
||||
|
||||
var
|
||||
validator_statuses = ValidatorStatuses.init(state[])
|
||||
rewards = RewardInfo()
|
||||
finality_delay = (state[].get_previous_epoch() - state[].finalized_checkpoint.epoch)
|
||||
|
||||
validator_statuses.process_attestations(state[], cache)
|
||||
rewards.init(state[])
|
||||
rewards.process_attestations(state[], cache)
|
||||
let
|
||||
total_balance = validator_statuses.total_balances.current_epoch
|
||||
total_balance = rewards.total_balances.current_epoch
|
||||
total_balance_sqrt = integer_squareroot(total_balance)
|
||||
|
||||
var
|
||||
|
@ -77,7 +78,7 @@ proc runTest(rewardsDir, identifier: string) =
|
|||
inclusionDelayDeltas2 = Deltas.init(state[].validators.len)
|
||||
inactivityPenaltyDeltas2 = Deltas.init(state[].validators.len)
|
||||
|
||||
for index, validator in validator_statuses.statuses.mpairs():
|
||||
for index, validator in rewards.statuses.mpairs():
|
||||
if not is_eligible_validator(validator):
|
||||
continue
|
||||
|
||||
|
@ -86,11 +87,11 @@ proc runTest(rewardsDir, identifier: string) =
|
|||
state[], index.ValidatorIndex, total_balance_sqrt)
|
||||
|
||||
sourceDeltas2.add(index, get_source_delta(
|
||||
validator, base_reward, validator_statuses.total_balances, finality_delay))
|
||||
validator, base_reward, rewards.total_balances, finality_delay))
|
||||
targetDeltas2.add(index, get_target_delta(
|
||||
validator, base_reward, validator_statuses.total_balances, finality_delay))
|
||||
validator, base_reward, rewards.total_balances, finality_delay))
|
||||
headDeltas2.add(index, get_head_delta(
|
||||
validator, base_reward, validator_statuses.total_balances, finality_delay))
|
||||
validator, base_reward, rewards.total_balances, finality_delay))
|
||||
|
||||
let
|
||||
(inclusion_delay_delta, proposer_delta) =
|
||||
|
|
|
@ -40,6 +40,7 @@ proc runTest(testName, testDir, unitTestName: string) =
|
|||
hashedPreState = (ref HashedBeaconState)(
|
||||
data: preState[], root: hash_tree_root(preState[]))
|
||||
cache = StateCache()
|
||||
rewards = RewardInfo()
|
||||
|
||||
# In test cases with more than 10 blocks the first 10 aren't 0-prefixed,
|
||||
# so purely lexicographic sorting wouldn't sort properly.
|
||||
|
@ -49,12 +50,12 @@ proc runTest(testName, testDir, unitTestName: string) =
|
|||
|
||||
if hasPostState:
|
||||
let success = state_transition(
|
||||
defaultRuntimePreset, hashedPreState[], blck, cache, flags = {},
|
||||
defaultRuntimePreset, hashedPreState[], blck, cache, rewards, flags = {},
|
||||
noRollback)
|
||||
doAssert success, "Failure when applying block " & $i
|
||||
else:
|
||||
let success = state_transition(
|
||||
defaultRuntimePreset, hashedPreState[], blck, cache, flags = {},
|
||||
defaultRuntimePreset, hashedPreState[], blck, cache, rewards, flags = {},
|
||||
noRollback)
|
||||
doAssert (i + 1 < numBlocks) or not success,
|
||||
"We didn't expect these invalid blocks to be processed"
|
||||
|
|
|
@ -20,10 +20,6 @@ import
|
|||
const SanitySlotsDir = SszTestsDir/const_preset/"phase0"/"sanity"/"slots"/"pyspec_tests"
|
||||
|
||||
proc runTest(identifier: string) =
|
||||
# We wrap the tests in a proc to avoid running out of globals
|
||||
# in the future: Nim supports up to 3500 globals
|
||||
# but unittest with the macro/templates put everything as globals
|
||||
# https://github.com/nim-lang/Nim/issues/12084#issue-486866402
|
||||
let
|
||||
testDir = SanitySlotsDir / identifier
|
||||
num_slots = readLines(testDir / "slots.yaml", 2)[0].parseInt.uint64
|
||||
|
@ -35,11 +31,12 @@ proc runTest(identifier: string) =
|
|||
hashedPreState = (ref HashedBeaconState)(
|
||||
data: preState[], root: hash_tree_root(preState[]))
|
||||
cache = StateCache()
|
||||
rewards: RewardInfo
|
||||
let postState = newClone(parseTest(testDir/"post.ssz", SSZ, BeaconState))
|
||||
|
||||
check:
|
||||
process_slots(
|
||||
hashedPreState[], hashedPreState.data.slot + num_slots, cache)
|
||||
hashedPreState[], hashedPreState.data.slot + num_slots, cache, rewards)
|
||||
|
||||
hashedPreState.root == postState[].hash_tree_root()
|
||||
let newPreState = newClone(hashedPreState.data)
|
||||
|
|
|
@ -12,12 +12,14 @@ import

proc processSlotsUntilEndCurrentEpoch(state: var HashedBeaconState) =
# Process all slots until the end of the last slot of the current epoch
var cache = StateCache()
var
cache = StateCache()
rewards = RewardInfo()
let slot =
state.data.slot + SLOTS_PER_EPOCH - (state.data.slot mod SLOTS_PER_EPOCH)

# Transition to slot before the epoch state transition
discard process_slots(state, slot - 1, cache)
discard process_slots(state, slot - 1, cache, rewards)

# For the last slot of the epoch,
# only process_slot without process_epoch

@ -28,9 +30,11 @@ proc transitionEpochUntilJustificationFinalization*(state: var HashedBeaconState
# Process slots and do the epoch transition until crosslinks
processSlotsUntilEndCurrentEpoch(state)

var cache = StateCache()
var
cache = StateCache()
rewards = RewardInfo()

var validator_statuses = ValidatorStatuses.init(state.data)
validator_statuses.process_attestations(state.data, cache)
rewards.init(state.data)
rewards.process_attestations(state.data, cache)
process_justification_and_finalization(
state.data, validator_statuses.total_balances)
state.data, rewards.total_balances)
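This helper shows the ValidatorStatuses → RewardInfo rename in isolation: the init / process_attestations / total_balances sequence is unchanged apart from the type. Sketched on its own, with state and cache assumed to be set up as in the surrounding proc:

var rewards: RewardInfo
rewards.init(state.data)                         # was: ValidatorStatuses.init(state.data)
rewards.process_attestations(state.data, cache)
process_justification_and_finalization(state.data, rewards.total_balances)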
@ -65,9 +65,10 @@ suite "Attestation pool processing" & preset():
pool = newClone(AttestationPool.init(chainDag, quarantine))
state = newClone(chainDag.headState)
cache = StateCache()
rewards: RewardInfo
# Slot 0 is a finalized slot - won't be making attestations for it..
check:
process_slots(state.data, getStateField(state, slot) + 1, cache)
process_slots(state.data, getStateField(state, slot) + 1, cache, rewards)

test "Can add and retrieve simple attestations" & preset():
let

@ -99,7 +100,8 @@ suite "Attestation pool processing" & preset():

process_slots(
state.data,
getStateField(state, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache)
getStateField(state, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards)

let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

@ -119,7 +121,8 @@ suite "Attestation pool processing" & preset():
check:
process_slots(
state.data,
getStateField(state, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache)
getStateField(state, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards)

check:
# shouldn't include already-included attestations

@ -197,7 +200,8 @@ suite "Attestation pool processing" & preset():
check:
process_slots(
state.data,
getStateField(state, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache)
getStateField(state, slot) + MIN_ATTESTATION_INCLUSION_DELAY, cache,
rewards)

check:
pool[].getAttestationsForBlock(state.data.data, cache).len() == 2

@ -242,7 +246,8 @@ suite "Attestation pool processing" & preset():
inc attestations

check:
process_slots(state.data, getStateField(state, slot) + 1, cache)
process_slots(state.data, getStateField(state, slot) + 1, cache,
rewards)

doAssert attestations.uint64 > MAX_ATTESTATIONS,
"6*SLOTS_PER_EPOCH validators > 128 mainnet MAX_ATTESTATIONS"

@ -263,7 +268,7 @@ suite "Attestation pool processing" & preset():
state.data.data, state.blck.root, bc0[0], cache)

check:
process_slots(state.data, getStateField(state, slot) + 1, cache)
process_slots(state.data, getStateField(state, slot) + 1, cache, rewards)

let
bc1 = get_beacon_committee(state.data.data,

@ -278,7 +283,7 @@ suite "Attestation pool processing" & preset():
attestation0, @[bc0[0]], attestation0.loadSig, attestation0.data.slot)

discard process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)

let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

@ -302,7 +307,8 @@ suite "Attestation pool processing" & preset():
attestation1, @[bc0[1]], attestation1.loadSig, attestation1.data.slot)

check:
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)
process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)

let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

@ -329,7 +335,8 @@ suite "Attestation pool processing" & preset():
attestation1, @[bc0[1]], attestation1.loadSig, attestation1.data.slot)

check:
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)
process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)

let attestations = pool[].getAttestationsForBlock(state.data.data, cache)

@ -355,7 +362,8 @@ suite "Attestation pool processing" & preset():
attestation0, @[bc0[0]], attestation0.loadSig, attestation0.data.slot)

check:
process_slots(state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache)
process_slots(
state.data, MIN_ATTESTATION_INCLUSION_DELAY.Slot + 1, cache, rewards)

let attestations = pool[].getAttestationsForBlock(state.data.data, cache)
@ -124,6 +124,7 @@ suite "Block pool processing" & preset():
quarantine = QuarantineRef.init(keys.newRng())
stateData = newClone(dag.headState)
cache = StateCache()
rewards = RewardInfo()
b1 = addTestBlock(stateData.data, dag.tail.root, cache)
b2 = addTestBlock(stateData.data, b1.root, cache)
test "getRef returns nil for missing blocks":

@ -166,7 +167,8 @@ suite "Block pool processing" & preset():

# Skip one slot to get a gap
check:
process_slots(stateData.data, getStateField(stateData, slot) + 1, cache)
process_slots(
stateData.data, getStateField(stateData, slot) + 1, cache, rewards)

let
b4 = addTestBlock(stateData.data, b2.root, cache)

@ -336,6 +338,7 @@ suite "chain DAG finalization tests" & preset():
dag = init(ChainDAGRef, defaultRuntimePreset, db)
quarantine = QuarantineRef.init(keys.newRng())
cache = StateCache()
rewards = RewardInfo()

test "prune heads on finalization" & preset():
# Create a fork that will not be taken

@ -344,7 +347,8 @@ suite "chain DAG finalization tests" & preset():
tmpState = assignClone(dag.headState.data)
check:
process_slots(
tmpState[], tmpState.data.slot + (5 * SLOTS_PER_EPOCH).uint64, cache)
tmpState[], tmpState.data.slot + (5 * SLOTS_PER_EPOCH).uint64, cache,
rewards)

let lateBlock = addTestBlock(tmpState[], dag.head.root, cache)
block:

@ -444,7 +448,7 @@ suite "chain DAG finalization tests" & preset():
# The loop creates multiple branches, which StateCache isn't suitable for
cache = StateCache()

doAssert process_slots(prestate[], prestate[].data.slot + 1, cache)
doAssert process_slots(prestate[], prestate[].data.slot + 1, cache, rewards)

# create another block, orphaning the head
let blck = makeTestBlock(

@ -468,6 +472,7 @@ suite "chain DAG finalization tests" & preset():
dag = init(ChainDAGRef, defaultRuntimePreset, db)
quarantine = QuarantineRef.init(keys.newRng())
cache = StateCache()
rewards = RewardInfo()

test "init with gaps" & preset():
for blck in makeTestBlocks(

@ -481,7 +486,7 @@ suite "chain DAG finalization tests" & preset():
# Advance past epoch so that the epoch transition is gapped
check:
process_slots(
dag.headState.data, Slot(SLOTS_PER_EPOCH * 6 + 2), cache)
dag.headState.data, Slot(SLOTS_PER_EPOCH * 6 + 2), cache, rewards)

var blck = makeTestBlock(
dag.headState.data, dag.head.root, cache,
@ -38,10 +38,11 @@ suite "Gossip validation " & preset():
pool = newClone(AttestationPool.init(chainDag, quarantine))
state = newClone(chainDag.headState)
cache = StateCache()
rewards = RewardInfo()
batchCrypto = BatchCrypto.new(keys.newRng(), eager = proc(): bool = false)
# Slot 0 is a finalized slot - won't be making attestations for it..
check:
process_slots(state.data, getStateField(state, slot) + 1, cache)
process_slots(state.data, getStateField(state, slot) + 1, cache, rewards)

test "Validation sanity":
# TODO: refactor tests to avoid skipping BLS validation
@ -455,7 +455,7 @@ suite "PeerPool testing suite":
await pool.clearSafe()
result = true

asyncCheck testConsumer()
asyncSpawn testConsumer()
check waitFor(testClose()) == true

test "Access peers by key test": closureScope:
@ -31,10 +31,11 @@ suite "Block processing" & preset():
var
state = newClone(genesisState[])
cache = StateCache()
rewards = RewardInfo()

test "Passes from genesis state, no block" & preset():
check:
process_slots(state[], state.data.slot + 1, cache)
process_slots(state[], state.data.slot + 1, cache, rewards)
state.data.slot == genesisState.data.slot + 1

test "Passes from genesis state, empty block" & preset():

@ -43,7 +44,7 @@ suite "Block processing" & preset():
new_block = makeTestBlock(state[], previous_block_root, cache)

let block_ok = state_transition(
defaultRuntimePreset, state[], new_block, cache, {}, noRollback)
defaultRuntimePreset, state[], new_block, cache, rewards, {}, noRollback)

check:
block_ok

@ -52,19 +53,18 @@ suite "Block processing" & preset():

test "Passes through epoch update, no block" & preset():
check:
process_slots(state[], Slot(SLOTS_PER_EPOCH), cache)
process_slots(state[], Slot(SLOTS_PER_EPOCH), cache, rewards)
state.data.slot == genesisState.data.slot + SLOTS_PER_EPOCH

test "Passes through epoch update, empty block" & preset():
var
previous_block_root = genesisRoot
cache = StateCache()

for i in 1..SLOTS_PER_EPOCH:
let new_block = makeTestBlock(state[], previous_block_root, cache)

let block_ok = state_transition(
defaultRuntimePreset, state[], new_block, cache, {}, noRollback)
defaultRuntimePreset, state[], new_block, cache, rewards, {}, noRollback)

check:
block_ok

@ -77,11 +77,10 @@ suite "Block processing" & preset():
test "Attestation gets processed at epoch" & preset():
var
previous_block_root = genesisRoot
cache = StateCache()

# Slot 0 is a finalized slot - won't be making attestations for it..
check:
process_slots(state[], state.data.slot + 1, cache)
process_slots(state[], state.data.slot + 1, cache, rewards)

let
# Create an attestation for slot 1 signed by the only attester we have!

@ -94,14 +93,15 @@ suite "Block processing" & preset():
# to let the attestation propagate properly to interested participants
check:
process_slots(
state[], GENESIS_SLOT + MIN_ATTESTATION_INCLUSION_DELAY + 1, cache)
state[], GENESIS_SLOT + MIN_ATTESTATION_INCLUSION_DELAY + 1, cache,
rewards)

let
new_block = makeTestBlock(state[], previous_block_root, cache,
attestations = @[attestation]
)
check state_transition(
defaultRuntimePreset, state[], new_block, cache, {}, noRollback)
defaultRuntimePreset, state[], new_block, cache, rewards, {}, noRollback)

check:
# TODO epoch attestations can get multiplied now; clean up paths to

@ -109,7 +109,7 @@ suite "Block processing" & preset():
state.data.current_epoch_attestations.len >= 1

check:
process_slots(state[], Slot(191), cache)
process_slots(state[], Slot(191), cache, rewards)

# Would need to process more epochs for the attestation to be removed from
# the state! (per above bug)
@ -85,7 +85,8 @@ proc addTestBlock*(
nextSlot = true): SignedBeaconBlock =
# Create and add a block to state - state will advance by one slot!
if nextSlot:
doAssert process_slots(state, state.data.slot + 1, cache, flags)
var rewards: RewardInfo
doAssert process_slots(state, state.data.slot + 1, cache, rewards, flags)

let
proposer_index = get_beacon_proposer_index(state.data, cache)
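As the addTestBlock change shows, a caller with no use for the reward data can simply declare a throwaway RewardInfo; only the extra argument is mandatory. A sketch of that pattern for a hypothetical helper, reusing the types and process_slots signature from this diff:

proc advanceOneSlot(state: var HashedBeaconState, cache: var StateCache) =
  # Hypothetical helper: the rewards are computed but intentionally discarded.
  var rewards: RewardInfo
  doAssert process_slots(state, state.data.slot + 1, cache, rewards)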
@ -60,11 +60,12 @@ proc getTestStates*(initialState: HashedBeaconState):
var
tmpState = assignClone(initialState)
cache = StateCache()
rewards = RewardInfo()

for i, epoch in stateEpochs:
let slot = epoch.Epoch.compute_start_slot_at_epoch
if tmpState.data.slot < slot:
doAssert process_slots(tmpState[], slot, cache)
doAssert process_slots(tmpState[], slot, cache, rewards)
if i mod 3 == 0:
valid_deposit(tmpState.data)
doAssert tmpState.data.slot == slot
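getTestStates illustrates the reuse pattern the commit message points at: one RewardInfo is declared outside the loop and passed to every process_slots call, which is presumably what lets long replays avoid repeated reward allocations. A compact sketch of the same idea, with the loop bounds and tmpState as placeholders:

var
  cache = StateCache()
  rewards = RewardInfo()   # allocated once, reused for every epoch replayed
for epoch in 1'u64 .. 4'u64:
  doAssert process_slots(
    tmpState[], (epoch * SLOTS_PER_EPOCH).Slot, cache, rewards)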