remove nbench (#3152)

Used during the initial development of the spec, `nbench` has fallen behind and has by and large been superseded by `block_sim`, `state_sim` and `ncli_db bench`.

This commit is contained in:
parent fa2cf028e2
commit cb3f1fd4fc

Makefile | 3
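For reference, a minimal sketch of how the replacement tools named above can be built and inspected; the make target names and the `--help` invocations are assumptions for illustration (only `ncli_db bench` is named in the commit message), so verify locally:

```
# Hedged sketch, not part of this diff: build the remaining benchmarking tools
# via the repository's make targets and ask each binary for its options.
make ncli_db block_sim state_sim   # target names assumed from the tool names above
build/ncli_db bench --help         # state-transition benchmarking against a real database
build/block_sim --help             # block production/processing simulation
build/state_sim --help             # state/epoch processing simulation
```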
@@ -43,8 +43,6 @@ TOOLS := \
   deposit_contract \
   resttest \
   logtrace \
-  nbench \
-  nbench_spec_scenarios \
   ncli \
   ncli_db \
   stack_sizes \
@@ -59,7 +57,6 @@ TOOLS_DIRS := \
   beacon_chain/eth1 \
   benchmarks \
   ncli \
-  nbench \
   research \
   tools
 TOOLS_CSV := $(subst $(SPACE),$(COMMA),$(TOOLS))
@@ -14,8 +14,7 @@ import
   chronicles,
   ../extras,
   ./datatypes/[phase0, altair, merge],
-  "."/[eth2_merkleization, forks, signatures, validator],
-  ../../nbench/bench_lab
+  "."/[eth2_merkleization, forks, signatures, validator]

 export extras, forks, validator

@@ -195,7 +194,7 @@ proc initialize_beacon_state_from_eth1*(
     eth1_block_hash: Eth2Digest,
     eth1_timestamp: uint64,
     deposits: openArray[DepositData],
-    flags: UpdateFlags = {}): phase0.BeaconState {.nbench.} =
+    flags: UpdateFlags = {}): phase0.BeaconState =
   ## Get the genesis ``BeaconState``.
   ##
   ## Before the beacon chain starts, validators will register in the Eth1 chain
@@ -627,7 +626,7 @@ proc check_attestation*(
 proc process_attestation*(
     state: var ForkyBeaconState, attestation: SomeAttestation, flags: UpdateFlags,
     base_reward_per_increment: Gwei, cache: var StateCache):
-    Result[void, cstring] {.nbench.} =
+    Result[void, cstring] =
   # In the spec, attestation validation is mixed with state mutation, so here
   # we've split it into two functions so that the validation logic can be
   # reused when looking for suitable blocks to include in attestations.
@@ -49,8 +49,7 @@ import
   ./datatypes/[phase0, altair, merge],
   "."/[
     beaconstate, eth2_merkleization, forks, helpers, signatures,
-    state_transition_block, state_transition_epoch, validator],
-  ../../nbench/bench_lab
+    state_transition_block, state_transition_epoch, validator]

 export extras, phase0, altair

@@ -58,9 +57,9 @@ type Foo = phase0.SignedBeaconBlock | altair.SignedBeaconBlock | phase0.TrustedS

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
 proc verify_block_signature(
-    #state: ForkyBeaconState, signed_block: SomeSomeSignedBeaconBlock): bool {.nbench.} =
-    state: ForkyBeaconState, signed_block: Foo): bool {.nbench.} =
-    #state: ForkyBeaconState, signed_block: phase0.SomeSignedBeaconBlock | altair.SomeSignedBeaconBlock): bool {.nbench.} =
+    #state: ForkyBeaconState, signed_block: SomeSomeSignedBeaconBlock): bool =
+    state: ForkyBeaconState, signed_block: Foo): bool =
+    #state: ForkyBeaconState, signed_block: phase0.SomeSignedBeaconBlock | altair.SomeSignedBeaconBlock): bool =
   let
     proposer_index = signed_block.message.proposer_index
   if proposer_index >= state.validators.lenu64:
@@ -135,7 +134,7 @@ type

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
 func process_slot*(
-    state: var ForkyBeaconState, pre_state_root: Eth2Digest) {.nbench.} =
+    state: var ForkyBeaconState, pre_state_root: Eth2Digest) =
   # `process_slot` is the first stage of per-slot processing - it is run for
   # every slot, including epoch slots - it does not however update the slot
   # number! `pre_state_root` refers to the state root of the incoming
@@ -165,7 +164,7 @@ func clear_epoch_from_cache(cache: var StateCache, epoch: Epoch) =
 proc advance_slot(
     cfg: RuntimeConfig,
     state: var ForkyBeaconState, previous_slot_state_root: Eth2Digest,
-    flags: UpdateFlags, cache: var StateCache, info: var ForkyEpochInfo) {.nbench.} =
+    flags: UpdateFlags, cache: var StateCache, info: var ForkyEpochInfo) =
   # Do the per-slot and potentially the per-epoch processing, then bump the
   # slot number - we've now arrived at the slot state on top of which a block
   # optionally can be applied.
@@ -221,7 +220,7 @@ proc maybeUpgradeState*(

 proc process_slots*(
     cfg: RuntimeConfig, state: var ForkedHashedBeaconState, slot: Slot,
-    cache: var StateCache, info: var ForkedEpochInfo, flags: UpdateFlags): bool {.nbench.} =
+    cache: var StateCache, info: var ForkedEpochInfo, flags: UpdateFlags): bool =
   if not (getStateField(state, slot) < slot):
     if slotProcessed notin flags or getStateField(state, slot) != slot:
       notice "Unusual request for a slot in the past",
@@ -255,7 +254,7 @@ proc state_transition_block_aux(
     altair.SigVerifiedSignedBeaconBlock | altair.TrustedSignedBeaconBlock |
     merge.TrustedSignedBeaconBlock | merge.SigVerifiedSignedBeaconBlock |
     merge.SignedBeaconBlock,
-    cache: var StateCache, flags: UpdateFlags): bool {.nbench.} =
+    cache: var StateCache, flags: UpdateFlags): bool =
   # Block updates - these happen when there's a new block being suggested
   # by the block proposer. Every actor in the network will update its state
   # according to the contents of this block - but first they will validate
@@ -307,7 +306,7 @@ proc state_transition_block*(
     altair.TrustedSignedBeaconBlock | merge.TrustedSignedBeaconBlock |
     merge.SigVerifiedSignedBeaconBlock | merge.SignedBeaconBlock,
     cache: var StateCache, flags: UpdateFlags,
-    rollback: RollbackForkedHashedProc): bool {.nbench.} =
+    rollback: RollbackForkedHashedProc): bool =
   ## `rollback` is called if the transition fails and the given state has been
   ## partially changed. If a temporary state was given to `state_transition`,
   ## it is safe to use `noRollback` and leave it broken, else the state
@@ -335,7 +334,7 @@ proc state_transition*(
     altair.TrustedSignedBeaconBlock | merge.TrustedSignedBeaconBlock |
     merge.SignedBeaconBlock,
     cache: var StateCache, info: var ForkedEpochInfo, flags: UpdateFlags,
-    rollback: RollbackForkedHashedProc): bool {.nbench.} =
+    rollback: RollbackForkedHashedProc): bool =
   ## Apply a block to the state, advancing the slot counter as necessary. The
   ## given state must be of a lower slot, or, in case the `slotProcessed` flag
   ## is set, can be the slot state of the same slot as the block (where the
@@ -24,15 +24,14 @@ import
   chronicles, metrics,
   ../extras,
   ./datatypes/[phase0, altair, merge],
-  "."/[beaconstate, eth2_merkleization, helpers, validator, signatures],
-  ../../nbench/bench_lab
+  "."/[beaconstate, eth2_merkleization, helpers, validator, signatures]

 export extras, phase0, altair

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#block-header
 func process_block_header*(
     state: var ForkyBeaconState, blck: SomeSomeBeaconBlock, flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.} =
+    cache: var StateCache): Result[void, cstring] =
   # Verify that the slots match
   if not (blck.slot == state.slot):
     return err("process_block_header: slot mismatch")
@@ -75,7 +74,7 @@ func `xor`[T: array](a, b: T): T =
 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#randao
 proc process_randao(
     state: var ForkyBeaconState, body: SomeSomeBeaconBlockBody, flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.} =
+    cache: var StateCache): Result[void, cstring] =
   let
     proposer_index = get_beacon_proposer_index(state, cache)

@@ -106,7 +105,7 @@ proc process_randao(
   ok()

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#eth1-data
-func process_eth1_data(state: var ForkyBeaconState, body: SomeSomeBeaconBlockBody): Result[void, cstring] {.nbench.}=
+func process_eth1_data(state: var ForkyBeaconState, body: SomeSomeBeaconBlockBody): Result[void, cstring]=
   if not state.eth1_data_votes.add body.eth1_data:
     # Count is reset in process_final_updates, so this should never happen
     return err("process_eth1_data: no more room for eth1 data")
@@ -127,7 +126,7 @@ func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
 proc check_proposer_slashing*(
     state: var ForkyBeaconState, proposer_slashing: SomeProposerSlashing,
     flags: UpdateFlags):
-    Result[void, cstring] {.nbench.} =
+    Result[void, cstring] =

   let
     header_1 = proposer_slashing.signed_header_1.message
@@ -177,7 +176,7 @@ proc process_proposer_slashing*(
     cfg: RuntimeConfig, state: var ForkyBeaconState,
     proposer_slashing: SomeProposerSlashing, flags: UpdateFlags,
     cache: var StateCache):
-    Result[void, cstring] {.nbench.} =
+    Result[void, cstring] =
   ? check_proposer_slashing(state, proposer_slashing, flags)
   slash_validator(
     cfg, state,
@@ -202,7 +201,7 @@ proc check_attester_slashing*(
     state: var ForkyBeaconState,
     attester_slashing: SomeAttesterSlashing,
     flags: UpdateFlags
-    ): Result[seq[ValidatorIndex], cstring] {.nbench.} =
+    ): Result[seq[ValidatorIndex], cstring] =
   let
     attestation_1 = attester_slashing.attestation_1
     attestation_2 = attester_slashing.attestation_2
@@ -244,7 +243,7 @@ proc process_attester_slashing*(
     attester_slashing: SomeAttesterSlashing,
     flags: UpdateFlags,
     cache: var StateCache
-    ): Result[void, cstring] {.nbench.} =
+    ): Result[void, cstring] =
   let attester_slashing_validity =
     check_attester_slashing(state, attester_slashing, flags)

@@ -259,7 +258,7 @@ proc process_attester_slashing*(
 proc process_deposit*(cfg: RuntimeConfig,
     state: var ForkyBeaconState,
     deposit: Deposit,
-    flags: UpdateFlags): Result[void, cstring] {.nbench.} =
+    flags: UpdateFlags): Result[void, cstring] =
   ## Process an Eth1 deposit, registering a validator or increasing its balance.

   # Verify the Merkle branch
@@ -328,7 +327,7 @@ proc check_voluntary_exit*(
     cfg: RuntimeConfig,
     state: ForkyBeaconState,
     signed_voluntary_exit: SomeSignedVoluntaryExit,
-    flags: UpdateFlags): Result[void, cstring] {.nbench.} =
+    flags: UpdateFlags): Result[void, cstring] =

   let voluntary_exit = signed_voluntary_exit.message

@@ -389,7 +388,7 @@ proc process_voluntary_exit*(
     state: var ForkyBeaconState,
     signed_voluntary_exit: SomeSignedVoluntaryExit,
     flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.} =
+    cache: var StateCache): Result[void, cstring] =
   ? check_voluntary_exit(cfg, state, signed_voluntary_exit, flags)
   initiate_validator_exit(
     cfg, state, signed_voluntary_exit.message.validator_index.ValidatorIndex,
@@ -402,7 +401,7 @@ proc process_operations(cfg: RuntimeConfig,
     body: SomeSomeBeaconBlockBody,
     base_reward_per_increment: Gwei,
     flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.} =
+    cache: var StateCache): Result[void, cstring] =
   # Verify that outstanding deposits are processed up to the maximum number of
   # deposits
   let
@@ -430,7 +429,7 @@ proc process_operations(cfg: RuntimeConfig,
 proc process_sync_aggregate*(
     state: var (altair.BeaconState | merge.BeaconState),
     aggregate: SomeSyncAggregate, total_active_balance: Gwei, cache: var StateCache):
-    Result[void, cstring] {.nbench.} =
+    Result[void, cstring] =
   # Verify sync committee aggregate signature signing over the previous slot
   # block root
   let
@@ -491,7 +490,7 @@ proc process_sync_aggregate*(
 # https://github.com/ethereum/consensus-specs/blob/v1.1.4/specs/merge/beacon-chain.md#process_execution_payload
 proc process_execution_payload*(
     state: var merge.BeaconState, payload: ExecutionPayload,
-    execute_payload: ExecutePayload): Result[void, cstring] {.nbench.} =
+    execute_payload: ExecutePayload): Result[void, cstring] =
   ## Verify consistency of the parent hash with respect to the previous
   ## execution payload header
   if is_merge_complete(state):
@@ -537,7 +536,7 @@ type SomePhase0Block =
 proc process_block*(
     cfg: RuntimeConfig,
     state: var phase0.BeaconState, blck: SomePhase0Block, flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.}=
+    cache: var StateCache): Result[void, cstring]=
   ## When there's a new block, we need to verify that the block is sane and
   ## update the state accordingly - the state is left in an unknown state when
   ## block application fails (!)
@@ -552,13 +551,13 @@ proc process_block*(
 proc process_block*(
     cfg: RuntimeConfig,
     state: var altair.BeaconState, blck: SomePhase0Block, flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.} =
+    cache: var StateCache): Result[void, cstring] =
   err("process_block: Altair state with Phase 0 block")

 proc process_block*(
     cfg: RuntimeConfig,
     state: var merge.BeaconState, blck: SomePhase0Block, flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.} =
+    cache: var StateCache): Result[void, cstring] =
   err("process_block: Merge state with Phase 0 block")

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/altair/beacon-chain.md#block-processing
@@ -569,7 +568,7 @@ type SomeAltairBlock =
 proc process_block*(
     cfg: RuntimeConfig,
     state: var altair.BeaconState, blck: SomeAltairBlock, flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.}=
+    cache: var StateCache): Result[void, cstring]=
   ## When there's a new block, we need to verify that the block is sane and
   ## update the state accordingly - the state is left in an unknown state when
   ## block application fails (!)
@@ -596,7 +595,7 @@ type SomeMergeBlock =
 proc process_block*(
     cfg: RuntimeConfig,
     state: var merge.BeaconState, blck: SomeMergeBlock, flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.}=
+    cache: var StateCache): Result[void, cstring]=
   ## When there's a new block, we need to verify that the block is sane and
   ## update the state accordingly - the state is left in an unknown state when
   ## block application fails (!)
@@ -624,23 +623,23 @@ proc process_block*(
 proc process_block*(
     cfg: RuntimeConfig,
     state: var phase0.BeaconState, blck: SomeAltairBlock, flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.}=
+    cache: var StateCache): Result[void, cstring]=
   err("process_block: Phase 0 state with Altair block")

 proc process_block*(
     cfg: RuntimeConfig,
     state: var phase0.BeaconState, blck: SomeMergeBlock, flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.}=
+    cache: var StateCache): Result[void, cstring]=
   err("process_block: Phase 0 state with Merge block")

 proc process_block*(
     cfg: RuntimeConfig,
     state: var altair.BeaconState, blck: SomeMergeBlock, flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.}=
+    cache: var StateCache): Result[void, cstring]=
   err("process_block: Altair state with Merge block")

 proc process_block*(
     cfg: RuntimeConfig,
     state: var merge.BeaconState, blck: SomeAltairBlock, flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] {.nbench.}=
+    cache: var StateCache): Result[void, cstring]=
   err("process_block: Merge state with Altair block")
@@ -25,8 +25,7 @@ import
   stew/bitops2, chronicles,
   ../extras,
   ./datatypes/[phase0, altair, merge],
-  "."/[beaconstate, eth2_merkleization, helpers, validator],
-  ../../nbench/bench_lab
+  "."/[beaconstate, eth2_merkleization, helpers, validator]

 export extras, phase0, altair

@@ -236,7 +235,7 @@ func is_unslashed_participating_index(

 # https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#justification-and-finalization
 proc process_justification_and_finalization*(state: var phase0.BeaconState,
-    balances: TotalBalances, flags: UpdateFlags = {}) {.nbench.} =
+    balances: TotalBalances, flags: UpdateFlags = {}) =
   # Initial FFG checkpoint values have a `0x00` stub for `root`.
   # Skip FFG updates in the first two epochs to avoid corner cases that might
   # result in modifying this stub.
@@ -429,7 +428,7 @@ proc weigh_justification_and_finalization(state: var (altair.BeaconState | merge

 proc process_justification_and_finalization*(state: var (altair.BeaconState | merge.BeaconState),
     balances: UnslashedParticipatingBalances,
-    flags: UpdateFlags = {}) {.nbench.} =
+    flags: UpdateFlags = {}) =
   # Initial FFG checkpoint values have a `0x00` stub for `root`.
   # Skip FFG updates in the first two epochs to avoid corner cases that might
   # result in modifying this stub.
@@ -720,7 +719,7 @@ iterator get_inactivity_penalty_deltas*(

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#rewards-and-penalties-1
 func process_rewards_and_penalties(
-    state: var phase0.BeaconState, info: var phase0.EpochInfo) {.nbench.} =
+    state: var phase0.BeaconState, info: var phase0.EpochInfo) =
   # No rewards are applied at the end of `GENESIS_EPOCH` because rewards are
   # for work done in the previous epoch
   doAssert info.validators.len == state.validators.len
@@ -745,7 +744,7 @@ func process_rewards_and_penalties(
 func process_rewards_and_penalties(
     cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState),
     info: var altair.EpochInfo)
-    {.nbench.} =
+    =
   if get_current_epoch(state) == GENESIS_EPOCH:
     return

@@ -777,7 +776,7 @@ func process_rewards_and_penalties(

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#registry-updates
 func process_registry_updates*(
-    cfg: RuntimeConfig, state: var ForkyBeaconState, cache: var StateCache) {.nbench.} =
+    cfg: RuntimeConfig, state: var ForkyBeaconState, cache: var StateCache) =
   ## Process activation eligibility and ejections

   # Make visible, e.g.,
@@ -828,7 +827,7 @@ func process_registry_updates*(
 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#slashings
 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/altair/beacon-chain.md#slashings
 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#slashings
-func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) {.nbench.} =
+func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) =
   let
     epoch = get_current_epoch(state)
     multiplier =
@@ -858,7 +857,7 @@ func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) {.nben
     decrease_balance(state, index.ValidatorIndex, penalty)

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#eth1-data-votes-updates
-func process_eth1_data_reset*(state: var ForkyBeaconState) {.nbench.} =
+func process_eth1_data_reset*(state: var ForkyBeaconState) =
   let next_epoch = get_current_epoch(state) + 1

   # Reset eth1 data votes
@@ -866,7 +865,7 @@ func process_eth1_data_reset*(state: var ForkyBeaconState) {.nbench.} =
     state.eth1_data_votes = default(type state.eth1_data_votes)

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#effective-balances-updates
-func process_effective_balance_updates*(state: var ForkyBeaconState) {.nbench.} =
+func process_effective_balance_updates*(state: var ForkyBeaconState) =
   # Update effective balances with hysteresis
   for index in 0..<state.validators.len:
     let balance = state.balances.asSeq()[index]
@@ -888,14 +887,14 @@ func process_effective_balance_updates*(state: var ForkyBeaconState) {.nbench.}
       state.validators[index].effective_balance = new_effective_balance

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#slashings-balances-updates
-func process_slashings_reset*(state: var ForkyBeaconState) {.nbench.} =
+func process_slashings_reset*(state: var ForkyBeaconState) =
   let next_epoch = get_current_epoch(state) + 1

   # Reset slashings
   state.slashings[int(next_epoch mod EPOCHS_PER_SLASHINGS_VECTOR)] = 0.Gwei

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#randao-mixes-updates
-func process_randao_mixes_reset*(state: var ForkyBeaconState) {.nbench.} =
+func process_randao_mixes_reset*(state: var ForkyBeaconState) =
   let
     current_epoch = get_current_epoch(state)
     next_epoch = current_epoch + 1
@@ -905,7 +904,7 @@ func process_randao_mixes_reset*(state: var ForkyBeaconState) {.nbench.} =
     get_randao_mix(state, current_epoch)

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#historical-roots-updates
-func process_historical_roots_update*(state: var ForkyBeaconState) {.nbench.} =
+func process_historical_roots_update*(state: var ForkyBeaconState) =
   ## Set historical root accumulator
   let next_epoch = get_current_epoch(state) + 1

@@ -919,7 +918,7 @@ func process_historical_roots_update*(state: var ForkyBeaconState) {.nbench.} =
     raiseAssert "no more room for historical roots, so long and thanks for the fish!"

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#participation-records-rotation
-func process_participation_record_updates*(state: var phase0.BeaconState) {.nbench.} =
+func process_participation_record_updates*(state: var phase0.BeaconState) =
   # Rotate current/previous epoch attestations - using swap avoids copying all
   # elements using a slow genericSeqAssign
   state.previous_epoch_attestations.clear()
@@ -983,7 +982,7 @@ func process_inactivity_updates*(
 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#epoch-processing
 proc process_epoch*(
     cfg: RuntimeConfig, state: var phase0.BeaconState, flags: UpdateFlags,
-    cache: var StateCache, info: var phase0.EpochInfo) {.nbench.} =
+    cache: var StateCache, info: var phase0.EpochInfo) =
   let currentEpoch = get_current_epoch(state)
   trace "process_epoch",
     current_epoch = currentEpoch
@@ -1055,7 +1054,7 @@ func init*(
 proc process_epoch*(
     cfg: RuntimeConfig, state: var (altair.BeaconState | merge.BeaconState),
     flags: UpdateFlags, cache: var StateCache, info: var altair.EpochInfo)
-    {.nbench.} =
+    =
   let currentEpoch = get_current_epoch(state)
   trace "process_epoch",
     current_epoch = currentEpoch
@@ -1,79 +0,0 @@
# Nimbus-bench

Nbench is a profiler dedicated to the Nimbus Beacon Chain.

It is built as a domain specific profiler that aims to be
as unintrusive as possible while providing complementary reports
to dedicated tools like ``perf``, ``Apple Instruments`` or ``Intel Vtune``
that allows you to dive deep down to a specific line or assembly instructions.

In particular, those tools cannot tell you that your cryptographic subsystem
or your parsing routines or your random number generation should be revisited,
may sample at to high a resolution (millisecond) instead of per-function statistics,
and are much less useful without debugging symbols which requires a lot of space.
I.e. ``perf`` and other generic profiler tools give you the laser-thin focused pictures
while nbench strives to give you the big picture.

Features
- by default nbench will collect the number of calls and time spent in
  each function.
- like ncli or nfuzz, you can provide nbench isolated scenarios in SSZ format
  to analyze Nimbus behaviour.

## Usage

```
nim c -d:const_preset=mainnet -d:nbench -d:release -o:build/nbench nbench/nbench.nim
export SCENARIOS=vendor/nim-eth2-scenarios/tests-v0.11.1/mainnet/phase0

# Full state transition
build/nbench cmdFullStateTransition -d="${SCENARIOS}"/sanity/blocks/pyspec_tests/voluntary_exit/ -q=2

# Slot processing
build/nbench cmdSlotProcessing -d="${SCENARIOS}"/sanity/slots/pyspec_tests/slots_1

# Justification-Finalisation
build/nbench cmdEpochProcessing --epochProcessingCat=catJustificationFinalization -d="${SCENARIOS}"/epoch_processing/justification_and_finalization/pyspec_tests/234_ok_support/

# Registry updates
build/nbench cmdEpochProcessing --epochProcessingCat=catRegistryUpdates -d="${SCENARIOS}"/epoch_processing/registry_updates/pyspec_tests/activation_queue_efficiency/

# Slashings
build/nbench cmdEpochProcessing --epochProcessingCat=catSlashings -d="${SCENARIOS}"/epoch_processing/slashings/pyspec_tests/max_penalties/

# Block header processing
build/nbench cmdBlockProcessing --blockProcessingCat=catBlockHeader -d="${SCENARIOS}"/operations/block_header/pyspec_tests/proposer_slashed/

# Proposer slashing
build/nbench cmdBlockProcessing --blockProcessingCat=catProposerSlashings -d="${SCENARIOS}"/operations/proposer_slashing/pyspec_tests/invalid_proposer_index/

# Attester slashing
build/nbench cmdBlockProcessing --blockProcessingCat=catAttesterSlashings -d="${SCENARIOS}"/operations/attester_slashing/pyspec_tests/success_surround/

# Attestation processing
build/nbench cmdBlockProcessing --blockProcessingCat=catAttestations -d="${SCENARIOS}"/operations/attestation/pyspec_tests/success_multi_proposer_index_iterations/

# Deposit processing
build/nbench cmdBlockProcessing --blockProcessingCat=catDeposits -d="${SCENARIOS}"/operations/deposit/pyspec_tests/new_deposit_max/

# Voluntary exit
build/nbench cmdBlockProcessing --blockProcessingCat=catVoluntaryExits -d="${SCENARIOS}"/operations/voluntary_exit/pyspec_tests/validator_exit_in_future/
```

## Running the whole Eth2.0 specs test suite

Warning: this is a proof-of-concept, there is a slight degree of interleaving in output.
Furthermore benchmarks are run in parallel and might interfere which each other.

```
nim c -d:const_preset=mainnet -d:nbench -d:release -o:build/nbench nbench/nbench.nim
nim c -o:build/nbench_tests nbench/nbench_spec_scenarios.nim
build/nbench_tests --nbench=build/nbench --tests=vendor/nim-eth2-scenarios/tests-v0.11.1/mainnet/
```

## TODO Reporting
- Dumping as CSV files also for archival, perf regression suite and/or data mining.
- Piggybacking on eth-metrics and can report over Prometheus or StatsD.
- you can augment it via label pragmas that can be applied file-wide
  to tag "cryptography", "block_transition", "database" to have a global view
  of the system.
@@ -1,146 +0,0 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  # Standard lib
  macros, std/[monotimes, times],
  # Internal
  platforms/platforms

# Bench laboratory
# --------------------------------------------------
#
# This file defines support data structures to enable profiling.

# Utils
# --------------------------------------------------
const someGcc = defined(gcc) or defined(llvm_gcc) or defined(clang) or defined(icc)
const hasThreadSupport = defined(threads)

proc atomicInc*(memLoc: var int64, x = 1'i64): int64 =
  when someGcc and hasThreadSupport:
    result = atomicAddFetch(memLoc.addr, x, ATOMIC_RELAXED)
  elif defined(vcc) and hasThreadSupport:
    result = addAndFetch(memLoc.addr, x)
    result += x
  else:
    memloc += x
    result = memLoc

# Types
# --------------------------------------------------

type
  Metadata* = object
    procName*: string
    module: string
    package: string
    tag: string # Can be change to multi-tags later
    # TODO - replace by eth-metrics once we figure out a CSV/JSON/Console backend
    numCalls*: int64
    cumulatedTimeNs*: int64 # in nanoseconds
    when SupportsGetTicks:
      cumulatedCycles*: int64

var ctBenchMetrics*{.compileTime.}: seq[Metadata]
  ## Metrics are collected here, this is just a temporary holder of compileTime values
  ## Unfortunately the "seq" is emptied when passing the compileTime/runtime boundaries
  ## due to Nim bugs

var BenchMetrics*: seq[Metadata]
  ## We can't directly use it at compileTime because it doesn't exist.
  ## We need `BenchMetrics = static(ctBenchMetrics)`
  ## To transfer the compileTime content to runtime at an opportune time.

template ntag(tagname: string){.pragma.}
  ## This will allow tagging proc in the future with
  ## "crypto", "ssz", "block_transition", "epoch_transition" ...

# Symbols
# --------------------------------------------------

template fnEntry(name: string, id: int, startTime, startCycle: untyped): untyped =
  ## Bench tracing to insert on function entry
  {.noSideEffect, gcsafe.}:
    discard BenchMetrics[id].numCalls.atomicInc()
    let startTime = getMonoTime()
    when SupportsGetTicks:
      let startCycle = getTicks()
    else:
      let startCycle = 0

const nbench_trace {.booldefine.} = off # For manual "debug-echo"-style timing.
when nbench_trace:
  # strformat doesn't work in templates.
  from strutils import alignLeft, formatFloat

template fnExit(name: string, id: int, startTime, startCycle: untyped): untyped =
  ## Bench tracing to insert before each function exit
  {.noSideEffect, gcsafe.}:
    when SupportsGetTicks:
      let stopCycle = getTicks()
    let stopTime = getMonoTime()
    when SupportsGetTicks:
      let elapsedCycles = stopCycle - startCycle
    let elapsedTime = inNanoseconds(stopTime - startTime)

    discard BenchMetrics[id].cumulatedTimeNs.atomicInc(elapsedTime)
    when SupportsGetTicks:
      discard BenchMetrics[id].cumulatedCycles.atomicInc(elapsedCycles)

    when nbench_trace:
      # Advice: Use "when name == relevantProc" to isolate specific procedures.
      # strformat doesn't work in templates.
      when SupportsGetTicks:
        echo static(alignLeft(name, 50)),
          "Time (ms): ", alignLeft(formatFloat(elapsedTime.float64 * 1e-6, precision=3), 10),
          "Cycles (billions): ", formatFloat(elapsedCycles.float64 * 1e-9, precision=3)
      else:
        echo static(alignLeft(name, 50)),
          "Time (ms): ", alignLeft(formatFloat(elapsedTime.float64 * 1e-6, precision=3), 10)

macro nbenchAnnotate(procAst: untyped): untyped =
  procAst.expectKind({nnkProcDef, nnkFuncDef})

  let id = ctBenchMetrics.len
  let name = procAst[0]
  # TODO, get the module and the package the proc is coming from
  # and the tag "crypto", "ssz", "block_transition", "epoch_transition" ...

  ctBenchMetrics.add Metadata(procName: $name)
  var newBody = newStmtList()
  let startTime = genSym(nskLet, "nbench_" & $name & "_startTime_")
  let startCycle = genSym(nskLet, "nbench_" & $name & "_startCycles_")
  newBody.add getAst(fnEntry($name, id, startTime, startCycle))
  newbody.add nnkDefer.newTree(getAst(fnExit($name, id, startTime, startCycle)))
  newBody.add procAst.body

  procAst.body = newBody
  result = procAst

template nbench*(procBody: untyped): untyped =
  when defined(nbench):
    nbenchAnnotate(procBody)
  else:
    procBody

# Sanity checks
# ---------------------------------------------------

when isMainModule:

  expandMacros:
    proc foo(x: int): int{.nbench.} =
      echo "Hey hey hey"
      result = x

  BenchMetrics = static(ctBenchMetrics)

  echo BenchMetrics
  discard foo(10)
  echo BenchMetrics
  doAssert BenchMetrics[0].numCalls == 1
@@ -1,5 +0,0 @@
import scenarios, confutils

let scenario = ScenarioConf.load()

echo scenario.attestation
@@ -1,129 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  # Standard library
  os,
  # Status libraries
  confutils, serialization,
  # Beacon-chain
  ../beacon_chain/spec/datatypes/base,
  # Bench specific
  scenarios, bench_lab, reports

# Example:
# build/nbench cmdFullStateTransition -d

# Nimbus Bench
# --------------------------------------------------
#
# Run select scenarios and get statistics on Nimbus runtime behaviour

when not defined(nbench):
  {.error: "`nbench` requires `-d:nbench` flag to enable tracing on procedures.".}

proc main() =
  # TODO versioning
  echo "Nimbus bench, preset \"", const_preset, '\"'

  BenchMetrics = static(ctBenchMetrics) # Make compile-time data available at runtime
  let scenario = ScenarioConf.load()

  case scenario.cmd
  of cmdFullStateTransition:
    runFullTransition(
      scenario.scenarioDir.string,
      scenario.preState,
      scenario.blocksPrefix,
      scenario.blocksQty,
      scenario.skipBLS
    )
  of cmdSlotProcessing:
    runProcessSlots(
      scenario.scenarioDir.string,
      scenario.preState,
      scenario.numSlots
    )
  of cmdBlockProcessing:
    case scenario.blockProcessingCat
    of catBlockHeader:
      runProcessBlockHeader(
        scenario.scenarioDir.string,
        scenario.preState,
        "block", # Pending https://github.com/status-im/nim-confutils/issues/11
        # scenario.attesterSlashing
        scenario.skipBLS
      )
    of catProposerSlashings:
      runProcessProposerSlashing(
        scenario.scenarioDir.string,
        scenario.preState,
        "proposer_slashing", # Pending https://github.com/status-im/nim-confutils/issues/11
        # scenario.attesterSlashing
        scenario.skipBLS
      )
    of catAttesterSlashings:
      runProcessAttesterSlashing(
        scenario.scenarioDir.string,
        scenario.preState,
        "attester_slashing", # Pending https://github.com/status-im/nim-confutils/issues/11
        # scenario.attesterSlashing
        scenario.skipBLS
      )
    of catAttestations:
      runProcessAttestation(
        scenario.scenarioDir.string,
        scenario.preState,
        "attestation", # Pending https://github.com/status-im/nim-confutils/issues/11
        # scenario.attestation,
        scenario.skipBLS
      )
    of catDeposits:
      runProcessDeposit(
        scenario.scenarioDir.string,
        scenario.preState,
        "deposit", # Pending https://github.com/status-im/nim-confutils/issues/11
        # scenario.deposit,
        scenario.skipBLS
      )
    of catVoluntaryExits:
      runProcessVoluntaryExits(
        scenario.scenarioDir.string,
        scenario.preState,
        "voluntary_exit", # Pending https://github.com/status-im/nim-confutils/issues/11
        # scenario.voluntary_exit,
        scenario.skipBLS
      )
    else:
      quit "Unsupported"
  of cmdEpochProcessing:
    case scenario.epochProcessingCat
    of catJustificationFinalization:
      runProcessJustificationFinalization(
        scenario.scenarioDir.string,
        scenario.preState
      )
    of catRegistryUpdates:
      runProcessRegistryUpdates(
        scenario.scenarioDir.string,
        scenario.preState
      )
    of catSlashings:
      runProcessSlashings(
        scenario.scenarioDir.string,
        scenario.preState
      )
    else:
      quit "Unsupported"

  # TODO: Nimbus not fine-grained enough in UpdateFlags
  let flags = if scenario.skipBLS: "[skipBLS, skipStateRootVerification]"
              else: "[withBLS, withStateRootVerification]"
  reportCli(BenchMetrics, const_preset, flags)

when isMainModule:
  main()
@@ -1 +0,0 @@
-d:nbench
@@ -1,90 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  # Standard library
  os, osproc, strformat,
  # Status libraries
  confutils

# Nimbus Bench Batch
# --------------------------------------------------
# This script calls Nimbus bench in parallel batch
# to run a series of benchmarks from the spec SSZ tests

type
  CmdLists = seq[string]

proc collectTarget(cmds: var CmdLists, nbench, name, cmd, cat, path: string) =
  echo "----------------------------------------"
  echo "Collecting ", name, " transitions"
  echo "----------------------------------------"
  for folder in walkDirRec(path, yieldFilter = {pcDir}, relative = true):
    echo "Found: ", folder
    var cat = cat
    if cmd == "cmdBlockProcessing":
      cat = "--blockProcessingCat=" & cat
    elif cmd == "cmdEpochProcessing":
      cat = "--epochProcessingCat=" & cat
    cmds.add &"{nbench} {cmd} {cat} -d={path/folder}"

proc collectBenchTargets(nbench, basePath: string): CmdLists =
  # State processing
  # -------------------------------------------------------------------------
  block: # Full state transitions
    echo "----------------------------------------"
    echo "Collecting full state transitions"
    echo "----------------------------------------"
    let path = basePath/"phase0"/"sanity"/"blocks"/"pyspec_tests"
    for folder in walkDirRec(path, yieldFilter = {pcDir}, relative = true):
      var countBlocks = 0
      for _ in walkFiles(path/folder/"blocks_*.ssz"):
        inc countBlocks
      echo "Found: ", folder, " with ", countBlocks, " blocks"
      result.add &"{nbench} cmdFullStateTransition -d={path/folder} -q={$countBlocks}"
  # Slot processing
  # -------------------------------------------------------------------------
  block: # Slot processing
    let path = basePath/"phase0"/"sanity"/"slots"/"pyspec_tests"
    result.collectTarget(nbench, "slot", "cmdSlotProcessing", "", path)
  # Epoch processing
  # -------------------------------------------------------------------------
  block: # Justification-Finalization
    let path = basePath/"phase0"/"epoch_processing"/"justification_and_finalization"/"pyspec_tests"
    result.collectTarget(nbench, "justification_and_finalization", "cmdEpochProcessing", "catJustificationFinalization", path)
  block: # Registry updates
    let path = basePath/"phase0"/"epoch_processing"/"justification_and_finalization"/"pyspec_tests"
    result.collectTarget(nbench, "registry_updates", "cmdEpochProcessing", "catRegistryUpdates", path)
  block: # Slashings
    let path = basePath/"phase0"/"epoch_processing"/"slashings"/"pyspec_tests"
    result.collectTarget(nbench, "slashings", "cmdEpochProcessing", "catSlashings", path)
  # Block processing
  # -------------------------------------------------------------------------
  block: # Attestation
    let path = basePath/"phase0"/"operations"/"attestation"/"pyspec_tests"
    result.collectTarget(nbench, "attestation", "cmdBlockProcessing", "catAttestations", path)
  block: # Attester_slashing
    let path = basePath/"phase0"/"operations"/"attester_slashing"/"pyspec_tests"
    result.collectTarget(nbench, "attester_slashing", "cmdBlockProcessing", "catAttesterSlashings", path)
  block: # block_header
    let path = basePath/"phase0"/"operations"/"block_header"/"pyspec_tests"
    result.collectTarget(nbench, "block_header", "cmdBlockProcessing", "catBlockHeader", path)
  block: # deposit
    let path = basePath/"phase0"/"operations"/"deposit"/"pyspec_tests"
    result.collectTarget(nbench, "deposit", "cmdBlockProcessing", "catDeposits", path)
  block: # proposer_slashing
    let path = basePath/"phase0"/"operations"/"proposer_slashing"/"pyspec_tests"
    result.collectTarget(nbench, "proposer_slashing", "cmdBlockProcessing", "catProposerSlashings", path)
  block: # voluntary_exit
    let path = basePath/"phase0"/"operations"/"voluntary_exit"/"pyspec_tests"
    result.collectTarget(nbench, "voluntary_exit", "cmdBlockProcessing", "catVoluntaryExits", path)

cli do(nbench: string, tests: string):
  let cmdLists = collectBenchTargets(nbench, tests)
  echo "\n========================================================\n"
  let err = execProcesses(cmdLists)
  quit err
@@ -1,28 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

const GccCompatible = defined(gcc) or defined(clang)
const x86arch = defined(i386) or defined(amd64)

const supports_x86_inline_asm = block:
  x86arch and (
    (
      GccCompatible and not defined(windows)
    ) or (
      defined(vcc)
    )
  )

when supports_x86_inline_asm:
  import x86
  export getTicks, cpuName

  const SupportsCPUName* = true
  const SupportsGetTicks* = true
else:
  const SupportsCPUName* = false
  const SupportsGetTicks* = false
@@ -1,127 +0,0 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# Cpu Name
# -------------------------------------------------------

{.passC:"-std=gnu99".} # TODO may conflict with milagro "-std=c99"

proc cpuID(eaxi, ecxi: int32): tuple[eax, ebx, ecx, edx: int32] =
  when defined(vcc):
    proc cpuidVcc(cpuInfo: ptr int32; functionID: int32)
      {.importc: "__cpuidex", header: "intrin.h".}
    cpuidVcc(addr result.eax, eaxi, ecxi)
  else:
    var (eaxr, ebxr, ecxr, edxr) = (0'i32, 0'i32, 0'i32, 0'i32)
    asm """
      cpuid
      :"=a"(`eaxr`), "=b"(`ebxr`), "=c"(`ecxr`), "=d"(`edxr`)
      :"a"(`eaxi`), "c"(`ecxi`)"""
    (eaxr, ebxr, ecxr, edxr)

proc cpuName*(): string =
  var leaves {.global.} = cast[array[48, char]]([
    cpuID(eaxi = 0x80000002'i32, ecxi = 0),
    cpuID(eaxi = 0x80000003'i32, ecxi = 0),
    cpuID(eaxi = 0x80000004'i32, ecxi = 0)])
  result = $cast[cstring](addr leaves[0])

# Counting cycles
# -------------------------------------------------------

# From Linux
#
# The RDTSC instruction is not ordered relative to memory
# access. The Intel SDM and the AMD APM are both vague on this
# point, but empirically an RDTSC instruction can be
# speculatively executed before prior loads. An RDTSC
# immediately after an appropriate barrier appears to be
# ordered as a normal load, that is, it provides the same
# ordering guarantees as reading from a global memory location
# that some other imaginary CPU is updating continuously with a
# time stamp.
#
# From Intel SDM
# https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/ia-32-ia-64-benchmark-code-execution-paper.pdf

proc getTicks*(): int64 {.inline.} =
  when defined(vcc):
    proc rdtsc(): int64 {.sideeffect, importc: "__rdtsc", header: "<intrin.h>".}
    proc lfence() {.importc: "__mm_lfence", header: "<intrin.h>".}

    lfence()
    return rdtsc()

  else:
    when defined(amd64):
      var lo, hi: int64
      # TODO: Provide a compile-time flag for RDTSCP support
      # and use it instead of lfence + RDTSC
      {.emit: """asm volatile(
        "lfence\n"
        "rdtsc\n"
        : "=a"(`lo`), "=d"(`hi`)
        :
        : "memory"
      );""".}
      return (hi shl 32) or lo
    else: # 32-bit x86
      var res: int32
      # TODO: Provide a compile-time flag for RDTSCP support
      # and use it instead of lfence + RDTSC
      {.emit: """asm volatile(
        "lfence\n"
        "rdtsc\n"
        : "=a"(`res`)
        :
        : "memory"
      );""".}
      return res

# Sanity check
# -------------------------------------------------------

when isMainModule:

  import std/[times, monotimes, math, volatile, os]

  block: # CpuName
    echo "Your CPU is: "
    echo "  ", cpuName()

  block: # Cycle Count
    echo "The cost of an int64 modulo operation on your platform is:"

    # Dealing with compiler optimization on microbenchmarks is hard
    {.pragma: volatile, codegenDecl: "volatile $# $#".}

    proc modNtimes(a, b: int64, N: int) {.noinline.} =
      var c{.volatile.}: int64
      for i in 0 ..< N:
        c.addr.volatileStore(a.unsafeAddr.volatileLoad() mod b.unsafeAddr.volatileLoad())

    let a {.volatile.} = 1000003'i64 # a prime number
    let b {.volatile.} = 10007'i64 # another prime number
    let N {.volatile.} = 3_000_000

    let startMono = getMonoTime()
    let startCycles = getTicks()
    modNtimes(a, b, N)
    let stopCycles = getTicks()
    let stopMono = getMonoTime()

    let elapsedMono = inNanoseconds(stopMono - startMono)
    let elapsedCycles = stopCycles - startCycles
    let timerResolutionGHz = round(elapsedCycles.float32 / elapsedMono.float32, 3)

    echo "  ", (elapsedCycles) div N, " cycles"
    echo "  ", (elapsedMono) div N, " ns/iter"
    echo "  ", timerResolutionGHz, " GHz (timer resolution)"

  block: # CPU Frequency
    discard # TODO, surprisingly this is very complex
@@ -1,67 +0,0 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  # Standard library
  strformat, strutils,
  # Bench
  bench_lab, platforms/platforms

template cpuX86(body: untyped): untyped =
  when defined(i386) or defined(amd64):
    body

# Reporting benchmark result
# -------------------------------------------------------

proc reportCli*(metrics: seq[Metadata], preset, flags: string) =

  let name = when SupportsCPUName: cpuName() else: "(name auto-detection not implemented for this CPU family)"
  echo "\nCPU: ", name

  when SupportsGetTicks:
    # https://blog.trailofbits.com/2019/10/03/tsc-frequency-for-all-better-profiling-and-benchmarking/
    # https://www.agner.org/optimize/blog/read.php?i=838
    echo "The CPU Cycle Count is indicative only. It cannot be used to compare across systems, works at your CPU nominal frequency and is sensitive to overclocking, throttling and frequency scaling (powersaving and Turbo Boost)."

    const lineSep = &"""|{'-'.repeat(50)}|{'-'.repeat(14)}|{'-'.repeat(20)}|{'-'.repeat(15)}|{'-'.repeat(17)}|{'-'.repeat(26)}|{'-'.repeat(26)}|"""
    echo "\n"
    echo lineSep
    echo &"""|{"Procedures (" & preset & ')':^50}|{"# of Calls":^14}|{"Throughput (ops/s)":^20}|{"Time (ms)":^15}|{"Avg Time (ms)":^17}|{"CPU cycles (in billions)":^26}|{"Avg cycles (in billions)":^26}|"""
    echo &"""|{flags:^50}|{' '.repeat(14)}|{' '.repeat(20)}|{' '.repeat(15)}|{' '.repeat(17)}|{"indicative only":^26}|{"indicative only":^26}|"""
    echo lineSep
    for m in metrics:
      if m.numCalls == 0:
        continue
      # TODO: running variance / standard deviation but the Welford method is quite costly.
      # https://nim-lang.org/docs/stats.html / https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
      let cumulTimeMs = m.cumulatedTimeNs.float64 * 1e-6
      let avgTimeMs = cumulTimeMs / m.numCalls.float64
      let throughput = 1e3 / avgTimeMs
      let cumulCyclesBillions = m.cumulatedCycles.float64 * 1e-9
      let avgCyclesBillions = cumulCyclesBillions / m.numCalls.float64
      echo &"""|{m.procName:<50}|{m.numCalls:>14}|{throughput:>20.3f}|{cumulTimeMs:>15.3f}|{avgTimeMs:>17.3f}|"""
      echo lineSep

  else:
    const lineSep = &"""|{'-'.repeat(50)}|{'-'.repeat(14)}|{'-'.repeat(20)}|{'-'.repeat(15)}|{'-'.repeat(17)}|"""
    echo "\n"
    echo lineSep
    echo &"""|{"Procedures (" & preset & ')':^50}|{"# of Calls":^14}|{"Throughput (ops/s)":^20}|{"Time (ms)":^15}|{"Avg Time (ms)":^17}|"""
    echo &"""|{flags:^50}|{' '.repeat(14)}|{' '.repeat(20)}|{' '.repeat(15)}|{' '.repeat(17)}|"""
    echo lineSep
    for m in metrics:
      if m.numCalls == 0:
        continue
      # TODO: running variance / standard deviation but the Welford method is quite costly.
      # https://nim-lang.org/docs/stats.html / https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
      let cumulTimeMs = m.cumulatedTimeNs.float64 * 1e-6
      let avgTimeMs = cumulTimeMs / m.numCalls.float64
      let throughput = 1e3 / avgTimeMs
      echo &"""|{m.procName:<50}|{m.numCalls:>14}|{throughput:>20.3f}|{cumulTimeMs:>15.3f}|{avgTimeMs:>17.3f}|"""
      echo lineSep
@ -1,316 +0,0 @@
|
|||
# beacon_chain
|
||||
# Copyright (c) 2018-2021 Status Research & Development GmbH
|
||||
# Licensed and distributed under either of
|
||||
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
||||
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
import
|
||||
# Standard library
|
||||
os, tables,
|
||||
# Status libraries
|
||||
confutils/defs, serialization, chronicles,
|
||||
# Beacon-chain
|
||||
../beacon_chain/spec/datatypes/[phase0],
|
||||
../beacon_chain/spec/[
|
||||
beaconstate, forks, helpers, state_transition, state_transition_block,
|
||||
state_transition_epoch],
|
||||
../tests/consensus_spec/fixtures_utils
|
||||
|
||||
# Nimbus Bench - Scenario configuration
|
||||
# --------------------------------------------------
|
||||
|
||||
type
|
||||
StartupCommand* = enum
|
||||
noCommand
|
||||
cmdFullStateTransition
|
||||
cmdSlotProcessing
|
||||
cmdBlockProcessing
|
||||
cmdEpochProcessing
|
||||
|
||||
BlockProcessingCat* = enum
|
||||
catBlockHeader
|
||||
catRANDAO
|
||||
catEth1Data
|
||||
catProposerSlashings
|
||||
catAttesterSlashings
|
||||
catAttestations
|
||||
catDeposits
|
||||
catVoluntaryExits
|
||||
|
||||
EpochProcessingCat* = enum
|
||||
catJustificationFinalization
|
||||
catRegistryUpdates
|
||||
catSlashings
|
||||
# catRewardsPenalties # no upstream tests
|
||||
|
||||
  ScenarioConf* = object
    scenarioDir* {.
      desc: "The directory of your benchmark scenario"
      name: "scenario-dir"
      abbr: "d"
      required .}: InputDir
    preState* {.
      desc: "The name of your pre-state (without .ssz)"
      name: "pre"
      abbr: "p"
      defaultValue: "pre".}: string
    blocksPrefix* {.
      desc: "The prefix of your blocks file, for example \"blocks_\" for blocks in the form \"blocks_XX.ssz\""
      name: "blocks-prefix"
      abbr: "b"
      defaultValue: "blocks_".}: string
    blocksQty* {.
      desc: "The number of blocks to process for this transition. Blocks should start at 0."
      name: "block-quantity"
      abbr: "q"
      defaultValue: 1.}: int
    skipBLS*{.
      desc: "Skip BLS public keys and signature verification"
      name: "skip-bls"
      defaultValue: true.}: bool
    case cmd*{.
      command
      defaultValue: noCommand }: StartupCommand
    of noCommand:
      discard
    of cmdFullStateTransition:
      discard
    of cmdSlotProcessing:
      numSlots* {.
        desc: "The number of slots the pre-state will be advanced by"
        name: "num-slots"
        abbr: "s"
        defaultValue: 1.}: uint64
    of cmdBlockProcessing:
      case blockProcessingCat* {.
        desc: "block transitions"
        # name: "process-blocks" # Pending https://github.com/status-im/nim-confutils/issues/10
        implicitlySelectable
        required .}: BlockProcessingCat
      of catBlockHeader:
        blockHeader*{.
          desc: "Block header filename (without .ssz)"
          name: "block-header"
          defaultValue: "block".}: string
      of catRANDAO:
        discard
      of catEth1Data:
        discard
      of catProposerSlashings:
        proposerSlashing*{.
          desc: "Proposer slashing filename (without .ssz)"
          name: "proposer-slashing"
          defaultValue: "proposer_slashing".}: string
      of catAttesterSlashings:
        attesterSlashing*{.
          desc: "Attester slashing filename (without .ssz)"
          name: "attester-slashing"
          defaultValue: "attester_slashing".}: string
      of catAttestations:
        attestation*{.
          desc: "Attestation filename (without .ssz)"
          name: "attestation"
          defaultValue: "attestation".}: string
      of catDeposits:
        deposit*{.
          desc: "Deposit filename (without .ssz)"
          name: "deposit"
          defaultValue: "deposit".}: string
      of catVoluntaryExits:
        voluntaryExit*{.
          desc: "Voluntary Exit filename (without .ssz)"
          name: "voluntary_exit"
          defaultValue: "voluntary_exit".}: string
    of cmdEpochProcessing:
      epochProcessingCat*: EpochProcessingCat

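ScenarioConf is a regular confutils configuration object. A minimal sketch of how such an object is parsed and dispatched, assuming confutils' usual `load` entry point (the real nbench main module is not part of this hunk):

  import confutils

  when isMainModule:
    let conf = ScenarioConf.load()
    case conf.cmd
    of cmdFullStateTransition:
      runFullTransition(conf.scenarioDir.string, conf.preState,
                        conf.blocksPrefix, conf.blocksQty, conf.skipBLS)
    else:
      discard  # the remaining subcommands dispatch to the runners defined below
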
proc parseSSZ(path: string, T: typedesc): T =
  try:
    when T is ref:
      result = newClone(SSZ.loadFile(path, typeof(default(T)[])))
    else:
      result = SSZ.loadFile(path, T)
  except SerializationError as err:
    writeStackTrace()
    stderr.write "SSZ load issue for file \"", path, "\"\n"
    stderr.write err.formatMsg(path), "\n"
    quit 1
  except CatchableError:
    writeStackTrace()
    stderr.write "SSZ load issue for file \"", path, "\"\n"
    quit 1

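For instance, loading a phase0 pre-state with the helper above (the path is hypothetical):

  let pre = parseSSZ("scenario/pre.ssz", phase0.BeaconState)
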
proc runFullTransition*(dir, preState, blocksPrefix: string, blocksQty: int, skipBLS: bool) =
  let prePath = dir / preState & ".ssz"
  var
    cache = StateCache()
    info = ForkedEpochInfo()

  echo "Running: ", prePath
  let state = (ref ForkedHashedBeaconState)(
    phase0Data: phase0.HashedBeaconState(data: parseSSZ(prePath, phase0.BeaconState)),
    kind: BeaconStateFork.Phase0
  )
  setStateRoot(state[], hash_tree_root(state[].phase0Data.data))

  for i in 0 ..< blocksQty:
    let blockPath = dir / blocksPrefix & $i & ".ssz"
    echo "Processing: ", blockPath

    let signedBlock = parseSSZ(blockPath, phase0.SignedBeaconBlock)
    let flags = if skipBLS: {skipBlsValidation}
                else: {}
    let success = state_transition(
      defaultRuntimeConfig, state[], signedBlock, cache, info, flags,
      noRollback)
    echo "State transition status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"

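A hypothetical invocation, replaying blocks_0.ssz through blocks_4.ssz on top of pre.ssz with BLS verification skipped (the directory name is made up):

  runFullTransition("scenarios/mainnet_case", "pre", "blocks_", 5, skipBLS = true)
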
proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
  var
    cache = StateCache()
    info = ForkedEpochInfo()
  let prePath = dir / preState & ".ssz"

  echo "Running: ", prePath
  let state = (ref ForkedHashedBeaconState)(
    phase0Data: phase0.HashedBeaconState(
      data: parseSSZ(prePath, phase0.BeaconState)),
    kind: BeaconStateFork.Phase0)
  setStateRoot(state[], hash_tree_root(state[].phase0Data.data))

  # Shouldn't necessarily assert, because nbench can run test suite
  discard process_slots(
    defaultRuntimeConfig, state[], getStateField(state[], slot) + numSlots,
    cache, info, {})

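A hypothetical invocation, advancing the pre-state by one mainnet epoch (32 slots); the directory name is made up:

  runProcessSlots("scenarios/mainnet_case", "pre", 32)
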
template processEpochScenarioImpl(
    dir, preState: string,
    transitionFn: untyped): untyped =
  let prePath = dir/preState & ".ssz"

  echo "Running: ", prePath
  type T = phase0.BeaconState
  let state = (ref phase0.HashedBeaconState)(
    data: parseSSZ(prePath, T)
  )
  state.root = hash_tree_root(state.data)

  var cache {.used.} = StateCache()
  when compiles(transitionFn(defaultRuntimeConfig, state.data, cache)):
    transitionFn(defaultRuntimeConfig, state.data, cache)
  elif compiles(transitionFn(state.data, cache)):
    transitionFn(state.data, cache)
  elif compiles(transitionFn(state.data)):
    transitionFn(state.data)
  else:
    transitionFn(defaultRuntimeConfig, state.data)

  echo astToStr(transitionFn) & " status: ", "Done" # if success: "SUCCESS ✓" else: "FAILURE ⚠️"

template genProcessEpochScenario(name, transitionFn: untyped): untyped =
  proc `name`*(dir, preState: string) =
    processEpochScenarioImpl(dir, preState, transitionFn)

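For illustration, genProcessEpochScenario(runProcessSlashings, bench_process_slashings) expands to roughly the following (a sketch, not the literal macro output):

  proc runProcessSlashings*(dir, preState: string) =
    processEpochScenarioImpl(dir, preState, bench_process_slashings)
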
proc process_deposit(state: var phase0.BeaconState;
                     deposit: Deposit;
                     flags: UpdateFlags = {}): Result[void, cstring] =
  process_deposit(defaultRuntimeConfig, state, deposit, flags)

proc bench_process_justification_and_finalization(state: var phase0.BeaconState) =
  var
    cache: StateCache
    info: phase0.EpochInfo
  info.init(state)
  info.process_attestations(state, cache)
  process_justification_and_finalization(state, info.balances)

func bench_process_slashings(state: var phase0.BeaconState) =
  var
    cache: StateCache
    info: phase0.EpochInfo
  info.init(state)
  info.process_attestations(state, cache)
  process_slashings(state, info.balances.current_epoch)

template processBlockScenarioImpl(
    dir, preState: string, skipBLS: bool,
    transitionFn, paramName: untyped,
    ConsensusObjectRefType: typedesc): untyped =
  let prePath = dir/preState & ".ssz"

  echo "Running: ", prePath
  type T = phase0.BeaconState
  let state = (ref phase0.HashedBeaconState)(
    data: parseSSZ(prePath, T)
  )
  state.root = hash_tree_root(state.data)

  var cache {.used.} = StateCache()
  let flags {.used.} = if skipBLS: {skipBlsValidation}
                       else: {}

  let consObjPath = dir/paramName & ".ssz"
  echo "Processing: ", consObjPath
  var consObj = parseSSZ(consObjPath, ConsensusObjectRefType)

  when compiles(transitionFn(state.data, consObj[], flags, cache)):
    let success = transitionFn(state.data, consObj[], flags, cache).isOk
  elif compiles(transitionFn(defaultRuntimeConfig, state.data, consObj[], flags, cache)):
    let success = transitionFn(defaultRuntimeConfig, state.data, consObj[], flags, cache).isOk
  elif compiles(transitionFn(state.data, consObj[], flags)):
    let success = transitionFn(state.data, consObj[], flags).isOk
  elif compiles(transitionFn(state, consObj[], flags, cache)):
    let success = transitionFn(state, consObj[], flags, cache).isOk
  else:
    let success = transitionFn(state, consObj[]).isOk

  echo astToStr(transitionFn) & " status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"

template genProcessBlockScenario(name, transitionFn,
                                 paramName: untyped,
                                 ConsensusObjectType: typedesc): untyped =
  proc `name`*(dir, preState, `paramName`: string, skipBLS: bool) =
    processBlockScenarioImpl(dir, preState, skipBLS, transitionFn, paramName, ref ConsensusObjectType)

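Similarly, genProcessBlockScenario(runProcessDeposit, process_deposit, deposit, Deposit) expands to roughly (a sketch, not the literal macro output):

  proc runProcessDeposit*(dir, preState, deposit: string, skipBLS: bool) =
    processBlockScenarioImpl(dir, preState, skipBLS, process_deposit, deposit, ref Deposit)
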
genProcessEpochScenario(runProcessJustificationFinalization,
                        bench_process_justification_and_finalization)

genProcessEpochScenario(runProcessRegistryUpdates,
                        process_registry_updates)

genProcessEpochScenario(runProcessSlashings,
                        bench_process_slashings)

genProcessBlockScenario(runProcessBlockHeader,
                        process_block_header,
                        block_header,
                        phase0.BeaconBlock)

genProcessBlockScenario(runProcessProposerSlashing,
                        process_proposer_slashing,
                        proposer_slashing,
                        ProposerSlashing)

template do_process_attestation(state, operation, flags, cache: untyped):
    untyped =
  process_attestation(state, operation, flags, 0.Gwei, cache)
genProcessBlockScenario(runProcessAttestation,
                        do_process_attestation,
                        attestation,
                        Attestation)

genProcessBlockScenario(runProcessAttesterSlashing,
                        process_attester_slashing,
                        att_slash,
                        AttesterSlashing)

genProcessBlockScenario(runProcessDeposit,
                        process_deposit,
                        deposit,
                        Deposit)

genProcessBlockScenario(runProcessVoluntaryExits,
                        process_voluntary_exit,
                        deposit,
                        SignedVoluntaryExit)
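A hypothetical call to one of the generated runners, with made-up scenario paths and file names:

  runProcessAttestation("scenarios/attestation_case_0", "pre", "attestation", skipBLS = true)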