rm state_sim; capella genesis for block_sim (#5331)

* rm state_sim; capella genesis for block_sim

* copyright year
tersec 2023-08-21 09:10:15 +00:00 committed by GitHub
parent 52640e9e03
commit 295c3e2e41
7 changed files with 73 additions and 472 deletions

View File

@ -289,7 +289,6 @@ XML_TEST_BINARIES := \
# test suite
TEST_BINARIES := \
state_sim \
block_sim \
test_libnimbus_lc
.PHONY: $(TEST_BINARIES) $(XML_TEST_BINARIES) force_build_alone_all_tests
@ -346,8 +345,8 @@ fork_choice: | build deps
# CI false negatives in a process lasting hours and requiring a restart, and
# therefore even more wasted time, when it does.
#
# If one asks for, e.g., `make all_tests state_sim`, it intentionally allows
# those in parallel, because the CI system does do that.
# If one asks for, e.g., `make all_tests block_sim`, it intentionally allows
# those in parallel, because the CI system does do that.
#
# https://www.gnu.org/software/make/manual/html_node/Parallel-Disable.html
# describes a special target .WAIT which would enable this more easily but
@ -367,15 +366,7 @@ all_tests: | build deps nimbus_signing_node force_build_alone_all_tests
$(NIM_PARAMS) $(TEST_MODULES_FLAGS) && \
echo -e $(BUILD_END_MSG) "build/$@"
# State and block sims; getting to 4th epoch triggers consensus checks
state_sim: | build deps
+ echo -e $(BUILD_MSG) "build/$@" && \
MAKE="$(MAKE)" V="$(V)" $(ENV_SCRIPT) scripts/compile_nim_program.sh \
$@ \
"research/$@.nim" \
$(NIM_PARAMS) && \
echo -e $(BUILD_END_MSG) "build/$@"
# Block sim; getting to 4th epoch triggers consensus checks
block_sim: | build deps
+ echo -e $(BUILD_MSG) "build/$@" && \
MAKE="$(MAKE)" V="$(V)" $(ENV_SCRIPT) scripts/compile_nim_program.sh \
@ -404,8 +395,7 @@ endif
for TEST_BINARY in $(TEST_BINARIES); do \
PARAMS=""; \
REDIRECT=""; \
if [[ "$${TEST_BINARY}" == "state_sim" ]]; then PARAMS="--validators=8000 --slots=160"; \
elif [[ "$${TEST_BINARY}" == "block_sim" ]]; then PARAMS="--validators=8000 --slots=160"; \
if [[ "$${TEST_BINARY}" == "block_sim" ]]; then PARAMS="--validators=10000 --slots=192"; \
elif [[ "$${TEST_BINARY}" == "test_libnimbus_lc" ]]; then REDIRECT="$${TEST_BINARY}.log"; \
fi; \
echo -e "\nRunning $${TEST_BINARY} $${PARAMS}\n"; \
@ -804,7 +794,7 @@ ntu: | build deps
+ $(ENV_SCRIPT) $(NIMC) -d:danger -o:vendor/.nimble/bin/ntu c vendor/nim-testutils/ntu.nim
clean: | clean-common
rm -rf build/{$(TOOLS_CSV),all_tests,test_*,proto_array,fork_choice,*.a,*.so,*_node,*ssz*,nimbus_*,beacon_node*,block_sim,state_sim,transition*,generate_makefile,nimbus-wix/obj}
rm -rf build/{$(TOOLS_CSV),all_tests,test_*,proto_array,fork_choice,*.a,*.so,*_node,*ssz*,nimbus_*,beacon_node*,block_sim,transition*,generate_makefile,nimbus-wix/obj}
ifneq ($(USE_LIBBACKTRACE), 0)
+ "$(MAKE)" -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT)
endif
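For a quick local sanity check of the updated test target, a hedged sketch mirroring the `--validators=10000 --slots=192` arguments the test loop above now passes to `block_sim` (release flags as recommended in the README below):

```bash
# Sketch: build block_sim in release mode, then run it with the same
# parameters the Makefile's test loop uses above.
make NIMFLAGS="-d:release" block_sim
build/block_sim --validators=10000 --slots=192
```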

View File

@ -79,15 +79,15 @@ We provide several tools to interact with ETH2 and the data in the beacon chain:
## For researchers
### State transition simulation
### Block simulation
The state transition simulator can quickly run the Beacon chain state transition function in isolation and output JSON snapshots of the state. The simulation runs without networking and blocks are processed without slot time delays.
The block simulator can quickly run the Beacon chain state transition function in isolation. The simulation runs without networking and without slot time delays.
```bash
# build and run the state simulator, then display its help ("-d:release" speeds it
# build and run the block simulator, then display its help ("-d:release" speeds it
# up substantially, allowing the simulation of longer runs in reasonable time)
make NIMFLAGS="-d:release" state_sim
build/state_sim --help
make NIMFLAGS="-d:release" block_sim
build/block_sim --help
```
### Local network simulation

View File

@ -104,7 +104,7 @@ nim --version # Nimbus is tested and supported on 1.2.12 at the moment
- build a specific tool:
```bash
make state_sim
make block_sim
```
- you can control the Makefile's verbosity with the V variable (defaults to 0):
@ -232,8 +232,8 @@ It runs without networking and blocks are processed without slot time delays.
```bash
# build the state simulator, then display its help ("-d:release" speeds it
# up substantially, allowing the simulation of longer runs in reasonable time)
make NIMFLAGS="-d:release" state_sim
build/state_sim --help
make NIMFLAGS="-d:release" block_sim
build/block_sim --help
```
Use the output of the `help` command to pass desired values to the simulator.
@ -244,11 +244,10 @@ The most important options are:
- `slots`: the number of slots to run the simulation for (default 192)
- `validators`: the number of validators (default 6400)
- `attesterRatio`: the expected fraction of attesters that actually do their work for every slot (default 0.73)
- `json_interval`: how often JSON snapshots of the state are outputted (default every 32 slots -- or once per epoch)
For example, to run the state simulator for 384 slots, with 20,000 validators, and an average of 66% of attesters doing their work every slot, while outputting snapshots of the state twice per epoch, run:
For example, to run the block simulator for 384 slots, with 20,000 validators, and an average of 66% of attesters doing their work every slot, run:
```
build/state_sim --slots=384 --validators=20000 --attesterRatio=0.66 --json_interval=16
build/block_sim --slots=384 --validators=20000 --attesterRatio=0.66
```

View File

@ -5,38 +5,42 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# `block_sim` is a block and attestation simulator similar to `state_sim` whose
# task is to run the beacon chain without considering the network or the
# wall clock. Functionally, it achieves the same as the distributed beacon chain
# by producing blocks and attestations as if they were created by separate
# nodes, just like a set of `beacon_node` instances would.
# `block_sim` is a block, attestation, and sync committee simulator, whose task
# is to run the beacon chain without considering the network or the wall clock.
#
# Similar to `state_sim`, but uses the block and attestation pools along with
# a database, as if a real node was running.
# Functionally, it achieves the same as the distributed beacon chain by
# producing blocks and attestations as if they were created by separate
# nodes, just like a set of `beacon_node` instances would.
import
confutils, chronicles, eth/db/kvstore_sqlite3,
chronos/timer, taskpools,
../tests/testblockutil,
../beacon_chain/spec/[forks, state_transition],
../beacon_chain/spec/datatypes/[phase0, altair, bellatrix, deneb],
../beacon_chain/[beacon_chain_db, beacon_clock],
../beacon_chain/el/el_manager,
../beacon_chain/beacon_chain_db,
../beacon_chain/validators/validator_pool,
../beacon_chain/gossip_processing/[batch_validation, gossip_validation],
../beacon_chain/consensus_object_pools/[blockchain_dag, block_quarantine,
block_clearance, attestation_pool,
sync_committee_msg_pool],
../beacon_chain/consensus_object_pools/[blockchain_dag, block_clearance],
./simutils
from std/math import E, ln, sqrt
from std/random import Rand, initRand, rand
from std/random import Rand, gauss, initRand, rand
from std/stats import RunningStat
from std/strformat import `&`
from ../beacon_chain/spec/datatypes/capella import SignedBeaconBlock
from ../beacon_chain/consensus_object_pools/attestation_pool import
AttestationPool, addAttestation, addForkChoice, getAttestationsForBlock,
init, prune
from ../beacon_chain/consensus_object_pools/block_quarantine import
Quarantine, init
from ../beacon_chain/consensus_object_pools/sync_committee_msg_pool import
SyncCommitteeMsgPool, addContribution, addSyncCommitteeMessage, init,
produceContribution, produceSyncAggregate, pruneData
from ../beacon_chain/el/el_manager import
Eth1Block, Eth1BlockNumber, Eth1BlockTimestamp, Eth1Chain, addBlock,
getBlockProposalData, getDepositsRoot, init
from ../beacon_chain/spec/beaconstate import
get_beacon_committee, get_beacon_proposer_index,
get_committee_count_per_slot, get_committee_indices
from ../beacon_chain/spec/state_transition_block import process_block
type Timers = enum
tBlock = "Process non-epoch slot with block"
@ -50,156 +54,11 @@ type Timers = enum
template seconds(x: uint64): timer.Duration =
timer.seconds(int(x))
func gauss(r: var Rand; mu = 0.0; sigma = 1.0): float =
# TODO This is present in Nim 1.4
const K = sqrt(2 / E)
var
a = 0.0
b = 0.0
while true:
a = rand(r, 1.0)
b = (2.0 * rand(r, 1.0) - 1.0) * K
if b * b <= -4.0 * a * a * ln(a): break
mu + sigma * (b / a)
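The hand-rolled ratio-of-uniforms sampler above is dropped in favor of `gauss` from `std/random`, which the updated import near the top of this file now pulls in. A minimal sketch of the replacement call, assuming the deterministic `initRand(1)` seed these simulators use:

```nim
import std/random

var r = initRand(1)              # deterministic seed, as in the simulators
let sample = gauss(r, 0.0, 1.0)  # std/random's gauss(r, mu, sigma) replaces the local func
echo sample
```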
from ../beacon_chain/spec/state_transition_block import process_block
# TODO The rest of nimbus-eth2 uses only the forked version of these, and in
# general it's better for the validator_duties caller to use the forkedstate
# version, so isolate these here pending refactoring of block_sim to prefer,
# when possible, to also use the forked version. It'll be worth keeping some
# example of the non-forked version because it enables fork bootstrapping.
proc makeSimulationBlock(
cfg: RuntimeConfig,
state: var phase0.HashedBeaconState,
proposer_index: ValidatorIndex,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
attestations: seq[Attestation],
deposits: seq[Deposit],
exits: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate,
execution_payload: bellatrix.ExecutionPayloadForSigning,
bls_to_execution_changes: SignedBLSToExecutionChangeList,
rollback: RollbackHashedProc[phase0.HashedBeaconState],
cache: var StateCache,
# TODO:
# `verificationFlags` is needed only in tests and can be
# removed if we don't use invalid signatures there
verificationFlags: UpdateFlags = {}): Result[phase0.BeaconBlock, cstring] =
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
## state.slot meaning process_slots must be called up to the slot for which
## the block is to be created.
# To create a block, we'll first apply a partial block to the state, skipping
# some validations.
var blck = partialBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, exits, sync_aggregate, execution_payload)
let res = process_block(
cfg, state.data, blck.asSigVerified(), verificationFlags, cache)
if res.isErr:
rollback(state)
return err(res.error())
state.root = hash_tree_root(state.data)
blck.state_root = state.root
ok(blck)
proc makeSimulationBlock(
cfg: RuntimeConfig,
state: var altair.HashedBeaconState,
proposer_index: ValidatorIndex,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
attestations: seq[Attestation],
deposits: seq[Deposit],
exits: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate,
execution_payload: bellatrix.ExecutionPayloadForSigning,
bls_to_execution_changes: SignedBLSToExecutionChangeList,
rollback: RollbackHashedProc[altair.HashedBeaconState],
cache: var StateCache,
# TODO:
# `verificationFlags` is needed only in tests and can be
# removed if we don't use invalid signatures there
verificationFlags: UpdateFlags = {}): Result[altair.BeaconBlock, cstring] =
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
## state.slot meaning process_slots must be called up to the slot for which
## the block is to be created.
# To create a block, we'll first apply a partial block to the state, skipping
# some validations.
var blck = partialBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, exits, sync_aggregate, execution_payload)
# Signatures are verified elsewhere, so don't duplicate inefficiently here
let res = process_block(
cfg, state.data, blck.asSigVerified(), verificationFlags, cache)
if res.isErr:
rollback(state)
return err(res.error())
state.root = hash_tree_root(state.data)
blck.state_root = state.root
ok(blck)
proc makeSimulationBlock(
cfg: RuntimeConfig,
state: var bellatrix.HashedBeaconState,
proposer_index: ValidatorIndex,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
attestations: seq[Attestation],
deposits: seq[Deposit],
exits: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate,
execution_payload: bellatrix.ExecutionPayloadForSigning,
bls_to_execution_changes: SignedBLSToExecutionChangeList,
rollback: RollbackHashedProc[bellatrix.HashedBeaconState],
cache: var StateCache,
# TODO:
# `verificationFlags` is needed only in tests and can be
# removed if we don't use invalid signatures there
verificationFlags: UpdateFlags = {}): Result[bellatrix.BeaconBlock, cstring] =
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
## state.slot meaning process_slots must be called up to the slot for which
## the block is to be created.
# To create a block, we'll first apply a partial block to the state, skipping
# some validations.
var blck = partialBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, exits, sync_aggregate, execution_payload)
let res = process_block(
cfg, state.data, blck.asSigVerified(), verificationFlags, cache)
if res.isErr:
rollback(state)
return err(res.error())
state.root = hash_tree_root(state.data)
blck.state_root = state.root
ok(blck)
proc makeSimulationBlock(
cfg: RuntimeConfig,
state: var capella.HashedBeaconState,
@ -287,8 +146,8 @@ proc makeSimulationBlock(
ok(blck)
# TODO confutils is an impenetrable black box. how can a help text be added here?
cli do(slots = SLOTS_PER_EPOCH * 6,
validators = SLOTS_PER_EPOCH * 400, # One per shard is minimum
cli do(slots = SLOTS_PER_EPOCH * 7,
validators = SLOTS_PER_EPOCH * 500,
attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.82,
syncCommitteeRatio {.desc: "ratio of validators that perform sync committee actions in each round"} = 0.82,
blockRatio {.desc: "ratio of slots with blocks"} = 1.0,
@ -296,14 +155,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
let
(genesisState, depositTreeSnapshot) = loadGenesis(validators, false)
genesisTime = float getStateField(genesisState[], genesis_time)
var
cfg = defaultRuntimeConfig
cfg.ALTAIR_FORK_EPOCH = 1.Epoch
cfg.BELLATRIX_FORK_EPOCH = 2.Epoch
cfg.CAPELLA_FORK_EPOCH = 3.Epoch
cfg.DENEB_FORK_EPOCH = 4.Epoch
const cfg = getSimulationConfig()
echo "Starting simulation..."
@ -484,18 +336,9 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
finalizedEpochRef.eth1_data,
finalizedEpochRef.eth1_deposit_index)
sync_aggregate =
when T.toFork >= ConsensusFork.Altair:
syncCommitteePool[].produceSyncAggregate(dag.head.bid, slot)
else:
SyncAggregate.init()
syncCommitteePool[].produceSyncAggregate(dag.head.bid, slot)
hashedState =
when T is phase0.SignedBeaconBlock:
addr state.phase0Data
elif T is altair.SignedBeaconBlock:
addr state.altairData
elif T is bellatrix.SignedBeaconBlock:
addr state.bellatrixData
elif T is capella.SignedBeaconBlock:
when T is capella.SignedBeaconBlock:
addr state.capellaData
elif T is deneb.SignedBeaconBlock:
addr state.denebData
@ -525,10 +368,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
noRollback,
cache)
var
newBlock = T(
message: message.get()
)
var newBlock = T(message: message.get())
let blockRoot = withTimerRet(timers[tHashBlock]):
hash_tree_root(newBlock.message)
@ -547,72 +387,6 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
# HTTP server's state function, combine all proposeForkBlock functions into a
# single generic function. Until https://github.com/nim-lang/Nim/issues/20811
# is fixed, that generic function must take `blockRatio` as a parameter.
proc proposePhase0Block(slot: Slot) =
if rand(r, 1.0) > blockRatio:
return
dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do:
let
newBlock = getNewBlock[phase0.SignedBeaconBlock](updatedState, slot, cache)
added = dag.addHeadBlock(verifier, newBlock) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef, unrealized: FinalityCheckpoints):
# Callback add to fork choice if valid
attPool.addForkChoice(
epochRef, blckRef, unrealized, signedBlock.message,
blckRef.slot.start_beacon_time)
dag.updateHead(added[], quarantine[], [])
if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG()
attPool.prune()
do:
raiseAssert "withUpdatedState failed"
proc proposeAltairBlock(slot: Slot) =
if rand(r, 1.0) > blockRatio:
return
dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do:
let
newBlock = getNewBlock[altair.SignedBeaconBlock](updatedState, slot, cache)
added = dag.addHeadBlock(verifier, newBlock) do (
blckRef: BlockRef, signedBlock: altair.TrustedSignedBeaconBlock,
epochRef: EpochRef, unrealized: FinalityCheckpoints):
# Callback add to fork choice if valid
attPool.addForkChoice(
epochRef, blckRef, unrealized, signedBlock.message,
blckRef.slot.start_beacon_time)
dag.updateHead(added[], quarantine[], [])
if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG()
attPool.prune()
do:
raiseAssert "withUpdatedState failed"
proc proposeBellatrixBlock(slot: Slot) =
if rand(r, 1.0) > blockRatio:
return
dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do:
let
newBlock = getNewBlock[bellatrix.SignedBeaconBlock](updatedState, slot, cache)
added = dag.addHeadBlock(verifier, newBlock) do (
blckRef: BlockRef, signedBlock: bellatrix.TrustedSignedBeaconBlock,
epochRef: EpochRef, unrealized: FinalityCheckpoints):
# Callback add to fork choice if valid
attPool.addForkChoice(
epochRef, blckRef, unrealized, signedBlock.message,
blckRef.slot.start_beacon_time)
dag.updateHead(added[], quarantine[], [])
if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG()
attPool.prune()
do:
raiseAssert "withUpdatedState failed"
proc proposeCapellaBlock(slot: Slot) =
if rand(r, 1.0) > blockRatio:
return
@ -699,9 +473,8 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
case dag.cfg.consensusForkAtEpoch(slot.epoch)
of ConsensusFork.Deneb: proposeDenebBlock(slot)
of ConsensusFork.Capella: proposeCapellaBlock(slot)
of ConsensusFork.Bellatrix: proposeBellatrixBlock(slot)
of ConsensusFork.Altair: proposeAltairBlock(slot)
of ConsensusFork.Phase0: proposePhase0Block(slot)
of ConsensusFork.Bellatrix, ConsensusFork.Altair, ConsensusFork.Phase0:
doAssert false
if attesterRatio > 0.0:
withTimer(timers[tAttest]):
handleAttestations(slot)
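Since the simulation config (see `getSimulationConfig` in simutils, below) activates Altair, Bellatrix and Capella at epoch 0, `consensusForkAtEpoch` can never yield a pre-Capella fork here, which is why the dispatch above collapses those branches into `doAssert false`. A minimal sketch of that invariant, assuming the compile-time config from simutils:

```nim
# Sketch: with ALTAIR/BELLATRIX/CAPELLA_FORK_EPOCH = 0 and DENEB_FORK_EPOCH = 2,
# every epoch maps to Capella or Deneb, so the pre-Capella branches are dead code.
const cfg = getSimulationConfig()
for epoch in [0.Epoch, 1.Epoch, 2.Epoch, 3.Epoch]:
  doAssert cfg.consensusForkAtEpoch(epoch) in [ConsensusFork.Capella, ConsensusFork.Deneb]
```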

View File

@ -1,3 +1,10 @@
# beacon_chain
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
-u:metrics
-d:"libp2p_pki_schemes=secp256k1"
@ -5,6 +12,5 @@
-d:chronosStrictException
--styleCheck:usages
--styleCheck:hint
--hint[XDeclaredButNotUsed]:off
--hint[ConvFromXtoItselfNotNeeded]:off
--hint[Processing]:off

View File

@ -6,12 +6,15 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
stats, strformat, times,
stew/io2,
../tests/testblockutil, ../tests/consensus_spec/os_ops,
../beacon_chain/[filepath],
../beacon_chain/spec/datatypes/[phase0, altair],
../beacon_chain/spec/[beaconstate, deposit_snapshots, forks, helpers]
../beacon_chain/spec/[beaconstate, forks]
from std/stats import RunningStat, mean, push, standardDeviationS
from std/strformat import `&`
from std/times import cpuTime
from ../beacon_chain/filepath import secureCreatePath
from ../beacon_chain/spec/deposit_snapshots import DepositTreeSnapshot
template withTimer*(stats: var RunningStat, body: untyped) =
# TODO unify timing somehow
@ -32,23 +35,7 @@ template withTimerRet*(stats: var RunningStat, body: untyped): untyped =
tmp
func verifyConsensus*(state: phase0.BeaconState, attesterRatio: auto) =
if attesterRatio < 0.63:
doAssert state.current_justified_checkpoint.epoch == 0
doAssert state.finalized_checkpoint.epoch == 0
# Quorum is 2/3 of validators, and at low numbers, quantization effects
# can dominate, so allow for play above/below attesterRatio of 2/3.
if attesterRatio < 0.72:
return
let current_epoch = get_current_epoch(state)
if current_epoch >= 3:
doAssert state.current_justified_checkpoint.epoch + 1 >= current_epoch
if current_epoch >= 4:
doAssert state.finalized_checkpoint.epoch + 2 >= current_epoch
func verifyConsensus*(state: ForkedHashedBeaconState, attesterRatio: auto) =
func verifyConsensus*(state: ForkedHashedBeaconState, attesterRatio: float) =
if attesterRatio < 0.63:
doAssert getStateField(state, current_justified_checkpoint).epoch == 0
doAssert getStateField(state, finalized_checkpoint).epoch == 0
@ -66,6 +53,14 @@ func verifyConsensus*(state: ForkedHashedBeaconState, attesterRatio: auto) =
doAssert getStateField(
state, finalized_checkpoint).epoch + 2 >= current_epoch
func getSimulationConfig*(): RuntimeConfig {.compileTime.} =
var cfg = defaultRuntimeConfig
cfg.ALTAIR_FORK_EPOCH = 0.Epoch
cfg.BELLATRIX_FORK_EPOCH = 0.Epoch
cfg.CAPELLA_FORK_EPOCH = 0.Epoch
cfg.DENEB_FORK_EPOCH = 2.Epoch
cfg
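Because the proc is marked `{.compileTime.}`, each call site (`loadGenesis` below, and `block_sim` above) receives the fork schedule as a plain `const`; a small sketch of what that pins down, with the epochs taken from the body above:

```nim
const cfg = getSimulationConfig()
static:
  doAssert cfg.CAPELLA_FORK_EPOCH == 0.Epoch  # genesis state is already a Capella state
  doAssert cfg.DENEB_FORK_EPOCH == 2.Epoch    # Deneb activates two epochs later
```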
proc loadGenesis*(validators: Natural, validate: bool):
(ref ForkedHashedBeaconState, DepositTreeSnapshot) =
const genesisDir = "test_sim"
@ -79,7 +74,7 @@ proc loadGenesis*(validators: Natural, validate: bool):
&"genesis_{const_preset}_{validators}_{SPEC_VERSION}.ssz"
contractSnapshotFn = genesisDir /
&"deposit_contract_snapshot_{const_preset}_{validators}_{SPEC_VERSION}.ssz"
cfg = defaultRuntimeConfig
const cfg = getSimulationConfig()
if fileExists(genesisFn) and fileExists(contractSnapshotFn):
let res = newClone(readSszForkedHashedBeaconState(
@ -116,12 +111,14 @@ proc loadGenesis*(validators: Natural, validate: bool):
depositContractState: merkleizer.toDepositContractState)
let res = (ref ForkedHashedBeaconState)(
kind: ConsensusFork.Phase0,
phase0Data: initialize_hashed_beacon_state_from_eth1(
cfg, ZERO_HASH, 0, deposits, flags))
kind: ConsensusFork.Capella,
capellaData: capella.HashedBeaconState(
data: initialize_beacon_state_from_eth1(
cfg, ZERO_HASH, 0, deposits,
default(capella.ExecutionPayloadHeader), {skipBlsValidation})))
echo &"Saving to {genesisFn}..."
SSZ.saveFile(genesisFn, res.phase0Data.data)
SSZ.saveFile(genesisFn, res.capellaData.data)
echo &"Saving to {contractSnapshotFn}..."
SSZ.saveFile(contractSnapshotFn, contractSnapshot)
@ -145,13 +142,6 @@ proc printTimers*[Timers: enum](
fmtTime(timers[t].min), fmtTime(timers[t].max), &"{timers[t].n :>12}, ",
$t
proc printTimers*[Timers: enum](
state: phase0.BeaconState, attesters: RunningStat, validate: bool,
timers: array[Timers, RunningStat]) =
echo "Validators: ", state.validators.len, ", epoch length: ", SLOTS_PER_EPOCH
echo "Validators per attestation (mean): ", attesters.mean
printTimers(validate, timers)
proc printTimers*[Timers: enum](
state: ForkedHashedBeaconState, attesters: RunningStat, validate: bool,
timers: array[Timers, RunningStat]) =

View File

@ -1,157 +0,0 @@
# beacon_chain
# Copyright (c) 2019-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# `state_sim` runs the state transition function in isolation, creating blocks
# and attesting to them as if the network was running as a whole.
import
std/[stats, times, strformat, random, tables],
confutils,
../tests/testblockutil,
../beacon_chain/spec/datatypes/phase0,
../beacon_chain/spec/eth2_apis/eth2_rest_serialization,
../beacon_chain/spec/[beaconstate, forks, helpers, signatures],
./simutils
type Timers = enum
tBlock = "Process non-epoch slot with block"
tEpoch = "Process epoch slot with block"
tHashBlock = "Tree-hash block"
tAttest = "Combine committee attestations"
func jsonName(prefix, slot: auto): string =
fmt"{prefix:04}-{shortLog(slot):08}.json"
proc writeJson*(fn, v: auto) =
var f: File
defer: close(f)
RestJson.saveFile(fn, v, pretty = true)
cli do(slots = SLOTS_PER_EPOCH * 5,
validators = SLOTS_PER_EPOCH * 400, # One per shard is minimum
json_interval = SLOTS_PER_EPOCH,
write_last_json = false,
prefix: int = 0,
attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.82,
validate = true):
let
flags = if validate: {} else: {skipBlsValidation}
(state, _) = loadGenesis(validators, validate)
genesisBlock = get_initial_beacon_block(state[])
genesis_validators_root = getStateField(state[], genesis_validators_root)
echo "Starting simulation..."
var
attestations = initTable[Slot, seq[Attestation]]()
latest_block_root = withBlck(genesisBlock): blck.root
timers: array[Timers, RunningStat]
attesters: RunningStat
r = initRand(1)
signedBlock: ForkedSignedBeaconBlock
cache = StateCache()
proc maybeWrite(last: bool) =
if write_last_json:
if getStateField(state[], slot) mod json_interval.uint64 == 0:
write(stdout, ":")
else:
write(stdout, ".")
if last:
withState(state[]): writeJson("state.json", forkyState.data)
else:
withState(state[]):
if forkyState.data.slot mod json_interval.uint64 == 0:
writeJson(jsonName(prefix, forkyState.data.slot), forkyState.data)
write(stdout, ":")
else:
write(stdout, ".")
# TODO doAssert against this up-front
# indexed attestation: validator index beyond max validators per committee
# len(indices) <= MAX_VALIDATORS_PER_COMMITTEE
for i in 0..<slots:
maybeWrite(false)
verifyConsensus(state[].phase0Data.data, attesterRatio)
let
attestations_idx = getStateField(state[], slot)
blockAttestations = attestations.getOrDefault(attestations_idx)
attestations.del attestations_idx
doAssert attestations.lenu64 <=
SLOTS_PER_EPOCH + MIN_ATTESTATION_INCLUSION_DELAY
let t =
if (getStateField(state[], slot) > GENESIS_SLOT and
(getStateField(state[], slot) + 1).is_epoch): tEpoch
else: tBlock
withTimer(timers[t]):
signedBlock = addTestBlock(
state[], cache, attestations = blockAttestations,
flags = flags)
latest_block_root = withTimerRet(timers[tHashBlock]):
withBlck(signedBlock): hash_tree_root(blck.message)
if attesterRatio > 0.0:
# attesterRatio is the fraction of attesters that actually do their
# work for every slot - we'll randomize it deterministically to give
# some variation
withState(state[]):
let
slot = forkyState.data.slot
epoch = slot.epoch
committees_per_slot =
get_committee_count_per_slot(forkyState.data, epoch, cache)
for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee(
forkyState.data, slot, committee_index, cache)
var
attestation = Attestation(
aggregation_bits: CommitteeValidatorsBits.init(committee.len),
data: makeAttestationData(
forkyState.data, slot, committee_index, latest_block_root),
)
first = true
attesters.push committee.len()
withTimer(timers[tAttest]):
for index_in_committee, validator_index in committee:
if (rand(r, high(int)).float * attesterRatio).int <= high(int):
attestation.aggregation_bits.setBit index_in_committee
if not attestation.aggregation_bits.isZeros:
if validate:
attestation.signature = makeAttestationSig(
forkyState.data.fork, genesis_validators_root,
attestation.data, committee, attestation.aggregation_bits)
# add the attestation if any of the validators attested, as given
# by the randomness. We have to delay when the attestation is
# actually added to the block per the attestation delay rule!
let
target_slot = slot + MIN_ATTESTATION_INCLUSION_DELAY - 1
attestations.mgetOrPut(target_slot, default(seq[Attestation])).add(
attestation)
flushFile(stdout)
if getStateField(state[], slot).is_epoch:
echo &" slot: {shortLog(getStateField(state[], slot))} ",
&"epoch: {shortLog(state[].get_current_epoch())}"
maybeWrite(true) # catch that last state as well..
echo "Done!"
printTimers(state[], attesters, true, timers)