nimbus-eth2/research/block_sim.nim

# beacon_chain
# Copyright (c) 2019-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# `block_sim` is a block and attestation simulator similar to `state_sim` whose
# task is to run the beacon chain without considering the network or the
# wall clock. Functionally, it achieves the same as the distributed beacon chain
# by producing blocks and attestations as if they were created by separate
# nodes, just like a set of `beacon_node` instances would.
#
# Similar to `state_sim`, but uses the block and attestation pools along with
# a database, as if a real node was running.
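#
# Example invocation (option names are derived by confutils from the `cli do`
# parameters below; the exact flag spelling/format may differ):
#
#   build/block_sim --slots=192 --validators=12800 --attesterRatio=0.9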
import
math, stats, times, strformat,
tables, options, random, os,
confutils, chronicles, eth/db/kvstore_sqlite3,
chronos/timer, eth/keys, taskpools,
../tests/testblockutil,
../beacon_chain/spec/[
beaconstate, forks, helpers, signatures, state_transition],
../beacon_chain/spec/datatypes/[phase0, altair, bellatrix],
../beacon_chain/[beacon_chain_db, beacon_clock],
../beacon_chain/eth1/eth1_monitor,
../beacon_chain/validators/validator_pool,
../beacon_chain/gossip_processing/[batch_validation, gossip_validation],
../beacon_chain/consensus_object_pools/[blockchain_dag, block_quarantine,
block_clearance, attestation_pool,
sync_committee_msg_pool],
./simutils
from ../beacon_chain/spec/datatypes/capella import SignedBeaconBlock
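# Labels for the per-phase timings collected during the run and printed by
# `printTimers` at the end.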
type Timers = enum
tBlock = "Process non-epoch slot with block"
tEpoch = "Process epoch slot with block"
tHashBlock = "Tree-hash block"
tSignBlock = "Sign block"
tAttest = "Have committee attest to block"
tSyncCommittees = "Produce sync committee actions"
tReplay = "Replay all produced blocks"
template seconds(x: uint64): timer.Duration =
timer.seconds(int(x))
func gauss(r: var Rand; mu = 0.0; sigma = 1.0): float =
# TODO This is present in Nim 1.4
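# Ratio-of-uniforms sampler for a normal distribution, same algorithm as the
# stdlib `gauss` referenced above.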
const K = sqrt(2 / E)
var
a = 0.0
b = 0.0
while true:
a = rand(r, 1.0)
b = (2.0 * rand(r, 1.0) - 1.0) * K
if b * b <= -4.0 * a * a * ln(a): break
mu + sigma * (b / a)
from ../beacon_chain/spec/state_transition_block import process_block
# TODO The rest of nimbus-eth2 uses only the forked version of these, and in
# general it's better for the validator_duties caller to use the forkedstate
# version, so isolate these here pending refactoring of block_sim to prefer,
# when possible, to also use the forked version. It'll be worth keeping some
# example of the non-forked version because it enables fork bootstrapping.
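# Note: the phase0/altair/bellatrix overloads accept `bls_to_execution_changes`
# only to keep a uniform signature; the field exists in block bodies from
# capella onwards, so only the capella overload forwards it.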
proc makeBeaconBlock(
cfg: RuntimeConfig,
state: var phase0.HashedBeaconState,
proposer_index: ValidatorIndex,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
attestations: seq[Attestation],
deposits: seq[Deposit],
exits: BeaconBlockExits,
sync_aggregate: SyncAggregate,
execution_payload: bellatrix.ExecutionPayload,
bls_to_execution_changes: SignedBLSToExecutionChangeList,
rollback: RollbackHashedProc[phase0.HashedBeaconState],
cache: var StateCache,
# TODO:
# `verificationFlags` is needed only in tests and can be
# removed if we don't use invalid signatures there
verificationFlags: UpdateFlags = {}): Result[phase0.BeaconBlock, cstring] =
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
## state.slot, meaning process_slots must be called up to the slot for which
## the block is to be created.
# To create a block, we'll first apply a partial block to the state, skipping
# some validations.
var blck = partialBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, exits, sync_aggregate, execution_payload)
let res = process_block(
cfg, state.data, blck.asSigVerified(), verificationFlags, cache)
if res.isErr:
rollback(state)
return err(res.error())
state.root = hash_tree_root(state.data)
blck.state_root = state.root
ok(blck)
proc makeBeaconBlock(
cfg: RuntimeConfig,
state: var altair.HashedBeaconState,
proposer_index: ValidatorIndex,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
attestations: seq[Attestation],
deposits: seq[Deposit],
exits: BeaconBlockExits,
sync_aggregate: SyncAggregate,
execution_payload: bellatrix.ExecutionPayload,
bls_to_execution_changes: SignedBLSToExecutionChangeList,
rollback: RollbackHashedProc[altair.HashedBeaconState],
cache: var StateCache,
# TODO:
# `verificationFlags` is needed only in tests and can be
# removed if we don't use invalid signatures there
verificationFlags: UpdateFlags = {}): Result[altair.BeaconBlock, cstring] =
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
## state.slot, meaning process_slots must be called up to the slot for which
## the block is to be created.
# To create a block, we'll first apply a partial block to the state, skipping
# some validations.
var blck = partialBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, exits, sync_aggregate, execution_payload)
# Signatures are verified elsewhere, so don't duplicate inefficiently here
let res = process_block(
cfg, state.data, blck.asSigVerified(), verificationFlags, cache)
if res.isErr:
rollback(state)
return err(res.error())
state.root = hash_tree_root(state.data)
blck.state_root = state.root
ok(blck)
proc makeBeaconBlock(
cfg: RuntimeConfig,
state: var bellatrix.HashedBeaconState,
proposer_index: ValidatorIndex,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
attestations: seq[Attestation],
deposits: seq[Deposit],
exits: BeaconBlockExits,
sync_aggregate: SyncAggregate,
execution_payload: bellatrix.ExecutionPayload,
bls_to_execution_changes: SignedBLSToExecutionChangeList,
rollback: RollbackHashedProc[bellatrix.HashedBeaconState],
cache: var StateCache,
# TODO:
# `verificationFlags` is needed only in tests and can be
# removed if we don't use invalid signatures there
verificationFlags: UpdateFlags = {}): Result[bellatrix.BeaconBlock, cstring] =
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
## state.slot, meaning process_slots must be called up to the slot for which
## the block is to be created.
# To create a block, we'll first apply a partial block to the state, skipping
# some validations.
var blck = partialBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, exits, sync_aggregate, execution_payload)
let res = process_block(
cfg, state.data, blck.asSigVerified(), verificationFlags, cache)
if res.isErr:
rollback(state)
return err(res.error())
state.root = hash_tree_root(state.data)
blck.state_root = state.root
ok(blck)
proc makeBeaconBlock(
cfg: RuntimeConfig,
state: var capella.HashedBeaconState,
proposer_index: ValidatorIndex,
randao_reveal: ValidatorSig,
eth1_data: Eth1Data,
graffiti: GraffitiBytes,
attestations: seq[Attestation],
deposits: seq[Deposit],
exits: BeaconBlockExits,
sync_aggregate: SyncAggregate,
execution_payload: capella.ExecutionPayload,
bls_to_execution_changes: SignedBLSToExecutionChangeList,
rollback: RollbackHashedProc[capella.HashedBeaconState],
cache: var StateCache,
# TODO:
# `verificationFlags` is needed only in tests and can be
# removed if we don't use invalid signatures there
verificationFlags: UpdateFlags = {}): Result[capella.BeaconBlock, cstring] =
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
## state.slot, meaning process_slots must be called up to the slot for which
## the block is to be created.
# To create a block, we'll first apply a partial block to the state, skipping
# some validations.
var blck = partialBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, exits, sync_aggregate, execution_payload,
bls_to_execution_changes)
let res = process_block(
cfg, state.data, blck.asSigVerified(), verificationFlags, cache)
if res.isErr:
rollback(state)
return err(res.error())
state.root = hash_tree_root(state.data)
blck.state_root = state.root
ok(blck)
# TODO confutils is an impenetrable black box. how can a help text be added here?
cli do(slots = SLOTS_PER_EPOCH * 6,
validators = SLOTS_PER_EPOCH * 400, # One per shard is minimum
attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.82,
syncCommitteeRatio {.desc: "ratio of validators that perform sync committee actions in each round"} = 0.82,
blockRatio {.desc: "ratio of slots with blocks"} = 1.0,
replay = true):
let
(genesisState, depositContractSnapshot) = loadGenesis(validators, false)
genesisTime = float getStateField(genesisState[], genesis_time)
var
cfg = defaultRuntimeConfig
cfg.ALTAIR_FORK_EPOCH = 1.Epoch
cfg.BELLATRIX_FORK_EPOCH = 2.Epoch
cfg.CAPELLA_FORK_EPOCH = 3.Epoch
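# Stagger the fork epochs so a default run (6 epochs) produces phase0, altair,
# bellatrix and capella blocks within a single simulation.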
echo "Starting simulation..."
let db = BeaconChainDB.new("block_sim_db")
defer: db.close()
ChainDAGRef.preInit(db, genesisState[])
putInitialDepositContractSnapshot(db, depositContractSnapshot)
var
validatorMonitor = newClone(ValidatorMonitor.init())
dag = ChainDAGRef.init(cfg, db, validatorMonitor, {})
eth1Chain = Eth1Chain.init(cfg, db)
merkleizer = DepositsMerkleizer.init(depositContractSnapshot.depositContractState)
taskpool = Taskpool.new()
verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)
quarantine = newClone(Quarantine.init())
attPool = AttestationPool.init(dag, quarantine)
batchCrypto = BatchCrypto.new(
keys.newRng(), eager = func(): bool = true, taskpool)
syncCommitteePool = newClone SyncCommitteeMsgPool.init(keys.newRng())
timers: array[Timers, RunningStat]
attesters: RunningStat
r = initRand(1)
tmpState = assignClone(dag.headState)
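# Seed the simulated eth1 chain with a block at genesis time; deposit-carrying
# blocks generated in the main loop build on top of it.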
eth1Chain.addBlock Eth1Block(
number: Eth1BlockNumber 1,
timestamp: Eth1BlockTimestamp genesisTime)
let replayState = assignClone(dag.headState)
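# Have a randomly chosen `attesterRatio` fraction of each committee sign and
# add attestations for `slot` to the attestation pool.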
proc handleAttestations(slot: Slot) =
let
attestationHead = dag.head.atSlot(slot)
dag.withUpdatedState(tmpState[], attestationHead.toBlockSlotId.expect("not nil")) do:
let
fork = getStateField(state, fork)
genesis_validators_root = getStateField(state, genesis_validators_root)
committees_per_slot =
get_committee_count_per_slot(state, slot.epoch, cache)
for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee(
state, slot, committee_index, cache)
for index_in_committee, validator_index in committee:
if rand(r, 1.0) <= attesterRatio:
let
data = makeAttestationData(
state, slot, committee_index, bid.root)
sig =
get_attestation_signature(
fork, genesis_validators_root, data,
MockPrivKeys[validator_index])
attestation = Attestation.init(
[uint64 index_in_committee], committee.len, data,
sig.toValidatorSig()).expect("valid data")
attPool.addAttestation(
attestation, [validator_index], sig, data.slot.start_beacon_time)
do:
raiseAssert "withUpdatedState failed"
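# Simulate sync committee duties for `slot`: participating members sign a
# message for the current head, aggregators produce contributions, and both
# pass through gossip validation before being added to the pool.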
proc handleSyncCommitteeActions(slot: Slot) =
type
Aggregator = object
subcommitteeIdx: SyncSubcommitteeIndex
validatorIdx: ValidatorIndex
selectionProof: ValidatorSig
let
syncCommittee = @(dag.syncCommitteeParticipants(slot + 1))
genesis_validators_root = dag.genesis_validators_root
fork = dag.forkAtEpoch(slot.epoch)
messagesTime = slot.attestation_deadline()
contributionsTime = slot.sync_contribution_deadline()
var aggregators: seq[Aggregator]
for subcommitteeIdx in SyncSubcommitteeIndex:
for validatorIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
if rand(r, 1.0) > syncCommitteeRatio:
continue
let
validatorPrivKey = MockPrivKeys[validatorIdx]
signature = get_sync_committee_message_signature(
fork, genesis_validators_root, slot, dag.head.root, validatorPrivKey)
msg = SyncCommitteeMessage(
slot: slot,
beacon_block_root: dag.head.root,
validator_index: uint64 validatorIdx,
signature: signature.toValidatorSig)
let res = waitFor dag.validateSyncCommitteeMessage(
batchCrypto,
syncCommitteePool,
msg,
subcommitteeIdx,
messagesTime,
false)
doAssert res.isOk
let (positions, cookedSig) = res.get()
syncCommitteePool[].addSyncCommitteeMessage(
msg.slot,
msg.beacon_block_root,
msg.validator_index,
cookedSig,
subcommitteeIdx,
positions)
let
selectionProofSig = get_sync_committee_selection_proof(
fork, genesis_validators_root, slot, subcommitteeIdx,
validatorPrivKey).toValidatorSig
if is_sync_committee_aggregator(selectionProofSig):
aggregators.add Aggregator(
subcommitteeIdx: subcommitteeIdx,
validatorIdx: validatorIdx,
selectionProof: selectionProofSig)
for aggregator in aggregators:
var contribution: SyncCommitteeContribution
let contributionWasProduced = syncCommitteePool[].produceContribution(
slot, dag.head.root, aggregator.subcommitteeIdx, contribution)
if contributionWasProduced:
let
contributionAndProof = ContributionAndProof(
aggregator_index: uint64 aggregator.validatorIdx,
contribution: contribution,
selection_proof: aggregator.selectionProof)
validatorPrivKey =
MockPrivKeys[aggregator.validatorIdx.ValidatorIndex]
signedContributionAndProof = SignedContributionAndProof(
message: contributionAndProof,
signature: get_contribution_and_proof_signature(
fork, genesis_validators_root, contributionAndProof,
validatorPrivKey).toValidatorSig)
res = waitFor dag.validateContribution(
batchCrypto,
syncCommitteePool,
signedContributionAndProof,
contributionsTime,
false)
doAssert res.isOk
discard syncCommitteePool[].addContribution(
signedContributionAndProof, res.get()[0])
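# Create and sign a block of fork `T` on top of `state`, filling it with
# attestations, deposits and a sync aggregate taken from the various pools.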
proc getNewBlock[T](
state: var ForkedHashedBeaconState, slot: Slot, cache: var StateCache): T =
let
finalizedEpochRef = dag.getFinalizedEpochRef()
proposerIdx = get_beacon_proposer_index(
state, cache, getStateField(state, slot)).get()
privKey = MockPrivKeys[proposerIdx]
eth1ProposalData = eth1Chain.getBlockProposalData(
state,
finalizedEpochRef.eth1_data,
finalizedEpochRef.eth1_deposit_index)
sync_aggregate =
when T is phase0.SignedBeaconBlock:
SyncAggregate.init()
elif T is altair.SignedBeaconBlock or T is bellatrix.SignedBeaconBlock or
T is capella.SignedBeaconBlock:
syncCommitteePool[].produceSyncAggregate(dag.head.root)
hashedState =
when T is phase0.SignedBeaconBlock:
addr state.phase0Data
elif T is altair.SignedBeaconBlock:
addr state.altairData
elif T is bellatrix.SignedBeaconBlock:
addr state.bellatrixData
elif T is capella.SignedBeaconBlock:
addr state.capellaData
else:
static: doAssert false
message = makeBeaconBlock(
cfg,
hashedState[],
proposerIdx,
get_epoch_signature(
getStateField(state, fork),
getStateField(state, genesis_validators_root),
slot.epoch, privKey).toValidatorSig(),
eth1ProposalData.vote,
default(GraffitiBytes),
attPool.getAttestationsForBlock(state, cache),
eth1ProposalData.deposits,
BeaconBlockExits(),
sync_aggregate,
when T is capella.SignedBeaconBlock:
default(capella.ExecutionPayload)
else:
default(bellatrix.ExecutionPayload),
default(SignedBLSToExecutionChangeList),
noRollback,
cache)
var
newBlock = T(
message: message.get()
)
let blockRoot = withTimerRet(timers[tHashBlock]):
hash_tree_root(newBlock.message)
newBlock.root = blockRoot
# Careful, state no longer valid after here because of the await..
newBlock.signature = withTimerRet(timers[tSignBlock]):
get_block_signature(
getStateField(state, fork),
getStateField(state, genesis_validators_root),
newBlock.message.slot,
blockRoot, privKey).toValidatorSig()
newBlock
# TODO when withUpdatedState's state template doesn't conflict with chronos's
# HTTP server's state function, combine all proposeForkBlock functions into a
# single generic function. Until https://github.com/nim-lang/Nim/issues/20811
# is fixed, that generic function must take `blockRatio` as a parameter.
proc proposePhase0Block(slot: Slot) =
if rand(r, 1.0) > blockRatio:
return
dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do:
let
newBlock = getNewBlock[phase0.SignedBeaconBlock](state, slot, cache)
added = dag.addHeadBlock(verifier, newBlock) do (
blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef, unrealized: FinalityCheckpoints):
# Callback add to fork choice if valid
attPool.addForkChoice(
epochRef, blckRef, unrealized, signedBlock.message,
blckRef.slot.start_beacon_time)
dag.updateHead(added[], quarantine[])
if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG()
attPool.prune()
do:
raiseAssert "withUpdatedState failed"
proc proposeAltairBlock(slot: Slot) =
if rand(r, 1.0) > blockRatio:
return
dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do:
let
newBlock = getNewBlock[altair.SignedBeaconBlock](state, slot, cache)
added = dag.addHeadBlock(verifier, newBlock) do (
blckRef: BlockRef, signedBlock: altair.TrustedSignedBeaconBlock,
epochRef: EpochRef, unrealized: FinalityCheckpoints):
# Callback add to fork choice if valid
attPool.addForkChoice(
epochRef, blckRef, unrealized, signedBlock.message,
blckRef.slot.start_beacon_time)
dag.updateHead(added[], quarantine[])
if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG()
attPool.prune()
do:
raiseAssert "withUpdatedState failed"
proc proposeBellatrixBlock(slot: Slot) =
if rand(r, 1.0) > blockRatio:
return
dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do:
let
newBlock = getNewBlock[bellatrix.SignedBeaconBlock](state, slot, cache)
added = dag.addHeadBlock(verifier, newBlock) do (
blckRef: BlockRef, signedBlock: bellatrix.TrustedSignedBeaconBlock,
epochRef: EpochRef, unrealized: FinalityCheckpoints):
# Callback add to fork choice if valid
attPool.addForkChoice(
epochRef, blckRef, unrealized, signedBlock.message,
blckRef.slot.start_beacon_time)
dag.updateHead(added[], quarantine[])
if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG()
attPool.prune()
do:
raiseAssert "withUpdatedState failed"
proc proposeCapellaBlock(slot: Slot) =
if rand(r, 1.0) > blockRatio:
return
dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do:
let
newBlock = getNewBlock[capella.SignedBeaconBlock](state, slot, cache)
added = dag.addHeadBlock(verifier, newBlock) do (
blckRef: BlockRef, signedBlock: capella.TrustedSignedBeaconBlock,
epochRef: EpochRef, unrealized: FinalityCheckpoints):
# Callback add to fork choice if valid
attPool.addForkChoice(
epochRef, blckRef, unrealized, signedBlock.message,
blckRef.slot.start_beacon_time)
dag.updateHead(added[], quarantine[])
if dag.needStateCachesAndForkChoicePruning():
dag.pruneStateCachesDAG()
attPool.prune()
do:
raiseAssert "withUpdatedState failed"
var
lastEth1BlockAt = genesisTime
eth1BlockNum = 1000
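# Main simulation loop: for every slot, generate fake eth1 blocks, optionally
# propose a block for the currently active fork, then run attestation and
# sync committee duties.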
for i in 0..<slots:
let
slot = Slot(i + 1)
t =
if slot.is_epoch: tEpoch
else: tBlock
now = genesisTime + float(slot * SECONDS_PER_SLOT)
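# Generate fake eth1 blocks (each carrying a random number of deposits) at
# roughly SECONDS_PER_ETH1_BLOCK intervals, up to the current slot time.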
while true:
let nextBlockTime = lastEth1BlockAt +
max(1.0, gauss(r, float defaultRuntimeConfig.SECONDS_PER_ETH1_BLOCK, 3.0))
if nextBlockTime > now:
break
inc eth1BlockNum
var eth1Block = Eth1Block(
hash: makeFakeHash(eth1BlockNum),
number: Eth1BlockNumber eth1BlockNum,
timestamp: Eth1BlockTimestamp nextBlockTime)
let newDeposits = int clamp(gauss(r, 5.0, 8.0), 0.0, 1000.0)
for i in 0 ..< newDeposits:
let validatorIdx = merkleizer.getChunkCount.int
let d = makeDeposit(validatorIdx, {skipBlsValidation})
eth1Block.deposits.add d
merkleizer.addChunk hash_tree_root(d).data
eth1Block.depositRoot = merkleizer.getDepositsRoot
eth1Block.depositCount = merkleizer.getChunkCount
eth1Chain.addBlock eth1Block
lastEth1BlockAt = nextBlockTime
if blockRatio > 0.0:
withTimer(timers[t]):
case dag.cfg.stateForkAtEpoch(slot.epoch)
of BeaconStateFork.Capella: proposeCapellaBlock(slot)
of BeaconStateFork.Bellatrix: proposeBellatrixBlock(slot)
of BeaconStateFork.Altair: proposeAltairBlock(slot)
of BeaconStateFork.Phase0: proposePhase0Block(slot)
if attesterRatio > 0.0:
withTimer(timers[tAttest]):
handleAttestations(slot)
if syncCommitteeRatio > 0.0:
withTimer(timers[tSyncCommittees]):
handleSyncCommitteeActions(slot)
syncCommitteePool[].pruneData(slot)
# TODO if attestation pool was smarter, it would include older attestations
# too!
verifyConsensus(dag.headState, attesterRatio * blockRatio)
if t == tEpoch:
echo &". slot: {shortLog(slot)} ",
&"epoch: {shortLog(slot.epoch)}"
else:
write(stdout, ".")
flushFile(stdout)
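# Optionally replay the full chain of produced blocks onto a copy of the
# starting state, timing how long the replay takes.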
if replay:
withTimer(timers[tReplay]):
var cache = StateCache()
doAssert dag.updateState(
replayState[], dag.getBlockIdAtSlot(Slot(slots)).expect("block"),
false, cache)
echo "Done!"
printTimers(dag.headState, attesters, true, timers)