import
  os, stats, strformat, tables,
  chronicles, confutils, stew/byteutils,
  eth/db/kvstore_sqlite3,
  ../beacon_chain/networking/network_metadata,
  ../beacon_chain/[beacon_chain_db, extras],
  ../beacon_chain/consensus_object_pools/[blockchain_dag, statedata_helpers],
  ../beacon_chain/spec/[crypto, datatypes, digest, helpers, state_transition,
                        presets],
  ../beacon_chain/ssz, ../beacon_chain/ssz/sszdump,
  ../research/simutils, ./e2store

type Timers = enum
  ## Buckets for the RunningStat timing table printed by the bench command.
  tInit = "Initialize DB"
  tLoadBlock = "Load block from database"
  tLoadState = "Load state from database"
  tAdvanceSlot = "Advance slot, non-epoch"
  tAdvanceEpoch = "Advance slot, epoch"
  tApplyBlock = "Apply block, no slot processing"
  tDbLoad = "Database load"
  tDbStore = "Database store"

type
  DbCmd* = enum
    ## Sub-commands understood by this database tool.
    bench
    dumpState
    dumpBlock
    pruneDatabase
    rewindState
    exportEra
    validatorPerf

  # TODO:
  # This should probably allow specifying a run-time preset
  DbConf = object
    ## Command-line configuration (parsed by confutils); the `case cmd`
    ## variant holds the per-sub-command options.
    databaseDir* {.
        defaultValue: ""
        desc: "Directory where `nbc.sqlite` is stored"
        name: "db" }: InputDir

    eth2Network* {.
        desc: "The Eth2 network preset to use"
        name: "network" }: Option[string]

    case cmd* {.
      command
      desc: ""
      .}: DbCmd

    of bench:
      benchSlot* {.
        defaultValue: 0
        name: "start-slot"
        desc: "Starting slot, negative = backwards from head".}: int64
      benchSlots* {.
        defaultValue: 50000
        name: "slots"
        desc: "Number of slots to run benchmark for, 0 = all the way to head".}: uint64
      storeBlocks* {.
        defaultValue: false
        desc: "Store each read block back into a separate database".}: bool
      storeStates* {.
        defaultValue: false
        desc: "Store a state each epoch into a separate database".}: bool
      printTimes* {.
        defaultValue: true
        desc: "Print csv of block processing time".}: bool
      resetCache* {.
        defaultValue: false
        desc: "Process each block with a fresh cache".}: bool

    of dumpState:
      stateRoot* {.
        argument
        desc: "State roots to save".}: seq[string]

    of dumpBlock:
      blockRootx* {.
        argument
        desc: "Block roots to save".}: seq[string]

    of pruneDatabase:
      dryRun* {.
        defaultValue: false
        desc: "Don't write to the database copy; only simulate actions; default false".}: bool
      keepOldStates* {.
        defaultValue: true
        desc: "Keep pre-finalization states; default true".}: bool
      verbose* {.
        defaultValue: false
        desc: "Enables verbose output; default false".}: bool

    of rewindState:
      blockRoot* {.
        argument
        desc: "Block root".}: string
      slot* {.
        argument
        desc: "Slot".}: uint64

    of exportEra:
      era* {.
        defaultValue: 0
        desc: "The era number to write".}: uint64
      eraCount* {.
        defaultValue: 1
        desc: "Number of eras to write".}: uint64

    of validatorPerf:
      perfSlot* {.
        defaultValue: -128 * SLOTS_PER_EPOCH.int64
        name: "start-slot"
        desc: "Starting slot, negative = backwards from head".}: int64
      perfSlots* {.
        defaultValue: 0
        name: "slots"
        desc: "Number of slots to run benchmark for, 0 = all the way to head".}: uint64

proc getBlockRange(dag: ChainDAGRef, startSlot: int64, count: uint64): seq[BlockRef] =
  ## Collect the canonical blocks in `[start, start + count)`, walking the
  ## parent links backwards from `dag.head`.
  ## `startSlot` may be negative, meaning "this many slots back from head"
  ## (clamped to 0); `count == 0` means "all the way up to head".
  # Range of block in reverse order
  let
    start =
      if startSlot >= 0: Slot(startSlot)
      elif uint64(-startSlot) >= dag.head.slot: Slot(0)
      else: Slot(dag.head.slot - uint64(-startSlot))
    ends =
      if count == 0: dag.head.slot + 1
      else: start + count
  var
    blockRefs: seq[BlockRef]
    cur = dag.head

  # Walk from head towards genesis; blocks end up newest-first in the result.
  while cur != nil:
    if cur.slot < ends:
      if cur.slot < start or cur.slot == 0: # skip genesis
        break
      else:
        blockRefs.add cur
    cur = cur.parent
  blockRefs

proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
  ## Benchmark block/state loading and state transition against the database
  ## selected via `conf.databaseDir`, optionally copying the data into a
  ## separate "benchmark" database to time writes as well.
  # NOTE(review): the remainder of this proc continues past this span.
  var timers: array[Timers, RunningStat]

  echo "Opening database..."
  let
    db = BeaconChainDB.new(
      runtimePreset, conf.databaseDir.string,)
    dbBenchmark = BeaconChainDB.new(runtimePreset, "benchmark")
  defer:
    db.close()
    dbBenchmark.close()

  if not ChainDAGRef.isInitialized(db):
    echo "Database not initialized"
    quit 1

  echo "Initializing block pool..."
# NOTE(review): this span is corrupted source extraction, not valid Nim. All
# newlines were collapsed onto one physical line, and several spans appear to
# have been deleted wholesale (each `for x in 0..` is missing the `<bound>` and
# everything up to an unrelated later token, e.g. "for b in 0.. dag.head.slot"),
# so the bodies of the bench loop, the dump/prune/rewind commands and much of
# the era export are simply gone. The final `cmdValidatorPerf`/`processEpoch`
# logic is also truncated mid-statement ("for committee_index in 0.."). Do not
# attempt to compile or restructure this text; recover it from version control
# (it matches nimbus-eth2's ncli/ncli_db.nim) — TODO confirm against upstream.
let dag = withTimerRet(timers[tInit]): ChainDAGRef.init(runtimePreset, db, {}) var blockRefs = dag.getBlockRange(conf.benchSlot, conf.benchSlots) blocks: seq[TrustedSignedBeaconBlock] echo &"Loaded {dag.blocks.len} blocks, head slot {dag.head.slot}, selected {blockRefs.len} blocks" doAssert blockRefs.len() > 0, "Must select at least one block" for b in 0.. dag.head.slot: echo "Written all complete eras" break var e2s = E2Store.open(".", name, firstSlot).get() defer: e2s.close() dag.withState(tmpState[], canonical): e2s.appendRecord(stateData.data.data).get() var ancestors: seq[BlockRef] cur = canonical.blck if era != 0: while cur != nil and cur.slot >= firstSlot: ancestors.add(cur) cur = cur.parent for i in 0.. 0, "Must select at least one block" echo "# Analyzing performance for epochs ", blockRefs[^1].slot.epoch, " - ", blockRefs[0].slot.epoch let state = newClone(dag.headState) dag.updateStateData( state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache) proc processEpoch() = let prev_epoch_target_slot = state[].get_previous_epoch().compute_start_slot_at_epoch() penultimate_epoch_end_slot = if prev_epoch_target_slot == 0: Slot(0) else: prev_epoch_target_slot - 1 first_slot_empty = state[].get_block_root_at_slot(prev_epoch_target_slot) == state[].get_block_root_at_slot(penultimate_epoch_end_slot) let first_slot_attesters = block: let committee_count = state[].get_committee_count_per_slot( prev_epoch_target_slot.epoch, cache) var indices = HashSet[ValidatorIndex]() for committee_index in 0..