import
  os, stats, strformat, tables,
  chronicles, confutils, stew/byteutils, eth/db/kvstore_sqlite3,
  ../beacon_chain/networking/network_metadata,
  ../beacon_chain/[beacon_chain_db],
  ../beacon_chain/consensus_object_pools/[
    blockchain_dag, forkedbeaconstate_dbhelpers],
  ../beacon_chain/spec/datatypes/phase0,
  ../beacon_chain/spec/[
    forks, helpers, state_transition, state_transition_epoch],
  ../beacon_chain/sszdump, ../research/simutils,
  ./e2store

type Timers = enum
  tInit = "Initialize DB"
  tLoadBlock = "Load block from database"
  tLoadState = "Load state from database"
  tAdvanceSlot = "Advance slot, non-epoch"
  tAdvanceEpoch = "Advance slot, epoch"
  tApplyBlock = "Apply block, no slot processing"
  tDbLoad = "Database load"
  tDbStore = "Database store"

type
  DbCmd* = enum
    bench
    dumpState
    dumpBlock
    pruneDatabase
    rewindState
    exportEra
    validatorPerf
    validatorDb = "Create or update attestation performance database"

  # TODO:
  # This should probably allow specifying a run-time preset
  DbConf = object
    databaseDir* {.
      defaultValue: ""
      desc: "Directory where `nbc.sqlite` is stored"
      name: "db" }: InputDir

    eth2Network* {.
      desc: "The Eth2 network preset to use"
      name: "network" }: Option[string]

    case cmd* {.
      command
      desc: ""
      .}: DbCmd

    of bench:
      benchSlot* {.
        defaultValue: 0
        name: "start-slot"
        desc: "Starting slot, negative = backwards from head".}: int64
      benchSlots* {.
        defaultValue: 50000
        name: "slots"
        desc: "Number of slots to run benchmark for, 0 = all the way to head".}: uint64
      storeBlocks* {.
        defaultValue: false
        desc: "Store each read block back into a separate database".}: bool
      storeStates* {.
        defaultValue: false
        desc: "Store a state each epoch into a separate database".}: bool
      printTimes* {.
        defaultValue: true
        desc: "Print csv of block processing time".}: bool
      resetCache* {.
        defaultValue: false
        desc: "Process each block with a fresh cache".}: bool

    of dumpState:
      stateRoot* {.
        argument
        desc: "State roots to save".}: seq[string]

    of dumpBlock:
      blockRootx* {.
        argument
        desc: "Block roots to save".}: seq[string]

    of pruneDatabase:
      dryRun* {.
        defaultValue: false
        desc: "Don't write to the database copy; only simulate actions; default false".}: bool
      keepOldStates* {.
        defaultValue: true
        desc: "Keep pre-finalization states; default true".}: bool
      verbose* {.
        defaultValue: false
        desc: "Enables verbose output; default false".}: bool

    of rewindState:
      blockRoot* {.
        argument
        desc: "Block root".}: string
      slot* {.
        argument
        desc: "Slot".}: uint64

    of exportEra:
      era* {.
        defaultValue: 0
        desc: "The era number to write".}: uint64
      eraCount* {.
        defaultValue: 1
        desc: "Number of eras to write".}: uint64

    of validatorPerf:
      perfSlot* {.
        defaultValue: -128 * SLOTS_PER_EPOCH.int64
        name: "start-slot"
        desc: "Starting slot, negative = backwards from head".}: int64
      perfSlots* {.
        defaultValue: 0
        name: "slots"
        desc: "Number of slots to run benchmark for, 0 = all the way to head".}: uint64

    of validatorDb:
      outDir* {.
        defaultValue: ""
        name: "out-db"
        desc: "Output database".}: string
      perfect* {.
        defaultValue: false
        name: "perfect"
        desc: "Include perfect records (full rewards)".}: bool
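# Sketch of the timing pattern the `Timers` enum above supports: every measured
# section pushes one sample into its `RunningStat` bucket, and the aggregate can
# be printed at the end (cf. the `--print-times` option). Only `withTimerRet`
# appears verbatim further down in this file; `withTimer`, `printTimers` and the
# `timingSketch`/`work` names are assumptions based on the ../research/simutils
# import, not part of this tool.
proc timingSketch(work: proc(): int) =
  var timers: array[Timers, RunningStat]

  # Time a section and keep its result
  let res = withTimerRet(timers[tInit]):
    work()

  # Time a section whose result is not needed
  withTimer(timers[tLoadBlock]):
    discard work()

  echo "work returned ", res
  printTimers(true, timers) # one summary row per Timers value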
func getSlotRange(dag: ChainDAGRef, startSlot: int64, count: uint64): (Slot, Slot) =
  let
    start =
      if startSlot >= 0: Slot(startSlot)
      elif uint64(-startSlot) >= dag.head.slot: Slot(0)
      else: Slot(dag.head.slot - uint64(-startSlot))
    ends =
      if count == 0: dag.head.slot + 1
      else: start + count
  (start, ends)

func getBlockRange(dag: ChainDAGRef, start, ends: Slot): seq[BlockRef] =
  # Range of blocks in reverse order
  var
    blockRefs: seq[BlockRef]
    cur = dag.head

  while cur != nil:
    if cur.slot < ends:
      if cur.slot < start or cur.slot == 0: # skip genesis
        break
      else:
        blockRefs.add cur
    cur = cur.parent
  blockRefs

proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
  var timers: array[Timers, RunningStat]

  echo "Opening database..."
  let
    db = BeaconChainDB.new(conf.databaseDir.string)
    dbBenchmark = BeaconChainDB.new("benchmark")
  defer:
    db.close()
    dbBenchmark.close()

  if not ChainDAGRef.isInitialized(db):
    echo "Database not initialized"
    quit 1

  echo "Initializing block pool..."
  let dag = withTimerRet(timers[tInit]):
    ChainDAGRef.init(cfg, db, {})

  var
    (start, ends) = dag.getSlotRange(conf.benchSlot, conf.benchSlots)
    blockRefs = dag.getBlockRange(start, ends)
    blocks: seq[phase0.TrustedSignedBeaconBlock]

  echo &"Loaded {dag.blocks.len} blocks, head slot {dag.head.slot}, selected {blockRefs.len} blocks"
  doAssert blockRefs.len() > 0, "Must select at least one block"

  for b in 0..

  # Era export: write per-era data to an e2store file.
    dag.head.slot:
      echo "Written all complete eras"
      break

    var e2s = E2Store.open(".", name, firstSlot).get()
    defer: e2s.close()

    dag.withState(tmpState[], canonical):
      e2s.appendRecord(stateData.data.hbsPhase0.data).get()

    var
      ancestors: seq[BlockRef]
      cur = canonical.blck
    if era != 0:
      while cur != nil and cur.slot >= firstSlot:
        ancestors.add(cur)
        cur = cur.parent
      for i in 0..

  # Validator performance analysis: per-epoch attestation accounting.
  doAssert blockRefs.len() > 0, "Must select at least one block"

  echo "# Analyzing performance for epochs ",
    blockRefs[^1].slot.epoch, " - ", blockRefs[0].slot.epoch

  let state = newClone(dag.headState)
  dag.updateStateData(
    state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)

  func processEpoch() =
    let
      prev_epoch_target_slot =
        state[].data.get_previous_epoch().compute_start_slot_at_epoch()
      penultimate_epoch_end_slot =
        if prev_epoch_target_slot == 0: Slot(0)
        else: prev_epoch_target_slot - 1
      first_slot_empty =
        state[].data.get_block_root_at_slot(prev_epoch_target_slot) ==
        state[].data.get_block_root_at_slot(penultimate_epoch_end_slot)

    let first_slot_attesters = block:
      let committee_count = state[].data.get_committee_count_per_slot(
        prev_epoch_target_slot.epoch, cache)
      var indices = HashSet[ValidatorIndex]()
      for committee_index in 0..
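# The validator-db code below drives an output SQLite database through `outDb`
# and two prepared statements, `insertEpochInfo` and `insertValidatorInfo`,
# whose setup is not shown here. A minimal sketch of what such setup could look
# like with the imported kvstore_sqlite3 module follows; the `openValidatorDb`
# name and the table/column layout are assumptions chosen to match the tuples
# written below, not this tool's actual schema.
proc openValidatorDb(dir: string): auto =
  let outDb = SqStoreRef.init(dir, "validatorDb").expect("DB")

  outDb.exec("""
    CREATE TABLE IF NOT EXISTS epoch_info(
      epoch INTEGER PRIMARY KEY,
      current_epoch_raw INTEGER,
      previous_epoch_raw INTEGER,
      current_epoch_attesters_raw INTEGER,
      current_epoch_target_attesters_raw INTEGER,
      previous_epoch_attesters_raw INTEGER,
      previous_epoch_target_attesters_raw INTEGER,
      previous_epoch_head_attesters_raw INTEGER);""").expect("DB")

  outDb.exec("""
    CREATE TABLE IF NOT EXISTS validator_epoch_info(
      validator_index INTEGER,
      epoch INTEGER,
      rewards INTEGER,
      penalties INTEGER,
      source_attester INTEGER,
      target_attester INTEGER,
      head_attester INTEGER,
      inclusion_delay INTEGER,
      PRIMARY KEY(validator_index, epoch));""").expect("DB")

  let
    insertEpochInfo = outDb.prepareStmt(
      "INSERT OR IGNORE INTO epoch_info VALUES (?, ?, ?, ?, ?, ?, ?, ?);",
      (int64, int64, int64, int64, int64, int64, int64, int64),
      void).expect("DB")
    insertValidatorInfo = outDb.prepareStmt(
      "INSERT OR IGNORE INTO validator_epoch_info VALUES (?, ?, ?, ?, ?, ?, ?, ?);",
      (int64, int64, int64, int64, int64, int64, int64, Option[int64]),
      void).expect("DB")

  (outDb, insertEpochInfo, insertValidatorInfo)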
  # Validator reward export: per-epoch balances and per-validator deltas are
  # written to the output database.
  if start > ends:
    echo "No (new) data found, database at ", minEpoch, ", finalized to ", ends.epoch
    quit 1

  let blockRefs = dag.getBlockRange(start, ends)

  echo "Analyzing performance for epochs ", start.epoch, " - ", ends.epoch

  let state = newClone(dag.headState)
  dag.updateStateData(
    state[], blockRefs[^1].atSlot(if start > 0: start - 1 else: 0.Slot),
    false, cache)

  proc processEpoch() =
    echo getStateField(state[].data, slot).epoch

    outDb.exec("BEGIN TRANSACTION;").expect("DB")

    insertEpochInfo.exec(
      (getStateField(state[].data, slot).epoch.int64,
       rewards.total_balances.current_epoch_raw.int64,
       rewards.total_balances.previous_epoch_raw.int64,
       rewards.total_balances.current_epoch_attesters_raw.int64,
       rewards.total_balances.current_epoch_target_attesters_raw.int64,
       rewards.total_balances.previous_epoch_attesters_raw.int64,
       rewards.total_balances.previous_epoch_target_attesters_raw.int64,
       rewards.total_balances.previous_epoch_head_attesters_raw.int64)
    ).expect("DB")

    for index, status in rewards.statuses.pairs():
      if not is_eligible_validator(status):
        continue
      let
        notSlashed = (RewardFlags.isSlashed notin status.flags)
        source_attester =
          notSlashed and status.is_previous_epoch_attester.isSome()
        target_attester =
          notSlashed and RewardFlags.isPreviousEpochTargetAttester in status.flags
        head_attester =
          notSlashed and RewardFlags.isPreviousEpochHeadAttester in status.flags
        delay =
          if notSlashed and status.is_previous_epoch_attester.isSome():
            some(int64(status.is_previous_epoch_attester.get().delay))
          else:
            none(int64)

      if conf.perfect or
          not (source_attester and target_attester and head_attester and
               delay.isSome() and delay.get() == 1):
        insertValidatorInfo.exec(
          (index.int64,
           getStateField(state[].data, slot).epoch.int64,
           status.delta.rewards.int64,
           status.delta.penalties.int64,
           int64(source_attester), # Source delta
           int64(target_attester), # Target delta
           int64(head_attester), # Head delta
           delay)).expect("DB")

    outDb.exec("COMMIT;").expect("DB")

  for bi in 0..
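# How the commands defined in `DbCmd` are typically dispatched once the
# configuration has been parsed: a minimal sketch, assuming `DbConf.load()`
# (confutils) and `getRuntimeConfig` (network_metadata) based on the imports at
# the top of the file. Only `cmdBench` is referenced directly; the remaining
# commands are left as a placeholder.
when isMainModule:
  let
    conf = DbConf.load()
    cfg = getRuntimeConfig(conf.eth2Network)

  case conf.cmd
  of bench:
    cmdBench(conf, cfg)
  else:
    # dumpState, dumpBlock, pruneDatabase, rewindState, exportEra,
    # validatorPerf and validatorDb would be routed to their handlers here
    discard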