ncli_db: more readonly support (#3411)

Update several `ncli_db` commands to run in readOnly mode, allowing them
to be used with a running instance - in particular era export.

* export all eras by default
* skip already-exported eras
Jacek Sieka 2022-02-18 07:37:44 +01:00 committed by GitHub
parent c34104adbd
commit a88427bd39
2 changed files with 42 additions and 32 deletions
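
In practice, these changes mean `ncli_db exportEra` can be pointed at the database of a running node and re-run at will: `--count` now defaults to 0 ("all eras"), and eras whose files already exist on disk are skipped. The following standalone Nim sketch illustrates that loop shape under simplified assumptions (the `exportEras` helper, its parameters, and the file naming are hypothetical; the real command derives era file names from the head state and writes the era's blocks plus a state snapshot):

import std/os

# Standalone sketch of the new export loop (a simplification, not the commit's
# code): a count of 0 means "all remaining eras", and eras whose files already
# exist on disk are skipped so the command can safely be re-run.
proc exportEras(firstEra, eraCount, headEra: uint64, outDir: string) =
  var era = firstEra
  while eraCount == 0 or era < firstEra + eraCount:
    if era > headEra:
      break                                        # nothing beyond the head yet
    let name = outDir / ("era-" & $era & ".era")   # hypothetical naming scheme
    if fileExists(name):
      echo "Skipping ", name, " (already exists)"
    else:
      echo "Writing ", name
      writeFile(name, "")                          # placeholder for blocks + state
    era += 1

Run again with the same arguments, such a loop only produces the files that are still missing, which is what makes incremental exports against a live database practical.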

beacon_chain/beacon_chain_db.nim

@@ -1082,16 +1082,18 @@ iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
   # from the old format, this brings down the write from minutes to seconds
   stmt.dispose()
 
-  if newSummaries.len() > 0:
-    db.withManyWrites:
-      for s in newSummaries:
-        db.putBeaconBlockSummary(s.root, s.summary)
-      # Clean up pre-altair summaries - by now, we will have moved them to the
-      # new table
-      db.db.exec(
-        "DELETE FROM kvstore WHERE key >= ? and key < ?",
-        ([byte ord(kHashToBlockSummary)], [byte ord(kHashToBlockSummary) + 1])).expectDb()
+  if not db.db.readOnly:
+    if newSummaries.len() > 0:
+      db.withManyWrites:
+        for s in newSummaries:
+          db.putBeaconBlockSummary(s.root, s.summary)
+
+    # Clean up pre-altair summaries - by now, we will have moved them to the
+    # new table
+    db.db.exec(
+      "DELETE FROM kvstore WHERE key >= ? and key < ?",
+      ([byte ord(kHashToBlockSummary)], [byte ord(kHashToBlockSummary) + 1])).expectDb()
 
   var row: stmt.Result
   for rowRes in exec(stmt, root.data, row):
     expectDb rowRes
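
Some background on this hunk (explanation added here, not part of the commit): getAncestorSummaries opportunistically migrates block summaries from the legacy kvstore encoding into the dedicated summaries table while it reads, so both the backfill and the cleanup DELETE must be skipped when the handle was opened read-only, as the ncli_db commands below now do. A minimal standalone Nim sketch of that shape, using simplified stand-in types rather than the real BeaconChainDB:

# Stand-in types only; the real code pairs putBeaconBlockSummary with the
# kvstore DELETE shown in the hunk above.
type SummaryStore = object
  readOnly: bool
  newTable: seq[string]   # summaries already stored in the new format
  legacy: seq[string]     # summaries still stored in the old kvstore format

iterator ancestorSummaries(db: var SummaryStore): string =
  for s in db.newTable: yield s
  for s in db.legacy: yield s
  if not db.readOnly:
    # Only a writable handle may persist the migration and drop the legacy
    # rows; a read-only handle behaves as a pure read.
    db.newTable.add db.legacy
    db.legacy.setLen(0)

Either way the caller sees the same summaries; only the side effects differ.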

ncli/ncli_db.nim

@@ -132,9 +132,9 @@ type
         defaultValue: 0
         desc: "The era number to write".}: uint64
       eraCount* {.
-        defaultValue: 1
+        defaultValue: 0
         name: "count"
-        desc: "Number of eras to write".}: uint64
+        desc: "Number of eras to write (0=all)".}: uint64
 
     of DbCmd.importEra:
       eraFiles* {.
@@ -314,7 +314,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
   printTimers(false, timers)
 
 proc cmdDumpState(conf: DbConf) =
-  let db = BeaconChainDB.new(conf.databaseDir.string)
+  let db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
   defer: db.close()
 
   let
@@ -352,7 +352,7 @@ proc cmdPutState(conf: DbConf, cfg: RuntimeConfig) =
     db.putState(state)
 
 proc cmdDumpBlock(conf: DbConf) =
-  let db = BeaconChainDB.new(conf.databaseDir.string)
+  let db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
   defer: db.close()
 
   for blockRoot in conf.blockRootx:
@@ -391,7 +391,7 @@ proc cmdPutBlock(conf: DbConf, cfg: RuntimeConfig) =
 
 proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
   echo "Opening database..."
-  let db = BeaconChainDB.new(conf.databaseDir.string)
+  let db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
   defer: db.close()
 
   if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
@@ -422,7 +422,7 @@ func atCanonicalSlot(blck: BlockRef, slot: Slot): BlockSlot =
     blck.atSlot(slot - 1).blck.atSlot(slot)
 
 proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
-  let db = BeaconChainDB.new(conf.databaseDir.string)
+  let db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
   defer: db.close()
 
   if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
@@ -443,7 +443,8 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
     tmp: seq[byte]
     timers: array[Timers, RunningStat]
 
-  for era in conf.era ..< conf.era + conf.eraCount:
+  var era = conf.era
+  while conf.eraCount == 0 or era < conf.era + conf.eraCount:
     if shouldShutDown: quit QuitSuccess
     let
       firstSlot =
@@ -457,24 +458,29 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
       break
 
     let name = withState(dag.headState.data): eraFileName(cfg, state.data, era)
-    echo "Writing ", name
+    if isFile(name):
+      echo "Skipping ", name, " (already exists)"
+    else:
+      echo "Writing ", name
 
-    let e2 = openFile(name, {OpenFlags.Write, OpenFlags.Create}).get()
-    defer: discard closeFile(e2)
+      let e2 = openFile(name, {OpenFlags.Write, OpenFlags.Create}).get()
+      defer: discard closeFile(e2)
 
-    var group = EraGroup.init(e2, firstSlot).get()
-    if firstSlot.isSome():
-      withTimer(timers[tBlocks]):
-        var blocks: array[SLOTS_PER_HISTORICAL_ROOT.int, BlockId]
-        for i in dag.getBlockRange(firstSlot.get(), 1, blocks)..<blocks.len:
-          if dag.getBlockSSZ(blocks[i], tmp):
-            group.update(e2, blocks[i].slot, tmp).get()
+      var group = EraGroup.init(e2, firstSlot).get()
+      if firstSlot.isSome():
+        withTimer(timers[tBlocks]):
+          var blocks: array[SLOTS_PER_HISTORICAL_ROOT.int, BlockId]
+          for i in dag.getBlockRange(firstSlot.get(), 1, blocks)..<blocks.len:
+            if dag.getBlockSSZ(blocks[i], tmp):
+              group.update(e2, blocks[i].slot, tmp).get()
 
-    withTimer(timers[tState]):
-      dag.withUpdatedState(tmpState[], canonical) do:
-        withState(stateData.data):
-          group.finish(e2, state.data).get()
-      do: raiseAssert "withUpdatedState failed"
+      withTimer(timers[tState]):
+        dag.withUpdatedState(tmpState[], canonical) do:
+          withState(stateData.data):
+            group.finish(e2, state.data).get()
+        do: raiseAssert "withUpdatedState failed"
+
+    era += 1
 
   printTimers(true, timers)
@@ -494,6 +500,8 @@ proc cmdImportEra(conf: DbConf, cfg: RuntimeConfig) =
 
   var data: seq[byte]
   for file in conf.eraFiles:
+    if shouldShutDown: quit QuitSuccess
+
     let f = openFile(file, {OpenFlags.Read}).valueOr:
       warn "Can't open ", file
       continue
@@ -550,7 +558,7 @@ type
 
 proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
   echo "Opening database..."
   let
-    db = BeaconChainDB.new(conf.databaseDir.string,)
+    db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
   defer:
     db.close()