more strict read-only database mode (#4362)
* avoid creating pre-altair backwards compatibility tables
* allow running ncli_db era export without the above tables present
* drop unused pre-altair backwards compatibility tables
* run benchmark on a read-only database
* fix running benchmark from genesis
parent 07885a7210
commit cd160b5650
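A note on intent before the diff: under the stricter mode, a consumer that only reads an existing database opens it read-only, and none of the backwards-compatibility tables below are created as a side effect. A minimal sketch of such a caller, assuming the import path and directory shown here (both illustrative, not part of the commit):

# Sketch only - not part of this commit; import path and directory are assumptions.
import beacon_chain/beacon_chain_db

proc inspect() =
  # Read-only open: with this change, no legacy/compatibility tables are created.
  let db = BeaconChainDB.new("/path/to/nbc/db", readOnly = true)  # illustrative path
  defer: db.close()
  # ... read-only queries against db ...

inspect()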
@@ -240,45 +240,56 @@ template expectDb(x: auto): untyped =
   # full disk - this requires manual intervention, so we'll panic for now
   x.expect("working database (disk broken/full?)")

-proc init*[T](Seq: type DbSeq[T], db: SqStoreRef, name: string): KvResult[Seq] =
-  ? db.exec("""
-    CREATE TABLE IF NOT EXISTS """ & name & """(
-       id INTEGER PRIMARY KEY,
-       value BLOB
-    );
-  """)
-
-  let
-    insertStmt = db.prepareStmt(
-      "INSERT INTO " & name & "(value) VALUES (?);",
-      openArray[byte], void, managed = false).expect("this is a valid statement")
-
-    selectStmt = db.prepareStmt(
-      "SELECT value FROM " & name & " WHERE id = ?;",
-      int64, openArray[byte], managed = false).expect("this is a valid statement")
-
-    countStmt = db.prepareStmt(
-      "SELECT COUNT(1) FROM " & name & ";",
-      NoParams, int64, managed = false).expect("this is a valid statement")
-
-  var recordCount = int64 0
-  let countQueryRes = countStmt.exec do (res: int64):
-    recordCount = res
-
-  let found = ? countQueryRes
-  if not found:
-    return err("Cannot count existing items")
-  countStmt.dispose()
-
-  ok(Seq(insertStmt: insertStmt,
-         selectStmt: selectStmt,
-         recordCount: recordCount))
-
-proc close*(s: DbSeq) =
+proc init*[T](
+    Seq: type DbSeq[T], db: SqStoreRef, name: string,
+    readOnly = false): KvResult[Seq] =
+  let hasTable = if db.readOnly or readOnly:
+    ? db.hasTable(name)
+  else:
+    ? db.exec("""
+      CREATE TABLE IF NOT EXISTS '""" & name & """'(
+         id INTEGER PRIMARY KEY,
+         value BLOB
+      );
+    """)
+    true
+  if hasTable:
+    let
+      insertStmt = db.prepareStmt(
+        "INSERT INTO '" & name & "'(value) VALUES (?);",
+        openArray[byte], void, managed = false).expect("this is a valid statement")
+
+      selectStmt = db.prepareStmt(
+        "SELECT value FROM '" & name & "' WHERE id = ?;",
+        int64, openArray[byte], managed = false).expect("this is a valid statement")
+
+      countStmt = db.prepareStmt(
+        "SELECT COUNT(1) FROM '" & name & "';",
+        NoParams, int64, managed = false).expect("this is a valid statement")
+
+    var recordCount = int64 0
+    let countQueryRes = countStmt.exec do (res: int64):
+      recordCount = res
+
+    let found = ? countQueryRes
+    if not found:
+      return err("Cannot count existing items")
+    countStmt.dispose()
+
+    ok(Seq(insertStmt: insertStmt,
+           selectStmt: selectStmt,
+           recordCount: recordCount))
+  else:
+    ok(Seq())
+
+proc close*(s: var DbSeq) =
   s.insertStmt.dispose()
   s.selectStmt.dispose()

+  reset(s)
+
 proc add*[T](s: var DbSeq[T], val: T) =
+  doAssert(distinctBase(s.insertStmt) != nil, "database closed or table not preset")
+
   var bytes = SSZ.encode(val)
   s.insertStmt.exec(bytes).expectDb()
   inc s.recordCount
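A hedged illustration of the DbSeq behaviour introduced above (not part of the commit): when the store is read-only and the table was never created, init returns an empty value whose statements are nil, len() reports zero, and a write trips the new assertion in add(). The import paths are assumptions; the table and element types mirror usage elsewhere in this file:

# Sketch only - not part of this commit.
import beacon_chain/beacon_chain_db
import beacon_chain/spec/datatypes/base  # DepositData, Slot (assumed path)
import eth/db/kvstore_sqlite3            # SqStoreRef (assumed path)

let sq = SqStoreRef.init("", "test", inMemory = true).expect(
  "working database (out of memory?)")
let deposits = DbSeq[DepositData].init(
  sq, "genesis_deposits", readOnly = true).expect("init succeeds, table present or not")
doAssert deposits.len() == 0  # nothing was created, so there is nothing to read
# deposits.add(...) on this handle would hit the doAssert added to add() above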
@@ -288,6 +299,8 @@ template len*[T](s: DbSeq[T]): int64 =

 proc get*[T](s: DbSeq[T], idx: int64): T =
   # This is used only locally
+  doAssert(distinctBase(s.selectStmt) != nil, $T & " table not present for read at " & $(idx))
+
   let resultAddr = addr result

   let queryRes = s.selectStmt.exec(idx + 1) do (recordBytes: openArray[byte]):
@@ -302,81 +315,91 @@ proc get*[T](s: DbSeq[T], idx: int64): T =

 proc init*(T: type FinalizedBlocks, db: SqStoreRef, name: string,
            readOnly = false): KvResult[T] =
-  if not readOnly:
+  let hasTable = if db.readOnly or readOnly:
+    ? db.hasTable(name)
+  else:
     ? db.exec("""
-      CREATE TABLE IF NOT EXISTS """ & name & """(
+      CREATE TABLE IF NOT EXISTS '""" & name & """'(
          id INTEGER PRIMARY KEY,
          value BLOB NOT NULL
-      );
-    """)
+      );""")
+    true

-  let
-    insertStmt = db.prepareStmt(
-      "REPLACE INTO " & name & "(id, value) VALUES (?, ?);",
-      (int64, array[32, byte]), void, managed = false).expect("this is a valid statement")
+  if hasTable:
+    let
+      insertStmt = db.prepareStmt(
+        "REPLACE INTO '" & name & "'(id, value) VALUES (?, ?);",
+        (int64, array[32, byte]), void, managed = false).expect("this is a valid statement")

-    selectStmt = db.prepareStmt(
-      "SELECT value FROM " & name & " WHERE id = ?;",
-      int64, array[32, byte], managed = false).expect("this is a valid statement")
-    selectAllStmt = db.prepareStmt(
-      "SELECT id, value FROM " & name & " ORDER BY id;",
-      NoParams, (int64, array[32, byte]), managed = false).expect("this is a valid statement")
+      selectStmt = db.prepareStmt(
+        "SELECT value FROM '" & name & "' WHERE id = ?;",
+        int64, array[32, byte], managed = false).expect("this is a valid statement")
+      selectAllStmt = db.prepareStmt(
+        "SELECT id, value FROM '" & name & "' ORDER BY id;",
+        NoParams, (int64, array[32, byte]), managed = false).expect("this is a valid statement")

-    maxIdStmt = db.prepareStmt(
-      "SELECT MAX(id) FROM " & name & ";",
-      NoParams, Option[int64], managed = false).expect("this is a valid statement")
+      maxIdStmt = db.prepareStmt(
+        "SELECT MAX(id) FROM '" & name & "';",
+        NoParams, Option[int64], managed = false).expect("this is a valid statement")

-    minIdStmt = db.prepareStmt(
-      "SELECT MIN(id) FROM " & name & ";",
-      NoParams, Option[int64], managed = false).expect("this is a valid statement")
+      minIdStmt = db.prepareStmt(
+        "SELECT MIN(id) FROM '" & name & "';",
+        NoParams, Option[int64], managed = false).expect("this is a valid statement")

-  var
-    low, high: Opt[Slot]
-    tmp: Option[int64]
+    var
+      low, high: Opt[Slot]
+      tmp: Option[int64]

-  for rowRes in minIdStmt.exec(tmp):
-    expectDb rowRes
-    if tmp.isSome():
-      low.ok(Slot(tmp.get()))
+    for rowRes in minIdStmt.exec(tmp):
+      expectDb rowRes
+      if tmp.isSome():
+        low.ok(Slot(tmp.get()))

-  for rowRes in maxIdStmt.exec(tmp):
-    expectDb rowRes
-    if tmp.isSome():
-      high.ok(Slot(tmp.get()))
+    for rowRes in maxIdStmt.exec(tmp):
+      expectDb rowRes
+      if tmp.isSome():
+        high.ok(Slot(tmp.get()))

-  maxIdStmt.dispose()
-  minIdStmt.dispose()
+    maxIdStmt.dispose()
+    minIdStmt.dispose()

-  ok(T(insertStmt: insertStmt,
-       selectStmt: selectStmt,
-       selectAllStmt: selectAllStmt,
-       low: low,
-       high: high))
+    ok(T(insertStmt: insertStmt,
+         selectStmt: selectStmt,
+         selectAllStmt: selectAllStmt,
+         low: low,
+         high: high))
+  else:
+    ok(T())

-proc close*(s: FinalizedBlocks) =
+proc close*(s: var FinalizedBlocks) =
   s.insertStmt.dispose()
   s.selectStmt.dispose()
   s.selectAllStmt.dispose()
+  reset(s)

 proc insert*(s: var FinalizedBlocks, slot: Slot, val: Eth2Digest) =
   doAssert slot.uint64 < int64.high.uint64, "Only reasonable slots supported"
+  doAssert(distinctBase(s.insertStmt) != nil, "database closed or table not present")

   s.insertStmt.exec((slot.int64, val.data)).expectDb()
   s.low.ok(min(slot, s.low.get(slot)))
   s.high.ok(max(slot, s.high.get(slot)))

 proc get*(s: FinalizedBlocks, idx: Slot): Opt[Eth2Digest] =
+  if distinctBase(s.selectStmt) == nil: return Opt.none(Eth2Digest)
   var row: s.selectStmt.Result
   for rowRes in s.selectStmt.exec(int64(idx), row):
     expectDb rowRes
     return ok(Eth2Digest(data: row))

-  err()
+  return Opt.none(Eth2Digest)

 iterator pairs*(s: FinalizedBlocks): (Slot, Eth2Digest) =
-  var row: s.selectAllStmt.Result
-  for rowRes in s.selectAllStmt.exec(row):
-    expectDb rowRes
-    yield (Slot(row[0]), Eth2Digest(data: row[1]))
+  if distinctBase(s.selectAllStmt) != nil:
+    var row: s.selectAllStmt.Result
+    for rowRes in s.selectAllStmt.exec(row):
+      expectDb rowRes
+      yield (Slot(row[0]), Eth2Digest(data: row[1]))

 proc loadImmutableValidators(vals: DbSeq[ImmutableValidatorDataDb2]): seq[ImmutableValidatorData2] =
   result = newSeqOfCap[ImmutableValidatorData2](vals.len())
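In the same spirit, a small sketch (not from the commit) of how the guarded FinalizedBlocks accessors behave when the table is absent, for example in a database opened only for era export; it reuses the in-memory sq store from the previous sketch, and the table name is illustrative:

# Sketch only - not part of this commit.
let fin = FinalizedBlocks.init(sq, "finalized_blocks", readOnly = true).expect("init")
doAssert fin.get(Slot(0)).isNone()  # guarded by the nil-statement check above
for slot, root in fin:              # pairs() yields nothing when the table is missing
  discard (slot, root)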
@@ -420,24 +443,29 @@ template withManyWrites*(dbParam: BeaconChainDB, body: untyped) =
       if isInsideTransaction(db.db): # calls `sqlite3_get_autocommit`
         expectDb db.db.exec("ROLLBACK TRANSACTION;")

-proc new*(T: type BeaconChainDB,
-          dir: string,
-          inMemory = false,
-          readOnly = false
-    ): BeaconChainDB =
-  var db = if inMemory:
-      SqStoreRef.init("", "test", readOnly = readOnly, inMemory = true).expect(
-        "working database (out of memory?)")
-    else:
-      if (let res = secureCreatePath(dir); res.isErr):
-        fatal "Failed to create create database directory",
-          path = dir, err = ioErrorMsg(res.error)
-        quit 1
-
-      SqStoreRef.init(
-        dir, "nbc", readOnly = readOnly, manualCheckpoint = true).expectDb()
-
-  if not readOnly:
+proc new*(T: type BeaconChainDBV0,
+          db: SqStoreRef,
+          readOnly = false
+    ): BeaconChainDBV0 =
+  var
+    # V0 compatibility tables - these were created WITHOUT ROWID which is slow
+    # for large blobs
+    backendV0 = kvStore db.openKvStore(
+      readOnly = db.readOnly or readOnly).expectDb()
+    # state_no_validators is similar to state_no_validators2 but uses a
+    # different key encoding and was created WITHOUT ROWID
+    stateStoreV0 = kvStore db.openKvStore(
+      "state_no_validators", readOnly = db.readOnly or readOnly).expectDb()
+
+  BeaconChainDBV0(
+    backend: backendV0,
+    stateStore: stateStoreV0,
+  )
+
+proc new*(T: type BeaconChainDB,
+          db: SqStoreRef
+    ): BeaconChainDB =
+  if not db.readOnly:
     # Remove the deposits table we used before we switched
     # to storing only deposit contract checkpoints
     if db.exec("DROP TABLE IF EXISTS deposits;").isErr:
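The constructor split above can be read as: the caller owns the SqStoreRef, BeaconChainDB.new(db) wires up the current schema, and the legacy V0 tables are only opened on demand through BeaconChainDBV0.new. A hedged sketch of that flow, using only calls that appear in this diff (the directory is illustrative):

# Sketch only - not part of this commit.
let dataDir = "/path/to/nbc/db"  # illustrative
let
  store = SqStoreRef.init(
    dataDir, "nbc", readOnly = true, manualCheckpoint = true).expect("database")
  mainDb = BeaconChainDB.new(store)                     # current schema; no DDL on a read-only store
  legacy = BeaconChainDBV0.new(store, readOnly = true)  # legacy kvstore tables, only when needed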
@@ -448,13 +476,6 @@ proc new*(T: type BeaconChainDB,
       debug "Failed to drop the validatorIndexFromPubKey table"

   var
-    # V0 compatibility tables - these were created WITHOUT ROWID which is slow
-    # for large blobs
-    backend = kvStore db.openKvStore().expectDb()
-    # state_no_validators is similar to state_no_validators2 but uses a
-    # different key encoding and was created WITHOUT ROWID
-    stateStore = kvStore db.openKvStore("state_no_validators").expectDb()
-
     genesisDepositsSeq =
       DbSeq[DepositData].init(db, "genesis_deposits").expectDb()
     immutableValidatorsDb =
@@ -491,30 +512,28 @@ proc new*(T: type BeaconChainDB,
   # uncompressed keys instead. We still support upgrading a database from the
   # old format, but don't need to support downgrading, and therefore safely can
   # remove the keys
-  let immutableValidatorsDb1 =
-    DbSeq[ImmutableValidatorData].init(db, "immutable_validators").expectDb()
+  block:
+    var immutableValidatorsDb1 = DbSeq[ImmutableValidatorData].init(
+      db, "immutable_validators", readOnly = true).expectDb()

-  if immutableValidatorsDb.len() < immutableValidatorsDb1.len():
-    notice "Migrating validator keys, this may take a minute",
-      len = immutableValidatorsDb1.len()
-    while immutableValidatorsDb.len() < immutableValidatorsDb1.len():
-      let val = immutableValidatorsDb1.get(immutableValidatorsDb.len())
-      immutableValidatorsDb.add(ImmutableValidatorDataDb2(
-        pubkey: val.pubkey.loadValid().toUncompressed(),
-        withdrawal_credentials: val.withdrawal_credentials
-      ))
-  immutableValidatorsDb1.close()
+    if immutableValidatorsDb.len() < immutableValidatorsDb1.len():
+      notice "Migrating validator keys, this may take a minute",
+        len = immutableValidatorsDb1.len()
+      while immutableValidatorsDb.len() < immutableValidatorsDb1.len():
+        let val = immutableValidatorsDb1.get(immutableValidatorsDb.len())
+        immutableValidatorsDb.add(ImmutableValidatorDataDb2(
+          pubkey: val.pubkey.loadValid().toUncompressed(),
+          withdrawal_credentials: val.withdrawal_credentials
+        ))
+    immutableValidatorsDb1.close()

-  # Safe because nobody will be downgrading to pre-altair versions
-  # TODO: drop table maybe? that would require not creating the table just above
-  discard db.exec("DELETE FROM immutable_validators;")
+    if not db.readOnly:
+      # Safe because nobody will be downgrading to pre-altair versions
+      discard db.exec("DROP TABLE IF EXISTS immutable_validators;")

   T(
     db: db,
-    v0: BeaconChainDBV0(
-      backend: backend,
-      stateStore: stateStore,
-    ),
+    v0: BeaconChainDBV0.new(db, readOnly = true),
     genesisDeposits: genesisDepositsSeq,
     immutableValidatorsDb: immutableValidatorsDb,
     immutableValidators: loadImmutableValidators(immutableValidatorsDb),
@@ -529,6 +548,25 @@ proc new*(T: type BeaconChainDB,
     lcData: lcData
   )

+proc new*(T: type BeaconChainDB,
+          dir: string,
+          inMemory = false,
+          readOnly = false
+    ): BeaconChainDB =
+  let db =
+    if inMemory:
+      SqStoreRef.init("", "test", readOnly = readOnly, inMemory = true).expect(
+        "working database (out of memory?)")
+    else:
+      if (let res = secureCreatePath(dir); res.isErr):
+        fatal "Failed to create create database directory",
+          path = dir, err = ioErrorMsg(res.error)
+        quit 1
+
+      SqStoreRef.init(
+        dir, "nbc", readOnly = readOnly, manualCheckpoint = true).expectDb()
+
+  BeaconChainDB.new(db)
+
 template getLightClientDataDB*(db: BeaconChainDB): LightClientDataDB =
   db.lcData
@@ -1316,11 +1354,12 @@ iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
     for s in newSummaries:
       db.putBeaconBlockSummary(s.root, s.summary)

-    # Clean up pre-altair summaries - by now, we will have moved them to the
-    # new table
-    db.db.exec(
-      "DELETE FROM kvstore WHERE key >= ? and key < ?",
-      ([byte ord(kHashToBlockSummary)], [byte ord(kHashToBlockSummary) + 1])).expectDb()
+    if db.db.hasTable("kvstore").expectDb():
+      # Clean up pre-altair summaries - by now, we will have moved them to the
+      # new table
+      db.db.exec(
+        "DELETE FROM kvstore WHERE key >= ? and key < ?",
+        ([byte ord(kHashToBlockSummary)], [byte ord(kHashToBlockSummary) + 1])).expectDb()

   var row: stmt.Result
   for rowRes in exec(stmt, root.data, row):
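A database produced by the stricter code paths above may lack the kvstore table entirely (for instance one used only for era export), which is why this cleanup now checks hasTable first. The same guard in isolation, as a sketch against the store handle from the earlier sketches:

# Sketch only - not part of this commit.
if store.hasTable("kvstore").expect("schema query"):
  discard store.exec("DELETE FROM kvstore;")  # cleanup attempted only when the table exists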
@@ -1353,12 +1392,12 @@ iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):

 # Test operations used to create broken and/or legacy database

-proc putStateV0*(db: BeaconChainDB, key: Eth2Digest, value: phase0.BeaconState) =
+proc putStateV0*(db: BeaconChainDBV0, key: Eth2Digest, value: phase0.BeaconState) =
   # Writes to KVStore, as done in 1.0.12 and earlier
-  db.v0.backend.putSnappySSZ(subkey(type value, key), value)
+  db.backend.putSnappySSZ(subkey(type value, key), value)

-proc putBlockV0*(db: BeaconChainDB, value: phase0.TrustedSignedBeaconBlock) =
+proc putBlockV0*(db: BeaconChainDBV0, value: phase0.TrustedSignedBeaconBlock) =
   # Write to KVStore, as done in 1.0.12 and earlier
   # In particular, no summary is written here - it should be recreated
   # automatically
-  db.v0.backend.putSnappySSZ(subkey(phase0.SignedBeaconBlock, value.root), value)
+  db.backend.putSnappySSZ(subkey(phase0.SignedBeaconBlock, value.root), value)
@@ -215,7 +215,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =

   echo "Opening database..."
   let
-    db = BeaconChainDB.new(conf.databaseDir.string,)
+    db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
     dbBenchmark = BeaconChainDB.new("benchmark")
   defer:
     db.close()
@@ -233,7 +233,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =

   var
     (start, ends) = dag.getSlotRange(conf.benchSlot, conf.benchSlots)
-    blockRefs = dag.getBlockRange(start, ends)
+    blockRefs = dag.getBlockRange(max(start, Slot 1), ends)
     blocks: (
       seq[phase0.TrustedSignedBeaconBlock],
       seq[altair.TrustedSignedBeaconBlock],
@@ -245,6 +245,7 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =

   for b in 0 ..< blockRefs.len:
     let blck = blockRefs[blockRefs.len - b - 1]

     withTimer(timers[tLoadBlock]):
       case cfg.blockForkAtEpoch(blck.slot.epoch)
       of BeaconBlockFork.Phase0:
@@ -501,7 +502,8 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
       else: some((era - 1).start_slot)
       endSlot = era.start_slot
       eraBid = dag.atSlot(dag.head.bid, endSlot).valueOr:
-        echo "Skipping ", era, ", blocks not available"
+        echo "Skipping era ", era, ", blocks not available"
+        era += 1
         continue

     if endSlot > dag.head.slot:
@@ -699,15 +699,18 @@ suite "Old database versions" & preset():

   test "pre-1.1.0":
     # only kvstore, no immutable validator keys
-
-    let db = BeaconChainDB.new("", inMemory = true)
+    let
+      sq = SqStoreRef.init("", "test", inMemory = true).expect(
+        "working database (out of memory?)")
+      v0 = BeaconChainDBV0.new(sq, readOnly = false)
+      db = BeaconChainDB.new(sq)

     # preInit a database to a v1.0.12 state
+    v0.putStateV0(genState[].root, genState[].data)
+    v0.putBlockV0(genBlock)
+
     db.putStateRoot(
       genState[].latest_block_root, genState[].data.slot, genState[].root)
-    db.putStateV0(genState[].root, genState[].data)
-
-    db.putBlockV0(genBlock)
     db.putTailBlock(genBlock.root)
     db.putHeadBlock(genBlock.root)
     db.putGenesisBlock(genBlock.root)
@@ -1 +1 @@
-Subproject commit 6499ee2bc5d264fdc68f5f08b647222a5c5252fa
+Subproject commit 8f4ef19fc91a6dbd075a7af5a20082060bdc0920