|
|
|
@@ -8,52 +8,107 @@
|
|
|
|
|
{.push raises: [Defect].}
|
|
|
|
|
|
|
|
|
|
import
|
|
|
|
|
std/[typetraits, tables],
|
|
|
|
|
stew/[arrayops, assign2, byteutils, endians2, io2, objects, results],
|
|
|
|
|
typetraits, tables,
|
|
|
|
|
stew/[assign2, byteutils, endians2, io2, objects, results],
|
|
|
|
|
serialization, chronicles, snappy,
|
|
|
|
|
eth/db/[kvstore, kvstore_sqlite3],
|
|
|
|
|
./networking/network_metadata, ./beacon_chain_db_immutable,
|
|
|
|
|
./spec/[crypto, datatypes, digest, state_transition],
|
|
|
|
|
./ssz/[ssz_serialization, merkleization],
|
|
|
|
|
./eth1/merkle_minimal,
|
|
|
|
|
./filepath
|
|
|
|
|
|
|
|
|
|
type
|
|
|
|
|
# TODO when DirStoreRef and helpers are placed in a separate module, kvStore
|
|
|
|
|
# doesn't find it.. :/
|
|
|
|
|
# eth/db/kvstore.nim(75, 6) Error: type mismatch: got <DirStoreRef, openArray[byte], openArray[byte]>
|
|
|
|
|
DirStoreRef* = ref object of RootObj
|
|
|
|
|
# DirStore is an experimental storage based on plain files stored in a
|
|
|
|
|
# directory tree - this _might_ be a suitable way of storing large blobs
|
|
|
|
|
# efficiently, where sqlite sometimes struggles - see
|
|
|
|
|
# https://github.com/status-im/nimbus-eth2/issues/2440
|
|
|
|
|
#
|
|
|
|
|
# The issue described by 2440 happens when both blocks and states are all
|
|
|
|
|
# stored in a single, giant table. The slow deletes have since been
|
|
|
|
|
# mitigated by using separate tables.
|
|
|
|
|
|
|
|
|
|
root: string
|
|
|
|
|
|
|
|
|
|
proc splitName(db: DirStoreRef, name: openArray[byte]): tuple[dir, file: string] =
|
|
|
|
|
# Splitting the name helps keep the number of files per directory down - up
|
|
|
|
|
# to 65536 folders will be created
|
|
|
|
|
if name.len() > 2:
|
|
|
|
|
(db.root & "/" & name.toOpenArray(0, 1).toHex(), name.toOpenArray(2, name.high()).toHex())
|
|
|
|
|
else:
|
|
|
|
|
(db.root & "/" & "0000", name.toHex())
|
|
|
|
|
|
|
|
|
|
proc get*(db: DirStoreRef, key: openArray[byte], onData: DataProc): KvResult[bool] =
|
|
|
|
|
let
|
|
|
|
|
(root, name) = db.splitName(key)
|
|
|
|
|
fileName = root & "/" & name
|
|
|
|
|
|
|
|
|
|
var data: seq[byte]
|
|
|
|
|
|
|
|
|
|
if readFile(fileName, data).isOk():
|
|
|
|
|
onData(data)
|
|
|
|
|
ok(true)
|
|
|
|
|
else:
|
|
|
|
|
# Serious errors are caught when writing, so we simplify things and say
|
|
|
|
|
# the entry doesn't exist if for any reason we can't read it
|
|
|
|
|
# TODO align this with `contains` that simply checks if the file exists
|
|
|
|
|
ok(false)
|
|
|
|
|
|
|
|
|
|
proc del*(db: DirStoreRef, key: openArray[byte]): KvResult[void] =
|
|
|
|
|
let
|
|
|
|
|
(root, name) = db.splitName(key)
|
|
|
|
|
fileName = root & "/" & name
|
|
|
|
|
|
|
|
|
|
removeFile(fileName).mapErr(ioErrorMsg)
|
|
|
|
|
|
|
|
|
|
proc contains*(db: DirStoreRef, key: openArray[byte]): KvResult[bool] =
|
|
|
|
|
let
|
|
|
|
|
(root, name) = db.splitName(key)
|
|
|
|
|
fileName = root & "/" & name
|
|
|
|
|
|
|
|
|
|
ok(isFile(fileName))
|
|
|
|
|
|
|
|
|
|
proc put*(db: DirStoreRef, key, val: openArray[byte]): KvResult[void] =
|
|
|
|
|
let
|
|
|
|
|
(root, name) = db.splitName(key)
|
|
|
|
|
fileName = root & "/" & name
|
|
|
|
|
|
|
|
|
|
? createPath(root).mapErr(ioErrorMsg)
|
|
|
|
|
? io2.writeFile(fileName, val).mapErr(ioErrorMsg)
|
|
|
|
|
|
|
|
|
|
ok()
|
|
|
|
|
|
|
|
|
|
proc close*(db: DirStoreRef): KvResult[void] =
|
|
|
|
|
discard
|
|
|
|
|
|
|
|
|
|
proc init*(T: type DirStoreRef, root: string): T =
|
|
|
|
|
T(
|
|
|
|
|
root: root,
|
|
|
|
|
)
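# Usage sketch for the experimental DirStore (hypothetical path and key; it
# assumes the procs above with their stew/io2 imports are in scope and it
# touches the real file system under the given root):
when isMainModule:
  let demoStore = DirStoreRef.init("/tmp/demo-state")
  let demoFileKey = [byte 0x01, 0x02, 0x03]
  # stored as <root>/0102/03 - the first two key bytes pick the directory
  demoStore.put(demoFileKey, [byte 0xaa, 0xbb]).expect("writable directory")
  doAssert demoStore.contains(demoFileKey).expect("readable directory")
  demoStore.del(demoFileKey).expect("writable directory")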
|
|
|
|
|
|
|
|
|
|
type
|
|
|
|
|
DbSeq*[T] = object
|
|
|
|
|
insertStmt: SqliteStmt[openArray[byte], void]
|
|
|
|
|
selectStmt: SqliteStmt[int64, openArray[byte]]
|
|
|
|
|
recordCount: int64
|
|
|
|
|
|
|
|
|
|
DbMap*[K, V] = object
|
|
|
|
|
db: SqStoreRef
|
|
|
|
|
keyspace: int
|
|
|
|
|
|
|
|
|
|
DepositsSeq = DbSeq[DepositData]
|
|
|
|
|
ImmutableValidatorsSeq = DbSeq[ImmutableValidatorData]
|
|
|
|
|
|
|
|
|
|
DepositsMerkleizer* = SszMerkleizer[depositContractLimit]
|
|
|
|
|
|
|
|
|
|
DepositContractSnapshot* = object
|
|
|
|
|
eth1Block*: Eth2Digest
|
|
|
|
|
depositContractState*: DepositContractState
|
|
|
|
|
|
|
|
|
|
BeaconChainDBV0* = ref object
|
|
|
|
|
## BeaconChainDBV0 based on old kvstore table that sets the WITHOUT ROWID
|
|
|
|
|
## option which becomes unbearably slow with large blobs. It is used as a
|
|
|
|
|
## read-only store to support old versions - by freezing it at its current
|
|
|
|
|
## data set, downgrading remains possible since it's no longer touched -
|
|
|
|
|
## anyone downgrading will have to sync up whatever they missed.
|
|
|
|
|
##
|
|
|
|
|
## Newer versions read from the new tables first - if the data is not found,
|
|
|
|
|
## they turn to the old tables for reading. Writing is done only to the new
|
|
|
|
|
## tables.
|
|
|
|
|
##
|
|
|
|
|
## V0 stored most data in a single table, prefixing each key with a tag
|
|
|
|
|
## identifying the type of data.
|
|
|
|
|
##
|
|
|
|
|
## 1.1 introduced BeaconStateNoImmutableValidators storage where immutable
|
|
|
|
|
## validator data is stored in a separate table and only a partial
|
|
|
|
|
## BeaconState is written to kvstore
|
|
|
|
|
##
|
|
|
|
|
## 1.2 moved BeaconStateNoImmutableValidators to a separate table to
|
|
|
|
|
## alleviate some of the btree balancing issues - this doubled the speed but
|
|
|
|
|
## was still slow
|
|
|
|
|
backend: KvStoreRef # kvstore
|
|
|
|
|
stateStore: KvStoreRef # state_no_validators
|
|
|
|
|
|
|
|
|
|
BeaconChainDB* = ref object
|
|
|
|
|
## Database storing resolved blocks and states - resolved blocks are such
|
|
|
|
|
## blocks that form a chain back to the tail block.
|
|
|
|
@@ -67,9 +122,7 @@ type
|
|
|
|
|
## database - this may have a number of "natural" causes such as switching
|
|
|
|
|
## between different versions of the client and accidentally using an old
|
|
|
|
|
## database.
|
|
|
|
|
db: SqStoreRef
|
|
|
|
|
|
|
|
|
|
v0: BeaconChainDBV0
|
|
|
|
|
backend: KvStoreRef
|
|
|
|
|
preset*: RuntimePreset
|
|
|
|
|
genesisDeposits*: DepositsSeq
|
|
|
|
|
|
|
|
|
@@ -80,20 +133,12 @@ type
|
|
|
|
|
|
|
|
|
|
checkpoint*: proc() {.gcsafe, raises: [Defect].}
|
|
|
|
|
|
|
|
|
|
keyValues: KvStoreRef # Random stuff using DbKeyKind - suitable for small values mainly!
|
|
|
|
|
blocks: KvStoreRef # BlockRoot -> TrustedBeaconBlock
|
|
|
|
|
stateRoots: KvStoreRef # (Slot, BlockRoot) -> StateRoot
|
|
|
|
|
statesNoVal: KvStoreRef # StateRoot -> BeaconStateNoImmutableValidators
|
|
|
|
|
stateDiffs: KvStoreRef ##\
|
|
|
|
|
## StateRoot -> BeaconStateDiff
|
|
|
|
|
## Instead of storing full BeaconStates, one can store only the diff from
|
|
|
|
|
## a different state. As 75% of a typical BeaconState's serialized form is
|
|
|
|
|
## the validators, which are mostly immutable and append-only, just using
|
|
|
|
|
## a simple append-diff representation helps significantly. Various roots
|
|
|
|
|
## are stored in a mod-increment pattern across fixed-sized arrays, which
|
|
|
|
|
## addresses most of the rest of the BeaconState sizes.
|
|
|
|
|
stateStore: KvStoreRef
|
|
|
|
|
|
|
|
|
|
summaries: KvStoreRef # BlockRoot -> BeaconBlockSummary
|
|
|
|
|
Keyspaces* = enum
|
|
|
|
|
defaultKeyspace = "kvstore"
|
|
|
|
|
validatorIndexFromPubKey # Unused (?)
|
|
|
|
|
stateNoValidators = "state_no_validators"
|
|
|
|
|
|
|
|
|
|
DbKeyKind = enum
|
|
|
|
|
kHashToState
|
|
|
|
@@ -111,26 +156,38 @@ type
|
|
|
|
|
kGenesisBlockRoot
|
|
|
|
|
## Immutable reference to the network genesis state
|
|
|
|
|
## (needed for satisfying requests to the beacon node API).
|
|
|
|
|
kEth1PersistedTo # Obsolete
|
|
|
|
|
kDepositsFinalizedByEth1 # Obsolete
|
|
|
|
|
kEth1PersistedTo
|
|
|
|
|
## (Obsolete) Used to point to the latest ETH1 block hash which
|
|
|
|
|
## satisfied the follow distance and had its deposits persisted to disk.
|
|
|
|
|
kDepositsFinalizedByEth1
|
|
|
|
|
## A merkleizer checkpoint which can be used for computing the
|
|
|
|
|
## `deposit_root` of all eth1 finalized deposits (i.e. deposits
|
|
|
|
|
## confirmed by ETH1_FOLLOW_DISTANCE blocks). The `deposit_root`
|
|
|
|
|
## is acknowledged and confirmed by the attached web3 provider.
|
|
|
|
|
kDepositsFinalizedByEth2
|
|
|
|
|
## A merkleizer checkpoint used for computing merkle proofs of
|
|
|
|
|
## deposits added to Eth2 blocks (it may lag behind the finalized
|
|
|
|
|
## eth1 deposits checkpoint).
|
|
|
|
|
kHashToBlockSummary # Block summaries for fast startup
|
|
|
|
|
kHashToBlockSummary
|
|
|
|
|
## Cache of beacon block summaries - during startup when we construct the
|
|
|
|
|
## chain dag, loading full blocks takes a lot of time - the block
|
|
|
|
|
## summary contains a minimal snapshot of what's needed to instantiate
|
|
|
|
|
## the BlockRef tree.
|
|
|
|
|
kSpeculativeDeposits
|
|
|
|
|
## A merkleizer checkpoint created on the basis of deposit events
|
|
|
|
|
## that we were not able to verify against a `deposit_root` served
|
|
|
|
|
## by the web3 provider. This may happen on Geth nodes that serve
|
|
|
|
|
## only recent contract state data (i.e. only recent `deposit_roots`).
|
|
|
|
|
kHashToStateDiff # Obsolete
|
|
|
|
|
kHashToStateDiff
|
|
|
|
|
## Instead of storing full BeaconStates, one can store only the diff from
|
|
|
|
|
## a different state. As 75% of a typical BeaconState's serialized form is
|
|
|
|
|
## the validators, which are mostly immutable and append-only, just using
|
|
|
|
|
## a simple append-diff representation helps significantly. Various roots
|
|
|
|
|
## are stored in a mod-increment pattern across fixed-sized arrays, which
|
|
|
|
|
## addresses most of the rest of the BeaconState sizes.
|
|
|
|
|
kHashToStateOnlyMutableValidators
|
|
|
|
|
|
|
|
|
|
BeaconBlockSummary* = object
|
|
|
|
|
## Cache of beacon block summaries - during startup when we construct the
|
|
|
|
|
## chain dag, loading full blocks takes a lot of time - the block
|
|
|
|
|
## summary contains a minimal snapshot of what's needed to instantiate
|
|
|
|
|
## the BlockRef tree.
|
|
|
|
|
slot*: Slot
|
|
|
|
|
parent_root*: Eth2Digest
|
|
|
|
|
|
|
|
|
@@ -165,6 +222,9 @@ func subkey(kind: type SignedBeaconBlock, key: Eth2Digest): auto =
|
|
|
|
|
func subkey(kind: type BeaconBlockSummary, key: Eth2Digest): auto =
|
|
|
|
|
subkey(kHashToBlockSummary, key.data)
|
|
|
|
|
|
|
|
|
|
func subkey(kind: type BeaconStateDiff, key: Eth2Digest): auto =
|
|
|
|
|
subkey(kHashToStateDiff, key.data)
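# Standalone sketch of the tag-prefix scheme these helpers implement (demo
# names are hypothetical): a single byte identifying the record kind is
# prepended to the 32-byte root, so unrelated record kinds can share one
# kvstore table and can be range-deleted by tag, as done for summaries below.
type DemoKeyKind = enum
  demoHashToState
  demoHashToBlock

func demoSubkey(kind: DemoKeyKind, key: array[32, byte]): array[33, byte] =
  result[0] = byte(ord(kind))
  result[1 .. 32] = key

var demoRoot: array[32, byte]
demoRoot[0] = 0xaa
doAssert demoSubkey(demoHashToBlock, demoRoot)[0] == 1     # tag byte first
doAssert demoSubkey(demoHashToBlock, demoRoot)[1] == 0xaa  # then the root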
|
|
|
|
|
|
|
|
|
|
func subkey(root: Eth2Digest, slot: Slot): array[40, byte] =
|
|
|
|
|
var ret: array[40, byte]
|
|
|
|
|
# big endian to get a naturally ascending order on slots in sorted indices
|
|
|
|
@@ -181,51 +241,41 @@ template panic =
|
|
|
|
|
# Review all usages.
|
|
|
|
|
raiseAssert "The database should not be corrupted"
|
|
|
|
|
|
|
|
|
|
template expectDb(x: auto): untyped =
|
|
|
|
|
# There's no meaningful error handling implemented for a corrupt database or
|
|
|
|
|
# full disk - this requires manual intervention, so we'll panic for now
|
|
|
|
|
x.expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc init*[T](Seq: type DbSeq[T], db: SqStoreRef, name: string): Seq =
|
|
|
|
|
db.exec("""
|
|
|
|
|
CREATE TABLE IF NOT EXISTS """ & name & """(
|
|
|
|
|
id INTEGER PRIMARY KEY,
|
|
|
|
|
value BLOB
|
|
|
|
|
);
|
|
|
|
|
""").expectDb()
|
|
|
|
|
""").expect "working database (disk broken/full?)"
|
|
|
|
|
|
|
|
|
|
let
|
|
|
|
|
insertStmt = db.prepareStmt(
|
|
|
|
|
"INSERT INTO " & name & "(value) VALUES (?);",
|
|
|
|
|
openArray[byte], void, managed = false).expect("this is a valid statement")
|
|
|
|
|
openArray[byte], void).expect("this is a valid statement")
|
|
|
|
|
|
|
|
|
|
selectStmt = db.prepareStmt(
|
|
|
|
|
"SELECT value FROM " & name & " WHERE id = ?;",
|
|
|
|
|
int64, openArray[byte], managed = false).expect("this is a valid statement")
|
|
|
|
|
int64, openArray[byte]).expect("this is a valid statement")
|
|
|
|
|
|
|
|
|
|
countStmt = db.prepareStmt(
|
|
|
|
|
"SELECT COUNT(1) FROM " & name & ";",
|
|
|
|
|
NoParams, int64, managed = false).expect("this is a valid statement")
|
|
|
|
|
NoParams, int64).expect("this is a valid statement")
|
|
|
|
|
|
|
|
|
|
var recordCount = int64 0
|
|
|
|
|
let countQueryRes = countStmt.exec do (res: int64):
|
|
|
|
|
recordCount = res
|
|
|
|
|
|
|
|
|
|
let found = countQueryRes.expectDb()
|
|
|
|
|
let found = countQueryRes.expect("working database (disk broken/full?)")
|
|
|
|
|
if not found: panic()
|
|
|
|
|
countStmt.dispose()
|
|
|
|
|
|
|
|
|
|
Seq(insertStmt: insertStmt,
|
|
|
|
|
selectStmt: selectStmt,
|
|
|
|
|
recordCount: recordCount)
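# Standalone sketch of the append-only pattern DbSeq builds on (assumes Nim's
# std/db_sqlite and an in-memory database; the real code goes through
# SqStoreRef prepared statements instead): INTEGER PRIMARY KEY hands out
# 1-based, monotonically increasing ids, so add() is an INSERT and get(idx)
# a lookup by id.
when isMainModule:
  import std/db_sqlite

  let demoDb = db_sqlite.open(":memory:", "", "", "")
  demoDb.exec(sql"CREATE TABLE IF NOT EXISTS demo_seq(id INTEGER PRIMARY KEY, value BLOB);")
  for v in ["a", "b", "c"]:
    demoDb.exec(sql"INSERT INTO demo_seq(value) VALUES (?);", v)
  doAssert demoDb.getValue(sql"SELECT COUNT(1) FROM demo_seq;") == "3"
  doAssert demoDb.getValue(sql"SELECT value FROM demo_seq WHERE id = ?;", 2) == "b"
  demoDb.close()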
|
|
|
|
|
|
|
|
|
|
proc close*(s: DbSeq) =
|
|
|
|
|
s.insertStmt.dispose()
|
|
|
|
|
s.selectStmt.dispose()
|
|
|
|
|
|
|
|
|
|
proc add*[T](s: var DbSeq[T], val: T) =
|
|
|
|
|
var bytes = SSZ.encode(val)
|
|
|
|
|
s.insertStmt.exec(bytes).expectDb()
|
|
|
|
|
s.insertStmt.exec(bytes).expect "working database (disk broken/full?)"
|
|
|
|
|
inc s.recordCount
|
|
|
|
|
|
|
|
|
|
template len*[T](s: DbSeq[T]): int64 =
|
|
|
|
@@ -241,112 +291,124 @@ proc get*[T](s: DbSeq[T], idx: int64): T =
|
|
|
|
|
except SerializationError:
|
|
|
|
|
panic()
|
|
|
|
|
|
|
|
|
|
let found = queryRes.expectDb()
|
|
|
|
|
let found = queryRes.expect("working database (disk broken/full?)")
|
|
|
|
|
if not found: panic()
|
|
|
|
|
|
|
|
|
|
proc createMap*(db: SqStoreRef, keyspace: int;
|
|
|
|
|
K, V: distinct type): DbMap[K, V] =
|
|
|
|
|
DbMap[K, V](db: db, keyspace: keyspace)
|
|
|
|
|
|
|
|
|
|
proc insert*[K, V](m: var DbMap[K, V], key: K, value: V) =
|
|
|
|
|
m.db.put(m.keyspace, SSZ.encode key, SSZ.encode value).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc contains*[K, V](m: DbMap[K, V], key: K): bool =
|
|
|
|
|
contains(m.db, SSZ.encode key).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
template insert*[K, V](t: var Table[K, V], key: K, value: V) =
|
|
|
|
|
add(t, key, value)
|
|
|
|
|
|
|
|
|
|
proc loadImmutableValidators(db: BeaconChainDB): seq[ImmutableValidatorData] =
|
|
|
|
|
# TODO not called, but build fails otherwise
|
|
|
|
|
for i in 0 ..< db.immutableValidators.len:
|
|
|
|
|
result.add db.immutableValidators.get(i)
|
|
|
|
|
|
|
|
|
|
type
|
|
|
|
|
SqKeyspaceStoreRef* = ref object of RootObj
|
|
|
|
|
# Wrapper around SqStoreRef to target a particular keyspace - using
|
|
|
|
|
# keyspaces helps keep performance decent when using large blobs in tables
|
|
|
|
|
# that otherwise contain lots of rows.
|
|
|
|
|
db: SqStoreRef
|
|
|
|
|
keyspace: int
|
|
|
|
|
|
|
|
|
|
proc get*(db: SqKeyspaceStoreRef, key: openArray[byte], onData: DataProc): KvResult[bool] =
|
|
|
|
|
get(db.db, db.keyspace, key, onData)
|
|
|
|
|
|
|
|
|
|
proc del*(db: SqKeyspaceStoreRef, key: openArray[byte]): KvResult[void] =
|
|
|
|
|
del(db.db, db.keyspace, key)
|
|
|
|
|
|
|
|
|
|
proc contains*(db: SqKeyspaceStoreRef, key: openArray[byte]): KvResult[bool] =
|
|
|
|
|
contains(db.db, db.keyspace, key)
|
|
|
|
|
|
|
|
|
|
proc put*(db: SqKeyspaceStoreRef, key, val: openArray[byte]): KvResult[void] =
|
|
|
|
|
put(db.db, db.keyspace, key, val)
|
|
|
|
|
|
|
|
|
|
proc close*(db: SqKeyspaceStoreRef): KvResult[void] =
|
|
|
|
|
ok() # Gets closed with the "default" keyspace
|
|
|
|
|
|
|
|
|
|
proc init(T: type SqKeyspaceStoreRef, db: SqStoreRef, keyspace: Keyspaces): T =
|
|
|
|
|
T(
|
|
|
|
|
db: db,
|
|
|
|
|
keyspace: int(keyspace)
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
proc new*(T: type BeaconChainDB,
|
|
|
|
|
preset: RuntimePreset,
|
|
|
|
|
dir: string,
|
|
|
|
|
inMemory = false,
|
|
|
|
|
fileStateStorage = false,
|
|
|
|
|
): BeaconChainDB =
|
|
|
|
|
var db = if inMemory:
|
|
|
|
|
SqStoreRef.init("", "test", inMemory = true).expect(
|
|
|
|
|
var sqliteStore = if inMemory:
|
|
|
|
|
SqStoreRef.init("", "test", Keyspaces, inMemory = true).expect(
|
|
|
|
|
"working database (out of memory?)")
|
|
|
|
|
else:
|
|
|
|
|
let s = secureCreatePath(dir)
|
|
|
|
|
doAssert s.isOk # TODO(zah) Handle this in a better way
|
|
|
|
|
|
|
|
|
|
SqStoreRef.init(
|
|
|
|
|
dir, "nbc", manualCheckpoint = true).expectDb()
|
|
|
|
|
dir, "nbc", Keyspaces,
|
|
|
|
|
manualCheckpoint = true).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
# Remove the deposits table we used before we switched
|
|
|
|
|
# to storing only deposit contract checkpoints
|
|
|
|
|
if db.exec("DROP TABLE IF EXISTS deposits;").isErr:
|
|
|
|
|
if sqliteStore.exec("DROP TABLE IF EXISTS deposits;").isErr:
|
|
|
|
|
debug "Failed to drop the deposits table"
|
|
|
|
|
|
|
|
|
|
var
|
|
|
|
|
# V0 compatibility tables
|
|
|
|
|
backend = kvStore db.openKvStore().expectDb()
|
|
|
|
|
stateStore = kvStore db.openKvStore("state_no_validators").expectDb()
|
|
|
|
|
|
|
|
|
|
genesisDepositsSeq =
|
|
|
|
|
DbSeq[DepositData].init(db, "genesis_deposits")
|
|
|
|
|
DbSeq[DepositData].init(sqliteStore, "genesis_deposits")
|
|
|
|
|
immutableValidatorsSeq =
|
|
|
|
|
DbSeq[ImmutableValidatorData].init(db, "immutable_validators")
|
|
|
|
|
DbSeq[ImmutableValidatorData].init(sqliteStore, "immutable_validators")
|
|
|
|
|
backend = kvStore sqliteStore
|
|
|
|
|
stateStore =
|
|
|
|
|
if inMemory or (not fileStateStorage):
|
|
|
|
|
kvStore SqKeyspaceStoreRef.init(sqliteStore, stateNoValidators)
|
|
|
|
|
else:
|
|
|
|
|
kvStore DirStoreRef.init(dir & "/state")
|
|
|
|
|
|
|
|
|
|
# V1 - tables whose rows are expected to be small get the WITHOUT ROWID optimization
|
|
|
|
|
keyValues = kvStore db.openKvStore("key_values", true).expectDb()
|
|
|
|
|
blocks = kvStore db.openKvStore("blocks").expectDb()
|
|
|
|
|
stateRoots = kvStore db.openKvStore("state_roots", true).expectDb()
|
|
|
|
|
statesNoVal = kvStore db.openKvStore("state_no_validators2").expectDb()
|
|
|
|
|
stateDiffs = kvStore db.openKvStore("state_diffs").expectDb()
|
|
|
|
|
summaries = kvStore db.openKvStore("beacon_block_summaries", true).expectDb()
|
|
|
|
|
|
|
|
|
|
T(
|
|
|
|
|
db: db,
|
|
|
|
|
v0: BeaconChainDBV0(
|
|
|
|
|
backend: backend,
|
|
|
|
|
stateStore: stateStore,
|
|
|
|
|
),
|
|
|
|
|
T(backend: backend,
|
|
|
|
|
preset: preset,
|
|
|
|
|
genesisDeposits: genesisDepositsSeq,
|
|
|
|
|
immutableValidators: immutableValidatorsSeq,
|
|
|
|
|
immutableValidatorsMem: loadImmutableValidators(immutableValidatorsSeq),
|
|
|
|
|
checkpoint: proc() = db.checkpoint(),
|
|
|
|
|
keyValues: keyValues,
|
|
|
|
|
blocks: blocks,
|
|
|
|
|
stateRoots: stateRoots,
|
|
|
|
|
statesNoVal: statesNoVal,
|
|
|
|
|
stateDiffs: stateDiffs,
|
|
|
|
|
summaries: summaries,
|
|
|
|
|
)
|
|
|
|
|
checkpoint: proc() = sqliteStore.checkpoint(),
|
|
|
|
|
stateStore: stateStore,
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
proc decodeSSZ[T](data: openArray[byte], output: var T): bool =
|
|
|
|
|
proc snappyEncode(inp: openArray[byte]): seq[byte] =
|
|
|
|
|
try:
|
|
|
|
|
readSszBytes(data, output, updateRoot = false)
|
|
|
|
|
true
|
|
|
|
|
except SerializationError as e:
|
|
|
|
|
# If the data can't be deserialized, it could be because it's from a
|
|
|
|
|
# version of the software that uses a different SSZ encoding
|
|
|
|
|
warn "Unable to deserialize data, old database?",
|
|
|
|
|
err = e.msg, typ = name(T), dataLen = data.len
|
|
|
|
|
false
|
|
|
|
|
snappy.encode(inp)
|
|
|
|
|
except CatchableError as err:
|
|
|
|
|
raiseAssert err.msg
|
|
|
|
|
|
|
|
|
|
proc decodeSnappySSZ[T](data: openArray[byte], output: var T): bool =
|
|
|
|
|
try:
|
|
|
|
|
let decompressed = snappy.decode(data, maxDecompressedDbRecordSize)
|
|
|
|
|
readSszBytes(decompressed, output, updateRoot = false)
|
|
|
|
|
true
|
|
|
|
|
except SerializationError as e:
|
|
|
|
|
# If the data can't be deserialized, it could be because it's from a
|
|
|
|
|
# version of the software that uses a different SSZ encoding
|
|
|
|
|
warn "Unable to deserialize data, old database?",
|
|
|
|
|
err = e.msg, typ = name(T), dataLen = data.len
|
|
|
|
|
false
|
|
|
|
|
|
|
|
|
|
proc encodeSSZ(v: auto): seq[byte] =
|
|
|
|
|
proc sszEncode(v: auto): seq[byte] =
|
|
|
|
|
try:
|
|
|
|
|
SSZ.encode(v)
|
|
|
|
|
except IOError as err:
|
|
|
|
|
raiseAssert err.msg
|
|
|
|
|
|
|
|
|
|
proc encodeSnappySSZ(v: auto): seq[byte] =
|
|
|
|
|
try:
|
|
|
|
|
snappy.encode(SSZ.encode(v))
|
|
|
|
|
except CatchableError as err:
|
|
|
|
|
# In-memory encode shouldn't fail!
|
|
|
|
|
raiseAssert err.msg
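# Round-trip sketch of the storage encoding these helpers implement (uses only
# the snappy module imported above; the SSZ layer is left out):
when isMainModule:
  let demoPayload = @[byte 1, 2, 3, 4, 5]
  let demoCompressed = snappy.encode(demoPayload)   # what the put* procs store
  # bounded decode, mirroring decodeSnappySSZ; corrupt input yields an empty seq
  doAssert snappy.decode(demoCompressed, 1024) == demoPayload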
|
|
|
|
|
|
|
|
|
|
proc putRaw(db: KvStoreRef, key: openArray[byte], v: Eth2Digest) =
|
|
|
|
|
db.put(key, v.data).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc putEncoded(db: KvStoreRef, key: openArray[byte], v: auto) =
|
|
|
|
|
db.put(key, snappyEncode(sszEncode(v))).expect(
|
|
|
|
|
"working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc getRaw(db: KvStoreRef, key: openArray[byte], T: type Eth2Digest): Opt[T] =
|
|
|
|
|
var res: Opt[T]
|
|
|
|
|
proc decode(data: openArray[byte]) =
|
|
|
|
|
if data.len == sizeof(Eth2Digest):
|
|
|
|
|
res.ok Eth2Digest(data: toArray(sizeof(Eth2Digest), data))
|
|
|
|
|
if data.len == 32:
|
|
|
|
|
res.ok Eth2Digest(data: toArray(32, data))
|
|
|
|
|
else:
|
|
|
|
|
# If the data can't be deserialized, it could be because it's from a
|
|
|
|
|
# version of the software that uses a different SSZ encoding
|
|
|
|
@@ -354,89 +416,63 @@ proc getRaw(db: KvStoreRef, key: openArray[byte], T: type Eth2Digest): Opt[T] =
|
|
|
|
|
typ = name(T), dataLen = data.len
|
|
|
|
|
discard
|
|
|
|
|
|
|
|
|
|
discard db.get(key, decode).expectDb()
|
|
|
|
|
discard db.get(key, decode).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
res
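# Standalone sketch of the kvstore callback pattern used by getRaw above and
# getEncoded below (std lib only, demo names hypothetical): the store calls
# back while the value bytes are still valid and the caller copies out what it
# needs, which is why the output-pointer / closure-capture tricks are used.
proc demoKvGet(onData: proc(data: openArray[byte]) {.raises: [Defect].}): bool =
  let stored = [byte 1, 2, 3]
  onData(stored)    # data is only guaranteed valid for the duration of the call
  true

var demoCopied: seq[byte]
discard demoKvGet(proc(data: openArray[byte]) = demoCopied = @data)
doAssert demoCopied == @[byte 1, 2, 3]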
|
|
|
|
|
|
|
|
|
|
proc putRaw(db: KvStoreRef, key: openArray[byte], v: Eth2Digest) =
|
|
|
|
|
db.put(key, v.data).expectDb()
|
|
|
|
|
|
|
|
|
|
type GetResult = enum
|
|
|
|
|
found = "Found"
|
|
|
|
|
notFound = "Not found"
|
|
|
|
|
corrupted = "Corrupted"
|
|
|
|
|
|
|
|
|
|
proc getSSZ[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
|
|
|
|
|
proc getEncoded[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
|
|
|
|
|
var status = GetResult.notFound
|
|
|
|
|
|
|
|
|
|
# TODO address is needed because there's no way to express lifetimes in nim
|
|
|
|
|
# we'll use unsafeAddr to find the code later
|
|
|
|
|
var outputPtr = unsafeAddr output # callback is local, ptr won't escape
|
|
|
|
|
proc decode(data: openArray[byte]) =
|
|
|
|
|
status =
|
|
|
|
|
if decodeSSZ(data, outputPtr[]): GetResult.found
|
|
|
|
|
else: GetResult.corrupted
|
|
|
|
|
try:
|
|
|
|
|
let decompressed = snappy.decode(data, maxDecompressedDbRecordSize)
|
|
|
|
|
if decompressed.len > 0:
|
|
|
|
|
outputPtr[] = SSZ.decode(decompressed, T, updateRoot = false)
|
|
|
|
|
status = GetResult.found
|
|
|
|
|
else:
|
|
|
|
|
warn "Corrupt snappy record found in database", typ = name(T)
|
|
|
|
|
status = GetResult.corrupted
|
|
|
|
|
except SerializationError as e:
|
|
|
|
|
# If the data can't be deserialized, it could be because it's from a
|
|
|
|
|
# version of the software that uses a different SSZ encoding
|
|
|
|
|
warn "Unable to deserialize data, old database?",
|
|
|
|
|
err = e.msg, typ = name(T), dataLen = data.len
|
|
|
|
|
status = GetResult.corrupted
|
|
|
|
|
|
|
|
|
|
discard db.get(key, decode).expectDb()
|
|
|
|
|
discard db.get(key, decode).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
status
|
|
|
|
|
|
|
|
|
|
proc putSSZ(db: KvStoreRef, key: openArray[byte], v: auto) =
|
|
|
|
|
db.put(key, encodeSSZ(v)).expectDb()
|
|
|
|
|
|
|
|
|
|
proc getSnappySSZ[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
|
|
|
|
|
var status = GetResult.notFound
|
|
|
|
|
|
|
|
|
|
# TODO address is needed because there's no way to express lifetimes in nim
|
|
|
|
|
# we'll use unsafeAddr to find the code later
|
|
|
|
|
var outputPtr = unsafeAddr output # callback is local, ptr won't escape
|
|
|
|
|
proc decode(data: openArray[byte]) =
|
|
|
|
|
status =
|
|
|
|
|
if decodeSnappySSZ(data, outputPtr[]): GetResult.found
|
|
|
|
|
else: GetResult.corrupted
|
|
|
|
|
|
|
|
|
|
discard db.get(key, decode).expectDb()
|
|
|
|
|
|
|
|
|
|
status
|
|
|
|
|
|
|
|
|
|
proc putSnappySSZ(db: KvStoreRef, key: openArray[byte], v: auto) =
|
|
|
|
|
db.put(key, encodeSnappySSZ(v)).expectDb()
|
|
|
|
|
|
|
|
|
|
proc close*(db: BeaconChainDBV0) =
|
|
|
|
|
discard db.stateStore.close()
|
|
|
|
|
proc close*(db: BeaconChainDB) =
|
|
|
|
|
discard db.backend.close()
|
|
|
|
|
|
|
|
|
|
proc close*(db: BeaconChainDB) =
|
|
|
|
|
if db.db == nil: return
|
|
|
|
|
|
|
|
|
|
# Close things in reverse order
|
|
|
|
|
discard db.summaries.close()
|
|
|
|
|
discard db.stateDiffs.close()
|
|
|
|
|
discard db.statesNoVal.close()
|
|
|
|
|
discard db.stateRoots.close()
|
|
|
|
|
discard db.blocks.close()
|
|
|
|
|
discard db.keyValues.close()
|
|
|
|
|
db.immutableValidators.close()
|
|
|
|
|
db.genesisDeposits.close()
|
|
|
|
|
db.v0.close()
|
|
|
|
|
db.db.close()
|
|
|
|
|
|
|
|
|
|
db.db = nil
|
|
|
|
|
|
|
|
|
|
func toBeaconBlockSummary(v: SomeBeaconBlock): BeaconBlockSummary =
|
|
|
|
|
BeaconBlockSummary(
|
|
|
|
|
slot: v.slot,
|
|
|
|
|
parent_root: v.parent_root,
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
proc putBeaconBlockSummary(
|
|
|
|
|
db: BeaconChainDB, root: Eth2Digest, value: BeaconBlockSummary) =
|
|
|
|
|
# Summaries are too simple / small to compress, store them as plain SSZ
|
|
|
|
|
db.summaries.putSSZ(root.data, value)
|
|
|
|
|
|
|
|
|
|
# TODO: we should only store TrustedSignedBeaconBlock in the DB.
|
|
|
|
|
proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) =
|
|
|
|
|
db.backend.putEncoded(subkey(type value, value.root), value)
|
|
|
|
|
db.backend.putEncoded(
|
|
|
|
|
subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())
|
|
|
|
|
proc putBlock*(db: BeaconChainDB, value: TrustedSignedBeaconBlock) =
|
|
|
|
|
db.blocks.putSnappySSZ(value.root.data, value)
|
|
|
|
|
db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())
|
|
|
|
|
db.backend.putEncoded(subkey(SignedBeaconBlock, value.root), value)
|
|
|
|
|
db.backend.putEncoded(
|
|
|
|
|
subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())
|
|
|
|
|
proc putBlock*(db: BeaconChainDB, value: SigVerifiedSignedBeaconBlock) =
|
|
|
|
|
db.backend.putEncoded(subkey(SignedBeaconBlock, value.root), value)
|
|
|
|
|
db.backend.putEncoded(
|
|
|
|
|
subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())
|
|
|
|
|
|
|
|
|
|
proc updateImmutableValidators(
|
|
|
|
|
db: BeaconChainDB, immutableValidators: var seq[ImmutableValidatorData],
|
|
|
|
@@ -458,76 +494,80 @@ proc updateImmutableValidators(
|
|
|
|
|
immutableValidators.add immutableValidator
|
|
|
|
|
|
|
|
|
|
proc putState*(db: BeaconChainDB, key: Eth2Digest, value: var BeaconState) =
|
|
|
|
|
db.updateImmutableValidators(db.immutableValidatorsMem, value.validators)
|
|
|
|
|
db.statesNoVal.putSnappySSZ(
|
|
|
|
|
key.data,
|
|
|
|
|
updateImmutableValidators(db, db.immutableValidatorsMem, value.validators)
|
|
|
|
|
db.stateStore.putEncoded(
|
|
|
|
|
subkey(BeaconStateNoImmutableValidators, key),
|
|
|
|
|
isomorphicCast[BeaconStateNoImmutableValidators](value))
|
|
|
|
|
|
|
|
|
|
proc putState*(db: BeaconChainDB, value: var BeaconState) =
|
|
|
|
|
db.putState(hash_tree_root(value), value)
|
|
|
|
|
|
|
|
|
|
func stateRootKey(root: Eth2Digest, slot: Slot): array[40, byte] =
|
|
|
|
|
var ret: array[40, byte]
|
|
|
|
|
# big endian to get a naturally ascending order on slots in sorted indices
|
|
|
|
|
ret[0..<8] = toBytesBE(slot.uint64)
|
|
|
|
|
ret[8..<40] = root.data
|
|
|
|
|
proc putStateFull*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
|
|
|
|
|
db.backend.putEncoded(subkey(BeaconState, key), value)
|
|
|
|
|
|
|
|
|
|
ret
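# Standalone sketch of why the slot is stored big-endian (std lib only, demo
# procs hypothetical): byte-wise lexicographic order of the encoded key then
# matches numeric slot order, so a sorted index walks slots in ascending order.
func demoToBytesBE(x: uint64): array[8, byte] =
  for i in 0 ..< 8:
    result[i] = byte((x shr (8 * (7 - i))) and 0xff)

func demoLexLess(a, b: openArray[byte]): bool =
  for i in 0 ..< min(a.len, b.len):
    if a[i] != b[i]: return a[i] < b[i]
  a.len < b.len

doAssert demoLexLess(demoToBytesBE(2), demoToBytesBE(300))
doAssert demoLexLess(demoToBytesBE(300), demoToBytesBE(70000))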
|
|
|
|
|
proc putStateFull*(db: BeaconChainDB, value: BeaconState) =
|
|
|
|
|
db.putStateFull(hash_tree_root(value), value)
|
|
|
|
|
|
|
|
|
|
proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
|
|
|
|
|
value: Eth2Digest) =
|
|
|
|
|
db.stateRoots.putRaw(stateRootKey(root, slot), value)
|
|
|
|
|
db.backend.putRaw(subkey(root, slot), value)
|
|
|
|
|
|
|
|
|
|
proc putStateDiff*(db: BeaconChainDB, root: Eth2Digest, value: BeaconStateDiff) =
|
|
|
|
|
db.stateDiffs.putSnappySSZ(root.data, value)
|
|
|
|
|
db.backend.putEncoded(subkey(BeaconStateDiff, root), value)
|
|
|
|
|
|
|
|
|
|
proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
|
|
|
|
|
db.blocks.del(key.data).expectDb()
|
|
|
|
|
db.summaries.del(key.data).expectDb()
|
|
|
|
|
db.backend.del(subkey(SignedBeaconBlock, key)).expect("working database (disk broken/full?)")
|
|
|
|
|
db.backend.del(subkey(BeaconBlockSummary, key)).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc delState*(db: BeaconChainDB, key: Eth2Digest) =
|
|
|
|
|
db.statesNoVal.del(key.data).expectDb()
|
|
|
|
|
db.backend.del(subkey(BeaconState, key)).expect("working database (disk broken/full?)")
|
|
|
|
|
db.stateStore.del(subkey(BeaconStateNoImmutableValidators, key)).expect(
|
|
|
|
|
"working filesystem (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc delStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot) =
|
|
|
|
|
db.stateRoots.del(stateRootKey(root, slot)).expectDb()
|
|
|
|
|
db.backend.del(subkey(root, slot)).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc delStateDiff*(db: BeaconChainDB, root: Eth2Digest) =
|
|
|
|
|
db.stateDiffs.del(root.data).expectDb()
|
|
|
|
|
db.backend.del(subkey(BeaconStateDiff, root)).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
|
|
|
|
|
db.keyValues.putRaw(subkey(kHeadBlock), key)
|
|
|
|
|
db.backend.putRaw(subkey(kHeadBlock), key)
|
|
|
|
|
|
|
|
|
|
proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
|
|
|
|
|
db.keyValues.putRaw(subkey(kTailBlock), key)
|
|
|
|
|
db.backend.putRaw(subkey(kTailBlock), key)
|
|
|
|
|
|
|
|
|
|
proc putGenesisBlockRoot*(db: BeaconChainDB, key: Eth2Digest) =
|
|
|
|
|
db.keyValues.putRaw(subkey(kGenesisBlockRoot), key)
|
|
|
|
|
db.backend.putRaw(subkey(kGenesisBlockRoot), key)
|
|
|
|
|
|
|
|
|
|
proc putEth1FinalizedTo*(db: BeaconChainDB,
|
|
|
|
|
eth1Checkpoint: DepositContractSnapshot) =
|
|
|
|
|
db.backend.putEncoded(subkey(kDepositsFinalizedByEth1), eth1Checkpoint)
|
|
|
|
|
|
|
|
|
|
proc putEth2FinalizedTo*(db: BeaconChainDB,
|
|
|
|
|
eth1Checkpoint: DepositContractSnapshot) =
|
|
|
|
|
db.keyValues.putSnappySSZ(subkey(kDepositsFinalizedByEth2), eth1Checkpoint)
|
|
|
|
|
db.backend.putEncoded(subkey(kDepositsFinalizedByEth2), eth1Checkpoint)
|
|
|
|
|
|
|
|
|
|
proc getBlock(db: BeaconChainDBV0, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
|
|
|
|
|
proc putSpeculativeDeposits*(db: BeaconChainDB,
|
|
|
|
|
eth1Checkpoint: DepositContractSnapshot) =
|
|
|
|
|
db.backend.putEncoded(subkey(kSpeculativeDeposits), eth1Checkpoint)
|
|
|
|
|
|
|
|
|
|
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
|
|
|
|
|
# We only store blocks that we trust in the database
|
|
|
|
|
result.ok(TrustedSignedBeaconBlock())
|
|
|
|
|
if db.backend.getSnappySSZ(subkey(SignedBeaconBlock, key), result.get) != GetResult.found:
|
|
|
|
|
if db.backend.getEncoded(subkey(SignedBeaconBlock, key), result.get) != GetResult.found:
|
|
|
|
|
result.err()
|
|
|
|
|
else:
|
|
|
|
|
# set root after deserializing (so it doesn't get zeroed)
|
|
|
|
|
result.get().root = key
|
|
|
|
|
|
|
|
|
|
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
|
|
|
|
|
proc getBlockSummary*(db: BeaconChainDB, key: Eth2Digest): Opt[BeaconBlockSummary] =
|
|
|
|
|
# We only store blocks that we trust in the database
|
|
|
|
|
result.ok(TrustedSignedBeaconBlock())
|
|
|
|
|
if db.blocks.getSnappySSZ(key.data, result.get) != GetResult.found:
|
|
|
|
|
result = db.v0.getBlock(key)
|
|
|
|
|
else:
|
|
|
|
|
# set root after deserializing (so it doesn't get zeroed)
|
|
|
|
|
result.get().root = key
|
|
|
|
|
result.ok(BeaconBlockSummary())
|
|
|
|
|
if db.backend.getEncoded(subkey(BeaconBlockSummary, key), result.get) != GetResult.found:
|
|
|
|
|
result.err()
|
|
|
|
|
|
|
|
|
|
proc getStateOnlyMutableValidators(
|
|
|
|
|
immutableValidatorsMem: openArray[ImmutableValidatorData],
|
|
|
|
|
store: KvStoreRef, key: openArray[byte], output: var BeaconState,
|
|
|
|
|
db: BeaconChainDB, store: KvStoreRef, key: Eth2Digest, output: var BeaconState,
|
|
|
|
|
rollback: RollbackProc): bool =
|
|
|
|
|
## Load state into `output` - BeaconState is large so we want to avoid
|
|
|
|
|
## re-allocating it if possible
|
|
|
|
@@ -540,20 +580,23 @@ proc getStateOnlyMutableValidators(
|
|
|
|
|
# TODO RVO is inefficient for large objects:
|
|
|
|
|
# https://github.com/nim-lang/Nim/issues/13879
|
|
|
|
|
|
|
|
|
|
case store.getSnappySSZ(
|
|
|
|
|
key, isomorphicCast[BeaconStateNoImmutableValidators](output))
|
|
|
|
|
case store.getEncoded(
|
|
|
|
|
subkey(
|
|
|
|
|
BeaconStateNoImmutableValidators, key),
|
|
|
|
|
isomorphicCast[BeaconStateNoImmutableValidators](output))
|
|
|
|
|
of GetResult.found:
|
|
|
|
|
let numValidators = output.validators.len
|
|
|
|
|
doAssert immutableValidatorsMem.len >= numValidators
|
|
|
|
|
doAssert db.immutableValidatorsMem.len >= numValidators
|
|
|
|
|
|
|
|
|
|
for i in 0 ..< numValidators:
|
|
|
|
|
let
|
|
|
|
|
# Bypass hash cache invalidation
|
|
|
|
|
dstValidator = addr output.validators.data[i]
|
|
|
|
|
srcValidator = addr db.immutableValidatorsMem[i]
|
|
|
|
|
|
|
|
|
|
assign(dstValidator.pubkey, immutableValidatorsMem[i].pubkey)
|
|
|
|
|
assign(dstValidator.pubkey, srcValidator.pubkey)
|
|
|
|
|
assign(dstValidator.withdrawal_credentials,
|
|
|
|
|
immutableValidatorsMem[i].withdrawal_credentials)
|
|
|
|
|
srcValidator.withdrawal_credentials)
|
|
|
|
|
|
|
|
|
|
output.validators.resetCache()
|
|
|
|
|
|
|
|
|
@@ -564,35 +607,6 @@ proc getStateOnlyMutableValidators(
|
|
|
|
|
rollback(output)
|
|
|
|
|
false
|
|
|
|
|
|
|
|
|
|
proc getState(
|
|
|
|
|
db: BeaconChainDBV0,
|
|
|
|
|
immutableValidatorsMem: openArray[ImmutableValidatorData],
|
|
|
|
|
key: Eth2Digest, output: var BeaconState,
|
|
|
|
|
rollback: RollbackProc): bool =
|
|
|
|
|
# Nimbus 1.0 reads and writes genesis BeaconState to `backend`
|
|
|
|
|
# Nimbus 1.1 writes a genesis BeaconStateNoImmutableValidators to `backend` and
|
|
|
|
|
# reads both BeaconState and BeaconStateNoImmutableValidators from `backend`
|
|
|
|
|
# Nimbus 1.2 writes a genesis BeaconStateNoImmutableValidators to `stateStore`
|
|
|
|
|
# and reads BeaconState from `backend` and BeaconStateNoImmutableValidators
|
|
|
|
|
# from `stateStore`. We will try to read the state from all these locations.
|
|
|
|
|
if getStateOnlyMutableValidators(
|
|
|
|
|
immutableValidatorsMem, db.stateStore,
|
|
|
|
|
subkey(BeaconStateNoImmutableValidators, key), output, rollback):
|
|
|
|
|
return true
|
|
|
|
|
if getStateOnlyMutableValidators(
|
|
|
|
|
immutableValidatorsMem, db.backend,
|
|
|
|
|
subkey(BeaconStateNoImmutableValidators, key), output, rollback):
|
|
|
|
|
return true
|
|
|
|
|
|
|
|
|
|
case db.backend.getSnappySSZ(subkey(BeaconState, key), output)
|
|
|
|
|
of GetResult.found:
|
|
|
|
|
true
|
|
|
|
|
of GetResult.notFound:
|
|
|
|
|
false
|
|
|
|
|
of GetResult.corrupted:
|
|
|
|
|
rollback(output)
|
|
|
|
|
false
|
|
|
|
|
|
|
|
|
|
proc getState*(
|
|
|
|
|
db: BeaconChainDB, key: Eth2Digest, output: var BeaconState,
|
|
|
|
|
rollback: RollbackProc): bool =
|
|
|
|
@@ -606,74 +620,99 @@ proc getState*(
|
|
|
|
|
# https://github.com/nim-lang/Nim/issues/14126
|
|
|
|
|
# TODO RVO is inefficient for large objects:
|
|
|
|
|
# https://github.com/nim-lang/Nim/issues/13879
|
|
|
|
|
if not getStateOnlyMutableValidators(
|
|
|
|
|
db.immutableValidatorsMem, db.statesNoVal, key.data, output, rollback):
|
|
|
|
|
db.v0.getState(db.immutableValidatorsMem, key, output, rollback)
|
|
|
|
|
else:
|
|
|
|
|
true
|
|
|
|
|
if getStateOnlyMutableValidators(db, db.stateStore, key, output, rollback):
|
|
|
|
|
return true
|
|
|
|
|
|
|
|
|
|
proc getStateRoot(db: BeaconChainDBV0,
|
|
|
|
|
root: Eth2Digest,
|
|
|
|
|
slot: Slot): Opt[Eth2Digest] =
|
|
|
|
|
db.backend.getRaw(subkey(root, slot), Eth2Digest)
|
|
|
|
|
case db.backend.getEncoded(subkey(BeaconState, key), output)
|
|
|
|
|
of GetResult.found:
|
|
|
|
|
true
|
|
|
|
|
of GetResult.notFound:
|
|
|
|
|
false
|
|
|
|
|
of GetResult.corrupted:
|
|
|
|
|
rollback(output)
|
|
|
|
|
false
|
|
|
|
|
|
|
|
|
|
proc getStateRoot*(db: BeaconChainDB,
|
|
|
|
|
root: Eth2Digest,
|
|
|
|
|
slot: Slot): Opt[Eth2Digest] =
|
|
|
|
|
db.stateRoots.getRaw(stateRootKey(root, slot), Eth2Digest) or
|
|
|
|
|
db.v0.getStateRoot(root, slot)
|
|
|
|
|
db.backend.getRaw(subkey(root, slot), Eth2Digest)
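# Standalone sketch of the read-fallback used here and in the other getters
# (assumes the `or` helper for Opt from the stew results library imported
# above; demo procs are hypothetical): the new table is consulted first, the
# V0 store only when the value is missing.
import stew/results

proc demoFromNewTable(): Opt[int] =
  result.err()      # pretend the new table misses
proc demoFromV0(): Opt[int] =
  result.ok(42)     # ...and the old store has it

doAssert (demoFromNewTable() or demoFromV0()).get() == 42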
|
|
|
|
|
|
|
|
|
|
proc getStateDiff*(db: BeaconChainDB,
|
|
|
|
|
root: Eth2Digest): Opt[BeaconStateDiff] =
|
|
|
|
|
result.ok(BeaconStateDiff())
|
|
|
|
|
if db.stateDiffs.getSnappySSZ(root.data, result.get) != GetResult.found:
|
|
|
|
|
if db.backend.getEncoded(subkey(BeaconStateDiff, root), result.get) != GetResult.found:
|
|
|
|
|
result.err
|
|
|
|
|
|
|
|
|
|
proc getHeadBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
|
|
|
|
|
proc getHeadBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
|
|
|
|
|
db.backend.getRaw(subkey(kHeadBlock), Eth2Digest)
|
|
|
|
|
|
|
|
|
|
proc getHeadBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
|
|
|
|
|
db.keyValues.getRaw(subkey(kHeadBlock), Eth2Digest) or
|
|
|
|
|
db.v0.getHeadBlock()
|
|
|
|
|
|
|
|
|
|
proc getTailBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
|
|
|
|
|
proc getTailBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
|
|
|
|
|
db.backend.getRaw(subkey(kTailBlock), Eth2Digest)
|
|
|
|
|
|
|
|
|
|
proc getTailBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
|
|
|
|
|
db.keyValues.getRaw(subkey(kTailBlock), Eth2Digest) or
|
|
|
|
|
db.v0.getTailBlock()
|
|
|
|
|
|
|
|
|
|
proc getGenesisBlockRoot(db: BeaconChainDBV0): Eth2Digest =
|
|
|
|
|
db.backend.getRaw(subkey(kGenesisBlockRoot), Eth2Digest).expectDb()
|
|
|
|
|
|
|
|
|
|
proc getGenesisBlockRoot*(db: BeaconChainDB): Eth2Digest =
|
|
|
|
|
db.keyValues.getRaw(subkey(kGenesisBlockRoot), Eth2Digest).expect(
|
|
|
|
|
db.backend.getRaw(subkey(kGenesisBlockRoot), Eth2Digest).expect(
|
|
|
|
|
"The database must be seeded with the genesis state")
|
|
|
|
|
|
|
|
|
|
proc getEth2FinalizedTo(db: BeaconChainDBV0): Opt[DepositContractSnapshot] =
|
|
|
|
|
proc getEth1FinalizedTo*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
|
|
|
|
|
result.ok(DepositContractSnapshot())
|
|
|
|
|
let r = db.backend.getSnappySSZ(subkey(kDepositsFinalizedByEth2), result.get)
|
|
|
|
|
let r = db.backend.getEncoded(subkey(kDepositsFinalizedByEth1), result.get)
|
|
|
|
|
if r != found: result.err()
|
|
|
|
|
|
|
|
|
|
proc getEth2FinalizedTo*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
|
|
|
|
|
result.ok(DepositContractSnapshot())
|
|
|
|
|
let r = db.keyValues.getSnappySSZ(subkey(kDepositsFinalizedByEth2), result.get)
|
|
|
|
|
if r != found: return db.v0.getEth2FinalizedTo()
|
|
|
|
|
let r = db.backend.getEncoded(subkey(kDepositsFinalizedByEth2), result.get)
|
|
|
|
|
if r != found: result.err()
|
|
|
|
|
|
|
|
|
|
proc containsBlock*(db: BeaconChainDBV0, key: Eth2Digest): bool =
|
|
|
|
|
db.backend.contains(subkey(SignedBeaconBlock, key)).expectDb()
|
|
|
|
|
proc getSpeculativeDeposits*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
|
|
|
|
|
result.ok(DepositContractSnapshot())
|
|
|
|
|
let r = db.backend.getEncoded(subkey(kSpeculativeDeposits), result.get)
|
|
|
|
|
if r != found: result.err()
|
|
|
|
|
|
|
|
|
|
proc delSpeculativeDeposits*(db: BeaconChainDB) =
|
|
|
|
|
db.backend.del(subkey(kSpeculativeDeposits)).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
|
|
|
|
|
db.blocks.contains(key.data).expectDb() or db.v0.containsBlock(key)
|
|
|
|
|
|
|
|
|
|
proc containsState*(db: BeaconChainDBV0, key: Eth2Digest): bool =
|
|
|
|
|
let sk = subkey(BeaconStateNoImmutableValidators, key)
|
|
|
|
|
db.stateStore.contains(sk).expectDb() or
|
|
|
|
|
db.backend.contains(sk).expectDb() or
|
|
|
|
|
db.backend.contains(subkey(BeaconState, key)).expectDb
|
|
|
|
|
db.backend.contains(subkey(SignedBeaconBlock, key)).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc containsState*(db: BeaconChainDB, key: Eth2Digest): bool =
|
|
|
|
|
db.statesNoVal.contains(key.data).expectDb or db.v0.containsState(key)
|
|
|
|
|
db.stateStore.contains(subkey(BeaconStateNoImmutableValidators, key)).expect(
|
|
|
|
|
"working database (disk broken/full?)") or
|
|
|
|
|
db.backend.contains(subkey(BeaconState, key)).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc containsStateDiff*(db: BeaconChainDB, key: Eth2Digest): bool =
|
|
|
|
|
db.backend.contains(subkey(BeaconStateDiff, key)).expect("working database (disk broken/full?)")
|
|
|
|
|
|
|
|
|
|
proc repairGenesisState*(db: BeaconChainDB, key: Eth2Digest): KvResult[void] =
|
|
|
|
|
# Nimbus 1.0 reads and writes genesis BeaconState to `backend`
|
|
|
|
|
# Nimbus 1.1 writes a genesis BeaconStateNoImmutableValidators to `backend` and
|
|
|
|
|
# reads both BeaconState and BeaconStateNoImmutableValidators from `backend`
|
|
|
|
|
# Nimbus 1.2 writes a genesis BeaconStateNoImmutableValidators to `stateStore`
|
|
|
|
|
# and reads BeaconState from `backend` and BeaconStateNoImmutableValidators
|
|
|
|
|
# from `stateStore`. This means that 1.2 cannot read a database created with
|
|
|
|
|
# 1.1, and earlier versions can't read databases created with either 1.1
|
|
|
|
|
# or 1.2.
|
|
|
|
|
# Here, we will try to repair the database so that no matter what, there will
|
|
|
|
|
# be a `BeaconState` in `backend`:
|
|
|
|
|
|
|
|
|
|
if ? db.backend.contains(subkey(BeaconState, key)):
|
|
|
|
|
# No compatibility issues, life goes on
|
|
|
|
|
discard
|
|
|
|
|
elif ? db.backend.contains(subkey(BeaconStateNoImmutableValidators, key)):
|
|
|
|
|
# 1.1 writes this but not a full state - rewrite a full state
|
|
|
|
|
var output = new BeaconState
|
|
|
|
|
if not getStateOnlyMutableValidators(db, db.backend, key, output[], noRollback):
|
|
|
|
|
return err("Cannot load partial state")
|
|
|
|
|
|
|
|
|
|
putStateFull(db, output[])
|
|
|
|
|
elif ? db.stateStore.contains(subkey(BeaconStateNoImmutableValidators, key)):
|
|
|
|
|
# 1.2 writes this but not a full state - rewrite a full state
|
|
|
|
|
var output = new BeaconState
|
|
|
|
|
if not getStateOnlyMutableValidators(db, db.stateStore, key, output[], noRollback):
|
|
|
|
|
return err("Cannot load partial state")
|
|
|
|
|
|
|
|
|
|
putStateFull(db, output[])
|
|
|
|
|
|
|
|
|
|
ok()
|
|
|
|
|
|
|
|
|
|
iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
|
|
|
|
|
TrustedSignedBeaconBlock =
|
|
|
|
@@ -685,29 +724,11 @@ iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
|
|
|
|
|
var
|
|
|
|
|
res: TrustedSignedBeaconBlock
|
|
|
|
|
root = root
|
|
|
|
|
while db.blocks.getSnappySSZ(root.data, res) == GetResult.found or
|
|
|
|
|
db.v0.backend.getSnappySSZ(
|
|
|
|
|
subkey(SignedBeaconBlock, root), res) == GetResult.found:
|
|
|
|
|
while db.backend.getEncoded(subkey(SignedBeaconBlock, root), res) == GetResult.found:
|
|
|
|
|
res.root = root
|
|
|
|
|
yield res
|
|
|
|
|
root = res.message.parent_root
|
|
|
|
|
|
|
|
|
|
proc loadSummaries(db: BeaconChainDB): Table[Eth2Digest, BeaconBlockSummary] =
|
|
|
|
|
# Load summaries into table - there's no telling what order they're in so we
|
|
|
|
|
# load them all - bugs in nim prevent this code from living in the iterator.
|
|
|
|
|
var summaries = initTable[Eth2Digest, BeaconBlockSummary](1024*1024)
|
|
|
|
|
|
|
|
|
|
discard db.summaries.find([], proc(k, v: openArray[byte]) =
|
|
|
|
|
var output: BeaconBlockSummary
|
|
|
|
|
|
|
|
|
|
if k.len() == sizeof(Eth2Digest) and decodeSSZ(v, output):
|
|
|
|
|
summaries[Eth2Digest(data: toArray(sizeof(Eth2Digest), k))] = output
|
|
|
|
|
else:
|
|
|
|
|
warn "Invalid summary in database", klen = k.len(), vlen = v.len()
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
summaries
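# Standalone sketch of std/tables.withValue as used in the iterator below (the
# module already imports std/tables): the first branch runs with a pointer to
# the stored value when the key is present, the trailing `do:` branch when it
# is missing.
when isMainModule:
  import std/tables

  var demoTable = {1: "one"}.toTable()
  demoTable.withValue(1, v) do:
    doAssert v[] == "one"
  do:
    doAssert false, "key 1 is present"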
|
|
|
|
|
|
|
|
|
|
iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
|
|
|
|
|
tuple[root: Eth2Digest, summary: BeaconBlockSummary] =
|
|
|
|
|
## Load a chain of ancestors for blck - returns a list of blocks with the
|
|
|
|
@@ -715,45 +736,21 @@ iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
|
|
|
|
|
##
|
|
|
|
|
## The search will go on until the ancestor cannot be found.
|
|
|
|
|
|
|
|
|
|
# Summaries are loaded from the dedicated summaries table. For backwards
|
|
|
|
|
# compatibility, we also load from `kvstore` and finally, if no summaries
|
|
|
|
|
# can be found, by loading the blocks instead.
|
|
|
|
|
|
|
|
|
|
# First, load the full summary table into memory in one query - this makes
|
|
|
|
|
# initial startup very fast.
|
|
|
|
|
var
|
|
|
|
|
summaries = db.loadSummaries()
|
|
|
|
|
res: tuple[root: Eth2Digest, summary: BeaconBlockSummary]
|
|
|
|
|
blck: TrustedSignedBeaconBlock
|
|
|
|
|
foundOldSummary = false
|
|
|
|
|
tmp: TrustedSignedBeaconBlock
|
|
|
|
|
root = root
|
|
|
|
|
|
|
|
|
|
res.root = root
|
|
|
|
|
|
|
|
|
|
# Yield summaries in reverse chain order by walking the parent references.
|
|
|
|
|
# If a summary is missing, try loading it from the older version or create one
|
|
|
|
|
# from block data.
|
|
|
|
|
while true:
|
|
|
|
|
summaries.withValue(res.root, summary) do:
|
|
|
|
|
res.summary = summary[]
|
|
|
|
|
if db.backend.getEncoded(subkey(BeaconBlockSummary, root), res.summary) == GetResult.found:
|
|
|
|
|
res.root = root
|
|
|
|
|
yield res
|
|
|
|
|
do: # Summary was not found in summary table, look elsewhere
|
|
|
|
|
if db.v0.backend.getSnappySSZ(subkey(BeaconBlockSummary, res.root), res.summary) == GetResult.found:
|
|
|
|
|
yield res
|
|
|
|
|
elif db.v0.backend.getSnappySSZ(subkey(SignedBeaconBlock, res.root), blck) == GetResult.found:
|
|
|
|
|
res.summary = blck.message.toBeaconBlockSummary()
|
|
|
|
|
yield res
|
|
|
|
|
else:
|
|
|
|
|
break
|
|
|
|
|
# Next time, load them from the right place
|
|
|
|
|
db.putBeaconBlockSummary(res.root, res.summary)
|
|
|
|
|
elif db.backend.getEncoded(subkey(SignedBeaconBlock, root), tmp) == GetResult.found:
|
|
|
|
|
res.summary = tmp.message.toBeaconBlockSummary()
|
|
|
|
|
db.backend.putEncoded(subkey(BeaconBlockSummary, root), res.summary)
|
|
|
|
|
res.root = root
|
|
|
|
|
yield res
|
|
|
|
|
else:
|
|
|
|
|
break
|
|
|
|
|
|
|
|
|
|
res.root = res.summary.parent_root
|
|
|
|
|
|
|
|
|
|
if false:
|
|
|
|
|
# When the current version has been online for a bit, we can safely remove
|
|
|
|
|
# summaries from kvstore by enabling this little snippet - if users were
|
|
|
|
|
# to downgrade after the summaries have been purged, the old versions that
|
|
|
|
|
# use summaries can also recreate them on the fly from blocks.
|
|
|
|
|
db.db.exec(
|
|
|
|
|
"DELETE FROM kvstore WHERE key >= ? and key < ?",
|
|
|
|
|
([byte ord(kHashToBlockSummary)], [byte ord(kHashToBlockSummary) + 1])).expectDb()
|
|
|
|
|
root = res.summary.parent_root
|
|
|
|
|