{.push raises: [Defect].}

import
  typetraits, tables,
  stew/[results, objects, endians2, io2],
  serialization, chronicles, snappy,
  eth/db/[kvstore, kvstore_sqlite3],
  ./network_metadata,
  ./spec/[datatypes, digest, crypto, state_transition, signatures],
  ./ssz/[ssz_serialization, merkleization],
  merkle_minimal, filepath

type
  DbSeq*[T] = object
    insertStmt: SqliteStmt[openArray[byte], void]
    selectStmt: SqliteStmt[int64, openArray[byte]]
    recordCount: int64

  DbMap*[K, V] = object
    db: SqStoreRef
    keyspace: int

  DepositsSeq = DbSeq[DepositData]
  ImmutableValidatorDataSeq = seq[ImmutableValidatorData]
  ValidatorKeyToIndexMap = Table[ValidatorPubKey, ValidatorIndex]

  DepositsMerkleizer* = SszMerkleizer[depositContractLimit]

  BeaconChainDB* = ref object
    ## Database storing resolved blocks and states - resolved blocks are such
    ## blocks that form a chain back to the tail block.
    ##
    ## We assume that the database backend is working / not corrupt - as such,
    ## we will raise a Defect any time there is an issue. This should be
    ## revisited in the future, when/if the calling code can safely handle
    ## corruption of this kind.
    ##
    ## We do however make an effort not to crash on invalid data inside the
    ## database - this may have a number of "natural" causes such as switching
    ## between different versions of the client and accidentally using an old
    ## database.
    backend: KvStoreRef
    preset: RuntimePreset

    deposits*: DepositsSeq
    immutableValidatorData*: ImmutableValidatorDataSeq
    validatorKeyToIndex*: ValidatorKeyToIndexMap

    finalizedEth1DepositsMerkleizer*: DepositsMerkleizer
      ## A merkleizer keeping track of the `deposit_root` value obtained from
      ## Eth1 after finalizing blocks with ETH1_FOLLOW_DISTANCE confirmations.
      ## The value is used when voting for Eth1 heads.

    finalizedEth2DepositsMerkleizer*: DepositsMerkleizer
      ## A separate merkleizer which is advanced when the Eth2 chain finalizes.
      ## It will lag behind the "eth1 merkleizer". We use it to produce merkle
      ## proofs for deposits when they are added to Eth2 blocks.

  Keyspaces* = enum
    defaultKeyspace = "kvstore"
    validatorIndexFromPubKey

  DbKeyKind = enum
    kHashToState
    kHashToBlock
    kHeadBlock
      ## Pointer to the most recent block selected by the fork choice
    kTailBlock
      ## Pointer to the earliest finalized block - this is the genesis block when
      ## the chain starts, but might advance as the database gets pruned
      ## TODO: determine how aggressively the database should be pruned. For a
      ##       healthy network sync, we probably need to store blocks at least
      ##       past the weak subjectivity period.
    kBlockSlotStateRoot
      ## BlockSlot -> state_root mapping
    kGenesisBlockRoot
      ## Immutable reference to the network genesis state
      ## (needed for satisfying requests to the beacon node API).
    kEth1PersistedTo
      ## The latest ETH1 block hash which satisfied the follow distance and
      ## had its deposits persisted to disk.
    kFinalizedEth1DepositsMerkleizer
      ## A merkleizer used to compute the `deposit_root` of all finalized
      ## deposits (i.e. deposits confirmed by ETH1_FOLLOW_DISTANCE blocks)
    kFinalizedEth2DepositsMerkleizer
      ## A merkleizer used for computing merkle proofs of deposits added
      ## to Eth2 blocks (it may lag behind the finalized deposits merkleizer).
    kHashToBlockSummary
      ## Cache of beacon block summaries - during startup when we construct the
      ## chain dag, loading full blocks takes a lot of time - the block
      ## summary contains a minimal snapshot of what's needed to instantiate
      ## the BlockRef tree.

  BeaconBlockSummary* = object
    slot*: Slot
    parent_root*: Eth2Digest

const
  # The largest object we're saving is the BeaconState, and by far, the largest
  # part of it is the validator - each validator takes up at least 129 bytes
  # in phase0, which means 100k validators is >12mb - in addition to this,
  # there are several MB of hashes.
  maxDecompressedDbRecordSize = 64*1024*1024

# Subkeys essentially create "tables" within the key-value store by prefixing
# each entry with a table id

func subkey(kind: DbKeyKind): array[1, byte] =
  result[0] = byte ord(kind)

func subkey[N: static int](kind: DbKeyKind, key: array[N, byte]):
    array[N + 1, byte] =
  result[0] = byte ord(kind)
  result[1 .. ^1] = key

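# A quick illustration of the prefixing (commented out; hypothetical values,
# not part of the module):
#
#   let key = subkey(kHashToBlock, [byte 1, 2, 3])
#   doAssert key == [byte ord(kHashToBlock), 1, 2, 3]
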
func subkey(kind: type BeaconState, key: Eth2Digest): auto =
  subkey(kHashToState, key.data)

func subkey(kind: type SignedBeaconBlock, key: Eth2Digest): auto =
  subkey(kHashToBlock, key.data)

func subkey(kind: type BeaconBlockSummary, key: Eth2Digest): auto =
  subkey(kHashToBlockSummary, key.data)

func subkey(root: Eth2Digest, slot: Slot): array[40, byte] =
  var ret: array[40, byte]
  # big endian to get a naturally ascending order on slots in sorted indices
  ret[0..<8] = toBytesBE(slot.uint64)
  # .. but 7 bytes should be enough for slots - in return, we get a nicely
  # rounded key length
  ret[0] = byte ord(kBlockSlotStateRoot)
  ret[8..<40] = root.data

  ret

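# Key layout sketch (illustrative): for slot `s` and root `r`, the key is
#
#   [kBlockSlotStateRoot, s.byte5, ..., s.byte0, r[0], ..., r[31]]
#
# i.e. the table id overwrites the most significant slot byte, so sorting by
# key still sorts by slot as long as slots fit in 7 bytes (< 2^56).
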
template panic =
  # TODO(zah): Could we recover from a corrupted database?
  #            Review all usages.
  raiseAssert "The database should not be corrupted"

proc init*[T](Seq: type DbSeq[T], db: SqStoreRef, name: string): Seq =
  db.exec("""
    CREATE TABLE IF NOT EXISTS """ & name & """(
       id INTEGER PRIMARY KEY,
       value BLOB
    );
  """).expect "working database"

  let
    insertStmt = db.prepareStmt(
      "INSERT INTO " & name & "(value) VALUES (?);",
      openArray[byte], void).expect("this is a valid statement")

    selectStmt = db.prepareStmt(
      "SELECT value FROM " & name & " WHERE id = ?;",
      int64, openArray[byte]).expect("this is a valid statement")

    countStmt = db.prepareStmt(
      "SELECT COUNT(*) FROM " & name & ";",
      NoParams, int64).expect("this is a valid statement")

  var recordCount = int64 0
  let countQueryRes = countStmt.exec do (res: int64):
    recordCount = res

  let found = countQueryRes.expect("working database")
  if not found: panic()

  Seq(insertStmt: insertStmt,
      selectStmt: selectStmt,
      recordCount: recordCount)

proc add*[T](s: var DbSeq[T], val: T) =
  var bytes = SSZ.encode(val)
  s.insertStmt.exec(bytes).expect "working database"
  inc s.recordCount

template len*[T](s: DbSeq[T]): uint64 =
  s.recordCount.uint64

proc get*[T](s: DbSeq[T], idx: uint64): T =
  # This is used only locally
  let resultAddr = addr result

  let queryRes = s.selectStmt.exec(int64(idx) + 1) do (recordBytes: openArray[byte]):
    try:
      resultAddr[] = decode(SSZ, recordBytes, T)
    except SerializationError:
      panic()

  let found = queryRes.expect("working database")
  if not found: panic()

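# Minimal DbSeq usage sketch (commented out; assumes an already-open
# `SqStoreRef` named `store` - illustrative only):
#
#   var deposits = DbSeq[DepositData].init(store, "deposits")
#   deposits.add DepositData()   # SSZ-encodes and appends one row
#   doAssert deposits.len == 1
#   discard deposits.get(0)      # 0-based here; maps to SQL id 1 in selectStmt
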
proc createMap*(db: SqStoreRef, keyspace: int;
                K, V: distinct type): DbMap[K, V] =
  DbMap[K, V](db: db, keyspace: keyspace)

proc insert*[K, V](m: var DbMap[K, V], key: K, value: V) =
  m.db.put(m.keyspace, SSZ.encode key, SSZ.encode value).expect("working database")

proc contains*[K, V](m: DbMap[K, V], key: K): bool =
  contains(m.db, SSZ.encode key).expect("working database")

template insert*[K, V](t: var Table[K, V], key: K, value: V) =
  add(t, key, value)

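# DbMap usage sketch (commented out; `store`, `pubkey` and `idx` are
# placeholders for illustration):
#
#   var m = createMap(store, ord validatorIndexFromPubKey,
#                     ValidatorPubKey, ValidatorIndex)
#   m.insert(pubkey, idx)   # both key and value are stored SSZ-encoded
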
proc produceDerivedData(deposit: DepositData,
                        preset: RuntimePreset,
                        validators: var ImmutableValidatorDataSeq,
                        validatorKeyToIndex: var ValidatorKeyToIndexMap,
                        finalizedEth1DepositsMerkleizer: var DepositsMerkleizer,
                        skipBlsCheck = false) =
  let htr = hash_tree_root(deposit)
  finalizedEth1DepositsMerkleizer.addChunk htr.data

  if skipBlsCheck or verify_deposit_signature(preset, deposit):
    let pubkey = deposit.pubkey
    if pubkey notin validatorKeyToIndex:
      let idx = ValidatorIndex validators.len
      validators.add ImmutableValidatorData(
        pubkey: pubkey,
        withdrawal_credentials: deposit.withdrawal_credentials)
      validatorKeyToIndex.insert(pubkey, idx)

proc processDeposit*(db: BeaconChainDB, newDeposit: DepositData) =
  db.deposits.add newDeposit

  produceDerivedData(
    newDeposit,
    db.preset,
    db.immutableValidatorData,
    db.validatorKeyToIndex,
    db.finalizedEth1DepositsMerkleizer)

proc init*(T: type BeaconChainDB,
           preset: RuntimePreset,
           dir: string,
           inMemory = false): BeaconChainDB =
  if inMemory:
    # TODO
    # To support testing, the inMemory store should offer the complete
    # functionality of the database-backed one (i.e. tracking of deposits
    # and validators)
    T(backend: kvStore MemStoreRef.init(),
      preset: preset,
      finalizedEth1DepositsMerkleizer: init DepositsMerkleizer,
      finalizedEth2DepositsMerkleizer: init DepositsMerkleizer)
  else:
    let s = secureCreatePath(dir)
    doAssert s.isOk # TODO(zah) Handle this in a better way

    let sqliteStore = SqStoreRef.init(dir, "nbc", Keyspaces).expect(
      "working database")

    var
      immutableValidatorData = newSeq[ImmutableValidatorData]()
      validatorKeyToIndex = initTable[ValidatorPubKey, ValidatorIndex]()
      depositsSeq = DbSeq[DepositData].init(sqliteStore, "deposits")
      finalizedEth1DepositsMerkleizer = init DepositsMerkleizer
      finalizedEth2DepositsMerkleizer = init DepositsMerkleizer

    let isPyrmont =
      not pyrmontMetadata.incompatible and preset == pyrmontMetadata.runtimePreset

    for i in 0 ..< depositsSeq.len:
      # TODO this is a hack to avoid long startup times on pyrmont - it should
      #      be removed when the storage of deposits is fixed. It works because
      #      we know that the first 100k deposits on pyrmont have a valid
      #      signature
      let skipBlsCheck = isPyrmont and i < 100010

      produceDerivedData(
        depositsSeq.get(i),
        preset,
        immutableValidatorData,
        validatorKeyToIndex,
        finalizedEth1DepositsMerkleizer, skipBlsCheck)

    T(backend: kvStore sqliteStore,
      preset: preset,
      deposits: depositsSeq,
      immutableValidatorData: immutableValidatorData,
      validatorKeyToIndex: validatorKeyToIndex,
      finalizedEth1DepositsMerkleizer: finalizedEth1DepositsMerkleizer,
      finalizedEth2DepositsMerkleizer: finalizedEth2DepositsMerkleizer)

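# Opening a database (commented sketch; the preset value and directory are
# placeholders - illustrative only):
#
#   let db = BeaconChainDB.init(somePreset, "path/to/db")
#   # ... use db ...
#   db.close()
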
proc advanceTo*(merkleizer: var DepositsMerkleizer,
                db: BeaconChainDB,
                depositIndex: uint64) =
  for i in merkleizer.totalChunks ..< depositIndex:
    merkleizer.addChunk hash_tree_root(db.deposits.get(i)).data

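# advanceTo in use (commented sketch; `n` is a placeholder): bring a fresh
# merkleizer up to date with the first `n` stored deposits.
#
#   var m = init DepositsMerkleizer
#   m.advanceTo(db, n)   # adds chunks for deposits [0, n)
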
proc snappyEncode(inp: openArray[byte]): seq[byte] =
  try:
    snappy.encode(inp)
  except CatchableError as err:
    raiseAssert err.msg

proc put(db: BeaconChainDB, key: openArray[byte], v: Eth2Digest) =
  db.backend.put(key, v.data).expect("working database")

proc put(db: BeaconChainDB, key: openArray[byte], v: auto) =
  db.backend.put(key, snappyEncode(SSZ.encode(v))).expect("working database")

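# On-disk record format, per the overloads above: Eth2Digest values are stored
# raw (32 bytes), everything else as snappy(SSZ.encode(value)). A commented
# sketch of the two paths:
#
#   db.put(subkey(kHeadBlock), someDigest)      # raw 32-byte write
#   db.put(subkey(BeaconState, root), state)    # SSZ + snappy write
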
proc get(db: BeaconChainDB, key: openArray[byte], T: type Eth2Digest): Opt[T] =
  var res: Opt[T]
  proc decode(data: openArray[byte]) =
    if data.len == 32:
      res.ok Eth2Digest(data: toArray(32, data))
    else:
      # If the data can't be deserialized, it could be because it's from a
      # version of the software that uses a different SSZ encoding
      warn "Unable to deserialize data, old database?",
        typ = name(T), dataLen = data.len
      discard

  discard db.backend.get(key, decode).expect("working database")

  res

type GetResult = enum
  found
  notFound
  corrupted

proc get[T](db: BeaconChainDB, key: openArray[byte], output: var T): GetResult =
  var status = GetResult.notFound

  # TODO address is needed because there's no way to express lifetimes in nim
  #      we'll use unsafeAddr to find the code later
  var outputPtr = unsafeAddr output # callback is local, ptr won't escape
  proc decode(data: openArray[byte]) =
    try:
      let decompressed = snappy.decode(data, maxDecompressedDbRecordSize)
      if decompressed.len > 0:
        outputPtr[] = SSZ.decode(decompressed, T, updateRoot = false)
        status = GetResult.found
      else:
        warn "Corrupt snappy record found in database", typ = name(T)
        status = GetResult.corrupted
    except SerializationError as e:
      # If the data can't be deserialized, it could be because it's from a
      # version of the software that uses a different SSZ encoding
      warn "Unable to deserialize data, old database?",
        err = e.msg, typ = name(T), dataLen = data.len
      status = GetResult.corrupted

  discard db.backend.get(key, decode).expect("working database")

  status

proc close*(db: BeaconChainDB) =
  discard db.backend.close()

func toBeaconBlockSummary(v: SomeBeaconBlock): BeaconBlockSummary =
  BeaconBlockSummary(
    slot: v.slot,
    parent_root: v.parent_root,
  )

proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) =
  db.put(subkey(type value, value.root), value)
  db.put(subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())

proc putBlock*(db: BeaconChainDB, value: TrustedSignedBeaconBlock) =
  db.put(subkey(SignedBeaconBlock, value.root), value)
  db.put(subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())

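# Round-trip sketch (commented out; `blck` is a placeholder block):
#
#   db.putBlock(blck)                        # also caches a BeaconBlockSummary
#   doAssert db.getBlock(blck.root).isOk
#   doAssert db.getBlockSummary(blck.root).isOk
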
proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
  # TODO prune old states - this is less easy than it seems as we never know
  #      when or if a particular state will become finalized.

  db.put(subkey(type value, key), value)

proc putState*(db: BeaconChainDB, value: BeaconState) =
  db.putState(hash_tree_root(value), value)

proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
                   value: Eth2Digest) =
  db.put(subkey(root, slot), value)

proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.del(subkey(SignedBeaconBlock, key)).expect("working database")
  db.backend.del(subkey(BeaconBlockSummary, key)).expect("working database")

proc delState*(db: BeaconChainDB, key: Eth2Digest) =
  db.backend.del(subkey(BeaconState, key)).expect("working database")

proc delStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot) =
  db.backend.del(subkey(root, slot)).expect("working database")

proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.put(subkey(kHeadBlock), key)

proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.put(subkey(kTailBlock), key)

proc putGenesisBlockRoot*(db: BeaconChainDB, key: Eth2Digest) =
  db.put(subkey(kGenesisBlockRoot), key)

proc putEth1PersistedTo*(db: BeaconChainDB, key: Eth1Data) =
  db.put(subkey(kEth1PersistedTo), key)

proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
  # We only store blocks that we trust in the database
  result.ok(TrustedSignedBeaconBlock())
  if db.get(subkey(SignedBeaconBlock, key), result.get) != GetResult.found:
    result.err()
  else:
    # set root after deserializing (so it doesn't get zeroed)
    result.get().root = key

proc getBlockSummary*(db: BeaconChainDB, key: Eth2Digest): Opt[BeaconBlockSummary] =
  # We only store blocks that we trust in the database
  result.ok(BeaconBlockSummary())
  if db.get(subkey(BeaconBlockSummary, key), result.get) != GetResult.found:
    result.err()

proc getState*(
    db: BeaconChainDB, key: Eth2Digest, output: var BeaconState,
    rollback: RollbackProc): bool =
  ## Load state into `output` - BeaconState is large so we want to avoid
  ## re-allocating it if possible
  ## Return `true` iff the entry was found in the database and `output` was
  ## overwritten.
  ## Rollback will be called only if output was partially written - if it was
  ## not found at all, rollback will not be called
  # TODO rollback is needed to deal with bug - use `noRollback` to ignore:
  #      https://github.com/nim-lang/Nim/issues/14126
  # TODO RVO is inefficient for large objects:
  #      https://github.com/nim-lang/Nim/issues/13879
  case db.get(subkey(BeaconState, key), output)
  of GetResult.found:
    true
  of GetResult.notFound:
    false
  of GetResult.corrupted:
    rollback(output)
    false

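# getState in use (commented sketch; `noRollback` is the no-op handler
# referred to in the TODO above, `root` a placeholder digest):
#
#   var state: BeaconState
#   if not db.getState(root, state, noRollback):
#     discard # state not found, or found but corrupted
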
proc getStateRoot*(db: BeaconChainDB,
                   root: Eth2Digest,
                   slot: Slot): Opt[Eth2Digest] =
  db.get(subkey(root, slot), Eth2Digest)

proc getHeadBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
  db.get(subkey(kHeadBlock), Eth2Digest)

proc getTailBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
  db.get(subkey(kTailBlock), Eth2Digest)

proc getGenesisBlockRoot*(db: BeaconChainDB): Eth2Digest =
  db.get(subkey(kGenesisBlockRoot), Eth2Digest).expect(
    "The database must be seeded with the genesis state")

proc getEth1PersistedTo*(db: BeaconChainDB): Opt[Eth1Data] =
  result.ok(Eth1Data())
  if db.get(subkey(kEth1PersistedTo), result.get) != GetResult.found:
    result.err()

proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
  db.backend.contains(subkey(SignedBeaconBlock, key)).expect("working database")

proc containsState*(db: BeaconChainDB, key: Eth2Digest): bool =
  db.backend.contains(subkey(BeaconState, key)).expect("working database")

iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
    TrustedSignedBeaconBlock =
  ## Load a chain of ancestors for blck - returns a list of blocks with the
  ## oldest block last (blck will be at result[0]).
  ##
  ## The search will go on until the ancestor cannot be found.

  var
    res: TrustedSignedBeaconBlock
    root = root
  while db.get(subkey(SignedBeaconBlock, root), res) == GetResult.found:
    res.root = root
    yield res
    root = res.message.parent_root

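# Walking the chain (commented sketch; `head` is a placeholder root):
#
#   for blck in db.getAncestors(head):
#     discard blck.message.slot   # visits head first, then its parents
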
iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
    tuple[root: Eth2Digest, summary: BeaconBlockSummary] =
  ## Load a chain of ancestors for blck - returns a list of blocks with the
  ## oldest block last (blck will be at result[0]).
  ##
  ## The search will go on until the ancestor cannot be found.

  var
    res: tuple[root: Eth2Digest, summary: BeaconBlockSummary]
    tmp: TrustedSignedBeaconBlock
    root = root

  while true:
    if db.get(subkey(BeaconBlockSummary, root), res.summary) == GetResult.found:
      res.root = root
      yield res
    elif db.get(subkey(SignedBeaconBlock, root), tmp) == GetResult.found:
      res.summary = tmp.message.toBeaconBlockSummary()
      db.put(subkey(BeaconBlockSummary, root), res.summary)
      res.root = root
      yield res
    else:
      break

    root = res.summary.parent_root