# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  std/[typetraits, tables],
  stew/[arrayops, assign2, byteutils, endians2, io2, objects, results],
  serialization, chronicles, snappy,
  eth/db/[kvstore, kvstore_sqlite3],
  ./networking/network_metadata, ./beacon_chain_db_immutable,
  ./spec/[eth2_ssz_serialization, eth2_merkleization, forks, state_transition],
  ./spec/datatypes/[phase0, altair, merge],
  ./filepath

export
  phase0, altair, eth2_ssz_serialization, eth2_merkleization, kvstore,
  kvstore_sqlite3

logScope: topics = "bc_db"

type
  DbSeq*[T] = object
    insertStmt: SqliteStmt[openArray[byte], void]
    selectStmt: SqliteStmt[int64, openArray[byte]]
    recordCount: int64

  DepositsSeq = DbSeq[DepositData]

  DepositContractSnapshot* = object
    eth1Block*: Eth2Digest
    depositContractState*: DepositContractState

  BeaconChainDBV0* = ref object
    ## BeaconChainDBV0 is based on the old kvstore table that sets the WITHOUT
    ## ROWID option, which becomes unbearably slow with large blobs. It is used
    ## as a read-only store to support old versions - by freezing it at its
    ## current data set, downgrading remains possible since it's no longer
    ## touched - anyone downgrading will have to sync up whatever they missed.
    ##
    ## Newer versions read from the new tables first - if the data is not found,
    ## they turn to the old tables for reading. Writing is done only to the new
    ## tables.
    ##
    ## V0 stored most data in a single table, prefixing each key with a tag
    ## identifying the type of data.
    ##
    ## 1.1 introduced BeaconStateNoImmutableValidators storage where immutable
    ## validator data is stored in a separate table and only a partial
    ## BeaconState is written to kvstore
    ##
    ## 1.2 moved BeaconStateNoImmutableValidators to a separate table to
    ## alleviate some of the btree balancing issues - this doubled the speed but
    ## was still too slow
    ##
    ## 1.3 creates `kvstore` with rowid, making it quite fast, but doesn't do
    ## anything about existing databases. Versions after that use a separate
    ## file instead (V1)
    backend: KvStoreRef # kvstore
    stateStore: KvStoreRef # state_no_validators

  BeaconChainDB* = ref object
    ## Database storing resolved blocks and states - resolved blocks are such
    ## blocks that form a chain back to the tail block.
    ##
    ## We assume that the database backend is working / not corrupt - as such,
    ## we will raise a Defect any time there is an issue. This should be
    ## revisited in the future, when/if the calling code safely can handle
    ## corruption of this kind.
    ##
    ## We do however make an effort not to crash on invalid data inside the
    ## database - this may have a number of "natural" causes such as switching
    ## between different versions of the client and accidentally using an old
    ## database.
    db: SqStoreRef

    v0: BeaconChainDBV0
    genesisDeposits*: DepositsSeq

    # immutableValidatorsDb only stores the total count; it's a proxy for SQL
    # queries.
    immutableValidatorsDb*: DbSeq[ImmutableValidatorDataDb2]
    immutableValidators*: seq[ImmutableValidatorData2]

    checkpoint*: proc() {.gcsafe, raises: [Defect].}

    keyValues: KvStoreRef # Random stuff using DbKeyKind - suitable for small values mainly!
    blocks: KvStoreRef # BlockRoot -> phase0.TrustedBeaconBlock
    altairBlocks: KvStoreRef # BlockRoot -> altair.TrustedBeaconBlock
    mergeBlocks: KvStoreRef # BlockRoot -> merge.TrustedBeaconBlock
    stateRoots: KvStoreRef # (Slot, BlockRoot) -> StateRoot
    statesNoVal: KvStoreRef # StateRoot -> Phase0BeaconStateNoImmutableValidators
    altairStatesNoVal: KvStoreRef # StateRoot -> AltairBeaconStateNoImmutableValidators
    mergeStatesNoVal: KvStoreRef # StateRoot -> MergeBeaconStateNoImmutableValidators
    stateDiffs: KvStoreRef ##\
      ## StateRoot -> BeaconStateDiff
      ## Instead of storing full BeaconStates, one can store only the diff from
      ## a different state. As roughly 75% of a typical BeaconState's serialized
      ## form consists of the validators, which are mostly immutable and
      ## append-only, a simple append-diff representation helps significantly.
      ## Various roots are stored in a mod-increment pattern across fixed-sized
      ## arrays, which addresses most of the rest of the BeaconState sizes.

    summaries: KvStoreRef # BlockRoot -> BeaconBlockSummary

  DbKeyKind = enum
    kHashToState
    kHashToBlock
    kHeadBlock
      ## Pointer to the most recent block selected by the fork choice
    kTailBlock
      ## Pointer to the earliest finalized block - this is the genesis block when
      ## the chain starts, but might advance as the database gets pruned
      ## TODO: determine how aggressively the database should be pruned. For a
      ##       healthy network sync, we probably need to store blocks at least
      ##       past the weak subjectivity period.
    kBlockSlotStateRoot
      ## BlockSlot -> state_root mapping
    kGenesisBlock
      ## Immutable reference to the network genesis state
      ## (needed for satisfying requests to the beacon node API).
    kEth1PersistedTo # Obsolete
    kDepositsFinalizedByEth1 # Obsolete
    kDepositsFinalizedByEth2
      ## A merkleizer checkpoint used for computing merkle proofs of
      ## deposits added to Eth2 blocks (it may lag behind the finalized
      ## eth1 deposits checkpoint).
    kHashToBlockSummary # Block summaries for fast startup
    kSpeculativeDeposits
      ## A merkleizer checkpoint created on the basis of deposit events
      ## that we were not able to verify against a `deposit_root` served
      ## by the web3 provider. This may happen on Geth nodes that serve
      ## only recent contract state data (i.e. only recent `deposit_roots`).
    kHashToStateDiff # Obsolete
    kHashToStateOnlyMutableValidators
    kBackfillBlock
      ## Pointer to the earliest block that we have backfilled - if this is not
      ## set, backfill == tail

  BeaconBlockSummary* = object
    ## Cache of beacon block summaries - during startup when we construct the
    ## chain dag, loading full blocks takes a lot of time - the block
    ## summary contains a minimal snapshot of what's needed to instantiate
    ## the BlockRef tree.
    slot*: Slot
    parent_root*: Eth2Digest

const
  # The largest object we're saving is the BeaconState, and by far, the largest
  # part of it is the validator - each validator takes up at least 129 bytes
  # in phase0, which means 100k validators is >12mb - in addition to this,
  # there are several MB of hashes.
  maxDecompressedDbRecordSize = 64*1024*1024

# Subkeys essentially create "tables" within the key-value store by prefixing
# each entry with a table id

func subkey(kind: DbKeyKind): array[1, byte] =
  result[0] = byte ord(kind)

func subkey[N: static int](kind: DbKeyKind, key: array[N, byte]):
    array[N + 1, byte] =
  result[0] = byte ord(kind)
  result[1 .. ^1] = key

func subkey(kind: type phase0.BeaconState, key: Eth2Digest): auto =
  subkey(kHashToState, key.data)

func subkey(
    kind: type Phase0BeaconStateNoImmutableValidators, key: Eth2Digest): auto =
  subkey(kHashToStateOnlyMutableValidators, key.data)

func subkey(kind: type phase0.SignedBeaconBlock, key: Eth2Digest): auto =
  subkey(kHashToBlock, key.data)

func subkey(kind: type BeaconBlockSummary, key: Eth2Digest): auto =
  subkey(kHashToBlockSummary, key.data)

func subkey(root: Eth2Digest, slot: Slot): array[40, byte] =
  var ret: array[40, byte]
  # big endian to get a naturally ascending order on slots in sorted indices
  ret[0..<8] = toBytesBE(slot.uint64)
  # .. but 7 bytes should be enough for slots - in return, we get a nicely
  # rounded key length
  ret[0] = byte ord(kBlockSlotStateRoot)
  ret[8..<40] = root.data

  ret
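
# As a quick illustration of the subkey scheme (a sketch added here for
# documentation, not used by the code): the (root, slot) key is 40 bytes -
# byte 0 is the kBlockSlotStateRoot tag (overwriting the top slot byte),
# bytes 1..7 hold the remaining big-endian slot bytes and bytes 8..39 hold
# the 32-byte block root:
#
#   let key = subkey(Eth2Digest(), Slot(1))
#   doAssert key[0] == byte ord(kBlockSlotStateRoot)
#   doAssert key[7] == 1'u8  # slot 1, big-endian, lands in the low byte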

template panic =
  # TODO(zah): Could we recover from a corrupted database?
  #            Review all usages.
  raiseAssert "The database should not be corrupted"

template expectDb(x: auto): untyped =
  # There's no meaningful error handling implemented for a corrupt database or
  # full disk - this requires manual intervention, so we'll panic for now
  x.expect("working database (disk broken/full?)")

proc init*[T](Seq: type DbSeq[T], db: SqStoreRef, name: string): KvResult[Seq] =
  ? db.exec("""
    CREATE TABLE IF NOT EXISTS """ & name & """(
       id INTEGER PRIMARY KEY,
       value BLOB
    );
  """)

  let
    insertStmt = db.prepareStmt(
      "INSERT INTO " & name & "(value) VALUES (?);",
      openArray[byte], void, managed = false).expect("this is a valid statement")

    selectStmt = db.prepareStmt(
      "SELECT value FROM " & name & " WHERE id = ?;",
      int64, openArray[byte], managed = false).expect("this is a valid statement")

    countStmt = db.prepareStmt(
      "SELECT COUNT(1) FROM " & name & ";",
      NoParams, int64, managed = false).expect("this is a valid statement")

  var recordCount = int64 0
  let countQueryRes = countStmt.exec do (res: int64):
    recordCount = res

  let found = ? countQueryRes
  if not found:
    return err("Cannot count existing items")
  countStmt.dispose()

  ok(Seq(insertStmt: insertStmt,
         selectStmt: selectStmt,
         recordCount: recordCount))

proc close*(s: DbSeq) =
  s.insertStmt.dispose()
  s.selectStmt.dispose()

proc add*[T](s: var DbSeq[T], val: T) =
  var bytes = SSZ.encode(val)
  s.insertStmt.exec(bytes).expectDb()
  inc s.recordCount

template len*[T](s: DbSeq[T]): int64 =
  s.recordCount

proc get*[T](s: DbSeq[T], idx: int64): T =
  # This is used only locally
  let resultAddr = addr result

  let queryRes = s.selectStmt.exec(idx + 1) do (recordBytes: openArray[byte]):
    try:
      resultAddr[] = decode(SSZ, recordBytes, T)
    except SerializationError:
      panic()

  let found = queryRes.expectDb()
  if not found: panic()
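
# Typical `DbSeq` usage, as a sketch (the table name below is made up and an
# already-open `SqStoreRef` named `sq` is assumed): values are SSZ-encoded and
# appended to an auto-increment SQLite table, and `get` takes 0-based indices
# even though the underlying SQLite ids start at 1:
#
#   var deposits = DbSeq[DepositData].init(sq, "example_deposits").expect(
#     "working database")
#   deposits.add(DepositData())
#   doAssert deposits.len == 1
#   let first = deposits.get(0)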

proc loadImmutableValidators(vals: DbSeq[ImmutableValidatorDataDb2]): seq[ImmutableValidatorData2] =
  result = newSeqOfCap[ImmutableValidatorData2](vals.len())
  for i in 0 ..< vals.len:
    let tmp = vals.get(i)
    result.add ImmutableValidatorData2(
      pubkey: tmp.pubkey.loadValid(),
      withdrawal_credentials: tmp.withdrawal_credentials)

proc new*(T: type BeaconChainDB,
          dir: string,
          inMemory = false,
    ): BeaconChainDB =
  var db = if inMemory:
      SqStoreRef.init("", "test", inMemory = true).expect(
        "working database (out of memory?)")
    else:
      let s = secureCreatePath(dir)
      doAssert s.isOk # TODO(zah) Handle this in a better way

      SqStoreRef.init(
        dir, "nbc", manualCheckpoint = true).expectDb()

  # Remove the deposits table we used before we switched
  # to storing only deposit contract checkpoints
  if db.exec("DROP TABLE IF EXISTS deposits;").isErr:
    debug "Failed to drop the deposits table"

  # An old pubkey->index mapping that hasn't been used on any mainnet release
  if db.exec("DROP TABLE IF EXISTS validatorIndexFromPubKey;").isErr:
    debug "Failed to drop the validatorIndexFromPubKey table"

  var
    # V0 compatibility tables - these were created WITHOUT ROWID which is slow
    # for large blobs
    backend = kvStore db.openKvStore().expectDb()
    # state_no_validators is similar to state_no_validators2 but uses a
    # different key encoding and was created WITHOUT ROWID
    stateStore = kvStore db.openKvStore("state_no_validators").expectDb()

    genesisDepositsSeq =
      DbSeq[DepositData].init(db, "genesis_deposits").expectDb()
    immutableValidatorsDb =
      DbSeq[ImmutableValidatorDataDb2].init(db, "immutable_validators2").expectDb()

    # V1 - expected-to-be small rows get without rowid optimizations
    keyValues = kvStore db.openKvStore("key_values", true).expectDb()
    blocks = kvStore db.openKvStore("blocks").expectDb()
    altairBlocks = kvStore db.openKvStore("altair_blocks").expectDb()
    mergeBlocks = kvStore db.openKvStore("merge_blocks").expectDb()
    stateRoots = kvStore db.openKvStore("state_roots", true).expectDb()
    statesNoVal = kvStore db.openKvStore("state_no_validators2").expectDb()
    altairStatesNoVal = kvStore db.openKvStore("altair_state_no_validators").expectDb()
    mergeStatesNoVal = kvStore db.openKvStore("merge_state_no_validators").expectDb()
    stateDiffs = kvStore db.openKvStore("state_diffs").expectDb()
    summaries = kvStore db.openKvStore("beacon_block_summaries", true).expectDb()

  # `immutable_validators` stores validator keys in compressed format - this is
  # slow to load and has been superseded by `immutable_validators2` which uses
  # uncompressed keys instead. The migration is lossless but the old table
  # should not be removed until after altair, to permit downgrades.
  let immutableValidatorsDb1 =
    DbSeq[ImmutableValidatorData].init(db, "immutable_validators").expectDb()

  if immutableValidatorsDb.len() < immutableValidatorsDb1.len():
    notice "Migrating validator keys, this may take a minute",
      len = immutableValidatorsDb1.len()
    while immutableValidatorsDb.len() < immutableValidatorsDb1.len():
      let val = immutableValidatorsDb1.get(immutableValidatorsDb.len())
      immutableValidatorsDb.add(ImmutableValidatorDataDb2(
        pubkey: val.pubkey.loadValid().toUncompressed(),
        withdrawal_credentials: val.withdrawal_credentials
      ))
  immutableValidatorsDb1.close()

  T(
    db: db,
    v0: BeaconChainDBV0(
      backend: backend,
      stateStore: stateStore,
    ),
    genesisDeposits: genesisDepositsSeq,
    immutableValidatorsDb: immutableValidatorsDb,
    immutableValidators: loadImmutableValidators(immutableValidatorsDb),
    checkpoint: proc() = db.checkpoint(),
    keyValues: keyValues,
    blocks: blocks,
    altairBlocks: altairBlocks,
    mergeBlocks: mergeBlocks,
    stateRoots: stateRoots,
    statesNoVal: statesNoVal,
    altairStatesNoVal: altairStatesNoVal,
    mergeStatesNoVal: mergeStatesNoVal,
    stateDiffs: stateDiffs,
    summaries: summaries,
  )
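
# Opening and closing the database, as a sketch - an in-memory instance is
# handy for tests, while a directory-backed one is what the node uses:
#
#   let db = BeaconChainDB.new("", inMemory = true)
#   # ... use the put*/get* procs below ...
#   db.close()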

proc decodeSSZ[T](data: openArray[byte], output: var T): bool =
  try:
    readSszBytes(data, output, updateRoot = false)
    true
  except SerializationError as e:
    # If the data can't be deserialized, it could be because it's from a
    # version of the software that uses a different SSZ encoding
    warn "Unable to deserialize data, old database?",
      err = e.msg, typ = name(T), dataLen = data.len
    false

proc decodeSnappySSZ[T](data: openArray[byte], output: var T): bool =
  try:
    let decompressed = snappy.decode(data, maxDecompressedDbRecordSize)
    readSszBytes(decompressed, output, updateRoot = false)
    true
  except SerializationError as e:
    # If the data can't be deserialized, it could be because it's from a
    # version of the software that uses a different SSZ encoding
    warn "Unable to deserialize data, old database?",
      err = e.msg, typ = name(T), dataLen = data.len
    false

proc encodeSSZ(v: auto): seq[byte] =
  try:
    SSZ.encode(v)
  except IOError as err:
    raiseAssert err.msg

proc encodeSnappySSZ(v: auto): seq[byte] =
  try:
    snappy.encode(SSZ.encode(v))
  except CatchableError as err:
    # In-memory encode shouldn't fail!
    raiseAssert err.msg
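
# Round-tripping through the snappy+SSZ helpers, as a sketch - records are
# SSZ-serialized and then snappy-compressed, and decoding tolerates unknown
# or old encodings by returning false rather than raising:
#
#   let encoded = encodeSnappySSZ(BeaconBlockSummary(slot: Slot(1)))
#   var decoded: BeaconBlockSummary
#   doAssert decodeSnappySSZ(encoded, decoded)
#   doAssert decoded.slot == Slot(1)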

proc getRaw(db: KvStoreRef, key: openArray[byte], T: type Eth2Digest): Opt[T] =
  var res: Opt[T]
  proc decode(data: openArray[byte]) =
    if data.len == sizeof(Eth2Digest):
      res.ok Eth2Digest(data: toArray(sizeof(Eth2Digest), data))
    else:
      # If the data can't be deserialized, it could be because it's from a
      # version of the software that uses a different SSZ encoding
      warn "Unable to deserialize data, old database?",
        typ = name(T), dataLen = data.len
      discard

  discard db.get(key, decode).expectDb()

  res

proc putRaw(db: KvStoreRef, key: openArray[byte], v: Eth2Digest) =
  db.put(key, v.data).expectDb()

type GetResult = enum
  found = "Found"
  notFound = "Not found"
  corrupted = "Corrupted"

proc getSSZ[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
  var status = GetResult.notFound

  # TODO address is needed because there's no way to express lifetimes in nim
  #      we'll use unsafeAddr to find the code later
  var outputPtr = unsafeAddr output # callback is local, ptr won't escape
  proc decode(data: openArray[byte]) =
    status =
      if decodeSSZ(data, outputPtr[]): GetResult.found
      else: GetResult.corrupted

  discard db.get(key, decode).expectDb()

  status

proc putSSZ(db: KvStoreRef, key: openArray[byte], v: auto) =
  db.put(key, encodeSSZ(v)).expectDb()

proc getSnappySSZ[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
  var status = GetResult.notFound

  # TODO address is needed because there's no way to express lifetimes in nim
  #      we'll use unsafeAddr to find the code later
  var outputPtr = unsafeAddr output # callback is local, ptr won't escape
  proc decode(data: openArray[byte]) =
    status =
      if decodeSnappySSZ(data, outputPtr[]): GetResult.found
      else: GetResult.corrupted

  discard db.get(key, decode).expectDb()

  status

proc putSnappySSZ(db: KvStoreRef, key: openArray[byte], v: auto) =
  db.put(key, encodeSnappySSZ(v)).expectDb()

proc close*(db: BeaconChainDBV0) =
  discard db.stateStore.close()
  discard db.backend.close()

proc close*(db: BeaconChainDB) =
  if db.db == nil: return

  # Close things in reverse order
  discard db.summaries.close()
  discard db.stateDiffs.close()
  discard db.mergeStatesNoVal.close()
  discard db.altairStatesNoVal.close()
  discard db.statesNoVal.close()
  discard db.stateRoots.close()
  discard db.mergeBlocks.close()
  discard db.altairBlocks.close()
  discard db.blocks.close()
  discard db.keyValues.close()
  db.immutableValidatorsDb.close()
  db.genesisDeposits.close()
  db.v0.close()
  db.db.close()

  db.db = nil

func toBeaconBlockSummary(v: SomeSomeBeaconBlock): BeaconBlockSummary =
  BeaconBlockSummary(
    slot: v.slot,
    parent_root: v.parent_root,
  )

proc putBeaconBlockSummary(
    db: BeaconChainDB, root: Eth2Digest, value: BeaconBlockSummary) =
  # Summaries are too simple / small to compress, store them as plain SSZ
  db.summaries.putSSZ(root.data, value)

proc putBlock*(db: BeaconChainDB, value: phase0.TrustedSignedBeaconBlock) =
  db.blocks.putSnappySSZ(value.root.data, value)
  db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())

proc putBlock*(db: BeaconChainDB, value: altair.TrustedSignedBeaconBlock) =
  db.altairBlocks.putSnappySSZ(value.root.data, value)
  db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())

proc putBlock*(db: BeaconChainDB, value: merge.TrustedSignedBeaconBlock) =
  db.mergeBlocks.putSnappySSZ(value.root.data, value)
  db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())
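
# Storing and retrieving a block, as a sketch - each fork has its own table
# and `putBlock` also records a BeaconBlockSummary for fast startup
# (`trustedBlock` below is an assumed phase0.TrustedSignedBeaconBlock):
#
#   db.putBlock(trustedBlock)
#   doAssert db.getPhase0Block(trustedBlock.root).isSome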

proc updateImmutableValidators*(
    db: BeaconChainDB, validators: openArray[Validator]) =
  # Must be called before storing a state that references the new validators
  let numValidators = validators.len

  while db.immutableValidators.len() < numValidators:
    let immutableValidator =
      getImmutableValidatorData(validators[db.immutableValidators.len()])
    db.immutableValidatorsDb.add ImmutableValidatorDataDb2(
      pubkey: immutableValidator.pubkey.toUncompressed(),
      withdrawal_credentials: immutableValidator.withdrawal_credentials)
    db.immutableValidators.add immutableValidator

template toBeaconStateNoImmutableValidators(state: phase0.BeaconState):
    Phase0BeaconStateNoImmutableValidators =
  isomorphicCast[Phase0BeaconStateNoImmutableValidators](state)

template toBeaconStateNoImmutableValidators(state: altair.BeaconState):
    AltairBeaconStateNoImmutableValidators =
  isomorphicCast[AltairBeaconStateNoImmutableValidators](state)

template toBeaconStateNoImmutableValidators(state: merge.BeaconState):
    MergeBeaconStateNoImmutableValidators =
  isomorphicCast[MergeBeaconStateNoImmutableValidators](state)

proc putState*(db: BeaconChainDB, key: Eth2Digest, value: phase0.BeaconState) =
  db.updateImmutableValidators(value.validators.asSeq())
  db.statesNoVal.putSnappySSZ(
    key.data, toBeaconStateNoImmutableValidators(value))

proc putState*(db: BeaconChainDB, key: Eth2Digest, value: altair.BeaconState) =
  db.updateImmutableValidators(value.validators.asSeq())
  db.altairStatesNoVal.putSnappySSZ(
    key.data, toBeaconStateNoImmutableValidators(value))

proc putState*(db: BeaconChainDB, key: Eth2Digest, value: merge.BeaconState) =
  db.updateImmutableValidators(value.validators.asSeq())
  db.mergeStatesNoVal.putSnappySSZ(
    key.data, toBeaconStateNoImmutableValidators(value))

# For testing rollback
proc putCorruptPhase0State*(db: BeaconChainDB, key: Eth2Digest) =
  db.statesNoVal.putSnappySSZ(key.data, Validator())

proc putCorruptAltairState*(db: BeaconChainDB, key: Eth2Digest) =
  db.altairStatesNoVal.putSnappySSZ(key.data, Validator())

proc putCorruptMergeState*(db: BeaconChainDB, key: Eth2Digest) =
  db.mergeStatesNoVal.putSnappySSZ(key.data, Validator())
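
# How the state split works, as a sketch: `putState` first appends any new
# immutable validator records (pubkey, withdrawal_credentials) to the shared
# `immutable_validators2` table via `updateImmutableValidators`, then stores
# the remaining, mutable part of the state through the per-fork
# `...NoImmutableValidators` view (`stateRoot` and `state` below are assumed):
#
#   db.putState(stateRoot, state)
#   doAssert db.containsState(stateRoot)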

func stateRootKey(root: Eth2Digest, slot: Slot): array[40, byte] =
  var ret: array[40, byte]
  # big endian to get a naturally ascending order on slots in sorted indices
  ret[0..<8] = toBytesBE(slot.uint64)
  ret[8..<40] = root.data

  ret

proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
    value: Eth2Digest) =
  db.stateRoots.putRaw(stateRootKey(root, slot), value)

proc putStateDiff*(db: BeaconChainDB, root: Eth2Digest, value: BeaconStateDiff) =
  db.stateDiffs.putSnappySSZ(root.data, value)

proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.blocks.del(key.data).expectDb()
  db.altairBlocks.del(key.data).expectDb()
  db.mergeBlocks.del(key.data).expectDb()
  db.summaries.del(key.data).expectDb()

proc delState*(db: BeaconChainDB, key: Eth2Digest) =
  db.statesNoVal.del(key.data).expectDb()
  db.altairStatesNoVal.del(key.data).expectDb()
  db.mergeStatesNoVal.del(key.data).expectDb()

proc delStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot) =
  db.stateRoots.del(stateRootKey(root, slot)).expectDb()

proc delStateDiff*(db: BeaconChainDB, root: Eth2Digest) =
  db.stateDiffs.del(root.data).expectDb()

proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.keyValues.putRaw(subkey(kHeadBlock), key)

proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.keyValues.putRaw(subkey(kTailBlock), key)

proc putGenesisBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.keyValues.putRaw(subkey(kGenesisBlock), key)
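
# Head/tail bookkeeping, as a sketch - the fork choice head and the finalized
# tail are stored as plain Eth2Digest pointers in the key-value table
# (`someRoot` below is assumed):
#
#   db.putHeadBlock(someRoot)
#   doAssert db.getHeadBlock().get() == someRoot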

proc putBackfillBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.keyValues.putRaw(subkey(kBackfillBlock), key)

proc putEth2FinalizedTo*(db: BeaconChainDB,
                         eth1Checkpoint: DepositContractSnapshot) =
  db.keyValues.putSnappySSZ(subkey(kDepositsFinalizedByEth2), eth1Checkpoint)

proc getPhase0Block(db: BeaconChainDBV0, key: Eth2Digest): Opt[phase0.TrustedSignedBeaconBlock] =
  # We only store blocks that we trust in the database
  result.ok(default(phase0.TrustedSignedBeaconBlock))
  if db.backend.getSnappySSZ(
      subkey(phase0.SignedBeaconBlock, key), result.get) != GetResult.found:
    result.err()
  else:
    # set root after deserializing (so it doesn't get zeroed)
    result.get().root = key

proc getPhase0Block*(db: BeaconChainDB, key: Eth2Digest):
    Opt[phase0.TrustedSignedBeaconBlock] =
  # We only store blocks that we trust in the database
  result.ok(default(phase0.TrustedSignedBeaconBlock))
  if db.blocks.getSnappySSZ(key.data, result.get) != GetResult.found:
    result = db.v0.getPhase0Block(key)
  else:
    # set root after deserializing (so it doesn't get zeroed)
    result.get().root = key

proc getAltairBlock*(db: BeaconChainDB, key: Eth2Digest):
    Opt[altair.TrustedSignedBeaconBlock] =
  # We only store blocks that we trust in the database
  result.ok(default(altair.TrustedSignedBeaconBlock))
  if db.altairBlocks.getSnappySSZ(key.data, result.get) == GetResult.found:
    # set root after deserializing (so it doesn't get zeroed)
    result.get().root = key
  else:
    result.err()

proc getMergeBlock*(db: BeaconChainDB, key: Eth2Digest):
    Opt[merge.TrustedSignedBeaconBlock] =
  # We only store blocks that we trust in the database
  result.ok(default(merge.TrustedSignedBeaconBlock))
  if db.mergeBlocks.getSnappySSZ(key.data, result.get) == GetResult.found:
    # set root after deserializing (so it doesn't get zeroed)
    result.get().root = key
  else:
    result.err()

proc getStateOnlyMutableValidators(
    immutableValidators: openArray[ImmutableValidatorData2],
    store: KvStoreRef, key: openArray[byte], output: var ForkyBeaconState,
    rollback: RollbackProc): bool =
  ## Load state into `output` - BeaconState is large so we want to avoid
  ## re-allocating it if possible
  ## Return `true` iff the entry was found in the database and `output` was
  ## overwritten.
  ## Rollback will be called only if output was partially written - if it was
  ## not found at all, rollback will not be called
  # TODO rollback is needed to deal with bug - use `noRollback` to ignore:
  #      https://github.com/nim-lang/Nim/issues/14126
  # TODO RVO is inefficient for large objects:
  #      https://github.com/nim-lang/Nim/issues/13879

  case store.getSnappySSZ(key, toBeaconStateNoImmutableValidators(output))
  of GetResult.found:
    let numValidators = output.validators.len
    doAssert immutableValidators.len >= numValidators

    for i in 0 ..< numValidators:
      let
        # Bypass hash cache invalidation
        dstValidator = addr output.validators.data[i]

      assign(
        dstValidator.pubkey,
        immutableValidators[i].pubkey.toPubKey())
      assign(
        dstValidator.withdrawal_credentials,
        immutableValidators[i].withdrawal_credentials)

    output.validators.resetCache()

    true
  of GetResult.notFound:
    false
  of GetResult.corrupted:
    rollback()
    false

proc getState(
    db: BeaconChainDBV0,
    immutableValidators: openArray[ImmutableValidatorData2],
    key: Eth2Digest, output: var phase0.BeaconState,
    rollback: RollbackProc): bool =
  # Nimbus 1.0 reads and writes the genesis BeaconState to `backend`
  # Nimbus 1.1 writes a genesis BeaconStateNoImmutableValidators to `backend` and
  # reads both BeaconState and BeaconStateNoImmutableValidators from `backend`
  # Nimbus 1.2 writes a genesis BeaconStateNoImmutableValidators to `stateStore`
  # and reads BeaconState from `backend` and BeaconStateNoImmutableValidators
  # from `stateStore`. We will try to read the state from all these locations.
  if getStateOnlyMutableValidators(
      immutableValidators, db.stateStore,
      subkey(Phase0BeaconStateNoImmutableValidators, key), output, rollback):
    return true
  if getStateOnlyMutableValidators(
      immutableValidators, db.backend,
      subkey(Phase0BeaconStateNoImmutableValidators, key), output, rollback):
    return true

  case db.backend.getSnappySSZ(subkey(phase0.BeaconState, key), output)
  of GetResult.found:
    true
  of GetResult.notFound:
    false
  of GetResult.corrupted:
    rollback()
    false

proc getState*(
    db: BeaconChainDB, key: Eth2Digest, output: var phase0.BeaconState,
    rollback: RollbackProc): bool =
  ## Load state into `output` - BeaconState is large so we want to avoid
  ## re-allocating it if possible
  ## Return `true` iff the entry was found in the database and `output` was
  ## overwritten.
  ## Rollback will be called only if output was partially written - if it was
  ## not found at all, rollback will not be called
  # TODO rollback is needed to deal with bug - use `noRollback` to ignore:
  #      https://github.com/nim-lang/Nim/issues/14126
  # TODO RVO is inefficient for large objects:
  #      https://github.com/nim-lang/Nim/issues/13879
  if not getStateOnlyMutableValidators(
      db.immutableValidators, db.statesNoVal, key.data, output, rollback):
    db.v0.getState(db.immutableValidators, key, output, rollback)
  else:
    true

proc getState*(
    db: BeaconChainDB, key: Eth2Digest, output: var altair.BeaconState,
    rollback: RollbackProc): bool =
  ## Load state into `output` - BeaconState is large so we want to avoid
  ## re-allocating it if possible
  ## Return `true` iff the entry was found in the database and `output` was
  ## overwritten.
  ## Rollback will be called only if output was partially written - if it was
  ## not found at all, rollback will not be called
  # TODO rollback is needed to deal with bug - use `noRollback` to ignore:
  #      https://github.com/nim-lang/Nim/issues/14126
  # TODO RVO is inefficient for large objects:
  #      https://github.com/nim-lang/Nim/issues/13879
  getStateOnlyMutableValidators(
    db.immutableValidators, db.altairStatesNoVal, key.data, output, rollback)

proc getState*(
    db: BeaconChainDB, key: Eth2Digest, output: var merge.BeaconState,
    rollback: RollbackProc): bool =
  ## Load state into `output` - BeaconState is large so we want to avoid
  ## re-allocating it if possible
  ## Return `true` iff the entry was found in the database and `output` was
  ## overwritten.
  ## Rollback will be called only if output was partially written - if it was
  ## not found at all, rollback will not be called
  # TODO rollback is needed to deal with bug - use `noRollback` to ignore:
  #      https://github.com/nim-lang/Nim/issues/14126
  # TODO RVO is inefficient for large objects:
  #      https://github.com/nim-lang/Nim/issues/13879
  getStateOnlyMutableValidators(
    db.immutableValidators, db.mergeStatesNoVal, key.data, output, rollback)
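
# Reading a state back, as a sketch - the caller provides the output object
# (reused to avoid large allocations) and a rollback callback that is invoked
# only when a partially-written/corrupt entry is encountered (`noRollback`
# can be used to ignore it; `stateRoot` below is assumed):
#
#   var state = (ref phase0.BeaconState)()
#   if db.getState(stateRoot, state[], noRollback):
#     discard # state[] now holds the stored state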

proc getStateRoot(db: BeaconChainDBV0,
                  root: Eth2Digest,
                  slot: Slot): Opt[Eth2Digest] =
  db.backend.getRaw(subkey(root, slot), Eth2Digest)

proc getStateRoot*(db: BeaconChainDB,
                   root: Eth2Digest,
                   slot: Slot): Opt[Eth2Digest] =
  db.stateRoots.getRaw(stateRootKey(root, slot), Eth2Digest) or
    db.v0.getStateRoot(root, slot)

proc getStateDiff*(db: BeaconChainDB,
                   root: Eth2Digest): Opt[BeaconStateDiff] =
  result.ok(BeaconStateDiff())
  if db.stateDiffs.getSnappySSZ(root.data, result.get) != GetResult.found:
    result.err

proc getHeadBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
  db.backend.getRaw(subkey(kHeadBlock), Eth2Digest)

proc getHeadBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
  db.keyValues.getRaw(subkey(kHeadBlock), Eth2Digest) or
    db.v0.getHeadBlock()

proc getTailBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
  db.backend.getRaw(subkey(kTailBlock), Eth2Digest)

proc getTailBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
  db.keyValues.getRaw(subkey(kTailBlock), Eth2Digest) or
    db.v0.getTailBlock()

proc getGenesisBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
  db.backend.getRaw(subkey(kGenesisBlock), Eth2Digest)

proc getGenesisBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
  db.keyValues.getRaw(subkey(kGenesisBlock), Eth2Digest) or
    db.v0.getGenesisBlock()

proc getBackfillBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
  db.keyValues.getRaw(subkey(kBackfillBlock), Eth2Digest)

proc getEth2FinalizedTo(db: BeaconChainDBV0): Opt[DepositContractSnapshot] =
  result.ok(DepositContractSnapshot())
  let r = db.backend.getSnappySSZ(subkey(kDepositsFinalizedByEth2), result.get)
  if r != found: result.err()

proc getEth2FinalizedTo*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
  result.ok(DepositContractSnapshot())
  let r = db.keyValues.getSnappySSZ(subkey(kDepositsFinalizedByEth2), result.get)
  if r != found: return db.v0.getEth2FinalizedTo()

proc containsBlock*(db: BeaconChainDBV0, key: Eth2Digest): bool =
  db.backend.contains(subkey(phase0.SignedBeaconBlock, key)).expectDb()

proc containsBlockPhase0*(db: BeaconChainDB, key: Eth2Digest): bool =
  db.blocks.contains(key.data).expectDb() or
    db.v0.containsBlock(key)

proc containsBlockAltair*(db: BeaconChainDB, key: Eth2Digest): bool =
  db.altairBlocks.contains(key.data).expectDb()

proc containsBlockMerge*(db: BeaconChainDB, key: Eth2Digest): bool =
  db.mergeBlocks.contains(key.data).expectDb()

proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
  db.containsBlockMerge(key) or db.containsBlockAltair(key) or
    db.containsBlockPhase0(key)

proc containsState*(db: BeaconChainDBV0, key: Eth2Digest): bool =
  let sk = subkey(Phase0BeaconStateNoImmutableValidators, key)
  db.stateStore.contains(sk).expectDb() or
    db.backend.contains(sk).expectDb() or
    db.backend.contains(subkey(phase0.BeaconState, key)).expectDb()

proc containsState*(db: BeaconChainDB, key: Eth2Digest, legacy: bool = true): bool =
  db.mergeStatesNoVal.contains(key.data).expectDb or
  db.altairStatesNoVal.contains(key.data).expectDb or
  db.statesNoVal.contains(key.data).expectDb or
    (legacy and db.v0.containsState(key))

iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
    phase0.TrustedSignedBeaconBlock =
  ## Load a chain of ancestors for blck - returns a list of blocks with the
  ## oldest block last (blck will be at result[0]).
  ##
  ## The search will go on until the ancestor cannot be found.

  var
    res: phase0.TrustedSignedBeaconBlock
    root = root
  while db.blocks.getSnappySSZ(root.data, res) == GetResult.found or
      db.v0.backend.getSnappySSZ(
        subkey(phase0.SignedBeaconBlock, root), res) == GetResult.found:
    res.root = root
    yield res
    root = res.message.parent_root

proc loadSummaries(db: BeaconChainDB): Table[Eth2Digest, BeaconBlockSummary] =
  # Load summaries into table - there's no telling what order they're in so we
  # load them all - bugs in nim prevent this code from living in the iterator.
  var summaries = initTable[Eth2Digest, BeaconBlockSummary](1024*1024)

  discard db.summaries.find([], proc(k, v: openArray[byte]) =
    var output: BeaconBlockSummary

    if k.len() == sizeof(Eth2Digest) and decodeSSZ(v, output):
      summaries[Eth2Digest(data: toArray(sizeof(Eth2Digest), k))] = output
    else:
      warn "Invalid summary in database", klen = k.len(), vlen = v.len()
  )

  summaries

type RootedSummary = tuple[root: Eth2Digest, summary: BeaconBlockSummary]
iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
    RootedSummary =
  ## Load a chain of ancestors for blck - returns a list of blocks with the
  ## oldest block last (blck will be at result[0]).
  ##
  ## The search will go on until the ancestor cannot be found.

  # Summaries are loaded from the dedicated summaries table. For backwards
  # compatibility, we also load from `kvstore` and finally, if no summaries
  # can be found, by loading the blocks instead.

  # First, load the full summary table into memory in one query - this makes
  # initial startup very fast.
  var
    summaries = db.loadSummaries()
    res: RootedSummary
    blck: phase0.TrustedSignedBeaconBlock
    newSummaries: seq[RootedSummary]

  res.root = root

  defer: # in case iteration is stopped along the way
    # Write the newly found summaries in a single transaction - on first migration
    # from the old format, this brings down the write from minutes to seconds
    if newSummaries.len() > 0:
      db.db.exec("BEGIN TRANSACTION;").expectDb()
      for s in newSummaries:
        db.putBeaconBlockSummary(s.root, s.summary)
      db.db.exec("COMMIT;").expectDb()

    if false:
      # When the current version has been online for a bit, we can safely remove
      # summaries from kvstore by enabling this little snippet - if users were
      # to downgrade after the summaries have been purged, the old versions that
      # use summaries can also recreate them on the fly from blocks.
      db.db.exec(
        "DELETE FROM kvstore WHERE key >= ? and key < ?",
        ([byte ord(kHashToBlockSummary)], [byte ord(kHashToBlockSummary) + 1])).expectDb()

  # Yield summaries in reverse chain order by walking the parent references.
  # If a summary is missing, try loading it from the older version or create one
  # from block data.
  while true:
    summaries.withValue(res.root, summary) do:
      res.summary = summary[]
      yield res
    do: # Summary was not found in summary table, look elsewhere
      if db.v0.backend.getSnappySSZ(subkey(BeaconBlockSummary, res.root), res.summary) == GetResult.found:
        yield res
      elif db.v0.backend.getSnappySSZ(
          subkey(phase0.SignedBeaconBlock, res.root), blck) == GetResult.found:
        res.summary = blck.message.toBeaconBlockSummary()
        yield res
      else:
        break

      # Next time, load them from the right place
      newSummaries.add(res)

    res.root = res.summary.parent_root
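
# Walking the chain at startup, as a sketch - summaries are yielded from the
# given root back towards genesis, stopping at the first missing parent
# (`headRoot` below is assumed):
#
#   for rooted in db.getAncestorSummaries(headRoot):
#     echo rooted.root, " @ ", rooted.summary.slot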

# Test operations used to create broken and/or legacy database

proc putStateV0*(db: BeaconChainDB, key: Eth2Digest, value: phase0.BeaconState) =
  # Writes to KVStore, as done in 1.0.12 and earlier
  db.v0.backend.putSnappySSZ(subkey(type value, key), value)

proc putBlockV0*(db: BeaconChainDB, value: phase0.TrustedSignedBeaconBlock) =
  # Write to KVStore, as done in 1.0.12 and earlier
  # In particular, no summary is written here - it should be recreated
  # automatically
  db.v0.backend.putSnappySSZ(subkey(phase0.SignedBeaconBlock, value.root), value)