# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/[typetraits, tables],
  results,
  stew/[arrayops, assign2, byteutils, endians2, io2, objects],
  serialization, chronicles, snappy,
  eth/db/[kvstore, kvstore_sqlite3],
  ./networking/network_metadata, ./beacon_chain_db_immutable,
  ./spec/[deposit_snapshots,
          eth2_ssz_serialization,
          eth2_merkleization,
          forks,
          presets,
          state_transition],
  "."/[beacon_chain_db_light_client, filepath]

from ./spec/datatypes/capella import BeaconState
from ./spec/datatypes/deneb import TrustedSignedBeaconBlock

export
  phase0, altair, eth2_ssz_serialization, eth2_merkleization, kvstore,
  kvstore_sqlite3, deposit_snapshots

logScope: topics = "bc_db"

type
  DbSeq*[T] = object
    insertStmt: SqliteStmt[openArray[byte], void]
    selectStmt: SqliteStmt[int64, openArray[byte]]
    recordCount: int64

  FinalizedBlocks* = object
    # A sparse version of DbSeq - can have holes but not duplicate entries
    insertStmt: SqliteStmt[(int64, array[32, byte]), void]
    selectStmt: SqliteStmt[int64, array[32, byte]]
    selectAllStmt: SqliteStmt[NoParams, (int64, array[32, byte])]

    low*: Opt[Slot]
    high*: Opt[Slot]

  DepositsSeq = DbSeq[DepositData]

  BeaconChainDBV0* = ref object
    ## BeaconChainDBV0 is based on the old kvstore table that sets the WITHOUT
    ## ROWID option, which becomes unbearably slow with large blobs. It is used
    ## as a read-only store to support old versions - by freezing it at its
    ## current data set, downgrading remains possible since it's no longer
    ## touched - anyone downgrading will have to sync up whatever they missed.
    ##
    ## Newer versions read from the new tables first - if the data is not found,
    ## they turn to the old tables for reading. Writing is done only to the new
    ## tables.
    ##
    ## V0 stored most data in a single table, prefixing each key with a tag
    ## identifying the type of data.
    ##
    ## 1.1 introduced BeaconStateNoImmutableValidators storage where immutable
    ## validator data is stored in a separate table and only a partial
    ## BeaconState is written to kvstore
    ##
    ## 1.2 moved BeaconStateNoImmutableValidators to a separate table to
    ## alleviate some of the btree balancing issues - this doubled the speed but
    ## was still slow
    ##
    ## 1.3 creates `kvstore` with rowid, making it quite fast, but doesn't do
    ## anything about existing databases. Versions after that use a separate
    ## file instead (V1)
    ##
    ## Starting with bellatrix, we store blocks and states using snappy framed
    ## encoding so as to match the `Req`/`Resp` protocols and era files ("SZ").
    backend: KvStoreRef # kvstore
    stateStore: KvStoreRef # state_no_validators

  BeaconChainDB* = ref object
    ## Database storing resolved blocks and states - resolved blocks are such
    ## blocks that form a chain back to the tail block.
    ##
    ## We assume that the database backend is working / not corrupt - as such,
    ## we will raise a Defect any time there is an issue. This should be
    ## revisited in the future, when/if the calling code safely can handle
    ## corruption of this kind.
    ##
    ## The database follows a "mostly-consistent" model where it's possible
    ## that some data has been lost to crashes and restarts - for example,
    ## the state root table might contain entries that don't lead to a state
    ## etc - this makes it easier to defer certain operations such as pruning
    ## and cleanup, but also means that some amount of "junk" is left behind
    ## when the application is restarted or crashes in the wrong moment.
    ##
    ## Generally, sqlite performs a commit at the end of every write, meaning
    ## that data write order is respected - the strategy thus becomes to write
    ## bulk data first, then update pointers like the `head root` entry.
    db*: SqStoreRef

    v0: BeaconChainDBV0
    genesisDeposits*: DepositsSeq

    # immutableValidatorsDb only stores the total count; it's a proxy for SQL
    # queries. (v1.4.0+)
    immutableValidatorsDb*: DbSeq[ImmutableValidatorDataDb2]
    immutableValidators*: seq[ImmutableValidatorData2]

    checkpoint*: proc() {.gcsafe, raises: [].}

    keyValues: KvStoreRef # Random stuff using DbKeyKind - suitable for small values mainly!

    blocks: array[ConsensusFork, KvStoreRef] # BlockRoot -> TrustedSignedBeaconBlock

    blobs: KvStoreRef # (BlockRoot -> BlobSidecar)

    columns: KvStoreRef # (BlockRoot -> DataColumnSidecar)

    stateRoots: KvStoreRef # (Slot, BlockRoot) -> StateRoot

    statesNoVal: array[ConsensusFork, KvStoreRef] # StateRoot -> ForkBeaconStateNoImmutableValidators

    stateDiffs: KvStoreRef ##\
      ## StateRoot -> BeaconStateDiff
      ## Instead of storing full BeaconStates, one can store only the diff from
      ## a different state. As roughly 75% of a typical BeaconState's serialized
      ## form consists of the validators, which are mostly immutable and
      ## append-only, just using a simple append-diff representation helps
      ## significantly. Various roots are stored in a mod-increment pattern
      ## across fixed-sized arrays, which addresses most of the rest of the
      ## BeaconState sizes.

    summaries: KvStoreRef
      ## BlockRoot -> BeaconBlockSummary - permits looking up basic block
      ## information via block root - contains only summaries that were valid
      ## at some point in history - it is however possible that entries exist
      ## that are no longer part of the finalized chain history, thus the
      ## cache should not be used to answer fork choice questions - see
      ## `getHeadBlock` and `finalizedBlocks` instead.
      ##
      ## May contain entries for blocks that are not stored in the database.
      ##
      ## See `finalizedBlocks` for an index in the other direction.

    finalizedBlocks*: FinalizedBlocks
      ## Blocks that are known to be finalized, per the latest head (v1.7.0+)
      ## Only blocks that have passed verification, either via state transition
      ## or backfilling, are indexed here - thus, similar to `head`, it is part
      ## of the inner security ring and is used to answer security questions
      ## in the chaindag.
      ##
      ## May contain entries for blocks that are not stored in the database.
      ##
      ## See `summaries` for an index in the other direction.

    lcData: LightClientDataDB
      ## Persistent light client data to avoid expensive recomputations

  DbKeyKind* = enum
    # BEWARE. You should never remove entries from this enum.
    # Only new items should be added to its end.
    kHashToState
    kHashToBlock
    kHeadBlock
      ## Pointer to the most recent block selected by the fork choice
    kTailBlock
      ## Pointer to the earliest finalized block - this is the genesis
      ## block when the chain starts, but might advance as the database
      ## gets pruned
      ## TODO: determine how aggressively the database should be pruned.
      ##       For a healthy network sync, we probably need to store blocks
      ##       at least past the weak subjectivity period.
    kBlockSlotStateRoot
      ## BlockSlot -> state_root mapping
    kGenesisBlock
      ## Immutable reference to the network genesis state
      ## (needed for satisfying requests to the beacon node API).
    kEth1PersistedTo # Obsolete
    kDepositsFinalizedByEth1 # Obsolete
    kOldDepositContractSnapshot
      ## Deprecated:
      ## This was the merkleizer checkpoint produced by processing the
      ## finalized deposits (similar to kDepositContractSnapshot, but before
      ## the EIP-4881 support was introduced). Currently, we read from
      ## it during upgrades and we keep writing data to it as a measure
      ## allowing the users to downgrade to a previous version of Nimbus.
    kHashToBlockSummary # Block summaries for fast startup
    kSpeculativeDeposits
      ## Obsolete:
      ## This was a merkleizer checkpoint created on the basis of deposit
      ## events that we were not able to verify against a `deposit_root`
      ## served by the web3 provider. This was happening on Geth nodes
      ## that serve only recent contract state data (i.e. only recent
      ## `deposit_roots`).
    kHashToStateDiff # Obsolete
    kHashToStateOnlyMutableValidators
    kBackfillBlock # Obsolete, was in `unstable` for a while, but never released
    kDepositContractSnapshot
      ## Deposit contract state snapshot derived from EIP-4881 data.
      ## This key also stores intermediate hashes that are no longer used
      ## for future deposits, beyond the `finalized` branch from EIP-4881.
      ## Those extra hashes may be set to ZERO_HASH when importing from a
      ## compressed EIP-4881 `DepositTreeSnapshot`.

  BeaconBlockSummary* = object
    ## Cache of beacon block summaries - during startup when we construct the
    ## chain dag, loading full blocks takes a lot of time - the block
    ## summary contains a minimal snapshot of what's needed to instantiate
    ## the BlockRef tree.
    slot*: Slot
    parent_root*: Eth2Digest

func shortLog*(v: BeaconBlockSummary): auto =
  (v.slot, shortLog(v.parent_root))

# Subkeys essentially create "tables" within the key-value store by prefixing
# each entry with a table id

func subkey(kind: DbKeyKind): array[1, byte] =
  result[0] = byte ord(kind)

func subkey[N: static int](kind: DbKeyKind, key: array[N, byte]):
    array[N + 1, byte] =
  result[0] = byte ord(kind)
  result[1 .. ^1] = key

func subkey(kind: type phase0.BeaconState, key: Eth2Digest): auto =
  subkey(kHashToState, key.data)

func subkey(
    kind: type Phase0BeaconStateNoImmutableValidators, key: Eth2Digest): auto =
  subkey(kHashToStateOnlyMutableValidators, key.data)

func subkey(kind: type phase0.SignedBeaconBlock, key: Eth2Digest): auto =
  subkey(kHashToBlock, key.data)

func subkey(kind: type BeaconBlockSummary, key: Eth2Digest): auto =
  subkey(kHashToBlockSummary, key.data)

func subkey(root: Eth2Digest, slot: Slot): array[40, byte] =
  var ret: array[40, byte]
  # big endian to get a naturally ascending order on slots in sorted indices
  ret[0..<8] = toBytesBE(slot.uint64)
  # .. but 7 bytes should be enough for slots - in return, we get a nicely
  # rounded key length
  ret[0] = byte ord(kBlockSlotStateRoot)
  ret[8..<40] = root.data

  ret
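
# A worked sketch of the composite key layout (assumes a zeroed `Eth2Digest`):
# the tag byte overwrites the top byte of the big-endian slot, leaving 7 slot
# bytes followed by the 32-byte root, so keys sort naturally by slot:
#
#   let key = subkey(Eth2Digest(), Slot(1))
#   doAssert key[0] == byte ord(kBlockSlotStateRoot) # tag replaces slot byte 0
#   doAssert key[7] == 1'u8                          # big-endian slot remainder
#   doAssert key[8 ..< 40] == @(Eth2Digest().data)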

func blobkey(root: Eth2Digest, index: BlobIndex) : array[40, byte] =
  var ret: array[40, byte]
  ret[0..<8] = toBytes(index)
  ret[8..<40] = root.data

  ret

func columnkey(root: Eth2Digest, index: ColumnIndex) : array[40, byte] =
  var ret: array[40, byte]
  ret[0..<8] = toBytes(index)
  ret[8..<40] = root.data

  ret
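
# Key-layout sketch (assumes zeroed values): the 8-byte `toBytes` index simply
# prefixes the 32-byte block root - unlike `subkey`, no sort-friendly encoding
# is applied, since these keys are looked up and deleted individually:
#
#   let key = blobkey(Eth2Digest(), BlobIndex(3))
#   doAssert key[0 ..< 8] == @(toBytes(3'u64))
#   doAssert key[8 ..< 40] == @(Eth2Digest().data)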

template expectDb(x: auto): untyped =
  # There's no meaningful error handling implemented for a corrupt database or
  # full disk - this requires manual intervention, so we'll panic for now
  x.expect("working database (disk broken/full?)")

proc init*[T](
    Seq: type DbSeq[T], db: SqStoreRef, name: string,
    readOnly = false): KvResult[Seq] =
  let hasTable = if db.readOnly or readOnly:
    ? db.hasTable(name)
  else:
    ? db.exec("""
      CREATE TABLE IF NOT EXISTS '""" & name & """'(
        id INTEGER PRIMARY KEY,
        value BLOB
      );
    """)
    true
  if hasTable:
    let
      insertStmt = db.prepareStmt(
        "INSERT INTO '" & name & "'(value) VALUES (?);",
        openArray[byte], void, managed = false).expect("this is a valid statement")

      selectStmt = db.prepareStmt(
        "SELECT value FROM '" & name & "' WHERE id = ?;",
        int64, openArray[byte], managed = false).expect("this is a valid statement")

      countStmt = db.prepareStmt(
        "SELECT COUNT(1) FROM '" & name & "';",
        NoParams, int64, managed = false).expect("this is a valid statement")

    var recordCount = int64 0
    let countQueryRes = countStmt.exec do (res: int64):
      recordCount = res

    let found = ? countQueryRes
    if not found:
      return err("Cannot count existing items")
    countStmt.dispose()

    ok(Seq(insertStmt: insertStmt,
           selectStmt: selectStmt,
           recordCount: recordCount))
  else:
    ok(Seq())

proc close*(s: var DbSeq) =
  s.insertStmt.dispose()
  s.selectStmt.dispose()

  reset(s)

proc add*[T](s: var DbSeq[T], val: T) =
  doAssert(distinctBase(s.insertStmt) != nil, "database closed or table not present")
  let bytes = SSZ.encode(val)
  s.insertStmt.exec(bytes).expectDb()
  inc s.recordCount

template len*[T](s: DbSeq[T]): int64 =
  s.recordCount

proc get*[T](s: DbSeq[T], idx: int64): T =
  # This is used only locally
  doAssert(distinctBase(s.selectStmt) != nil, $T & " table not present for read at " & $(idx))

  let resultAddr = addr result

  let queryRes = s.selectStmt.exec(idx + 1) do (recordBytes: openArray[byte]):
    try:
      resultAddr[] = decode(SSZ, recordBytes, T)
    except SerializationError as exc:
      raiseAssert "cannot decode " & $T & " at index " & $idx & ": " & exc.msg

  let found = queryRes.expectDb()
  if not found:
    raiseAssert $T & " not found at index " & $(idx)
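
# Usage sketch (assumes an in-memory `SqStoreRef`, as the test suite creates):
# a `DbSeq` acts as a persistent append-only sequence - `add` appends, `get`
# reads back by zero-based index:
#
#   let store = SqStoreRef.init("", "test", inMemory = true).expectDb()
#   var deposits = DbSeq[DepositData].init(store, "example_deposits").expectDb()
#   deposits.add(DepositData())
#   doAssert deposits.len == 1
#   let first = deposits.get(0)  # zero-based; rows are stored from id 1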

proc init*(T: type FinalizedBlocks, db: SqStoreRef, name: string,
           readOnly = false): KvResult[T] =
  let hasTable = if db.readOnly or readOnly:
    ? db.hasTable(name)
  else:
    ? db.exec("""
      CREATE TABLE IF NOT EXISTS '""" & name & """'(
        id INTEGER PRIMARY KEY,
        value BLOB NOT NULL
      );""")
    true

  if hasTable:
    let
      insertStmt = db.prepareStmt(
        "REPLACE INTO '" & name & "'(id, value) VALUES (?, ?);",
        (int64, array[32, byte]), void, managed = false).expect("this is a valid statement")

      selectStmt = db.prepareStmt(
        "SELECT value FROM '" & name & "' WHERE id = ?;",
        int64, array[32, byte], managed = false).expect("this is a valid statement")
      selectAllStmt = db.prepareStmt(
        "SELECT id, value FROM '" & name & "' ORDER BY id;",
        NoParams, (int64, array[32, byte]), managed = false).expect("this is a valid statement")

      maxIdStmt = db.prepareStmt(
        "SELECT MAX(id) FROM '" & name & "';",
        NoParams, Option[int64], managed = false).expect("this is a valid statement")

      minIdStmt = db.prepareStmt(
        "SELECT MIN(id) FROM '" & name & "';",
        NoParams, Option[int64], managed = false).expect("this is a valid statement")

    var
      low, high: Opt[Slot]
      tmp: Option[int64]

    for rowRes in minIdStmt.exec(tmp):
      expectDb rowRes
      if tmp.isSome():
        low.ok(Slot(tmp.get()))

    for rowRes in maxIdStmt.exec(tmp):
      expectDb rowRes
      if tmp.isSome():
        high.ok(Slot(tmp.get()))

    maxIdStmt.dispose()
    minIdStmt.dispose()

    ok(T(insertStmt: insertStmt,
         selectStmt: selectStmt,
         selectAllStmt: selectAllStmt,
         low: low,
         high: high))
  else:
    ok(T())

proc close*(s: var FinalizedBlocks) =
  s.insertStmt.dispose()
  s.selectStmt.dispose()
  s.selectAllStmt.dispose()
  reset(s)

proc insert*(s: var FinalizedBlocks, slot: Slot, val: Eth2Digest) =
  doAssert slot.uint64 < int64.high.uint64, "Only reasonable slots supported"
  doAssert(distinctBase(s.insertStmt) != nil, "database closed or table not present")

  s.insertStmt.exec((slot.int64, val.data)).expectDb()
  s.low.ok(min(slot, s.low.get(slot)))
  s.high.ok(max(slot, s.high.get(slot)))

proc get*(s: FinalizedBlocks, idx: Slot): Opt[Eth2Digest] =
  if distinctBase(s.selectStmt) == nil: return Opt.none(Eth2Digest)
  var row: s.selectStmt.Result
  for rowRes in s.selectStmt.exec(int64(idx), row):
    expectDb rowRes
    return ok(Eth2Digest(data: row))

  return Opt.none(Eth2Digest)

iterator pairs*(s: FinalizedBlocks): (Slot, Eth2Digest) =
  if distinctBase(s.selectAllStmt) != nil:
    var row: s.selectAllStmt.Result
    for rowRes in s.selectAllStmt.exec(row):
      expectDb rowRes
      yield (Slot(row[0]), Eth2Digest(data: row[1]))
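
# Usage sketch (`store` is a placeholder `SqStoreRef`, `root` a placeholder
# `Eth2Digest`): slots map sparsely to roots - `REPLACE INTO` keeps at most one
# root per slot while `low`/`high` track the populated range:
#
#   var finalized = FinalizedBlocks.init(store, "example_finalized").expectDb()
#   finalized.insert(Slot(100), root)
#   doAssert finalized.get(Slot(100)).isSome()
#   doAssert finalized.get(Slot(99)).isNone()  # holes are allowed
#   for slot, blockRoot in finalized:
#     discard  # visits entries in ascending slot order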

proc loadImmutableValidators(vals: DbSeq[ImmutableValidatorDataDb2]): seq[ImmutableValidatorData2] =
  result = newSeqOfCap[ImmutableValidatorData2](vals.len())
  for i in 0 ..< vals.len:
    let tmp = vals.get(i)
    result.add ImmutableValidatorData2(
      pubkey: tmp.pubkey.loadValid(),
      withdrawal_credentials: tmp.withdrawal_credentials)

template withManyWrites*(dbParam: BeaconChainDB, body: untyped) =
  let
    db = dbParam
    nested = isInsideTransaction(db.db)

  # We don't enforce strong ordering or atomicity requirements in the beacon
  # chain db in general, relying instead on readers to be able to deal with
  # minor inconsistencies - however, putting writes in a transaction is orders
  # of magnitude faster when doing many small writes, so we use this as an
  # optimization technique and the template is named accordingly.
  if not nested:
    expectDb db.db.exec("BEGIN TRANSACTION;")
  var commit = false
  try:
    body
    commit = true
  finally:
    if not nested:
      if commit:
        expectDb db.db.exec("COMMIT TRANSACTION;")
      else:
        # https://www.sqlite.org/lang_transaction.html
        #
        # For all of these errors, SQLite attempts to undo just the one statement
        # it was working on and leave changes from prior statements within the same
        # transaction intact and continue with the transaction. However, depending
        # on the statement being evaluated and the point at which the error occurs,
        # it might be necessary for SQLite to rollback and cancel the entire transaction.
        # An application can tell which course of action SQLite took by using the
        # sqlite3_get_autocommit() C-language interface.
        #
        # It is recommended that applications respond to the errors listed above by
        # explicitly issuing a ROLLBACK command. If the transaction has already been
        # rolled back automatically by the error response, then the ROLLBACK command
        # will fail with an error, but no harm is caused by this.
        #
        if isInsideTransaction(db.db): # calls `sqlite3_get_autocommit`
          expectDb db.db.exec("ROLLBACK TRANSACTION;")
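
# Usage sketch (`blck` and `state` are placeholders): batching many small
# writes into one transaction - nesting is safe because only the outermost
# invocation opens and commits the transaction:
#
#   db.withManyWrites:
#     db.putBlock(blck)
#     db.withManyWrites:  # nested - reuses the enclosing transaction
#       db.putState(state)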

proc new*(T: type BeaconChainDBV0,
          db: SqStoreRef,
          readOnly = false
    ): BeaconChainDBV0 =
  var
    # V0 compatibility tables - these were created WITHOUT ROWID which is slow
    # for large blobs
    backendV0 = kvStore db.openKvStore(
      readOnly = db.readOnly or readOnly).expectDb()
    # state_no_validators is similar to state_no_validators2 but uses a
    # different key encoding and was created WITHOUT ROWID
    stateStoreV0 = kvStore db.openKvStore(
      "state_no_validators", readOnly = db.readOnly or readOnly).expectDb()

  BeaconChainDBV0(
    backend: backendV0,
    stateStore: stateStoreV0,
  )

proc new*(T: type BeaconChainDB,
          db: SqStoreRef,
          cfg: RuntimeConfig = defaultRuntimeConfig
    ): BeaconChainDB =
  if not db.readOnly:
    # Remove the deposits table we used before we switched
    # to storing only deposit contract checkpoints
    if db.exec("DROP TABLE IF EXISTS deposits;").isErr:
      debug "Failed to drop the deposits table"

    # An old pubkey->index mapping that hasn't been used on any mainnet release
    if db.exec("DROP TABLE IF EXISTS validatorIndexFromPubKey;").isErr:
      debug "Failed to drop the validatorIndexFromPubKey table"

  var
    genesisDepositsSeq =
      DbSeq[DepositData].init(db, "genesis_deposits").expectDb()
    immutableValidatorsDb =
      DbSeq[ImmutableValidatorDataDb2].init(db, "immutable_validators2").expectDb()

    # V1 - expected-to-be small rows get without-rowid optimizations
    keyValues = kvStore db.openKvStore("key_values", true).expectDb()
    blocks =
      if cfg.FULU_FORK_EPOCH != FAR_FUTURE_EPOCH: [
        kvStore db.openKvStore("blocks").expectDb(),
        kvStore db.openKvStore("altair_blocks").expectDb(),
        kvStore db.openKvStore("bellatrix_blocks").expectDb(),
        kvStore db.openKvStore("capella_blocks").expectDb(),
        kvStore db.openKvStore("deneb_blocks").expectDb(),
        kvStore db.openKvStore("electra_blocks").expectDb(),
        kvStore db.openKvStore("fulu_blocks").expectDb()]
      else: [
        kvStore db.openKvStore("blocks").expectDb(),
        kvStore db.openKvStore("altair_blocks").expectDb(),
        kvStore db.openKvStore("bellatrix_blocks").expectDb(),
        kvStore db.openKvStore("capella_blocks").expectDb(),
        kvStore db.openKvStore("deneb_blocks").expectDb(),
        kvStore db.openKvStore("electra_blocks").expectDb(),
        kvStore db.openKvStore("").expectDb()]

    stateRoots = kvStore db.openKvStore("state_roots", true).expectDb()

    statesNoVal =
      if cfg.FULU_FORK_EPOCH != FAR_FUTURE_EPOCH: [
        kvStore db.openKvStore("state_no_validators").expectDb(),
        kvStore db.openKvStore("altair_state_no_validators").expectDb(),
        kvStore db.openKvStore("bellatrix_state_no_validators").expectDb(),
        kvStore db.openKvStore("capella_state_no_validator_pubkeys").expectDb(),
        kvStore db.openKvStore("deneb_state_no_validator_pubkeys").expectDb(),
        kvStore db.openKvStore("electra_state_no_validator_pubkeys").expectDb(),
        kvStore db.openKvStore("fulu_state_no_validator_pubkeys").expectDb()]
      else: [
        kvStore db.openKvStore("state_no_validators").expectDb(),
        kvStore db.openKvStore("altair_state_no_validators").expectDb(),
        kvStore db.openKvStore("bellatrix_state_no_validators").expectDb(),
        kvStore db.openKvStore("capella_state_no_validator_pubkeys").expectDb(),
        kvStore db.openKvStore("deneb_state_no_validator_pubkeys").expectDb(),
        kvStore db.openKvStore("electra_state_no_validator_pubkeys").expectDb(),
        kvStore db.openKvStore("").expectDb()]

    stateDiffs = kvStore db.openKvStore("state_diffs").expectDb()
    summaries = kvStore db.openKvStore("beacon_block_summaries", true).expectDb()
    finalizedBlocks = FinalizedBlocks.init(db, "finalized_blocks").expectDb()

    lcData = db.initLightClientDataDB(LightClientDataDBNames(
      altairHeaders: "lc_altair_headers",
      capellaHeaders:
        if cfg.CAPELLA_FORK_EPOCH != FAR_FUTURE_EPOCH:
          "lc_capella_headers"
        else:
          "",
      denebHeaders:
        if cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
          "lc_deneb_headers"
        else:
          "",
      electraHeaders:
        if cfg.ELECTRA_FORK_EPOCH != FAR_FUTURE_EPOCH:
          "lc_electra_headers"
        else:
          "",
      altairCurrentBranches: "lc_altair_current_branches",
      electraCurrentBranches:
        if cfg.ELECTRA_FORK_EPOCH != FAR_FUTURE_EPOCH:
          "lc_electra_current_branches"
        else:
          "",
      altairSyncCommittees: "lc_altair_sync_committees",
      legacyAltairBestUpdates: "lc_altair_best_updates",
      bestUpdates: "lc_best_updates",
      sealedPeriods: "lc_sealed_periods")).expectDb()
  static: doAssert LightClientDataFork.high == LightClientDataFork.Electra

  var blobs = kvStore db.openKvStore("deneb_blobs").expectDb()

  var columns: KvStoreRef
  if cfg.FULU_FORK_EPOCH != FAR_FUTURE_EPOCH:
    columns = kvStore db.openKvStore("fulu_columns").expectDb()

  # Versions prior to 1.4.0 (altair) stored validators in `immutable_validators`,
  # which stores validator keys in compressed format - this is
  # slow to load and has been superseded by `immutable_validators2` which uses
  # uncompressed keys instead. We still support upgrading a database from the
  # old format, but don't need to support downgrading, and therefore safely can
  # remove the keys
  block:
    var immutableValidatorsDb1 = DbSeq[ImmutableValidatorData].init(
      db, "immutable_validators", readOnly = true).expectDb()

    if immutableValidatorsDb.len() < immutableValidatorsDb1.len():
      notice "Migrating validator keys, this may take a minute",
        len = immutableValidatorsDb1.len()
      while immutableValidatorsDb.len() < immutableValidatorsDb1.len():
        let val = immutableValidatorsDb1.get(immutableValidatorsDb.len())
        immutableValidatorsDb.add(ImmutableValidatorDataDb2(
          pubkey: val.pubkey.loadValid().toUncompressed(),
          withdrawal_credentials: val.withdrawal_credentials
        ))
    immutableValidatorsDb1.close()

    if not db.readOnly:
      # Safe because nobody will be downgrading to pre-altair versions
      discard db.exec("DROP TABLE IF EXISTS immutable_validators;")

  T(
    db: db,
    v0: BeaconChainDBV0.new(db, readOnly = true),
    genesisDeposits: genesisDepositsSeq,
    immutableValidatorsDb: immutableValidatorsDb,
    immutableValidators: loadImmutableValidators(immutableValidatorsDb),
    checkpoint: proc() = db.checkpoint(),
    keyValues: keyValues,
    blocks: blocks,
    blobs: blobs,
    columns: columns,
    stateRoots: stateRoots,
    statesNoVal: statesNoVal,
    stateDiffs: stateDiffs,
    summaries: summaries,
    finalizedBlocks: finalizedBlocks,
    lcData: lcData
  )

proc new*(T: type BeaconChainDB,
          dir: string,
          cfg: RuntimeConfig = defaultRuntimeConfig,
          inMemory = false,
          readOnly = false
    ): BeaconChainDB =
  let db =
    if inMemory:
      SqStoreRef.init("", "test", readOnly = readOnly, inMemory = true).expect(
        "working database (out of memory?)")
    else:
      if (let res = secureCreatePath(dir); res.isErr):
        fatal "Failed to create database directory",
          path = dir, err = ioErrorMsg(res.error)
        quit 1

      SqStoreRef.init(
        dir, "nbc", readOnly = readOnly, manualCheckpoint = true).expectDb()
  BeaconChainDB.new(db, cfg)
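
# Usage sketch: an in-memory database as used by the test suite, versus a
# persistent one under a hypothetical data directory:
#
#   let testDb = BeaconChainDB.new("", inMemory = true)
#   defer: testDb.close()
#
#   let nodeDb = BeaconChainDB.new("data/db", cfg = defaultRuntimeConfig)
#   defer: nodeDb.close()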

template getLightClientDataDB*(db: BeaconChainDB): LightClientDataDB =
  db.lcData

proc decodeSSZ*[T](data: openArray[byte], output: var T): bool =
  try:
    readSszBytes(data, output, updateRoot = false)
    true
  except SerializationError as e:
    # If the data can't be deserialized, it could be because it's from a
    # version of the software that uses a different SSZ encoding
    warn "Unable to deserialize data, old database?",
      err = e.msg, typ = name(T), dataLen = data.len
    false

proc decodeSnappySSZ[T](data: openArray[byte], output: var T): bool =
  try:
    let decompressed = snappy.decode(data)
    readSszBytes(decompressed, output, updateRoot = false)
    true
  except SerializationError as e:
    # If the data can't be deserialized, it could be because it's from a
    # version of the software that uses a different SSZ encoding
    warn "Unable to deserialize data, old database?",
      err = e.msg, typ = name(T), dataLen = data.len
    false

proc decodeSZSSZ[T](data: openArray[byte], output: var T): bool =
  try:
    let decompressed = decodeFramed(data, checkIntegrity = false)
    readSszBytes(decompressed, output, updateRoot = false)
    true
  except CatchableError as e:
    # If the data can't be deserialized, it could be because it's from a
    # version of the software that uses a different SSZ encoding
    warn "Unable to deserialize data, old database?",
      err = e.msg, typ = name(T), dataLen = data.len
    false

func encodeSSZ*(v: auto): seq[byte] =
  try:
    SSZ.encode(v)
  except IOError as err:
    raiseAssert err.msg

func encodeSnappySSZ(v: auto): seq[byte] =
  try:
    snappy.encode(SSZ.encode(v))
  except CatchableError as err:
    # In-memory encode shouldn't fail!
    raiseAssert err.msg

func encodeSZSSZ(v: auto): seq[byte] =
  # https://github.com/google/snappy/blob/main/framing_format.txt
  try:
    encodeFramed(SSZ.encode(v))
  except CatchableError as err:
    # In-memory encode shouldn't fail!
    raiseAssert err.msg
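
# Round-trip sketch: the three on-disk encodings are plain SSZ,
# snappy-compressed SSZ and snappy-framed SSZ ("SZ", matching era files) -
# each decode proc is the inverse of the corresponding encode proc:
#
#   var summary: BeaconBlockSummary
#   doAssert decodeSSZ(encodeSSZ(BeaconBlockSummary(slot: Slot(1))), summary)
#   doAssert summary.slot == Slot(1)
#   doAssert decodeSZSSZ(encodeSZSSZ(summary), summary)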

proc getRaw(db: KvStoreRef, key: openArray[byte], T: type Eth2Digest): Opt[T] =
  var res: Opt[T]
  proc decode(data: openArray[byte]) =
    if data.len == sizeof(Eth2Digest):
      res.ok Eth2Digest(data: toArray(sizeof(Eth2Digest), data))
    else:
      # If the data can't be deserialized, it could be because it's from a
      # version of the software that uses a different SSZ encoding
      warn "Unable to deserialize data, old database?",
        typ = name(T), dataLen = data.len
      discard

  discard db.get(key, decode).expectDb()

  res

proc putRaw(db: KvStoreRef, key: openArray[byte], v: Eth2Digest) =
  db.put(key, v.data).expectDb()

type GetResult = enum
  found = "Found"
  notFound = "Not found"
  corrupted = "Corrupted"

proc getSSZ[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
  var status = GetResult.notFound

  let outputPtr = addr output # callback is local, ptr won't escape
  proc decode(data: openArray[byte]) =
    status =
      if decodeSSZ(data, outputPtr[]): GetResult.found
      else: GetResult.corrupted

  discard db.get(key, decode).expectDb()

  status

proc putSSZ(db: KvStoreRef, key: openArray[byte], v: auto) =
  db.put(key, encodeSSZ(v)).expectDb()

proc getSnappySSZ[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
  var status = GetResult.notFound

  let outputPtr = addr output # callback is local, ptr won't escape
  proc decode(data: openArray[byte]) =
    status =
      if decodeSnappySSZ(data, outputPtr[]): GetResult.found
      else: GetResult.corrupted

  discard db.get(key, decode).expectDb()

  status

proc putSnappySSZ(db: KvStoreRef, key: openArray[byte], v: auto) =
  db.put(key, encodeSnappySSZ(v)).expectDb()

proc getSZSSZ[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
  var status = GetResult.notFound

  let outputPtr = addr output # callback is local, ptr won't escape
  proc decode(data: openArray[byte]) =
    status =
      if decodeSZSSZ(data, outputPtr[]): GetResult.found
      else: GetResult.corrupted

  discard db.get(key, decode).expectDb()

  status

proc putSZSSZ(db: KvStoreRef, key: openArray[byte], v: auto) =
  db.put(key, encodeSZSSZ(v)).expectDb()
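
# Usage sketch (`kv` is a placeholder `KvStoreRef`): the get procs distinguish
# a missing key from an undecodable value via `GetResult`:
#
#   var summary: BeaconBlockSummary
#   kv.putSSZ([byte 0], BeaconBlockSummary(slot: Slot(2)))
#   doAssert kv.getSSZ([byte 0], summary) == GetResult.found
#   doAssert kv.getSSZ([byte 1], summary) == GetResult.notFound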

proc close*(db: BeaconChainDBV0) =
  discard db.stateStore.close()
  discard db.backend.close()

proc close*(db: BeaconChainDB) =
  if db.db == nil: return

  # Close things roughly in reverse order
  if not isNil(db.columns):
    discard db.columns.close()
  if not isNil(db.blobs):
    discard db.blobs.close()
  db.lcData.close()
  db.finalizedBlocks.close()
  discard db.summaries.close()
  discard db.stateDiffs.close()
  for kv in db.statesNoVal:
    discard kv.close()
  discard db.stateRoots.close()
  for kv in db.blocks:
    discard kv.close()
  discard db.keyValues.close()

  db.immutableValidatorsDb.close()
  db.genesisDeposits.close()
  db.v0.close()
  db.db.close()

  db.db = nil

func toBeaconBlockSummary*(v: SomeForkyBeaconBlock): BeaconBlockSummary =
  BeaconBlockSummary(
    slot: v.slot,
    parent_root: v.parent_root,
  )

proc putBeaconBlockSummary*(
    db: BeaconChainDB, root: Eth2Digest, value: BeaconBlockSummary) =
  # Summaries are too simple / small to compress, store them as plain SSZ
  db.summaries.putSSZ(root.data, value)

proc putBlock*(
    db: BeaconChainDB,
    value: phase0.TrustedSignedBeaconBlock | altair.TrustedSignedBeaconBlock) =
  db.withManyWrites:
    db.blocks[type(value).kind].putSnappySSZ(value.root.data, value)
    db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())

proc putBlock*(
    db: BeaconChainDB,
    value: bellatrix.TrustedSignedBeaconBlock |
           capella.TrustedSignedBeaconBlock | deneb.TrustedSignedBeaconBlock |
           electra.TrustedSignedBeaconBlock | fulu.TrustedSignedBeaconBlock) =
  db.withManyWrites:
    db.blocks[type(value).kind].putSZSSZ(value.root.data, value)
    db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())
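
# The two overloads differ only in encoding: phase0/altair blocks predate the
# switch to snappy-framed storage and stay on unframed snappy for backwards
# compatibility, while bellatrix and later use "SZ" framing (see the
# BeaconChainDBV0 notes above). A sketch, with `blck` a placeholder trusted
# block - overload resolution picks the table and encoding by fork:
#
#   db.putBlock(blck)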

proc putBlobSidecar*(
    db: BeaconChainDB,
    value: BlobSidecar) =
  let block_root = hash_tree_root(value.signed_block_header.message)
  db.blobs.putSZSSZ(blobkey(block_root, value.index), value)

proc delBlobSidecar*(
    db: BeaconChainDB,
    root: Eth2Digest, index: BlobIndex): bool =
  db.blobs.del(blobkey(root, index)).expectDb()

proc putDataColumnSidecar*(
    db: BeaconChainDB,
    value: DataColumnSidecar) =
  let block_root = hash_tree_root(value.signed_block_header.message)
  db.columns.putSZSSZ(columnkey(block_root, value.index), value)

proc delDataColumnSidecar*(
    db: BeaconChainDB,
    root: Eth2Digest, index: ColumnIndex): bool =
  db.columns.del(columnkey(root, index)).expectDb()

proc updateImmutableValidators*(
    db: BeaconChainDB, validators: openArray[Validator]) =
  # Must be called before storing a state that references the new validators
  let numValidators = validators.len

  while db.immutableValidators.len() < numValidators:
    let immutableValidator =
      getImmutableValidatorData(validators[db.immutableValidators.len()])
    if not db.db.readOnly:
      db.immutableValidatorsDb.add ImmutableValidatorDataDb2(
        pubkey: immutableValidator.pubkey.toUncompressed(),
        withdrawal_credentials: immutableValidator.withdrawal_credentials)
    db.immutableValidators.add immutableValidator

template toBeaconStateNoImmutableValidators(state: phase0.BeaconState):
    Phase0BeaconStateNoImmutableValidators =
  isomorphicCast[Phase0BeaconStateNoImmutableValidators](state)

template toBeaconStateNoImmutableValidators(state: altair.BeaconState):
    AltairBeaconStateNoImmutableValidators =
  isomorphicCast[AltairBeaconStateNoImmutableValidators](state)

template toBeaconStateNoImmutableValidators(state: bellatrix.BeaconState):
    BellatrixBeaconStateNoImmutableValidators =
  isomorphicCast[BellatrixBeaconStateNoImmutableValidators](state)

template toBeaconStateNoImmutableValidators(state: capella.BeaconState):
    CapellaBeaconStateNoImmutableValidators =
  isomorphicCast[CapellaBeaconStateNoImmutableValidators](state)

template toBeaconStateNoImmutableValidators(state: deneb.BeaconState):
    DenebBeaconStateNoImmutableValidators =
  isomorphicCast[DenebBeaconStateNoImmutableValidators](state)

template toBeaconStateNoImmutableValidators(state: electra.BeaconState):
    ElectraBeaconStateNoImmutableValidators =
  isomorphicCast[ElectraBeaconStateNoImmutableValidators](state)

template toBeaconStateNoImmutableValidators(state: fulu.BeaconState):
    FuluBeaconStateNoImmutableValidators =
  isomorphicCast[FuluBeaconStateNoImmutableValidators](state)
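
# `isomorphicCast` reinterprets the state as a memory-layout-identical type
# whose validator entries omit the immutable fields during (de)serialization -
# no copy of the multi-megabyte state is made.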

proc putState*(
    db: BeaconChainDB, key: Eth2Digest,
    value: phase0.BeaconState | altair.BeaconState) =
  db.updateImmutableValidators(value.validators.asSeq())
  db.statesNoVal[type(value).kind].putSnappySSZ(
    key.data, toBeaconStateNoImmutableValidators(value))

proc putState*(
    db: BeaconChainDB, key: Eth2Digest,
    value: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
           electra.BeaconState | fulu.BeaconState) =
  db.updateImmutableValidators(value.validators.asSeq())
  db.statesNoVal[type(value).kind].putSZSSZ(
    key.data, toBeaconStateNoImmutableValidators(value))

proc putState*(db: BeaconChainDB, state: ForkyHashedBeaconState) =
  db.withManyWrites:
    db.putStateRoot(state.latest_block_root, state.data.slot, state.root)
    db.putState(state.root, state.data)

# For testing rollback
proc putCorruptState*(
    db: BeaconChainDB, fork: static ConsensusFork, key: Eth2Digest) =
  db.statesNoVal[fork].putSnappySSZ(key.data, Validator())
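
# Usage sketch (assuming `state` is a ForkyHashedBeaconState produced by the
# state transition): one call stores the state and the
# (block root, slot) -> state root mapping atomically:
#   db.putState(state)
#   doAssert db.getStateRoot(state.latest_block_root, state.data.slot).isSome()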

func stateRootKey(root: Eth2Digest, slot: Slot): array[40, byte] =
  var ret: array[40, byte]
  # big endian to get a naturally ascending order on slots in sorted indices
  ret[0..<8] = toBytesBE(slot.uint64)
  ret[8..<40] = root.data

  ret

proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
    value: Eth2Digest) =
  db.stateRoots.putRaw(stateRootKey(root, slot), value)

proc putStateDiff*(db: BeaconChainDB, root: Eth2Digest, value: BeaconStateDiff) =
  db.stateDiffs.putSnappySSZ(root.data, value)
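
# Key layout example: for slot 1 the 40-byte key is
#   0000000000000001 || <32-byte block root>
# so a sorted index scan naturally visits state roots in slot order.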

proc delBlock*(db: BeaconChainDB, fork: ConsensusFork, key: Eth2Digest): bool =
  var deleted = false
  db.withManyWrites:
    discard db.summaries.del(key.data).expectDb()
    deleted = db.blocks[fork].del(key.data).expectDb()
  deleted

proc delState*(db: BeaconChainDB, fork: ConsensusFork, key: Eth2Digest) =
  discard db.statesNoVal[fork].del(key.data).expectDb()

proc clearBlocks*(db: BeaconChainDB, fork: ConsensusFork): bool =
  db.blocks[fork].clear().expectDb()

proc clearStates*(db: BeaconChainDB, fork: ConsensusFork): bool =
  db.statesNoVal[fork].clear().expectDb()

proc delStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot) =
  discard db.stateRoots.del(stateRootKey(root, slot)).expectDb()

proc delStateDiff*(db: BeaconChainDB, root: Eth2Digest) =
  discard db.stateDiffs.del(root.data).expectDb()

proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.keyValues.putRaw(subkey(kHeadBlock), key)

proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.keyValues.putRaw(subkey(kTailBlock), key)

proc putGenesisBlock*(db: BeaconChainDB, key: Eth2Digest) =
  db.keyValues.putRaw(subkey(kGenesisBlock), key)
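
# Head, tail and genesis roots are singletons in the key-value table, stored
# under fixed `subkey` tags rather than content-derived keys.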

proc putDepositContractSnapshot*(
    db: BeaconChainDB, snapshot: DepositContractSnapshot) =
  db.withManyWrites:
    db.keyValues.putSnappySSZ(subkey(kDepositContractSnapshot),
                              snapshot)
    # TODO: We currently store this redundant old snapshot in order
    #       to allow the users to rollback to a previous version
    #       of Nimbus without problems. It would be reasonable
    #       to remove this in Nimbus 23.2
    db.keyValues.putSnappySSZ(subkey(kOldDepositContractSnapshot),
                              snapshot.toOldDepositContractSnapshot)

proc hasDepositContractSnapshot*(db: BeaconChainDB): bool =
  expectDb(subkey(kDepositContractSnapshot) in db.keyValues)

proc getDepositContractSnapshot*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
  result.ok(default DepositContractSnapshot)
  let r = db.keyValues.getSnappySSZ(
    subkey(kDepositContractSnapshot), result.get)
  if r != GetResult.found: result.err()

proc getUpgradableDepositSnapshot*(db: BeaconChainDB): Option[OldDepositContractSnapshot] =
  var dcs: OldDepositContractSnapshot
  let oldKey = subkey(kOldDepositContractSnapshot)
  if db.keyValues.getSnappySSZ(oldKey, dcs) != GetResult.found:
    # Old record is not present in the current database.
    # We need to take a look in the v0 database as well.
    if db.v0.backend.getSnappySSZ(oldKey, dcs) != GetResult.found:
      return

  return some dcs
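
# Migration sketch: at startup, a caller can check `hasDepositContractSnapshot`
# and, if it is false, use `getUpgradableDepositSnapshot` to fetch an
# old-format snapshot (from this database or the legacy v0 tables), convert it
# and store it via `putDepositContractSnapshot`.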

proc getPhase0Block(
    db: BeaconChainDBV0, key: Eth2Digest): Opt[phase0.TrustedSignedBeaconBlock] =
  # We only store blocks that we trust in the database
  result.ok(default(phase0.TrustedSignedBeaconBlock))
  if db.backend.getSnappySSZ(
      subkey(phase0.SignedBeaconBlock, key), result.get) != GetResult.found:
    result.err()
  else:
    # set root after deserializing (so it doesn't get zeroed)
    result.get().root = key

proc getBlock*(
    db: BeaconChainDB, key: Eth2Digest,
    T: type phase0.TrustedSignedBeaconBlock): Opt[T] =
  # We only store blocks that we trust in the database
  result.ok(default(T))
  if db.blocks[T.kind].getSnappySSZ(key.data, result.get) != GetResult.found:
    # During the initial phase0 releases, blocks were stored in a different table
    result = db.v0.getPhase0Block(key)
  else:
    # set root after deserializing (so it doesn't get zeroed)
    result.get().root = key

proc getBlock*(
    db: BeaconChainDB, key: Eth2Digest,
    T: type altair.TrustedSignedBeaconBlock): Opt[T] =
  # We only store blocks that we trust in the database
  result.ok(default(T))
  if db.blocks[T.kind].getSnappySSZ(key.data, result.get) == GetResult.found:
    # set root after deserializing (so it doesn't get zeroed)
    result.get().root = key
  else:
    result.err()

proc getBlock*[
    X: bellatrix.TrustedSignedBeaconBlock | capella.TrustedSignedBeaconBlock |
       deneb.TrustedSignedBeaconBlock | electra.TrustedSignedBeaconBlock |
       fulu.TrustedSignedBeaconBlock](
    db: BeaconChainDB, key: Eth2Digest,
    T: type X): Opt[T] =
  # We only store blocks that we trust in the database
  result.ok(default(T))
  if db.blocks[T.kind].getSZSSZ(key.data, result.get) == GetResult.found:
    # set root after deserializing (so it doesn't get zeroed)
    result.get().root = key
  else:
    result.err()
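
# Retrieval sketch: the block type selects the table and codec, so a caller
# holding only a `ConsensusFork` can dispatch per fork at compile time:
#   withConsensusFork(fork):
#     let blck = db.getBlock(root, consensusFork.TrustedSignedBeaconBlock)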

proc getPhase0BlockSSZ(
    db: BeaconChainDBV0, key: Eth2Digest, data: var seq[byte]): bool =
  let dataPtr = addr data # Short-lived
  var success = true
  func decode(data: openArray[byte]) =
    dataPtr[] = snappy.decode(data)
    success = dataPtr[].len > 0
  db.backend.get(subkey(phase0.SignedBeaconBlock, key), decode).expectDb() and
    success

proc getPhase0BlockSZ(
    db: BeaconChainDBV0, key: Eth2Digest, data: var seq[byte]): bool =
  let dataPtr = addr data # Short-lived
  var success = true
  func decode(data: openArray[byte]) =
    dataPtr[] = snappy.encodeFramed(snappy.decode(data))
    success = dataPtr[].len > 0
  db.backend.get(subkey(phase0.SignedBeaconBlock, key), decode).expectDb() and
    success
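
# "SZ" denotes snappy-framed bytes: v0 rows were written with unframed snappy,
# so `getPhase0BlockSZ` decodes and re-frames them on the fly so that callers
# always receive the framed encoding.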

# SSZ implementations are separate so as to avoid unnecessary data copies
proc getBlockSSZ*(
    db: BeaconChainDB, key: Eth2Digest, data: var seq[byte],
    T: type phase0.TrustedSignedBeaconBlock): bool =
  let dataPtr = addr data # Short-lived
  var success = true
  func decode(data: openArray[byte]) =
    dataPtr[] = snappy.decode(data)
    success = dataPtr[].len > 0
  db.blocks[ConsensusFork.Phase0].get(key.data, decode).expectDb() and success or
    db.v0.getPhase0BlockSSZ(key, data)

proc getBlockSSZ*(
    db: BeaconChainDB, key: Eth2Digest, data: var seq[byte],
    T: type altair.TrustedSignedBeaconBlock): bool =
  let dataPtr = addr data # Short-lived
  var success = true
  func decode(data: openArray[byte]) =
    dataPtr[] = snappy.decode(data)
    success = dataPtr[].len > 0
  db.blocks[T.kind].get(key.data, decode).expectDb() and success

proc getBlockSSZ*[
    X: bellatrix.TrustedSignedBeaconBlock | capella.TrustedSignedBeaconBlock |
       deneb.TrustedSignedBeaconBlock | electra.TrustedSignedBeaconBlock |
       fulu.TrustedSignedBeaconBlock](
    db: BeaconChainDB, key: Eth2Digest, data: var seq[byte], T: type X): bool =
  let dataPtr = addr data # Short-lived
  var success = true
  func decode(data: openArray[byte]) =
    dataPtr[] = decodeFramed(data, checkIntegrity = false)
    success = dataPtr[].len > 0
  db.blocks[T.kind].get(key.data, decode).expectDb() and success

proc getBlockSSZ*(
    db: BeaconChainDB, key: Eth2Digest, data: var seq[byte],
    fork: ConsensusFork): bool =
  withConsensusFork(fork):
    getBlockSSZ(db, key, data, consensusFork.TrustedSignedBeaconBlock)

proc getBlobSidecarSZ*(db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
                       data: var seq[byte]): bool =
  let dataPtr = addr data # Short-lived
  func decode(data: openArray[byte]) =
    assign(dataPtr[], data)
  db.blobs.get(blobkey(root, index), decode).expectDb()

proc getBlobSidecar*(db: BeaconChainDB, root: Eth2Digest, index: BlobIndex,
                     value: var BlobSidecar): bool =
  db.blobs.getSZSSZ(blobkey(root, index), value) == GetResult.found

proc getDataColumnSidecarSZ*(db: BeaconChainDB, root: Eth2Digest,
                             index: ColumnIndex, data: var seq[byte]): bool =
  let dataPtr = addr data # Short-lived
  func decode(data: openArray[byte]) =
    assign(dataPtr[], data)
  db.columns.get(columnkey(root, index), decode).expectDb()

proc getDataColumnSidecar*(db: BeaconChainDB, root: Eth2Digest, index: ColumnIndex,
                           value: var DataColumnSidecar): bool =
  db.columns.getSZSSZ(columnkey(root, index), value) == GetResult.found

proc getBlockSZ*(
    db: BeaconChainDB, key: Eth2Digest, data: var seq[byte],
    T: type phase0.TrustedSignedBeaconBlock): bool =
  let dataPtr = addr data # Short-lived
  var success = true
  func decode(data: openArray[byte]) =
    dataPtr[] = snappy.encodeFramed(snappy.decode(data))
    success = dataPtr[].len > 0
  db.blocks[ConsensusFork.Phase0].get(key.data, decode).expectDb() and success or
    db.v0.getPhase0BlockSZ(key, data)

proc getBlockSZ*(
    db: BeaconChainDB, key: Eth2Digest, data: var seq[byte],
    T: type altair.TrustedSignedBeaconBlock): bool =
  let dataPtr = addr data # Short-lived
  var success = true
  func decode(data: openArray[byte]) =
    dataPtr[] = snappy.encodeFramed(snappy.decode(data))
    success = dataPtr[].len > 0
  db.blocks[T.kind].get(key.data, decode).expectDb() and success

proc getBlockSZ*[
    X: bellatrix.TrustedSignedBeaconBlock | capella.TrustedSignedBeaconBlock |
       deneb.TrustedSignedBeaconBlock | electra.TrustedSignedBeaconBlock |
       fulu.TrustedSignedBeaconBlock](
    db: BeaconChainDB, key: Eth2Digest, data: var seq[byte], T: type X): bool =
  let dataPtr = addr data # Short-lived
  func decode(data: openArray[byte]) =
    assign(dataPtr[], data)
  db.blocks[T.kind].get(key.data, decode).expectDb()

proc getBlockSZ*(
    db: BeaconChainDB, key: Eth2Digest, data: var seq[byte],
    fork: ConsensusFork): bool =
  withConsensusFork(fork):
    getBlockSZ(db, key, data, consensusFork.TrustedSignedBeaconBlock)
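
# For bellatrix and later the stored bytes are already snappy-framed, so
# `getBlockSZ` hands them out verbatim - only phase0/altair blocks need the
# decode/re-encode round trip above.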

proc getStateOnlyMutableValidators(
    immutableValidators: openArray[ImmutableValidatorData2],
    store: KvStoreRef, key: openArray[byte],
    output: var (phase0.BeaconState | altair.BeaconState),
    rollback: RollbackProc): bool =
  ## Load state into `output` - BeaconState is large so we want to avoid
  ## re-allocating it if possible
  ## Return `true` iff the entry was found in the database and `output` was
  ## overwritten.
  ## Rollback will be called only if output was partially written - if it was
  ## not found at all, rollback will not be called
  # TODO rollback is needed to deal with bug - use `noRollback` to ignore:
  #      https://github.com/nim-lang/Nim/issues/14126

  let prevNumValidators = output.validators.len

  case store.getSnappySSZ(key, toBeaconStateNoImmutableValidators(output))
  of GetResult.found:
    let numValidators = output.validators.len
    doAssert immutableValidators.len >= numValidators

    for i in prevNumValidators ..< numValidators:
      let
        # Bypass hash cache invalidation
        dstValidator = addr output.validators.data[i]

      assign(
        dstValidator.pubkeyData,
        HashedValidatorPubKey.init(
          immutableValidators[i].pubkey.toPubKey()))
      assign(
        dstValidator.withdrawal_credentials,
        immutableValidators[i].withdrawal_credentials)
      output.validators.clearCaches(i)

    true
  of GetResult.notFound:
    false
  of GetResult.corrupted:
    rollback()
    false

proc getStateOnlyMutableValidators(
    immutableValidators: openArray[ImmutableValidatorData2],
    store: KvStoreRef, key: openArray[byte],
    output: var bellatrix.BeaconState, rollback: RollbackProc): bool =
  ## Load state into `output` - BeaconState is large so we want to avoid
  ## re-allocating it if possible
  ## Return `true` iff the entry was found in the database and `output` was
  ## overwritten.
  ## Rollback will be called only if output was partially written - if it was
  ## not found at all, rollback will not be called
  # TODO rollback is needed to deal with bug - use `noRollback` to ignore:
  #      https://github.com/nim-lang/Nim/issues/14126

  let prevNumValidators = output.validators.len

  case store.getSZSSZ(key, toBeaconStateNoImmutableValidators(output))
  of GetResult.found:
    let numValidators = output.validators.len
    doAssert immutableValidators.len >= numValidators

    for i in prevNumValidators ..< numValidators:
      # Bypass hash cache invalidation
      let dstValidator = addr output.validators.data[i]

      assign(
        dstValidator.pubkeyData,
        HashedValidatorPubKey.init(
          immutableValidators[i].pubkey.toPubKey()))
      assign(
        dstValidator.withdrawal_credentials,
        immutableValidators[i].withdrawal_credentials)
      output.validators.clearCaches(i)

    true
  of GetResult.notFound:
    false
  of GetResult.corrupted:
    rollback()
    false

proc getStateOnlyMutableValidators(
    immutableValidators: openArray[ImmutableValidatorData2],
    store: KvStoreRef, key: openArray[byte],
    output: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState |
                 fulu.BeaconState),
    rollback: RollbackProc): bool =
  ## Load state into `output` - BeaconState is large so we want to avoid
  ## re-allocating it if possible
  ## Return `true` iff the entry was found in the database and `output` was
  ## overwritten.
  ## Rollback will be called only if output was partially written - if it was
  ## not found at all, rollback will not be called
  # TODO rollback is needed to deal with bug - use `noRollback` to ignore:
  #      https://github.com/nim-lang/Nim/issues/14126

  let prevNumValidators = output.validators.len

  case store.getSZSSZ(key, toBeaconStateNoImmutableValidators(output))
  of GetResult.found:
    let numValidators = output.validators.len
    doAssert immutableValidators.len >= numValidators

    for i in prevNumValidators ..< numValidators:
      # Bypass hash cache invalidation
      let dstValidator = addr output.validators.data[i]
      assign(
        dstValidator.pubkeyData,
        HashedValidatorPubKey.init(
          immutableValidators[i].pubkey.toPubKey()))
      output.validators.clearCaches(i)

    true
  of GetResult.notFound:
    false
  of GetResult.corrupted:
    rollback()
    false
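
# Unlike the earlier overloads, the capella+ overload does not restore
# withdrawal credentials from the immutable table: from capella onwards they
# may change via BLS-to-execution changes, so they are stored with the state.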

proc getState(
    db: BeaconChainDBV0,
    immutableValidators: openArray[ImmutableValidatorData2],
    key: Eth2Digest, output: var phase0.BeaconState,
    rollback: RollbackProc): bool =
  # Nimbus 1.0 reads and writes genesis BeaconState to `backend`
  # Nimbus 1.1 writes a genesis BeaconStateNoImmutableValidators to `backend` and
  # reads both BeaconState and BeaconStateNoImmutableValidators from `backend`
  # Nimbus 1.2 writes a genesis BeaconStateNoImmutableValidators to `stateStore`
  # and reads BeaconState from `backend` and BeaconStateNoImmutableValidators
  # from `stateStore`. We will try to read the state from all these locations.
  if getStateOnlyMutableValidators(
      immutableValidators, db.stateStore,
      subkey(Phase0BeaconStateNoImmutableValidators, key), output, rollback):
    return true
  if getStateOnlyMutableValidators(
      immutableValidators, db.backend,
      subkey(Phase0BeaconStateNoImmutableValidators, key), output, rollback):
    return true

  case db.backend.getSnappySSZ(subkey(phase0.BeaconState, key), output)
  of GetResult.found:
    true
  of GetResult.notFound:
    false
  of GetResult.corrupted:
    rollback()
    false

proc getState*(
    db: BeaconChainDB, key: Eth2Digest, output: var phase0.BeaconState,
    rollback: RollbackProc): bool =
  ## Load state into `output` - BeaconState is large so we want to avoid
  ## re-allocating it if possible
  ## Return `true` iff the entry was found in the database and `output` was
  ## overwritten.
  ## Rollback will be called only if output was partially written - if it was
  ## not found at all, rollback will not be called
  # TODO rollback is needed to deal with bug - use `noRollback` to ignore:
  #      https://github.com/nim-lang/Nim/issues/14126
  type T = type(output)

  if not getStateOnlyMutableValidators(
      db.immutableValidators, db.statesNoVal[T.kind], key.data, output, rollback):
    db.v0.getState(db.immutableValidators, key, output, rollback)
  else:
    true

proc getState*(
    db: BeaconChainDB, key: Eth2Digest,
    output: var (altair.BeaconState | bellatrix.BeaconState |
                 capella.BeaconState | deneb.BeaconState | electra.BeaconState |
                 fulu.BeaconState),
    rollback: RollbackProc): bool =
  ## Load state into `output` - BeaconState is large so we want to avoid
  ## re-allocating it if possible
  ## Return `true` iff the entry was found in the database and `output` was
  ## overwritten.
  ## Rollback will be called only if output was partially written - if it was
  ## not found at all, rollback will not be called
  # TODO rollback is needed to deal with bug - use `noRollback` to ignore:
  #      https://github.com/nim-lang/Nim/issues/14126
  type T = type(output)
  getStateOnlyMutableValidators(
    db.immutableValidators, db.statesNoVal[T.kind], key.data, output,
    rollback)

proc getState*(
    db: BeaconChainDB, fork: ConsensusFork, state_root: Eth2Digest,
    state: var ForkedHashedBeaconState, rollback: RollbackProc): bool =
  if state.kind != fork:
    # Avoid temporary (!)
    state = (ref ForkedHashedBeaconState)(kind: fork)[]

  withState(state):
    if not db.getState(state_root, forkyState.data, rollback):
      return false

    forkyState.root = state_root

  true
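
# Loading sketch (assuming `fork` and `state_root` identify a stored state):
#   var state = (ref ForkedHashedBeaconState)(kind: fork)[]
#   if db.getState(fork, state_root, state, noRollback):
#     # state now holds the requested fork's data with `root` set
# `noRollback` is appropriate when `state` holds no prior data worth keeping.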

proc getStateRoot(db: BeaconChainDBV0,
                  root: Eth2Digest,
                  slot: Slot): Opt[Eth2Digest] =
  db.backend.getRaw(subkey(root, slot), Eth2Digest)

proc getStateRoot*(db: BeaconChainDB,
                   root: Eth2Digest,
                   slot: Slot): Opt[Eth2Digest] =
  db.stateRoots.getRaw(stateRootKey(root, slot), Eth2Digest) or
    db.v0.getStateRoot(root, slot)

proc getStateDiff*(db: BeaconChainDB,
                   root: Eth2Digest): Opt[BeaconStateDiff] =
  result.ok(BeaconStateDiff())
  if db.stateDiffs.getSnappySSZ(root.data, result.get) != GetResult.found:
    result.err

proc getHeadBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
  db.backend.getRaw(subkey(kHeadBlock), Eth2Digest)

proc getHeadBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
  db.keyValues.getRaw(subkey(kHeadBlock), Eth2Digest) or
    db.v0.getHeadBlock()

proc getTailBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
  db.backend.getRaw(subkey(kTailBlock), Eth2Digest)

proc getTailBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
  db.keyValues.getRaw(subkey(kTailBlock), Eth2Digest) or
    db.v0.getTailBlock()

proc getGenesisBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
  db.backend.getRaw(subkey(kGenesisBlock), Eth2Digest)

proc getGenesisBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
  db.keyValues.getRaw(subkey(kGenesisBlock), Eth2Digest) or
    db.v0.getGenesisBlock()

proc containsBlock*(db: BeaconChainDBV0, key: Eth2Digest): bool =
  db.backend.contains(subkey(phase0.SignedBeaconBlock, key)).expectDb()

proc containsBlock*(
    db: BeaconChainDB, key: Eth2Digest,
    T: type phase0.TrustedSignedBeaconBlock): bool =
  db.blocks[T.kind].contains(key.data).expectDb() or
    db.v0.containsBlock(key)

proc containsBlock*[
    X: altair.TrustedSignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock |
       capella.TrustedSignedBeaconBlock | deneb.TrustedSignedBeaconBlock |
       electra.TrustedSignedBeaconBlock | fulu.TrustedSignedBeaconBlock](
    db: BeaconChainDB, key: Eth2Digest, T: type X): bool =
  db.blocks[X.kind].contains(key.data).expectDb()

proc containsBlock*(db: BeaconChainDB, key: Eth2Digest, fork: ConsensusFork): bool =
  case fork
  of ConsensusFork.Phase0: containsBlock(db, key, phase0.TrustedSignedBeaconBlock)
  else: db.blocks[fork].contains(key.data).expectDb()

proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
  for fork in countdown(ConsensusFork.high, ConsensusFork.low):
    if db.containsBlock(key, fork): return true

  false
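
# The fork-agnostic `containsBlock` probes tables newest-fork-first, matching
# the common case of queries for recent blocks.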

proc containsState*(db: BeaconChainDBV0, key: Eth2Digest): bool =
  let sk = subkey(Phase0BeaconStateNoImmutableValidators, key)
  db.stateStore.contains(sk).expectDb() or
    db.backend.contains(sk).expectDb() or
    db.backend.contains(subkey(phase0.BeaconState, key)).expectDb()

proc containsState*(db: BeaconChainDB, fork: ConsensusFork, key: Eth2Digest,
                    legacy: bool = true): bool =
  if db.statesNoVal[fork].contains(key.data).expectDb(): return true

  (legacy and fork == ConsensusFork.Phase0 and db.v0.containsState(key))

proc containsState*(db: BeaconChainDB, key: Eth2Digest, legacy: bool = true): bool =
  for fork in countdown(ConsensusFork.high, ConsensusFork.low):
    if db.statesNoVal[fork].contains(key.data).expectDb(): return true

  (legacy and db.v0.containsState(key))

proc getBeaconBlockSummary*(db: BeaconChainDB, root: Eth2Digest):
    Opt[BeaconBlockSummary] =
  var summary: BeaconBlockSummary
  if db.summaries.getSSZ(root.data, summary) == GetResult.found:
    ok(summary)
  else:
    err()

proc loadStateRoots*(db: BeaconChainDB): Table[(Slot, Eth2Digest), Eth2Digest] =
  ## Load all known state roots - just because we have a state root doesn't
  ## mean we also have a state (and vice versa)!
  var state_roots = initTable[(Slot, Eth2Digest), Eth2Digest](1024)

  discard db.stateRoots.find([], proc(k, v: openArray[byte]) =
    if k.len() == 40 and v.len() == 32:
      # For legacy reasons, the first byte of the slot is not part of the slot
      # but rather a subkey identifier - see subkey
      var tmp = toArray(8, k.toOpenArray(0, 7))
      tmp[0] = 0
      state_roots[
        (Slot(uint64.fromBytesBE(tmp)),
        Eth2Digest(data: toArray(sizeof(Eth2Digest), k.toOpenArray(8, 39))))] =
        Eth2Digest(data: toArray(sizeof(Eth2Digest), v))
    else:
      warn "Invalid state root in database", klen = k.len(), vlen = v.len()
  )

  state_roots

proc loadSummaries*(db: BeaconChainDB): Table[Eth2Digest, BeaconBlockSummary] =
  # Load summaries into table - there's no telling what order they're in so we
  # load them all - bugs in nim prevent this code from living in the iterator.
  var summaries = initTable[Eth2Digest, BeaconBlockSummary](1024*1024)

  discard db.summaries.find([], proc(k, v: openArray[byte]) =
    var output: BeaconBlockSummary

    if k.len() == sizeof(Eth2Digest) and decodeSSZ(v, output):
      summaries[Eth2Digest(data: toArray(sizeof(Eth2Digest), k))] = output
    else:
      warn "Invalid summary in database", klen = k.len(), vlen = v.len()
  )

  summaries

type RootedSummary = tuple[root: Eth2Digest, summary: BeaconBlockSummary]
iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
    RootedSummary =
  ## Load a chain of ancestors for the given block root - iterates over blocks
  ## starting from `root` and moving parent by parent
  ##
  ## The search will go on until an ancestor cannot be found.

  var
    res: RootedSummary
    newSummaries: seq[RootedSummary]

  res.root = root

  # Yield summaries in reverse chain order by walking the parent references.
  # If a summary is missing, try loading it from the older version or create one
  # from block data.

  const summariesQuery = """
WITH RECURSIVE
  next(v) as (
    SELECT value FROM beacon_block_summaries
    WHERE `key` == ?

    UNION ALL
    SELECT value FROM beacon_block_summaries
    INNER JOIN next ON `key` == substr(v, 9, 32)
)
SELECT v FROM next;
"""
  let
    stmt = expectDb db.db.prepareStmt(
      summariesQuery, array[32, byte],
      array[sizeof(BeaconBlockSummary), byte],
      managed = false)

  defer: # in case iteration is stopped along the way
    # Write the newly found summaries in a single transaction - on first migration
    # from the old format, this brings down the write from minutes to seconds
    stmt.dispose()

    if not db.db.readOnly:
      if newSummaries.len() > 0:
        db.withManyWrites:
          for s in newSummaries:
            db.putBeaconBlockSummary(s.root, s.summary)

      if db.db.hasTable("kvstore").expectDb():
        # Clean up pre-altair summaries - by now, we will have moved them to the
        # new table
        db.db.exec(
          "DELETE FROM kvstore WHERE key >= ? and key < ?",
          ([byte ord(kHashToBlockSummary)], [byte ord(kHashToBlockSummary) + 1])).expectDb()

  var row: stmt.Result
  for rowRes in exec(stmt, root.data, row):
    expectDb rowRes
    if decodeSSZ(row, res.summary):
      yield res
      res.root = res.summary.parent_root

  # Backwards compat for reading old databases, or those that for whatever
  # reason lost a summary along the way..
  static: doAssert ConsensusFork.high == ConsensusFork.Fulu
  while true:
    if db.v0.backend.getSnappySSZ(
        subkey(BeaconBlockSummary, res.root), res.summary) == GetResult.found:
      discard # Just yield below
    elif (let blck = db.getBlock(res.root, phase0.TrustedSignedBeaconBlock); blck.isSome()):
      res.summary = blck.get().message.toBeaconBlockSummary()
    elif (let blck = db.getBlock(res.root, altair.TrustedSignedBeaconBlock); blck.isSome()):
      res.summary = blck.get().message.toBeaconBlockSummary()
    elif (let blck = db.getBlock(res.root, bellatrix.TrustedSignedBeaconBlock); blck.isSome()):
      res.summary = blck.get().message.toBeaconBlockSummary()
    elif (let blck = db.getBlock(res.root, capella.TrustedSignedBeaconBlock); blck.isSome()):
      res.summary = blck.get().message.toBeaconBlockSummary()
    elif (let blck = db.getBlock(res.root, deneb.TrustedSignedBeaconBlock); blck.isSome()):
      res.summary = blck.get().message.toBeaconBlockSummary()
    elif (let blck = db.getBlock(res.root, electra.TrustedSignedBeaconBlock); blck.isSome()):
      res.summary = blck.get().message.toBeaconBlockSummary()
    elif (let blck = db.getBlock(res.root, fulu.TrustedSignedBeaconBlock); blck.isSome()):
      res.summary = blck.get().message.toBeaconBlockSummary()
    else:
      break

    yield res

    # Next time, load them from the right place
    newSummaries.add(res)

    res.root = res.summary.parent_root
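
# The recursive CTE above follows `parent_root`, which sits at byte offset
# 8..39 of the SSZ-encoded BeaconBlockSummary (after the 8-byte slot) - hence
# `substr(v, 9, 32)`, SQLite substr being 1-based. This walks the entire
# ancestor chain in a single query instead of one round trip per block.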

# Test operations used to create broken and/or legacy database

proc putStateV0*(db: BeaconChainDBV0, key: Eth2Digest, value: phase0.BeaconState) =
  # Writes to KVStore, as done in 1.0.12 and earlier
  db.backend.putSnappySSZ(subkey(type value, key), value)

proc putBlockV0*(db: BeaconChainDBV0, value: phase0.TrustedSignedBeaconBlock) =
  # Write to KVStore, as done in 1.0.12 and earlier
  # In particular, no summary is written here - it should be recreated
  # automatically
  db.backend.putSnappySSZ(subkey(phase0.SignedBeaconBlock, value.root), value)