Revert "Upgrade database schema" (#2570)

This reverts commit 22ddf74752.
tersec 2021-05-17 06:34:44 +00:00 committed by GitHub
parent 22ddf74752
commit 6057c2ffb4
15 changed files with 477 additions and 397 deletions

View File

@@ -84,12 +84,14 @@ OK: 11/11 Fail: 0/11 Skip: 0/11
+ empty database [Preset: mainnet] OK
+ find ancestors [Preset: mainnet] OK
+ sanity check blocks [Preset: mainnet] OK
+ sanity check full states [Preset: mainnet] OK
+ sanity check full states, reusing buffers [Preset: mainnet] OK
+ sanity check genesis roundtrip [Preset: mainnet] OK
+ sanity check state diff roundtrip [Preset: mainnet] OK
+ sanity check states [Preset: mainnet] OK
+ sanity check states, reusing buffers [Preset: mainnet] OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
OK: 9/9 Fail: 0/9 Skip: 0/9
## Beacon state [Preset: mainnet]
```diff
+ Smoke test initialize_beacon_state_from_eth1 [Preset: mainnet] OK
@@ -321,4 +323,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 1/1 Fail: 0/1 Skip: 0/1
---TOTAL---
OK: 178/187 Fail: 0/187 Skip: 9/187
OK: 180/189 Fail: 0/189 Skip: 9/189

View File

@@ -8,52 +8,107 @@
{.push raises: [Defect].}
import
std/[typetraits, tables],
stew/[arrayops, assign2, byteutils, endians2, io2, objects, results],
typetraits, tables,
stew/[assign2, byteutils, endians2, io2, objects, results],
serialization, chronicles, snappy,
eth/db/[kvstore, kvstore_sqlite3],
./networking/network_metadata, ./beacon_chain_db_immutable,
./spec/[crypto, datatypes, digest, state_transition],
./ssz/[ssz_serialization, merkleization],
./eth1/merkle_minimal,
./filepath
type
# TODO when DirStoreRef and helpers are placed in a separate module, kvStore
# doesn't find it.. :/
# eth/db/kvstore.nim(75, 6) Error: type mismatch: got <DirStoreRef, openArray[byte], openArray[byte]>
DirStoreRef* = ref object of RootObj
# DirStore is an experimental storage based on plain files stored in a
# directory tree - this _might_ be a suitable way of storing large blobs
# efficiently, where sqlite sometimes struggles - see
# https://github.com/status-im/nimbus-eth2/issues/2440
#
# The issue described by 2440 happens when both blocks and states are all
# stored in a single, giant table. The slow deletes have since been
# mitigated by using separate tables.
root: string
proc splitName(db: DirStoreRef, name: openArray[byte]): tuple[dir, file: string] =
# Splitting the name helps keep the number of files per directory down - up
# to 65536 folders will be created
if name.len() > 2:
(db.root & "/" & name.toOpenArray(0, 1).toHex(), name.toOpenArray(2, name.high()).toHex())
else:
(db.root & "/" & "0000", name.toHex())
proc get*(db: DirStoreRef, key: openArray[byte], onData: DataProc): KvResult[bool] =
let
(root, name) = db.splitName(key)
fileName = root & "/" & name
var data: seq[byte]
if readFile(fileName, data).isOk():
onData(data)
ok(true)
else:
# Serious errors are caught when writing, so we simplify things and say
# the entry doesn't exist if for any reason we can't read it
# TODO align this with `contains` that simply checks if the file exists
ok(false)
proc del*(db: DirStoreRef, key: openArray[byte]): KvResult[void] =
let
(root, name) = db.splitName(key)
fileName = root & "/" & name
removeFile(fileName).mapErr(ioErrorMsg)
proc contains*(db: DirStoreRef, key: openArray[byte]): KvResult[bool] =
let
(root, name) = db.splitName(key)
fileName = root & "/" & name
ok(isFile(fileName))
proc put*(db: DirStoreRef, key, val: openArray[byte]): KvResult[void] =
let
(root, name) = db.splitName(key)
fileName = root & "/" & name
? createPath(root).mapErr(ioErrorMsg)
? io2.writeFile(fileName, val).mapErr(ioErrorMsg)
ok()
proc close*(db: DirStoreRef): KvResult[void] =
discard
proc init*(T: type DirStoreRef, root: string): T =
T(
root: root,
)
type
DbSeq*[T] = object
insertStmt: SqliteStmt[openArray[byte], void]
selectStmt: SqliteStmt[int64, openArray[byte]]
recordCount: int64
DbMap*[K, V] = object
db: SqStoreRef
keyspace: int
DepositsSeq = DbSeq[DepositData]
ImmutableValidatorsSeq = DbSeq[ImmutableValidatorData]
DepositsMerkleizer* = SszMerkleizer[depositContractLimit]
DepositContractSnapshot* = object
eth1Block*: Eth2Digest
depositContractState*: DepositContractState
BeaconChainDBV0* = ref object
## BeaconChainDBV0 based on old kvstore table that sets the WITHOUT ROWID
## option which becomes unbearably slow with large blobs. It is used as a
## read-only store to support old versions - by freezing it at its current
## data set, downgrading remains possible since it's no longer touched -
## anyone downgrading will have to sync up whatever they missed.
##
## Newer versions read from the new tables first - if the data is not found,
## they turn to the old tables for reading. Writing is done only to the new
## tables.
##
## V0 stored most data in a single table, prefixing each key with a tag
## identifying the type of data.
##
## 1.1 introduced BeaconStateNoImmutableValidators storage where immutable
## validator data is stored in a separate table and only a partial
## BeaconState is written to kvstore
##
## 1.2 moved BeaconStateNoImmutableValidators to a separate table to
## alleviate some of the btree balancing issues - this doubled the speed but
## was still slow
backend: KvStoreRef # kvstore
stateStore: KvStoreRef # state_no_validators
BeaconChainDB* = ref object
## Database storing resolved blocks and states - resolved blocks are such
## blocks that form a chain back to the tail block.
@@ -67,9 +122,7 @@ type
## database - this may have a number of "natural" causes such as switching
## between different versions of the client and accidentally using an old
## database.
db: SqStoreRef
v0: BeaconChainDBV0
backend: KvStoreRef
preset*: RuntimePreset
genesisDeposits*: DepositsSeq
@@ -80,20 +133,12 @@ type
checkpoint*: proc() {.gcsafe, raises: [Defect].}
keyValues: KvStoreRef # Random stuff using DbKeyKind - suitable for small values mainly!
blocks: KvStoreRef # BlockRoot -> TrustedBeaconBlock
stateRoots: KvStoreRef # (Slot, BlockRoot) -> StateRoot
statesNoVal: KvStoreRef # StateRoot -> BeaconStateNoImmutableValidators
stateDiffs: KvStoreRef ##\
## StateRoot -> BeaconStateDiff
## Instead of storing full BeaconStates, one can store only the diff from
## a different state. As 75% of a typical BeaconState's serialized form is
## taken up by the validators, which are mostly immutable and append-only,
## a simple append-diff representation helps significantly. Various roots
## are stored in a mod-increment pattern across fixed-sized arrays, which
## addresses most of the rest of the BeaconState sizes.
stateStore: KvStoreRef
summaries: KvStoreRef # BlockRoot -> BeaconBlockSummary
Keyspaces* = enum
defaultKeyspace = "kvstore"
validatorIndexFromPubKey # Unused (?)
stateNoValidators = "state_no_validators"
DbKeyKind = enum
kHashToState
@@ -111,26 +156,38 @@ type
kGenesisBlockRoot
## Immutable reference to the network genesis state
## (needed for satisfying requests to the beacon node API).
kEth1PersistedTo # Obsolete
kDepositsFinalizedByEth1 # Obsolete
kEth1PersistedTo
## (Obsolete) Used to point to the latest ETH1 block hash which
## satisfied the follow distance and had its deposits persisted to disk.
kDepositsFinalizedByEth1
## A merkleizer checkpoint which can be used for computing the
## `deposit_root` of all eth1 finalized deposits (i.e. deposits
## confirmed by ETH1_FOLLOW_DISTANCE blocks). The `deposit_root`
## is acknowledged and confirmed by the attached web3 provider.
kDepositsFinalizedByEth2
## A merkleizer checkpoint used for computing merkle proofs of
## deposits added to Eth2 blocks (it may lag behind the finalized
## eth1 deposits checkpoint).
kHashToBlockSummary # Block summaries for fast startup
kHashToBlockSummary
## Cache of beacon block summaries - during startup when we construct the
## chain dag, loading full blocks takes a lot of time - the block
## summary contains a minimal snapshot of what's needed to instantiate
## the BlockRef tree.
kSpeculativeDeposits
## A merkleizer checkpoint created on the basis of deposit events
## that we were not able to verify against a `deposit_root` served
## by the web3 provider. This may happen on Geth nodes that serve
## only recent contract state data (i.e. only recent `deposit_roots`).
kHashToStateDiff # Obsolete
kHashToStateDiff
## Instead of storing full BeaconStates, one can store only the diff from
## a different state. As 75% of a typical BeaconState's serialized form is
## taken up by the validators, which are mostly immutable and append-only,
## a simple append-diff representation helps significantly. Various roots
## are stored in a mod-increment pattern across fixed-sized arrays, which
## addresses most of the rest of the BeaconState sizes - see the append-diff
## sketch after this type section.
kHashToStateOnlyMutableValidators
BeaconBlockSummary* = object
## Cache of beacon block summaries - during startup when we construct the
## chain dag, loading full blocks takes a lot of time - the block
## summary contains a minimal snapshot of what's needed to instantiate
## the BlockRef tree.
slot*: Slot
parent_root*: Eth2Digest
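The append-diff sketch referenced in the `kHashToStateDiff` comment above: because the validator list only grows and existing entries barely change, a diff can carry just the appended tail plus the small mutable fields (all names here are hypothetical):
```nim
type
  DemoState = object
    slot: uint64
    validators: seq[string]    # stands in for the large, append-only list
  DemoDiff = object
    newSlot: uint64
    appended: seq[string]      # only the validators added since the base

proc diff(base, next: DemoState): DemoDiff =
  doAssert next.validators.len >= base.validators.len
  DemoDiff(newSlot: next.slot,
           appended: next.validators[base.validators.len .. ^1])

proc apply(base: DemoState, d: DemoDiff): DemoState =
  result = base
  result.slot = d.newSlot
  result.validators.add d.appended

when isMainModule:
  let s0 = DemoState(slot: 0, validators: @["a", "b"])
  let s1 = DemoState(slot: 32, validators: @["a", "b", "c"])
  let restored = s0.apply(diff(s0, s1))
  doAssert restored.slot == s1.slot and restored.validators == s1.validators
```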
@@ -165,6 +222,9 @@ func subkey(kind: type SignedBeaconBlock, key: Eth2Digest): auto =
func subkey(kind: type BeaconBlockSummary, key: Eth2Digest): auto =
subkey(kHashToBlockSummary, key.data)
func subkey(kind: type BeaconStateDiff, key: Eth2Digest): auto =
subkey(kHashToStateDiff, key.data)
func subkey(root: Eth2Digest, slot: Slot): array[40, byte] =
var ret: array[40, byte]
# big endian to get a naturally ascending order on slots in sorted indices
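A standalone check of the property this comment relies on: big-endian keys sort byte-wise in the same order as their numeric values (`beKey` is a hypothetical stand-in for the module's `toBytesBE`):
```nim
import std/algorithm

proc beKey(x: uint64): string =
  # 8-byte big-endian rendering: most significant byte first
  for i in 0 ..< 8:
    result.add char((x shr (8 * (7 - i))) and 0xff)

when isMainModule:
  var keys = @[beKey(300), beKey(2), beKey(256)]
  keys.sort()                          # plain byte-wise comparison
  doAssert keys == @[beKey(2), beKey(256), beKey(300)]  # numeric order
```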
@@ -181,51 +241,41 @@ template panic =
# Review all usages.
raiseAssert "The database should not be corrupted"
template expectDb(x: auto): untyped =
# There's no meaningful error handling implemented for a corrupt database or
# full disk - this requires manual intervention, so we'll panic for now
x.expect("working database (disk broken/full?)")
proc init*[T](Seq: type DbSeq[T], db: SqStoreRef, name: string): Seq =
db.exec("""
CREATE TABLE IF NOT EXISTS """ & name & """(
id INTEGER PRIMARY KEY,
value BLOB
);
""").expectDb()
""").expect "working database (disk broken/full?)"
let
insertStmt = db.prepareStmt(
"INSERT INTO " & name & "(value) VALUES (?);",
openArray[byte], void, managed = false).expect("this is a valid statement")
openArray[byte], void).expect("this is a valid statement")
selectStmt = db.prepareStmt(
"SELECT value FROM " & name & " WHERE id = ?;",
int64, openArray[byte], managed = false).expect("this is a valid statement")
int64, openArray[byte]).expect("this is a valid statement")
countStmt = db.prepareStmt(
"SELECT COUNT(1) FROM " & name & ";",
NoParams, int64, managed = false).expect("this is a valid statement")
NoParams, int64).expect("this is a valid statement")
var recordCount = int64 0
let countQueryRes = countStmt.exec do (res: int64):
recordCount = res
let found = countQueryRes.expectDb()
let found = countQueryRes.expect("working database (disk broken/full?)")
if not found: panic()
countStmt.dispose()
Seq(insertStmt: insertStmt,
selectStmt: selectStmt,
recordCount: recordCount)
proc close*(s: DbSeq) =
s.insertStmt.dispose()
s.selectStmt.dispose()
proc add*[T](s: var DbSeq[T], val: T) =
var bytes = SSZ.encode(val)
s.insertStmt.exec(bytes).expectDb()
s.insertStmt.exec(bytes).expect "working database (disk broken/full?)"
inc s.recordCount
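A sketch of the `DbSeq` contract on top of the standard library's `db_sqlite` (the table name and string payloads are placeholders for the real SSZ-encoded records): rows get 1-based ids in insertion order, and the length is recovered with `COUNT(1)` on open, just as `DbSeq.init` does.
```nim
import db_sqlite  # stdlib; stands in for nim-eth's SqStoreRef here

proc add(db: DbConn, val: string) =
  # mirrors DbSeq.add: rows get 1-based ids in insertion order
  db.exec(sql"INSERT INTO demo_seq(value) VALUES (?);", val)

proc get(db: DbConn, idx: int64): string =
  # mirrors DbSeq.get: fetch a record by its position
  db.getValue(sql"SELECT value FROM demo_seq WHERE id = ?;", idx)

when isMainModule:
  let db = open(":memory:", "", "", "")
  db.exec(sql"""CREATE TABLE IF NOT EXISTS demo_seq(
    id INTEGER PRIMARY KEY,
    value BLOB
  );""")
  db.add "genesis-deposit-0"
  db.add "genesis-deposit-1"
  doAssert db.get(1) == "genesis-deposit-0"
  # recordCount is recovered the same way DbSeq.init recovers it
  doAssert db.getValue(sql"SELECT COUNT(1) FROM demo_seq;") == "2"
  db.close()
```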
template len*[T](s: DbSeq[T]): int64 =
@@ -241,112 +291,124 @@ proc get*[T](s: DbSeq[T], idx: int64): T =
except SerializationError:
panic()
let found = queryRes.expectDb()
let found = queryRes.expect("working database (disk broken/full?)")
if not found: panic()
proc createMap*(db: SqStoreRef, keyspace: int;
K, V: distinct type): DbMap[K, V] =
DbMap[K, V](db: db, keyspace: keyspace)
proc insert*[K, V](m: var DbMap[K, V], key: K, value: V) =
m.db.put(m.keyspace, SSZ.encode key, SSZ.encode value).expect("working database (disk broken/full?)")
proc contains*[K, V](m: DbMap[K, V], key: K): bool =
contains(m.db, SSZ.encode key).expect("working database (disk broken/full?)")
template insert*[K, V](t: var Table[K, V], key: K, value: V) =
add(t, key, value)
proc loadImmutableValidators(db: BeaconChainDB): seq[ImmutableValidatorData] =
# TODO not called, but build fails otherwise
for i in 0 ..< db.immutableValidators.len:
result.add db.immutableValidators.get(i)
type
SqKeyspaceStoreRef* = ref object of RootObj
# Wrapper around SqStoreRef to target a particular keyspace - using
# keyspaces helps keep performance decent when using large blobs in tables
# that otherwise contain lots of rows.
db: SqStoreRef
keyspace: int
proc get*(db: SqKeyspaceStoreRef, key: openArray[byte], onData: DataProc): KvResult[bool] =
get(db.db, db.keyspace, key, onData)
proc del*(db: SqKeyspaceStoreRef, key: openArray[byte]): KvResult[void] =
del(db.db, db.keyspace, key)
proc contains*(db: SqKeyspaceStoreRef, key: openArray[byte]): KvResult[bool] =
contains(db.db, db.keyspace, key)
proc put*(db: SqKeyspaceStoreRef, key, val: openArray[byte]): KvResult[void] =
put(db.db, db.keyspace, key, val)
proc close*(db: SqKeyspaceStoreRef): KvResult[void] =
ok() # Gets closed with the "default" keyspace
proc init(T: type SqKeyspaceStoreRef, db: SqStoreRef, keyspace: Keyspaces): T =
T(
db: db,
keyspace: int(keyspace)
)
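A toy model of what `SqKeyspaceStoreRef` provides. In the real `SqStoreRef` a keyspace is a separate SQLite table; tagging keys over a single `Table` here is only a way to keep the sketch self-contained:
```nim
import std/tables

type
  DemoStore = ref object
    data: Table[string, string]      # one physical store
  KeyspaceView = object
    store: DemoStore
    keyspace: int                    # selects the logical namespace

proc tagged(v: KeyspaceView, key: string): string =
  $v.keyspace & ":" & key

proc put(v: KeyspaceView, key, val: string) =
  v.store.data[v.tagged(key)] = val

proc get(v: KeyspaceView, key: string): string =
  v.store.data.getOrDefault(v.tagged(key))

when isMainModule:
  let db = DemoStore()
  let kv = KeyspaceView(store: db, keyspace: 0)
  let states = KeyspaceView(store: db, keyspace: 1)
  kv.put("root", "block-bytes")
  states.put("root", "state-bytes")
  doAssert kv.get("root") != states.get("root")  # same key, separate spaces
```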
proc new*(T: type BeaconChainDB,
preset: RuntimePreset,
dir: string,
inMemory = false,
fileStateStorage = false,
): BeaconChainDB =
var db = if inMemory:
SqStoreRef.init("", "test", inMemory = true).expect(
var sqliteStore = if inMemory:
SqStoreRef.init("", "test", Keyspaces, inMemory = true).expect(
"working database (out of memory?)")
else:
let s = secureCreatePath(dir)
doAssert s.isOk # TODO(zah) Handle this in a better way
SqStoreRef.init(
dir, "nbc", manualCheckpoint = true).expectDb()
dir, "nbc", Keyspaces,
manualCheckpoint = true).expect("working database (disk broken/full?)")
# Remove the deposits table we used before we switched
# to storing only deposit contract checkpoints
if db.exec("DROP TABLE IF EXISTS deposits;").isErr:
if sqliteStore.exec("DROP TABLE IF EXISTS deposits;").isErr:
debug "Failed to drop the deposits table"
var
# V0 compatibility tables
backend = kvStore db.openKvStore().expectDb()
stateStore = kvStore db.openKvStore("state_no_validators").expectDb()
genesisDepositsSeq =
DbSeq[DepositData].init(db, "genesis_deposits")
DbSeq[DepositData].init(sqliteStore, "genesis_deposits")
immutableValidatorsSeq =
DbSeq[ImmutableValidatorData].init(db, "immutable_validators")
DbSeq[ImmutableValidatorData].init(sqliteStore, "immutable_validators")
backend = kvStore sqliteStore
stateStore =
if inMemory or (not fileStateStorage):
kvStore SqKeyspaceStoreRef.init(sqliteStore, stateNoValidators)
else:
kvStore DirStoreRef.init(dir & "/state")
# V1 - expected-to-be small rows get without rowid optimizations
keyValues = kvStore db.openKvStore("key_values", true).expectDb()
blocks = kvStore db.openKvStore("blocks").expectDb()
stateRoots = kvStore db.openKvStore("state_roots", true).expectDb()
statesNoVal = kvStore db.openKvStore("state_no_validators2").expectDb()
stateDiffs = kvStore db.openKvStore("state_diffs").expectDb()
summaries = kvStore db.openKvStore("beacon_block_summaries", true).expectDb()
T(
db: db,
v0: BeaconChainDBV0(
backend: backend,
stateStore: stateStore,
),
T(backend: backend,
preset: preset,
genesisDeposits: genesisDepositsSeq,
immutableValidators: immutableValidatorsSeq,
immutableValidatorsMem: loadImmutableValidators(immutableValidatorsSeq),
checkpoint: proc() = db.checkpoint(),
keyValues: keyValues,
blocks: blocks,
stateRoots: stateRoots,
statesNoVal: statesNoVal,
stateDiffs: stateDiffs,
summaries: summaries,
)
checkpoint: proc() = sqliteStore.checkpoint(),
stateStore: stateStore,
)
proc decodeSSZ[T](data: openArray[byte], output: var T): bool =
proc snappyEncode(inp: openArray[byte]): seq[byte] =
try:
readSszBytes(data, output, updateRoot = false)
true
except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
warn "Unable to deserialize data, old database?",
err = e.msg, typ = name(T), dataLen = data.len
false
snappy.encode(inp)
except CatchableError as err:
raiseAssert err.msg
proc decodeSnappySSZ[T](data: openArray[byte], output: var T): bool =
try:
let decompressed = snappy.decode(data, maxDecompressedDbRecordSize)
readSszBytes(decompressed, output, updateRoot = false)
true
except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
warn "Unable to deserialize data, old database?",
err = e.msg, typ = name(T), dataLen = data.len
false
proc encodeSSZ(v: auto): seq[byte] =
proc sszEncode(v: auto): seq[byte] =
try:
SSZ.encode(v)
except IOError as err:
raiseAssert err.msg
proc encodeSnappySSZ(v: auto): seq[byte] =
try:
snappy.encode(SSZ.encode(v))
except CatchableError as err:
# In-memory encode shouldn't fail!
raiseAssert err.msg
proc putRaw(db: KvStoreRef, key: openArray[byte], v: Eth2Digest) =
db.put(key, v.data).expect("working database (disk broken/full?)")
proc putEncoded(db: KvStoreRef, key: openArray[byte], v: auto) =
db.put(key, snappyEncode(sszEncode(v))).expect(
"working database (disk broken/full?)")
proc getRaw(db: KvStoreRef, key: openArray[byte], T: type Eth2Digest): Opt[T] =
var res: Opt[T]
proc decode(data: openArray[byte]) =
if data.len == sizeof(Eth2Digest):
res.ok Eth2Digest(data: toArray(sizeof(Eth2Digest), data))
if data.len == 32:
res.ok Eth2Digest(data: toArray(32, data))
else:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
@@ -354,89 +416,63 @@ proc getRaw(db: KvStoreRef, key: openArray[byte], T: type Eth2Digest): Opt[T] =
typ = name(T), dataLen = data.len
discard
discard db.get(key, decode).expectDb()
discard db.get(key, decode).expect("working database (disk broken/full?)")
res
proc putRaw(db: KvStoreRef, key: openArray[byte], v: Eth2Digest) =
db.put(key, v.data).expectDb()
type GetResult = enum
found = "Found"
notFound = "Not found"
corrupted = "Corrupted"
proc getSSZ[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
proc getEncoded[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
var status = GetResult.notFound
# TODO address is needed because there's no way to express lifetimes in nim
# we'll use unsafeAddr to find the code later
var outputPtr = unsafeAddr output # callback is local, ptr won't escape
proc decode(data: openArray[byte]) =
status =
if decodeSSZ(data, outputPtr[]): GetResult.found
else: GetResult.corrupted
try:
let decompressed = snappy.decode(data, maxDecompressedDbRecordSize)
if decompressed.len > 0:
outputPtr[] = SSZ.decode(decompressed, T, updateRoot = false)
status = GetResult.found
else:
warn "Corrupt snappy record found in database", typ = name(T)
status = GetResult.corrupted
except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
warn "Unable to deserialize data, old database?",
err = e.msg, typ = name(T), dataLen = data.len
status = GetResult.corrupted
discard db.get(key, decode).expectDb()
discard db.get(key, decode).expect("working database (disk broken/full?)")
status
proc putSSZ(db: KvStoreRef, key: openArray[byte], v: auto) =
db.put(key, encodeSSZ(v)).expectDb()
proc getSnappySSZ[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
var status = GetResult.notFound
# TODO address is needed because there's no way to express lifetimes in nim
# we'll use unsafeAddr to find the code later
var outputPtr = unsafeAddr output # callback is local, ptr won't escape
proc decode(data: openArray[byte]) =
status =
if decodeSnappySSZ(data, outputPtr[]): GetResult.found
else: GetResult.corrupted
discard db.get(key, decode).expectDb()
status
proc putSnappySSZ(db: KvStoreRef, key: openArray[byte], v: auto) =
db.put(key, encodeSnappySSZ(v)).expectDb()
proc close*(db: BeaconChainDBV0) =
discard db.stateStore.close()
proc close*(db: BeaconChainDB) =
discard db.backend.close()
proc close*(db: BeaconChainDB) =
if db.db == nil: return
# Close things in reverse order
discard db.summaries.close()
discard db.stateDiffs.close()
discard db.statesNoVal.close()
discard db.stateRoots.close()
discard db.blocks.close()
discard db.keyValues.close()
db.immutableValidators.close()
db.genesisDeposits.close()
db.v0.close()
db.db.close()
db.db = nil
func toBeaconBlockSummary(v: SomeBeaconBlock): BeaconBlockSummary =
BeaconBlockSummary(
slot: v.slot,
parent_root: v.parent_root,
)
proc putBeaconBlockSummary(
db: BeaconChainDB, root: Eth2Digest, value: BeaconBlockSummary) =
# Summaries are too simple / small to compress, store them as plain SSZ
db.summaries.putSSZ(root.data, value)
# TODO: we should only store TrustedSignedBeaconBlock in the DB.
proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) =
db.backend.putEncoded(subkey(type value, value.root), value)
db.backend.putEncoded(
subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())
proc putBlock*(db: BeaconChainDB, value: TrustedSignedBeaconBlock) =
db.blocks.putSnappySSZ(value.root.data, value)
db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())
db.backend.putEncoded(subkey(SignedBeaconBlock, value.root), value)
db.backend.putEncoded(
subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())
proc putBlock*(db: BeaconChainDB, value: SigVerifiedSignedBeaconBlock) =
db.backend.putEncoded(subkey(SignedBeaconBlock, value.root), value)
db.backend.putEncoded(
subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())
proc updateImmutableValidators(
db: BeaconChainDB, immutableValidators: var seq[ImmutableValidatorData],
@@ -458,76 +494,80 @@ proc updateImmutableValidators(
immutableValidators.add immutableValidator
proc putState*(db: BeaconChainDB, key: Eth2Digest, value: var BeaconState) =
db.updateImmutableValidators(db.immutableValidatorsMem, value.validators)
db.statesNoVal.putSnappySSZ(
key.data,
updateImmutableValidators(db, db.immutableValidatorsMem, value.validators)
db.stateStore.putEncoded(
subkey(BeaconStateNoImmutableValidators, key),
isomorphicCast[BeaconStateNoImmutableValidators](value))
proc putState*(db: BeaconChainDB, value: var BeaconState) =
db.putState(hash_tree_root(value), value)
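The storage split used by `putState` above can be shown in miniature: only the mutable columns are persisted, and the immutable ones are merged back in by index from the in-memory cache when loading, as `getStateOnlyMutableValidators` does below (all names here are hypothetical):
```nim
type
  ImmutablePart = object
    pubkey: string             # written once, cached in memory
  MutablePart = object
    balance: int64             # changes constantly, persisted to disk
  FullValidator = object
    pubkey: string
    balance: int64

proc rehydrate(immutables: seq[ImmutablePart],
               mutables: seq[MutablePart]): seq[FullValidator] =
  # the on-disk record stores only the mutable columns; the immutable
  # columns are merged back in by index
  doAssert immutables.len >= mutables.len
  for i in 0 ..< mutables.len:
    result.add FullValidator(pubkey: immutables[i].pubkey,
                             balance: mutables[i].balance)

when isMainModule:
  let imm = @[ImmutablePart(pubkey: "k0"), ImmutablePart(pubkey: "k1")]
  let mut = @[MutablePart(balance: 32), MutablePart(balance: 31)]
  doAssert rehydrate(imm, mut)[1].pubkey == "k1"
```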
func stateRootKey(root: Eth2Digest, slot: Slot): array[40, byte] =
var ret: array[40, byte]
# big endian to get a naturally ascending order on slots in sorted indices
ret[0..<8] = toBytesBE(slot.uint64)
ret[8..<40] = root.data
proc putStateFull*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
db.backend.putEncoded(subkey(BeaconState, key), value)
ret
proc putStateFull*(db: BeaconChainDB, value: BeaconState) =
db.putStateFull(hash_tree_root(value), value)
proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
value: Eth2Digest) =
db.stateRoots.putRaw(stateRootKey(root, slot), value)
db.backend.putRaw(subkey(root, slot), value)
proc putStateDiff*(db: BeaconChainDB, root: Eth2Digest, value: BeaconStateDiff) =
db.stateDiffs.putSnappySSZ(root.data, value)
db.backend.putEncoded(subkey(BeaconStateDiff, root), value)
proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.blocks.del(key.data).expectDb()
db.summaries.del(key.data).expectDb()
db.backend.del(subkey(SignedBeaconBlock, key)).expect("working database (disk broken/full?)")
db.backend.del(subkey(BeaconBlockSummary, key)).expect("working database (disk broken/full?)")
proc delState*(db: BeaconChainDB, key: Eth2Digest) =
db.statesNoVal.del(key.data).expectDb()
db.backend.del(subkey(BeaconState, key)).expect("working database (disk broken/full?)")
db.stateStore.del(subkey(BeaconStateNoImmutableValidators, key)).expect(
"working filesystem (disk broken/full?)")
proc delStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot) =
db.stateRoots.del(stateRootKey(root, slot)).expectDb()
db.backend.del(subkey(root, slot)).expect("working database (disk broken/full?)")
proc delStateDiff*(db: BeaconChainDB, root: Eth2Digest) =
db.stateDiffs.del(root.data).expectDb()
db.backend.del(subkey(BeaconStateDiff, root)).expect("working database (disk broken/full?)")
proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.keyValues.putRaw(subkey(kHeadBlock), key)
db.backend.putRaw(subkey(kHeadBlock), key)
proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.keyValues.putRaw(subkey(kTailBlock), key)
db.backend.putRaw(subkey(kTailBlock), key)
proc putGenesisBlockRoot*(db: BeaconChainDB, key: Eth2Digest) =
db.keyValues.putRaw(subkey(kGenesisBlockRoot), key)
db.backend.putRaw(subkey(kGenesisBlockRoot), key)
proc putEth1FinalizedTo*(db: BeaconChainDB,
eth1Checkpoint: DepositContractSnapshot) =
db.backend.putEncoded(subkey(kDepositsFinalizedByEth1), eth1Checkpoint)
proc putEth2FinalizedTo*(db: BeaconChainDB,
eth1Checkpoint: DepositContractSnapshot) =
db.keyValues.putSnappySSZ(subkey(kDepositsFinalizedByEth2), eth1Checkpoint)
db.backend.putEncoded(subkey(kDepositsFinalizedByEth2), eth1Checkpoint)
proc getBlock(db: BeaconChainDBV0, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
proc putSpeculativeDeposits*(db: BeaconChainDB,
eth1Checkpoint: DepositContractSnapshot) =
db.backend.putEncoded(subkey(kSpeculativeDeposits), eth1Checkpoint)
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
# We only store blocks that we trust in the database
result.ok(TrustedSignedBeaconBlock())
if db.backend.getSnappySSZ(subkey(SignedBeaconBlock, key), result.get) != GetResult.found:
if db.backend.getEncoded(subkey(SignedBeaconBlock, key), result.get) != GetResult.found:
result.err()
else:
# set root after deserializing (so it doesn't get zeroed)
result.get().root = key
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
proc getBlockSummary*(db: BeaconChainDB, key: Eth2Digest): Opt[BeaconBlockSummary] =
# We only store blocks that we trust in the database
result.ok(TrustedSignedBeaconBlock())
if db.blocks.getSnappySSZ(key.data, result.get) != GetResult.found:
result = db.v0.getBlock(key)
else:
# set root after deserializing (so it doesn't get zeroed)
result.get().root = key
result.ok(BeaconBlockSummary())
if db.backend.getEncoded(subkey(BeaconBlockSummary, key), result.get) != GetResult.found:
result.err()
proc getStateOnlyMutableValidators(
immutableValidatorsMem: openArray[ImmutableValidatorData],
store: KvStoreRef, key: openArray[byte], output: var BeaconState,
db: BeaconChainDB, store: KvStoreRef, key: Eth2Digest, output: var BeaconState,
rollback: RollbackProc): bool =
## Load state into `output` - BeaconState is large so we want to avoid
## re-allocating it if possible
@@ -540,20 +580,23 @@ proc getStateOnlyMutableValidators(
# TODO RVO is inefficient for large objects:
# https://github.com/nim-lang/Nim/issues/13879
case store.getSnappySSZ(
key, isomorphicCast[BeaconStateNoImmutableValidators](output))
case store.getEncoded(
subkey(
BeaconStateNoImmutableValidators, key),
isomorphicCast[BeaconStateNoImmutableValidators](output))
of GetResult.found:
let numValidators = output.validators.len
doAssert immutableValidatorsMem.len >= numValidators
doAssert db.immutableValidatorsMem.len >= numValidators
for i in 0 ..< numValidators:
let
# Bypass hash cache invalidation
dstValidator = addr output.validators.data[i]
srcValidator = addr db.immutableValidatorsMem[i]
assign(dstValidator.pubkey, immutableValidatorsMem[i].pubkey)
assign(dstValidator.pubkey, srcValidator.pubkey)
assign(dstValidator.withdrawal_credentials,
immutableValidatorsMem[i].withdrawal_credentials)
srcValidator.withdrawal_credentials)
output.validators.resetCache()
@@ -564,35 +607,6 @@ proc getStateOnlyMutableValidators(
rollback(output)
false
proc getState(
db: BeaconChainDBV0,
immutableValidatorsMem: openArray[ImmutableValidatorData],
key: Eth2Digest, output: var BeaconState,
rollback: RollbackProc): bool =
# Nimbus 1.0 reads and writes genesis BeaconState to `backend`
# Nimbus 1.1 writes a genesis BeaconStateNoImmutableValidators to `backend` and
# reads both BeaconState and BeaconStateNoImmutableValidators from `backend`
# Nimbus 1.2 writes a genesis BeaconStateNoImmutableValidators to `stateStore`
# and reads BeaconState from `backend` and BeaconStateNoImmutableValidators
# from `stateStore`. We will try to read the state from all these locations.
if getStateOnlyMutableValidators(
immutableValidatorsMem, db.stateStore,
subkey(BeaconStateNoImmutableValidators, key), output, rollback):
return true
if getStateOnlyMutableValidators(
immutableValidatorsMem, db.backend,
subkey(BeaconStateNoImmutableValidators, key), output, rollback):
return true
case db.backend.getSnappySSZ(subkey(BeaconState, key), output)
of GetResult.found:
true
of GetResult.notFound:
false
of GetResult.corrupted:
rollback(output)
false
proc getState*(
db: BeaconChainDB, key: Eth2Digest, output: var BeaconState,
rollback: RollbackProc): bool =
@@ -606,74 +620,99 @@ proc getState*(
# https://github.com/nim-lang/Nim/issues/14126
# TODO RVO is inefficient for large objects:
# https://github.com/nim-lang/Nim/issues/13879
if not getStateOnlyMutableValidators(
db.immutableValidatorsMem, db.statesNoVal, key.data, output, rollback):
db.v0.getState(db.immutableValidatorsMem, key, output, rollback)
else:
true
if getStateOnlyMutableValidators(db, db.stateStore, key, output, rollback):
return true
proc getStateRoot(db: BeaconChainDBV0,
root: Eth2Digest,
slot: Slot): Opt[Eth2Digest] =
db.backend.getRaw(subkey(root, slot), Eth2Digest)
case db.backend.getEncoded(subkey(BeaconState, key), output)
of GetResult.found:
true
of GetResult.notFound:
false
of GetResult.corrupted:
rollback(output)
false
proc getStateRoot*(db: BeaconChainDB,
root: Eth2Digest,
slot: Slot): Opt[Eth2Digest] =
db.stateRoots.getRaw(stateRootKey(root, slot), Eth2Digest) or
db.v0.getStateRoot(root, slot)
db.backend.getRaw(subkey(root, slot), Eth2Digest)
proc getStateDiff*(db: BeaconChainDB,
root: Eth2Digest): Opt[BeaconStateDiff] =
result.ok(BeaconStateDiff())
if db.stateDiffs.getSnappySSZ(root.data, result.get) != GetResult.found:
if db.backend.getEncoded(subkey(BeaconStateDiff, root), result.get) != GetResult.found:
result.err
proc getHeadBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
proc getHeadBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
db.backend.getRaw(subkey(kHeadBlock), Eth2Digest)
proc getHeadBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
db.keyValues.getRaw(subkey(kHeadBlock), Eth2Digest) or
db.v0.getHeadBlock()
proc getTailBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
proc getTailBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
db.backend.getRaw(subkey(kTailBlock), Eth2Digest)
proc getTailBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
db.keyValues.getRaw(subkey(kTailBlock), Eth2Digest) or
db.v0.getTailBlock()
proc getGenesisBlockRoot(db: BeaconChainDBV0): Eth2Digest =
db.backend.getRaw(subkey(kGenesisBlockRoot), Eth2Digest).expectDb()
proc getGenesisBlockRoot*(db: BeaconChainDB): Eth2Digest =
db.keyValues.getRaw(subkey(kGenesisBlockRoot), Eth2Digest).expect(
db.backend.getRaw(subkey(kGenesisBlockRoot), Eth2Digest).expect(
"The database must be seeded with the genesis state")
proc getEth2FinalizedTo(db: BeaconChainDBV0): Opt[DepositContractSnapshot] =
proc getEth1FinalizedTo*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
result.ok(DepositContractSnapshot())
let r = db.backend.getSnappySSZ(subkey(kDepositsFinalizedByEth2), result.get)
let r = db.backend.getEncoded(subkey(kDepositsFinalizedByEth1), result.get)
if r != found: result.err()
proc getEth2FinalizedTo*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
result.ok(DepositContractSnapshot())
let r = db.keyValues.getSnappySSZ(subkey(kDepositsFinalizedByEth2), result.get)
if r != found: return db.v0.getEth2FinalizedTo()
let r = db.backend.getEncoded(subkey(kDepositsFinalizedByEth2), result.get)
if r != found: result.err()
proc containsBlock*(db: BeaconChainDBV0, key: Eth2Digest): bool =
db.backend.contains(subkey(SignedBeaconBlock, key)).expectDb()
proc getSpeculativeDeposits*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
result.ok(DepositContractSnapshot())
let r = db.backend.getEncoded(subkey(kSpeculativeDeposits), result.get)
if r != found: result.err()
proc delSpeculativeDeposits*(db: BeaconChainDB) =
db.backend.del(subkey(kSpeculativeDeposits)).expect("working database (disk broken/full?)")
proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
db.blocks.contains(key.data).expectDb() or db.v0.containsBlock(key)
proc containsState*(db: BeaconChainDBV0, key: Eth2Digest): bool =
let sk = subkey(BeaconStateNoImmutableValidators, key)
db.stateStore.contains(sk).expectDb() or
db.backend.contains(sk).expectDb() or
db.backend.contains(subkey(BeaconState, key)).expectDb
db.backend.contains(subkey(SignedBeaconBlock, key)).expect("working database (disk broken/full?)")
proc containsState*(db: BeaconChainDB, key: Eth2Digest): bool =
db.statesNoVal.contains(key.data).expectDb or db.v0.containsState(key)
db.stateStore.contains(subkey(BeaconStateNoImmutableValidators, key)).expect(
"working database (disk broken/full?)") or
db.backend.contains(subkey(BeaconState, key)).expect("working database (disk broken/full?)")
proc containsStateDiff*(db: BeaconChainDB, key: Eth2Digest): bool =
db.backend.contains(subkey(BeaconStateDiff, key)).expect("working database (disk broken/full?)")
proc repairGenesisState*(db: BeaconChainDB, key: Eth2Digest): KvResult[void] =
# Nimbus 1.0 reads and writes genesis BeaconState to `backend`
# Nimbus 1.1 writes a genesis BeaconStateNoImmutableValidators to `backend` and
# reads both BeaconState and BeaconStateNoImmutableValidators from `backend`
# Nimbus 1.2 writes a genesis BeaconStateNoImmutableValidators to `stateStore`
# and reads BeaconState from `backend` and BeaconStateNoImmutableValidators
# from `stateStore`. This means that 1.2 cannot read a database created with
# 1.1 and earlier versions can't read databases created with either of 1.1
# and 1.2.
# Here, we will try to repair the database so that no matter what, there will
# be a `BeaconState` in `backend`:
if ? db.backend.contains(subkey(BeaconState, key)):
# No compatibility issues, life goes on
discard
elif ? db.backend.contains(subkey(BeaconStateNoImmutableValidators, key)):
# 1.1 writes this but not a full state - rewrite a full state
var output = new BeaconState
if not getStateOnlyMutableValidators(db, db.backend, key, output[], noRollback):
return err("Cannot load partial state")
putStateFull(db, output[])
elif ? db.stateStore.contains(subkey(BeaconStateNoImmutableValidators, key)):
# 1.2 writes this but not a full state - rewrite a full state
var output = new BeaconState
if not getStateOnlyMutableValidators(db, db.stateStore, key, output[], noRollback):
return err("Cannot load partial state")
putStateFull(db, output[])
ok()
iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
TrustedSignedBeaconBlock =
@@ -685,29 +724,11 @@ iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
var
res: TrustedSignedBeaconBlock
root = root
while db.blocks.getSnappySSZ(root.data, res) == GetResult.found or
db.v0.backend.getSnappySSZ(
subkey(SignedBeaconBlock, root), res) == GetResult.found:
while db.backend.getEncoded(subkey(SignedBeaconBlock, root), res) == GetResult.found:
res.root = root
yield res
root = res.message.parent_root
proc loadSummaries(db: BeaconChainDB): Table[Eth2Digest, BeaconBlockSummary] =
# Load summaries into table - there's no telling what order they're in so we
# load them all - bugs in nim prevent this code from living in the iterator.
var summaries = initTable[Eth2Digest, BeaconBlockSummary](1024*1024)
discard db.summaries.find([], proc(k, v: openArray[byte]) =
var output: BeaconBlockSummary
if k.len() == sizeof(Eth2Digest) and decodeSSZ(v, output):
summaries[Eth2Digest(data: toArray(sizeof(Eth2Digest), k))] = output
else:
warn "Invalid summary in database", klen = k.len(), vlen = v.len()
)
summaries
iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
tuple[root: Eth2Digest, summary: BeaconBlockSummary] =
## Load a chain of ancestors for blck - returns a list of blocks with the
@@ -715,45 +736,21 @@ iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
##
## The search will go on until the ancestor cannot be found.
# Summaries are loaded from the dedicated summaries table. For backwards
# compatibility, we also load from `kvstore` and finally, if no summaries
# can be found, by loading the blocks instead.
# First, load the full summary table into memory in one query - this makes
# initial startup very fast.
var
summaries = db.loadSummaries()
res: tuple[root: Eth2Digest, summary: BeaconBlockSummary]
blck: TrustedSignedBeaconBlock
foundOldSummary = false
tmp: TrustedSignedBeaconBlock
root = root
res.root = root
# Yield summaries in reverse chain order by walking the parent references.
# If a summary is missing, try loading it from the older version or create one
# from block data.
while true:
summaries.withValue(res.root, summary) do:
res.summary = summary[]
if db.backend.getEncoded(subkey(BeaconBlockSummary, root), res.summary) == GetResult.found:
res.root = root
yield res
do: # Summary was not found in summary table, look elsewhere
if db.v0.backend.getSnappySSZ(subkey(BeaconBlockSummary, res.root), res.summary) == GetResult.found:
yield res
elif db.v0.backend.getSnappySSZ(subkey(SignedBeaconBlock, res.root), blck) == GetResult.found:
res.summary = blck.message.toBeaconBlockSummary()
yield res
else:
break
# Next time, load them from the right place
db.putBeaconBlockSummary(res.root, res.summary)
elif db.backend.getEncoded(subkey(SignedBeaconBlock, root), tmp) == GetResult.found:
res.summary = tmp.message.toBeaconBlockSummary()
db.backend.putEncoded(subkey(BeaconBlockSummary, root), res.summary)
res.root = root
yield res
else:
break
res.root = res.summary.parent_root
if false:
# When the current version has been online for a bit, we can safely remove
# summaries from kvstore by enabling this little snippet - if users were
# to downgrade after the summaries have been purged, the old versions that
# use summaries can also recreate them on the fly from blocks.
db.db.exec(
"DELETE FROM kvstore WHERE key >= ? and key < ?",
([byte ord(kHashToBlockSummary)], [byte ord(kHashToBlockSummary) + 1])).expectDb()
root = res.summary.parent_root
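Why the `[tag, tag + 1)` bounds in the disabled snippet select exactly one kind of record: every V0 `kvstore` key starts with its one-byte `DbKeyKind` tag, and SQLite compares BLOB keys byte-wise. A tiny self-contained check (the tag value is an arbitrary stand-in, not the real ordinal of `kHashToBlockSummary`):
```nim
when isMainModule:
  const tag = 9                  # arbitrary stand-in for a DbKeyKind ordinal
  let
    lo = $chr(tag)               # inclusive lower bound of the DELETE
    hi = $chr(tag + 1)           # exclusive upper bound
  doAssert chr(tag) & "block-root" >= lo   # tagged keys fall in range...
  doAssert chr(tag) & "block-root" < hi
  doAssert chr(tag + 1) & "other" >= hi    # ...keys of the next tag do not
```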

View File

@@ -70,6 +70,10 @@ type
v2
both
StateDbKind* {.pure.} = enum
sql
file
BeaconNodeConf* = object
logLevel* {.
defaultValue: "INFO"
@@ -144,6 +148,12 @@ type
desc: "The slashing DB flavour to use (v2) [=v2]"
name: "slashing-db-kind" }: SlashingDbKind
stateDbKind* {.
hidden
defaultValue: StateDbKind.sql
desc: "State DB kind (sql, file) [=sql]"
name: "state-db-kind" }: StateDbKind
case cmd* {.
command
defaultValue: noCommand }: BNStartUpCmd

View File

@@ -396,6 +396,7 @@ proc init*(T: type ChainDAGRef,
tmpState.blck = cur.blck
break
if cur.blck.parent != nil and
cur.blck.slot.epoch != epoch(cur.blck.parent.slot):
# We store the state of the parent block with the epoch processing applied
@@ -586,6 +587,10 @@ proc putState*(dag: ChainDAGRef, state: var StateData) =
# is resilient against one or the other going missing
dag.db.putState(state.data.root, state.data.data)
# Allow backwards-compatible version rollback with bounded recovery cost
if getStateField(state, slot).epoch mod 256 == 0:
dag.db.putStateFull(state.data.root, state.data.data)
dag.db.putStateRoot(
state.blck.root, getStateField(state, slot), state.data.root)
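For scale, a back-of-envelope on the 256-epoch cadence, assuming mainnet's 32 slots per epoch and 12-second slots (neither is stated in this diff): a full state lands on disk roughly once every 27 hours, which is what bounds the recovery cost after a version rollback.
```nim
const
  slotsPerEpoch = 32      # mainnet assumption
  secondsPerSlot = 12     # mainnet assumption
  cadenceEpochs = 256
# 256 epochs * 32 slots * 12 s = 98304 s, i.e. just over 27 hours
doAssert cadenceEpochs * slotsPerEpoch * secondsPerSlot == 98304
doAssert 98304 div 3600 == 27
```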
@@ -1095,6 +1100,11 @@ proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): bool =
if not (headBlock.isSome() and tailBlock.isSome()):
return false
# 1.1 and 1.2 need a compatibility hack
if db.repairGenesisState(tailBlock.get().message.state_root).isErr():
notice "Could not repair genesis state"
return false
if not db.containsState(tailBlock.get().message.state_root):
return false
@@ -1102,7 +1112,7 @@ proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): bool =
proc preInit*(
T: type ChainDAGRef, db: BeaconChainDB,
genesisState, tailState: var BeaconState, tailBlock: TrustedSignedBeaconBlock) =
genesisState, tailState: var BeaconState, tailBlock: SignedBeaconBlock) =
# write a genesis state, the way the ChainDAGRef expects it to be stored in
# database
# TODO probably should just init a block pool with the freshly written
@@ -1116,6 +1126,7 @@ proc preInit*(
validators = tailState.validators.len()
db.putState(tailState)
db.putStateFull(tailState)
db.putBlock(tailBlock)
db.putTailBlock(tailBlock.root)
db.putHeadBlock(tailBlock.root)
@@ -1126,6 +1137,7 @@ proc preInit*(
else:
doAssert genesisState.slot == GENESIS_SLOT
db.putState(genesisState)
db.putStateFull(genesisState)
let genesisBlock = get_initial_beacon_block(genesisState)
db.putBlock(genesisBlock)
db.putStateRoot(genesisBlock.root, GENESIS_SLOT, genesisBlock.message.state_root)
@@ -1133,7 +1145,7 @@
proc setTailState*(dag: ChainDAGRef,
checkpointState: BeaconState,
checkpointBlock: TrustedSignedBeaconBlock) =
checkpointBlock: SignedBeaconBlock) =
# TODO(zah)
# Delete all records up to the tail node. If the tail node is not
# in the database, init the database in a way similar to `preInit`.

View File

@@ -62,8 +62,6 @@ type
when hasGenesisDetection:
activeValidatorsCount*: uint64
DepositsMerkleizer* = SszMerkleizer[depositContractLimit]
Eth1Chain* = object
db: BeaconChainDB
preset: RuntimePreset
@@ -216,7 +214,7 @@ when hasGenesisDetection:
m.genesisValidators.add ImmutableValidatorData(
pubkey: pubkey,
withdrawal_credentials: deposit.withdrawal_credentials)
m.genesisValidatorKeyToIndex[pubkey] = idx
m.genesisValidatorKeyToIndex.insert(pubkey, idx)
proc processGenesisDeposit*(m: Eth1Monitor, newDeposit: DepositData) =
m.db.genesisDeposits.add newDeposit

View File

@@ -103,11 +103,12 @@ proc init*(T: type BeaconNode,
let
db = BeaconChainDB.new(
runtimePreset, config.databaseDir,
inMemory = false)
inMemory = false,
fileStateStorage = config.stateDbKind == StateDbKind.file)
var
genesisState, checkpointState: ref BeaconState
checkpointBlock: TrustedSignedBeaconBlock
checkpointBlock: SignedBeaconBlock
if config.finalizedCheckpointState.isSome:
let checkpointStatePath = config.finalizedCheckpointState.get.string
@@ -128,8 +129,7 @@ proc init*(T: type BeaconNode,
else:
let checkpointBlockPath = config.finalizedCheckpointBlock.get.string
try:
# TODO Perform sanity checks like signature and slot verification at least
checkpointBlock = SSZ.loadFile(checkpointBlockPath, TrustedSignedBeaconBlock)
checkpointBlock = SSZ.loadFile(checkpointBlockPath, SignedBeaconBlock)
except SerializationError as err:
fatal "Invalid checkpoint block", err = err.formatMsg(checkpointBlockPath)
quit 1
@@ -146,7 +146,7 @@ proc init*(T: type BeaconNode,
if not ChainDAGRef.isInitialized(db):
var
tailState: ref BeaconState
tailBlock: TrustedSignedBeaconBlock
tailBlock: SignedBeaconBlock
if genesisStateContents.len == 0 and checkpointState == nil:
when hasGenesisDetection:

View File

@@ -339,15 +339,18 @@ proc initialize_hashed_beacon_state_from_eth1*(
preset, eth1_block_hash, eth1_timestamp, deposits, flags)
HashedBeaconState(data: genesisState[], root: hash_tree_root(genesisState[]))
template emptyBeaconBlockBody(): BeaconBlockBody =
BeaconBlockBody()
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#genesis-block
func get_initial_beacon_block*(state: BeaconState): TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted
let message = TrustedBeaconBlock(
func get_initial_beacon_block*(state: BeaconState): SignedBeaconBlock =
let message = BeaconBlock(
slot: state.slot,
state_root: hash_tree_root(state),)
state_root: hash_tree_root(state),
body: emptyBeaconBlockBody())
# parent_root, randao_reveal, eth1_data, signature, and body automatically
# initialized to default values.
TrustedSignedBeaconBlock(message: message, root: hash_tree_root(message))
SignedBeaconBlock(message: message, root: hash_tree_root(message))
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_block_root_at_slot
func get_block_root_at_slot*(state: BeaconState,

View File

@@ -234,12 +234,3 @@ proc readValue*[T](r: var SszReader, val: var T) {.raises: [Defect, MalformedSsz
# TODO(zah) Read the fixed portion first and precisely measure the
# size of the dynamic portion to consume the right number of bytes.
readSszValue(r.stream.read(r.stream.len.get), val, r.updateRoot)
proc readSszBytes*[T](data: openArray[byte], val: var T, updateRoot = true) {.
raises: [Defect, MalformedSszError, SszSizeMismatchError].} =
when isFixedSize(T):
const minimalSize = fixedPortionSize(T)
if data.len < minimalSize:
raise newException(MalformedSszError, "SSZ input of insufficient size")
readSszValue(data, val, updateRoot)

View File

@@ -54,6 +54,13 @@ type
## or validator client.
db_v2*: SlashingProtectionDB_v2
modes: set[SlashProtDBMode]
disagreementBehavior: DisagreementBehavior
DisagreementBehavior* = enum
## How to handle disagreement between DB versions
kCrash
kChooseV1
kChooseV2
# DB Multiversioning
# -------------------------------------------------------------
@@ -69,7 +76,8 @@ proc init*(
T: type SlashingProtectionDB,
genesis_validators_root: Eth2Digest,
basePath, dbname: string,
modes: set[SlashProtDBMode]
modes: set[SlashProtDBMode],
disagreementBehavior: DisagreementBehavior
): T =
## Initialize or load a slashing protection DB
## This is for Beacon Node usage
@@ -82,6 +90,7 @@ proc init*(
new result
result.modes = modes
result.disagreementBehavior = disagreementBehavior
let (db, requiresMigration) = SlashingProtectionDB_v2.initCompatV1(
genesis_validators_root,
@@ -89,16 +98,16 @@ )
)
result.db_v2 = db
var db_v1: SlashingProtectionDB_v1
let rawdb = kvstore result.db_v2.getRawDBHandle()
if not rawdb.checkOrPutGenesis_DbV1(genesis_validators_root):
fatal "The slashing database refers to another chain/mainnet/testnet",
path = basePath/dbname,
genesis_validators_root = genesis_validators_root
db_v1.fromRawDB(rawdb)
if requiresMigration:
var db_v1: SlashingProtectionDB_v1
let rawdb = kvstore result.db_v2.getRawDBHandle().openKvStore().get()
if not rawdb.checkOrPutGenesis_DbV1(genesis_validators_root):
fatal "The slashing database refers to another chain/mainnet/testnet",
path = basePath/dbname,
genesis_validators_root = genesis_validators_root
db_v1.fromRawDB(rawdb)
info "Migrating local validators slashing DB from v1 to v2"
let spdir = try: db_v1.toSPDIR_lowWatermark()
except IOError as exc:
@@ -119,8 +128,6 @@ proc init*(
fatal "Slashing DB migration failure. Aborting to protect validators."
quit 1
db_v1.close()
proc init*(
T: type SlashingProtectionDB,
genesis_validators_root: Eth2Digest,
@@ -134,6 +141,7 @@ proc init*(
init(
T, genesis_validators_root, basePath, dbname,
modes = {kLowWatermarkV2},
disagreementBehavior = kChooseV2
)
proc loadUnchecked*(
@@ -147,12 +155,13 @@ proc loadUnchecked*(
## Does not handle migration
result.modes = {kCompleteArchiveV1, kCompleteArchiveV2}
result.disagreementBehavior = kCrash
result.db_v2 = SlashingProtectionDB_v2.loadUnchecked(
basePath, dbname, readOnly
)
result.db_v1.fromRawDB(kvstore result.db_v2.getRawDBHandle().openKvStore())
result.db_v1.fromRawDB(kvstore result.db_v2.getRawDBHandle())
proc close*(db: SlashingProtectionDB) =
## Close a slashing protection database

View File

@@ -143,7 +143,6 @@ type
## Database storing the blocks attested
## by validators attached to a beacon node
## or validator client.
db: SqStoreRef
backend: KvStoreRef
SlotDesc = object
@@ -375,8 +374,7 @@ proc init*(
T: type SlashingProtectionDB_v1,
genesis_validators_root: Eth2Digest,
basePath, dbname: string): T =
let db = SqStoreRef.init(basePath, dbname).get()
result = T(db: db, backend: kvStore db.openKvStore().get())
result = T(backend: kvStore SqStoreRef.init(basePath, dbname).get())
if not result.backend.checkOrPutGenesis_DbV1(genesis_validators_root):
fatal "The slashing database refers to another chain/mainnet/testnet",
path = basePath/dbname,
@@ -393,18 +391,16 @@ proc loadUnchecked*(
let alreadyExists = fileExists(path)
if not alreadyExists:
raise newException(IOError, "DB '" & path & "' does not exist.")
let db = SqStoreRef.init(basePath, dbname, readOnly = false).get()
let backend = kvStore db.openKvStore()
let backend = kvStore SqStoreRef.init(basePath, dbname, readOnly = false).get()
doAssert backend.contains(
subkey(kGenesisValidatorsRoot)
).get(), "The Slashing DB is missing genesis information"
result = T(db: db, backend: backend)
result = T(backend: backend)
proc close*(db: SlashingProtectionDB_v1) =
if db.db != nil:
db.db.close()
discard db.backend.close()
# DB Queries

View File

@@ -638,6 +638,7 @@ proc initCompatV1*(T: type SlashingProtectionDB_v2,
result.db = T(backend: SqStoreRef.init(
basePath, dbname,
keyspaces = ["kvstore"] # The key compat part
).get())
if alreadyExists and result.db.getMetadataTable_DbV2().isSome():
result.db.checkDB(genesis_validators_root)

View File

@@ -29,6 +29,10 @@ type
exportEra
validatorPerf
StateDbKind* {.pure.} = enum
sql
file
# TODO:
# This should probably allow specifying a run-time preset
DbConf = object
@@ -41,6 +45,11 @@ type
desc: "The Eth2 network preset to use"
name: "network" }: Option[string]
stateDbKind* {.
defaultValue: StateDbKind.sql
desc: "State DB kind (sql, file) [=sql]"
name: "state-db-kind" }: StateDbKind
case cmd* {.
command
desc: ""
@@ -145,7 +154,8 @@ proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
echo "Opening database..."
let
db = BeaconChainDB.new(
runtimePreset, conf.databaseDir.string,)
runtimePreset, conf.databaseDir.string,
fileStateStorage = conf.stateDbKind == StateDbKind.file)
dbBenchmark = BeaconChainDB.new(runtimePreset, "benchmark")
defer:
db.close()
@@ -426,7 +436,8 @@ proc cmdValidatorPerf(conf: DbConf, runtimePreset: RuntimePreset) =
echo "Opening database..."
let
db = BeaconChainDB.new(
runtimePreset, conf.databaseDir.string,)
runtimePreset, conf.databaseDir.string,
fileStateStorage = conf.stateDbKind == StateDbKind.file)
defer:
db.close()

View File

@@ -115,6 +115,55 @@ suite "Beacon chain DB" & preset():
db.close()
test "sanity check full states" & preset():
var
db = makeTestDB(SLOTS_PER_EPOCH)
dag = init(ChainDAGRef, defaultRuntimePreset, db)
testStates = getTestStates(dag.headState.data)
# Ensure transitions beyond just adding validators and increasing slots
sort(testStates) do (x, y: ref HashedBeaconState) -> int:
cmp($x.root, $y.root)
for state in testStates:
db.putStateFull(state[].data)
let root = hash_tree_root(state[].data)
check:
db.containsState(root)
hash_tree_root(db.getStateRef(root)[]) == root
db.delState(root)
check: not db.containsState(root)
db.close()
test "sanity check full states, reusing buffers" & preset():
var
db = makeTestDB(SLOTS_PER_EPOCH)
dag = init(ChainDAGRef, defaultRuntimePreset, db)
let stateBuffer = BeaconStateRef()
var testStates = getTestStates(dag.headState.data)
# Ensure transitions beyond just adding validators and increasing slots
sort(testStates) do (x, y: ref HashedBeaconState) -> int:
cmp($x.root, $y.root)
for state in testStates:
db.putStateFull(state[].data)
let root = hash_tree_root(state[].data)
check:
db.getState(root, stateBuffer[], noRollback)
db.containsState(root)
hash_tree_root(stateBuffer[]) == root
db.delState(root)
check: not db.containsState(root)
db.close()
test "find ancestors" & preset():
var
db = BeaconChainDB.new(defaultRuntimePreset, "", inMemory = true)
@@ -193,9 +242,10 @@ suite "Beacon chain DB" & preset():
db.putStateDiff(root, stateDiff)
check db.containsStateDiff(root)
let state2 = db.getStateDiff(root)
db.delStateDiff(root)
check db.getStateDiff(root).isNone()
check not db.containsStateDiff(root)
db.close()
check:

View File

@@ -15,7 +15,7 @@ import
export beacon_chain_db, testblockutil, kvstore, kvstore_sqlite3
proc makeTestDB*(tailState: var BeaconState, tailBlock: TrustedSignedBeaconBlock): BeaconChainDB =
proc makeTestDB*(tailState: var BeaconState, tailBlock: SignedBeaconBlock): BeaconChainDB =
result = BeaconChainDB.new(defaultRuntimePreset, "", inMemory = true)
ChainDAGRef.preInit(result, tailState, tailState, tailBlock)

vendor/nim-eth

@@ -1 +1 @@
Subproject commit 0064aec55912fc0f76d033b5640e804f3a577798
Subproject commit 16802c0e5218cce405cd623a554ce95549dd5181