* Revert "Revert "Upgrade database schema" (#2570)"

This reverts commit 6057c2ffb4.

* ssz: fix loading empty lists into existing instances

Not a problem earlier because we didn't reuse instances (see the sketch after this list)

* bump nim-eth

* bump nim-web3
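
A minimal sketch of the empty-list failure mode fixed above, using a plain `seq` in place of the real SSZ list types (hypothetical `readList` helper, not the actual decoder):

```nim
# When the decode target is reused, an empty input must still reset the
# output's size; otherwise elements from a previous decode survive.
proc readList(input: openArray[byte], val: var seq[uint64]) =
  if input.len == 0:
    val.setLen 0  # the fix: clear the reused instance before returning
    return
  # ... decode offsets and elements into `val` ...

var
  reused = @[1'u64, 2, 3]  # instance left over from a previous decode
  empty: seq[byte]
readList(empty, reused)
doAssert reused.len == 0  # would fail without the explicit reset
```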
Jacek Sieka 2021-05-17 18:37:26 +02:00 committed by GitHub
parent 6057c2ffb4
commit 97f4e1fffe
20 changed files with 500 additions and 502 deletions

View File

@ -84,14 +84,12 @@ OK: 11/11 Fail: 0/11 Skip: 0/11
+ empty database [Preset: mainnet] OK
+ find ancestors [Preset: mainnet] OK
+ sanity check blocks [Preset: mainnet] OK
+ sanity check full states [Preset: mainnet] OK
+ sanity check full states, reusing buffers [Preset: mainnet] OK
+ sanity check genesis roundtrip [Preset: mainnet] OK
+ sanity check state diff roundtrip [Preset: mainnet] OK
+ sanity check states [Preset: mainnet] OK
+ sanity check states, reusing buffers [Preset: mainnet] OK
```
OK: 9/9 Fail: 0/9 Skip: 0/9
OK: 7/7 Fail: 0/7 Skip: 0/7
## Beacon state [Preset: mainnet]
```diff
+ Smoke test initialize_beacon_state_from_eth1 [Preset: mainnet] OK
@ -323,4 +321,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 1/1 Fail: 0/1 Skip: 0/1
---TOTAL---
OK: 180/189 Fail: 0/189 Skip: 9/189
OK: 178/187 Fail: 0/187 Skip: 9/187

View File

@ -8,107 +8,52 @@
{.push raises: [Defect].}
import
typetraits, tables,
stew/[assign2, byteutils, endians2, io2, objects, results],
std/[typetraits, tables],
stew/[arrayops, assign2, byteutils, endians2, io2, objects, results],
serialization, chronicles, snappy,
eth/db/[kvstore, kvstore_sqlite3],
./networking/network_metadata, ./beacon_chain_db_immutable,
./spec/[crypto, datatypes, digest, state_transition],
./ssz/[ssz_serialization, merkleization],
./eth1/merkle_minimal,
./filepath
type
# TODO when DirStoreRef and helpers are placed in a separate module, kvStore
# doesn't find it.. :/
# eth/db/kvstore.nim(75, 6) Error: type mismatch: got <DirStoreRef, openArray[byte], openArray[byte]>
DirStoreRef* = ref object of RootObj
# DirStore is an experimental storage based on plain files stored in a
# directory tree - this _might_ be a suitable way of storing large blobs
# efficiently, where sqlite sometimes struggles - see
# https://github.com/status-im/nimbus-eth2/issues/2440
#
# The issue described by 2440 happens when both blocks and states are all
# stored in a single, giant table. The slow deletes have since been
# mitigated by using separate tables.
root: string
proc splitName(db: DirStoreRef, name: openArray[byte]): tuple[dir, file: string] =
# Splitting the name helps keep the number of files per directory down - up
# to 65536 folders will be created
if name.len() > 2:
(db.root & "/" & name.toOpenArray(0, 1).toHex(), name.toOpenArray(2, name.high()).toHex())
else:
(db.root & "/" & "0000", name.toHex())
proc get*(db: DirStoreRef, key: openArray[byte], onData: DataProc): KvResult[bool] =
let
(root, name) = db.splitName(key)
fileName = root & "/" & name
var data: seq[byte]
if readFile(fileName, data).isOk():
onData(data)
ok(true)
else:
# Serious errors are caught when writing, so we simplify things and say
# the entry doesn't exist if for any reason we can't read it
# TODO align this with `contains` that simply checks if the file exists
ok(false)
proc del*(db: DirStoreRef, key: openArray[byte]): KvResult[void] =
let
(root, name) = db.splitName(key)
fileName = root & "/" & name
removeFile(fileName).mapErr(ioErrorMsg)
proc contains*(db: DirStoreRef, key: openArray[byte]): KvResult[bool] =
let
(root, name) = db.splitName(key)
fileName = root & "/" & name
ok(isFile(fileName))
proc put*(db: DirStoreRef, key, val: openArray[byte]): KvResult[void] =
let
(root, name) = db.splitName(key)
fileName = root & "/" & name
? createPath(root).mapErr(ioErrorMsg)
? io2.writeFile(fileName, val).mapErr(ioErrorMsg)
ok()
proc close*(db: DirStoreRef): KvResult[void] =
discard
proc init*(T: type DirStoreRef, root: string): T =
T(
root: root,
)
type
DbSeq*[T] = object
insertStmt: SqliteStmt[openArray[byte], void]
selectStmt: SqliteStmt[int64, openArray[byte]]
recordCount: int64
DbMap*[K, V] = object
db: SqStoreRef
keyspace: int
DepositsSeq = DbSeq[DepositData]
ImmutableValidatorsSeq = DbSeq[ImmutableValidatorData]
DepositsMerkleizer* = SszMerkleizer[depositContractLimit]
DepositContractSnapshot* = object
eth1Block*: Eth2Digest
depositContractState*: DepositContractState
BeaconChainDBV0* = ref object
## BeaconChainDBV0 based on old kvstore table that sets the WITHOUT ROWID
## option which becomes unbearably slow with large blobs. It is used as a
## read-only store to support old versions - by freezing it at its current
## data set, downgrading remains possible since it's no longer touched -
## anyone downgrading will have to sync up whatever they missed.
##
## Newer versions read from the new tables first - if the data is not found,
## they turn to the old tables for reading. Writing is done only to the new
## tables.
##
## V0 stored most data in a single table, prefixing each key with a tag
## identifying the type of data.
##
## 1.1 introduced BeaconStateNoImmutableValidators storage where immutable
## validator data is stored in a separate table and only a partial
## BeaconState is written to kvstore
##
## 1.2 moved BeaconStateNoImmutableValidators to a separate table to
## alleviate some of the btree balancing issues - this doubled the speed but
## was still slow
backend: KvStoreRef # kvstore
stateStore: KvStoreRef # state_no_validators
BeaconChainDB* = ref object
## Database storing resolved blocks and states - resolved blocks are such
## blocks that form a chain back to the tail block.
@ -122,7 +67,9 @@ type
## database - this may have a number of "natural" causes such as switching
## between different versions of the client and accidentally using an old
## database.
backend: KvStoreRef
db: SqStoreRef
v0: BeaconChainDBV0
preset*: RuntimePreset
genesisDeposits*: DepositsSeq
@ -133,12 +80,20 @@ type
checkpoint*: proc() {.gcsafe, raises: [Defect].}
stateStore: KvStoreRef
keyValues: KvStoreRef # Random stuff using DbKeyKind - suitable for small values mainly!
blocks: KvStoreRef # BlockRoot -> TrustedBeaconBlock
stateRoots: KvStoreRef # (Slot, BlockRoot) -> StateRoot
statesNoVal: KvStoreRef # StateRoot -> BeaconStateNoImmutableValidators
stateDiffs: KvStoreRef ##\
## StateRoot -> BeaconStateDiff
## Instead of storing full BeaconStates, one can store only the diff from
## a different state. As 75% of a typical BeaconState's serialized form is
## the validators, which are mostly immutable and append-only, just using
## a simple append-diff representation helps significantly. Various roots
## are stored in a mod-increment pattern across fixed-sized arrays, which
## addresses most of the rest of the BeaconState sizes.
Keyspaces* = enum
defaultKeyspace = "kvstore"
validatorIndexFromPubKey # Unused (?)
stateNoValidators = "state_no_validators"
summaries: KvStoreRef # BlockRoot -> BeaconBlockSummary
DbKeyKind = enum
kHashToState
@ -156,38 +111,26 @@ type
kGenesisBlockRoot
## Immutable reference to the network genesis state
## (needed for satisfying requests to the beacon node API).
kEth1PersistedTo
## (Obsolete) Used to point to the latest ETH1 block hash which
## satisfied the follow distance and had its deposits persisted to disk.
kDepositsFinalizedByEth1
## A merkleizer checkpoint which can be used for computing the
## `deposit_root` of all eth1 finalized deposits (i.e. deposits
## confirmed by ETH1_FOLLOW_DISTANCE blocks). The `deposit_root`
## is acknowledged and confirmed by the attached web3 provider.
kEth1PersistedTo # Obsolete
kDepositsFinalizedByEth1 # Obsolete
kDepositsFinalizedByEth2
## A merkleizer checkpoint used for computing merkle proofs of
## deposits added to Eth2 blocks (it may lag behind the finalized
## eth1 deposits checkpoint).
kHashToBlockSummary
## Cache of beacon block summaries - during startup when we construct the
## chain dag, loading full blocks takes a lot of time - the block
## summary contains a minimal snapshot of what's needed to instantiate
## the BlockRef tree.
kHashToBlockSummary # Block summaries for fast startup
kSpeculativeDeposits
## A merkleizer checkpoint created on the basis of deposit events
## that we were not able to verify against a `deposit_root` served
## by the web3 provider. This may happen on Geth nodes that serve
## only recent contract state data (i.e. only recent `deposit_roots`).
kHashToStateDiff
## Instead of storing full BeaconStates, one can store only the diff from
## a different state. As 75% of a typical BeaconState's serialized form is
## the validators, which are mostly immutable and append-only, just using
## a simple append-diff representation helps significantly. Various roots
## are stored in a mod-increment pattern across fixed-sized arrays, which
## addresses most of the rest of the BeaconState sizes.
kHashToStateDiff # Obsolete
kHashToStateOnlyMutableValidators
BeaconBlockSummary* = object
## Cache of beacon block summaries - during startup when we construct the
## chain dag, loading full blocks takes a lot of time - the block
## summary contains a minimal snapshot of what's needed to instantiate
## the BlockRef tree.
slot*: Slot
parent_root*: Eth2Digest
@ -222,9 +165,6 @@ func subkey(kind: type SignedBeaconBlock, key: Eth2Digest): auto =
func subkey(kind: type BeaconBlockSummary, key: Eth2Digest): auto =
subkey(kHashToBlockSummary, key.data)
func subkey(kind: type BeaconStateDiff, key: Eth2Digest): auto =
subkey(kHashToStateDiff, key.data)
func subkey(root: Eth2Digest, slot: Slot): array[40, byte] =
var ret: array[40, byte]
# big endian to get a naturally ascending order on slots in sorted indices
@ -241,41 +181,51 @@ template panic =
# Review all usages.
raiseAssert "The database should not be corrupted"
template expectDb(x: auto): untyped =
# There's no meaningful error handling implemented for a corrupt database or
# full disk - this requires manual intervention, so we'll panic for now
x.expect("working database (disk broken/full?)")
proc init*[T](Seq: type DbSeq[T], db: SqStoreRef, name: string): Seq =
db.exec("""
CREATE TABLE IF NOT EXISTS """ & name & """(
id INTEGER PRIMARY KEY,
value BLOB
);
""").expect "working database (disk broken/full?)"
""").expectDb()
let
insertStmt = db.prepareStmt(
"INSERT INTO " & name & "(value) VALUES (?);",
openArray[byte], void).expect("this is a valid statement")
openArray[byte], void, managed = false).expect("this is a valid statement")
selectStmt = db.prepareStmt(
"SELECT value FROM " & name & " WHERE id = ?;",
int64, openArray[byte]).expect("this is a valid statement")
int64, openArray[byte], managed = false).expect("this is a valid statement")
countStmt = db.prepareStmt(
"SELECT COUNT(1) FROM " & name & ";",
NoParams, int64).expect("this is a valid statement")
NoParams, int64, managed = false).expect("this is a valid statement")
var recordCount = int64 0
let countQueryRes = countStmt.exec do (res: int64):
recordCount = res
let found = countQueryRes.expect("working database (disk broken/full?)")
let found = countQueryRes.expectDb()
if not found: panic()
countStmt.dispose()
Seq(insertStmt: insertStmt,
selectStmt: selectStmt,
recordCount: recordCount)
proc close*(s: DbSeq) =
s.insertStmt.dispose()
s.selectStmt.dispose()
proc add*[T](s: var DbSeq[T], val: T) =
var bytes = SSZ.encode(val)
s.insertStmt.exec(bytes).expect "working database (disk broken/full?)"
s.insertStmt.exec(bytes).expectDb()
inc s.recordCount
template len*[T](s: DbSeq[T]): int64 =
@ -291,124 +241,112 @@ proc get*[T](s: DbSeq[T], idx: int64): T =
except SerializationError:
panic()
let found = queryRes.expect("working database (disk broken/full?)")
let found = queryRes.expectDb()
if not found: panic()
proc createMap*(db: SqStoreRef, keyspace: int;
K, V: distinct type): DbMap[K, V] =
DbMap[K, V](db: db, keyspace: keyspace)
proc insert*[K, V](m: var DbMap[K, V], key: K, value: V) =
m.db.put(m.keyspace, SSZ.encode key, SSZ.encode value).expect("working database (disk broken/full?)")
proc contains*[K, V](m: DbMap[K, V], key: K): bool =
contains(m.db, SSZ.encode key).expect("working database (disk broken/full?)")
template insert*[K, V](t: var Table[K, V], key: K, value: V) =
add(t, key, value)
proc loadImmutableValidators(db: BeaconChainDB): seq[ImmutableValidatorData] =
# TODO not called, but build fails otherwise
for i in 0 ..< db.immutableValidators.len:
result.add db.immutableValidators.get(i)
type
SqKeyspaceStoreRef* = ref object of RootObj
# Wrapper around SqStoreRef to target a particular keyspace - using
# keyspaces helps keep performance decent when using large blobs in tables
# that otherwise contain lots of rows.
db: SqStoreRef
keyspace: int
proc get*(db: SqKeyspaceStoreRef, key: openArray[byte], onData: DataProc): KvResult[bool] =
get(db.db, db.keyspace, key, onData)
proc del*(db: SqKeyspaceStoreRef, key: openArray[byte]): KvResult[void] =
del(db.db, db.keyspace, key)
proc contains*(db: SqKeyspaceStoreRef, key: openArray[byte]): KvResult[bool] =
contains(db.db, db.keyspace, key)
proc put*(db: SqKeyspaceStoreRef, key, val: openArray[byte]): KvResult[void] =
put(db.db, db.keyspace, key, val)
proc close*(db: SqKeyspaceStoreRef): KvResult[void] =
ok() # Gets closed with the "default" keyspace
proc init(T: type SqKeyspaceStoreRef, db: SqStoreRef, keyspace: Keyspaces): T =
T(
db: db,
keyspace: int(keyspace)
)
proc new*(T: type BeaconChainDB,
preset: RuntimePreset,
dir: string,
inMemory = false,
fileStateStorage = false,
): BeaconChainDB =
var sqliteStore = if inMemory:
SqStoreRef.init("", "test", Keyspaces, inMemory = true).expect(
var db = if inMemory:
SqStoreRef.init("", "test", inMemory = true).expect(
"working database (out of memory?)")
else:
let s = secureCreatePath(dir)
doAssert s.isOk # TODO(zah) Handle this in a better way
SqStoreRef.init(
dir, "nbc", Keyspaces,
manualCheckpoint = true).expect("working database (disk broken/full?)")
dir, "nbc", manualCheckpoint = true).expectDb()
# Remove the deposits table we used before we switched
# to storing only deposit contract checkpoints
if sqliteStore.exec("DROP TABLE IF EXISTS deposits;").isErr:
if db.exec("DROP TABLE IF EXISTS deposits;").isErr:
debug "Failed to drop the deposits table"
var
genesisDepositsSeq =
DbSeq[DepositData].init(sqliteStore, "genesis_deposits")
immutableValidatorsSeq =
DbSeq[ImmutableValidatorData].init(sqliteStore, "immutable_validators")
backend = kvStore sqliteStore
stateStore =
if inMemory or (not fileStateStorage):
kvStore SqKeyspaceStoreRef.init(sqliteStore, stateNoValidators)
else:
kvStore DirStoreRef.init(dir & "/state")
# V0 compatibility tables
backend = kvStore db.openKvStore().expectDb()
stateStore = kvStore db.openKvStore("state_no_validators").expectDb()
T(backend: backend,
genesisDepositsSeq =
DbSeq[DepositData].init(db, "genesis_deposits")
immutableValidatorsSeq =
DbSeq[ImmutableValidatorData].init(db, "immutable_validators")
# V1 - expected-to-be small rows get without rowid optimizations
keyValues = kvStore db.openKvStore("key_values", true).expectDb()
blocks = kvStore db.openKvStore("blocks").expectDb()
stateRoots = kvStore db.openKvStore("state_roots", true).expectDb()
statesNoVal = kvStore db.openKvStore("state_no_validators2").expectDb()
stateDiffs = kvStore db.openKvStore("state_diffs").expectDb()
summaries = kvStore db.openKvStore("beacon_block_summaries", true).expectDb()
T(
db: db,
v0: BeaconChainDBV0(
backend: backend,
stateStore: stateStore,
),
preset: preset,
genesisDeposits: genesisDepositsSeq,
immutableValidators: immutableValidatorsSeq,
immutableValidatorsMem: loadImmutableValidators(immutableValidatorsSeq),
checkpoint: proc() = sqliteStore.checkpoint(),
stateStore: stateStore,
)
checkpoint: proc() = db.checkpoint(),
keyValues: keyValues,
blocks: blocks,
stateRoots: stateRoots,
statesNoVal: statesNoVal,
stateDiffs: stateDiffs,
summaries: summaries,
)
proc snappyEncode(inp: openArray[byte]): seq[byte] =
proc decodeSSZ[T](data: openArray[byte], output: var T): bool =
try:
snappy.encode(inp)
except CatchableError as err:
raiseAssert err.msg
readSszBytes(data, output, updateRoot = false)
true
except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
warn "Unable to deserialize data, old database?",
err = e.msg, typ = name(T), dataLen = data.len
false
proc sszEncode(v: auto): seq[byte] =
proc decodeSnappySSZ[T](data: openArray[byte], output: var T): bool =
try:
let decompressed = snappy.decode(data, maxDecompressedDbRecordSize)
readSszBytes(decompressed, output, updateRoot = false)
true
except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
warn "Unable to deserialize data, old database?",
err = e.msg, typ = name(T), dataLen = data.len
false
proc encodeSSZ(v: auto): seq[byte] =
try:
SSZ.encode(v)
except IOError as err:
# In-memory encode shouldn't fail!
raiseAssert err.msg
proc putRaw(db: KvStoreRef, key: openArray[byte], v: Eth2Digest) =
db.put(key, v.data).expect("working database (disk broken/full?)")
proc putEncoded(db: KvStoreRef, key: openArray[byte], v: auto) =
db.put(key, snappyEncode(sszEncode(v))).expect(
"working database (disk broken/full?)")
proc encodeSnappySSZ(v: auto): seq[byte] =
try:
snappy.encode(SSZ.encode(v))
except CatchableError as err:
# In-memory encode shouldn't fail!
raiseAssert err.msg
proc getRaw(db: KvStoreRef, key: openArray[byte], T: type Eth2Digest): Opt[T] =
var res: Opt[T]
proc decode(data: openArray[byte]) =
if data.len == 32:
res.ok Eth2Digest(data: toArray(32, data))
if data.len == sizeof(Eth2Digest):
res.ok Eth2Digest(data: toArray(sizeof(Eth2Digest), data))
else:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
@ -416,63 +354,89 @@ proc getRaw(db: KvStoreRef, key: openArray[byte], T: type Eth2Digest): Opt[T] =
typ = name(T), dataLen = data.len
discard
discard db.get(key, decode).expect("working database (disk broken/full?)")
discard db.get(key, decode).expectDb()
res
proc putRaw(db: KvStoreRef, key: openArray[byte], v: Eth2Digest) =
db.put(key, v.data).expectDb()
type GetResult = enum
found = "Found"
notFound = "Not found"
corrupted = "Corrupted"
proc getEncoded[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
proc getSSZ[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
var status = GetResult.notFound
# TODO address is needed because there's no way to express lifetimes in nim
# we'll use unsafeAddr to find the code later
var outputPtr = unsafeAddr output # callback is local, ptr won't escape
proc decode(data: openArray[byte]) =
try:
let decompressed = snappy.decode(data, maxDecompressedDbRecordSize)
if decompressed.len > 0:
outputPtr[] = SSZ.decode(decompressed, T, updateRoot = false)
status = GetResult.found
else:
warn "Corrupt snappy record found in database", typ = name(T)
status = GetResult.corrupted
except SerializationError as e:
# If the data can't be deserialized, it could be because it's from a
# version of the software that uses a different SSZ encoding
warn "Unable to deserialize data, old database?",
err = e.msg, typ = name(T), dataLen = data.len
status = GetResult.corrupted
status =
if decodeSSZ(data, outputPtr[]): GetResult.found
else: GetResult.corrupted
discard db.get(key, decode).expect("working database (disk broken/full?)")
discard db.get(key, decode).expectDb()
status
proc close*(db: BeaconChainDB) =
proc putSSZ(db: KvStoreRef, key: openArray[byte], v: auto) =
db.put(key, encodeSSZ(v)).expectDb()
proc getSnappySSZ[T](db: KvStoreRef, key: openArray[byte], output: var T): GetResult =
var status = GetResult.notFound
# TODO address is needed because there's no way to express lifetimes in nim
# we'll use unsafeAddr to find the code later
var outputPtr = unsafeAddr output # callback is local, ptr won't escape
proc decode(data: openArray[byte]) =
status =
if decodeSnappySSZ(data, outputPtr[]): GetResult.found
else: GetResult.corrupted
discard db.get(key, decode).expectDb()
status
proc putSnappySSZ(db: KvStoreRef, key: openArray[byte], v: auto) =
db.put(key, encodeSnappySSZ(v)).expectDb()
proc close*(db: BeaconChainDBV0) =
discard db.stateStore.close()
discard db.backend.close()
proc close*(db: BeaconChainDB) =
if db.db == nil: return
# Close things in reverse order
discard db.summaries.close()
discard db.stateDiffs.close()
discard db.statesNoVal.close()
discard db.stateRoots.close()
discard db.blocks.close()
discard db.keyValues.close()
db.immutableValidators.close()
db.genesisDeposits.close()
db.v0.close()
db.db.close()
db.db = nil
func toBeaconBlockSummary(v: SomeBeaconBlock): BeaconBlockSummary =
BeaconBlockSummary(
slot: v.slot,
parent_root: v.parent_root,
)
# TODO: we should only store TrustedSignedBeaconBlock in the DB.
proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) =
db.backend.putEncoded(subkey(type value, value.root), value)
db.backend.putEncoded(
subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())
proc putBeaconBlockSummary(
db: BeaconChainDB, root: Eth2Digest, value: BeaconBlockSummary) =
# Summaries are too simple / small to compress, store them as plain SSZ
db.summaries.putSSZ(root.data, value)
proc putBlock*(db: BeaconChainDB, value: TrustedSignedBeaconBlock) =
db.backend.putEncoded(subkey(SignedBeaconBlock, value.root), value)
db.backend.putEncoded(
subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())
proc putBlock*(db: BeaconChainDB, value: SigVerifiedSignedBeaconBlock) =
db.backend.putEncoded(subkey(SignedBeaconBlock, value.root), value)
db.backend.putEncoded(
subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())
db.blocks.putSnappySSZ(value.root.data, value)
db.putBeaconBlockSummary(value.root, value.message.toBeaconBlockSummary())
proc updateImmutableValidators(
db: BeaconChainDB, immutableValidators: var seq[ImmutableValidatorData],
@ -494,80 +458,76 @@ proc updateImmutableValidators(
immutableValidators.add immutableValidator
proc putState*(db: BeaconChainDB, key: Eth2Digest, value: var BeaconState) =
updateImmutableValidators(db, db.immutableValidatorsMem, value.validators)
db.stateStore.putEncoded(
subkey(BeaconStateNoImmutableValidators, key),
db.updateImmutableValidators(db.immutableValidatorsMem, value.validators)
db.statesNoVal.putSnappySSZ(
key.data,
isomorphicCast[BeaconStateNoImmutableValidators](value))
proc putState*(db: BeaconChainDB, value: var BeaconState) =
db.putState(hash_tree_root(value), value)
proc putStateFull*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
db.backend.putEncoded(subkey(BeaconState, key), value)
func stateRootKey(root: Eth2Digest, slot: Slot): array[40, byte] =
var ret: array[40, byte]
# big endian to get a naturally ascending order on slots in sorted indices
ret[0..<8] = toBytesBE(slot.uint64)
ret[8..<40] = root.data
proc putStateFull*(db: BeaconChainDB, value: BeaconState) =
db.putStateFull(hash_tree_root(value), value)
ret
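
The big-endian encoding in `stateRootKey` matters because SQLite orders BLOB keys byte-wise, so big-endian slots sort in numeric order in the index. A quick check of the property, comparing hex renderings (which mirror byte-wise order; `stew` helpers as already imported in this file):

```nim
import stew/[byteutils, endians2]

# Byte-wise order of big-endian encodings matches numeric slot order;
# little-endian encodings would sort slot 256 before slot 1.
doAssert toBytesBE(1'u64).toHex() < toBytesBE(256'u64).toHex()
doAssert toBytesLE(1'u64).toHex() > toBytesLE(256'u64).toHex()
```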
proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
value: Eth2Digest) =
db.backend.putRaw(subkey(root, slot), value)
db.stateRoots.putRaw(stateRootKey(root, slot), value)
proc putStateDiff*(db: BeaconChainDB, root: Eth2Digest, value: BeaconStateDiff) =
db.backend.putEncoded(subkey(BeaconStateDiff, root), value)
db.stateDiffs.putSnappySSZ(root.data, value)
proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.del(subkey(SignedBeaconBlock, key)).expect("working database (disk broken/full?)")
db.backend.del(subkey(BeaconBlockSummary, key)).expect("working database (disk broken/full?)")
db.blocks.del(key.data).expectDb()
db.summaries.del(key.data).expectDb()
proc delState*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.del(subkey(BeaconState, key)).expect("working database (disk broken/full?)")
db.stateStore.del(subkey(BeaconStateNoImmutableValidators, key)).expect(
"working filesystem (disk broken/full?)")
db.statesNoVal.del(key.data).expectDb()
proc delStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot) =
db.backend.del(subkey(root, slot)).expect("working database (disk broken/full?)")
db.stateRoots.del(stateRootKey(root, slot)).expectDb()
proc delStateDiff*(db: BeaconChainDB, root: Eth2Digest) =
db.backend.del(subkey(BeaconStateDiff, root)).expect("working database (disk broken/full?)")
db.stateDiffs.del(root.data).expectDb()
proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.putRaw(subkey(kHeadBlock), key)
db.keyValues.putRaw(subkey(kHeadBlock), key)
proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.putRaw(subkey(kTailBlock), key)
db.keyValues.putRaw(subkey(kTailBlock), key)
proc putGenesisBlockRoot*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.putRaw(subkey(kGenesisBlockRoot), key)
proc putEth1FinalizedTo*(db: BeaconChainDB,
eth1Checkpoint: DepositContractSnapshot) =
db.backend.putEncoded(subkey(kDepositsFinalizedByEth1), eth1Checkpoint)
db.keyValues.putRaw(subkey(kGenesisBlockRoot), key)
proc putEth2FinalizedTo*(db: BeaconChainDB,
eth1Checkpoint: DepositContractSnapshot) =
db.backend.putEncoded(subkey(kDepositsFinalizedByEth2), eth1Checkpoint)
db.keyValues.putSnappySSZ(subkey(kDepositsFinalizedByEth2), eth1Checkpoint)
proc putSpeculativeDeposits*(db: BeaconChainDB,
eth1Checkpoint: DepositContractSnapshot) =
db.backend.putEncoded(subkey(kSpeculativeDeposits), eth1Checkpoint)
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
proc getBlock(db: BeaconChainDBV0, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
# We only store blocks that we trust in the database
result.ok(TrustedSignedBeaconBlock())
if db.backend.getEncoded(subkey(SignedBeaconBlock, key), result.get) != GetResult.found:
if db.backend.getSnappySSZ(subkey(SignedBeaconBlock, key), result.get) != GetResult.found:
result.err()
else:
# set root after deserializing (so it doesn't get zeroed)
result.get().root = key
proc getBlockSummary*(db: BeaconChainDB, key: Eth2Digest): Opt[BeaconBlockSummary] =
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock] =
# We only store blocks that we trust in the database
result.ok(BeaconBlockSummary())
if db.backend.getEncoded(subkey(BeaconBlockSummary, key), result.get) != GetResult.found:
result.err()
result.ok(TrustedSignedBeaconBlock())
if db.blocks.getSnappySSZ(key.data, result.get) != GetResult.found:
result = db.v0.getBlock(key)
else:
# set root after deserializing (so it doesn't get zeroed)
result.get().root = key
proc getStateOnlyMutableValidators(
db: BeaconChainDB, store: KvStoreRef, key: Eth2Digest, output: var BeaconState,
immutableValidatorsMem: openArray[ImmutableValidatorData],
store: KvStoreRef, key: openArray[byte], output: var BeaconState,
rollback: RollbackProc): bool =
## Load state into `output` - BeaconState is large so we want to avoid
## re-allocating it if possible
@ -580,23 +540,20 @@ proc getStateOnlyMutableValidators(
# TODO RVO is inefficient for large objects:
# https://github.com/nim-lang/Nim/issues/13879
case store.getEncoded(
subkey(
BeaconStateNoImmutableValidators, key),
isomorphicCast[BeaconStateNoImmutableValidators](output))
case store.getSnappySSZ(
key, isomorphicCast[BeaconStateNoImmutableValidators](output))
of GetResult.found:
let numValidators = output.validators.len
doAssert db.immutableValidatorsMem.len >= numValidators
doAssert immutableValidatorsMem.len >= numValidators
for i in 0 ..< numValidators:
let
# Bypass hash cache invalidation
dstValidator = addr output.validators.data[i]
srcValidator = addr db.immutableValidatorsMem[i]
assign(dstValidator.pubkey, srcValidator.pubkey)
assign(dstValidator.pubkey, immutableValidatorsMem[i].pubkey)
assign(dstValidator.withdrawal_credentials,
srcValidator.withdrawal_credentials)
immutableValidatorsMem[i].withdrawal_credentials)
output.validators.resetCache()
@ -607,6 +564,35 @@ proc getStateOnlyMutableValidators(
rollback(output)
false
proc getState(
db: BeaconChainDBV0,
immutableValidatorsMem: openArray[ImmutableValidatorData],
key: Eth2Digest, output: var BeaconState,
rollback: RollbackProc): bool =
# Nimbus 1.0 reads and writes genesis BeaconState to `backend`
# Nimbus 1.1 writes a genesis BeaconStateNoImmutableValidators to `backend` and
# reads both BeaconState and BeaconStateNoImmutableValidators from `backend`
# Nimbus 1.2 writes a genesis BeaconStateNoImmutableValidators to `stateStore`
# and reads BeaconState from `backend` and BeaconStateNoImmutableValidators
# from `stateStore`. We will try to read the state from all these locations.
if getStateOnlyMutableValidators(
immutableValidatorsMem, db.stateStore,
subkey(BeaconStateNoImmutableValidators, key), output, rollback):
return true
if getStateOnlyMutableValidators(
immutableValidatorsMem, db.backend,
subkey(BeaconStateNoImmutableValidators, key), output, rollback):
return true
case db.backend.getSnappySSZ(subkey(BeaconState, key), output)
of GetResult.found:
true
of GetResult.notFound:
false
of GetResult.corrupted:
rollback(output)
false
proc getState*(
db: BeaconChainDB, key: Eth2Digest, output: var BeaconState,
rollback: RollbackProc): bool =
@ -620,99 +606,74 @@ proc getState*(
# https://github.com/nim-lang/Nim/issues/14126
# TODO RVO is inefficient for large objects:
# https://github.com/nim-lang/Nim/issues/13879
if getStateOnlyMutableValidators(db, db.stateStore, key, output, rollback):
return true
case db.backend.getEncoded(subkey(BeaconState, key), output)
of GetResult.found:
if not getStateOnlyMutableValidators(
db.immutableValidatorsMem, db.statesNoVal, key.data, output, rollback):
db.v0.getState(db.immutableValidatorsMem, key, output, rollback)
else:
true
of GetResult.notFound:
false
of GetResult.corrupted:
rollback(output)
false
proc getStateRoot*(db: BeaconChainDB,
proc getStateRoot(db: BeaconChainDBV0,
root: Eth2Digest,
slot: Slot): Opt[Eth2Digest] =
db.backend.getRaw(subkey(root, slot), Eth2Digest)
proc getStateRoot*(db: BeaconChainDB,
root: Eth2Digest,
slot: Slot): Opt[Eth2Digest] =
db.stateRoots.getRaw(stateRootKey(root, slot), Eth2Digest) or
db.v0.getStateRoot(root, slot)
proc getStateDiff*(db: BeaconChainDB,
root: Eth2Digest): Opt[BeaconStateDiff] =
result.ok(BeaconStateDiff())
if db.backend.getEncoded(subkey(BeaconStateDiff, root), result.get) != GetResult.found:
if db.stateDiffs.getSnappySSZ(root.data, result.get) != GetResult.found:
result.err
proc getHeadBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
proc getHeadBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
db.backend.getRaw(subkey(kHeadBlock), Eth2Digest)
proc getTailBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
proc getHeadBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
db.keyValues.getRaw(subkey(kHeadBlock), Eth2Digest) or
db.v0.getHeadBlock()
proc getTailBlock(db: BeaconChainDBV0): Opt[Eth2Digest] =
db.backend.getRaw(subkey(kTailBlock), Eth2Digest)
proc getTailBlock*(db: BeaconChainDB): Opt[Eth2Digest] =
db.keyValues.getRaw(subkey(kTailBlock), Eth2Digest) or
db.v0.getTailBlock()
proc getGenesisBlockRoot(db: BeaconChainDBV0): Eth2Digest =
db.backend.getRaw(subkey(kGenesisBlockRoot), Eth2Digest).expectDb()
proc getGenesisBlockRoot*(db: BeaconChainDB): Eth2Digest =
db.backend.getRaw(subkey(kGenesisBlockRoot), Eth2Digest).expect(
db.keyValues.getRaw(subkey(kGenesisBlockRoot), Eth2Digest).expect(
"The database must be seeded with the genesis state")
proc getEth1FinalizedTo*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
proc getEth2FinalizedTo(db: BeaconChainDBV0): Opt[DepositContractSnapshot] =
result.ok(DepositContractSnapshot())
let r = db.backend.getEncoded(subkey(kDepositsFinalizedByEth1), result.get)
let r = db.backend.getSnappySSZ(subkey(kDepositsFinalizedByEth2), result.get)
if r != found: result.err()
proc getEth2FinalizedTo*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
result.ok(DepositContractSnapshot())
let r = db.backend.getEncoded(subkey(kDepositsFinalizedByEth2), result.get)
if r != found: result.err()
let r = db.keyValues.getSnappySSZ(subkey(kDepositsFinalizedByEth2), result.get)
if r != found: return db.v0.getEth2FinalizedTo()
proc getSpeculativeDeposits*(db: BeaconChainDB): Opt[DepositContractSnapshot] =
result.ok(DepositContractSnapshot())
let r = db.backend.getEncoded(subkey(kSpeculativeDeposits), result.get)
if r != found: result.err()
proc delSpeculativeDeposits*(db: BeaconChainDB) =
db.backend.del(subkey(kSpeculativeDeposits)).expect("working database (disk broken/full?)")
proc containsBlock*(db: BeaconChainDBV0, key: Eth2Digest): bool =
db.backend.contains(subkey(SignedBeaconBlock, key)).expectDb()
proc containsBlock*(db: BeaconChainDB, key: Eth2Digest): bool =
db.backend.contains(subkey(SignedBeaconBlock, key)).expect("working database (disk broken/full?)")
db.blocks.contains(key.data).expectDb() or db.v0.containsBlock(key)
proc containsState*(db: BeaconChainDBV0, key: Eth2Digest): bool =
let sk = subkey(BeaconStateNoImmutableValidators, key)
db.stateStore.contains(sk).expectDb() or
db.backend.contains(sk).expectDb() or
db.backend.contains(subkey(BeaconState, key)).expectDb
proc containsState*(db: BeaconChainDB, key: Eth2Digest): bool =
db.stateStore.contains(subkey(BeaconStateNoImmutableValidators, key)).expect(
"working database (disk broken/full?)") or
db.backend.contains(subkey(BeaconState, key)).expect("working database (disk broken/full?)")
proc containsStateDiff*(db: BeaconChainDB, key: Eth2Digest): bool =
db.backend.contains(subkey(BeaconStateDiff, key)).expect("working database (disk broken/full?)")
proc repairGenesisState*(db: BeaconChainDB, key: Eth2Digest): KvResult[void] =
# Nimbus 1.0 reads and writes genesis BeaconState to `backend`
# Nimbus 1.1 writes a genesis BeaconStateNoImmutableValidators to `backend` and
# reads both BeaconState and BeaconStateNoImmutableValidators from `backend`
# Nimbus 1.2 writes a genesis BeaconStateNoImmutableValidators to `stateStore`
# and reads BeaconState from `backend` and BeaconStateNoImmutableValidators
# from `stateStore`. This means that 1.2 cannot read a database created with
# 1.1 and earlier versions can't read databases created with either of 1.1
# and 1.2.
# Here, we will try to repair the database so that no matter what, there will
# be a `BeaconState` in `backend`:
if ? db.backend.contains(subkey(BeaconState, key)):
# No compatibility issues, life goes on
discard
elif ? db.backend.contains(subkey(BeaconStateNoImmutableValidators, key)):
# 1.1 writes this but not a full state - rewrite a full state
var output = new BeaconState
if not getStateOnlyMutableValidators(db, db.backend, key, output[], noRollback):
return err("Cannot load partial state")
putStateFull(db, output[])
elif ? db.stateStore.contains(subkey(BeaconStateNoImmutableValidators, key)):
# 1.2 writes this but not a full state - rewrite a full state
var output = new BeaconState
if not getStateOnlyMutableValidators(db, db.stateStore, key, output[], noRollback):
return err("Cannot load partial state")
putStateFull(db, output[])
ok()
db.statesNoVal.contains(key.data).expectDb or db.v0.containsState(key)
iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
TrustedSignedBeaconBlock =
@ -724,11 +685,29 @@ iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
var
res: TrustedSignedBeaconBlock
root = root
while db.backend.getEncoded(subkey(SignedBeaconBlock, root), res) == GetResult.found:
while db.blocks.getSnappySSZ(root.data, res) == GetResult.found or
db.v0.backend.getSnappySSZ(
subkey(SignedBeaconBlock, root), res) == GetResult.found:
res.root = root
yield res
root = res.message.parent_root
proc loadSummaries(db: BeaconChainDB): Table[Eth2Digest, BeaconBlockSummary] =
# Load summaries into table - there's no telling what order they're in so we
# load them all - bugs in nim prevent this code from living in the iterator.
var summaries = initTable[Eth2Digest, BeaconBlockSummary](1024*1024)
discard db.summaries.find([], proc(k, v: openArray[byte]) =
var output: BeaconBlockSummary
if k.len() == sizeof(Eth2Digest) and decodeSSZ(v, output):
summaries[Eth2Digest(data: toArray(sizeof(Eth2Digest), k))] = output
else:
warn "Invalid summary in database", klen = k.len(), vlen = v.len()
)
summaries
iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
tuple[root: Eth2Digest, summary: BeaconBlockSummary] =
## Load a chain of ancestors for blck - returns a list of blocks with the
@ -736,21 +715,45 @@ iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
##
## The search will go on until the ancestor cannot be found.
# Summaries are loaded from the dedicated summaries table. For backwards
# compatibility, we also load from `kvstore` and finally, if no summaries
# can be found, by loading the blocks instead.
# First, load the full summary table into memory in one query - this makes
# initial startup very fast.
var
summaries = db.loadSummaries()
res: tuple[root: Eth2Digest, summary: BeaconBlockSummary]
tmp: TrustedSignedBeaconBlock
root = root
blck: TrustedSignedBeaconBlock
foundOldSummary = false
res.root = root
# Yield summaries in reverse chain order by walking the parent references.
# If a summary is missing, try loading it from the older version or create one
# from block data.
while true:
if db.backend.getEncoded(subkey(BeaconBlockSummary, root), res.summary) == GetResult.found:
res.root = root
summaries.withValue(res.root, summary) do:
res.summary = summary[]
yield res
elif db.backend.getEncoded(subkey(SignedBeaconBlock, root), tmp) == GetResult.found:
res.summary = tmp.message.toBeaconBlockSummary()
db.backend.putEncoded(subkey(BeaconBlockSummary, root), res.summary)
res.root = root
yield res
else:
break
do: # Summary was not found in summary table, look elsewhere
if db.v0.backend.getSnappySSZ(subkey(BeaconBlockSummary, res.root), res.summary) == GetResult.found:
yield res
elif db.v0.backend.getSnappySSZ(subkey(SignedBeaconBlock, res.root), blck) == GetResult.found:
res.summary = blck.message.toBeaconBlockSummary()
yield res
else:
break
# Next time, load them from the right place
db.putBeaconBlockSummary(res.root, res.summary)
root = res.summary.parent_root
res.root = res.summary.parent_root
if false:
# When the current version has been online for a bit, we can safely remove
# summaries from kvstore by enabling this little snippet - if users were
# to downgrade after the summaries have been purged, the old versions that
# use summaries can also recreate them on the fly from blocks.
db.db.exec(
"DELETE FROM kvstore WHERE key >= ? and key < ?",
([byte ord(kHashToBlockSummary)], [byte ord(kHashToBlockSummary) + 1])).expectDb()
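
Reads in this file follow the pattern documented on `BeaconChainDBV0`: try the v1 table first and fall back to the frozen v0 store on a miss, as in `getHeadBlock` and `getStateRoot` above. A minimal sketch of that `or` chaining, assuming the `Opt` helpers from `stew/results` used throughout:

```nim
import stew/results

# Stand-ins for keyValues.getRaw(...) and v0.getHeadBlock()
proc readV1(): Opt[int] = Opt.none(int)  # miss in the new table
proc readV0(): Opt[int] = Opt.some(42)   # hit in the old kvstore

# `or` evaluates the v0 fallback only when the v1 lookup misses
let head = readV1() or readV0()
doAssert head.get() == 42
```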

View File

@ -70,10 +70,6 @@ type
v2
both
StateDbKind* {.pure.} = enum
sql
file
BeaconNodeConf* = object
logLevel* {.
defaultValue: "INFO"
@ -148,12 +144,6 @@ type
desc: "The slashing DB flavour to use (v2) [=v2]"
name: "slashing-db-kind" }: SlashingDbKind
stateDbKind* {.
hidden
defaultValue: StateDbKind.sql
desc: "State DB kind (sql, file) [=sql]"
name: "state-db-kind" }: StateDbKind
case cmd* {.
command
defaultValue: noCommand }: BNStartUpCmd

View File

@ -165,6 +165,10 @@ proc addRawBlockCheckStateTransition(
doAssert v.addr == addr dag.clearanceState.data
assign(dag.clearanceState, dag.headState)
logScope:
blck = shortLog(signedBlock.message)
blockRoot = shortLog(signedBlock.root)
var rewards: RewardInfo
if not state_transition(dag.runtimePreset, dag.clearanceState.data, signedBlock,
cache, rewards, dag.updateFlags + {slotProcessed}, restore):
@ -254,6 +258,10 @@ proc addRawBlockUnresolved(
): Result[BlockRef, (ValidationResult, BlockError)] =
## addRawBlock - Block is unresolved / has no parent
logScope:
blck = shortLog(signedBlock.message)
blockRoot = shortLog(signedBlock.root)
# This is an unresolved block - add it to the quarantine, which will cause its
# parent to be scheduled for downloading
if not quarantine.add(dag, signedBlock):

View File

@ -396,7 +396,6 @@ proc init*(T: type ChainDAGRef,
tmpState.blck = cur.blck
break
if cur.blck.parent != nil and
cur.blck.slot.epoch != epoch(cur.blck.parent.slot):
# We store the state of the parent block with the epoch processing applied
@ -587,10 +586,6 @@ proc putState*(dag: ChainDAGRef, state: var StateData) =
# is resilient against one or the other going missing
dag.db.putState(state.data.root, state.data.data)
# Allow backwards-compatible version rollback with bounded recovery cost
if getStateField(state, slot).epoch mod 256 == 0:
dag.db.putStateFull(state.data.root, state.data.data)
dag.db.putStateRoot(
state.blck.root, getStateField(state, slot), state.data.root)
@ -1100,11 +1095,6 @@ proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): bool =
if not (headBlock.isSome() and tailBlock.isSome()):
return false
# 1.1 and 1.2 need a compatibility hack
if db.repairGenesisState(tailBlock.get().message.state_root).isErr():
notice "Could not repair genesis state"
return false
if not db.containsState(tailBlock.get().message.state_root):
return false
@ -1112,7 +1102,7 @@ proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): bool =
proc preInit*(
T: type ChainDAGRef, db: BeaconChainDB,
genesisState, tailState: var BeaconState, tailBlock: SignedBeaconBlock) =
genesisState, tailState: var BeaconState, tailBlock: TrustedSignedBeaconBlock) =
# write a genesis state, the way the ChainDAGRef expects it to be stored in
# database
# TODO probably should just init a block pool with the freshly written
@ -1126,7 +1116,6 @@ proc preInit*(
validators = tailState.validators.len()
db.putState(tailState)
db.putStateFull(tailState)
db.putBlock(tailBlock)
db.putTailBlock(tailBlock.root)
db.putHeadBlock(tailBlock.root)
@ -1137,7 +1126,6 @@ proc preInit*(
else:
doAssert genesisState.slot == GENESIS_SLOT
db.putState(genesisState)
db.putStateFull(genesisState)
let genesisBlock = get_initial_beacon_block(genesisState)
db.putBlock(genesisBlock)
db.putStateRoot(genesisBlock.root, GENESIS_SLOT, genesisBlock.message.state_root)
@ -1145,7 +1133,7 @@ proc preInit*(
proc setTailState*(dag: ChainDAGRef,
checkpointState: BeaconState,
checkpointBlock: SignedBeaconBlock) =
checkpointBlock: TrustedSignedBeaconBlock) =
# TODO(zah)
# Delete all records up to the tail node. If the tail node is not
# in the database, init the database in a way similar to `preInit`.

View File

@ -62,6 +62,8 @@ type
when hasGenesisDetection:
activeValidatorsCount*: uint64
DepositsMerkleizer* = SszMerkleizer[depositContractLimit]
Eth1Chain* = object
db: BeaconChainDB
preset: RuntimePreset
@ -214,7 +216,7 @@ when hasGenesisDetection:
m.genesisValidators.add ImmutableValidatorData(
pubkey: pubkey,
withdrawal_credentials: deposit.withdrawal_credentials)
m.genesisValidatorKeyToIndex.insert(pubkey, idx)
m.genesisValidatorKeyToIndex[pubkey] = idx
proc processGenesisDeposit*(m: Eth1Monitor, newDeposit: DepositData) =
m.db.genesisDeposits.add newDeposit

View File

@ -103,12 +103,11 @@ proc init*(T: type BeaconNode,
let
db = BeaconChainDB.new(
runtimePreset, config.databaseDir,
inMemory = false,
fileStateStorage = config.stateDbKind == StateDbKind.file)
inMemory = false)
var
genesisState, checkpointState: ref BeaconState
checkpointBlock: SignedBeaconBlock
checkpointBlock: TrustedSignedBeaconBlock
if config.finalizedCheckpointState.isSome:
let checkpointStatePath = config.finalizedCheckpointState.get.string
@ -129,7 +128,8 @@ proc init*(T: type BeaconNode,
else:
let checkpointBlockPath = config.finalizedCheckpointBlock.get.string
try:
checkpointBlock = SSZ.loadFile(checkpointBlockPath, SignedBeaconBlock)
# TODO Perform sanity checks like signature and slot verification at least
checkpointBlock = SSZ.loadFile(checkpointBlockPath, TrustedSignedBeaconBlock)
except SerializationError as err:
fatal "Invalid checkpoint block", err = err.formatMsg(checkpointBlockPath)
quit 1
@ -146,7 +146,7 @@ proc init*(T: type BeaconNode,
if not ChainDAGRef.isInitialized(db):
var
tailState: ref BeaconState
tailBlock: SignedBeaconBlock
tailBlock: TrustedSignedBeaconBlock
if genesisStateContents.len == 0 and checkpointState == nil:
when hasGenesisDetection:

View File

@ -339,18 +339,15 @@ proc initialize_hashed_beacon_state_from_eth1*(
preset, eth1_block_hash, eth1_timestamp, deposits, flags)
HashedBeaconState(data: genesisState[], root: hash_tree_root(genesisState[]))
template emptyBeaconBlockBody(): BeaconBlockBody =
BeaconBlockBody()
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#genesis-block
func get_initial_beacon_block*(state: BeaconState): SignedBeaconBlock =
let message = BeaconBlock(
func get_initial_beacon_block*(state: BeaconState): TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted
let message = TrustedBeaconBlock(
slot: state.slot,
state_root: hash_tree_root(state),
body: emptyBeaconBlockBody())
state_root: hash_tree_root(state),)
# parent_root, randao_reveal, eth1_data, signature, and body automatically
# initialized to default values.
SignedBeaconBlock(message: message, root: hash_tree_root(message))
TrustedSignedBeaconBlock(message: message, root: hash_tree_root(message))
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_block_root_at_slot
func get_block_root_at_slot*(state: BeaconState,

View File

@ -168,6 +168,7 @@ func readSszValue*[T](input: openArray[byte],
if input.len == 0:
# This is an empty list.
# The default initialization of the return value is fine.
val.setOutputSize 0
return
elif input.len < offsetSize:
raise newException(MalformedSszError, "SSZ input of insufficient size")

View File

@ -234,3 +234,12 @@ proc readValue*[T](r: var SszReader, val: var T) {.raises: [Defect, MalformedSsz
# TODO(zah) Read the fixed portion first and precisely measure the
# size of the dynamic portion to consume the right number of bytes.
readSszValue(r.stream.read(r.stream.len.get), val, r.updateRoot)
proc readSszBytes*[T](data: openArray[byte], val: var T, updateRoot = true) {.
raises: [Defect, MalformedSszError, SszSizeMismatchError].} =
when isFixedSize(T):
const minimalSize = fixedPortionSize(T)
if data.len < minimalSize:
raise newException(MalformedSszError, "SSZ input of insufficient size")
readSszValue(data, val, updateRoot)
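
A hypothetical round-trip with the new helper, assuming this module's `SSZ` codec is in scope (`updateRoot` only matters for hash-caching types):

```nim
var decoded: uint64
let encoded = SSZ.encode(42'u64)  # uint64 is fixed-size: exactly 8 bytes
# readSszBytes rejects inputs shorter than the type's fixed portion
readSszBytes(encoded, decoded)
doAssert decoded == 42'u64
```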

View File

@ -54,13 +54,6 @@ type
## or validator client.
db_v2*: SlashingProtectionDB_v2
modes: set[SlashProtDBMode]
disagreementBehavior: DisagreementBehavior
DisagreementBehavior* = enum
## How to handle disagreement between DB versions
kCrash
kChooseV1
kChooseV2
# DB Multiversioning
# -------------------------------------------------------------
@ -76,8 +69,7 @@ proc init*(
T: type SlashingProtectionDB,
genesis_validators_root: Eth2Digest,
basePath, dbname: string,
modes: set[SlashProtDBMode],
disagreementBehavior: DisagreementBehavior
modes: set[SlashProtDBMode]
): T =
## Initialize or load a slashing protection DB
## This is for Beacon Node usage
@ -90,7 +82,6 @@ proc init*(
new result
result.modes = modes
result.disagreementBehavior = disagreementBehavior
let (db, requiresMigration) = SlashingProtectionDB_v2.initCompatV1(
genesis_validators_root,
@ -98,16 +89,16 @@ proc init*(
)
result.db_v2 = db
var db_v1: SlashingProtectionDB_v1
let rawdb = kvstore result.db_v2.getRawDBHandle()
if not rawdb.checkOrPutGenesis_DbV1(genesis_validators_root):
fatal "The slashing database refers to another chain/mainnet/testnet",
path = basePath/dbname,
genesis_validators_root = genesis_validators_root
db_v1.fromRawDB(rawdb)
if requiresMigration:
var db_v1: SlashingProtectionDB_v1
let rawdb = kvstore result.db_v2.getRawDBHandle().openKvStore().get()
if not rawdb.checkOrPutGenesis_DbV1(genesis_validators_root):
fatal "The slashing database refers to another chain/mainnet/testnet",
path = basePath/dbname,
genesis_validators_root = genesis_validators_root
db_v1.fromRawDB(rawdb)
info "Migrating local validators slashing DB from v1 to v2"
let spdir = try: db_v1.toSPDIR_lowWatermark()
except IOError as exc:
@ -128,6 +119,8 @@ proc init*(
fatal "Slashing DB migration failure. Aborting to protect validators."
quit 1
db_v1.close()
proc init*(
T: type SlashingProtectionDB,
genesis_validators_root: Eth2Digest,
@ -141,7 +134,6 @@ proc init*(
init(
T, genesis_validators_root, basePath, dbname,
modes = {kLowWatermarkV2},
disagreementBehavior = kChooseV2
)
proc loadUnchecked*(
@ -155,13 +147,12 @@ proc loadUnchecked*(
## Does not handle migration
result.modes = {kCompleteArchiveV1, kCompleteArchiveV2}
result.disagreementBehavior = kCrash
result.db_v2 = SlashingProtectionDB_v2.loadUnchecked(
basePath, dbname, readOnly
)
result.db_v1.fromRawDB(kvstore result.db_v2.getRawDBHandle())
result.db_v1.fromRawDB(kvstore result.db_v2.getRawDBHandle().openKvStore().get())
proc close*(db: SlashingProtectionDB) =
## Close a slashing protection database

View File

@ -143,6 +143,7 @@ type
## Database storing the blocks attested
## by validators attached to a beacon node
## or validator client.
db: SqStoreRef
backend: KvStoreRef
SlotDesc = object
@ -374,7 +375,8 @@ proc init*(
T: type SlashingProtectionDB_v1,
genesis_validators_root: Eth2Digest,
basePath, dbname: string): T =
result = T(backend: kvStore SqStoreRef.init(basePath, dbname).get())
let db = SqStoreRef.init(basePath, dbname).get()
result = T(db: db, backend: kvStore db.openKvStore().get())
if not result.backend.checkOrPutGenesis_DbV1(genesis_validators_root):
fatal "The slashing database refers to another chain/mainnet/testnet",
path = basePath/dbname,
@ -391,16 +393,18 @@ proc loadUnchecked*(
let alreadyExists = fileExists(path)
if not alreadyExists:
raise newException(IOError, "DB '" & path & "' does not exist.")
let backend = kvStore SqStoreRef.init(basePath, dbname, readOnly = false).get()
let db = SqStoreRef.init(basePath, dbname, readOnly = false).get()
let backend = kvStore db.openKvStore().get()
doAssert backend.contains(
subkey(kGenesisValidatorsRoot)
).get(), "The Slashing DB is missing genesis information"
result = T(backend: backend)
result = T(db: db, backend: backend)
proc close*(db: SlashingProtectionDB_v1) =
if db.db != nil:
db.db.close()
discard db.backend.close()
# DB Queries

View File

@ -638,7 +638,6 @@ proc initCompatV1*(T: type SlashingProtectionDB_v2,
result.db = T(backend: SqStoreRef.init(
basePath, dbname,
keyspaces = ["kvstore"] # The key compat part
).get())
if alreadyExists and result.db.getMetadataTable_DbV2().isSome():
result.db.checkDB(genesis_validators_root)

View File

@ -29,10 +29,6 @@ type
exportEra
validatorPerf
StateDbKind* {.pure.} = enum
sql
file
# TODO:
# This should probably allow specifying a run-time preset
DbConf = object
@ -45,11 +41,6 @@ type
desc: "The Eth2 network preset to use"
name: "network" }: Option[string]
stateDbKind* {.
defaultValue: StateDbKind.sql
desc: "State DB kind (sql, file) [=sql]"
name: "state-db-kind" }: StateDbKind
case cmd* {.
command
desc: ""
@ -154,8 +145,7 @@ proc cmdBench(conf: DbConf, runtimePreset: RuntimePreset) =
echo "Opening database..."
let
db = BeaconChainDB.new(
runtimePreset, conf.databaseDir.string,
fileStateStorage = conf.stateDbKind == StateDbKind.file)
runtimePreset, conf.databaseDir.string,)
dbBenchmark = BeaconChainDB.new(runtimePreset, "benchmark")
defer:
db.close()
@ -436,8 +426,7 @@ proc cmdValidatorPerf(conf: DbConf, runtimePreset: RuntimePreset) =
echo "Opening database..."
let
db = BeaconChainDB.new(
runtimePreset, conf.databaseDir.string,
fileStateStorage = conf.stateDbKind == StateDbKind.file)
runtimePreset, conf.databaseDir.string,)
defer:
db.close()

View File

@ -115,55 +115,6 @@ suite "Beacon chain DB" & preset():
db.close()
test "sanity check full states" & preset():
var
db = makeTestDB(SLOTS_PER_EPOCH)
dag = init(ChainDAGRef, defaultRuntimePreset, db)
testStates = getTestStates(dag.headState.data)
# Ensure transitions beyond just adding validators and increasing slots
sort(testStates) do (x, y: ref HashedBeaconState) -> int:
cmp($x.root, $y.root)
for state in testStates:
db.putStateFull(state[].data)
let root = hash_tree_root(state[].data)
check:
db.containsState(root)
hash_tree_root(db.getStateRef(root)[]) == root
db.delState(root)
check: not db.containsState(root)
db.close()
test "sanity check full states, reusing buffers" & preset():
var
db = makeTestDB(SLOTS_PER_EPOCH)
dag = init(ChainDAGRef, defaultRuntimePreset, db)
let stateBuffer = BeaconStateRef()
var testStates = getTestStates(dag.headState.data)
# Ensure transitions beyond just adding validators and increasing slots
sort(testStates) do (x, y: ref HashedBeaconState) -> int:
cmp($x.root, $y.root)
for state in testStates:
db.putStateFull(state[].data)
let root = hash_tree_root(state[].data)
check:
db.getState(root, stateBuffer[], noRollback)
db.containsState(root)
hash_tree_root(stateBuffer[]) == root
db.delState(root)
check: not db.containsState(root)
db.close()
test "find ancestors" & preset():
var
db = BeaconChainDB.new(defaultRuntimePreset, "", inMemory = true)
@ -242,10 +193,9 @@ suite "Beacon chain DB" & preset():
db.putStateDiff(root, stateDiff)
check db.containsStateDiff(root)
let state2 = db.getStateDiff(root)
db.delStateDiff(root)
check not db.containsStateDiff(root)
check db.getStateDiff(root).isNone()
db.close()
check:

View File

@ -125,7 +125,9 @@ suite "Block pool processing" & preset():
stateData = newClone(dag.headState)
cache = StateCache()
rewards = RewardInfo()
b1 = addTestBlock(stateData.data, dag.tail.root, cache)
att0 = makeFullAttestations(
stateData.data.data, dag.tail.root, 0.Slot, cache)
b1 = addTestBlock(stateData.data, dag.tail.root, cache, attestations = att0)
b2 = addTestBlock(stateData.data, b1.root, cache)
test "getRef returns nil for missing blocks":
check:
@ -465,15 +467,6 @@ suite "chain DAG finalization tests" & preset():
let added2 = dag2.addRawBlock(quarantine, blck, nil)
check: added2.isOk()
suite "chain DAG finalization tests" & preset():
setup:
var
db = makeTestDB(SLOTS_PER_EPOCH)
dag = init(ChainDAGRef, defaultRuntimePreset, db)
quarantine = QuarantineRef.init(keys.newRng())
cache = StateCache()
rewards = RewardInfo()
test "init with gaps" & preset():
for blck in makeTestBlocks(
dag.headState.data, dag.head.root, cache, int(SLOTS_PER_EPOCH * 6 - 2),
@ -499,6 +492,20 @@ suite "chain DAG finalization tests" & preset():
dag.updateHead(added[], quarantine)
dag.pruneAtFinalization()
block:
# Check that we can rewind to every block from head to finalized
var
cur = dag.head
tmpStateData = assignClone(dag.headState)
while cur.slot >= dag.finalizedHead.slot:
assign(tmpStateData[], dag.headState)
dag.updateStateData(tmpStateData[], cur.atSlot(cur.slot), false, cache)
check:
dag.get(cur).data.message.state_root ==
tmpStateData[].data.root
tmpStateData[].data.root == hash_tree_root(tmpStateData[].data.data)
cur = cur.parent
let
dag2 = init(ChainDAGRef, defaultRuntimePreset, db)

View File

@ -25,6 +25,9 @@ type
data: array[256, bool]
data2: HashArray[256, bool]
NonFixed = object
data: HashList[uint64, 1024]
template reject(stmt) =
doAssert(not compiles(stmt))
@ -39,6 +42,8 @@ static:
doAssert isFixedSize(Simple) == true
doAssert isFixedSize(List[bool, 128]) == false
doAssert isFixedSize(NonFixed) == false
reject fixedPortionSize(int)
type
@ -208,17 +213,21 @@ suite "hash":
both: check: it.li.add Eth2Digest()
var y: HashArray[32, uint64]
doAssert hash_tree_root(y) == hash_tree_root(y.data)
for i in 0..<y.len:
y[i] = 42'u64
doAssert hash_tree_root(y) == hash_tree_root(y.data)
var y: HashArray[32, uint64]
check: hash_tree_root(y) == hash_tree_root(y.data)
for i in 0..<y.len:
y[i] = 42'u64
check: hash_tree_root(y) == hash_tree_root(y.data)
test "HashList":
test "HashList fixed":
type MyList = HashList[uint64, 1024]
var
small, large: MyList
let
emptyBytes = SSZ.encode(small)
emptyRoot = hash_tree_root(small)
check: small.add(10'u64)
for i in 0..<100:
@ -228,8 +237,9 @@ suite "hash":
sroot = hash_tree_root(small)
lroot = hash_tree_root(large)
doAssert sroot == hash_tree_root(small.data)
doAssert lroot == hash_tree_root(large.data)
check:
sroot == hash_tree_root(small.data)
lroot == hash_tree_root(large.data)
var
sbytes = SSZ.encode(small)
@ -237,8 +247,9 @@ suite "hash":
sloaded = SSZ.decode(sbytes, MyList)
lloaded = SSZ.decode(lbytes, MyList)
doAssert sroot == hash_tree_root(sloaded)
doAssert lroot == hash_tree_root(lloaded)
check:
sroot == hash_tree_root(sloaded)
lroot == hash_tree_root(lloaded)
# Here we smoke test that the cache is reset correctly even when reading
# into an existing instance - the instances are size-swapped so the reader
@ -246,5 +257,56 @@ suite "hash":
readSszValue(sbytes, lloaded)
readSszValue(lbytes, sloaded)
doAssert lroot == hash_tree_root(sloaded)
doAssert sroot == hash_tree_root(lloaded)
check:
lroot == hash_tree_root(sloaded)
sroot == hash_tree_root(lloaded)
readSszValue(emptyBytes, sloaded)
check:
emptyRoot == hash_tree_root(sloaded)
test "HashList variable":
type MyList = HashList[NonFixed, 1024]
var
small, large: MyList
let
emptyBytes = SSZ.encode(small)
emptyRoot = hash_tree_root(small)
check: small.add(NonFixed())
for i in 0..<100:
check: large.add(NonFixed())
let
sroot = hash_tree_root(small)
lroot = hash_tree_root(large)
check:
sroot == hash_tree_root(small.data)
lroot == hash_tree_root(large.data)
var
sbytes = SSZ.encode(small)
lbytes = SSZ.encode(large)
sloaded = SSZ.decode(sbytes, MyList)
lloaded = SSZ.decode(lbytes, MyList)
check:
sroot == hash_tree_root(sloaded)
lroot == hash_tree_root(lloaded)
# Here we smoke test that the cache is reset correctly even when reading
# into an existing instance - the instances are size-swapped so the reader
# will have some more work to do
readSszValue(sbytes, lloaded)
readSszValue(lbytes, sloaded)
check:
lroot == hash_tree_root(sloaded)
sroot == hash_tree_root(lloaded)
readSszValue(emptyBytes, sloaded)
check:
emptyRoot == hash_tree_root(sloaded)

View File

@ -15,7 +15,7 @@ import
export beacon_chain_db, testblockutil, kvstore, kvstore_sqlite3
proc makeTestDB*(tailState: var BeaconState, tailBlock: SignedBeaconBlock): BeaconChainDB =
proc makeTestDB*(tailState: var BeaconState, tailBlock: TrustedSignedBeaconBlock): BeaconChainDB =
result = BeaconChainDB.new(defaultRuntimePreset, "", inMemory = true)
ChainDAGRef.preInit(result, tailState, tailState, tailBlock)

vendor/nim-eth (vendored)

@ -1 +1 @@
Subproject commit 16802c0e5218cce405cd623a554ce95549dd5181
Subproject commit 1995afb87e14bfb2d3fc2f3474e4d30e10c927a7

vendor/nim-web3 (vendored)

@ -1 +1 @@
Subproject commit 75a1a0e5d8cc8fc7bb9cdb3bfe68a73e11b5c71a
Subproject commit 57f86f752b0b4c4ec1f6caf0b8d2eb9870112f4e