mirror of
https://github.com/status-im/nimbus-eth1.git
synced 2025-01-12 13:24:21 +00:00
Unified database frontend (#1661)
* Remove 32bit os support from `custom_network` unit test also: * Fix compilation annoyance #1648 * Fix unit test on Kiln (changed `merge` logic?) * Hide unused sources do not compile why: * Get them out of the way before major update * Import and function prototype mismatch -- maybe some changes got out of scope. * Re-implemented `db_chain` as `core_db` why: Hiding `TrieDatabaseRef` and `HexaryTrie` by default allows to replace the current db wrapper by some other one, e.g. Aristo * Support compiler exception warnings for CoreDbRef base methods. * Allow `pairs()` iterator on all memory based key-value tables why: Previously only available for capture recorder. * Backport `chain_db.nim` changes into its re-implementation `core_apps.nim` * Fix exception annotation
This commit is contained in:
parent
12faf4bdb2
commit
322f1c2e9e
162
nimbus/db/core_db.nim
Normal file
162
nimbus/db/core_db.nim
Normal file
@ -0,0 +1,162 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2018 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
## Core database replacement wrapper object
|
||||
## ========================================
|
||||
##
|
||||
## See `core_db/README.md`
|
||||
##
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronicles,
|
||||
eth/[common, trie/db],
|
||||
./core_db/[base, core_apps, legacy]
|
||||
|
||||
export
|
||||
common,
|
||||
core_apps,
|
||||
|
||||
# Not all symbols from the object sources will be exported by default
|
||||
CoreDbCaptFlags,
|
||||
CoreDbCaptRef,
|
||||
CoreDbKvtRef,
|
||||
CoreDbMptRef,
|
||||
CoreDbPhkRef,
|
||||
CoreDbRef,
|
||||
CoreDbTxID,
|
||||
CoreDbTxRef,
|
||||
CoreDbType,
|
||||
LegacyCoreDbRef, # for shortTimeReadOnly()
|
||||
beginTransaction,
|
||||
commit,
|
||||
compensateLegacySetup,
|
||||
contains,
|
||||
dbType,
|
||||
del,
|
||||
dispose,
|
||||
get,
|
||||
getTransactionID,
|
||||
isPruning,
|
||||
kvt,
|
||||
maybeGet,
|
||||
mpt,
|
||||
mptPrune,
|
||||
newCoreDbCaptRef,
|
||||
parent,
|
||||
phk,
|
||||
phkPrune,
|
||||
put,
|
||||
recorder,
|
||||
rollback,
|
||||
rootHash,
|
||||
safeDispose,
|
||||
setTransactionID
|
||||
|
||||
logScope:
|
||||
topics = "core_db"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions: helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
  ## Compile-time helper: prefix `info` with this module's log tag.
  "ChainDB " & info
|
||||
|
||||
proc itNotImplemented(db: CoreDbRef|CoreDbKvtRef, name: string) {.used.} =
  ## Emit a debug notice that iterator `name` has no implementation for
  ## this database backend.
  debug logTxt "iterator not implemented", dbType = db.dbType, meth = name
|
||||
|
||||
proc tmplNotImplemented*(db: CoreDbRef, name: string) {.used.} =
  ## Emit a debug notice that template `name` has no implementation for
  ## this database backend.
  debug logTxt "template not implemented", dbType = db.dbType, meth = name
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public constructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc newCoreDbRef*(db: TrieDatabaseRef): CoreDbRef
    {.gcsafe, deprecated: "use newCoreDbRef(LegacyDbPersistent,<path>)".} =
  ## Legacy constructor wrapping an existing `TrieDatabaseRef`.
  ##
  ## Note: Using legacy notation `newCoreDbRef()` rather than
  ## `CoreDbRef.init()` because of compiler coughing.
  newLegacyCoreDbRef db
|
||||
|
||||
proc newCoreDbRef*(dbType: static[CoreDbType]): CoreDbRef =
  ## Constructor for a volatile/in-memory database; unsupported backends
  ## are rejected at compile time.
  ##
  ## Note: Using legacy notation `newCoreDbRef()` rather than
  ## `CoreDbRef.init()` because of compiler coughing.
  when dbType == LegacyDbMemory:
    newLegacyMemoryCoreDbRef()
  else:
    {.error: "Unsupported dbType for CoreDbRef.init()".}
|
||||
|
||||
proc newCoreDbRef*(dbType: static[CoreDbType]; path: string): CoreDbRef =
  ## General constructor; the `path` argument is ignored for
  ## volatile/memory type DB. Unsupported backends are rejected at
  ## compile time.
  ##
  ## Note: Using legacy notation `newCoreDbRef()` rather than
  ## `CoreDbRef.init()` because of compiler coughing.
  when dbType == LegacyDbMemory:
    newLegacyMemoryCoreDbRef()
  elif dbType == LegacyDbPersistent:
    newLegacyPersistentCoreDbRef path
  else:
    {.error: "Unsupported dbType for CoreDbRef.init()".}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public template wrappers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template shortTimeReadOnly*(db: CoreDbRef; id: CoreDbTxID; body: untyped) =
  ## Run `body` as a short-lived read-only action on transaction `id`,
  ## dispatching on the concrete backend type.
  proc readOnlyAction() {.gcsafe, raises: [CatchableError].} =
    body
  case db.dbType:
  of LegacyDbMemory, LegacyDbPersistent:
    db.LegacyCoreDbRef.shortTimeReadOnly(id, readOnlyAction)
  else:
    db.tmplNotImplemented "shortTimeReadOnly"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public iterators
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
iterator pairs*(db: CoreDbKvtRef): (Blob, Blob) {.gcsafe.} =
  ## Iterate over all key/value entries of the table. Only implemented
  ## for the legacy in-memory backend; otherwise a debug notice is logged.
  case db.dbType:
  of LegacyDbMemory:
    for key, val in db.LegacyCoreDbKvtRef:
      yield (key, val)
  else:
    db.itNotImplemented "pairs/kvt"
|
||||
|
||||
iterator pairs*(db: CoreDbMptRef): (Blob, Blob)
    {.gcsafe, raises: [RlpError].} =
  ## Iterate over all key/value entries of the hexary trie. Implemented
  ## for the legacy backends; otherwise a debug notice is logged.
  case db.parent.dbType:
  of LegacyDbMemory, LegacyDbPersistent:
    for key, val in db.LegacyCoreDbMptRef:
      yield (key, val)
  else:
    db.parent.itNotImplemented "pairs/mpt"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
1
nimbus/db/core_db/.gitignore
vendored
Normal file
1
nimbus/db/core_db/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
*.html
|
79
nimbus/db/core_db/README.md
Normal file
79
nimbus/db/core_db/README.md
Normal file
@ -0,0 +1,79 @@
|
||||
Core database replacement wrapper object
|
||||
========================================
|
||||
This wrapper replaces the *TrieDatabaseRef* and its derivatives by the new
|
||||
object *CoreDbRef*.
|
||||
|
||||
Relations to current *TrieDatabaseRef* implementation
|
||||
-----------------------------------------------------
|
||||
Here are some incomplete translations for objects and constructors.
|
||||
|
||||
### Object types:
|
||||
|
||||
| **Legacy notation** | **CoreDbRef based replacement** |
|
||||
|:----------------------------|:--------------------------------------|
|
||||
| | |
|
||||
| ChainDB | (don't use/avoid) |
|
||||
| ChainDbRef | CoreDbRef |
|
||||
| TrieDatabaseRef | CoreDbKvtRef |
|
||||
| HexaryTrie | CoreDbMptRef |
|
||||
| SecureHexaryTrie | CoreDbPhkRef |
|
||||
| DbTransaction | CoreDbTxRef |
|
||||
| TransactionID | CoreDbTxID |
|
||||
|
||||
|
||||
### Constructors:
|
||||
|
||||
| **Legacy notation** | **CoreDbRef based replacement** |
|
||||
|:----------------------------|:--------------------------------------|
|
||||
| | |
|
||||
| trieDB newChainDB("..") | newCoreDbRef(LegacyDbPersistent,"..") |
|
||||
| newMemoryDB() | newCoreDbRef(LegacyDbMemory) |
|
||||
| -- | |
|
||||
| initHexaryTrie(db,..) | db.mpt(..) (no pruning) |
|
||||
| | db.mptPrune(..) (w/pruning true/false)|
|
||||
| -- | |
|
||||
| initSecureHexaryTrie(db,..) | db.phk(..) (no pruning) |
|
||||
| | db.phkPrune(..) (w/pruning true/false)|
|
||||
| -- | |
|
||||
| newCaptureDB(db,memDB) | newCoreDbCaptRef(db) (see below) |
|
||||
|
||||
|
||||
Usage of the replacement wrapper
|
||||
--------------------------------
|
||||
|
||||
### Objects pedigree:
|
||||
|
||||
CoreDbRef -- base descriptor
|
||||
| | | |
|
||||
| | | +-- CoreDbMptRef -- hexary trie instance
|
||||
| | | | : :
|
||||
| | | +-- CoreDbMptRef -- hexary trie instance
|
||||
| | |
|
||||
| | |
|
||||
| | +---- CoreDbPhkRef -- pre-hashed key hexary trie instance
|
||||
| | | : :
|
||||
| | +---- CoreDbPhkRef -- pre-hashed key hexary trie instance
|
||||
| |
|
||||
| |
|
||||
| +------ CoreDbKvtRef -- single static key-value table
|
||||
|
|
||||
|
|
||||
+-------- CoreDbCaptRef -- tracer support descriptor
|
||||
|
||||
### Instantiating standard database object descriptors works as follows:
|
||||
|
||||
let
|
||||
db = newCoreDbRef(..) # new base descriptor
|
||||
mpt = db.mpt(..) # hexary trie/Merkle Patricia Tree
|
||||
phk = db.phk(..) # pre-hashed key hexary trie/MPT
|
||||
kvt = db.kvt # key-value table
|
||||
|
||||
### Tracer support setup by hiding the current *CoreDbRef* behind a replacement:
|
||||
|
||||
let
|
||||
capture = newCoreDbCaptRef(db)
|
||||
db = capture.recorder # use the recorder in place of db
|
||||
...
|
||||
|
||||
for key,value in capture.recorder.kvt:
|
||||
... # process recorded data
|
422
nimbus/db/core_db/base.nim
Normal file
422
nimbus/db/core_db/base.nim
Normal file
@ -0,0 +1,422 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2018 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/options,
|
||||
chronicles,
|
||||
eth/common
|
||||
|
||||
type
  CoreDbCaptFlags* {.pure.} = enum
    ## Flags controlling what a capture/tracer session persists.
    PersistPut
    PersistDel

  CoreDbType* = enum
    ## Discriminator for the concrete database backend.
    Ooops
    LegacyDbMemory
    LegacyDbPersistent
    # AristoDbMemory
    # AristoDbPersistent

  CoreDbRef* = ref object of RootRef
    ## Database descriptor
    kvt: CoreDbKvtRef

  CoreDbKvtRef* = ref object of RootRef
    ## Statically initialised Key-Value pair table living in `CoreDbRef`
    dbType: CoreDbType

  CoreDbMptRef* = ref object of RootRef
    ## Hexary/Merkle-Patricia tree derived from `CoreDbRef`, will be
    ## initialised on-the-fly.
    parent: CoreDbRef

  CoreDbPhkRef* = ref object of RootRef
    ## Similar to `CoreDbMptRef` but with pre-hashed keys. That is, any
    ## argument key for `put()`, `get()` etc. will be hashed first before
    ## being applied.
    parent: CoreDbRef

  CoreDbCaptRef* = ref object of RootRef
    ## Db transaction tracer derived from `CoreDbRef`
    parent: CoreDbRef
    flags: set[CoreDbCaptFlags]

  CoreDbTxRef* = ref object of RootRef
    ## Transaction descriptor derived from `CoreDbRef`
    parent: CoreDbRef

  CoreDbTxID* = ref object of RootRef
    ## Opaque handle identifying a transaction frame.
|
||||
|
||||
logScope:
|
||||
topics = "core_db-base"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions: helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
  ## Compile-time helper: prefix `info` with this module's log tag.
  "CoreDb base: " & info

proc notImplemented(db: CoreDbKvtRef, name: string) {.used.} =
  ## Debug-log that the base method `name` was invoked without a
  ## backend override.
  debug logTxt "method not implemented", dbType=db.dbType, meth=name

proc notImplemented(db: CoreDbRef, name: string) {.used.} =
  ## Convenience overload forwarding to the `CoreDbKvtRef` variant.
  db.kvt.notImplemented name
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public constructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc init*(db: CoreDbRef; dbType: CoreDbType; kvt: CoreDbKvtRef) =
  ## Attach the key-value table `kvt` to `db` and tag it with the
  ## backend discriminator `dbType`.
  db.kvt = kvt
  kvt.dbType = dbType

proc init*(db: CoreDbTxRef|CoreDbMptRef|CoreDbPhkRef; parent: CoreDbRef) =
  ## Link a derived descriptor back to its base descriptor `parent`.
  db.parent = parent

proc init*(db: CoreDbCaptRef; parent: CoreDbRef; flags: set[CoreDbCaptFlags]) =
  ## Link a tracer descriptor to `parent` and record its capture `flags`.
  db.parent = parent
  db.flags = flags
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public getters
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc dbType*(db: CoreDbRef): CoreDbType =
  ## Backend discriminator of the base descriptor.
  db.kvt.dbType

proc dbType*(db: CoreDbKvtRef): CoreDbType =
  ## Backend discriminator of the key-value table.
  db.dbType

proc kvt*(db: CoreDbRef): CoreDbKvtRef =
  ## Key-value table attached to the base descriptor.
  db.kvt

proc parent*(db: CoreDbTxRef|CoreDbMptRef|CoreDbPhkRef|CoreDbCaptRef): CoreDbRef =
  ## Base descriptor this derived object was created from.
  db.parent

proc flags*(db: CoreDbCaptRef): set[CoreDbCaptFlags] =
  ## Capture flags of the tracer descriptor.
  db.flags
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public legacy helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# On the persistent legacy hexary trie, this function is needed for
# bootstrapping and Genesis setup when the `purge` flag is activated.
method compensateLegacySetup*(db: CoreDbRef) {.base.} =
  ## Base stub; concrete backends are expected to override this.
  db.notImplemented "compensateLegacySetup"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public tracer methods
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
method newCoreDbCaptRef*(
    db: CoreDbRef;
    flags: set[CoreDbCaptFlags] = {};
      ): CoreDbCaptRef
      {.base.} =
  ## Start capture session on the argument `db`.
  ##
  ## Base stub; backends override this. The logged name now matches the
  ## method's actual identifier (was "newCaptureRef") so the debug
  ## message points at the right entry point.
  db.notImplemented "newCoreDbCaptRef"
|
||||
|
||||
method recorder*(
    db: CoreDbCaptRef;
      ): CoreDbRef
      {.base.} =
  ## Retrieve recording database descriptor.
  ##
  ## Base stub; backends override this. The logged name now matches the
  ## method's actual identifier (was "db") so the debug message points
  ## at the right entry point.
  db.parent.notImplemented "recorder"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public key-value table methods
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
method get*(db: CoreDbKvtRef; key: openArray[byte]): Blob {.base.} =
  ## Fetch the value stored under `key`. Base stub, logs when not
  ## overridden by a backend.
  db.notImplemented "get/kvt"

method maybeGet*(db: CoreDbKvtRef; key: openArray[byte]): Option[Blob]
    {.base.} =
  ## As `get()` but wraps the result in an `Option`. Base stub.
  db.notImplemented "maybeGet/kvt"

method del*(db: CoreDbKvtRef; key: openArray[byte]) {.base.} =
  ## Remove the entry stored under `key`. Base stub.
  db.notImplemented "del/kvt"

method put*(
    db: CoreDbKvtRef;
    key: openArray[byte];
    value: openArray[byte];
      ) {.base.} =
  ## Store `value` under `key`. Base stub.
  db.notImplemented "put/kvt"

method contains*(db: CoreDbKvtRef; key: openArray[byte]): bool {.base.} =
  ## Membership test for `key`. Base stub.
  db.notImplemented "contains/kvt"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public hexary trie methods
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
method mpt*(db: CoreDbRef; root: Hash256): CoreDbMptRef {.base.} =
  ## Hexary trie view rooted at `root`. Base stub, logs when not
  ## overridden by a backend.
  db.notImplemented "mpt"

method mpt*(db: CoreDbRef): CoreDbMptRef {.base.} =
  ## Hexary trie view with a default root. Base stub.
  db.notImplemented "mpt"

method isPruning*(db: CoreDbMptRef): bool {.base.} =
  ## Whether the trie runs in pruning mode. Base stub.
  db.parent.notImplemented "isPruning"
|
||||
|
||||
# -----
|
||||
|
||||
method mptPrune*(db: CoreDbRef; root: Hash256): CoreDbMptRef {.base.} =
  ## Legacy mode MPT, will go away
  db.notImplemented "mptPrune"

method mptPrune*(db: CoreDbRef): CoreDbMptRef {.base.} =
  ## Legacy mode MPT, will go away
  db.notImplemented "mptPrune"

method mptPrune*(
    db: CoreDbRef;
    root: Hash256;
    prune: bool;
      ): CoreDbMptRef
      {.base.} =
  ## Legacy mode MPT, will go away
  db.notImplemented "mptPrune"

method mptPrune*(db: CoreDbRef; prune: bool): CoreDbMptRef {.base.} =
  ## Legacy mode MPT, will go away
  db.notImplemented "mptPrune"
|
||||
|
||||
# -----
|
||||
|
||||
{.push hint[XCannotRaiseY]: off.}

method get*(db: CoreDbMptRef; key: openArray[byte]): Blob
    {.base, raises: [RlpError].} =
  ## Fetch the trie value stored under `key`. Base stub, logs when not
  ## overridden by a backend.
  db.parent.notImplemented "get/mpt"

method maybeGet*(db: CoreDbMptRef; key: openArray[byte]): Option[Blob]
    {.base, raises: [RlpError].} =
  ## As `get()` but wraps the result in an `Option`. Base stub.
  db.parent.notImplemented "maybeGet/mpt"

method del*(db: CoreDbMptRef; key: openArray[byte])
    {.base, raises: [RlpError].} =
  ## Remove the trie entry stored under `key`. Base stub.
  db.parent.notImplemented "del/mpt"

method put*(
    db: CoreDbMptRef;
    key: openArray[byte];
    value: openArray[byte];
      ) {.base, raises: [RlpError].} =
  ## Store `value` under `key` in the trie. Base stub.
  db.parent.notImplemented "put/mpt"

method contains*(db: CoreDbMptRef; key: openArray[byte]): bool
    {.base, raises: [RlpError].} =
  ## Membership test for `key` in the trie. Base stub.
  db.parent.notImplemented "contains/mpt"

{.pop.}

method rootHash*(db: CoreDbMptRef): Hash256 {.base.} =
  ## State root of the trie. Base stub.
  db.parent.notImplemented "rootHash/mpt"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public pre-hashed key hexary trie methods
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
method phk*(db: CoreDbRef; root: Hash256): CoreDbPhkRef {.base.} =
  ## Pre-hashed-key trie view rooted at `root`. Base stub, logs when
  ## not overridden by a backend.
  db.notImplemented "phk"

method phk*(db: CoreDbRef): CoreDbPhkRef {.base.} =
  ## Pre-hashed-key trie view with a default root. Base stub.
  db.notImplemented "phk"

method isPruning*(db: CoreDbPhkRef): bool {.base.} =
  ## Whether the trie runs in pruning mode. Base stub.
  db.parent.notImplemented "isPruning"
|
||||
|
||||
# -----------
|
||||
|
||||
method phkPrune*(db: CoreDbRef; root: Hash256): CoreDbPhkRef {.base.} =
  ## Legacy mode PHK, will go away
  db.notImplemented "phkPrune"

method phkPrune*(db: CoreDbRef): CoreDbPhkRef {.base.} =
  ## Legacy mode PHK, will go away
  db.notImplemented "phkPrune"

method phkPrune*(
    db: CoreDbRef;
    root: Hash256;
    prune: bool;
      ): CoreDbPhkRef
      {.base.} =
  ## Legacy mode PHK, will go away
  db.notImplemented "phkPrune"

method phkPrune*(db: CoreDbRef; prune: bool): CoreDbPhkRef {.base.} =
  ## Legacy mode PHK, will go away
  db.notImplemented "phkPrune"
|
||||
|
||||
# -----------
|
||||
|
||||
{.push hint[XCannotRaiseY]: off.}

method get*(db: CoreDbPhkRef; key: openArray[byte]): Blob
    {.base, raises: [RlpError].} =
  ## Fetch the value stored under the hashed `key`. Base stub, logs
  ## when not overridden by a backend.
  db.parent.notImplemented "get/phk"

method maybeGet*(db: CoreDbPhkRef; key: openArray[byte]): Option[Blob]
    {.base, raises: [RlpError].} =
  ## As `get()` but wraps the result in an `Option`. Base stub.
  db.parent.notImplemented "maybeGet/phk"

method del*(db: CoreDbPhkRef; key: openArray[byte])
    {.base, raises: [RlpError].} =
  ## Remove the entry stored under the hashed `key`. Base stub.
  db.parent.notImplemented "del/phk"

method put*(
    db: CoreDbPhkRef;
    key: openArray[byte];
    value: openArray[byte];
      ) {.base, raises: [RlpError].} =
  ## Store `value` under the hashed `key`. Base stub.
  db.parent.notImplemented "put/phk"

method contains*(db: CoreDbPhkRef; key: openArray[byte]): bool
    {.base, raises: [RlpError].} =
  ## Membership test for the hashed `key`. Base stub.
  db.parent.notImplemented "contains/phk"

{.pop.}

method rootHash*(db: CoreDbPhkRef): Hash256 {.base.} =
  ## State root of the trie. Base stub.
  db.parent.notImplemented "rootHash/phk"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public transaction related methods
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
method getTransactionID*(db: CoreDbRef): CoreDbTxID {.base.} =
  ## Current transaction frame identifier. Base stub, logs when not
  ## overridden by a backend.
  db.notImplemented "getTxID"

method setTransactionID*(db: CoreDbRef; id: CoreDbTxID) {.base.} =
  ## Select transaction frame `id`. Base stub.
  db.notImplemented "setTxID"

method beginTransaction*(db: CoreDbRef): CoreDbTxRef {.base.} =
  ## Open a new transaction frame. Base stub.
  db.notImplemented "beginTransaction"

method commit*(t: CoreDbTxRef, applyDeletes = true) {.base.} =
  ## Commit the transaction frame. Base stub.
  t.parent.notImplemented "commit"

method rollback*(t: CoreDbTxRef) {.base.} =
  ## Discard the transaction frame. Base stub.
  t.parent.notImplemented "rollback"

method dispose*(t: CoreDbTxRef) {.base.} =
  ## Release the transaction frame. Base stub.
  t.parent.notImplemented "dispose"

method safeDispose*(t: CoreDbTxRef) {.base.} =
  ## Release the transaction frame, tolerant variant. Base stub.
  t.parent.notImplemented "safeDispose"

{.push hint[XCannotRaiseY]: off.}

method shortTimeReadOnly*(
    db: CoreDbRef;
    id: CoreDbTxID;
    action: proc() {.gcsafe, raises: [CatchableError].};
      ) {.base, raises: [CatchableError].} =
  ## Run `action` within a read-only window on frame `id`. Base stub.
  db.notImplemented "shortTimeReadOnly"

{.pop.}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
737
nimbus/db/core_db/core_apps.nim
Normal file
737
nimbus/db/core_db/core_apps.nim
Normal file
@ -0,0 +1,737 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2018 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[algorithm, options, sequtils],
|
||||
chronicles,
|
||||
eth/[common, rlp],
|
||||
stew/byteutils,
|
||||
"../.."/[errors, constants],
|
||||
../storage_types,
|
||||
"."/base
|
||||
|
||||
logScope:
|
||||
topics = "core_db-apps"
|
||||
|
||||
type
  TransactionKey = tuple
    ## Locates a transaction inside a block: the block number plus the
    ## transaction's index within that block.
    blockNumber: BlockNumber
    index: int
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Forward declarations
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Forward declarations so the iterators below can call these before the
# definitions appear later in the module.

proc getBlockHeader*(
    db: CoreDbRef;
    n: BlockNumber;
    output: var BlockHeader;
      ): bool
      {.gcsafe, raises: [RlpError].}

proc getBlockHeader*(
    db: CoreDbRef,
    blockHash: Hash256;
      ): BlockHeader
      {.gcsafe, raises: [BlockNotFound].}

proc getBlockHash*(
    db: CoreDbRef;
    n: BlockNumber;
    output: var Hash256;
      ): bool
      {.gcsafe, raises: [RlpError].}

proc addBlockNumberToHashLookup*(
    db: CoreDbRef;
    header: BlockHeader;
      ) {.gcsafe.}

proc getBlockHeader*(
    db: CoreDbRef;
    blockHash: Hash256;
    output: var BlockHeader;
      ): bool
      {.gcsafe.}

# Copied from `utils/utils` which cannot be imported here in order to
# avoid circular imports.
func hash(b: BlockHeader): Hash256
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private iterators
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
iterator findNewAncestors(
    db: CoreDbRef;
    header: BlockHeader;
      ): BlockHeader
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Returns the chain leading up from the given header until the first
  ## ancestor it has in common with our canonical chain.
  var
    walk = header
    canonical: BlockHeader
  while true:
    # Stop as soon as `walk` is already on the canonical chain.
    if db.getBlockHeader(walk.blockNumber, canonical) and
       canonical.hash == walk.hash:
      break

    yield walk

    if walk.parentHash == GENESIS_PARENT_HASH:
      break
    else:
      walk = db.getBlockHeader(walk.parentHash)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public iterators
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
iterator getBlockTransactionData*(
    db: CoreDbRef;
    transactionRoot: Hash256;
      ): seq[byte]
      {.gcsafe, raises: [RlpError].} =
  ## Yield the raw RLP blob of each transaction stored under
  ## `transactionRoot`, in index order, stopping at the first gap.
  var
    txTrie = db.mptPrune transactionRoot
    idx = 0
  while true:
    let idxKey = rlp.encode(idx)
    if idxKey notin txTrie:
      break
    yield txTrie.get(idxKey)
    inc idx
|
||||
|
||||
iterator getBlockTransactions*(
    db: CoreDbRef;
    header: BlockHeader;
      ): Transaction
      {.gcsafe, raises: [RlpError].} =
  ## Yield each transaction of the block given by `header`, decoded
  ## from its stored RLP blob.
  for encodedTx in db.getBlockTransactionData(header.txRoot):
    yield rlp.decode(encodedTx, Transaction)

iterator getBlockTransactionHashes*(
    db: CoreDbRef;
    blockHeader: BlockHeader;
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  ## Returns an iterable of the transaction hashes from the block
  ## specified by the given block header.
  for encodedTx in db.getBlockTransactionData(blockHeader.txRoot):
    let tx = rlp.decode(encodedTx, Transaction)
    yield rlpHash(tx) # beware EIP-4844
|
||||
|
||||
iterator getWithdrawalsData*(
    db: CoreDbRef;
    withdrawalsRoot: Hash256;
      ): seq[byte]
      {.gcsafe, raises: [RlpError].} =
  ## Yield the raw RLP blob of each withdrawal stored under
  ## `withdrawalsRoot`, in index order, stopping at the first gap.
  var
    wdTrie = db.mptPrune withdrawalsRoot
    wdIdx = 0
  while true:
    let idxKey = rlp.encode(wdIdx)
    if idxKey notin wdTrie:
      break
    yield wdTrie.get(idxKey)
    inc wdIdx
|
||||
|
||||
iterator getReceipts*(
    db: CoreDbRef;
    receiptRoot: Hash256;
      ): Receipt
      {.gcsafe, raises: [RlpError].} =
  ## Yield each receipt stored under `receiptRoot`, decoded from its
  ## RLP blob, in index order, stopping at the first gap.
  var
    rcTrie = db.mptPrune receiptRoot
    rcIdx = 0
  while true:
    let idxKey = rlp.encode(rcIdx)
    if idxKey notin rcTrie:
      break
    yield rlp.decode(rcTrie.get(idxKey), Receipt)
    inc rcIdx
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
func hash(b: BlockHeader): Hash256 =
  ## Block hash, i.e. the hash of the RLP-encoded header.
  rlpHash b

proc removeTransactionFromCanonicalChain(
    db: CoreDbRef;
    transactionHash: Hash256;
      ) =
  ## Removes the transaction specified by the given hash from the canonical
  ## chain.
  db.kvt.del(transactionHashToBlockKey(transactionHash).toOpenArray)
|
||||
|
||||
proc setAsCanonicalChainHead(
    db: CoreDbRef;
    headerHash: Hash256;
      ): seq[BlockHeader]
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Sets the header as the canonical chain HEAD and returns the chain
  ## of headers newly promoted to the canonical chain (oldest first).
  let header = db.getBlockHeader(headerHash)

  # Headers between the new head and its first canonical ancestor,
  # reversed so we process oldest-first.
  var newCanonicalHeaders = sequtils.toSeq(db.findNewAncestors(header))
  reverse(newCanonicalHeaders)
  for h in newCanonicalHeaders:
    var oldHash: Hash256
    if not db.getBlockHash(h.blockNumber, oldHash):
      break

    # Drop the displaced block's transactions from the canonical lookup.
    let oldHeader = db.getBlockHeader(oldHash)
    for txHash in db.getBlockTransactionHashes(oldHeader):
      db.removeTransactionFromCanonicalChain(txHash)
      # TODO re-add txn to internal pending pool (only if local sender)

  for h in newCanonicalHeaders:
    db.addBlockNumberToHashLookup(h)

  db.kvt.put(canonicalHeadHashKey().toOpenArray, rlp.encode(headerHash))

  return newCanonicalHeaders
|
||||
|
||||
proc markCanonicalChain(
    db: CoreDbRef;
    header: BlockHeader;
    headerHash: Hash256;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  ## mark this chain as canonical by adding block number to hash lookup
  ## down to forking point
  var
    currHash = headerHash
    currHeader = header

  # mark current header as canonical
  let key = blockNumberToHashKey(currHeader.blockNumber)
  db.kvt.put(key.toOpenArray, rlp.encode(currHash))

  # it is a genesis block, done
  if currHeader.parentHash == Hash256():
    return true

  # mark ancestor blocks as canonical too
  currHash = currHeader.parentHash
  if not db.getBlockHeader(currHeader.parentHash, currHeader):
    # missing parent header — cannot walk further
    return false

  while currHash != Hash256():
    let key = blockNumberToHashKey(currHeader.blockNumber)
    let data = db.kvt.get(key.toOpenArray)
    if data.len == 0:
      # not marked, mark it
      db.kvt.put(key.toOpenArray, rlp.encode(currHash))
    elif rlp.decode(data, Hash256) != currHash:
      # replace prev chain
      db.kvt.put(key.toOpenArray, rlp.encode(currHash))
    else:
      # forking point, done
      break

    if currHeader.parentHash == Hash256():
      break

    currHash = currHeader.parentHash
    if not db.getBlockHeader(currHeader.parentHash, currHeader):
      return false

  return true
|
||||
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc exists*(db: CoreDbRef, hash: Hash256): bool =
  ## Whether an entry keyed by the raw bytes of `hash` is present in
  ## the key-value table.
  hash.data in db.kvt
|
||||
|
||||
proc getBlockHeader*(
    db: CoreDbRef;
    blockHash: Hash256;
    output: var BlockHeader;
      ): bool =
  ## Decode the header stored under `blockHash` into `output`; returns
  ## `false` when the entry is missing or fails to RLP-decode.
  let data = db.kvt.get(genericHashKey(blockHash).toOpenArray)
  if data.len == 0:
    return false
  try:
    output = rlp.decode(data, BlockHeader)
    true
  except RlpError:
    false

proc getBlockHeader*(
    db: CoreDbRef,
    blockHash: Hash256;
      ): BlockHeader =
  ## Returns the requested block header as specified by block hash.
  ##
  ## Raises BlockNotFound if it is not present in the db.
  if not db.getBlockHeader(blockHash, result):
    raise newException(
      BlockNotFound, "No block with hash " & blockHash.data.toHex)
|
||||
|
||||
proc getHash(
    db: CoreDbRef;
    key: DbKey;
    output: var Hash256;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  ## Decode the `Hash256` stored under `key` into `output`; returns
  ## `false` when no entry is present.
  let blob = db.kvt.get(key.toOpenArray)
  if blob.len == 0:
    return false
  output = rlp.decode(blob, Hash256)
  true
|
||||
|
||||
proc getCanonicalHead*(
    db: CoreDbRef;
      ): BlockHeader
      {.gcsafe, raises: [RlpError,EVMError].} =
  ## Header at the current canonical chain HEAD.
  ##
  ## Raises `CanonicalHeadNotFound` when no head hash is recorded or
  ## its header is missing.
  var headHash: Hash256
  if not db.getHash(canonicalHeadHashKey(), headHash) or
      not db.getBlockHeader(headHash, result):
    raise newException(
      CanonicalHeadNotFound, "No canonical head set for this chain")

proc getCanonicalHeaderHash*(
    db: CoreDbRef;
      ): Hash256
      {.gcsafe, raises: [RlpError].}=
  ## Hash of the canonical HEAD, or a zero hash when unset.
  discard db.getHash(canonicalHeadHashKey(), result)
|
||||
|
||||
proc getBlockHash*(
    db: CoreDbRef;
    n: BlockNumber;
    output: var Hash256;
      ): bool =
  ## Return the block hash for the given block number.
  ## False when no number -> hash mapping is stored.
  db.getHash(blockNumberToHashKey(n), output)

proc getBlockHash*(
    db: CoreDbRef;
    n: BlockNumber;
      ): Hash256
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Return the block hash for the given block number.
  ## Raises `BlockNotFound` when no mapping is stored.
  if not db.getHash(blockNumberToHashKey(n), result):
    raise newException(BlockNotFound, "No block hash for number " & $n)

proc getHeadBlockHash*(
    db: CoreDbRef;
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  ## Hash of the canonical head, or a zero hash when no head is set.
  if not db.getHash(canonicalHeadHashKey(), result):
    result = Hash256()
|
||||
|
||||
proc getBlockHeader*(
    db: CoreDbRef;
    n: BlockNumber;
    output: var BlockHeader;
      ): bool =
  ## Returns the block header with the given number in the canonical chain.
  var blockHash: Hash256
  db.getBlockHash(n, blockHash) and db.getBlockHeader(blockHash, output)

proc getBlockHeaderWithHash*(
    db: CoreDbRef;
    n: BlockNumber;
      ): Option[(BlockHeader, Hash256)]
      {.gcsafe, raises: [RlpError].} =
  ## Returns the block header and its hash, with the given number in the
  ## canonical chain. The hash is returned to avoid recomputing it.
  var hash: Hash256
  if not db.getBlockHash(n, hash):
    return none[(BlockHeader, Hash256)]()
  var header: BlockHeader
  if not db.getBlockHeader(hash, header):
    # This should not happen: the number->hash mapping exists but the
    # header itself is missing, so fail loudly -- the database is corrupt.
    raiseAssert(
      "Corrupted database. Mapping number->hash present, without header in database")
  some((header, hash))

proc getBlockHeader*(
    db: CoreDbRef;
    n: BlockNumber;
      ): BlockHeader
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Returns the block header with the given number in the canonical chain.
  ## Raises BlockNotFound error if the block is not in the DB.
  db.getBlockHeader(db.getBlockHash(n))
|
||||
|
||||
proc getScore*(
    db: CoreDbRef;
    blockHash: Hash256;
      ): UInt256
      {.gcsafe, raises: [RlpError].} =
  ## Total-difficulty score recorded for `blockHash`. Raises `RlpError`
  ## when the score entry is missing or malformed.
  rlp.decode(db.kvt.get(blockHashToScoreKey(blockHash).toOpenArray), UInt256)

proc setScore*(db: CoreDbRef; blockHash: Hash256, score: UInt256) =
  ## for testing purpose
  db.kvt.put(blockHashToScoreKey(blockHash).toOpenArray, rlp.encode(score))

proc getTd*(db: CoreDbRef; blockHash: Hash256, td: var UInt256): bool =
  ## Fetch the total difficulty for `blockHash` into `td`. Returns false
  ## when the entry is absent or cannot be decoded.
  let rawScore = db.kvt.get(blockHashToScoreKey(blockHash).toOpenArray)
  if rawScore.len == 0:
    return false
  try:
    td = rlp.decode(rawScore, UInt256)
  except RlpError:
    return false
  true
|
||||
|
||||
proc headTotalDifficulty*(
    db: CoreDbRef;
      ): UInt256
      {.gcsafe, raises: [RlpError].} =
  ## Total difficulty of the canonical head, or zero when no head is set.
  # This is effectively `getHash()` followed by `getScore()`.
  let rawHash = db.kvt.get(canonicalHeadHashKey().toOpenArray)
  if rawHash.len == 0:
    return 0.u256
  let headHash = rlp.decode(rawHash, Hash256)
  rlp.decode(db.kvt.get(blockHashToScoreKey(headHash).toOpenArray), UInt256)
|
||||
|
||||
proc getAncestorsHashes*(
    db: CoreDbRef;
    limit: UInt256;
    header: BlockHeader;
      ): seq[Hash256]
      {.gcsafe, raises: [BlockNotFound].} =
  ## Hashes of up to `limit` ancestors of `header`, ordered oldest first.
  ## Raises `BlockNotFound` when the ancestor chain is broken.
  var
    remaining = min(header.blockNumber, limit).truncate(int)
    cursor = header
  result = newSeq[Hash256](remaining)
  while 0 < remaining:
    cursor = db.getBlockHeader(cursor.parentHash)
    dec remaining
    result[remaining] = cursor.hash

proc addBlockNumberToHashLookup*(db: CoreDbRef; header: BlockHeader) =
  ## Record the block-number -> block-hash mapping for `header`.
  db.kvt.put(
    blockNumberToHashKey(header.blockNumber).toOpenArray,
    rlp.encode(header.hash))
|
||||
|
||||
proc persistTransactions*(
    db: CoreDbRef;
    blockNumber: BlockNumber;
    transactions: openArray[Transaction];
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  ## Store `transactions` slot-by-slot in a fresh transaction trie and
  ## index each one under its hash for `getTransactionKey()` lookups.
  ## Returns the root of the transaction trie.
  var txTrie = db.mptPrune()
  for slot, tx in transactions:
    let
      slotData = rlp.encode(tx.removeNetworkPayload)
      blockKey: TransactionKey = (blockNumber, slot)
    txTrie.put(rlp.encode(slot), slotData)
    db.kvt.put(
      transactionHashToBlockKey(rlpHash(tx)).toOpenArray, # beware EIP-4844
      rlp.encode(blockKey))
  txTrie.rootHash

proc getTransaction*(
    db: CoreDbRef;
    txRoot: Hash256;
    txIndex: int;
    res: var Transaction;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  ## Load the transaction at `txIndex` from the trie rooted at `txRoot`
  ## into `res`. Returns false when the slot is empty.
  var txTrie = db.mptPrune txRoot
  let txData = txTrie.get(rlp.encode(txIndex))
  if txData.len == 0:
    return false
  res = rlp.decode(txData, Transaction)
  true
|
||||
|
||||
proc getTransactionCount*(
    db: CoreDbRef;
    txRoot: Hash256;
      ): int
      {.gcsafe, raises: [RlpError].} =
  ## Number of consecutive transaction slots stored in the trie rooted
  ## at `txRoot`, counting up from slot 0 until the first missing key.
  var txTrie = db.mptPrune txRoot
  while rlp.encode(result) in txTrie:
    inc result
|
||||
|
||||
proc getUnclesCount*(
    db: CoreDbRef;
    ommersHash: Hash256;
      ): int
      {.gcsafe, raises: [RlpError].} =
  ## Number of uncle headers stored under `ommersHash`; 0 when the hash
  ## is the empty-uncle sentinel or there is no database entry.
  if ommersHash == EMPTY_UNCLE_HASH:
    return 0
  let encodedUncles = db.kvt.get(genericHashKey(ommersHash).toOpenArray)
  if encodedUncles.len != 0:
    result = rlpFromBytes(encodedUncles).listLen

proc getUncles*(
    db: CoreDbRef;
    ommersHash: Hash256;
      ): seq[BlockHeader]
      {.gcsafe, raises: [RlpError].} =
  ## Uncle headers stored under `ommersHash`; empty when the hash is the
  ## empty-uncle sentinel or there is no database entry.
  if ommersHash == EMPTY_UNCLE_HASH:
    return
  let encodedUncles = db.kvt.get(genericHashKey(ommersHash).toOpenArray)
  if encodedUncles.len != 0:
    result = rlp.decode(encodedUncles, seq[BlockHeader])
|
||||
|
||||
proc persistWithdrawals*(
    db: CoreDbRef;
    withdrawals: openArray[Withdrawal];
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  ## Store `withdrawals` slot-by-slot in a fresh trie, returning its root.
  var wdTrie = db.mptPrune()
  for slot, wd in withdrawals:
    wdTrie.put(rlp.encode(slot), rlp.encode(wd))
  wdTrie.rootHash

proc getWithdrawals*(
    db: CoreDbRef;
    withdrawalsRoot: Hash256;
      ): seq[Withdrawal]
      {.gcsafe, raises: [RlpError].} =
  ## Decode all withdrawals stored under `withdrawalsRoot`.
  for encodedWd in db.getWithdrawalsData(withdrawalsRoot):
    result.add rlp.decode(encodedWd, Withdrawal)
|
||||
|
||||
proc getBlockBody*(
    db: CoreDbRef;
    header: BlockHeader;
    output: var BlockBody;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  ## Assemble into `output` the block body belonging to `header`.
  ## Returns false when the uncle list is missing from the database;
  ## withdrawals are still filled in for post-Shanghai headers.
  result = true
  output.transactions = @[]
  output.uncles = @[]
  for encodedTx in db.getBlockTransactionData(header.txRoot):
    output.transactions.add(rlp.decode(encodedTx, Transaction))

  if header.ommersHash != EMPTY_UNCLE_HASH:
    let encodedUncles = db.kvt.get(genericHashKey(header.ommersHash).toOpenArray)
    if encodedUncles.len == 0:
      result = false
    else:
      output.uncles = rlp.decode(encodedUncles, seq[BlockHeader])

  if header.withdrawalsRoot.isSome:
    output.withdrawals = some(db.getWithdrawals(header.withdrawalsRoot.get))

proc getBlockBody*(
    db: CoreDbRef;
    blockHash: Hash256;
    output: var BlockBody;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  ## Assemble into `output` the block body for `blockHash`.
  ## Returns false when the header or the uncle list is missing.
  var header: BlockHeader
  if db.getBlockHeader(blockHash, header):
    return db.getBlockBody(header, output)

proc getBlockBody*(
    db: CoreDbRef;
    hash: Hash256;
      ): BlockBody
      {.gcsafe, raises: [RlpError,ValueError].} =
  ## Block body for `hash`; raises `ValueError` when it cannot be loaded.
  if not db.getBlockBody(hash, result):
    raise newException(ValueError, "Error when retrieving block body")
|
||||
|
||||
proc getUncleHashes*(
    db: CoreDbRef;
    blockHashes: openArray[Hash256];
      ): seq[Hash256]
      {.gcsafe, raises: [RlpError,ValueError].} =
  ## Hashes of all uncles contained in the bodies of the given blocks.
  ## Raises `ValueError` when one of the block bodies cannot be loaded.
  for blockHash in blockHashes:
    for uncle in db.getBlockBody(blockHash).uncles:
      result.add uncle.hash

proc getUncleHashes*(
    db: CoreDbRef;
    header: BlockHeader;
      ): seq[Hash256]
      {.gcsafe, raises: [RlpError].} =
  ## Hashes of the uncle headers stored under `header.ommersHash`.
  if header.ommersHash == EMPTY_UNCLE_HASH:
    return
  let encodedUncles = db.kvt.get(genericHashKey(header.ommersHash).toOpenArray)
  if encodedUncles.len != 0:
    for uncle in rlp.decode(encodedUncles, seq[BlockHeader]):
      result.add uncle.hash
|
||||
|
||||
proc getTransactionKey*(
    db: CoreDbRef;
    transactionHash: Hash256;
      ): tuple[blockNumber: BlockNumber, index: int]
      {.gcsafe, raises: [RlpError].} =
  ## Look up the (block number, slot index) a transaction was stored
  ## under by `persistTransactions()`. Returns `(0, -1)` when unknown.
  let rawKey = db.kvt.get(transactionHashToBlockKey(transactionHash).toOpenArray)
  if rawKey.len == 0:
    return (0.toBlockNumber, -1)
  let key = rlp.decode(rawKey, TransactionKey)
  (key.blockNumber, key.index)

proc headerExists*(db: CoreDbRef; blockHash: Hash256): bool =
  ## Returns True if the header with the given block hash is in our DB.
  db.kvt.contains(genericHashKey(blockHash).toOpenArray)
|
||||
|
||||
proc setHead*(
    db: CoreDbRef;
    blockHash: Hash256;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  ## Make the block with `blockHash` the canonical head. Returns false
  ## when the header is unknown or the chain cannot be re-marked.
  var header: BlockHeader
  if not db.getBlockHeader(blockHash, header):
    return false
  if not db.markCanonicalChain(header, blockHash):
    return false
  db.kvt.put(canonicalHeadHashKey().toOpenArray, rlp.encode(blockHash))
  true

proc setHead*(
    db: CoreDbRef;
    header: BlockHeader;
    writeHeader = false;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  ## Make `header` the canonical head, optionally persisting the header
  ## itself first. Returns false when the chain cannot be re-marked.
  let headerHash = rlpHash(header)
  if writeHeader:
    db.kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header))
  if not db.markCanonicalChain(header, headerHash):
    return false
  db.kvt.put(canonicalHeadHashKey().toOpenArray, rlp.encode(headerHash))
  true
|
||||
|
||||
proc persistReceipts*(
    db: CoreDbRef;
    receipts: openArray[Receipt];
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  ## Store `receipts` slot-by-slot in a fresh trie, returning its root.
  var rcptTrie = db.mptPrune()
  for slot, rec in receipts:
    rcptTrie.put(rlp.encode(slot), rlp.encode(rec))
  rcptTrie.rootHash

proc getReceipts*(
    db: CoreDbRef;
    receiptRoot: Hash256;
      ): seq[Receipt]
      {.gcsafe, raises: [RlpError].} =
  ## Collect all receipts stored under `receiptRoot` into a sequence.
  for rec in db.getReceipts(receiptRoot):
    result.add rec
|
||||
|
||||
proc persistHeaderToDb*(
    db: CoreDbRef;
    header: BlockHeader;
    forceCanonical: bool;
    startOfHistory = GENESIS_PARENT_HASH;
      ): seq[BlockHeader]
      {.gcsafe, raises: [RlpError,EVMError].} =
  ## Store `header` together with its total-difficulty score and its
  ## number -> hash lookup. When the accumulated score exceeds that of
  ## the current head (or `forceCanonical` is set) the header becomes
  ## the new canonical head and the superseded headers are returned.
  let
    isStartOfHistory = header.parentHash == startOfHistory
    headerHash = header.blockHash
  if not isStartOfHistory and not db.headerExists(header.parentHash):
    raise newException(ParentNotFound, "Cannot persist block header " &
      $headerHash & " with unknown parent " & $header.parentHash)
  db.kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header))

  let score =
    if isStartOfHistory: header.difficulty
    else: db.getScore(header.parentHash) + header.difficulty
  db.kvt.put(blockHashToScoreKey(headerHash).toOpenArray, rlp.encode(score))

  db.addBlockNumberToHashLookup(header)

  var headScore: UInt256
  try:
    headScore = db.getScore(db.getCanonicalHead().hash)
  except CanonicalHeadNotFound:
    # First header ever -- it becomes the canonical head unconditionally.
    return db.setAsCanonicalChainHead(headerHash)

  if score > headScore or forceCanonical:
    return db.setAsCanonicalChainHead(headerHash)
|
||||
|
||||
proc persistHeaderToDbWithoutSetHead*(
    db: CoreDbRef;
    header: BlockHeader;
    startOfHistory = GENESIS_PARENT_HASH;
      ) {.gcsafe, raises: [RlpError].} =
  ## Store `header` together with its total-difficulty score, without
  ## updating the canonical head or the number -> hash lookup.
  let
    headerHash = header.blockHash
    score =
      if header.parentHash == startOfHistory: header.difficulty
      else: db.getScore(header.parentHash) + header.difficulty
  db.kvt.put(blockHashToScoreKey(headerHash).toOpenArray, rlp.encode(score))
  db.kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header))

# FIXME-Adam: This seems like a bad idea. I don't see a way to get the score
# in stateless mode, but it seems dangerous to just shove the header into
# the DB *without* also storing the score.
proc persistHeaderToDbWithoutSetHeadOrScore*(db: CoreDbRef; header: BlockHeader) =
  ## Store `header` and its number -> hash lookup only (no score, no
  ## canonical-head update).
  db.addBlockNumberToHashLookup(header)
  db.kvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
|
||||
|
||||
proc persistUncles*(db: CoreDbRef, uncles: openArray[BlockHeader]): Hash256 =
  ## Persists the list of uncles to the database.
  ## Returns the uncles hash (keccak of the RLP-encoded list), which is
  ## also the key the list is stored under.
  let enc = rlp.encode(uncles)
  result = keccakHash(enc)
  db.kvt.put(genericHashKey(result).toOpenArray, enc)
|
||||
|
||||
proc safeHeaderHash*(
    db: CoreDbRef;
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  ## Hash of the "safe" block, or a zero hash when none was recorded.
  discard db.getHash(safeHashKey(), result)

proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
  ## Record `headerHash` as the "safe" block hash.
  db.kvt.put(safeHashKey().toOpenArray, rlp.encode(headerHash))

proc finalizedHeaderHash*(
    db: CoreDbRef;
      ): Hash256
      {.gcsafe, raises: [RlpError].} =
  ## Hash of the finalized block, or a zero hash when none was recorded.
  discard db.getHash(finalizedHashKey(), result)

proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
  ## Record `headerHash` as the finalized block hash.
  db.kvt.put(finalizedHashKey().toOpenArray, rlp.encode(headerHash))

proc safeHeader*(
    db: CoreDbRef;
      ): BlockHeader
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Header of the "safe" block; raises `BlockNotFound` when missing.
  db.getBlockHeader(db.safeHeaderHash)

proc finalizedHeader*(
    db: CoreDbRef;
      ): BlockHeader
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Header of the finalized block; raises `BlockNotFound` when missing.
  db.getBlockHeader(db.finalizedHeaderHash)

proc haveBlockAndState*(db: CoreDbRef, headerHash: Hash256): bool =
  ## True when both the header and the root node of its state trie are
  ## present in the database.
  var header: BlockHeader
  if not db.getBlockHeader(headerHash, header):
    return false
  # see if stateRoot exists
  db.exists(header.stateRoot)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
436
nimbus/db/core_db/legacy.nim
Normal file
436
nimbus/db/core_db/legacy.nim
Normal file
@ -0,0 +1,436 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2018 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/options,
|
||||
eth/[common, rlp, trie/db, trie/hexary],
|
||||
results,
|
||||
../../constants,
|
||||
../select_backend,
|
||||
./base
|
||||
|
||||
type
  LegacyCoreDbRef* = ref object of CoreDbRef
    ## Legacy (pre-Aristo) database descriptor.
    backend: ChainDB            # persistent store; unset for memory DBs

  LegacyCoreDbKvtRef* = ref object of CoreDbKvtRef
    ## Holds single database
    db*: TrieDatabaseRef

  LegacyCoreDbMptRef* = ref object of CoreDbMptRef
    ## Plain hexary trie wrapper.
    mpt: HexaryTrie

  LegacyCoreDbPhkRef* = ref object of CoreDbPhkRef
    ## Pre-hashed-key (secure) hexary trie wrapper.
    phk: SecureHexaryTrie


  LegacyCoreDbTxRef* = ref object of CoreDbTxRef
    ## Wrapper around a legacy database transaction.
    tx: DbTransaction

  LegacyCoreDbTxID* = ref object of CoreDbTxID
    ## Wrapper around a legacy transaction ID.
    tid: TransactionID


  LegacyCoreDbCaptRef* = ref object of CoreDbCaptRef
    ## Capture/tracer state: reads and writes are mirrored to `recorder`.
    recorder: TrieDatabaseRef
    appDb: LegacyCoreDbRef      # `CoreDbRef` view onto the recorder
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public constructor and low level data retrieval, storage & transaction frame
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc newLegacyCoreDbRef*(db: TrieDatabaseRef): LegacyCoreDbRef =
  ## Wrap an existing `TrieDatabaseRef` as a legacy core database.
  result = LegacyCoreDbRef()
  result.init(LegacyDbPersistent, LegacyCoreDbKvtRef(db: db))

proc newLegacyPersistentCoreDbRef*(
    path: string;
      ): LegacyCoreDbRef =
  ## Open (or create) a RocksDB-backed legacy core database at `path`.
  # Kludge: Compiler bails out on `results.tryGet()` with
  # ::
  #   fatal.nim(54)            sysFatal
  #   Error: unhandled exception: types.nim(1251, 10) \
  #     `b.kind in {tyObject} + skipPtrs`  [AssertionDefect]
  #
  # when running `select_backend.newChainDB(path)`. The culprit seems to be
  # the `ResultError` exception (or any other `CatchableError`).
  #
  doAssert dbBackend == rocksdb
  let rc = RocksStoreRef.init(path, "nimbus")
  doAssert(rc.isOk, "Cannot start RocksDB: " & rc.error)
  doAssert(not rc.value.isNil, "Starting RocksDB returned nil")

  let
    rdb = rc.value
    backend = ChainDB(kv: rdb.kvStore, rdb: rdb)

  result = LegacyCoreDbRef(backend: backend)
  result.init(LegacyDbPersistent, LegacyCoreDbKvtRef(db: backend.trieDB))

proc newLegacyMemoryCoreDbRef*(): LegacyCoreDbRef =
  ## Create a fresh in-memory legacy core database (for tests etc.)
  result = LegacyCoreDbRef()
  result.init(LegacyDbMemory, LegacyCoreDbKvtRef(db: newMemoryDB()))
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public legacy helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
method compensateLegacySetup*(db: LegacyCoreDbRef) =
  ## Store the RLP encoding of an empty node under the empty root hash,
  ## as expected by the legacy hexary trie code.
  db.kvt.LegacyCoreDbKvtRef.db.put(EMPTY_ROOT_HASH.data, @[0x80u8])

proc toLegacyTrieRef*(
    db: CoreDbRef;
      ): TrieDatabaseRef
      {.gcsafe, deprecated: "Will go away some time in future".} =
  ## Escape hatch: expose the wrapped `TrieDatabaseRef`.
  db.kvt.LegacyCoreDbKvtRef.db

proc toLegacyBackend*(
    db: CoreDbRef;
      ): ChainDB
      {.gcsafe, deprecated: "Will go away some time in future".} =
  ## Escape hatch: expose the wrapped persistent `ChainDB` backend.
  db.LegacyCoreDbRef.backend
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public tracer methods (backport from capturedb/tracer sources)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc get(db: LegacyCoreDbCaptRef, key: openArray[byte]): Blob =
  ## Mixin for `trieDB()`
  ## Serve from the recorder if possible, otherwise read through to the
  ## parent database and capture the value into the recorder.
  result = db.recorder.get(key)
  if result.len != 0: return
  result = db.parent.kvt.LegacyCoreDbKvtRef.db.get(key)
  if result.len != 0:
    db.recorder.put(key, result)

proc put(db: LegacyCoreDbCaptRef, key, value: openArray[byte]) =
  ## Mixin for `trieDB()`
  ## Write to the recorder; also to the parent when `PersistPut` is set.
  db.recorder.put(key, value)
  if PersistPut in db.flags:
    db.parent.kvt.LegacyCoreDbKvtRef.db.put(key, value)

proc contains(db: LegacyCoreDbCaptRef, key: openArray[byte]): bool =
  ## Mixin for `trieDB()`
  # NOTE(review): the assert requires the recorder to agree with the
  # parent for every queried key -- presumably keys are always read via
  # `get()` (which captures) before being tested; verify against callers.
  result = db.parent.kvt.LegacyCoreDbKvtRef.db.contains(key)
  doAssert(db.recorder.contains(key) == result)

proc del(db: LegacyCoreDbCaptRef, key: openArray[byte]) =
  ## Mixin for `trieDB()`
  ## Delete from the recorder; also from the parent when `PersistDel` is set.
  db.recorder.del(key)
  if PersistDel in db.flags:
    db.parent.kvt.LegacyCoreDbKvtRef.db.del(key)

method newCoreDbCaptRef*(
    db: LegacyCoreDbRef;
    flags: set[CoreDbCaptFlags] = {};
      ): CoreDbCaptRef =
  ## Create a capture descriptor: an in-memory recorder fronting `db`,
  ## wrapped as a `CoreDbRef` via the mixins above.
  var captDB = LegacyCoreDbCaptRef(recorder: newMemoryDB())
  captDB.init(db, flags)
  captDB.appDb = LegacyCoreDbRef()
  captDB.appDb.init(LegacyDbPersistent, LegacyCoreDbKvtRef(db: trieDB captDB))
  captDB

method recorder*(
    db: LegacyCoreDbCaptRef;
      ): CoreDbRef =
  ## `CoreDbRef` view onto the capture recorder.
  db.appDb
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public key-value table methods
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
method get*(
    db: LegacyCoreDbKvtRef;
    key: openArray[byte];
      ): Blob =
  ## Value stored under `key` in the key-value table.
  db.db.get key

method maybeGet*(
    db: LegacyCoreDbKvtRef;
    key: openArray[byte];
      ): Option[Blob] =
  ## Like `get()`, but wrapped in an `Option`.
  db.db.maybeGet key

method del*(
    db: LegacyCoreDbKvtRef;
    key: openArray[byte];
      ) =
  ## Remove the entry stored under `key`.
  db.db.del key

method put*(
    db: LegacyCoreDbKvtRef;
    key: openArray[byte];
    value: openArray[byte];
      ) =
  ## Store `value` under `key`, overwriting any previous entry.
  db.db.put(key, value)

method contains*(
    db: LegacyCoreDbKvtRef;
    key: openArray[byte];
      ): bool =
  ## True when an entry exists under `key`.
  db.db.contains key

iterator pairs*(
    db: LegacyCoreDbKvtRef;
      ): (Blob, Blob)
      {.gcsafe.} =
  ## Iterate over all key/value pairs via `pairsInMemoryDB` (memory
  ## based tables).
  for k,v in db.db.pairsInMemoryDB:
    yield (k,v)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public hexary trie methods
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
method mpt*(
    db: LegacyCoreDbRef;
    root: Hash256;
      ): CoreDbMptRef =
  ## Hexary trie based at `root`, pruning disabled.
  result = LegacyCoreDbMptRef(
    mpt: initHexaryTrie(db.kvt.LegacyCoreDbKvtRef.db, root, isPruning=false))
  result.init db

method mpt*(
    db: LegacyCoreDbRef;
      ): CoreDbMptRef =
  ## Hexary trie based at the empty root, pruning disabled.
  result = LegacyCoreDbMptRef(
    mpt: initHexaryTrie(db.kvt.LegacyCoreDbKvtRef.db, isPruning=false))
  result.init db

method isPruning*(
    db: LegacyCoreDbMptRef;
      ): bool =
  ## Pruning state of the wrapped trie.
  db.mpt.isPruning

# ------

method mptPrune*(
    db: LegacyCoreDbRef;
    root: Hash256;
      ): CoreDbMptRef =
  ## Hexary trie based at `root`, with the trie's default pruning.
  result = LegacyCoreDbMptRef(
    mpt: initHexaryTrie(db.kvt.LegacyCoreDbKvtRef.db, root))
  result.init db

method mptPrune*(
    db: LegacyCoreDbRef;
      ): CoreDbMptRef =
  ## Hexary trie based at the empty root, with the default pruning.
  result = LegacyCoreDbMptRef(
    mpt: initHexaryTrie(db.kvt.LegacyCoreDbKvtRef.db))
  result.init db

method mptPrune*(
    db: LegacyCoreDbRef;
    root: Hash256;
    prune: bool;
      ): CoreDbMptRef =
  ## Hexary trie based at `root`, pruning set explicitly.
  result = LegacyCoreDbMptRef(
    mpt: initHexaryTrie(db.kvt.LegacyCoreDbKvtRef.db, root, isPruning=prune))
  result.init db

method mptPrune*(
    db: LegacyCoreDbRef;
    prune: bool;
      ): CoreDbMptRef =
  ## Hexary trie based at the empty root, pruning set explicitly.
  result = LegacyCoreDbMptRef(
    mpt: initHexaryTrie(db.kvt.LegacyCoreDbKvtRef.db, isPruning=prune))
  result.init db
|
||||
|
||||
# ------
|
||||
|
||||
method get*(
    db: LegacyCoreDbMptRef;
    key: openArray[byte];
      ): Blob
      {.gcsafe, raises: [RlpError].} =
  ## Value stored under `key` in the trie.
  db.mpt.get key

method maybeGet*(
    db: LegacyCoreDbMptRef;
    key: openArray[byte];
      ): Option[Blob]
      {.gcsafe, raises: [RlpError].} =
  ## Like `get()`, but wrapped in an `Option`.
  db.mpt.maybeGet key

method del*(
    db: LegacyCoreDbMptRef;
    key: openArray[byte];
      ) {.gcsafe, raises: [RlpError].} =
  ## Remove the entry stored under `key` from the trie.
  db.mpt.del key

method put*(
    db: LegacyCoreDbMptRef;
    key: openArray[byte];
    value: openArray[byte];
      ) {.gcsafe, raises: [RlpError].} =
  ## Store `value` under `key` in the trie.
  db.mpt.put(key, value)

method contains*(
    db: LegacyCoreDbMptRef;
    key: openArray[byte];
      ): bool
      {.gcsafe, raises: [RlpError].} =
  ## True when the trie contains an entry under `key`.
  db.mpt.contains key

method rootHash*(
    db: LegacyCoreDbMptRef;
      ): Hash256 =
  ## Current root hash of the trie.
  db.mpt.rootHash

iterator pairs*(
    db: LegacyCoreDbMptRef;
      ): (Blob, Blob)
      {.gcsafe, raises: [RlpError].} =
  ## Iterate over all key/value pairs of the trie.
  for k,v in db.mpt:
    yield (k,v)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public pre-hashed key hexary trie methods
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
method phk*(
    db: LegacyCoreDbRef;
    root: Hash256;
      ): CoreDbPhkRef =
  ## Secure (pre-hashed key) trie based at `root`, pruning disabled.
  result = LegacyCoreDbPhkRef(
    phk: initSecureHexaryTrie(
      db.kvt.LegacyCoreDbKvtRef.db, root, isPruning=false))
  result.init db

method phk*(
    db: LegacyCoreDbRef;
      ): CoreDbPhkRef =
  ## Secure trie based at the empty root, pruning disabled.
  result = LegacyCoreDbPhkRef(
    phk: initSecureHexaryTrie(
      db.kvt.LegacyCoreDbKvtRef.db, isPruning=false))
  result.init db

method isPruning*(
    db: LegacyCoreDbPhkRef;
      ): bool =
  ## Pruning state of the wrapped secure trie.
  db.phk.isPruning

# ------

method phkPrune*(
    db: LegacyCoreDbRef;
    root: Hash256;
      ): CoreDbPhkRef =
  ## Secure trie based at `root`, with the trie's default pruning.
  result = LegacyCoreDbPhkRef(
    phk: initSecureHexaryTrie(
      db.kvt.LegacyCoreDbKvtRef.db, root))
  result.init db

method phkPrune*(
    db: LegacyCoreDbRef;
      ): CoreDbPhkRef =
  ## Secure trie based at the empty root, with the default pruning.
  result = LegacyCoreDbPhkRef(
    phk: initSecureHexaryTrie(
      db.kvt.LegacyCoreDbKvtRef.db))
  result.init db

method phkPrune*(
    db: LegacyCoreDbRef;
    root: Hash256;
    prune: bool;
      ): CoreDbPhkRef =
  ## Secure trie based at `root`, pruning set explicitly.
  result = LegacyCoreDbPhkRef(
    phk: initSecureHexaryTrie(
      db.kvt.LegacyCoreDbKvtRef.db, root, isPruning=prune))
  result.init db

method phkPrune*(
    db: LegacyCoreDbRef;
    prune: bool;
      ): CoreDbPhkRef =
  ## Secure trie based at the empty root, pruning set explicitly.
  result = LegacyCoreDbPhkRef(
    phk: initSecureHexaryTrie(
      db.kvt.LegacyCoreDbKvtRef.db, isPruning=prune))
  result.init db
|
||||
|
||||
# ------
|
||||
|
||||
method get*(
    db: LegacyCoreDbPhkRef;
    key: openArray[byte];
      ): Blob
      {.gcsafe, raises: [RlpError].} =
  ## Value stored under `key` in the secure trie.
  db.phk.get key

method maybeGet*(
    db: LegacyCoreDbPhkRef;
    key: openArray[byte];
      ): Option[Blob]
      {.gcsafe, raises: [RlpError].} =
  ## Like `get()`, but wrapped in an `Option`.
  db.phk.maybeGet key

method del*(
    db: LegacyCoreDbPhkRef;
    key: openArray[byte];
      ) {.gcsafe, raises: [RlpError].} =
  ## Remove the entry stored under `key` from the secure trie.
  db.phk.del key

method put*(
    db: LegacyCoreDbPhkRef;
    key: openArray[byte];
    value: openArray[byte];
      ) {.gcsafe, raises: [RlpError].} =
  ## Store `value` under `key` in the secure trie.
  db.phk.put(key, value)

method contains*(
    db: LegacyCoreDbPhkRef;
    key: openArray[byte];
      ): bool
      {.gcsafe, raises: [RlpError].} =
  ## True when the secure trie contains an entry under `key`.
  db.phk.contains key

method rootHash*(
    db: LegacyCoreDbPhkRef;
      ): Hash256 =
  ## Current root hash of the secure trie.
  db.phk.rootHash
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public transaction related methods
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
method getTransactionID*(db: LegacyCoreDbRef): CoreDbTxID =
  ## Current transaction ID of the underlying database, wrapped.
  LegacyCoreDbTxID(tid: db.kvt.LegacyCoreDbKvtRef.db.getTransactionID)

method setTransactionID*(db: LegacyCoreDbRef; id: CoreDbTxID) =
  ## Reposition the underlying database onto transaction `id`.
  db.kvt.LegacyCoreDbKvtRef.db.setTransactionID LegacyCoreDbTxID(id).tid

method beginTransaction*(db: LegacyCoreDbRef): CoreDbTxRef =
  ## Open a new transaction frame on the underlying database.
  result = LegacyCoreDbTxRef(
    tx: db.kvt.LegacyCoreDbKvtRef.db.beginTransaction())
  result.init db

method commit*(t: LegacyCoreDbTxRef, applyDeletes = true) =
  ## Commit the transaction frame.
  t.tx.commit applyDeletes

method rollback*(t: LegacyCoreDbTxRef) =
  ## Discard all changes made within the transaction frame.
  t.tx.rollback()

method dispose*(t: LegacyCoreDbTxRef) =
  ## Release the transaction frame.
  t.tx.dispose()

method safeDispose*(t: LegacyCoreDbTxRef) =
  ## Release the transaction frame (safe variant).
  t.tx.safeDispose()

method shortTimeReadOnly*(
    db: LegacyCoreDbRef;
    id: CoreDbTxID;
    action: proc() {.gcsafe, raises: [CatchableError].};
      ) {.gcsafe, raises: [CatchableError].} =
  ## Run `action()` with the database temporarily positioned on
  ## transaction `id`, read-only.
  db.kvt.LegacyCoreDbKvtRef.db.shortTimeReadOnly LegacyCoreDbTxID(id).tid:
    action()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -28,15 +28,15 @@
|
||||
##
|
||||
|
||||
import
|
||||
std/[distros, os],
|
||||
std/os,
|
||||
chronicles,
|
||||
results,
|
||||
unittest2,
|
||||
../nimbus/core/chain, # must be early (compilation annoyance)
|
||||
../nimbus/config,
|
||||
../nimbus/db/select_backend,
|
||||
../nimbus/core/chain,
|
||||
../nimbus/common/common,
|
||||
./replay/[undump_blocks, pp],
|
||||
chronicles,
|
||||
stew/results,
|
||||
unittest2
|
||||
./replay/[undump_blocks, pp]
|
||||
|
||||
type
|
||||
ReplaySession = object
|
||||
@ -85,31 +85,7 @@ const
|
||||
termTotalDff: 20_000_000_000_000.u256,
|
||||
mergeFork: 1000,
|
||||
ttdReachedAt: 55127,
|
||||
failBlockAt: 9999999)
|
||||
|
||||
when not defined(linux):
|
||||
const isUbuntu32bit = false
|
||||
else:
|
||||
# The `detectOs(Ubuntu)` directive is not Windows compatible, causes an
|
||||
# error when running the system command `lsb_release -d` in the background.
|
||||
let isUbuntu32bit = detectOs(Ubuntu) and int.sizeof == 4
|
||||
|
||||
let
|
||||
# There is a problem with the Github/CI which results in spurious crashes
|
||||
# when leaving the `runner()` if the persistent ChainDBRef initialisation
|
||||
# was present. The Github/CI set up for Linux/i386 is
|
||||
#
|
||||
# Ubuntu 10.04.06 LTS
|
||||
# with repo kernel 5.4.0-1065-azure (see 'uname -a')
|
||||
#
|
||||
# base OS architecture is amd64
|
||||
# with i386 foreign architecture
|
||||
#
|
||||
# nimbus binary is an
|
||||
# ELF 32-bit LSB shared object,
|
||||
# Intel 80386, version 1 (SYSV), dynamically linked,
|
||||
#
|
||||
disablePersistentDB = isUbuntu32bit
|
||||
failBlockAt: 1000) # Kludge, some change at the `merge` logic?
|
||||
|
||||
# Block chains shared between test suites
|
||||
var
|
||||
@ -170,7 +146,6 @@ proc setErrorLevel =
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc ddbCleanUp(dir: string) =
|
||||
if not disablePersistentDB:
|
||||
ddbDir = dir
|
||||
dir.flushDbDir
|
||||
|
||||
@ -195,14 +170,15 @@ proc importBlocks(c: ChainRef; h: seq[BlockHeader]; b: seq[BlockBody];
|
||||
bRng = if 1 < h.len: &"s [#{first}..#{last}]={h.len}" else: &" #{first}"
|
||||
blurb = &"persistBlocks([#{first}..#"
|
||||
|
||||
noisy.say "***", &"block{bRng} #txs={nTxs} #uncles={nUnc}"
|
||||
|
||||
catchException("persistBlocks()", trace = true):
|
||||
if c.persistBlocks(h, b).isOk:
|
||||
noisy.say "***", &"block{bRng} #txs={nTxs} #uncles={nUnc}"
|
||||
if not tddOk and c.com.ttdReached:
|
||||
noisy.say "***", &"block{bRng} => tddReached"
|
||||
return true
|
||||
|
||||
noisy.say "***", &"block{bRng} #txs={nTxs} #uncles={nUnc} -- failed"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Test Runner
|
||||
# ------------------------------------------------------------------------------
|
||||
@ -216,8 +192,7 @@ proc genesisLoadRunner(noisy = true;
|
||||
gFileInfo = sSpcs.genesisFile.splitFile.name.split(".")[0]
|
||||
gFilePath = sSpcs.genesisFile.findFilePath.value
|
||||
|
||||
tmpDir = if disablePersistentDB: "*notused*"
|
||||
else: gFilePath.splitFile.dir / "tmp"
|
||||
tmpDir = gFilePath.splitFile.dir / "tmp"
|
||||
|
||||
persistPruneInfo = if persistPruneTrie: "pruning enabled"
|
||||
else: "no pruning"
|
||||
@ -240,9 +215,6 @@ proc genesisLoadRunner(noisy = true;
|
||||
check mcom.toHardFork(sSpcs.mergeFork.toBlockNumber.blockNumberToForkDeterminationInfo) == MergeFork
|
||||
|
||||
test &"Construct persistent ChainDBRef on {tmpDir}, {persistPruneInfo}":
|
||||
if disablePersistentDB:
|
||||
skip()
|
||||
else:
|
||||
# Before allocating the database, the data directory needs to be
|
||||
# cleared. There might be left overs from a previous crash or
|
||||
# because there were file locks under Windows which prevented a
|
||||
@ -270,9 +242,6 @@ proc genesisLoadRunner(noisy = true;
|
||||
check storedhHeaderPP == onTheFlyHeaderPP
|
||||
|
||||
test "Initialise persistent Genesis":
|
||||
if disablePersistentDB:
|
||||
skip()
|
||||
else:
|
||||
dcom.initializeEmptyDb
|
||||
|
||||
# Must be the same as the in-memory DB value
|
||||
@ -386,6 +355,7 @@ when isMainModule:
|
||||
# typically on the `nimbus-eth1-blobs` module.
|
||||
noisy.testnetChainRunner(
|
||||
stopAfterBlock = 999999999)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
||||
|
Loading…
x
Reference in New Issue
Block a user