Core db aristo and kvt updates preparing for integration (#1760)

* Kvt: Implemented multi-descriptor access on the same backend

why:
  This behaviour mirrors the one of Aristo and can be used for
  simultaneous transactions on Aristo + Kvt

* Kvt: Update database iterators

why:
  Forgot to run on the top layer first

* Kvt: Misc fixes

* Aristo, use `openArray[byte]` rather than `Blob` in prototype

* Aristo, by default hashify right after cloning descriptor

why:
  Typically, a completed descriptor is expected after cloning. Hashing
  can be suppressed by argument flag.

* Aristo provides `replicate()` iterator, similar to legacy `replicate()`

* Aristo API fixes and updates

* CoreDB: Rename `legacy_persistent` => `legacy_rocksdb`

why:
  More systematic, will be in line with Aristo DB which might have
  more than one persistent backend

* CoreDB: Prettify API sources

why:
  Better to read and maintain

details:
  Annotating with custom pragmas which cleans up the prototypes

* CoreDB: Update MPT/put() prototype allowing `CatchableError`

why:
  Will be needed for Aristo API (legacy is OK with `RlpError`)
This commit is contained in:
Jordan Hrycaj 2023-09-18 21:20:28 +01:00 committed by GitHub
parent 18b3ff3390
commit 6bc55d4e6f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
35 changed files with 515 additions and 160 deletions

View File

@ -18,18 +18,18 @@ import
# ------------------------------------------------------------------------------
func wdRoot(list: openArray[WithdrawalV1]): common.Hash256
{.gcsafe, raises:[RlpError].} =
{.gcsafe, raises:[CatchableError].} =
{.nosideEffect.}:
calcWithdrawalsRoot(ethWithdrawals list)
func wdRoot(x: Option[seq[WithdrawalV1]]): Option[common.Hash256]
{.gcsafe, raises:[RlpError].} =
{.gcsafe, raises:[CatchableError].} =
{.nosideEffect.}:
if x.isNone: none(common.Hash256)
else: some(wdRoot x.get)
func txRoot(list: openArray[Web3Tx]): common.Hash256
{.gcsafe, raises:[RlpError].} =
{.gcsafe, raises:[CatchableError].} =
{.nosideEffect.}:
calcTxRoot(ethTxs list)
@ -81,7 +81,7 @@ func executionPayloadV1V2*(blk: EthBlock): ExecutionPayloadV1OrV2 =
func blockHeader*(p: ExecutionPayload,
beaconRoot: Option[common.Hash256]):
common.BlockHeader {.gcsafe, raises:[RlpError].} =
common.BlockHeader {.gcsafe, raises:[CatchableError].} =
common.BlockHeader(
parentHash : ethHash p.parentHash,
ommersHash : EMPTY_UNCLE_HASH,
@ -115,7 +115,7 @@ func blockBody*(p: ExecutionPayload):
func ethBlock*(p: ExecutionPayload,
beaconRoot: Option[common.Hash256]):
common.EthBlock {.gcsafe, raises:[RlpError].} =
common.EthBlock {.gcsafe, raises:[CatchableError].} =
common.Ethblock(
header : blockHeader(p, beaconRoot),
uncles : @[],

View File

@ -1,6 +1,6 @@
import
std/tables,
eth/[common, rlp, eip1559],
eth/[common, eip1559],
eth/trie/trie_defs,
../db/[core_db, state_db],
../constants,
@ -23,7 +23,7 @@ proc toGenesisHeader*(
sdb: AccountStateDB;
fork: HardFork;
): BlockHeader
{.gcsafe, raises: [RlpError].} =
{.gcsafe, raises: [CatchableError].} =
## Initialise block chain DB accounts derived from the `genesis.alloc` table
## of the `db` descriptor argument.
##
@ -90,7 +90,7 @@ proc toGenesisHeader*(
fork: HardFork;
db = CoreDbRef(nil);
): BlockHeader
{.gcsafe, raises: [RlpError].} =
{.gcsafe, raises: [CatchableError].} =
## Generate the genesis block header from the `genesis` and `config`
## argument value.
let
@ -102,7 +102,7 @@ proc toGenesisHeader*(
params: NetworkParams;
db = CoreDbRef(nil);
): BlockHeader
{.raises: [RlpError].} =
{.raises: [CatchableError].} =
## Generate the genesis block header from the `genesis` and `config`
## argument value.
let map = toForkTransitionTable(params.config)

View File

@ -110,7 +110,7 @@ proc procBlkPreamble(vmState: BaseVMState;
proc procBlkEpilogue(vmState: BaseVMState;
header: BlockHeader; body: BlockBody): bool
{.gcsafe, raises: [RlpError].} =
{.gcsafe, raises: [CatchableError].} =
# Reward beneficiary
vmState.mutateStateDB:
if vmState.generateWitness:

View File

@ -64,7 +64,7 @@ logScope:
# raise newException(TxPackerError, info & "(): " & $e.name & " -- " & e.msg)
proc persist(pst: TxPackerStateRef)
{.gcsafe,raises: [RlpError].} =
{.gcsafe,raises: [CatchableError].} =
## Smart wrapper
if not pst.cleanState:
let fork = pst.xp.chain.nextFork

View File

@ -69,8 +69,14 @@ proc validateSeal(pow: PowRef; header: BlockHeader): Result[void,string] =
ok()
proc validateHeader(com: CommonRef; header, parentHeader: BlockHeader;
body: BlockBody; checkSealOK: bool): Result[void,string] =
proc validateHeader(
com: CommonRef;
header: BlockHeader;
parentHeader: BlockHeader;
body: BlockBody;
checkSealOK: bool;
): Result[void,string]
{.gcsafe, raises: [CatchableError].} =
template inDAOExtraRange(blockNumber: BlockNumber): bool =
# EIP-799
@ -391,7 +397,9 @@ proc validateHeaderAndKinship*(
com: CommonRef;
header: BlockHeader;
body: BlockBody;
checkSealOK: bool): Result[void, string] =
checkSealOK: bool;
): Result[void, string]
{.gcsafe, raises: [CatchableError].} =
if header.isGenesis:
if header.extraData.len > 32:
return err("BlockHeader.extraData larger than 32 bytes")

View File

@ -16,8 +16,11 @@ import
# https://eips.ethereum.org/EIPS/eip-4895
proc validateWithdrawals*(
com: CommonRef, header: BlockHeader, body: BlockBody
): Result[void, string] =
com: CommonRef,
header: BlockHeader,
body: BlockBody
): Result[void, string]
{.gcsafe, raises: [CatchableError].} =
if com.forkGTE(Shanghai):
if header.withdrawalsRoot.isNone:

View File

@ -15,28 +15,32 @@
import aristo/[
aristo_constants, aristo_delete, aristo_fetch, aristo_init,
aristo_merge, aristo_nearby, aristo_tx, aristo_utils]
aristo_merge, aristo_nearby, aristo_tx, aristo_utils, aristo_walk]
export
aristo_constants, aristo_delete, aristo_fetch, aristo_init,
aristo_merge, aristo_nearby, aristo_tx, aristo_utils
aristo_merge, aristo_nearby, aristo_tx, aristo_utils, aristo_walk
import
aristo/aristo_transcode
export
append, read
append, read, serialise
import aristo/aristo_desc/[desc_identifiers, desc_structural]
export
AristoAccount,
PayloadRef,
PayloadType,
desc_identifiers,
`==`
import
aristo/aristo_desc
export
AristoDbAction,
AristoDbRef,
AristoError,
AristoTxRef,
forget,
isValid
# End

View File

@ -375,7 +375,7 @@ proc delete*(
proc delete*(
db: AristoDbRef;
root: VertexID;
path: Blob;
path: openArray[byte];
): Result[void,(VertexID,AristoError)] =
## Variant of `fetchPayload()`
##

View File

@ -48,7 +48,7 @@ proc fetchPayload*(
proc fetchPayload*(
db: AristoDbRef;
root: VertexID;
path: Blob;
path: openArray[byte];
): Result[PayloadRef,(VertexID,AristoError)] =
## Variant of `fetchPayload()`
##

View File

@ -26,7 +26,8 @@ type
export
BackendType,
MemBackendRef
MemBackendRef,
QidLayoutRef
let
DefaultQidLayoutRef* = DEFAULT_QID_QUEUES.to(QidLayoutRef)

View File

@ -29,7 +29,7 @@ import
chronicles,
eth/[common, trie/nibbles],
results,
../../sync/protocol,
../../sync/protocol/snap/snap_types,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_path, aristo_transcode,
aristo_vid]

View File

@ -18,7 +18,7 @@ import
"."/[aristo_constants, aristo_desc, aristo_get]
type
ResolveVidFn = proc(vid: VertexID): HashKey
ResolveVidFn = proc(vid: VertexID): HashKey {.gcsafe, raises: [].}
## Resolve storage root vertex ID
# ------------------------------------------------------------------------------

View File

@ -20,10 +20,10 @@ import
type
DoSpanPrepFn =
proc(db: AristoDbRef; flg: bool): Result[void,AristoError]
proc(db: AristoDbRef; flg: bool): Result[void,AristoError] {.gcsafe.}
DoSpanExecFn =
proc(db: AristoDbRef)
proc(db: AristoDbRef) {.gcsafe.}
func isTop*(tx: AristoTxRef): bool
func level*(db: AristoDbRef): int
@ -147,7 +147,8 @@ proc doSpan(
prepFn = DoSpanPrepFn(nil); # Optional preparation layer
prepFlag = false; # `prepFn` argument
execFn: DoSpanExecFn; # Mandatory execution layer
): Result[void,AristoError] =
): Result[void,AristoError]
{.gcsafe.} =
## Common execution framework for `rollbackImpl()` or `commitImpl()` over
## all descriptors in the transaction span.
##
@ -181,7 +182,8 @@ proc doThisPrep(
db: AristoDbRef; # Top transaction on database
prepFn = DoSpanPrepFn(nil); # Mandatory preparation layer function
prepFlag = false; # `prepFn` argument
): Result[void,AristoError] =
): Result[void,AristoError]
{.gcsafe.} =
## ..
let
keep = db.top
@ -225,7 +227,10 @@ func to*(tx: AristoTxRef; T: type[AristoDbRef]): T =
tx.db
proc forkTx*(tx: AristoTxRef): Result[AristoDbRef,AristoError] =
proc forkTx*(
tx: AristoTxRef; # Transaction descriptor
dontHashify = false; # Process/fix MPT hashes
): Result[AristoDbRef,AristoError] =
## Clone a transaction into a new DB descriptor accessing the same backend
## (if any) database as the argument `db`. The new descriptor is linked to
## the transaction parent and is fully functional as a forked instance (see
@ -235,6 +240,9 @@ proc forkTx*(tx: AristoTxRef): Result[AristoDbRef,AristoError] =
## `tx` as top layer of level 1 (i.e. this is the only transaction.) Rolling
## back will end up at the backend layer (incl. backend filter.)
##
## If the argument flag `dontHashify` is passed `true`, the clone descriptor
## will *NOT* be hashified right after construction.
##
## Use `aristo_desc.forget()` to clean up this descriptor.
##
let db = tx.db
@ -276,9 +284,19 @@ proc forkTx*(tx: AristoTxRef): Result[AristoDbRef,AristoError] =
txUid: 1,
level: 1)
if db.top.dirty and not dontHashify:
let rc = txClone.hashify()
if rc.isErr:
discard txClone.forget()
return err(rc.error.fromVae)
ok(txClone)
proc forkTop*(db: AristoDbRef): Result[AristoDbRef,AristoError] =
proc forkTop*(
db: AristoDbRef;
dontHashify = false; # Process/fix MPT hashes
): Result[AristoDbRef,AristoError] =
## Variant of `forkTx()` for the top transaction if there is any. Otherwise
## the top layer is cloned, only.
##
@ -291,21 +309,30 @@ proc forkTop*(db: AristoDbRef): Result[AristoDbRef,AristoError] =
dbClone.roFilter = db.roFilter # no need to copy contents when updated
dbClone.backend = db.backend
if db.top.dirty and not dontHashify:
let rc = dbClone.hashify()
if rc.isErr:
discard dbClone.forget()
return err(rc.error.fromVae)
return ok(dbClone)
db.txRef.forkTx()
db.txRef.forkTx dontHashify
proc exec*(
tx: AristoTxRef;
action: AristoDbAction;
dontHashify = false; # Process/fix MPT hashes
): Result[void,AristoError]
{.gcsafe, raises: [CatchableError].} =
## Execute function argument `action()` on a temporary `tx.copyCat()`
## transaction database. After return, the temporary database gets
## Execute function argument `action()` on a temporary `tx.forkTx()`
## transaction clone database. After return, the temporary database gets
## destroyed.
##
let db = ? tx.forkTx()
## If the argument flag `dontHashify` is passed `true`, the clone database
## will *NOT* be hashified right after construction.
##
let db = ? tx.forkTx dontHashify
db.action()
? db.forget()
ok()

View File

@ -16,6 +16,7 @@ import
../aristo_init/[memory_db, memory_only],
".."/[aristo_desc, aristo_init],
./walk_private
export
memory_db,
memory_only
@ -67,6 +68,14 @@ iterator walkPairs*[T: MemBackendRef|VoidBackendRef](
for (vid,vtx) in walkPairsImpl[T](db):
yield (vid,vtx)
iterator replicate*[T: MemBackendRef|VoidBackendRef](
_: type T;
db: AristoDbRef;
): tuple[vid: VertexID, key: HashKey, vtx: VertexRef, node: NodeRef] =
## Variant of `walkPairsImpl()` for legacy applications.
for (vid,key,vtx,node) in replicateImpl[T](db):
yield (vid,key,vtx,node)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -21,6 +21,7 @@ import
../aristo_init/[rocks_db, persistent],
".."/[aristo_desc, aristo_init],
"."/[walk_private, memory_only]
export
rocks_db,
memory_only,
@ -72,6 +73,14 @@ iterator walkPairs*(
for (vid,vtx) in walkPairsImpl[T](db):
yield (vid,vtx)
iterator replicate*(
T: type RdbBackendRef;
db: AristoDbRef;
): tuple[vid: VertexID, key: HashKey, vtx: VertexRef, node: NodeRef] =
## Variant of `walkPairsImpl()` for legacy applications.
for (vid,key,vtx,node) in replicateImpl[T](db):
yield (vid,key,vtx,node)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -12,7 +12,7 @@
import
std/[algorithm, sequtils, tables],
results,
".."/[aristo_desc, aristo_init]
".."/[aristo_desc, aristo_get, aristo_init, aristo_utils]
# ------------------------------------------------------------------------------
# Public generic iterators
@ -136,6 +136,19 @@ iterator walkPairsImpl*[T](
if vid notin db.top.sTab and vtx.isValid:
yield (vid,vtx)
iterator replicateImpl*[T](
db: AristoDbRef; # Database with top layer & backend filter
): tuple[vid: VertexID, key: HashKey, vtx: VertexRef, node: NodeRef] =
## Variant of `walkPairsImpl()` for legacy applications.
for (vid,vtx) in walkPairsImpl[T](db):
let node = block:
let rc = vtx.toNode(db)
if rc.isOk:
rc.value
else:
NodeRef(nil)
yield (vid, db.getKey vid, vtx, node)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -24,6 +24,11 @@ when defined(release):
else:
const AutoValidateDescriptors = true
# Annotation helpers
{.pragma: noRaise, gcsafe, raises: [].}
{.pragma: rlpRaise, gcsafe, raises: [RlpError].}
{.pragma: catchRaise, gcsafe, raises: [CatchableError].}
type
CoreDbCaptFlags* {.pure.} = enum
PersistPut
@ -39,13 +44,12 @@ type
# --------------------------------------------------
# Constructors
# --------------------------------------------------
CoreDbNewMptFn* = proc(root: Hash256): CoreDbMptRef {.gcsafe, raises: [].}
CoreDbNewLegacyMptFn* = proc(root: Hash256; prune: bool): CoreDbMptRef
{.gcsafe, raises: [].}
CoreDbNewTxGetIdFn* = proc(): CoreDbTxID {.gcsafe, raises: [].}
CoreDbNewTxBeginFn* = proc(): CoreDbTxRef {.gcsafe, raises: [].}
CoreDbNewCaptFn = proc(flags: set[CoreDbCaptFlags] = {}): CoreDbCaptRef
{.gcsafe, raises: [].}
CoreDbNewMptFn* = proc(root: Hash256): CoreDbMptRef {.noRaise.}
CoreDbNewLegacyMptFn* =
proc(root: Hash256; prune: bool): CoreDbMptRef {.noRaise.}
CoreDbNewTxGetIdFn* = proc(): CoreDbTxID {.noRaise.}
CoreDbNewTxBeginFn* = proc(): CoreDbTxRef {.noRaise.}
CoreDbNewCaptFn = proc(flags: set[CoreDbCaptFlags]): CoreDbCaptRef {.noRaise.}
CoreDbConstructors* = object
## Constructors
@ -65,7 +69,7 @@ type
# --------------------------------------------------
# Sub-descriptor: Misc methods for main descriptor
# --------------------------------------------------
CoreDbInitLegaSetupFn* = proc() {.gcsafe, raises: [].}
CoreDbInitLegaSetupFn* = proc() {.noRaise.}
CoreDbMiscFns* = object
legacySetupFn*: CoreDbInitLegaSetupFn
@ -73,14 +77,12 @@ type
# --------------------------------------------------
# Sub-descriptor: KVT methods
# --------------------------------------------------
CoreDbKvtGetFn* = proc(k: openArray[byte]): Blob {.gcsafe, raises: [].}
CoreDbKvtMaybeGetFn* = proc(key: openArray[byte]): Option[Blob]
{.gcsafe, raises: [].}
CoreDbKvtDelFn* = proc(k: openArray[byte]) {.gcsafe, raises: [].}
CoreDbKvtPutFn* = proc(k: openArray[byte]; v: openArray[byte])
{.gcsafe, raises: [].}
CoreDbKvtContainsFn* = proc(k: openArray[byte]): bool {.gcsafe, raises: [].}
CoreDbKvtPairsIt* = iterator(): (Blob,Blob) {.gcsafe, raises: [].}
CoreDbKvtGetFn* = proc(k: openArray[byte]): Blob {.noRaise.}
CoreDbKvtMaybeGetFn* = proc(key: openArray[byte]): Option[Blob] {.noRaise.}
CoreDbKvtDelFn* = proc(k: openArray[byte]) {.noRaise.}
CoreDbKvtPutFn* = proc(k: openArray[byte]; v: openArray[byte]) {.noRaise.}
CoreDbKvtContainsFn* = proc(k: openArray[byte]): bool {.noRaise.}
CoreDbKvtPairsIt* = iterator(): (Blob,Blob) {.noRaise.}
CoreDbKvtFns* = object
## Methods for key-value table
@ -95,19 +97,15 @@ type
# --------------------------------------------------
# Sub-descriptor: Mpt/hexary trie methods
# --------------------------------------------------
CoreDbMptGetFn* = proc(k: openArray[byte]): Blob
{.gcsafe, raises: [RlpError].}
CoreDbMptMaybeGetFn* = proc(k: openArray[byte]): Option[Blob]
{.gcsafe, raises: [RlpError].}
CoreDbMptDelFn* = proc(k: openArray[byte]) {.gcsafe, raises: [RlpError].}
CoreDbMptPutFn* = proc(k: openArray[byte]; v: openArray[byte])
{.gcsafe, raises: [RlpError].}
CoreDbMptContainsFn* = proc(k: openArray[byte]): bool
{.gcsafe, raises: [RlpError].}
CoreDbMptRootHashFn* = proc(): Hash256 {.gcsafe, raises: [].}
CoreDbMptIsPruningFn* = proc(): bool {.gcsafe, raises: [].}
CoreDbMptPairsIt* = iterator(): (Blob,Blob) {.gcsafe, raises: [RlpError].}
CoreDbMptReplicateIt* = iterator(): (Blob,Blob) {.gcsafe, raises: [RlpError].}
CoreDbMptGetFn* = proc(k: openArray[byte]): Blob {.rlpRaise.}
CoreDbMptMaybeGetFn* = proc(k: openArray[byte]): Option[Blob] {.rlpRaise.}
CoreDbMptDelFn* = proc(k: openArray[byte]) {.rlpRaise.}
CoreDbMptPutFn* = proc(k: openArray[byte]; v: openArray[byte]) {.catchRaise.}
CoreDbMptContainsFn* = proc(k: openArray[byte]): bool {.rlpRaise.}
CoreDbMptRootHashFn* = proc(): Hash256 {.noRaise.}
CoreDbMptIsPruningFn* = proc(): bool {.noRaise.}
CoreDbMptPairsIt* = iterator(): (Blob,Blob) {.rlpRaise.}
CoreDbMptReplicateIt* = iterator(): (Blob,Blob) {.rlpRaise.}
CoreDbMptFns* = object
## Methods for trie objects `CoreDbMptRef`
@ -125,10 +123,10 @@ type
# --------------------------------------------------
# Sub-descriptor: Transaction frame management
# --------------------------------------------------
CoreDbTxCommitFn* = proc(applyDeletes: bool) {.gcsafe, raises: [].}
CoreDbTxRollbackFn* = proc() {.gcsafe, raises: [].}
CoreDbTxDisposeFn* = proc() {.gcsafe, raises: [].}
CoreDbTxSafeDisposeFn* = proc() {.gcsafe, raises: [].}
CoreDbTxCommitFn* = proc(applyDeletes: bool) {.noRaise.}
CoreDbTxRollbackFn* = proc() {.noRaise.}
CoreDbTxDisposeFn* = proc() {.noRaise.}
CoreDbTxSafeDisposeFn* = proc() {.noRaise.}
CoreDbTxFns* = object
commitFn*: CoreDbTxCommitFn
@ -139,10 +137,9 @@ type
# --------------------------------------------------
# Sub-descriptor: Transaction ID management
# --------------------------------------------------
CoreDbTxIdSetIdFn* = proc() {.gcsafe, raises: [].}
CoreDbTxIdActionFn* = proc() {.gcsafe, raises: [CatchableError].}
CoreDbTxIdRoWrapperFn* = proc(action: CoreDbTxIdActionFn)
{.gcsafe, raises: [CatchableError].}
CoreDbTxIdSetIdFn* = proc() {.noRaise.}
CoreDbTxIdActionFn* = proc() {.catchRaise.}
CoreDbTxIdRoWrapperFn* = proc(action: CoreDbTxIdActionFn) {.catchRaise.}
CoreDbTxIdFns* = object
setIdFn*: CoreDbTxIdSetIdFn
roWrapperFn*: CoreDbTxIdRoWrapperFn
@ -151,8 +148,8 @@ type
# --------------------------------------------------
# Sub-descriptor: capture recorder methods
# --------------------------------------------------
CoreDbCaptRecorderFn* = proc(): CoreDbRef {.gcsafe, raises: [].}
CoreDbCaptFlagsFn* = proc(): set[CoreDbCaptFlags] {.gcsafe, raises: [].}
CoreDbCaptRecorderFn* = proc(): CoreDbRef {.noRaise.}
CoreDbCaptFlagsFn* = proc(): set[CoreDbCaptFlags] {.noRaise.}
CoreDbCaptFns* = object
recorderFn*: CoreDbCaptRecorderFn
@ -257,7 +254,7 @@ proc validateMethodsDesc(tx: CoreDbTxRef) =
proc validateMethodsDesc(id: CoreDbTxID) =
doAssert not id.parent.isNil
doAssert not id.methods.setIdFn.isNil
# doAssert not id.methods.setIdFn.isNil
doAssert not id.methods.roWrapperFn.isNil
proc validateConstructors(new: CoreDbConstructors) =
@ -274,30 +271,25 @@ proc toCoreDbPhkRef(mpt: CoreDbMptRef): CoreDbPhkRef =
result = CoreDbPhkRef(
parent: mpt,
methods: CoreDbMptFns(
getFn: proc(k: openArray[byte]): Blob
{.gcsafe, raises: [RlpError].} =
return mpt.methods.getFn(k.keccakHash.data),
getFn: proc(k: openArray[byte]): Blob {.rlpRaise.} =
mpt.methods.getFn(k.keccakHash.data),
maybeGetFn: proc(k: openArray[byte]): Option[Blob]
{.gcsafe, raises: [RlpError].} =
return mpt.methods.maybeGetFn(k.keccakHash.data),
maybeGetFn: proc(k: openArray[byte]): Option[Blob] {.rlpRaise.} =
mpt.methods.maybeGetFn(k.keccakHash.data),
delFn: proc(k: openArray[byte])
{.gcsafe, raises: [RlpError].} =
delFn: proc(k: openArray[byte]) {.rlpRaise.} =
mpt.methods.delFn(k.keccakHash.data),
putFn: proc(k:openArray[byte]; v:openArray[byte])
{.gcsafe, raises: [RlpError].} =
putFn: proc(k:openArray[byte]; v:openArray[byte]) {.catchRaise.} =
mpt.methods.putFn(k.keccakHash.data, v),
containsFn: proc(k: openArray[byte]): bool
{.gcsafe, raises: [RlpError].} =
return mpt.methods.containsFn(k.keccakHash.data),
containsFn: proc(k: openArray[byte]): bool {.rlpRaise.} =
mpt.methods.containsFn(k.keccakHash.data),
pairsIt: iterator(): (Blob, Blob) {.gcsafe.} =
pairsIt: iterator(): (Blob, Blob) {.noRaise.} =
mpt.parent.itNotImplemented("pairs/phk"),
replicateIt: iterator(): (Blob, Blob) {.gcsafe.} =
replicateIt: iterator(): (Blob, Blob) {.noRaise.} =
mpt.parent.itNotImplemented("replicate/phk"),
rootHashFn: mpt.methods.rootHashFn,
@ -509,34 +501,34 @@ proc get*(
trie: CoreDbMptRef|CoreDbPhkRef;
key: openArray[byte];
): Blob
{.gcsafe, raises: [RlpError].} =
{.rlpRaise.} =
trie.methods.getFn key
proc maybeGet*(
trie: CoreDbMptRef|CoreDbPhkRef;
key: openArray[byte];
): Option[Blob]
{.gcsafe, raises: [RlpError].} =
{.rlpRaise.} =
trie.methods.maybeGetFn key
proc del*(
trie: CoreDbMptRef|CoreDbPhkRef;
key: openArray[byte];
) {.gcsafe, raises: [RlpError].} =
) {.rlpRaise.} =
trie.methods.delFn key
proc put*(
trie: CoreDbMptRef|CoreDbPhkRef;
key: openArray[byte];
value: openArray[byte];
) {.gcsafe, raises: [RlpError].} =
) {.catchRaise.} =
trie.methods.putFn(key, value)
proc contains*(
trie: CoreDbMptRef|CoreDbPhkRef;
key: openArray[byte];
): bool
{.gcsafe, raises: [RlpError].} =
{.rlpRaise.} =
trie.methods.containsFn key
proc rootHash*(
@ -548,7 +540,7 @@ proc rootHash*(
iterator pairs*(
trie: CoreDbMptRef;
): (Blob, Blob)
{.gcsafe, raises: [RlpError].} =
{.rlpRaise.} =
## Trie traversal, only supported for `CoreDbMptRef`
for k,v in trie.methods.pairsIt():
yield (k,v)
@ -556,7 +548,7 @@ iterator pairs*(
iterator replicate*(
trie: CoreDbMptRef;
): (Blob, Blob)
{.gcsafe, raises: [RlpError].} =
{.rlpRaise.} =
## Low level trie dump, only supported for `CoreDbMptRef`
for k,v in trie.methods.replicateIt():
yield (k,v)
@ -579,8 +571,8 @@ proc setTransactionID*(id: CoreDbTxID) =
proc shortTimeReadOnly*(
id: CoreDbTxID;
action: proc() {.gcsafe, raises: [CatchableError].};
) {.gcsafe, raises: [CatchableError].} =
action: proc() {.catchRaise.};
) {.catchRaise.} =
## Run `action()` in an earlier transaction environment.
id.methods.roWrapperFn action

View File

@ -431,7 +431,7 @@ proc persistTransactions*(
blockNumber: BlockNumber;
transactions: openArray[Transaction];
): Hash256
{.gcsafe, raises: [RlpError].} =
{.gcsafe, raises: [CatchableError].} =
var trie = db.mptPrune()
for idx, tx in transactions:
let
@ -496,7 +496,7 @@ proc persistWithdrawals*(
db: CoreDbRef;
withdrawals: openArray[Withdrawal];
): Hash256
{.gcsafe, raises: [RlpError].} =
{.gcsafe, raises: [CatchableError].} =
var trie = db.mptPrune()
for idx, wd in withdrawals:
let encodedWd = rlp.encode(wd)
@ -623,7 +623,7 @@ proc persistReceipts*(
db: CoreDbRef;
receipts: openArray[Receipt];
): Hash256
{.gcsafe, raises: [RlpError].} =
{.gcsafe, raises: [CatchableError].} =
var trie = db.mptPrune()
for idx, rec in receipts:
trie.put(rlp.encode(idx), rlp.encode(rec))

View File

@ -28,11 +28,17 @@ type
recorder: TrieDatabaseRef
appDb: CoreDbRef
# Annotation helpers
{.pragma: noRaise, gcsafe, raises: [].}
{.pragma: rlpRaise, gcsafe, raises: [RlpError].}
{.pragma: catchRaise, gcsafe, raises: [CatchableError].}
proc init*(
db: LegacyDbRef;
dbType: CoreDbType;
tdb: TrieDatabaseRef;
): CoreDbRef {.gcsafe.}
): CoreDbRef
{.noRaise.}
# ------------------------------------------------------------------------------
# Private helpers
@ -97,46 +103,44 @@ proc miscMethods(tdb: TrieDatabaseRef): CoreDbMiscFns =
proc kvtMethods(tdb: TrieDatabaseRef): CoreDbKvtFns =
## Key-value database table handlers
CoreDbKvtFns(
getFn: proc(k: openArray[byte]): Blob = return tdb.get(k),
maybeGetFn: proc(k: openArray[byte]): Option[Blob] = return tdb.maybeGet(k),
delFn: proc(k: openArray[byte]) = tdb.del(k),
putFn: proc(k: openArray[byte]; v: openArray[byte]) = tdb.put(k,v),
containsFn: proc(k: openArray[byte]): bool = return tdb.contains(k),
pairsIt: iterator(): (Blob, Blob) {.gcsafe.} =
for k,v in tdb.pairsInMemoryDB:
yield (k,v))
getFn: proc(k: openArray[byte]): Blob = tdb.get(k),
maybeGetFn: proc(k: openArray[byte]): Option[Blob] = tdb.maybeGet(k),
delFn: proc(k: openArray[byte]) = tdb.del(k),
putFn: proc(k: openArray[byte]; v: openArray[byte]) = tdb.put(k,v),
containsFn: proc(k: openArray[byte]): bool = tdb.contains(k),
pairsIt: iterator(): (Blob, Blob) {.noRaise.} =
for k,v in tdb.pairsInMemoryDB:
yield (k,v))
proc mptMethods(mpt: HexaryTrieRef): CoreDbMptFns =
## Hexary trie database handlers
CoreDbMptFns(
getFn: proc(k: openArray[byte]): Blob {.gcsafe, raises: [RlpError].} =
return mpt.trie.get(k),
getFn: proc(k: openArray[byte]): Blob {.rlpRaise.} =
mpt.trie.get(k),
maybeGetFn: proc(k: openArray[byte]): Option[Blob]
{.gcsafe, raises: [RlpError].} =
return mpt.trie.maybeGet(k),
maybeGetFn: proc(k: openArray[byte]): Option[Blob] {.rlpRaise.} =
mpt.trie.maybeGet(k),
delFn: proc(k: openArray[byte]) {.gcsafe, raises: [RlpError].} =
delFn: proc(k: openArray[byte]) {.rlpRaise.} =
mpt.trie.del(k),
putFn: proc(k: openArray[byte]; v: openArray[byte])
{.gcsafe, raises: [RlpError].} =
putFn: proc(k: openArray[byte]; v: openArray[byte]) {.rlpRaise.} =
mpt.trie.put(k,v),
containsFn: proc(k: openArray[byte]): bool {.gcsafe, raises: [RlpError].} =
return mpt.trie.contains(k),
containsFn: proc(k: openArray[byte]): bool {.rlpRaise.} =
mpt.trie.contains(k),
rootHashFn: proc(): Hash256 =
return mpt.trie.rootHash,
mpt.trie.rootHash,
isPruningFn: proc(): bool =
return mpt.trie.isPruning,
mpt.trie.isPruning,
pairsIt: iterator(): (Blob, Blob) {.gcsafe, raises: [RlpError].} =
pairsIt: iterator(): (Blob, Blob) {.rlpRaise.} =
for k,v in mpt.trie.pairs():
yield (k,v),
replicateIt: iterator(): (Blob, Blob) {.gcsafe, raises: [RlpError].} =
replicateIt: iterator(): (Blob, Blob) {.rlpRaise.} =
for k,v in mpt.trie.replicate():
yield (k,v))
@ -152,17 +156,16 @@ proc tidMethods(tid: TransactionID; tdb: TrieDatabaseRef): CoreDbTxIdFns =
setIdFn: proc() =
tdb.setTransactionID(tid),
roWrapperFn: proc(action: CoreDbTxIdActionFn)
{.gcsafe, raises: [CatchableError].} =
roWrapperFn: proc(action: CoreDbTxIdActionFn) {.catchRaise.} =
tdb.shortTimeReadOnly(tid, action()))
proc cptMethods(cpt: RecorderRef): CoreDbCaptFns =
CoreDbCaptFns(
recorderFn: proc(): CoreDbRef =
return cpt.appDb,
cpt.appDb,
getFlagsFn: proc(): set[CoreDbCaptFlags] =
return cpt.flags)
cpt.flags)
# ------------------------------------------------------------------------------
# Private constructor functions table
@ -172,20 +175,20 @@ proc constructors(tdb: TrieDatabaseRef, parent: CoreDbRef): CoreDbConstructors =
CoreDbConstructors(
mptFn: proc(root: Hash256): CoreDbMptRef =
let mpt = HexaryTrieRef(trie: initHexaryTrie(tdb, root, false))
return newCoreDbMptRef(parent, mpt.mptMethods),
newCoreDbMptRef(parent, mpt.mptMethods),
legacyMptFn: proc(root: Hash256; prune: bool): CoreDbMptRef =
let mpt = HexaryTrieRef(trie: initHexaryTrie(tdb, root, prune))
return newCoreDbMptRef(parent, mpt.mptMethods),
newCoreDbMptRef(parent, mpt.mptMethods),
getIdFn: proc(): CoreDbTxID =
return newCoreDbTxID(parent, tdb.getTransactionID.tidMethods tdb),
newCoreDbTxID(parent, tdb.getTransactionID.tidMethods tdb),
beginFn: proc(): CoreDbTxRef =
return newCoreDbTxRef(parent, tdb.beginTransaction.txMethods),
newCoreDbTxRef(parent, tdb.beginTransaction.txMethods),
captureFn: proc(flags: set[CoreDbCaptFlags] = {}): CoreDbCaptRef =
return newCoreDbCaptRef(parent, newRecorderRef(tdb, flags).cptMethods))
newCoreDbCaptRef(parent, newRecorderRef(tdb, flags).cptMethods))
# ------------------------------------------------------------------------------
# Public constructor helpers
@ -202,7 +205,7 @@ proc init*(
dbMethods = tdb.miscMethods,
kvtMethods = tdb.kvtMethods,
new = tdb.constructors db)
return db
db
# ------------------------------------------------------------------------------
# Public constructor and low level data retrieval, storage & transation frame

View File

@ -13,7 +13,7 @@
{.push raises: [].}
import
"."/[memory_only, legacy_persistent]
"."/[memory_only, legacy_rocksdb]
export
memory_only
@ -25,6 +25,7 @@ proc newCoreDbRef*(dbType: static[CoreDbType]; path: string): CoreDbRef =
## `CoreDbRef.init()` because of compiler coughing.
when dbType == LegacyDbPersistent:
newLegacyPersistentCoreDbRef path
else:
{.error: "Unsupported dbType for persistent newCoreDbRef()".}

View File

@ -14,15 +14,17 @@
{.push raises: [].}
import kvt/[
kvt_constants, kvt_init, kvt_tx, kvt_utils]
kvt_constants, kvt_init, kvt_tx, kvt_utils, kvt_walk]
export
kvt_constants, kvt_init, kvt_tx, kvt_utils
kvt_constants, kvt_init, kvt_tx, kvt_utils, kvt_walk
import
kvt/kvt_desc
export
KvtDbAction,
KvtDbRef,
KvtError,
KvtTxRef,
isValid
# End

View File

@ -14,8 +14,9 @@
{.push raises: [].}
import
std/tables,
std/[hashes, sets, tables],
eth/common,
results,
./kvt_constants,
./kvt_desc/[desc_error, desc_structural]
@ -27,6 +28,9 @@ export
kvt_constants, desc_error, desc_structural
type
KvtDudes* = HashSet[KvtDbRef]
## Descriptor peers sharing the same backend
KvtTxRef* = ref object
## Transaction descriptor
db*: KvtDbRef ## Database descriptor
@ -34,6 +38,13 @@ type
txUid*: uint ## Unique ID among transactions
level*: int ## Stack index for this transaction
DudesRef = ref object
case rwOk: bool
of true:
roDudes: KvtDudes ## Read-only peers
else:
rwDb: KvtDbRef ## Link to writable descriptor
KvtDbRef* = ref KvtDbObj
KvtDbObj* = object
## Three tier database object supporting distributed instances.
@ -43,6 +54,7 @@ type
txRef*: KvtTxRef ## Latest active transaction
txUidGen*: uint ## Tx-relative unique number generator
dudes: DudesRef ## Related DB descriptors
KvtDbAction* = proc(db: KvtDbRef) {.gcsafe, raises: [CatchableError].}
## Generic call back function/closure.
@ -57,6 +69,153 @@ func getOrVoid*(tab: Table[Blob,Blob]; w: Blob): Blob =
func isValid*(key: Blob): bool =
key != EmptyBlob
# ------------------------------------------------------------------------------
# Public functions, miscellaneous
# ------------------------------------------------------------------------------
# Hash set helper
func hash*(db: KvtDbRef): Hash =
## Table/KeyedQueue/HashSet mixin
cast[pointer](db).hash
# ------------------------------------------------------------------------------
# Public functions, `dude` related
# ------------------------------------------------------------------------------
func isCentre*(db: KvtDbRef): bool =
## This function returns `true` if the argument `db` is the centre (see
## comments on `reCentre()` for details.)
##
db.dudes.isNil or db.dudes.rwOk
func getCentre*(db: KvtDbRef): KvtDbRef =
## Get the centre descriptor among all other descriptors accessing the same
## backend database (see comments on `reCentre()` for details.)
##
if db.dudes.isNil or db.dudes.rwOk:
db
else:
db.dudes.rwDb
proc reCentre*(
db: KvtDbRef;
force = false;
): Result[void,KvtError] =
## Re-focus the `db` argument descriptor so that it becomes the centre.
## Nothing is done if the `db` descriptor is the centre, already.
##
## With several descriptors accessing the same backend database there is a
## single one that has write permission for the backend (regardless whether
## there is a backend, at all.) The descriptor entity with write permission
## is called *the centre*.
##
## After invoking `reCentre()`, the argument database `db` can only be
## destructed by `finish()` which also destructs all other descriptors
## accessing the same backend database. Descriptors where `isCentre()`
## returns `false` must be single destructed with `forget()`.
##
## If there is an open transaction spanning several descriptors, the `force`
## flag must be set `true` (unless the argument `db` is centre, already.) The
## argument `db` must be covered by the transaction span. Then the re-centred
## descriptor will also be the centre of the transaction span.
##
if not db.isCentre:
let parent = db.dudes.rwDb
# Steal dudes list from parent, make the rw-parent a read-only dude
db.dudes = parent.dudes
parent.dudes = DudesRef(rwOk: false, rwDb: db)
# Exclude self
db.dudes.roDudes.excl db
# Update dudes
for w in db.dudes.roDudes:
# Let all other dudes refer to this one
w.dudes.rwDb = db
# Update dudes list (parent was alredy updated)
db.dudes.roDudes.incl parent
ok()
proc fork*(
    db: KvtDbRef;
      ): Result[KvtDbRef,KvtError] =
  ## This function creates a new empty descriptor accessing the same backend
  ## (if any) database as the argument `db`. The new descriptor joins the
  ## list of descriptors accessing the same backend database.
  ##
  ## After use, any unused non centre descriptor should be destructed via
  ## `forget()`. Not doing so will not only hold memory resources but might
  ## also cost computing resources for maintaining and updating backend
  ## filters when writing to the backend database.
  ##
  let newDude = KvtDbRef(
    top:     LayerRef(),
    backend: db.backend)

  if db.dudes.isNil:
    # First fork ever: `db` becomes the centre with a fresh dudes list
    newDude.dudes = DudesRef(rwOk: false, rwDb: db)
    db.dudes = DudesRef(rwOk: true, roDudes: [newDude].toHashSet)
  else:
    # Register the new descriptor with the existing centre
    let centre = db.getCentre
    newDude.dudes = DudesRef(rwOk: false, rwDb: centre)
    centre.dudes.roDudes.incl newDude

  ok newDude
iterator forked*(db: KvtDbRef): KvtDbRef =
  ## Iterate over all non centre descriptors (see comments on `reCentre()`
  ## for details.)
  if not db.dudes.isNil:
    let centre = db.getCentre
    for w in centre.dudes.roDudes.items:
      yield w
func nForked*(db: KvtDbRef): int =
  ## Returns the number of non centre descriptors (see comments on
  ## `reCentre()` for details.) This function is a fast version of
  ## `db.forked.toSeq.len`.
  if db.dudes.isNil: 0
  else: db.getCentre.dudes.roDudes.len
proc forget*(db: KvtDbRef): Result[void,KvtError] =
  ## Destruct the non centre argument `db` descriptor (see comments on
  ## `reCentre()` for details.)
  ##
  ## A non centre descriptor should always be destructed after use (see also
  ## comments on `fork()`.)
  ##
  if db.isCentre:
    return err(NotAllowedOnCentre)

  # Unlink argument `db` from the centre's dudes list
  let centre = db.dudes.rwDb
  if 2 <= centre.dudes.roDudes.len:
    centre.dudes.roDudes.excl db
  else:
    # `db` was the last dude: drop the list altogether
    centre.dudes = DudesRef(nil)

  # Clear descriptor so it would not do harm if used wrongly
  db[] = KvtDbObj(top: LayerRef())
  ok()
proc forgetOthers*(db: KvtDbRef): Result[void,KvtError] =
  ## For the centre argument `db` descriptor (see comments on `reCentre()`
  ## for details), destruct all other descriptors accessing the same backend.
  ##
  if not db.isCentre:
    return err(MustBeOnCentre)

  if not db.dudes.isNil:
    # Wipe every read-only dude so stray references cannot do harm, then
    # drop the dudes list itself.
    for other in db.dudes.roDudes.items:
      other[] = KvtDbObj(top: LayerRef())
    db.dudes = DudesRef(nil)

  ok()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -39,4 +39,8 @@ type
TxStackGarbled
TxStackUnderflow
# Functions from `kvt_desc`
MustBeOnCentre
NotAllowedOnCentre
# End

View File

@ -56,7 +56,7 @@ proc init*(
KvtDbRef(top: LayerRef())
elif B is MemBackendRef:
KvtDbRef(top: LayerRef(), backend: memoryBackend(qidLayout))
KvtDbRef(top: LayerRef(), backend: memoryBackend())
proc init*(
T: type KvtDbRef; # Target type
@ -76,7 +76,10 @@ proc finish*(db: KvtDbRef; flush = false) =
if not db.isNil:
if not db.backend.isNil:
db.backend.closeFn flush
db[] = KvtDbObj(top: LayerRef())
let lebo = db.getCentre
discard lebo.forgetOthers()
lebo[] = KvtDbObj()
# ------------------------------------------------------------------------------
# End

View File

@ -40,7 +40,7 @@ proc init*[W: MemOnlyBackend|RdbBackendRef](
## always succeed initialising.
##
when B is RdbBackendRef:
ok KvtDbRef(top: LayerRef(vGen: vGen), backend: ? rocksDbBackend basePath)
ok KvtDbRef(top: LayerRef(), backend: ? rocksDbBackend basePath)
else:
ok KvtDbRef.init B

View File

@ -72,6 +72,79 @@ func to*(tx: KvtTxRef; T: type[KvtDbRef]): T =
## Getter, retrieves the parent database descriptor from argument `tx`
tx.db
proc forkTx*(tx: KvtTxRef): Result[KvtDbRef,KvtError] =
  ## Clone a transaction into a new DB descriptor accessing the same backend
  ## (if any) database as the argument `db`. The new descriptor is linked to
  ## the transaction parent and is fully functional as a forked instance (see
  ## comments on `kvt_desc.reCentre()` for details.)
  ##
  ## The new DB descriptor will contain a copy of the argument transaction
  ## `tx` as top layer of level 1 (i.e. this is the only transaction.) Rolling
  ## back will end up at the backend layer (incl. backend filter.)
  ##
  ## Use `kvt_desc.forget()` to clean up this descriptor.
  ##
  let db = tx.db

  # Provide new top layer: either the current top (if `tx` is the pending
  # transaction) or the stacked layer recorded at the transaction's level.
  var topLayer: LayerRef
  if db.txRef == tx:
    topLayer = db.top.dup
  elif tx.level < db.stack.len:
    topLayer = db.stack[tx.level].dup
  else:
    return err(TxArgStaleTx)
  if topLayer.txUid != tx.txUid:
    # Layer was recycled since `tx` was created
    return err(TxArgStaleTx)
  topLayer.txUid = 1

  let txClone = ? db.fork()

  # Set up clone associated to `db`
  txClone.top = topLayer          # is a deep copy
  txClone.stack = @[LayerRef()]
  txClone.backend = db.backend
  txClone.txUidGen = 1

  # Install transaction similar to `tx` on clone
  txClone.txRef = KvtTxRef(
    db:    txClone,
    txUid: 1,
    level: 1)

  ok(txClone)
proc forkTop*(db: KvtDbRef): Result[KvtDbRef,KvtError] =
  ## Variant of `forkTx()` for the top transaction if there is any. Otherwise
  ## the top layer is cloned, only.
  ##
  ## Use `kvt_desc.forget()` to clean up this descriptor.
  ##
  if not db.txRef.isNil:
    return db.txRef.forkTx()

  # No pending transaction: clone the top layer, only
  let dbClone = ? db.fork()
  dbClone.top = db.top.dup        # is a deep copy
  dbClone.backend = db.backend
  ok(dbClone)
proc exec*(
    tx: KvtTxRef;
    action: KvtDbAction;
      ): Result[void,KvtError]
      {.gcsafe, raises: [CatchableError].} =
  ## Execute function argument `action()` on a temporary `tx.forkTx()`
  ## transaction database. After return, the temporary database gets
  ## destroyed.
  ##
  let db = ? tx.forkTx()
  try:
    db.action()
  except CatchableError as e:
    # Fix: make sure the temporary descriptor is detached even when the
    # action raises -- otherwise it would stay on the centre's dudes list
    # and leak (see comments on `fork()`.)
    discard db.forget()
    raise e
  ? db.forget()
  ok()
# ------------------------------------------------------------------------------
# Public functions: Transaction frame
# ------------------------------------------------------------------------------
@ -156,6 +229,7 @@ proc collapse*(
db.top.txUid = 0
db.stack.setLen(0)
db.txRef = KvtTxRef(nil)
ok()
# ------------------------------------------------------------------------------

View File

@ -15,7 +15,9 @@
import
eth/common,
../kvt_init/[memory_db, memory_only],
../kvt_init
".."/[kvt_desc, kvt_init],
./walk_private
export
memory_db,
memory_only
@ -24,15 +26,13 @@ export
# Public iterators (all in one)
# ------------------------------------------------------------------------------
iterator walkPairs*(
be: MemBackendRef|VoidBackendRef;
iterator walkPairs*[T: MemBackendRef|VoidBackendRef](
_: type T;
db: KvtDbRef;
): tuple[key: Blob, data: Blob] =
## Iterate over backend filters.
when be isnot VoidBackendRef:
mixin walk
for (k,v) in be.walk:
yield (k,v)
for (vid,vtx) in walkPairsImpl[T](db):
yield (vid,vtx)
# ------------------------------------------------------------------------------
# End

View File

@ -20,7 +20,9 @@
import
eth/common,
../kvt_init/[rocks_db, persistent],
./memory_only
../kvt_desc,
"."/[memory_only, walk_private]
export
rocks_db,
memory_only,
@ -31,11 +33,12 @@ export
# ------------------------------------------------------------------------------
iterator walkPairs*(
be: RdbBackendRef;
T: type RdbBackendRef;
db: KvtDbRef;
): tuple[key: Blob, data: Blob] =
## Walk filter slots in fifo order.
for (k,v) in be.walk:
yield (k,v)
## Iterate over backend filters.
for (vid,vtx) in walkPairsImpl[T](db):
yield (vid,vtx)
# ------------------------------------------------------------------------------
# End

View File

@ -0,0 +1,40 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
std/tables,
eth/common,
".."/[kvt_desc, kvt_init]
# ------------------------------------------------------------------------------
# Public generic iterators
# ------------------------------------------------------------------------------
iterator walkPairsImpl*[T](
   db: KvtDbRef;                   # Database with top layer & backend filter
     ): tuple[key: Blob, data: Blob] =
  ## Walk over all `(key,data)` pairs of the key-value table, top layer
  ## first. Note that entries are unsorted.
  ## (Fix: previous doc claimed `(VertexID,VertexRef)` pairs, copied from
  ## the Aristo variant -- this is the Kvt `Blob` key/data walk.)
  for (key,data) in db.top.tab.pairs:
    if data.isValid:
      yield (key,data)

  when T isnot VoidBackendRef:
    mixin walk

    for (key,data) in db.backend.T.walk:
      # Skip entries shadowed by the top layer
      if key notin db.top.tab and data.isValid:
        yield (key,data)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -14,7 +14,7 @@ import
std/tables,
chronicles,
eth/[common, p2p, trie/nibbles],
../../../../db/core_db/legacy_persistent,
../../../../db/core_db/legacy_rocksdb,
../../../../db/[core_db, select_backend, storage_types],
../../../protocol,
../../range_desc,

View File

@ -9,7 +9,7 @@ export eth_types_rlp
{.push raises: [].}
proc calcRootHash[T](items: openArray[T]): Hash256
{.gcsafe, raises: [RlpError]} =
{.gcsafe, raises: [CatchableError]} =
var tr = newCoreDbRef(LegacyDbMemory).mptPrune
for i, t in items:
tr.put(rlp.encode(i), rlp.encode(t))

View File

@ -17,7 +17,7 @@ import
eth/[common, p2p],
rocksdb,
unittest2,
../nimbus/db/core_db/[legacy_persistent, persistent],
../nimbus/db/core_db/[legacy_rocksdb, persistent],
../nimbus/core/chain,
../nimbus/sync/snap/range_desc,
../nimbus/sync/snap/worker/db/[hexary_desc, rocky_bulk_load],

View File

@ -18,7 +18,7 @@ import
unittest2,
../../nimbus/core/chain,
../../nimbus/db/core_db,
../../nimbus/db/core_db/legacy_persistent,
../../nimbus/db/core_db/legacy_rocksdb,
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[hexary_desc, rocky_bulk_load],
../../nimbus/utils/prettify,

View File

@ -18,7 +18,7 @@ import
rocksdb,
unittest2,
../nimbus/db/[core_db, kvstore_rocksdb],
../nimbus/db/core_db/[legacy_persistent, persistent],
../nimbus/db/core_db/[legacy_rocksdb, persistent],
../nimbus/core/chain,
../nimbus/sync/types,
../nimbus/sync/snap/range_desc,