Mirror of https://github.com/status-im/nimbus-eth1.git (synced 2025-02-28 11:50:45 +00:00)
parent 3732b3f95e
commit d346759008
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2021-2024 Status Research & Development GmbH
+# Copyright (c) 2021-2025 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -8,9 +8,6 @@
 # at your option. This file may not be copied, modified, or distributed except
 # according to those terms.

-import
-  ./db/core_db/base/base_config
-
 func vmName(): string =
   when defined(evmc_enabled):
     "evmc"
@@ -26,8 +23,6 @@ const
     rc &= ", logger line numbers"
   when defined(boehmgc):
     rc &= ", boehm/gc"
-  when 0 < coreDbBaseConfigExtras.len:
-    rc &= ", " & coreDbBaseConfigExtras
   rc &= " enabled"
   rc

@@ -14,9 +14,9 @@
 {.push raises: [].}

 import
-  aristo/[aristo_api, aristo_constants]
+  aristo/aristo_constants
 export
-  aristo_api, aristo_constants
+  aristo_constants

 import
   aristo/aristo_init/memory_only,
@@ -1,648 +0,0 @@
# nimbus-eth1
# Copyright (c) 2024-2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Stackable API for `Aristo`
## ==========================

import
  std/times,
  eth/common/hashes,
  results,
  ./aristo_desc/desc_backend,
  ./aristo_init/memory_db,
  ./aristo_init/memory_only,
  ./aristo_init/init_common,
  "."/[aristo_delete, aristo_desc, aristo_fetch, aristo_merge,
       aristo_part, aristo_path, aristo_persist, aristo_profile, aristo_tx_frame]

export
  AristoDbProfListRef

const
  AutoValidateApiHooks = defined(release).not
    ## No validation needed for the production suite.

  AristoPersistentBackendOk = AutoValidateApiHooks # and false
    ## Set true for persistent backend profiling (which needs an extra
    ## link library.)

when AristoPersistentBackendOk:
  import ./aristo_init/rocks_db

# Annotation helper(s)
{.pragma: noRaise, gcsafe, raises: [].}

type
  AristoApiCheckpointFn* =
    proc(tx: AristoTxRef;
         blockNumber: uint64
        ) {.noRaise.}
      ## Update the txFrame to the given checkpoint "identifier", or block
      ## number.

  AristoApiDeleteAccountRecordFn* =
    proc(db: AristoTxRef;
         accPath: Hash32;
        ): Result[void,AristoError]
        {.noRaise.}
      ## Delete the account leaf entry addressed by the argument `path`. If
      ## this leaf entry refers to a storage tree, that tree will be deleted
      ## as well.

  AristoApiDeleteStorageDataFn* =
    proc(db: AristoTxRef;
         accPath: Hash32;
         stoPath: Hash32;
        ): Result[bool,AristoError]
        {.noRaise.}
      ## For a given account argument `accPath`, this function deletes the
      ## argument `stoPath` from the associated storage tree (if any, at all.)
      ## If the deleted `stoPath` was the last one on the storage tree, the
      ## account leaf referred to by `accPath` will be updated so that it no
      ## longer refers to a storage tree. Only in the latter case will the
      ## function return `true`.

  AristoApiDeleteStorageTreeFn* =
    proc(db: AristoTxRef;
         accPath: Hash32;
        ): Result[void,AristoError]
        {.noRaise.}
      ## Variant of `deleteStorageData()` for purging the whole storage tree
      ## associated with the account argument `accPath`.

  AristoApiFetchLastCheckpointFn* =
    proc(db: AristoTxRef
        ): Result[uint64,AristoError]
        {.noRaise.}
      ## The function returns the identifier of the last saved state. This is
      ## a Merkle hash tag for vertex with ID 1 and a bespoke `uint64`
      ## identifier (may be interpreted as block number.)

  AristoApiFetchAccountRecordFn* =
    proc(db: AristoTxRef;
         accPath: Hash32;
        ): Result[AristoAccount,AristoError]
        {.noRaise.}
      ## Fetch an account record from the database indexed by `accPath`.

  AristoApiFetchStateRootFn* =
    proc(db: AristoTxRef;
        ): Result[Hash32,AristoError]
        {.noRaise.}
      ## Fetch the Merkle hash of the account root.

  AristoApiFetchStorageDataFn* =
    proc(db: AristoTxRef;
         accPath: Hash32;
         stoPath: Hash32;
        ): Result[UInt256,AristoError]
        {.noRaise.}
      ## For a storage tree related to account `accPath`, fetch the data
      ## record from the database indexed by `stoPath`.

  AristoApiFetchStorageRootFn* =
    proc(db: AristoTxRef;
         accPath: Hash32;
        ): Result[Hash32,AristoError]
        {.noRaise.}
      ## Fetch the Merkle hash of the storage root related to `accPath`.

  AristoApiFinishFn* =
    proc(db: AristoDbRef;
         eradicate = false;
        ) {.noRaise.}
      ## Backend destructor. The argument `eradicate` indicates that a full
      ## database deletion is requested. If set `false` the outcome might
      ## differ depending on the type of backend (e.g. the `BackendMemory`
      ## backend will always eradicate on close.)
      ##
      ## In case of distributed descriptors accessing the same backend, all
      ## distributed descriptors will be destroyed.
      ##
      ## This destructor may be used on already *destructed* descriptors.

  AristoApiForgetFn* =
    proc(db: AristoTxRef;
        ): Result[void,AristoError]
        {.noRaise.}
      ## Destruct the non-centre argument `db` descriptor (see comments on
      ## `reCentre()` for details.)
      ##
      ## A non-centre descriptor should always be destructed after use (see
      ## also comments on `fork()`.)

  AristoApiHashifyFn* =
    proc(db: AristoTxRef;
        ): Result[void,(VertexID,AristoError)]
        {.noRaise.}
      ## Add keys to the `Patricia Trie` so that it becomes a `Merkle
      ## Patricia Tree`.

  AristoApiHasPathAccountFn* =
    proc(db: AristoTxRef;
         accPath: Hash32;
        ): Result[bool,AristoError]
        {.noRaise.}
      ## For an account record indexed by `accPath` query whether this record
      ## exists on the database.

  AristoApiHasPathStorageFn* =
    proc(db: AristoTxRef;
         accPath: Hash32;
         stoPath: Hash32;
        ): Result[bool,AristoError]
        {.noRaise.}
      ## For a storage tree related to account `accPath`, query whether the
      ## data record indexed by `stoPath` exists on the database.

  AristoApiHasStorageDataFn* =
    proc(db: AristoTxRef;
         accPath: Hash32;
        ): Result[bool,AristoError]
        {.noRaise.}
      ## For a storage tree related to account `accPath`, query whether there
      ## is a non-empty data storage area at all.

  AristoApiMergeAccountRecordFn* =
    proc(db: AristoTxRef;
         accPath: Hash32;
         accRec: AristoAccount;
        ): Result[bool,AristoError]
        {.noRaise.}
      ## Merge the key-value-pair argument `(accKey,accRec)` as an account
      ## ledger value, i.e. the sub-tree starting at `VertexID(1)`.
      ##
      ## On success, the function returns `true` if the `accPath` argument was
      ## not already on the database or the stored value differed from
      ## `accRec`, and `false` otherwise.

  AristoApiMergeStorageDataFn* =
    proc(db: AristoTxRef;
         accPath: Hash32;
         stoPath: Hash32;
         stoData: UInt256;
        ): Result[void,AristoError]
        {.noRaise.}
      ## Store the `stoData` data argument on the storage area addressed by
      ## `(accPath,stoPath)` where `accPath` is the account key (into the MPT)
      ## and `stoPath` is the slot path of the corresponding storage area.

  AristoApiPartAccountTwig* =
    proc(db: AristoTxRef;
         accPath: Hash32;
        ): Result[(seq[seq[byte]],bool), AristoError]
        {.noRaise.}
      ## This function returns a chain of rlp-encoded nodes along the argument
      ## path `(root,path)` followed by a `true` value if the `path` argument
      ## exists in the database. If the argument `path` is not on the database,
      ## a partial path will be returned followed by a `false` value.
      ##
      ## Errors will only be returned for invalid paths.

  AristoApiPartStorageTwig* =
    proc(db: AristoTxRef;
         accPath: Hash32;
         stoPath: Hash32;
        ): Result[(seq[seq[byte]],bool), AristoError]
        {.noRaise.}
      ## Variant of `partAccountTwig()`. Note that the function always returns
      ## an error unless the `accPath` is valid.

  AristoApiPartUntwigPath* =
    proc(chain: openArray[seq[byte]];
         root: Hash32;
         path: Hash32;
        ): Result[Opt[seq[byte]],AristoError]
        {.noRaise.}
      ## Variant of `partUntwigGeneric()`.

  AristoApiPartUntwigPathOk* =
    proc(chain: openArray[seq[byte]];
         root: Hash32;
         path: Hash32;
         payload: Opt[seq[byte]];
        ): Result[void,AristoError]
        {.noRaise.}
      ## Variant of `partUntwigGenericOk()`.

  AristoApiPathAsBlobFn* =
    proc(tag: PathID;
        ): seq[byte]
        {.noRaise.}
      ## Converts the `tag` argument to a sequence of an even number of
      ## nibbles represented by a `seq[byte]`. If the argument `tag` represents
      ## an odd number of nibbles, a zero nibble is appended.
      ##
      ## This function is useful only if there is a tacit agreement that all
      ## paths used to index database leaf values can be represented as
      ## `seq[byte]`, i.e. `PathID` type paths with an even number of nibbles.

  AristoApiPersistFn* =
    proc(
        db: AristoDbRef;
        batch: PutHdlRef;
        txFrame: AristoTxRef;
        ) {.noRaise.}
      ## Persistently store the cumulative set of changes that `txFrame`
      ## represents to the database. `txFrame` becomes the new base after this
      ## operation.

  AristoApiDisposeFn* =
    proc(tx: AristoTxRef;
        ) {.noRaise.}
      ## Release a frame, freeing its associated resources. This operation
      ## makes all frames built on top of it invalid - they still need to be
      ## released however.

  AristoApiTxFrameBeginFn* =
    proc(db: AristoDbRef; parent: AristoTxRef
        ): AristoTxRef
        {.noRaise.}
      ## Create a new layered transaction frame - the frame can later be
      ## released or frozen and persisted.

  AristoApiBaseTxFrameFn* =
    proc(db: AristoDbRef;
        ): AristoTxRef
        {.noRaise.}

  AristoApiRef* = ref AristoApiObj
  AristoApiObj* = object of RootObj
    ## Useful set of `Aristo` functions that can be filtered, stacked etc.
    checkpoint*: AristoApiCheckpointFn

    deleteAccountRecord*: AristoApiDeleteAccountRecordFn
    deleteStorageData*: AristoApiDeleteStorageDataFn
    deleteStorageTree*: AristoApiDeleteStorageTreeFn

    fetchLastCheckpoint*: AristoApiFetchLastCheckpointFn

    fetchAccountRecord*: AristoApiFetchAccountRecordFn
    fetchStateRoot*: AristoApiFetchStateRootFn
    fetchStorageData*: AristoApiFetchStorageDataFn
    fetchStorageRoot*: AristoApiFetchStorageRootFn

    finish*: AristoApiFinishFn
    hasPathAccount*: AristoApiHasPathAccountFn
    hasPathStorage*: AristoApiHasPathStorageFn
    hasStorageData*: AristoApiHasStorageDataFn

    mergeAccountRecord*: AristoApiMergeAccountRecordFn
    mergeStorageData*: AristoApiMergeStorageDataFn

    partAccountTwig*: AristoApiPartAccountTwig
    partStorageTwig*: AristoApiPartStorageTwig
    partUntwigPath*: AristoApiPartUntwigPath
    partUntwigPathOk*: AristoApiPartUntwigPathOk

    pathAsBlob*: AristoApiPathAsBlobFn
    persist*: AristoApiPersistFn
    dispose*: AristoApiDisposeFn
    txFrameBegin*: AristoApiTxFrameBeginFn
    baseTxFrame*: AristoApiBaseTxFrameFn

  AristoApiProfNames* = enum
    ## Index/name mapping for profile slots
    AristoApiProfTotal = "total"
    AristoApiProfCheckpointFn = "checkpoint"

    AristoApiProfDeleteAccountRecordFn = "deleteAccountRecord"
    AristoApiProfDeleteStorageDataFn = "deleteStorageData"
    AristoApiProfDeleteStorageTreeFn = "deleteStorageTree"

    AristoApiProfFetchLastCheckpointFn = "fetchLastCheckpoint"

    AristoApiProfFetchAccountRecordFn = "fetchAccountRecord"
    AristoApiProfFetchStateRootFn = "fetchStateRoot"
    AristoApiProfFetchStorageDataFn = "fetchStorageData"
    AristoApiProfFetchStorageRootFn = "fetchStorageRoot"

    AristoApiProfFinishFn = "finish"

    AristoApiProfHasPathAccountFn = "hasPathAccount"
    AristoApiProfHasPathStorageFn = "hasPathStorage"
    AristoApiProfHasStorageDataFn = "hasStorageData"

    AristoApiProfMergeAccountRecordFn = "mergeAccountRecord"
    AristoApiProfMergeStorageDataFn = "mergeStorageData"

    AristoApiProfPartAccountTwigFn = "partAccountTwig"
    AristoApiProfPartStorageTwigFn = "partStorageTwig"
    AristoApiProfPartUntwigPathFn = "partUntwigPath"
    AristoApiProfPartUntwigPathOkFn = "partUntwigPathOk"

    AristoApiProfPathAsBlobFn = "pathAsBlob"
    AristoApiProfPersistFn = "persist"
    AristoApiProfDisposeFn = "dispose"
    AristoApiProfTxFrameBeginFn = "txFrameBegin"
    AristoApiProfBaseTxFrameFn = "baseTxFrame"

    AristoApiProfBeGetVtxFn = "be/getVtx"
    AristoApiProfBeGetKeyFn = "be/getKey"
    AristoApiProfBeGetTuvFn = "be/getTuv"
    AristoApiProfBeGetLstFn = "be/getLst"
    AristoApiProfBePutVtxFn = "be/putVtx"
    AristoApiProfBePutTuvFn = "be/putTuv"
    AristoApiProfBePutLstFn = "be/putLst"
    AristoApiProfBePutEndFn = "be/putEnd"

  AristoApiProfRef* = ref object of AristoApiRef
    ## Profiling API extension of `AristoApiObj`
    data*: AristoDbProfListRef
    be*: BackendRef

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

when AutoValidateApiHooks:
  proc validate(api: AristoApiObj) =
    for _, field in api.fieldPairs():
      doAssert not field.isNil

  proc validate(prf: AristoApiProfRef) =
    prf.AristoApiRef[].validate
    doAssert not prf.data.isNil

proc dup(be: BackendRef): BackendRef =
  case be.kind:
  of BackendMemory:
    return MemBackendRef(be).dup

  of BackendRocksDB:
    when AristoPersistentBackendOk:
      return RdbBackendRef(be).dup

# ------------------------------------------------------------------------------
# Public API constructors
# ------------------------------------------------------------------------------

func init*(api: var AristoApiObj) =
  ## Initialise an `api` argument descriptor
  ##
  when AutoValidateApiHooks:
    api.reset
  api.checkpoint = checkpoint

  api.deleteAccountRecord = deleteAccountRecord
  api.deleteStorageData = deleteStorageData
  api.deleteStorageTree = deleteStorageTree

  api.fetchLastCheckpoint = fetchLastCheckpoint

  api.fetchAccountRecord = fetchAccountRecord
  api.fetchStateRoot = fetchStateRoot
  api.fetchStorageData = fetchStorageData
  api.fetchStorageRoot = fetchStorageRoot

  api.finish = finish

  api.hasPathAccount = hasPathAccount
  api.hasPathStorage = hasPathStorage
  api.hasStorageData = hasStorageData

  api.mergeAccountRecord = mergeAccountRecord
  api.mergeStorageData = mergeStorageData

  api.partAccountTwig = partAccountTwig
  api.partStorageTwig = partStorageTwig
  api.partUntwigPath = partUntwigPath
  api.partUntwigPathOk = partUntwigPathOk

  api.pathAsBlob = pathAsBlob
  api.persist = persist
  api.dispose = dispose
  api.txFrameBegin = txFrameBegin
  api.baseTxFrame = baseTxFrame

  when AutoValidateApiHooks:
    api.validate

func init*(T: type AristoApiRef): T =
  new result
  result[].init()

func dup*(api: AristoApiRef): AristoApiRef =
  result = AristoApiRef()
  result[] = api[]
  when AutoValidateApiHooks:
    result[].validate

# ------------------------------------------------------------------------------
# Public profile API constructor
# ------------------------------------------------------------------------------

func init*(
    T: type AristoApiProfRef;
    api: AristoApiRef;
    be = BackendRef(nil);
      ): T =
  ## This constructor creates a profiling API descriptor to be derived from
  ## an initialised `api` argument descriptor. For profiling the DB backend,
  ## the field `.be` of the result descriptor must be assigned to the
  ## `.backend` field of the `AristoTxRef` descriptor.
  ##
  ## The argument descriptors `api` and `be` will not be modified and can be
  ## used to restore the previous setup.
  ##
  let
    data = AristoDbProfListRef(
      list: newSeq[AristoDbProfData](1 + high(AristoApiProfNames).ord))
    profApi = T(data: data)

  template profileRunner(n: AristoApiProfNames, code: untyped): untyped =
    let start = getTime()
    code
    data.update(n.ord, getTime() - start)

  profApi.checkpoint =
    proc(a: AristoTxRef): auto =
      AristoApiProfCheckpointFn.profileRunner:
        api.checkpoint(a)

  profApi.deleteAccountRecord =
    proc(a: AristoTxRef; b: Hash32): auto =
      AristoApiProfDeleteAccountRecordFn.profileRunner:
        result = api.deleteAccountRecord(a, b)

  profApi.deleteStorageData =
    proc(a: AristoTxRef; b: Hash32, c: Hash32): auto =
      AristoApiProfDeleteStorageDataFn.profileRunner:
        result = api.deleteStorageData(a, b, c)

  profApi.deleteStorageTree =
    proc(a: AristoTxRef; b: Hash32): auto =
      AristoApiProfDeleteStorageTreeFn.profileRunner:
        result = api.deleteStorageTree(a, b)

  profApi.fetchLastCheckpoint =
    proc(a: AristoTxRef): auto =
      AristoApiProfFetchLastCheckpointFn.profileRunner:
        result = api.fetchLastCheckpoint(a)

  profApi.fetchAccountRecord =
    proc(a: AristoTxRef; b: Hash32): auto =
      AristoApiProfFetchAccountRecordFn.profileRunner:
        result = api.fetchAccountRecord(a, b)

  profApi.fetchStateRoot =
    proc(a: AristoTxRef; b: bool): auto =
      AristoApiProfFetchStateRootFn.profileRunner:
        result = api.fetchStateRoot(a, b)

  profApi.fetchStorageData =
    proc(a: AristoTxRef; b, stoPath: Hash32): auto =
      AristoApiProfFetchStorageDataFn.profileRunner:
        result = api.fetchStorageData(a, b, stoPath)

  profApi.fetchStorageRoot =
    proc(a: AristoTxRef; b: Hash32): auto =
      AristoApiProfFetchStorageRootFn.profileRunner:
        result = api.fetchStorageRoot(a, b)

  profApi.finish =
    proc(a: AristoTxRef; b = false) =
      AristoApiProfFinishFn.profileRunner:
        api.finish(a, b)

  profApi.hasPathAccount =
    proc(a: AristoTxRef; b: Hash32): auto =
      AristoApiProfHasPathAccountFn.profileRunner:
        result = api.hasPathAccount(a, b)

  profApi.hasPathStorage =
    proc(a: AristoTxRef; b, c: Hash32): auto =
      AristoApiProfHasPathStorageFn.profileRunner:
        result = api.hasPathStorage(a, b, c)

  profApi.hasStorageData =
    proc(a: AristoTxRef; b: Hash32): auto =
      AristoApiProfHasStorageDataFn.profileRunner:
        result = api.hasStorageData(a, b)

  profApi.mergeAccountRecord =
    proc(a: AristoTxRef; b: Hash32; c: AristoAccount): auto =
      AristoApiProfMergeAccountRecordFn.profileRunner:
        result = api.mergeAccountRecord(a, b, c)

  profApi.mergeStorageData =
    proc(a: AristoTxRef; b, c: Hash32, d: UInt256): auto =
      AristoApiProfMergeStorageDataFn.profileRunner:
        result = api.mergeStorageData(a, b, c, d)

  profApi.partAccountTwig =
    proc(a: AristoTxRef; b: Hash32): auto =
      AristoApiProfPartAccountTwigFn.profileRunner:
        result = api.partAccountTwig(a, b)

  profApi.partStorageTwig =
    proc(a: AristoTxRef; b: Hash32; c: Hash32): auto =
      AristoApiProfPartStorageTwigFn.profileRunner:
        result = api.partStorageTwig(a, b, c)

  profApi.partUntwigPath =
    proc(a: openArray[seq[byte]]; b, c: Hash32): auto =
      AristoApiProfPartUntwigPathFn.profileRunner:
        result = api.partUntwigPath(a, b, c)

  profApi.partUntwigPathOk =
    proc(a: openArray[seq[byte]]; b, c: Hash32; d: Opt[seq[byte]]): auto =
      AristoApiProfPartUntwigPathOkFn.profileRunner:
        result = api.partUntwigPathOk(a, b, c, d)

  profApi.pathAsBlob =
    proc(a: PathID): auto =
      AristoApiProfPathAsBlobFn.profileRunner:
        result = api.pathAsBlob(a)

  profApi.persist =
    proc(a: AristoTxRef; b = 0u64): auto =
      AristoApiProfPersistFn.profileRunner:
        result = api.persist(a, b)

  profApi.dispose =
    proc(a: AristoTxRef) =
      AristoApiProfDisposeFn.profileRunner:
        api.dispose(a)

  profApi.txFrameBegin =
    proc(a: AristoTxRef): auto =
      AristoApiProfTxFrameBeginFn.profileRunner:
        result = api.txFrameBegin(a)

  profApi.baseTxFrame =
    proc(a: AristoTxRef): auto =
      AristoApiProfBaseTxFrameFn.profileRunner:
        result = api.baseTxFrame(a)

  let beDup = be.dup()
  if beDup.isNil:
    profApi.be = be

  else:
    beDup.getVtxFn =
      proc(a: RootedVertexID, flags: set[GetVtxFlag]): auto =
        AristoApiProfBeGetVtxFn.profileRunner:
          result = be.getVtxFn(a, flags)
    data.list[AristoApiProfBeGetVtxFn.ord].masked = true

    beDup.getKeyFn =
      proc(a: RootedVertexID): auto =
        AristoApiProfBeGetKeyFn.profileRunner:
          result = be.getKeyFn(a)
    data.list[AristoApiProfBeGetKeyFn.ord].masked = true

    beDup.getTuvFn =
      proc(): auto =
        AristoApiProfBeGetTuvFn.profileRunner:
          result = be.getTuvFn()
    data.list[AristoApiProfBeGetTuvFn.ord].masked = true

    beDup.getLstFn =
      proc(): auto =
        AristoApiProfBeGetLstFn.profileRunner:
          result = be.getLstFn()
    data.list[AristoApiProfBeGetLstFn.ord].masked = true

    beDup.putVtxFn =
      proc(a: PutHdlRef; b: RootedVertexID, c: VertexRef) =
        AristoApiProfBePutVtxFn.profileRunner:
          be.putVtxFn(a, b, c)
    data.list[AristoApiProfBePutVtxFn.ord].masked = true

    beDup.putTuvFn =
      proc(a: PutHdlRef; b: VertexID) =
        AristoApiProfBePutTuvFn.profileRunner:
          be.putTuvFn(a,b)
    data.list[AristoApiProfBePutTuvFn.ord].masked = true

    beDup.putLstFn =
      proc(a: PutHdlRef; b: SavedState) =
        AristoApiProfBePutLstFn.profileRunner:
          be.putLstFn(a,b)
    data.list[AristoApiProfBePutLstFn.ord].masked = true

    beDup.putEndFn =
      proc(a: PutHdlRef): auto =
        AristoApiProfBePutEndFn.profileRunner:
          result = be.putEndFn(a)
    data.list[AristoApiProfBePutEndFn.ord].masked = true

    profApi.be = beDup

  when AutoValidateApiHooks:
    profApi.validate

  profApi

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
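Taken together, the profiling constructor above was wired in roughly like this; a sketch assuming an open `AristoDbRef` named `mpt` exposing the `backend` field named in the constructor's doc comment:

    let
      plainApi = AristoApiRef.init()
      profApi  = AristoApiProfRef.init(plainApi, mpt.backend)
    mpt.backend = profApi.be    # route backend calls through the profiler too
    # ... run the workload through profApi ...
    # one timing slot per AristoApiProfNames entry, e.g.:
    let slot = profApi.data.list[AristoApiProfFetchAccountRecordFn.ord]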
@@ -1,28 +0,0 @@
# nimbus-eth1
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Constructors for Aristo DB
## ==========================
##
## See `./README.md` for implementation details
##
## This module provides a memory database only. For a persistent constructor,
## import `aristo_init/persistent` instead; this avoids unnecessarily linking
## the persistent backend library (e.g. `rocksdb`) when only a memory database
## is used.
##
{.push raises: [].}

import
  ./aristo_init/[init_common, memory_only]
export
  init_common, memory_only

# End
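The import split the module comment describes, as a minimal sketch (module paths as they appear in this diff):

    # Memory-only database: no persistent backend library gets linked.
    import ./aristo          # re-exports aristo_init/[init_common, memory_only]

    # Persistent database: opt in explicitly, e.g. for RocksDB.
    # import ./aristo/aristo_init/persistent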
@@ -15,7 +15,6 @@
## backend access
## ::
##   import
##     aristo/aristo_init,
##     aristo/aristo_init/aristo_rocksdb
##
##   let rc = AristoDb.init(BackendRocksDB, "/var/tmp")

@@ -1,18 +0,0 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

import
  aristo_init/persistent as init_persistent,
  aristo_walk/persistent as walk_persistent
export
  init_persistent,
  walk_persistent

# End

@@ -15,7 +15,7 @@ import
   ../../aristo/[aristo_init/memory_only, aristo_walk/memory_only],
   ../../kvt as use_kvt,
   ../../kvt/[kvt_init/memory_only, kvt_walk/memory_only],
-  ../base/[base_config, base_desc, base_helpers]
+  ../base/[base_desc, base_helpers]

 export base_desc

@@ -28,19 +28,6 @@ proc create*(dbType: CoreDbType; kvt: KvtDbRef; mpt: AristoDbRef): CoreDbRef =
   var db = CoreDbRef(dbType: dbType)
   db.defCtx = db.bless CoreDbCtxRef(mpt: mpt, kvt: kvt)

-  when CoreDbEnableApiJumpTable:
-    db.kvtApi = KvtApiRef.init()
-    db.ariApi = AristoApiRef.init()
-
-    when CoreDbEnableProfiling:
-      block:
-        let profApi = KvtApiProfRef.init(db.kvtApi, kvt.backend)
-        db.kvtApi = profApi
-        kvt.backend = profApi.be
-      block:
-        let profApi = AristoApiProfRef.init(db.ariApi, mpt.backend)
-        db.ariApi = profApi
-        mpt.backend = profApi.be
   bless db

 proc newMemoryCoreDbRef*(): CoreDbRef =

@@ -1,789 +0,0 @@
# Nimbus
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

##
## Database Backend Tracer
## =======================
##

{.push raises: [].}

import
  std/[sequtils, tables, typetraits],
  stew/keyed_queue,
  eth/common,
  results,
  ../../aristo as use_aristo,
  ../../aristo/aristo_desc,
  ../../kvt as use_kvt,
  ../../kvt/kvt_desc,
  ../base/[base_config, base_desc]

const
  LogJournalMax = 1_000_000
    ## Maximal size of a journal (organised as LRU)

type
  TracePfx = enum
    TrpOops = 0
    TrpKvt
    TrpAccounts
    TrpStorage

  TraceRequest* = enum
    TrqOops = 0
    TrqFind
    TrqAdd
    TrqModify
    TrqDelete

  TraceDataType* = enum
    TdtOops = 0
    TdtBlob    ## Kvt and Aristo
    TdtError   ## Kvt and Aristo
    TdtVoid    ## Kvt and Aristo
    TdtAccount ## Aristo only
    TdtBigNum  ## Aristo only
    TdtHash    ## Aristo only

  TraceDataItemRef* = ref object
    ## Log journal entry
    pfx*: TracePfx      ## DB storage prefix
    info*: int          ## `KvtApiProfNames` or `AristoApiProfNames`
    req*: TraceRequest  ## Logged action request
    case kind*: TraceDataType
    of TdtBlob:
      blob*: seq[byte]
    of TdtError:
      error*: int       ## `KvtError` or `AristoError`
    of TdtAccount:
      account*: AristoAccount
    of TdtBigNum:
      bigNum*: UInt256
    of TdtHash:
      hash*: Hash32
    of TdtVoid, TdtOops:
      discard

  TraceLogInstRef* = ref object
    ## Logger instance
    base: TraceRecorderRef
    level: int
    truncated: bool
    journal: KeyedQueue[seq[byte],TraceDataItemRef]

  TraceRecorderRef* = ref object of RootRef
    log: seq[TraceLogInstRef]  ## Production stack for log database
    db: CoreDbRef
    kvtSave: KvtApiRef         ## Restore `KVT` data
    ariSave: AristoApiRef      ## Restore `Aristo` data

doAssert LEAST_FREE_VID <= 256 # needed for journal key byte prefix

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

when CoreDbNoisyCaptJournal:
  import
    std/strutils,
    chronicles,
    stew/byteutils

  func squeezeHex(s: string; ignLen = false): string =
    result = if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. ^1]
    if not ignLen:
      let n = (s.len + 1) div 2
      result &= "[" & (if 0 < n: "#" & $n else: "") & "]"

  func stripZeros(a: string; toExp = false): string =
    if 0 < a.len:
      result = a.toLowerAscii.strip(leading=true, trailing=false, chars={'0'})
      if result.len == 0:
        result = "0"
      elif result[^1] == '0' and toExp:
        var n = 0
        while result[^1] == '0':
          let w = result.len
          result.setLen(w-1)
          n.inc
        if n == 1:
          result &= "0"
        elif n == 2:
          result &= "00"
        elif 2 < n:
          result &= "↑" & $n

  func `$$`(w: openArray[byte]): string =
    w.toHex.squeezeHex

  func `$`(w: seq[byte]): string =
    w.toHex.squeezeHex

  func `$`(w: UInt256): string =
    "#" & w.toHex.stripZeros.squeezeHex

  func `$`(w: Hash32): string =
    "£" & w.data.toHex.squeezeHex

  func `$`(w: VertexID): string =
    if 0 < w.uint64: "$" & w.uint64.toHex.stripZeros else: "$ø"

  func `$`(w: AristoAccount): string =
    "(" & $w.nonce & "," & $w.balance & "," & $w.codeHash & ")"

  func `$`(ti: TraceDataItemRef): string =
    result = "(" &
      (if ti.pfx == TrpKvt: $KvtApiProfNames(ti.info)
       elif ti.pfx == TrpOops: "<oops>"
       else: $AristoApiProfNames(ti.info))

    result &= "," & (
      case ti.req:
      of TrqOops: "<oops>"
      of TrqFind: ""
      of TrqModify: "="
      of TrqDelete: "-"
      of TrqAdd: "+")

    result &= (
      case ti.kind:
      of TdtOops: "<oops>"
      of TdtBlob: $ti.blob
      of TdtBigNum: $ti.bigNum
      of TdtHash: $ti.hash
      of TdtVoid: "ø"
      of TdtError: (if ti.pfx == TrpKvt: $KvtError(ti.error)
                    elif ti.pfx == TrpOops: "<oops>"
                    else: $AristoError(ti.error))
      of TdtAccount: $ti.account)

    result &= ")"

  func toStr(pfx: TracePfx, key: openArray[byte]): string =
    case pfx:
    of TrpOops:
      "<oops>"
    of TrpKvt:
      $$(key.toOpenArray(0, key.len - 1))
    of TrpAccounts:
      "1:" & $$(key.toOpenArray(0, key.len - 1))
    of TrpStorage:
      "1:" & $$(key.toOpenArray(0, min(31, key.len - 1))) & ":" &
        (if 32 < key.len: $$(key.toOpenArray(32, key.len - 1)) else: "")

  func `$`(key: openArray[byte]; ti: TraceDataItemRef): string =
    "(" &
      TracePfx(key[0]).toStr(key.toOpenArray(1, key.len - 1)) & "," &
      $ti & ")"

# -------------------------------

template logTxt(info: static[string]): static[string] =
  "trace " & info

func topLevel(tr: TraceRecorderRef): int =
  tr.log.len - 1

# --------------------

proc jLogger(
    tr: TraceRecorderRef;
    key: openArray[byte];
    ti: TraceDataItemRef;
      ) =
  ## Add or update journal entry. The `tr.pfx` argument indicates the key type:
  ##
  ## * `TrpKvt`: followed by KVT key
  ## * `TrpAccounts`: followed by <account-path>
  ## * `TrpGeneric`: followed by <root-ID> + <path>
  ## * `TrpStorage`: followed by <account-path> + <storage-path>
  ##
  doAssert ti.pfx != TrpOops
  let
    pfx = @[ti.pfx.byte]
    lRec = tr.log[^1].journal.lruFetch(pfx & @key).valueOr:
      if LogJournalMax <= tr.log[^1].journal.len:
        tr.log[^1].truncated = true
      discard tr.log[^1].journal.lruAppend(pfx & @key, ti, LogJournalMax)
      return
  if ti.req != TrqFind:
    lRec[] = ti[]

proc jLogger(
    tr: TraceRecorderRef;
    accPath: Hash32;
    ti: TraceDataItemRef;
      ) =
  tr.jLogger(accPath.data.toSeq, ti)

proc jLogger(
    tr: TraceRecorderRef;
    ti: TraceDataItemRef;
      ) =
  tr.jLogger(EmptyBlob, ti)

proc jLogger(
    tr: TraceRecorderRef;
    accPath: Hash32;
    stoPath: Hash32;
    ti: TraceDataItemRef;
      ) =
  tr.jLogger(accPath.data.toSeq & stoPath.data.toSeq, ti)
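Every journal key is thus a one-byte `TracePfx` discriminator followed by the raw path bytes (cf. the `doAssert LEAST_FREE_VID <= 256` above). A sketch of the layouts these overloads produce, with `acc` and `slot` as placeholder `Hash32` values:

    # TrpKvt      => 0x01 & <kvt key bytes>
    # TrpAccounts => 0x02 & acc.data              (1 + 32 bytes)
    # TrpStorage  => 0x03 & acc.data & slot.data  (1 + 64 bytes)
    let
      accKey = @[TrpAccounts.byte] & acc.data.toSeq
      stoKey = @[TrpStorage.byte] & acc.data.toSeq & slot.data.toSeq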

# --------------------

func to(w: AristoApiProfNames; T: type TracePfx): T =
  case w:
  of AristoApiProfFetchAccountRecordFn,
     AristoApiProfFetchStateRootFn,
     AristoApiProfDeleteAccountRecordFn,
     AristoApiProfMergeAccountRecordFn:
    return TrpAccounts
  of AristoApiProfFetchStorageDataFn,
     AristoApiProfFetchStorageRootFn,
     AristoApiProfDeleteStorageDataFn,
     AristoApiProfDeleteStorageTreeFn,
     AristoApiProfMergeStorageDataFn:
    return TrpStorage
  else:
    discard
  raiseAssert "Unsupported AristoApiProfNames: " & $w

func to(w: KvtApiProfNames; T: type TracePfx): T =
  TrpKvt

# --------------------

func logRecord(
    info: KvtApiProfNames | AristoApiProfNames;
    req: TraceRequest;
    data: openArray[byte];
      ): TraceDataItemRef =
  TraceDataItemRef(
    pfx: info.to(TracePfx),
    info: info.ord,
    req: req,
    kind: TdtBlob,
    blob: @data)

func logRecord(
    info: KvtApiProfNames | AristoApiProfNames;
    req: TraceRequest;
    error: KvtError | AristoError;
      ): TraceDataItemRef =
  TraceDataItemRef(
    pfx: info.to(TracePfx),
    info: info.ord,
    req: req,
    kind: TdtError,
    error: error.ord)

func logRecord(
    info: KvtApiProfNames | AristoApiProfNames;
    req: TraceRequest;
      ): TraceDataItemRef =
  TraceDataItemRef(
    pfx: info.to(TracePfx),
    info: info.ord,
    req: req,
    kind: TdtVoid)

# --------------------

func logRecord(
    info: AristoApiProfNames;
    req: TraceRequest;
    accRec: AristoAccount;
      ): TraceDataItemRef =
  TraceDataItemRef(
    pfx: info.to(TracePfx),
    info: info.ord,
    req: req,
    kind: TdtAccount,
    account: accRec)

func logRecord(
    info: AristoApiProfNames;
    req: TraceRequest;
    state: Hash32;
      ): TraceDataItemRef =
  TraceDataItemRef(
    pfx: info.to(TracePfx),
    info: info.ord,
    req: req,
    kind: TdtHash,
    hash: state)

func logRecord(
    info: AristoApiProfNames;
    req: TraceRequest;
    sto: UInt256;
      ): TraceDataItemRef =
  TraceDataItemRef(
    pfx: info.to(TracePfx),
    info: info.ord,
    req: req,
    kind: TdtBigNum,
    bigNum: sto)

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc kvtTraceRecorder(tr: TraceRecorderRef) =
  let
    api = tr.db.kvtApi
    tracerApi = api.dup

  # Set up new production api `tracerApi` and save the old one
  tr.kvtSave = api
  tr.db.kvtApi = tracerApi

  # Update production api
  tracerApi.get =
    proc(kvt: KvtTxRef; key: openArray[byte]): Result[seq[byte],KvtError] =
      const info = KvtApiProfGetFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      let data = api.get(kvt, key).valueOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, key=($$key), error
        tr.jLogger(key, logRecord(info, TrqFind, error))
        return err(error) # No way

      tr.jLogger(key, logRecord(info, TrqFind, data))

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, key=($$key), data=($$data)
      ok(data)

  tracerApi.del =
    proc(kvt: KvtTxRef; key: openArray[byte]): Result[void,KvtError] =
      const info = KvtApiProfDelFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      # Find entry on DB (for comprehensive log record)
      let tiRec = block:
        let rc = api.get(kvt, key)
        if rc.isOk:
          logRecord(info, TrqDelete, rc.value)
        elif rc.error == GetNotFound:
          logRecord(info, TrqDelete)
        else:
          when CoreDbNoisyCaptJournal:
            debug logTxt $info, level, key=($$key), error=rc.error
          tr.jLogger(key, logRecord(info, TrqDelete, rc.error))
          return err(rc.error)

      # Delete from DB
      api.del(kvt, key).isOkOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, key=($$key), error
        tr.jLogger(key, logRecord(info, TrqDelete, error))
        return err(error)

      # Log on journal
      tr.jLogger(key, tiRec)

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, key=($$key)
      ok()

  tracerApi.put =
    proc(kvt: KvtTxRef; key, data: openArray[byte]): Result[void,KvtError] =
      const info = KvtApiProfPutFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      # Find entry on DB
      let
        hasKey = api.hasKeyRc(kvt, key).valueOr:
          when CoreDbNoisyCaptJournal:
            debug logTxt $info, level, key=($$key), error
          tr.jLogger(key, logRecord(info, TrqAdd, error))
          return err(error)
        mode = if hasKey: TrqModify else: TrqAdd

      # Store on DB
      api.put(kvt, key, data).isOkOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, key=($$key), data=($$data)
        tr.jLogger(key, logRecord(info, mode, error))
        return err(error)

      tr.jLogger(key, logRecord(info, mode, data))

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, key=($$key), data=($$data)
      ok()

  assert tr.kvtSave != tr.db.kvtApi
  assert tr.kvtSave.del != tr.db.kvtApi.del
  assert tr.kvtSave.hasKeyRc == tr.db.kvtApi.hasKeyRc


proc ariTraceRecorder(tr: TraceRecorderRef) =
  let
    api = tr.db.ariApi
    tracerApi = api.dup

  # Set up new production api `tracerApi` and save the old one
  tr.ariSave = api
  tr.db.ariApi = tracerApi

  tracerApi.fetchAccountRecord =
    proc(mpt: AristoTxRef;
         accPath: Hash32;
        ): Result[AristoAccount,AristoError] =
      const info = AristoApiProfFetchAccountRecordFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      # Find entry on DB
      let accRec = api.fetchAccountRecord(mpt, accPath).valueOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, accPath, error
        tr.jLogger(accPath, logRecord(info, TrqFind, error))
        return err(error)

      tr.jLogger(accPath, logRecord(info, TrqFind, accRec))

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, accPath, accRec
      ok accRec

  tracerApi.fetchStateRoot =
    proc(mpt: AristoTxRef;
        ): Result[Hash32,AristoError] =
      const info = AristoApiProfFetchStateRootFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      # Find entry on DB
      let state = api.fetchStateRoot(mpt).valueOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, error
        tr.jLogger logRecord(info, TrqFind, error)
        return err(error)

      tr.jLogger logRecord(info, TrqFind, state)

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, state
      ok state

  tracerApi.fetchStorageData =
    proc(mpt: AristoTxRef;
         accPath: Hash32;
         stoPath: Hash32;
        ): Result[UInt256,AristoError] =
      const info = AristoApiProfFetchStorageDataFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      # Find entry on DB
      let stoData = api.fetchStorageData(mpt, accPath, stoPath).valueOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, accPath, stoPath, error
        tr.jLogger(accPath, stoPath, logRecord(info, TrqFind, error))
        return err(error)

      tr.jLogger(accPath, stoPath, logRecord(info, TrqFind, stoData))

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, accPath, stoPath, stoData
      ok stoData

  tracerApi.fetchStorageRoot =
    proc(mpt: AristoTxRef;
         accPath: Hash32;
        ): Result[Hash32,AristoError] =
      const info = AristoApiProfFetchStorageRootFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      # Find entry on DB
      let state = api.fetchStorageRoot(mpt, accPath).valueOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, accPath, error
        tr.jLogger(accPath, logRecord(info, TrqFind, error))
        return err(error)

      tr.jLogger(accPath, logRecord(info, TrqFind, state))

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, accPath, state
      ok state

  tracerApi.deleteAccountRecord =
    proc(mpt: AristoTxRef;
         accPath: Hash32;
        ): Result[void,AristoError] =
      const info = AristoApiProfDeleteAccountRecordFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      # Find entry on DB (for comprehensive log record)
      let tiRec = block:
        let rc = api.fetchAccountRecord(mpt, accPath)
        if rc.isOk:
          logRecord(info, TrqDelete, rc.value)
        elif rc.error == FetchPathNotFound:
          logRecord(info, TrqDelete)
        else:
          when CoreDbNoisyCaptJournal:
            debug logTxt $info, level, accPath, error=rc.error
          tr.jLogger(accPath, logRecord(info, TrqDelete, rc.error))
          return err(rc.error)

      # Delete from DB
      api.deleteAccountRecord(mpt, accPath).isOkOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, accPath, error
        tr.jLogger(accPath, logRecord(info, TrqDelete, error))
        return err(error)

      # Log on journal
      tr.jLogger(accPath, tiRec)

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, accPath
      ok()

  tracerApi.deleteStorageData =
    proc(mpt: AristoTxRef;
         accPath: Hash32;
         stoPath: Hash32;
        ): Result[bool,AristoError] =
      const info = AristoApiProfDeleteStorageDataFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      # Find entry on DB (for comprehensive log record)
      let tiRec = block:
        let rc = api.fetchStorageData(mpt, accPath, stoPath)
        if rc.isOk:
          logRecord(info, TrqDelete, rc.value)
        elif rc.error == FetchPathNotFound:
          logRecord(info, TrqDelete)
        else:
          when CoreDbNoisyCaptJournal:
            debug logTxt $info, level, accPath, stoPath, error=rc.error
          tr.jLogger(accPath, stoPath, logRecord(info, TrqDelete, rc.error))
          return err(rc.error)

      let emptyTrie = api.deleteStorageData(mpt, accPath, stoPath).valueOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, accPath, stoPath, error
        tr.jLogger(accPath, stoPath, logRecord(info, TrqDelete, error))
        return err(error)

      # Log on journal
      tr.jLogger(accPath, stoPath, tiRec)

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, accPath, stoPath, emptyTrie
      ok emptyTrie

  tracerApi.deleteStorageTree =
    proc(mpt: AristoTxRef;
         accPath: Hash32;
        ): Result[void,AristoError] =
      const info = AristoApiProfDeleteStorageTreeFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      # Delete from DB
      api.deleteStorageTree(mpt, accPath).isOkOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, accPath, error
        tr.jLogger(accPath, logRecord(info, TrqDelete, error))
        return err(error)

      # Log on journal
      tr.jLogger(accPath, logRecord(info, TrqDelete))

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, accPath
      ok()

  tracerApi.mergeAccountRecord =
    proc(mpt: AristoTxRef;
         accPath: Hash32;
         accRec: AristoAccount;
        ): Result[bool,AristoError] =
      const info = AristoApiProfMergeAccountRecordFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      # Find entry on DB (for comprehensive log record)
      let
        hadPath = api.hasPathAccount(mpt, accPath).valueOr:
          when CoreDbNoisyCaptJournal:
            debug logTxt $info, level, accPath, error
          tr.jLogger(accPath, logRecord(info, TrqAdd, error))
          return err(error)
        mode = if hadPath: TrqModify else: TrqAdd

      # Do the merge
      let updated = api.mergeAccountRecord(mpt, accPath, accRec).valueOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, accPath, hadPath, error
        tr.jLogger(accPath, logRecord(info, mode, error))
        return err(error)

      # Log on journal
      tr.jLogger(accPath, logRecord(info, mode, accRec))

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, accPath, accRec, hadPath, updated
      ok updated

  tracerApi.mergeStorageData =
    proc(mpt: AristoTxRef;
         accPath: Hash32;
         stoPath: Hash32;
         stoData: UInt256;
        ): Result[void,AristoError] =
      const info = AristoApiProfMergeStorageDataFn

      when CoreDbNoisyCaptJournal:
        let level = tr.topLevel()

      # Find entry on DB (for comprehensive log record)
      let
        hadPath = api.hasPathStorage(mpt, accPath, stoPath).valueOr:
          when CoreDbNoisyCaptJournal:
            debug logTxt $info, level, accPath, stoPath, error
          tr.jLogger(accPath, stoPath, logRecord(info, TrqAdd, error))
          return err(error)
        mode = if hadPath: TrqModify else: TrqAdd

      # Do the merge
      api.mergeStorageData(mpt, accPath, stoPath, stoData).isOkOr:
        when CoreDbNoisyCaptJournal:
          debug logTxt $info, level, accPath, stoPath, error
        tr.jLogger(accPath, stoPath, logRecord(info, mode, error))
        return err(error)

      # Log on journal
      tr.jLogger(accPath, stoPath, logRecord(info, mode, stoData))

      when CoreDbNoisyCaptJournal:
        debug logTxt $info, level, accPath, stoPath, stoData, hadPath
      ok()

  assert tr.ariSave != tr.db.ariApi
  assert tr.ariSave.deleteAccountRecord != tr.db.ariApi.deleteAccountRecord
  assert tr.ariSave.hasPathAccount == tr.db.ariApi.hasPathAccount

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc topInst*(tr: TraceRecorderRef): TraceLogInstRef =
  ## Get top level logger
  tr.log[^1]

func truncated*(log: TraceLogInstRef): bool =
  ## True if journal was truncated due to collecting too many entries
  log.truncated

func level*(log: TraceLogInstRef): int =
  ## Non-negative stack level of this log instance.
  log.level

func journal*(log: TraceLogInstRef): KeyedQueue[seq[byte],TraceDataItemRef] =
  ## Get the journal
  log.journal

func db*(log: TraceLogInstRef): CoreDbRef =
  ## Get database
  log.base.db

iterator kvtLog*(log: TraceLogInstRef): (seq[byte],TraceDataItemRef) =
  ## Extract `Kvt` journal
  for p in log.journal.nextPairs:
    let pfx = TracePfx(p.key[0])
    if pfx == TrpKvt:
      yield (p.key[1..^1], p.data)

proc kvtLogBlobs*(log: TraceLogInstRef): seq[(seq[byte],seq[byte])] =
  log.kvtLog.toSeq
    .filterIt(it[1].kind==TdtBlob)
    .mapIt((it[0],it[1].blob))

iterator ariLog*(log: TraceLogInstRef): (VertexID,seq[byte],TraceDataItemRef) =
  ## Extract `Aristo` journal
  for p in log.journal.nextPairs:
    let
      pfx = TracePfx(p.key[0])
      (root, key) = block:
        case pfx:
        of TrpAccounts,TrpStorage:
          (VertexID(1), p.key[1..^1])
        else:
          continue
    yield (root, key, p.data)

proc pop*(log: TraceLogInstRef): bool =
  ## Reduce the logger stack by the argument descriptor `log`, which must be
  ## the top entry on the stack. The function returns `true` if the descriptor
  ## `log` was not the only one on the stack and the stack was reduced by the
  ## top entry. Otherwise nothing is done and `false` is returned.
  ##
  let tr = log.base
  doAssert log.level == tr.topLevel()
  if 1 < tr.log.len: # Always leave one instance on stack
    tr.log.setLen(tr.log.len - 1)
    return true

proc push*(tr: TraceRecorderRef) =
  ## Push overlay logger instance
  tr.log.add TraceLogInstRef(base: tr, level: tr.log.len)
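`push` and `pop` give a small stack discipline around capture sessions; a usage sketch with `tr` a `TraceRecorderRef`:

    tr.push()                    # open a nested capture frame
    # ... traced operations land in tr.topInst().journal ...
    doAssert tr.topInst().pop()  # drop it again; the base frame always remains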

# ------------------------------------------------------------------------------
# Public constructor/destructor
# ------------------------------------------------------------------------------

proc init*(
    T: type TraceRecorderRef; # Recorder desc to instantiate
    db: CoreDbRef;            # Database
      ): T =
  ## Constructor, create initial/base tracer descriptor
  result = T(db: db)
  result.push()
  result.kvtTraceRecorder()
  result.ariTraceRecorder()

proc restore*(tr: TraceRecorderRef) =
  ## Restore production API.
  tr.db.kvtApi = tr.kvtSave
  tr.db.ariApi = tr.ariSave
  tr[].reset
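The recorder lifecycle is symmetric; a sketch assuming a `CoreDbRef` named `db` built with the capture journal enabled:

    let tr = TraceRecorderRef.init(db)  # swaps in tracing copies of kvtApi/ariApi
    # ... exercise the database; reads and writes are journalled ...
    for (key, blob) in tr.topInst().kvtLogBlobs():
      discard (key, blob)               # captured KVT writes, e.g. for replay
    tr.restore()                        # reinstall the saved production APIs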
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -15,42 +15,19 @@ import
|
||||
eth/common/[accounts, base, hashes],
|
||||
../../constants,
|
||||
../[kvt, aristo],
|
||||
./base/[api_tracking, base_config, base_desc, base_helpers]
|
||||
./base/[base_desc, base_helpers]
|
||||
|
||||
export
|
||||
CoreDbAccRef,
|
||||
CoreDbAccount,
|
||||
CoreDbCtxRef,
|
||||
CoreDbErrorCode,
|
||||
CoreDbError,
|
||||
CoreDbKvtRef,
|
||||
CoreDbPersistentTypes,
|
||||
CoreDbRef,
|
||||
CoreDbTxRef,
|
||||
CoreDbType
|
||||
|
||||
when CoreDbEnableApiTracking:
|
||||
import
|
||||
chronicles
|
||||
logScope:
|
||||
topics = "core_db"
|
||||
const
|
||||
logTxt = "API"
|
||||
|
||||
when CoreDbEnableProfiling:
|
||||
export
|
||||
CoreDbFnInx,
|
||||
CoreDbProfListRef
|
||||
|
||||
when CoreDbEnableCaptJournal:
|
||||
import
|
||||
./backend/aristo_trace
|
||||
type
|
||||
CoreDbCaptRef* = distinct TraceLogInstRef
|
||||
func `$`(p: CoreDbCaptRef): string =
|
||||
if p.distinctBase.isNil: "<nil>" else: "<capt>"
|
||||
else:
|
||||
import
|
||||
import
|
||||
../aristo/[
|
||||
aristo_delete, aristo_desc, aristo_fetch, aristo_merge, aristo_part,
|
||||
aristo_persist, aristo_tx_frame],
|
||||
@ -77,8 +54,8 @@ proc baseTxFrame*(db: CoreDbRef): CoreDbTxRef =
|
||||
|
||||
CoreDbTxRef(
|
||||
ctx: db.ctx,
|
||||
aTx: db.ctx.parent.ariApi.call(baseTxFrame, db.ctx.mpt),
|
||||
kTx: db.ctx.parent.kvtApi.call(baseTxFrame, db.ctx.kvt))
|
||||
aTx: db.ctx.mpt.baseTxFrame(),
|
||||
kTx: db.ctx.kvt.baseTxFrame())
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public base descriptor methods
|
||||
@ -92,15 +69,17 @@ proc finish*(db: CoreDbRef; eradicate = false) =
|
||||
## depends on the backend database. Currently, only the `AristoDbRocks` type
|
||||
## backend removes the database on `true`.
|
||||
##
|
||||
db.setTrackNewApi BaseFinishFn
|
||||
CoreDbKvtRef(db.ctx).call(finish, db.ctx.kvt, eradicate)
|
||||
CoreDbAccRef(db.ctx).call(finish, db.ctx.mpt, eradicate)
|
||||
db.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
db.ctx.kvt.finish(eradicate)
|
||||
db.ctx.mpt.finish(eradicate)
|
||||
|
||||
proc `$$`*(e: CoreDbError): string =
|
||||
## Pretty print error symbol
|
||||
##
|
||||
e.toStr()
|
||||
result = $e.error & "("
|
||||
result &= (if e.isAristo: "Aristo" else: "Kvt")
|
||||
result &= ", ctx=" & $e.ctx & ", error="
|
||||
result &= (if e.isAristo: $e.aErr else: $e.kErr)
|
||||
result &= ")"
|
||||
|
||||
proc persist*(
    db: CoreDbRef;
@@ -109,8 +88,6 @@ proc persist*(
  ## This function persists changes up to and including the given frame to the
  ## database.
  ##
  db.setTrackNewApi BasePersistFn

  # TODO these backend functions could maybe be hidden behind an abstraction
  # layer - or... the abstraction layer could be removed from everywhere
  # else since it's not really needed
@@ -128,148 +105,103 @@ proc persist*(
    # kvt changes written to memory but not to disk because of an aristo
    # error), we have to panic instead.

    CoreDbKvtRef(db.ctx).call(persist, db.ctx.kvt, kvtBatch[], txFrame.kTx)
    CoreDbAccRef(db.ctx).call(persist, db.ctx.mpt, mptBatch[], txFrame.aTx)
    db.ctx.kvt.persist(kvtBatch[], txFrame.kTx)
    db.ctx.mpt.persist(mptBatch[], txFrame.aTx)

    db.defCtx.kvt.backend.putEndFn(kvtBatch[]).isOkOr:
      raiseAssert $api & ": " & $error
      raiseAssert "" & ": " & $error

    db.defCtx.mpt.backend.putEndFn(mptBatch[]).isOkOr:
      raiseAssert $api & ": " & $error
      raiseAssert "" & ": " & $error

  else:
    discard kvtBatch.expect($api & ": should always be able to create batch")
    discard mptBatch.expect($api & ": should always be able to create batch")

  db.ifTrackNewApi: debug logTxt, api, elapsed, blockNumber, result
    discard kvtBatch.expect("" & ": should always be able to create batch")
    discard mptBatch.expect("" & ": should always be able to create batch")
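For orientation, a minimal sketch of how the reworked `persist()` path is meant to be driven from calling code. The exact `persist()` parameter list is truncated in this hunk, so the call shape below is an assumption, as is the `db: CoreDbRef` handle:

  # Hypothetical driver code, not part of this change.
  let txFrame = db.ctx.txFrameBegin(nil)   # open a frame on top of the base
  # ... apply account/storage/kvt changes to txFrame ...
  txFrame.checkpoint(blockNumber)          # tag the frame with a block number
  db.persist(txFrame)                      # flush kvt and mpt batches to disk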
proc stateBlockNumber*(db: CoreDbTxRef): BlockNumber =
  ## This function returns the block number stored with the latest `persist()`
  ## directive.
  ##
  db.setTrackNewApi BaseStateBlockNumberFn
  result = block:
    let rc = db.ctx.parent.ariApi.call(fetchLastCheckpoint, db.aTx)
    if rc.isOk:
      rc.value.BlockNumber
    else:
      0u64
  db.ifTrackNewApi: debug logTxt, api, elapsed, result
  let rc = db.aTx.fetchLastCheckpoint().valueOr:
    return 0u64

  rc.BlockNumber
proc verify*(
    db: CoreDbRef | CoreDbAccRef;
    db: CoreDbRef;
    proof: openArray[seq[byte]];
    root: Hash32;
    path: Hash32;
      ): CoreDbRc[Opt[seq[byte]]] =
  ## Variant of `verify()`.
  template mpt: untyped =
    when db is CoreDbRef:
      CoreDbAccRef(db.defCtx)
    else:
      db
  mpt.setTrackNewApi BaseVerifyFn
  result = block:
    let rc = mpt.call(partUntwigPath, proof, root, path)
    if rc.isOk:
      ok(rc.value)
    else:
      err(rc.error.toError($api, ProofVerify))
  mpt.ifTrackNewApi: debug logTxt, api, elapsed, result
  let rc = partUntwigPath(proof, root, path).valueOr:
    return err(error.toError("", ProofVerify))

  ok(rc)
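A hedged sketch of calling the streamlined `verify()` above. Where the proof blobs come from (for instance an `eth_getProof`-style response) is an assumption, as are the caller's parameter names:

  proc checkProof(db: CoreDbRef; proofNodes: seq[seq[byte]];
                  stateRoot, keyPath: Hash32) =
    let leaf = db.verify(proofNodes, stateRoot, keyPath).valueOr:
      echo "proof rejected: ", $$error
      return
    if leaf.isSome:
      echo "inclusion proven, payload of ", leaf.get.len, " bytes"
    else:
      echo "exclusion proven"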
# ------------------------------------------------------------------------------
# Public key-value table methods
# ------------------------------------------------------------------------------

proc getKvt*(ctx: CoreDbCtxRef): CoreDbKvtRef =
  ## This function retrieves the common base object shared with other KVT
  ## descriptors. Any changes are immediately visible to subscribers.
  ## On destruction (when the constructed object gets out of scope), changes
  ## are not saved to the backend database but are still cached and available.
  ##
  CoreDbKvtRef(ctx)

# ----------- KVT ---------------

proc get*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[seq[byte]] =
  ## This function always returns a non-empty `seq[byte]` or an error code.
  kvt.setTrackNewApi KvtGetFn
  result = block:
    let rc = kvt.ctx.parent.kvtApi.call(get, kvt.kTx, key)
  let rc = kvt.kTx.get(key)
  if rc.isOk:
    ok(rc.value)
  elif rc.error == GetNotFound:
    err(rc.error.toError($api, KvtNotFound))
    err(rc.error.toError("", KvtNotFound))
  else:
    err(rc.error.toError $api)
  kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
    err(rc.error.toError(""))

proc getOrEmpty*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[seq[byte]] =
  ## Variant of `get()` returning an empty `seq[byte]` if the key is not found
  ## on the database.
  ##
  kvt.setTrackNewApi KvtGetOrEmptyFn
  result = block:
    let rc = kvt.ctx.parent.kvtApi.call(get, kvt.kTx, key)
  let rc = kvt.kTx.get(key)
  if rc.isOk:
    ok(rc.value)
  elif rc.error == GetNotFound:
    CoreDbRc[seq[byte]].ok(EmptyBlob)
  else:
    err(rc.error.toError $api)
  kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
    err(rc.error.toError(""))

proc len*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[int] =
  ## This function returns the size of the value associated with `key`.
  kvt.setTrackNewApi KvtLenFn
  result = block:
    let rc = kvt.ctx.parent.kvtApi.call(len, kvt.kTx, key)
  let rc = kvt.kTx.len(key)
  if rc.isOk:
    ok(rc.value)
  elif rc.error == GetNotFound:
    err(rc.error.toError($api, KvtNotFound))
    err(rc.error.toError("", KvtNotFound))
  else:
    err(rc.error.toError $api)
  kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
    err(rc.error.toError(""))

proc del*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[void] =
  kvt.setTrackNewApi KvtDelFn
  result = block:
    let rc = kvt.ctx.parent.kvtApi.call(del, kvt.kTx, key)
    if rc.isOk:
  kvt.kTx.del(key).isOkOr:
    return err(error.toError(""))

  ok()
    else:
      err(rc.error.toError $api)
  kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
proc put*(
    kvt: CoreDbTxRef;
    key: openArray[byte];
    val: openArray[byte];
      ): CoreDbRc[void] =
  kvt.setTrackNewApi KvtPutFn
  result = block:
    let rc = kvt.ctx.parent.kvtApi.call(put, kvt.kTx, key, val)
    if rc.isOk:
  kvt.kTx.put(key, val).isOkOr:
    return err(error.toError(""))

  ok()
    else:
      err(rc.error.toError $api)
  kvt.ifTrackNewApi:
    debug logTxt, api, elapsed, key=key.toStr, val=val.toLenStr, result

proc hasKeyRc*(kvt: CoreDbTxRef; key: openArray[byte]): CoreDbRc[bool] =
  ## For the argument `key` return `true` if `get()` returned a value on
  ## that argument, `false` if it returned `GetNotFound`, and an error
  ## otherwise.
  ##
  kvt.setTrackNewApi KvtHasKeyRcFn
  result = block:
    let rc = kvt.ctx.parent.kvtApi.call(hasKeyRc, kvt.kTx, key)
    if rc.isOk:
      ok(rc.value)
    else:
      err(rc.error.toError $api)
  kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
  let rc = kvt.kTx.hasKeyRc(key).valueOr:
    return err(error.toError(""))

  ok(rc)

proc hasKey*(kvt: CoreDbTxRef; key: openArray[byte]): bool =
  ## Simplified version of `hasKeyRc` where `false` is returned instead of
@@ -278,21 +210,12 @@ proc hasKey*(kvt: CoreDbTxRef; key: openArray[byte]): bool =
  ## This function prototype is in line with the `hasKey` function for
  ## `Tables`.
  ##
  kvt.setTrackNewApi KvtHasKeyFn
  result = kvt.ctx.parent.kvtApi.call(hasKeyRc, kvt.kTx, key).valueOr: false
  kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
  result = kvt.kTx.hasKeyRc(key).valueOr: false
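Taken together, the simplified KVT calls above support a short round trip on a transaction frame; the key and value bytes below are made up for illustration:

  # Minimal round trip, given a frame `kvt: CoreDbTxRef`.
  let key = @[1'u8, 2, 3]
  kvt.put(key, @[4'u8, 5]).isOkOr:
    raiseAssert "put failed"
  doAssert kvt.hasKey(key)
  let val = kvt.get(key).expect("value was just stored")
  doAssert kvt.len(key).expect("size query") == val.len
  kvt.del(key).isOkOr:
    raiseAssert "del failed"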
# ------------------------------------------------------------------------------
# Public methods for accounts
# ------------------------------------------------------------------------------

proc getAccounts*(ctx: CoreDbCtxRef): CoreDbAccRef =
  ## Accounts column constructor, will defect on failure.
  ##
  ctx.setTrackNewApi CtxGetAccountsFn
  result = CoreDbAccRef(ctx)
  ctx.ifTrackNewApi: debug logTxt, api, elapsed

# ----------- accounts ---------------

proc proof*(
@@ -305,14 +228,10 @@ proc proof*(
  ## and `false` otherwise. In the latter case, the chain of rlp-encoded blobs
  ## holds the nodes proving that the `key` path does not exist.
  ##
  acc.setTrackNewApi AccProofFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(partAccountTwig, acc.aTx, accPath)
    if rc.isOk:
      ok(rc.value)
    else:
      err(rc.error.toError($api, ProofCreate))
  acc.ifTrackNewApi: debug logTxt, api, elapsed, result
  let rc = acc.aTx.partAccountTwig(accPath).valueOr:
    return err(error.toError("", ProofCreate))

  ok(rc)
proc fetch*(
    acc: CoreDbTxRef;
@@ -321,16 +240,13 @@ proc fetch*(
  ## Fetch the account data record for the particular account indexed by
  ## the key `accPath`.
  ##
  acc.setTrackNewApi AccFetchFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(fetchAccountRecord, acc.aTx, accPath)
  let rc = acc.aTx.fetchAccountRecord(accPath)
  if rc.isOk:
    ok(rc.value)
  elif rc.error == FetchPathNotFound:
    err(rc.error.toError($api, AccNotFound))
    err(rc.error.toError("", AccNotFound))
  else:
    err(rc.error.toError $api)
  acc.ifTrackNewApi: debug logTxt, api, elapsed, accPath=($$accPath), result
    err(rc.error.toError(""))

proc delete*(
    acc: CoreDbTxRef;
@@ -339,18 +255,14 @@ proc delete*(
  ## Delete the particular account indexed by the key `accPath`. This
  ## will also destroy an associated storage area.
  ##
  acc.setTrackNewApi AccDeleteFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(deleteAccountRecord, acc.aTx, accPath)
  let rc = acc.aTx.deleteAccountRecord(accPath)
  if rc.isOk:
    ok()
  elif rc.error == DelPathNotFound:
    # TODO: Would it be consequent to just return `ok()` here?
    err(rc.error.toError($api, AccNotFound))
    err(rc.error.toError("", AccNotFound))
  else:
    err(rc.error.toError $api)
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed, accPath=($$accPath), result
    err(rc.error.toError(""))
proc clearStorage*(
    acc: CoreDbTxRef;
@@ -359,15 +271,11 @@ proc clearStorage*(
  ## Delete all data slots from the storage area associated with the
  ## particular account indexed by the key `accPath`.
  ##
  acc.setTrackNewApi AccClearStorageFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(deleteStorageTree, acc.aTx, accPath)
  let rc = acc.aTx.deleteStorageTree(accPath)
  if rc.isOk or rc.error in {DelStoRootMissing,DelStoAccMissing}:
    ok()
  else:
    err(rc.error.toError $api)
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed, accPath=($$accPath), result
    err(rc.error.toError(""))

proc merge*(
    acc: CoreDbTxRef;
@@ -377,15 +285,10 @@ proc merge*(
  ## Add or update the argument account data record `account`. Note that the
  ## `account` argument uniquely identifies the particular account address.
  ##
  acc.setTrackNewApi AccMergeFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(mergeAccountRecord, acc.aTx, accPath, accRec)
    if rc.isOk:
  acc.aTx.mergeAccountRecord(accPath, accRec).isOkOr:
    return err(error.toError(""))

  ok()
    else:
      err(rc.error.toError $api)
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed, accPath=($$accPath), result

proc hasPath*(
    acc: CoreDbTxRef;
@@ -393,27 +296,18 @@ proc hasPath*(
      ): CoreDbRc[bool] =
  ## Would be named `contains` if it returned `bool` rather than `Result[]`.
  ##
  acc.setTrackNewApi AccHasPathFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(hasPathAccount, acc.aTx, accPath)
    if rc.isOk:
      ok(rc.value)
    else:
      err(rc.error.toError $api)
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed, accPath=($$accPath), result
  let rc = acc.aTx.hasPathAccount(accPath).valueOr:
    return err(error.toError(""))

  ok(rc)
proc getStateRoot*(acc: CoreDbTxRef): CoreDbRc[Hash32] =
  ## This function retrieves the Merkle state hash of the accounts
  ## column (if available.)
  acc.setTrackNewApi AccStateFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(fetchStateRoot, acc.aTx)
    if rc.isOk:
      ok(rc.value)
    else:
      err(rc.error.toError $api)
  acc.ifTrackNewApi: debug logTxt, api, elapsed, result
  let rc = acc.aTx.fetchStateRoot().valueOr:
    return err(error.toError(""))

  ok(rc)
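Combined, the account-level calls above support a simple round trip. The `keccak256` path derivation and the `u256` literal helper are assumptions based on the surrounding codebase, and `address` is a made-up 20-byte account address:

  # Illustrative only, given a frame `acc: CoreDbTxRef`.
  let accPath = keccak256(address.data)      # assumed path derivation
  acc.merge(accPath, AristoAccount(nonce: 1, balance: 100.u256)).isOkOr:
    raiseAssert "merge failed"
  let rec = acc.fetch(accPath).expect("record was just merged")
  doAssert acc.hasPath(accPath).expect("query should succeed")
  echo "new state root: ", acc.getStateRoot().expect("root available")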
# ------------ storage ---------------

@@ -432,14 +326,10 @@ proc slotProof*(
  ## Note that the function always returns an error unless the `accPath` is
  ## valid.
  ##
  acc.setTrackNewApi AccSlotProofFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(partStorageTwig, acc.aTx, accPath, stoPath)
    if rc.isOk:
      ok(rc.value)
    else:
      err(rc.error.toError($api, ProofCreate))
  acc.ifTrackNewApi: debug logTxt, api, elapsed, result
  let rc = acc.aTx.partStorageTwig(accPath, stoPath).valueOr:
    return err(error.toError("", ProofCreate))

  ok(rc)
proc slotFetch*(
    acc: CoreDbTxRef;
@@ -447,18 +337,13 @@ proc slotFetch*(
    stoPath: Hash32;
      ): CoreDbRc[UInt256] =
  ## Like `fetch()` but with cascaded index `(accPath,slot)`.
  acc.setTrackNewApi AccSlotFetchFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(fetchStorageData, acc.aTx, accPath, stoPath)
  let rc = acc.aTx.fetchStorageData(accPath, stoPath)
  if rc.isOk:
    ok(rc.value)
  elif rc.error == FetchPathNotFound:
    err(rc.error.toError($api, StoNotFound))
    err(rc.error.toError("", StoNotFound))
  else:
    err(rc.error.toError $api)
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed, accPath=($$accPath),
      stoPath=($$stoPath), result
    err(rc.error.toError(""))

proc slotDelete*(
    acc: CoreDbTxRef;
@@ -466,20 +351,15 @@ proc slotDelete*(
    stoPath: Hash32;
      ): CoreDbRc[void] =
  ## Like `delete()` but with cascaded index `(accPath,slot)`.
  acc.setTrackNewApi AccSlotDeleteFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(deleteStorageData, acc.aTx, accPath, stoPath)
  let rc = acc.aTx.deleteStorageData(accPath, stoPath)
  if rc.isOk or rc.error == DelStoRootMissing:
    # The second `if` clause is insane but legit: A storage column was
    # announced for an account but no data have been added, yet.
    ok()
  elif rc.error == DelPathNotFound:
    err(rc.error.toError($api, StoNotFound))
    err(rc.error.toError("", StoNotFound))
  else:
    err(rc.error.toError $api)
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed, accPath=($$accPath),
      stoPath=($$stoPath), result
    err(rc.error.toError(""))
proc slotHasPath*(
    acc: CoreDbTxRef;
@@ -487,16 +367,10 @@ proc slotHasPath*(
    stoPath: Hash32;
      ): CoreDbRc[bool] =
  ## Like `hasPath()` but with cascaded index `(accPath,slot)`.
  acc.setTrackNewApi AccSlotHasPathFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(hasPathStorage, acc.aTx, accPath, stoPath)
    if rc.isOk:
      ok(rc.value)
    else:
      err(rc.error.toError $api)
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed, accPath=($$accPath),
      stoPath=($$stoPath), result
  let rc = acc.aTx.hasPathStorage(accPath, stoPath).valueOr:
    return err(error.toError(""))

  ok(rc)

proc slotMerge*(
    acc: CoreDbTxRef;
@@ -505,16 +379,10 @@ proc slotMerge*(
    stoData: UInt256;
      ): CoreDbRc[void] =
  ## Like `merge()` but with cascaded index `(accPath,slot)`.
  acc.setTrackNewApi AccSlotMergeFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(mergeStorageData, acc.aTx, accPath, stoPath, stoData)
    if rc.isOk:
  acc.aTx.mergeStorageData(accPath, stoPath, stoData).isOkOr:
    return err(error.toError(""))

  ok()
    else:
      err(rc.error.toError $api)
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed, accPath=($$accPath),
      stoPath=($$stoPath), stoData, result
proc slotStorageRoot*(
    acc: CoreDbTxRef;
@@ -524,15 +392,10 @@ proc slotStorageRoot*(
  ## column (if available) related to the account indexed by the key
  ## `accPath`.
  ##
  acc.setTrackNewApi AccSlotStorageRootFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(fetchStorageRoot, acc.aTx, accPath)
    if rc.isOk:
      ok(rc.value)
    else:
      err(rc.error.toError $api)
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed, accPath=($$accPath), result
  let rc = acc.aTx.fetchStorageRoot(accPath).valueOr:
    return err(error.toError(""))

  ok(rc)

proc slotStorageEmpty*(
    acc: CoreDbTxRef;
@@ -541,30 +404,20 @@ proc slotStorageEmpty*(
  ## This function returns `true` if the storage data column is empty or
  ## missing.
  ##
  acc.setTrackNewApi AccSlotStorageEmptyFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(hasStorageData, acc.aTx, accPath)
    if rc.isOk:
      ok(not rc.value)
    else:
      err(rc.error.toError $api)
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed, accPath=($$accPath), result
  let rc = acc.aTx.hasStorageData(accPath).valueOr:
    return err(error.toError(""))

  ok(not rc)

proc slotStorageEmptyOrVoid*(
    acc: CoreDbTxRef;
    accPath: Hash32;
      ): bool =
  ## Convenience wrapper, returns `true` where `slotStorageEmpty()` would fail.
  acc.setTrackNewApi AccSlotStorageEmptyOrVoidFn
  result = block:
    let rc = acc.ctx.parent.ariApi.call(hasStorageData, acc.aTx, accPath)
    if rc.isOk:
      not rc.value
    else:
      true
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed, accPath=($$accPath), result
  let rc = acc.aTx.hasStorageData(accPath).valueOr:
    return true

  not rc
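The slot-level variants compose the same way as the account calls. The slot-key derivation below is an assumption, as is the `slotIndex` variable:

  # Illustrative storage round trip for one account path `accPath`.
  let stoPath = keccak256(slotIndex.toBytesBE)   # assumed slot-key hashing
  acc.slotMerge(accPath, stoPath, 42.u256).isOkOr:
    raiseAssert "slotMerge failed"
  doAssert acc.slotFetch(accPath, stoPath).expect("slot stored") == 42.u256
  doAssert not acc.slotStorageEmpty(accPath).expect("account exists")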
# ------------- other ----------------

@@ -577,20 +430,14 @@ proc recast*(
  ## of an account statement. This conversion may fail if the storage colState
  ## hash (see `slotStorageRoot()` above) is currently unavailable.
  ##
  acc.setTrackNewApi AccRecastFn
  let rc = acc.ctx.parent.ariApi.call(fetchStorageRoot, acc.aTx, accPath)
  result = block:
    if rc.isOk:
  let rc = acc.aTx.fetchStorageRoot(accPath).valueOr:
    return err(error.toError(""))

  ok Account(
    nonce: accRec.nonce,
    balance: accRec.balance,
    codeHash: accRec.codeHash,
    storageRoot: rc.value)
    else:
      err(rc.error.toError $api)
  acc.ifTrackNewApi:
    let storageRoot = if rc.isOk: $$(rc.value) else: "n/a"
    debug logTxt, api, elapsed, accPath=($$accPath), storageRoot, result
    storageRoot: rc)
# ------------------------------------------------------------------------------
# Public transaction related methods
@@ -599,86 +446,23 @@

proc txFrameBegin*(ctx: CoreDbCtxRef, parent: CoreDbTxRef): CoreDbTxRef =
  ## Constructor
  ##
  ctx.setTrackNewApi BaseNewTxFn
  let
    kTx = CoreDbKvtRef(ctx).call(txFrameBegin, ctx.kvt, if parent != nil: parent.kTx else: nil)
    aTx = CoreDbAccRef(ctx).call(txFrameBegin, ctx.mpt, if parent != nil: parent.aTx else: nil)
    kTx = ctx.kvt.txFrameBegin(if parent != nil: parent.kTx else: nil)
    aTx = ctx.mpt.txFrameBegin(if parent != nil: parent.aTx else: nil)

  result = ctx.bless CoreDbTxRef(kTx: kTx, aTx: aTx)
  ctx.ifTrackNewApi:
    let newLevel = CoreDbAccRef(ctx).call(level, ctx.mpt)
    debug logTxt, api, elapsed, newLevel
  ctx.bless CoreDbTxRef(kTx: kTx, aTx: aTx)

proc checkpoint*(tx: CoreDbTxRef, blockNumber: BlockNumber) =
  tx.setTrackNewApi TxCommitFn:
    let prvLevel {.used.} = CoreDbAccRef(tx.ctx).call(level, tx.aTx)
  CoreDbAccRef(tx.ctx).call(checkpoint, tx.aTx, blockNumber)
  tx.ifTrackNewApi: debug logTxt, api, elapsed, prvLevel
  tx.aTx.checkpoint(blockNumber)

proc dispose*(tx: CoreDbTxRef) =
  tx.setTrackNewApi TxRollbackFn:
    let prvLevel {.used.} = CoreDbAccRef(tx.ctx).call(level, tx.aTx)
  CoreDbAccRef(tx.ctx).call(dispose, tx.aTx)
  CoreDbKvtRef(tx.ctx).call(dispose, tx.kTx)
  tx.aTx.dispose()
  tx.kTx.dispose()
  tx[].reset()
  tx.ifTrackNewApi: debug logTxt, api, elapsed, prvLevel

proc txFrameBegin*(tx: CoreDbTxRef): CoreDbTxRef =
  tx.ctx.txFrameBegin(tx)
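The frame constructors above stack: a child frame sees its parent's pending writes and can be thrown away without touching it. A short sketch, assuming a live `db: CoreDbRef`:

  let base = db.baseTxFrame()            # bottom frame, backed by disk state
  let frame1 = db.ctx.txFrameBegin(base)
  let frame2 = frame1.txFrameBegin()     # nested frame on top of frame1
  # ... speculative work in frame2 ...
  frame2.dispose()                       # drop frame2; frame1 is unaffected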
# ------------------------------------------------------------------------------
# Public tracer methods
# ------------------------------------------------------------------------------

when CoreDbEnableCaptJournal:
  proc pushCapture*(db: CoreDbRef): CoreDbCaptRef =
    ## Push a new capture journal instance and return a handle to it.
    ##
    db.setTrackNewApi BasePushCaptureFn
    if db.tracerHook.isNil:
      db.tracerHook = TraceRecorderRef.init(db)
    else:
      TraceRecorderRef(db.tracerHook).push()
    result = TraceRecorderRef(db.tracerHook).topInst().CoreDbCaptRef
    db.ifTrackNewApi: debug logTxt, api, elapsed, result

  proc level*(cpt: CoreDbCaptRef): int =
    ## Getter, returns the positive number of stacked instances.
    ##
    let log = cpt.distinctBase
    log.db.setTrackNewApi CptLevelFn
    result = log.level()
    log.db.ifTrackNewApi: debug logTxt, api, elapsed, result

  proc kvtLog*(cpt: CoreDbCaptRef): seq[(seq[byte],seq[byte])] =
    ## Getter, returns the `Kvt` logger list for the argument instance.
    ##
    let log = cpt.distinctBase
    log.db.setTrackNewApi CptKvtLogFn
    result = log.kvtLogBlobs()
    log.db.ifTrackNewApi: debug logTxt, api, elapsed

  proc pop*(cpt: CoreDbCaptRef) =
    ## Explicitly stop recording the current tracer instance and reset to
    ## previous level.
    ##
    let db = cpt.distinctBase.db
    db.setTrackNewApi CptPopFn
    if not cpt.distinctBase.pop():
      TraceRecorderRef(db.tracerHook).restore()
      db.tracerHook = TraceRecorderRef(nil)
    db.ifTrackNewApi: debug logTxt, api, elapsed, cpt

  proc stopCapture*(db: CoreDbRef) =
    ## Discard capture instances. This function is equivalent to `pop()`-ing
    ## all instances.
    ##
    db.setTrackNewApi CptStopCaptureFn
    if not db.tracerHook.isNil:
      TraceRecorderRef(db.tracerHook).restore()
      db.tracerHook = TraceRecorderRef(nil)
    db.ifTrackNewApi: debug logTxt, api, elapsed
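When the capture journal is compiled in, the procs above combine into a simple record-and-inspect pattern; the traced operation itself is left abstract here:

  when CoreDbEnableCaptJournal:
    let cpt = db.pushCapture()       # start recording KVT writes
    # ... run the operation to be traced ...
    for (key, val) in cpt.kvtLog():  # inspect what was written
      echo key.len, " key bytes -> ", val.len, " value bytes"
    cpt.pop()                        # stop recording, restore previous level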
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

@@ -1,219 +0,0 @@
# Nimbus
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

{.push raises: [].}

import
  std/[sequtils, strutils, times, typetraits],
  eth/common/[accounts, base, hashes],
  results,
  stew/byteutils,
  ../../aristo/aristo_profile,
  "."/[base_config, base_desc]

type
  Elapsed* = distinct Duration
    ## Needed for local `$` as it would be ambiguous for `Duration`

  CoreDbApiTrackRef* =
    CoreDbRef | CoreDbKvtRef | CoreDbCtxRef | CoreDbAccRef |
    CoreDbTxRef

  CoreDbFnInx* = enum
    ## Profiling table index
    SummaryItem = "total"

    AccClearStorageFn = "clearStorage"
    AccDeleteFn = "acc/delete"
    AccFetchFn = "acc/fetch"
    AccHasPathFn = "acc/hasPath"
    AccMergeFn = "acc/merge"
    AccProofFn = "acc/proof"
    AccRecastFn = "recast"
    AccStateFn = "acc/state"

    AccSlotFetchFn = "slotFetch"
    AccSlotDeleteFn = "slotDelete"
    AccSlotHasPathFn = "slotHasPath"
    AccSlotMergeFn = "slotMerge"
    AccSlotProofFn = "slotProof"
    AccSlotStorageRootFn = "slotStorageRoot"
    AccSlotStorageEmptyFn = "slotStorageEmpty"
    AccSlotStorageEmptyOrVoidFn = "slotStorageEmptyOrVoid"
    AccSlotPairsIt = "slotPairs"

    BaseFinishFn = "finish"
    BaseLevelFn = "level"
    BasePushCaptureFn = "pushCapture"
    BaseNewTxFn = "txFrameBegin"
    BasePersistFn = "persist"
    BaseStateBlockNumberFn = "stateBlockNumber"
    BaseVerifyFn = "verify"

    CptKvtLogFn = "kvtLog"
    CptLevelFn = "level"
    CptPopFn = "pop"
    CptStopCaptureFn = "stopCapture"

    CtxGetAccountsFn = "getAccounts"
    CtxGetGenericFn = "getGeneric"

    KvtDelFn = "del"
    KvtGetFn = "get"
    KvtGetOrEmptyFn = "getOrEmpty"
    KvtHasKeyRcFn = "hasKeyRc"
    KvtHasKeyFn = "hasKey"
    KvtLenFn = "len"
    KvtPairsIt = "pairs"
    KvtPutFn = "put"

    TxCommitFn = "commit"
    TxDisposeFn = "dispose"
    TxRollbackFn = "rollback"
    TxSaveDisposeFn = "safeDispose"

func toStr*(e: CoreDbError): string {.gcsafe.}

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

func oaToStr(w: openArray[byte]): string =
  w.toHex.toLowerAscii

# ------------------------------------------------------------------------------
# Public API logging helpers
# ------------------------------------------------------------------------------

func toStr(w: Hash32): string =
  if w == EMPTY_ROOT_HASH: "EMPTY_ROOT_HASH" else: w.data.oaToStr

func toStr(ela: Duration): string =
  aristo_profile.toStr(ela)

func toStr*(rc: CoreDbRc[int]|CoreDbRc[UInt256]): string =
  if rc.isOk: "ok(" & $rc.value & ")" else: "err(" & rc.error.toStr & ")"

func toStr(rc: CoreDbRc[bool]): string =
  if rc.isOk: "ok(" & $rc.value & ")" else: "err(" & rc.error.toStr & ")"

func toStr(rc: CoreDbRc[void]): string =
  if rc.isOk: "ok()" else: "err(" & rc.error.toStr & ")"

func toStr(rc: CoreDbRc[seq[byte]]): string =
  if rc.isOk: "ok(seq[byte,#" & $rc.value.len & "])"
  else: "err(" & rc.error.toStr & ")"

func toStr(rc: CoreDbRc[seq[seq[byte]]]): string =
  if rc.isOk: "ok([" & rc.value.mapIt("[#" & $it.len & "]").join(",") & "])"
  else: "err(" & rc.error.toStr & ")"

func toStr(rc: CoreDbRc[(seq[seq[byte]],bool)]): string =
  if rc.isOk: "ok([" & rc.value[0].mapIt("[#" & $it.len & "]").join(",") &
    "]," & $rc.value[1] & ")"
  else: "err(" & rc.error.toStr & ")"

func toStr(rc: CoreDbRc[Hash32]): string =
  if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")"

func toStr(rc: CoreDbRc[Account]): string =
  if rc.isOk: "ok(Account)" else: "err(" & rc.error.toStr & ")"

func toStr(rc: CoreDbRc[CoreDbAccount]): string =
  if rc.isOk: "ok(AristoAccount)" else: "err(" & rc.error.toStr & ")"

func toStr[T](rc: CoreDbRc[T]; ifOk: static[string]): string =
  if rc.isOk: "ok(" & ifOk & ")" else: "err(" & rc.error.toStr & ")"

func toStr(rc: CoreDbRc[CoreDbRef]): string = rc.toStr "db"
func toStr(rc: CoreDbRc[CoreDbKvtRef]): string = rc.toStr "kvt"
func toStr(rc: CoreDbRc[CoreDbTxRef]): string = rc.toStr "tx"
func toStr(rc: CoreDbRc[CoreDbCtxRef]): string = rc.toStr "ctx"
func toStr(rc: CoreDbRc[CoreDbAccRef]): string = rc.toStr "acc"

# ------------------------------------------------------------------------------
# Public API logging helpers
# ------------------------------------------------------------------------------

func toStr*(e: CoreDbError): string =
  result = $e.error & "("
  result &= (if e.isAristo: "Aristo" else: "Kvt")
  result &= ", ctx=" & $e.ctx & ", error="
  result &= (if e.isAristo: $e.aErr else: $e.kErr)
  result &= ")"

func toStr*(w: openArray[byte]): string =
  w.oaToStr

func toLenStr*(w: openArray[byte]): string =
  if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"
  else: "openArray[" & $w.len & "]"

func `$`*[T](rc: CoreDbRc[T]): string = rc.toStr
func `$`*(t: Elapsed): string = t.Duration.toStr
func `$$`*(h: Hash32): string = h.toStr # otherwise collision w/existing `$`

# ------------------------------------------------------------------------------
# Public new API logging framework
# ------------------------------------------------------------------------------

template setTrackNewApi*(
    w: CoreDbApiTrackRef;
    s: static[CoreDbFnInx];
    code: untyped;
      ) =
  ## Template with code section that will be discarded if logging is
  ## disabled at compile time when `EnableApiTracking` is `false`.
  when CoreDbEnableApiTracking:
    #w.beginNewApi(s)
    when CoreDbEnableProfiling:
      const bnaCtx {.inject.} = s        # Local use only
    let bnaStart {.inject.} = getTime()  # Local use only
    code
  const api {.inject,used.} = s

template setTrackNewApi*(
    w: CoreDbApiTrackRef;
    s: static[CoreDbFnInx];
      ) =
  w.setTrackNewApi(s):
    discard

template ifTrackNewApi*(w: CoreDbApiTrackRef; code: untyped) =
  when CoreDbEnableApiTracking:
    #w.endNewApiIf:
    #  code
    block body:
      when typeof(w) is CoreDbRef:
        let db = w
      elif typeof(w) is CoreDbTxRef:
        let db = w.ctx.parent
        if w.isNil: break body
      else:
        let db = w.distinctBase.parent
        if w.distinctBase.isNil: break body
      when CoreDbEnableProfiling:
        let elapsed {.inject,used.} = (getTime() - bnaStart).Elapsed
        aristo_profile.update(db.profTab, bnaCtx.ord, elapsed.Duration)
      if db.trackCoreDbApi:
        when not CoreDbEnableProfiling: # otherwise use variable above
          let elapsed {.inject,used.} = (getTime() - bnaStart).Elapsed
        code

# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------

func init*(T: type CoreDbProfListRef): T =
  T(list: newSeq[CoreDbProfData](1 + high(CoreDbFnInx).ord))

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
@@ -1,97 +0,0 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

{.push raises: [].}

# Configuration section
const
  EnableApiTracking = false
    ## When enabled, functions using this tracking facility need to import
    ## `chronicles`, as well. Also, some `func` designators might need to
    ## be changed to `proc` for possible side effects.
    ##
    ## Tracking noise is then enabled by setting the flag `trackCoreDbApi` to
    ## `true` in the `CoreDbRef` descriptor.

  AutoValidateDescriptors = defined(release).not or
                            defined(unittest2DisableParamFiltering)
    ## No validation needed for the production suite.
    ##
    ## The `unittest2DisableParamFiltering` flag is coincidentally used by
    ## unit/integration tests which makes it convenient to piggyback on that
    ## for enabling debugging checks.

  EnableApiJumpTable = defined(dbjapi_enabled) or
                       defined(unittest2DisableParamFiltering)
    ## This flag enables the functions jump table even if `EnableApiProfiling`
    ## and `EnableCaptJournal` is set `false` in release mode. This setting
    ## should be used for debugging, only.
    ##
    ## The `unittest2DisableParamFiltering` flag is coincidentally used by
    ## unit/integration tests which makes it convenient to piggyback on that
    ## for providing API jump tables.

  EnableProfiling = false
    ## Enables profiling of the backend if the flags `EnableApiJumpTable`
    ## and `EnableApiTracking` are also set. Profiling will then be enabled
    ## with the flag `trackCoreDbApi` (which also enables extra logging.)

  EnableCaptJournal = true
    ## Enables the tracer facility if the flag `EnableApiJumpTable` is
    ## also set. In that case the capture journal directives like
    ## `newCapture()` will be available.

  NoisyCaptJournal = true
    ## Provide extra logging with the tracer facility if available.


# Exportable constants (leave alone this section)
const
  CoreDbEnableApiTracking* = EnableApiTracking

  CoreDbAutoValidateDescriptors* = AutoValidateDescriptors

  # Api jump table dependent settings:

  CoreDbEnableApiJumpTable* = EnableApiJumpTable

  CoreDbEnableProfiling* = EnableProfiling and CoreDbEnableApiJumpTable

  CoreDbEnableCaptJournal* = EnableCaptJournal and CoreDbEnableApiJumpTable

  CoreDbNoisyCaptJournal* = NoisyCaptJournal and CoreDbEnableCaptJournal


# Support warning about extra compile time options. For production, none of
# the above features should be enabled.
import strutils
const coreDbBaseConfigExtras* = block:
  var s: seq[string]
  when CoreDbEnableApiTracking:
    s.add "logging"
  when CoreDbAutoValidateDescriptors:
    s.add "validate"
  when CoreDbEnableProfiling:
    s.add "profiling"
  when CoreDbEnableCaptJournal:
    when CoreDbNoisyCaptJournal:
      s.add "noisy tracer"
    else:
      s.add "tracer"
  when CoreDbEnableApiJumpTable and
       not CoreDbEnableProfiling and
       not CoreDbEnableCaptJournal:
    s.add "Api jump table"
  if s.len == 0:
    ""
  else:
    "CoreDb(" & s.join(", ") & ")"

# End
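For illustration, with tracking and descriptor validation both compiled in, the block above would evaluate to the string "CoreDb(logging, validate)", and with everything off it is "". A hedged compile-time check:

  # Illustrative only; surfaces the assembled string during the build.
  static:
    when coreDbBaseConfigExtras.len > 0:
      echo "CoreDb compile time options: ", coreDbBaseConfigExtras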
@@ -12,8 +12,7 @@

import
  results,
  "../.."/[aristo, aristo/aristo_profile, kvt],
  ./base_config
  "../.."/[aristo, kvt]

type
  CoreDbType* = enum
@@ -26,12 +25,6 @@ const
    ## List of persistent DB types (currently only a single one)

type
  CoreDbProfListRef* = AristoDbProfListRef
    ## Borrowed from `aristo_profile`, only used in profiling mode

  CoreDbProfData* = AristoDbProfData
    ## Borrowed from `aristo_profile`, only used in profiling mode

  CoreDbRc*[T] = Result[T,CoreDbError]

  CoreDbAccount* = AristoAccount
@@ -64,31 +57,12 @@ type
    dbType*: CoreDbType           ## Type of database backend
    defCtx*: CoreDbCtxRef         ## Default context

    # Optional api interface (can be re-directed/intercepted)
    ariApi*: AristoApiRef         ## `Aristo` api
    kvtApi*: KvtApiRef            ## `KVT` api

    # Optional profiling and debugging stuff
    when CoreDbEnableApiTracking:
      trackLedgerApi*: bool       ## Debugging, suggestion for ledger
      trackCoreDbApi*: bool       ## Debugging, support
    when CoreDbEnableApiJumpTable:
      profTab*: CoreDbProfListRef ## Profiling data (if any)
    ledgerHook*: RootRef          ## Debugging/profiling, to be used by ledger
    tracerHook*: RootRef          ## Debugging/tracing

  CoreDbCtxRef* = ref object
    ## Shared context for `CoreDbAccRef`, `CoreDbKvtRef`
    parent*: CoreDbRef
    mpt*: AristoDbRef             ## `Aristo` database
    kvt*: KvtDbRef                ## `KVT` key-value table

  CoreDbKvtRef* = distinct CoreDbCtxRef
    ## Statically initialised Key-Value pair table

  CoreDbAccRef* = distinct CoreDbCtxRef
    ## Similar to `CoreDbKvtRef`, only dealing with `Aristo` accounts

  CoreDbTxRef* = ref object
    ## Transaction descriptor
    ctx*: CoreDbCtxRef            ## Context (also contains `Aristo` descriptor)
@@ -12,15 +12,7 @@

import
  "../.."/[aristo, kvt],
  "."/[base_config, base_desc]

when CoreDbAutoValidateDescriptors:
  import
    ./base_validate

when CoreDbEnableProfiling:
  import
    ./api_tracking
  ./base_desc

# ------------------------------------------------------------------------------
# Public constructor helper
@@ -28,48 +20,22 @@ when CoreDbEnableProfiling:

proc bless*(db: CoreDbRef): CoreDbRef =
  ## Verify descriptor
  when CoreDbAutoValidateDescriptors:
    db.validate
  when CoreDbEnableProfiling:
    db.profTab = CoreDbProfListRef.init()
  db

proc bless*(db: CoreDbRef; ctx: CoreDbCtxRef): CoreDbCtxRef =
  ctx.parent = db
  when CoreDbAutoValidateDescriptors:
    ctx.validate
  ctx

proc bless*(ctx: CoreDbCtxRef; dsc: CoreDbTxRef): auto =
  dsc.ctx = ctx
  when CoreDbAutoValidateDescriptors:
    dsc.validate
  dsc

# ------------------------------------------------------------------------------
# Public KVT helpers
# ------------------------------------------------------------------------------

template kvt*(dsc: CoreDbKvtRef): KvtDbRef =
  CoreDbCtxRef(dsc).kvt

template kvt*(tx: CoreDbTxRef): KvtDbRef =
  tx.ctx.kvt

template ctx*(kvt: CoreDbKvtRef): CoreDbCtxRef =
  CoreDbCtxRef(kvt)

# ---------------

template call*(api: KvtApiRef; fn: untyped; args: varargs[untyped]): untyped =
  when CoreDbEnableApiJumpTable:
    api.fn(args)
  else:
    fn(args)

template call*(kvt: CoreDbKvtRef; fn: untyped; args: varargs[untyped]): untyped =
  CoreDbCtxRef(kvt).parent.kvtApi.call(fn, args)

# ---------------

func toError*(e: KvtError; s: string; error = Unspecified): CoreDbError =
@@ -83,30 +49,9 @@ func toError*(e: KvtError; s: string; error = Unspecified): CoreDbError =
# Public Aristo helpers
# ------------------------------------------------------------------------------

template mpt*(dsc: CoreDbAccRef): AristoDbRef =
  CoreDbCtxRef(dsc).mpt

template mpt*(tx: CoreDbTxRef): AristoDbRef =
  tx.ctx.mpt

template ctx*(acc: CoreDbAccRef): CoreDbCtxRef =
  CoreDbCtxRef(acc)

# ---------------

template call*(api: AristoApiRef; fn: untyped; args: varargs[untyped]): untyped =
  when CoreDbEnableApiJumpTable:
    api.fn(args)
  else:
    fn(args)

template call*(
    acc: CoreDbAccRef;
    fn: untyped;
    args: varargs[untyped];
      ): untyped =
  CoreDbCtxRef(acc).parent.ariApi.call(fn, args)

# ---------------

func toError*(e: AristoError; s: string; error = Unspecified): CoreDbError =
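The `call` templates removed above were the heart of the jump-table indirection: with `CoreDbEnableApiJumpTable` set they dispatched through the stored function pointer, otherwise they compiled down to a direct call. A sketch of the two expansions:

  # Hypothetical expansion of `kvt.call(get, kvt.kTx, key)`:
  #   jump table enabled:  CoreDbCtxRef(kvt).parent.kvtApi.get(kvt.kTx, key)
  #   jump table disabled: get(kvt.kTx, key)   # plain static call
  let rc = kvt.call(get, kvt.kTx, key)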
@@ -1,57 +0,0 @@
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

{.push raises: [].}

import
  ../../aristo,
  ./base_desc

type
  ValidateSubDesc* = CoreDbCtxRef | CoreDbTxRef # | CoreDbCaptRef

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc validateSubDescRef(ctx: CoreDbCtxRef) =
  doAssert not ctx.isNil
  doAssert not ctx.parent.isNil
  doAssert not ctx.mpt.isNil
  doAssert not ctx.kvt.isNil

proc validateSubDescRef(tx: CoreDbTxRef) =
  doAssert not tx.isNil
  doAssert not tx.ctx.isNil
  doAssert not tx.aTx.isNil
  doAssert not tx.kTx.isNil

when false: # currently disabled
  proc validateSubDescRef(cpt: CoreDbCaptRef) =
    doAssert not cpt.isNil
    doAssert not cpt.parent.isNil
    doAssert not cpt.methods.recorderFn.isNil
    doAssert not cpt.methods.getFlagsFn.isNil
    doAssert not cpt.methods.forgetFn.isNil

# ------------------------------------------------------------------------------
# Public debugging helpers
# ------------------------------------------------------------------------------

proc validate*(dsc: ValidateSubDesc) =
  dsc.validateSubDescRef

proc validate*(db: CoreDbRef) =
  doAssert not db.isNil
  doAssert db.dbType != CoreDbType(0)
  db.defCtx.validate

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
@@ -14,42 +14,20 @@ import
  stint,
  eth/common/hashes,
  ../aristo as use_ari,
  ./base/[api_tracking, base_config, base_desc]
  ./base/base_desc

export stint, hashes

when CoreDbEnableApiJumpTable:
  discard
else:
  import
import
  ../aristo/[aristo_desc, aristo_path]

when CoreDbEnableApiTracking:
  import
    chronicles
  logScope:
    topics = "core_db"
  const
    logTxt = "API"

# ---------------

template call(api: AristoApiRef; fn: untyped; args: varargs[untyped]): untyped =
  when CoreDbEnableApiJumpTable:
    api.fn(args)
  else:
    fn(args)

# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------

iterator slotPairs*(acc: CoreDbTxRef; accPath: Hash32): (seq[byte], UInt256) =
  acc.setTrackNewApi AccSlotPairsIt
  for (path,data) in acc.aTx.rightPairsStorage accPath:
    yield (acc.ctx.parent.ariApi.call(pathAsBlob, path), data)
  acc.ifTrackNewApi:
    debug logTxt, api, elapsed
    yield (pathAsBlob(path), data)

# ------------------------------------------------------------------------------
# End
@@ -13,7 +13,6 @@
import
  ../aristo,
  ./backend/aristo_db,
  ./base/base_config,
  "."/[base_iterators, core_apps]

import
@@ -22,7 +21,6 @@ import
export
  EmptyBlob,
  base,
  base_config,
  base_iterators,
  core_apps

@@ -14,9 +14,9 @@
{.push raises: [].}

import
  kvt/[kvt_api, kvt_constants]
  kvt/kvt_constants
export
  kvt_api, kvt_constants
  kvt_constants

import
  kvt/kvt_init/memory_only
@@ -28,7 +28,6 @@ export
import
  kvt/kvt_desc
export
  KvtDbAction,
  KvtDbRef,
  KvtError,
  KvtTxRef,
@@ -1,265 +0,0 @@
# nimbus-eth1
# Copyright (c) 2024-2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Stackable API for `Kvt`
## =======================

import
  std/times,
  results,
  ../aristo/aristo_profile,
  ./kvt_desc/desc_backend,
  ./kvt_init/memory_db,
  ./kvt_init/memory_only,
  "."/[kvt_desc, kvt_persist, kvt_tx_frame, kvt_utils]

const
  AutoValidateApiHooks = defined(release).not
    ## No validation needed for the production suite.

  KvtPersistentBackendOk = AutoValidateApiHooks # and false
    ## Set true for persistent backend profiling (which needs an extra
    ## link library.)

when KvtPersistentBackendOk:
  import ./kvt_init/rocks_db

# Annotation helper(s)
{.pragma: noRaise, gcsafe, raises: [].}

type
  KvtDbProfListRef* = AristoDbProfListRef
    ## Borrowed from `aristo_profile`

  KvtDbProfData* = AristoDbProfData
    ## Borrowed from `aristo_profile`

  KvtApiDelFn* = proc(db: KvtTxRef,
    key: openArray[byte]): Result[void,KvtError] {.noRaise.}
  KvtApiFinishFn* = proc(db: KvtDbRef, eradicate = false) {.noRaise.}
  KvtApiForgetFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
  KvtApiGetFn* = proc(db: KvtTxRef,
    key: openArray[byte]): Result[seq[byte],KvtError] {.noRaise.}
  KvtApiLenFn* = proc(db: KvtTxRef,
    key: openArray[byte]): Result[int,KvtError] {.noRaise.}
  KvtApiHasKeyRcFn* = proc(db: KvtTxRef,
    key: openArray[byte]): Result[bool,KvtError] {.noRaise.}
  KvtApiPutFn* = proc(db: KvtTxRef,
    key, data: openArray[byte]): Result[void,KvtError] {.noRaise.}
  KvtApiDisposeFn* = proc(tx: KvtTxRef) {.noRaise.}
  KvtApiPersistFn* = proc(db: KvtDbRef, batch: PutHdlRef, txFrame: KvtTxRef) {.noRaise.}
  KvtApiToKvtDbRefFn* = proc(tx: KvtTxRef): KvtDbRef {.noRaise.}
  KvtApiTxFrameBeginFn* = proc(db: KvtDbRef, parent: KvtTxRef): KvtTxRef {.noRaise.}
  KvtApiBaseTxFrameFn* = proc(db: KvtDbRef): KvtTxRef {.noRaise.}

  KvtApiRef* = ref KvtApiObj
  KvtApiObj* = object of RootObj
    ## Useful set of `Kvt` functions that can be filtered, stacked etc. Note
    ## that this API is modelled after a subset of the `Aristo` API.
    del*: KvtApiDelFn
    finish*: KvtApiFinishFn
    get*: KvtApiGetFn
    len*: KvtApiLenFn
    hasKeyRc*: KvtApiHasKeyRcFn
    put*: KvtApiPutFn
    dispose*: KvtApiDisposeFn
    persist*: KvtApiPersistFn
    txFrameBegin*: KvtApiTxFrameBeginFn
    baseTxFrame*: KvtApiBaseTxFrameFn


  KvtApiProfNames* = enum
    ## index/name mapping for profile slots
    KvtApiProfTotal = "total"

    KvtApiProfDelFn = "del"
    KvtApiProfFinishFn = "finish"
    KvtApiProfGetFn = "get"
    KvtApiProfLenFn = "len"
    KvtApiProfHasKeyRcFn = "hasKeyRc"
    KvtApiProfPutFn = "put"
    KvtApiProfDisposeFn = "dispose"
    KvtApiProfPersistFn = "persist"
    KvtApiProfTxFrameBeginFn = "txFrameBegin"
    KvtApiProfBaseTxFrameFn = "baseTxFrame"

    KvtApiProfBeGetKvpFn = "be/getKvp"
    KvtApiProfBeLenKvpFn = "be/lenKvp"
    KvtApiProfBePutKvpFn = "be/putKvp"
    KvtApiProfBePutEndFn = "be/putEnd"

  KvtApiProfRef* = ref object of KvtApiRef
    ## Profiling API extension of `KvtApiObj`
    data*: KvtDbProfListRef
    be*: BackendRef

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

when AutoValidateApiHooks:
  proc validate(api: KvtApiObj) =
    for _, field in api.fieldPairs:
      doAssert not field.isNil

  proc validate(prf: KvtApiProfRef) =
    prf.KvtApiRef[].validate
    doAssert not prf.data.isNil

proc dup(be: BackendRef): BackendRef =
  case be.kind:
  of BackendMemory:
    return MemBackendRef(be).dup

  of BackendRocksDB:
    when KvtPersistentBackendOk:
      return RdbBackendRef(be).dup

# ------------------------------------------------------------------------------
# Public API constructors
# ------------------------------------------------------------------------------

func init*(api: var KvtApiObj) =
  when AutoValidateApiHooks:
    api.reset
  api.del = del
  api.finish = finish
  api.get = get
  api.len = len
  api.hasKeyRc = hasKeyRc
  api.put = put
  api.dispose = dispose
  api.persist = persist
  api.txFrameBegin = txFrameBegin
  api.baseTxFrame = baseTxFrame

  when AutoValidateApiHooks:
    api.validate

func init*(T: type KvtApiRef): T =
  result = new T
  result[].init()

func dup*(api: KvtApiRef): KvtApiRef =
  result = KvtApiRef()
  result[] = api[]
  when AutoValidateApiHooks:
    result[].validate
# ------------------------------------------------------------------------------
# Public profile API constructor
# ------------------------------------------------------------------------------

func init*(
    T: type KvtApiProfRef;
    api: KvtApiRef;
    be = BackendRef(nil);
      ): T =
  ## This constructor creates a profiling API descriptor to be derived from
  ## an initialised `api` argument descriptor. For profiling the DB backend,
  ## the field `.be` of the result descriptor must be assigned to the
  ## `.backend` field of the `KvtDbRef` descriptor.
  ##
  ## The argument descriptors `api` and `be` will not be modified and can be
  ## used to restore the previous set up.
  ##
  let
    data = KvtDbProfListRef(
      list: newSeq[KvtDbProfData](1 + high(KvtApiProfNames).ord))
    profApi = T(data: data)

  template profileRunner(n: KvtApiProfNames, code: untyped): untyped =
    let start = getTime()
    code
    data.update(n.ord, getTime() - start)

  profApi.del =
    proc(a: KvtDbRef; b: openArray[byte]): auto =
      KvtApiProfDelFn.profileRunner:
        result = api.del(a, b)

  profApi.finish =
    proc(a: KvtDbRef; b = false) =
      KvtApiProfFinishFn.profileRunner:
        api.finish(a, b)

  profApi.get =
    proc(a: KvtDbRef, b: openArray[byte]): auto =
      KvtApiProfGetFn.profileRunner:
        result = api.get(a, b)

  profApi.len =
    proc(a: KvtDbRef, b: openArray[byte]): auto =
      KvtApiProfLenFn.profileRunner:
        result = api.len(a, b)

  profApi.hasKeyRc =
    proc(a: KvtDbRef, b: openArray[byte]): auto =
      KvtApiProfHasKeyRcFn.profileRunner:
        result = api.hasKeyRc(a, b)

  profApi.put =
    proc(a: KvtDbRef; b, c: openArray[byte]): auto =
      KvtApiProfPutFn.profileRunner:
        result = api.put(a, b, c)

  profApi.dispose =
    proc(a: KvtTxRef): auto =
      KvtApiProfDisposeFn.profileRunner:
        result = api.dispose(a)

  profApi.persist =
    proc(a: KvtDbRef): auto =
      KvtApiProfPersistFn.profileRunner:
        result = api.persist(a)

  profApi.txFrameBegin =
    proc(a: KvtDbRef) =
      KvtApiProfTxFrameBeginFn.profileRunner:
        api.txFrameBegin(a)

  let beDup = be.dup()
  if beDup.isNil:
    profApi.be = be

  else:
    beDup.getKvpFn =
      proc(a: openArray[byte]): auto =
        KvtApiProfBeGetKvpFn.profileRunner:
          result = be.getKvpFn(a)
    data.list[KvtApiProfBeGetKvpFn.ord].masked = true

    beDup.lenKvpFn =
      proc(a: openArray[byte]): auto =
        KvtApiProfBeLenKvpFn.profileRunner:
          result = be.lenKvpFn(a)
    data.list[KvtApiProfBeLenKvpFn.ord].masked = true

    beDup.putKvpFn =
      proc(a: PutHdlRef; b, c: openArray[byte]) =
        KvtApiProfBePutKvpFn.profileRunner:
          be.putKvpFn(a, b, c)
    data.list[KvtApiProfBePutKvpFn.ord].masked = true

    beDup.putEndFn =
      proc(a: PutHdlRef): auto =
        KvtApiProfBePutEndFn.profileRunner:
          result = be.putEndFn(a)
    data.list[KvtApiProfBePutEndFn.ord].masked = true

    profApi.be = beDup

  when AutoValidateApiHooks:
    profApi.validate

  profApi

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
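The profiling constructor just removed was installed by wrapping a plain API descriptor and rerouting the backend, roughly along these lines; the `db: KvtDbRef` handle and the workload are hypothetical:

  let
    plainApi = KvtApiRef.init()
    profApi  = KvtApiProfRef.init(plainApi, db.backend)
  db.backend = profApi.be    # route backend calls through the profiler too
  # ... run the workload through `profApi` ...
  # accumulated timings are then available in `profApi.data.list`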
@@ -45,8 +45,6 @@ type
    xMap*: Table[seq[byte],uint64]         ## For pretty printing
    pAmx*: Table[uint64,seq[byte]]         ## For pretty printing

  KvtDbAction* = proc(db: KvtDbRef) {.gcsafe, raises: [].}
    ## Generic call back function/closure.

# ------------------------------------------------------------------------------
# Public helpers
@@ -26,15 +26,11 @@ import
  ./transaction,
  ./utils/utils

when not CoreDbEnableCaptJournal:
  {.error: "Compiler flag missing for tracer, try -d:dbjapi_enabled".}

type
  CaptCtxRef = ref object
    db: CoreDbRef                # not `nil`
    root: common.Hash32
    ctx: CoreDbCtxRef            # not `nil`
    cpt: CoreDbCaptRef           # not `nil`
    restore: CoreDbCtxRef        # `nil` unless `ctx` activated

const
@@ -44,7 +40,6 @@ const
  uncleName = "uncle"
  internalTxName = "internalTx"

proc dumpMemoryDB*(node: JsonNode, cpt: CoreDbCaptRef) {.gcsafe.}
proc toJson*(receipts: seq[Receipt]): JsonNode {.gcsafe.}

# ------------------------------------------------------------------------------
@@ -65,7 +60,7 @@ proc init(
    else:
      {.warning: "TODO make a temporary context? newCtxByKey has been obsoleted".}
      com.db.ctx
  T(db: com.db, root: root, cpt: com.db.pushCapture(), ctx: ctx)
  T(db: com.db, root: root, ctx: ctx)

proc init(
    T: type CaptCtxRef;
@@ -90,8 +85,6 @@ proc release(cc: CaptCtxRef) =
  # doAssert ctx == cc.ctx
  if true:
    raiseAssert "TODO release context"
  # cc.ctx.forget()                        # dispose
  cc.cpt.pop()                             # discard top layer of actions tracer

# -------------------

@@ -232,15 +225,9 @@ proc traceTransactionImpl(
  if TracerFlags.DisableStateDiff notin tracerFlags:
    result["stateDiff"] = stateDiff

  # now we dump captured state db
  if TracerFlags.DisableState notin tracerFlags:
    result.dumpMemoryDB(cx.cpt)


proc dumpBlockStateImpl(
    com: CommonRef;
    blk: EthBlock;
    dumpState = false;
      ): JsonNode =
  template header: Header = blk.header

@@ -301,10 +288,6 @@ proc dumpBlockStateImpl(

  result = %{"before": before, "after": after}

  if dumpState:
    result.dumpMemoryDB(cc.cpt)


proc traceBlockImpl(
    com: CommonRef;
    blk: EthBlock;
@@ -339,9 +322,6 @@ proc traceBlockImpl(
  result = tracerInst.getTracingResult()
  result["gas"] = %gasUsed

  if TracerFlags.DisableState notin tracerFlags:
    result.dumpMemoryDB(cc.cpt)

proc traceTransactionsImpl(
    com: CommonRef;
    header: Header;
@@ -368,12 +348,6 @@ proc toJson*(receipts: seq[Receipt]): JsonNode =
  for receipt in receipts:
    result.add receipt.toJson

proc dumpMemoryDB*(node: JsonNode, cpt: CoreDbCaptRef) =
  var n = newJObject()
  for (k,v) in cpt.kvtLog:
    n[k.toHex(false)] = %v
  node["state"] = n

proc dumpReceipts*(chainDB: CoreDbTxRef, header: Header): JsonNode =
  chainDB.dumpReceiptsImpl header

@@ -389,9 +363,8 @@ proc traceTransaction*(
proc dumpBlockState*(
    com: CommonRef;
    blk: EthBlock;
    dumpState = false;
      ): JsonNode =
  com.dumpBlockStateImpl(blk, dumpState)
  com.dumpBlockStateImpl(blk)

proc traceTransactions*(
    com: CommonRef;
@@ -15,4 +15,3 @@

-d:"chronicles_disable_thread_id"
-d:"chronicles_runtime_filtering=on"
-d:dbjapi_enabled
@@ -19,13 +19,12 @@ import
    aristo_delete,
    aristo_desc,
    aristo_fetch,
    aristo_hike,
    aristo_init,
    aristo_tx_frame,
    aristo_init/init_common,
    aristo_init/memory_db,
    aristo_layers,
    aristo_merge,
    aristo_persist,
    aristo_tx_frame,
    aristo_persist
  ]

proc makeAccount(i: uint64): (Hash32, AristoAccount) =
@ -185,9 +185,6 @@ proc initRunnerDB(
|
||||
pruneHistory = pruneHistory)
|
||||
|
||||
setErrorLevel()
|
||||
when CoreDbEnableApiTracking:
|
||||
coreDB.trackCoreDbApi = false
|
||||
coreDB.trackLedgerApi = false
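  # API tracking is switched off for the default setup; the individual runners
  # below re-enable it when `noisy` is set.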

# ------------------------------------------------------------------------------
# Test Runners: accounts and accounts storages

@ -240,11 +237,6 @@ proc chainSyncRunner(
    if profilingOk: noisy.test_chainSyncProfilingPrint numBlocks
    if persistent and finalDiskCleanUpOk: dbDir.flushDbDir

  when CoreDbEnableApiTracking:
    if noisy:
      com.db.trackCoreDbApi = true
      com.db.trackLedgerApi = true

  check noisy.test_chainSync(filePaths, com, numBlocks,
    lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk,
    oldLogAlign=oldLogAlign)

@ -293,11 +285,6 @@ proc persistentSyncPreLoadAndResumeRunner(
      com.db.finish(eradicate = finalDiskCleanUpOk)
      if profilingOk: noisy.test_chainSyncProfilingPrint firstPart

    when CoreDbEnableApiTracking:
      if noisy:
        com.db.trackCoreDbApi = true
        com.db.trackLedgerApi = true

    check noisy.test_chainSync(filePaths, com, firstPart,
      lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk,
      oldLogAlign=oldLogAlign)

@ -310,11 +297,6 @@ proc persistentSyncPreLoadAndResumeRunner(
      if profilingOk: noisy.test_chainSyncProfilingPrint secndPart
      if finalDiskCleanUpOk: dbDir.flushDbDir

    when CoreDbEnableApiTracking:
      if noisy:
        com.db.trackCoreDbApi = true
        com.db.trackLedgerApi = true

    check noisy.test_chainSync(filePaths, com, secndPart,
      lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk,
      oldLogAlign=oldLogAlign)

@ -19,19 +19,6 @@ import
  ../replay/[pp, undump_blocks, undump_blocks_era1, xcheck],
  ./test_helpers

when CoreDbEnableProfiling:
  import
    std/sequtils

when CoreDbEnableProfiling:
  import
    ../../execution_chain/db/aristo/[aristo_api, aristo_profile],
    ../../execution_chain/db/kvt/kvt_api
  var
    aristoProfData: AristoDbProfListRef
    kvtProfData: KvtDbProfListRef
    cdbProfData: CoreDbProfListRef
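  # These module-level variables keep the profiling data alive so that it can
  # still be printed after the profiled services have terminated (see
  # `test_chainSync` below).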

const
  EnableExtraLoggingControl = true
var
@ -63,16 +50,10 @@ template initLogging(noisy: bool, com: CommonRef) =
    debug "start undumping into persistent blocks"
    logStartTime = Time()
    setErrorLevel()
    when CoreDbEnableApiTracking:
      logSavedEnv = (com.db.trackCoreDbApi, com.db.trackLedgerApi)
      com.db.trackCoreDbApi = true
      com.db.trackLedgerApi = true

proc finishLogging(com: CommonRef) =
  when EnableExtraLoggingControl:
    setErrorLevel()
    when CoreDbEnableApiTracking:
      (com.db.trackCoreDbApi, com.db.trackLedgerApi) = logSavedEnv
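  # Counterpart to `initLogging`: restoring `logSavedEnv` leaves the tracking
  # flags exactly as the test found them.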


template startLogging(noisy: bool; num: BlockNumber) =

@ -118,19 +99,6 @@ proc test_chainSyncProfilingPrint*(
      else: ""
  discard info
  var blurb: seq[string]
  when CoreDbEnableProfiling:
    blurb.add cdbProfData.profilingPrinter(
      names = CoreDbFnInx.toSeq.mapIt($it),
      header = "CoreDb profiling results" & info,
      indent)
    blurb.add aristoProfData.profilingPrinter(
      names = AristoApiProfNames.toSeq.mapIt($it),
      header = "Aristo backend profiling results" & info,
      indent)
    blurb.add kvtProfData.profilingPrinter(
      names = KvtApiProfNames.toSeq.mapIt($it),
      header = "Kvt backend profiling results" & info,
      indent)
  for s in blurb:
    if 0 < s.len: true.say "***", s, "\n"
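  # Typical call site, as used by the runners above:
  #   if profilingOk: noisy.test_chainSyncProfilingPrint numBlocks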

@ -173,14 +141,6 @@ proc test_chainSync*(
        noisy.say "***", "stop: sample exhausted"
        return true

  # Profile variables will be non-nil if profiling is available. The profiling
  # API data needs to be captured so that it remains available after the
  # services have terminated.
  when CoreDbEnableProfiling:
    aristoProfData = com.db.ariApi.AristoApiProfRef.data
    kvtProfData = com.db.kvtApi.KvtApiProfRef.data
    cdbProfData = com.db.profTab
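    # The captured lists are consumed later by `test_chainSyncProfilingPrint`,
    # which renders them via `profilingPrinter`.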

  # This will enable printing the `era1` covered block ranges (if any)
  undump_blocks_era1.noisy = noisy

@ -226,9 +186,6 @@ proc test_chainSync*(
        if noisy:
          noisy.whisper "***", "Re-run with logging enabled...\n"
          setTraceLevel()
          when CoreDbEnableApiTracking:
            com.db.trackCoreDbApi = false
            com.db.trackLedgerApi = false
        discard chain.persistBlocks(w)
        blocks += w.len
        continue

@ -10,13 +10,12 @@

import
  std/[json, os, tables, strutils],
  stew/byteutils,
  chronicles,
  unittest2,
  results,
  ./test_helpers,
  ../execution_chain/db/aristo,
  ../execution_chain/db/aristo/[aristo_desc, aristo_part],
  ../execution_chain/db/aristo/[aristo_desc],
  ../execution_chain/db/aristo/aristo_part/part_debug,
  ../execution_chain/db/kvt/kvt_utils,
  ../execution_chain/[tracer, evm/types],