CoreDb: use stackable API for Aristo backend (#2060)

* Aristo/Kvt: Provide function-hook APIs

why:
  These APIs can be used for installing tracers, profiling functionality,
  and other niceties on the databases.

* Aristo: Provide optional API profiling

details:
  It is essentially a re-implementation of the profiling facility
  already present in `CoreDb`

* Kvt: Provide optional API profiling similar to `Aristo`

* CoreDb: Re-implementing profiling using `aristo_profile`

* Ledger: Re-implementing profiling using `aristo_profile`

* CoreDb: Update unit tests for maintainability

* update copyright dates
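
As a quick illustration of the hook idea (a sketch, not code from this commit: the `installTracer` wrapper and its `debugEcho` body are made up, while `AristoApiRef`, `dup` and the `fetchPayload` slot are all introduced below), a tracer can be installed by copying the function table and overriding a single entry:

    import ./aristo/[aristo_api, aristo_desc]

    proc installTracer(api: AristoApiRef): AristoApiRef =
      ## Return a copy of `api` that logs every `fetchPayload` call.
      let prev = api.fetchPayload    # original function to delegate to
      result = api.dup               # leave the original table intact
      result.fetchPayload =
        proc(db: AristoDbRef; root: VertexID; path: openArray[byte]): auto =
          debugEcho "fetchPayload root=", root.uint64
          prev(db, root, path)       # delegate to the wrapped entry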
Jordan Hrycaj 2024-02-29 21:10:24 +00:00 committed by GitHub
parent 7089226d43
commit 587ca3abbe
31 changed files with 1678 additions and 689 deletions

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -13,31 +13,26 @@
##
{.push raises: [].}
import aristo/[
aristo_constants, aristo_delete, aristo_fetch, aristo_init, aristo_merge,
aristo_nearby, aristo_serialise, aristo_sign, aristo_tx, aristo_utils,
aristo_walk]
import
aristo/[aristo_api, aristo_constants, aristo_sign]
export
aristo_constants, aristo_delete, aristo_fetch, aristo_init, aristo_merge,
aristo_nearby, aristo_serialise, aristo_sign, aristo_tx, aristo_utils,
aristo_walk
aristo_api, aristo_constants, aristo_sign
import
aristo/aristo_get
aristo/aristo_init
export
getKeyRc
MemBackendRef,
VoidBackendRef,
init
import
aristo/aristo_hashify
aristo/aristo_nearby
export
hashify
leftPairs, # iterators
rightPairs
import
aristo/aristo_path
export
pathAsBlob
import aristo/aristo_desc/[desc_identifiers, desc_structural]
aristo/aristo_desc/[desc_identifiers, desc_structural]
export
AristoAccount,
PayloadRef,
@ -53,7 +48,7 @@ export
AristoError,
AristoTxRef,
MerkleSignRef,
forget,
QidLayoutRef,
isValid
# End

View File

@ -0,0 +1,623 @@
# nimbus-eth1
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Stackable API for `Aristo`
## ==========================
import
std/times,
eth/[common, trie/nibbles],
results,
"."/[aristo_delete, aristo_desc, aristo_desc/desc_backend, aristo_fetch,
aristo_get, aristo_hashify, aristo_hike, aristo_init, aristo_merge,
aristo_path, aristo_profile, aristo_serialise, aristo_tx, aristo_vid]
export
AristoDbProfListRef
# Annotation helper(s)
{.pragma: noRaise, gcsafe, raises: [].}
type
AristoApiCommitFn* =
proc(tx: AristoTxRef;
): Result[void,AristoError]
{.noRaise.}
## Given a *top level* handle, this function accepts all database
## operations performed through this handle and merges them into the
## previous layer. The previous transaction is returned if there
## was any.
AristoApiDeleteFn* =
proc(db: AristoDbRef;
root: VertexID;
path: openArray[byte];
accPath: PathID;
): Result[bool,(VertexID,AristoError)]
{.noRaise.}
## Delete a leaf with path `path` starting at root `root`.
##
## For a `root` with `VertexID` greater than `LEAST_FREE_VID`, the
## sub-tree generated by `payload.root` is considered a storage trie
## linked to an account leaf referred to by a valid `accPath` (i.e.
## different from `VOID_PATH_ID`.) In that case, an account must
## exist. If there is a payload of type `AccountData`, its `storageID`
## field must be unset or equal to the `root` vertex ID.
##
## The return code is `true` iff the trie has become empty.
AristoApiDelTreeFn* =
proc(db: AristoDbRef;
root: VertexID;
accPath: PathID;
): Result[void,(VertexID,AristoError)]
{.noRaise.}
## Delete sub-trie below `root`. The maximum supported sub-tree size
## is `SUB_TREE_DISPOSAL_MAX`. Larger tries must be disposed by
## walk-deleting leaf nodes using `left()` or `right()` traversal
## functions.
##
## For a `root` argument greater than `LEAST_FREE_VID`, the sub-tree
## spanned by `root` is considered a storage trie linked to an account
## leaf referred to by a valid `accPath` (i.e. different from
## `VOID_PATH_ID`.) In that case, an account must exist. If there is
## a payload of type `AccountData`, its `storageID` field must be unset
## or equal to the `hike.root` vertex ID.
AristoApiFetchPayloadFn* =
proc(db: AristoDbRef;
root: VertexID;
path: openArray[byte];
): Result[PayloadRef,(VertexID,AristoError)]
{.noRaise.}
## Cascaded attempt to traverse the `Aristo Trie` and fetch the value
## of a leaf vertex. This function is complementary to `mergePayload()`.
AristoApiFinishFn* =
proc(db: AristoDbRef;
flush = false;
) {.noRaise.}
## Backend destructor. The argument `flush` indicates that a full
## database deletion is requested. If set `false` the outcome might
## differ depending on the type of backend (e.g. the `BackendMemory`
## backend will always flush on close.)
##
## In case of distributed descriptors accessing the same backend, all
## distributed descriptors will be destroyed.
##
## This destructor may be used on already *destructed* descriptors.
AristoApiForgetFn* =
proc(db: AristoDbRef;
): Result[void,AristoError]
{.noRaise.}
## Destruct the non centre argument `db` descriptor (see comments on
## `reCentre()` for details.)
##
## A non centre descriptor should always be destructed after use (see
## also comments on `fork()`.)
AristoApiForkFn* =
proc(db: AristoDbRef;
rawTopLayer = false;
): Result[AristoDbRef,AristoError]
{.noRaise.}
## This function creates a new empty descriptor accessing the same
## backend (if any) database as the argument `db`. This new descriptor
## joins the list of descriptors accessing the same backend database.
##
## After use, any unused non centre descriptor should be destructed
## via `forget()`. Not doing so will not only hold memory resources
## but might also cost computing resources for maintaining and
## updating backend filters when writing to the backend database.
##
## If the argument `rawTopLayer` is set `true` the function will
## provide an uninitialised and inconsistent (!) top layer. This
## setting avoids some database lookup for cases where the top layer
## is redefined anyway.
AristoApiForkTopFn* =
proc(db: AristoDbRef;
dontHashify = false;
): Result[AristoDbRef,AristoError]
{.noRaise.}
## Clone a top transaction into a new DB descriptor accessing the same
## backend database (if any) as the argument `db`. The new descriptor
## is linked to the transaction parent and is fully functional as a
## forked instance (see comments on `aristo_desc.reCentre()` for
## details.) If there is no active transaction, the top layer state
## is cloned.
##
## Input situation:
## ::
## tx -> db0 with tx is top transaction, tx.level > 0
##
## Output situation:
## ::
## tx -> db0 \
## > share the same backend
## tx1 -> db1 /
##
## where `tx.level > 0`, `db1.level == 1` and `db1` is returned. The
## transaction `tx1` can be retrieved via `db1.txTop()`.
##
## The new DB descriptor will contain a copy of the argument transaction
## `tx` as top layer of level 1 (i.e. this is the only transaction.)
## Rolling back will end up at the backend layer (incl. backend filter.)
##
## If the argument flag `dontHashify` is passed `true`, the clone
## descriptor will *NOT* be hashified right after construction.
##
## Use `aristo_desc.forget()` to clean up this descriptor.
AristoApiGetKeyRcFn* =
proc(db: AristoDbRef;
vid: VertexID;
): Result[HashKey,AristoError]
{.noRaise.}
## Cascaded attempt to fetch a Merkle hash from the cache layers or
## the backend (if available.)
AristoApiHashifyFn* =
proc(db: AristoDbRef;
): Result[void,(VertexID,AristoError)]
{.noRaise.}
## Add keys to the `Patricia Trie` so that it becomes a `Merkle
## Patricia Tree`.
AristoApiHasPathFn* =
proc(db: AristoDbRef;
root: VertexID;
path: openArray[byte];
): Result[bool,(VertexID,AristoError)]
{.noRaise.}
## Variant of `fetchPayload()` without returning data. It returns
## `true` iff the database `db` contains a leaf item with the argument
## path.
AristoApiHikeUpFn* =
proc(path: NibblesSeq;
root: VertexID;
db: AristoDbRef;
): Result[Hike,(VertexID,AristoError,Hike)]
{.noRaise.}
## For the argument `path`, find and return the longest possible path
## in the argument database `db`.
AristoApiIsTopFn* =
proc(tx: AristoTxRef;
): bool
{.noRaise.}
## Getter, returns `true` if the argument `tx` refers to the current
## top level transaction.
AristoApiLevelFn* =
proc(db: AristoDbRef;
): int
{.noRaise.}
## Getter, non-negative nesting level (i.e. number of pending
## transactions)
AristoApiNForkedFn* =
proc(db: AristoDbRef;
): int
{.noRaise.}
## Returns the number of non centre descriptors (see comments on
## `reCentre()` for details.) This function is a fast version of
## `db.forked.toSeq.len`.
AristoApiMergeFn* =
proc(db: AristoDbRef;
root: VertexID;
path: openArray[byte];
data: openArray[byte];
accPath: PathID;
): Result[bool,AristoError]
{.noRaise.}
## Variant of `mergePayload()` where the `data` argument will be
## converted to a `RawBlob` type `PayloadRef` value.
AristoApiMergePayloadFn* =
proc(db: AristoDbRef;
root: VertexID;
path: openArray[byte];
payload: PayloadRef;
accPath = VOID_PATH_ID;
): Result[bool,AristoError]
{.noRaise.}
## Merge the argument key-value-pair `(path,payload)` into the top level
## vertex table of the database `db`.
##
## For a `root` argument with `VertexID` greater than `LEAST_FREE_VID`,
## the sub-tree generated by `payload.root` is considered a storage trie
## linked to an account leaf referred to by a valid `accPath` (i.e.
## different from `VOID_PATH_ID`.) In that case, an account must exist.
## If there is a payload of type `AccountData`, its `storageID` field
## be unset or equal to the `payload.root` vertex ID.
AristoApiPathAsBlobFn* =
proc(tag: PathID;
): Blob
{.noRaise.}
## Converts the `tag` argument to a sequence of an even number of
## nibbles represented by a `Blob`. If the argument `tag` represents
## an odd number of nibbles, a zero nibble is appended.
##
## This function is useful only if there is a tacit agreement that all
## paths used to index database leaf values can be represented as
## `Blob`, i.e. `PathID` type paths with an even number of nibbles.
AristoApiRollbackFn* =
proc(tx: AristoTxRef;
): Result[void,AristoError]
{.noRaise.}
## Given a *top level* handle, this function discards all database
## operations performed for this transaction. The previous transaction
## is returned if there was any.
AristoApiSerialiseFn* =
proc(db: AristoDbRef;
pyl: PayloadRef;
): Result[Blob,(VertexID,AristoError)]
{.noRaise.}
## Encode the data payload of the argument `pyl` as RLP `Blob` if
## it is of account type, otherwise pass the data as is.
AristoApiStowFn* =
proc(db: AristoDbRef;
persistent = false;
chunkedMpt = false;
): Result[void,AristoError]
{.noRaise.}
## If there is no backend while the `persistent` argument is set `true`,
## the function returns immediately with an error. The same happens if
## there is a pending transaction.
##
## The function then merges the data from the top layer cache into the
## backend stage area. After that, the top layer cache is cleared.
##
## Staging the top layer cache might fail with a partial MPT when it
## is set up from partial MPT chunks as happens with `snap` sync
## processing. In this case, the `chunkedMpt` argument must be set
## `true` (see also `fwdFilter`.)
##
## If the argument `persistent` is set `true`, all the staged data are
## merged into the physical backend database and the staged data area
## is cleared.
AristoApiTxBeginFn* =
proc(db: AristoDbRef
): Result[AristoTxRef,AristoError]
{.noRaise.}
## Starts a new transaction.
##
## Example:
## ::
## proc doSomething(db: AristoDbRef) =
## let tx = db.txBegin
## defer: tx.rollback()
## ... continue using db ...
## tx.commit()
AristoApiTxTopFn* =
proc(db: AristoDbRef;
): Result[AristoTxRef,AristoError]
{.noRaise.}
## Getter, returns top level transaction if there is any.
AristoApiVidFetchFn* =
proc(db: AristoDbRef;
pristine = false;
): VertexID
{.noRaise.}
## Recycle or create a new `VertexID`. Reusable vertex *ID*s are kept
## in a list where the top entry *ID* has the property that any other
## *ID* larger is also not used on the database.
##
## The function prefers to return recycled vertex *ID*s if there are
## any. When the argument `pristine` is set `true`, the function
## guarantees to return a non-recycled, brand new vertex *ID* which
## is the preferred mode when creating leaf vertices.
AristoApiVidDisposeFn* =
proc(db: AristoDbRef;
vid: VertexID;
) {.noRaise.}
## Recycle the argument `vid` which is useful after deleting entries
## from the vertex table, in order to keep the `VertexID` type key
## values small.
AristoApiRef* = ref AristoApiObj
AristoApiObj* = object of RootObj
## Useful set of `Aristo` functions that can be filtered, stacked etc.
commit*: AristoApiCommitFn
delete*: AristoApiDeleteFn
delTree*: AristoApiDelTreeFn
fetchPayload*: AristoApiFetchPayloadFn
finish*: AristoApiFinishFn
forget*: AristoApiForgetFn
fork*: AristoApiForkFn
forkTop*: AristoApiForkTopFn
getKeyRc*: AristoApiGetKeyRcFn
hashify*: AristoApiHashifyFn
hasPath*: AristoApiHasPathFn
hikeUp*: AristoApiHikeUpFn
isTop*: AristoApiIsTopFn
level*: AristoApiLevelFn
nForked*: AristoApiNForkedFn
merge*: AristoApiMergeFn
mergePayload*: AristoApiMergePayloadFn
pathAsBlob*: AristoApiPathAsBlobFn
rollback*: AristoApiRollbackFn
serialise*: AristoApiSerialiseFn
stow*: AristoApiStowFn
txBegin*: AristoApiTxBeginFn
txTop*: AristoApiTxTopFn
vidFetch*: AristoApiVidFetchFn
vidDispose*: AristoApiVidDisposeFn
AristoApiProfNames* = enum
## Index/name mapping for profile slots
AristoApiProfTotal = "total"
AristoApiProfCommitFn = "commit"
AristoApiProfDeleteFn = "delete"
AristoApiProfDelTreeFn = "delTree"
AristoApiProfFetchPayloadFn = "fetchPayload"
AristoApiProfFinishFn = "finish"
AristoApiProfForgetFn = "forget"
AristoApiProfForkFn = "fork"
AristoApiProfForkTopFn = "forkTop"
AristoApiProfGetKeyRcFn = "getKeyRc"
AristoApiProfHashifyFn = "hashify"
AristoApiProfHasPathFn = "hasPath"
AristoApiProfHikeUpFn = "hikeUp"
AristoApiProfIsTopFn = "isTop"
AristoApiProfLevelFn = "level"
AristoApiProfNForkedFn = "nForked"
AristoApiProfMergeFn = "merge"
AristoApiProfMergePayloadFn = "mergePayload"
AristoApiProfPathAsBlobFn = "pathAsBlob"
AristoApiProfRollbackFn = "rollback"
AristoApiProfSerialiseFn = "serialise"
AristoApiProfStowFn = "stow"
AristoApiProfTxBeginFn = "txBegin"
AristoApiProfTxTopFn = "txTop"
AristoApiProfVidFetchFn = "vidFetch"
AristoApiProfVidDisposeFn = "vidDispose"
AristoApiProfBeGetVtxFn = "be/getVtx"
AristoApiProfBeGetKeyFn = "be/getKey"
AristoApiProfBePutEndFn = "be/putEnd"
AristoApiProfRef* = ref object of AristoApiRef
## Profiling API extension of `AristoApiObj`
data*: AristoDbProfListRef
be*: BackendRef
# ------------------------------------------------------------------------------
# Public API constructors
# ------------------------------------------------------------------------------
func init*(api: var AristoApiObj) =
## Initialise an `api` argument descriptor
##
api.commit = commit
api.delete = delete
api.delTree = delTree
api.fetchPayload = fetchPayload
api.finish = finish
api.forget = forget
api.fork = fork
api.forkTop = forkTop
api.getKeyRc = getKeyRc
api.hashify = hashify
api.hasPath = hasPath
api.hikeUp = hikeUp
api.isTop = isTop
api.level = level
api.nForked = nForked
api.merge = merge
api.mergePayload = mergePayload
api.pathAsBlob = pathAsBlob
api.rollback = rollback
api.serialise = serialise
api.stow = stow
api.txBegin = txBegin
api.txTop = txTop
api.vidFetch = vidFetch
api.vidDispose = vidDispose
func init*(T: type AristoApiRef): T =
new result
result[].init()
func dup*(api: AristoApiRef): AristoApiRef =
new result
result[] = api[]
# ------------------------------------------------------------------------------
# Public profile API constructor
# ------------------------------------------------------------------------------
func init*(
T: type AristoApiProfRef;
api: AristoApiRef;
be = BackendRef(nil);
): T =
## This constructor creates a profiling API descriptor to be derived from
## an initialised `api` argument descriptor. For profiling the DB backend,
## the field `.be` of the result descriptor must be assigned to the
## `.backend` field of the `AristoDbRef` descriptor.
##
## The argument descriptors `api` and `be` will not be modified and can be
## used to restore the previous set up.
##
let
data = AristoDbProfListRef(
list: newSeq[AristoDbProfData](1 + high(AristoApiProfNames).ord))
profApi = T(data: data)
template profileRunner(n: AristoApiProfNames, code: untyped): untyped =
let start = getTime()
code
data.update(n.ord, getTime() - start)
profApi.commit =
proc(a: AristoTxRef): auto =
AristoApiProfCommitFn.profileRunner:
result = api.commit(a)
profApi.delete =
proc(a: AristoDbRef; b: VertexID; c: openArray[byte]; d: PathID): auto =
AristoApiProfDeleteFn.profileRunner:
result = api.delete(a, b, c, d)
profApi.delTree =
proc(a: AristoDbRef; b: VertexID; c: PathID): auto =
AristoApiProfDelTreeFn.profileRunner:
result = api.delTree(a, b, c)
profApi.fetchPayload =
proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto =
AristoApiProfFetchPayloadFn.profileRunner:
result = api.fetchPayload(a, b, c)
profApi.finish =
proc(a: AristoDbRef; b = false) =
AristoApiProfFinishFn.profileRunner:
api.finish(a, b)
profApi.forget =
proc(a: AristoDbRef): auto =
AristoApiProfForgetFn.profileRunner:
result = api.forget(a)
profApi.fork =
proc(a: AristoDbRef; b = false): auto =
AristoApiProfForkFn.profileRunner:
result = api.fork(a, b)
profApi.forkTop =
proc(a: AristoDbRef; b = false): auto =
AristoApiProfForkTopFn.profileRunner:
result = api.forkTop(a, b)
profApi.getKeyRc =
proc(a: AristoDbRef; b: VertexID): auto =
AristoApiProfGetKeyRcFn.profileRunner:
result = api.getKeyRc(a, b)
profApi.hashify =
proc(a: AristoDbRef): auto =
AristoApiProfHashifyFn.profileRunner:
result = api.hashify(a)
profApi.hasPath =
proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto =
AristoApiProfHasPathFn.profileRunner:
result = api.hasPath(a, b, c)
profApi.hikeUp =
proc(a: NibblesSeq; b: VertexID; c: AristoDbRef): auto =
AristoApiProfHikeUpFn.profileRunner:
result = api.hikeUp(a, b, c)
profApi.isTop =
proc(a: AristoTxRef): auto =
AristoApiProfIsTopFn.profileRunner:
result = api.isTop(a)
profApi.level =
proc(a: AristoDbRef): auto =
AristoApiProfLevelFn.profileRunner:
result = api.level(a)
profApi.nForked =
proc(a: AristoDbRef): auto =
AristoApiProfNForkedFn.profileRunner:
result = api.nForked(a)
profApi.merge =
proc(a: AristoDbRef; b: VertexID; c,d: openArray[byte]; e: PathID): auto =
AristoApiProfMergeFn.profileRunner:
result = api.merge(a, b, c, d, e)
profApi.mergePayload =
proc(a: AristoDbRef; b: VertexID; c: openArray[byte]; d: PayloadRef;
e = VOID_PATH_ID): auto =
AristoApiProfMergePayloadFn.profileRunner:
result = api.mergePayload(a, b, c, d, e)
profApi.pathAsBlob =
proc(a: PathID): auto =
AristoApiProfPathAsBlobFn.profileRunner:
result = api.pathAsBlob(a)
profApi.rollback =
proc(a: AristoTxRef): auto =
AristoApiProfRollbackFn.profileRunner:
result = api.rollback(a)
profApi.serialise =
proc(a: AristoDbRef; b: PayloadRef): auto =
AristoApiProfSerialiseFn.profileRunner:
result = api.serialise(a, b)
profApi.stow =
proc(a: AristoDbRef; b = false; c = false): auto =
AristoApiProfStowFn.profileRunner:
result = api.stow(a, b, c)
profApi.txBegin =
proc(a: AristoDbRef): auto =
AristoApiProfTxBeginFn.profileRunner:
result = api.txBegin(a)
profApi.txTop =
proc(a: AristoDbRef): auto =
AristoApiProfTxTopFn.profileRunner:
result = api.txTop(a)
profApi.vidFetch =
proc(a: AristoDbRef; b = false): auto =
AristoApiProfVidFetchFn.profileRunner:
result = api.vidFetch(a, b)
profApi.vidDispose =
proc(a: AristoDbRef;b: VertexID) =
AristoApiProfVidDisposeFn.profileRunner:
api.vidDispose(a, b)
if not be.isNil:
profApi.be = be.dup
profApi.be.getVtxFn =
proc(a: VertexID): auto =
AristoApiProfBeGetVtxFn.profileRunner:
result = be.getVtxFn(a)
profApi.be.getKeyFn =
proc(a: VertexID): auto =
AristoApiProfBeGetKeyFn.profileRunner:
result = be.getKeyFn(a)
profApi.be.putEndFn =
proc(a: PutHdlRef): auto =
AristoApiProfBePutEndFn.profileRunner:
result = be.putEndFn(a)
profApi
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
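
To see how the pieces above fit together, here is a hedged usage sketch (the wiring mirrors the `init()` constructors above and the `handlers_aristo` changes later in this commit; the workload comment is a placeholder): the profiler wraps an existing function table, the backend is swapped for its instrumented copy, and the collected data is read back via `aristo_profile`:

    let
      api = AristoApiRef.init()                     # plain function table
      prof = AristoApiProfRef.init(api, db.backend)
    db.backend = prof.be                            # profile backend calls, too

    # ... run the workload through `prof` instead of `api` ...

    for (ela, fnInx) in prof.data.byElapsed:
      echo ela.toStr, "  ", fnInx                   # slots sorted by elapsed time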

View File

@ -16,7 +16,7 @@ import
stew/interval_set,
../../aristo,
../aristo_walk/persistent,
".."/[aristo_desc, aristo_get, aristo_layers]
".."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise]
const
Vid2 = @[VertexID(LEAST_FREE_VID)].toHashSet

View File

@ -385,16 +385,17 @@ proc deleteImpl(
of Leaf:
? db.collapseLeaf(hike, nibble.byte, nxt.vtx)
let emptySubTreeOk = not db.getVtx(hike.root).isValid
# Squeeze list of recycled vertex IDs
db.top.final.vGen = db.vGen.vidReorg()
ok(not db.getVtx(hike.root).isValid)
ok(emptySubTreeOk)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc delete*(
proc delTree*(
db: AristoDbRef; # Database, top layer
root: VertexID; # Root vertex
accPath: PathID; # Needed for real storage tries

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -120,6 +120,24 @@ type
closeFn*: CloseFn ## Generic destructor
func dup*(be: BackendRef): BackendRef =
if not be.isNil:
result = BackendRef(
filters: be.filters,
getVtxFn: be.getVtxFn,
getKeyFn: be.getKeyFn,
getFilFn: be.getFilFn,
getIdgFn: be.getIdgFn,
getFqsFn: be.getFqsFn,
putBegFn: be.putBegFn,
putVtxFn: be.putVtxFn,
putKeyFn: be.putKeyFn,
putFilFn: be.putFilFn,
putIdgFn: be.putIdgFn,
putFqsFn: be.putFqsFn,
putEndFn: be.putEndFn,
closeFn: be.closeFn)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
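
The point of `dup` is that a backend descriptor can be shallow-copied and then selectively overridden, which is exactly what the profiling constructor above does for `getVtxFn`, `getKeyFn` and `putEndFn`. A minimal sketch of the same pattern (the `debugEcho` probe is made up):

    let
      be = db.backend        # original backend descriptor
      probe = be.dup         # field-by-field copy
    probe.getVtxFn =
      proc(a: VertexID): auto =
        debugEcho "getVtx ", a.uint64
        be.getVtxFn(a)       # delegate to the original getter
    db.backend = probe       # `be` stays intact for restoring later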

View File

@ -47,7 +47,7 @@ proc fetchPayload*(
key: LeafTie;
): Result[PayloadRef,(VertexID,AristoError)] =
## Cascaded attempt to traverse the `Aristo Trie` and fetch the value of a
## leaf vertex. This function is complementary to `merge()`.
## leaf vertex. This function is complementary to `mergePayload()`.
##
key.hikeUp(db).fetchPayloadImpl

View File

@ -309,8 +309,7 @@ proc hashify*(
db: AristoDbRef; # Database, top layer
): Result[void,(VertexID,AristoError)] =
## Add keys to the `Patricia Trie` so that it becomes a `Merkle Patricia
## Tree`. If successful, the function returns the keys (aka Merkle hash) of
## the root vertices.
## Tree`.
##
if 0 < db.dirty.len:
# Set up width-first traversal schedule

View File

@ -549,7 +549,7 @@ proc mergeNodeImpl(
# Public functions
# ------------------------------------------------------------------------------
proc merge*(
proc mergePayload*(
db: AristoDbRef; # Database, top layer
leafTie: LeafTie; # Leaf item to add to the database
payload: PayloadRef; # Payload value
@ -612,27 +612,17 @@ proc merge*(
ok okHike
proc merge*(
proc mergePayload*(
db: AristoDbRef; # Database, top layer
root: VertexID; # MPT state root
path: openArray[byte]; # Even nibbled byte path
payload: PayloadRef; # Payload value
accPath: PathID; # Needed for accounts payload
accPath = VOID_PATH_ID; # Needed for accounts payload
): Result[bool,AristoError] =
## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`
## object.
let lty = LeafTie(root: root, path: ? path.pathToTag)
db.merge(lty, payload, accPath).to(typeof result)
proc merge*(
db: AristoDbRef; # Database, top layer
path: openArray[byte]; # Even nibbled byte path
payload: PayloadRef; # Payload value
): Result[bool,AristoError] =
## Variant of `merge()` for `(VertexID(1),path)` arguments instead of a
## `LeafTie` object.
let lty = LeafTie(root: VertexID(1), path: ? path.pathToTag)
db.merge(lty, payload, VOID_PATH_ID).to(typeof result)
db.mergePayload(lty, payload, accPath).to(typeof result)
proc merge*(
@ -645,9 +635,9 @@ proc merge*(
## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`.
## The argument `data` is stored as-is as a `RawData` payload value.
let pyl = PayloadRef(pType: RawData, rawBlob: @data)
db.merge(root, path, pyl, accPath)
db.mergePayload(root, path, pyl, accPath)
proc merge*(
proc mergeAccount*(
db: AristoDbRef; # Database, top layer
path: openArray[byte]; # Leaf item to add to the database
data: openArray[byte]; # Raw data payload value
@ -656,26 +646,17 @@ proc merge*(
## `LeafTie`. The argument `data` is stored as-is as a `RawData` payload
## value.
let pyl = PayloadRef(pType: RawData, rawBlob: @data)
db.merge(VertexID(1), path, pyl, VOID_PATH_ID)
db.mergePayload(VertexID(1), path, pyl, VOID_PATH_ID)
proc merge*(
proc mergeLeaf*(
db: AristoDbRef; # Database, top layer
leaf: LeafTiePayload; # Leaf item to add to the database
accPath: PathID; # Needed for accounts payload
accPath = VOID_PATH_ID; # Needed for accounts payload
): Result[bool,AristoError] =
## Variant of `merge()`. This function will not indicate if the leaf
## was cached, already.
db.merge(leaf.leafTie, leaf.payload, accPath).to(typeof result)
proc merge*(
db: AristoDbRef; # Database, top layer
leaf: LeafTiePayload; # Leaf item to add to the database
): Result[bool,AristoError] =
## Variant of `merge()`, shortcut for `db.merge(leaf, VOID_PATH_ID)`. Note
## that this function fails unless `leaf.root == VertexID(1)`.
db.merge(leaf.leafTie, leaf.payload, VOID_PATH_ID).to(typeof result)
db.mergePayload(leaf.leafTie, leaf.payload, accPath).to(typeof result)
# ---------------------

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -411,7 +411,7 @@ proc right*(
root: lty.root,
path: ? lty.nearbyNextLeafTie(db, 64, moveRight=true))
iterator right*(
iterator rightPairs*(
db: AristoDbRef; # Database layer
start = low(LeafTie); # Before or at first value
): (LeafTie,PayloadRef) =
@ -472,7 +472,7 @@ proc left*(
root: lty.root,
path: ? lty.nearbyNextLeafTie(db, 64, moveRight=false))
iterator left*(
iterator leftPairs*(
db: AristoDbRef; # Database layer
start = high(LeafTie); # Before or at first value
): (LeafTie,PayloadRef) =
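
Call sites use the renamed iterators as before, e.g. walking all leaves of a trie in ascending path order. A sketch (`VertexID(1)` is the main trie root as noted elsewhere in this commit; `toHex` is assumed from `stew/byteutils`):

    for (lty, pyl) in db.rightPairs LeafTie(root: VertexID(1)):
      echo lty.path.pathAsBlob.toHex   # even-nibbled leaf path as hex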

View File

@ -0,0 +1,202 @@
# Nimbus
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
{.push raises: [].}
import
std/[algorithm, math, sequtils, strformat, strutils, tables, times],
eth/common
type
AristoDbProfData* = tuple[sum: float, sqSum: float, count: int]
AristoDbProfListRef* = ref object of RootRef
## Statistic table synced with name indexes from `AristoDbProfNames`. Here
## a `ref` is used so it can be modified when part of another object.
##
list*: seq[AristoDbProfData]
AristoDbProfEla* = seq[(Duration,seq[uint])]
AristoDbProfMean* = seq[(Duration,seq[uint])]
AristoDbProfCount* = seq[(int,seq[uint])]
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc toDuration(fl: float): Duration =
## Convert the floating point seconds argument `fl` to a `Duration`.
let (s, ns) = fl.splitDecimal
initDuration(seconds = s.int, nanoseconds = (ns * 1_000_000_000).int)
func toFloat(ela: Duration): float =
## Convert the argument `ela` to a floating point seconds result.
let
elaS = ela.inSeconds
elaNs = (ela - initDuration(seconds=elaS)).inNanoSeconds
elaS.float + elaNs.float / 1_000_000_000
proc updateTotal(t: AristoDbProfListRef; fnInx: uint) =
## Summary update helper
if fnInx == 0:
t.list[0] = (0.0, 0.0, 0)
else:
t.list[0][0] += t.list[fnInx][0]
t.list[0][1] += t.list[fnInx][1]
t.list[0][2] += t.list[fnInx][2]
# ---------------------
func ppUs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inMicroseconds
let ns = elapsed.inNanoseconds mod 1_000 # fraction of a micro second
if ns != 0:
# round to hundredths of a microsecond
let du = (ns + 5i64) div 10i64
result &= &".{du:02}"
result &= "us"
func ppMs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inMilliseconds
let ns = elapsed.inNanoseconds mod 1_000_000 # fraction of a milli second
if ns != 0:
# round to hundredths of a millisecond
let dm = (ns + 5_000i64) div 10_000i64
result &= &".{dm:02}"
result &= "ms"
func ppSecs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inSeconds
let ns = elapsed.inNanoseconds mod 1_000_000_000 # fraction of a second
if ns != 0:
# round up
let ds = (ns + 5_000_000i64) div 10_000_000i64
result &= &".{ds:02}"
result &= "s"
func ppMins(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inMinutes
let ns = elapsed.inNanoseconds mod 60_000_000_000 # fraction of a minute
if ns != 0:
# round up
let dm = (ns + 500_000_000i64) div 1_000_000_000i64
result &= &":{dm:02}"
result &= "m"
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
func toStr*(elapsed: Duration): string =
try:
if 0 < times.inMinutes(elapsed):
result = elapsed.ppMins
elif 0 < times.inSeconds(elapsed):
result = elapsed.ppSecs
elif 0 < times.inMilliSeconds(elapsed):
result = elapsed.ppMs
elif 0 < times.inMicroSeconds(elapsed):
result = elapsed.ppUs
else:
result = $elapsed.inNanoSeconds & "ns"
except ValueError:
result = $elapsed
proc update*(t: AristoDbProfListRef; inx: uint; ela: Duration) =
## Register time `ela` spent while executing the function with slot index `inx`
let s = ela.toFloat
t.list[inx].sum += s
t.list[inx].sqSum += s * s
t.list[inx].count.inc
proc byElapsed*(t: AristoDbProfListRef): AristoDbProfEla =
## Collate `CoreDb` function symbols by elapsed times, sorted with largest
## `Duration` first. Zero `Duration` entries are discarded.
var u: Table[Duration,seq[uint]]
for inx in 0u ..< t.list.len.uint:
t.updateTotal inx
let (secs,_,count) = t.list[inx]
if 0 < count:
let ela = secs.toDuration
u.withValue(ela,val):
val[].add inx
do:
u[ela] = @[inx]
result.add (t.list[0u].sum.toDuration, @[0u])
for ela in u.keys.toSeq.sorted Descending:
u.withValue(ela,val):
result.add (ela, val[])
proc byMean*(t: AristoDbProfListRef): AristoDbProfMean =
## Collate `CoreDb` function symbols by elapsed mean times, sorted with
## largest `Duration` first. Zero `Duration` entries are discarded.
var u: Table[Duration,seq[uint]]
for inx in 0u ..< t.list.len.uint:
t.updateTotal inx
let (secs,_,count) = t.list[inx]
if 0 < count:
let ela = (secs / count.float).toDuration
u.withValue(ela,val):
val[].add inx
do:
u[ela] = @[inx]
result.add ((t.list[0u].sum / t.list[0u].count.float).toDuration, @[0u])
for mean in u.keys.toSeq.sorted Descending:
u.withValue(mean,val):
result.add (mean, val[])
proc byVisits*(t: AristoDbProfListRef): AristoDbProfCount =
## Collate `CoreDb` function symbols by number of visits, sorted with
## largest number first.
var u: Table[int,seq[uint]]
for fnInx in 0 ..< t.list.len:
t.updateTotal fnInx.uint
let (_,_,count) = t.list[fnInx]
if 0 < count:
u.withValue(count,val):
val[].add fnInx.uint
do:
u[count] = @[fnInx.uint]
result.add (t.list[0u].count, @[0u])
for count in u.keys.toSeq.sorted Descending:
u.withValue(count,val):
result.add (count, val[])
func stats*(
t: AristoDbProfListRef;
inx: uint;
): tuple[n: int, mean: Duration, stdDev: Duration, devRatio: float] =
## Compute mean and standard deviation of timing
let data = t.list[inx]
result.n = data.count
if 0 < result.n:
let
mean = data.sum / result.n.float
sqMean = data.sqSum / result.n.float
meanSq = mean * mean
# Mathematically, `meanSq <= sqMean` but there might be rounding errors
# if `meanSq` and `sqMean` are approximately the same.
sigma = sqMean - min(meanSq,sqMean)
stdDev = sigma.sqrt
result.mean = mean.toDuration
result.stdDev = stdDev.toDuration # `stdDev` is already the square root of `sigma`
if 0 < mean:
result.devRatio = stdDev / mean
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
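
Taken together, `update` feeds the running sums and `stats` derives the mean and deviation from them. A self-contained sketch (the slot count and durations are made up; slot 0 is reserved for the summary):

    import std/times

    let t = AristoDbProfListRef(
      list: newSeq[AristoDbProfData](2))

    t.update(1, initDuration(milliseconds = 3))
    t.update(1, initDuration(milliseconds = 5))

    let s = t.stats(1)
    echo s.n, " samples, mean ", s.mean.toStr   # => 2 samples, mean 4ms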

View File

@ -22,14 +22,15 @@ import
# ------------------------------------------------------------------------------
proc vidFetch*(db: AristoDbRef; pristine = false): VertexID =
## Create a new `VertexID`. Reusable vertex *ID*s are kept in a list where
## the top entry *ID* has the property that any other *ID* larger is also not
## not used on the database.
## Recycle or create a new `VertexID`. Reusable vertex *ID*s are kept in a
## list where the top entry *ID* has the property that any other *ID* larger
## is also not used on the database.
##
## The function prefers to return recycled vertex *ID*s if there are any.
## When the argument `pristine` is set `true`, the function guarantees to
## return a non-recycled, brand new vertex *ID* which is the preferred mode
## when creating leaf vertices.
##
if db.vGen.len == 0:
# Note that `VertexID(1)` is the root of the main trie
db.top.final.vGen = @[VertexID(LEAST_FREE_VID+1)]
@ -47,6 +48,7 @@ proc vidFetch*(db: AristoDbRef; pristine = false): VertexID =
proc vidPeek*(db: AristoDbRef): VertexID =
## Like `new()` without consuming this *ID*. It will return the *ID* that
## would be returned by the `new()` function.
##
case db.vGen.len:
of 0:
VertexID(LEAST_FREE_VID)
@ -59,6 +61,7 @@ proc vidPeek*(db: AristoDbRef): VertexID =
proc vidDispose*(db: AristoDbRef; vid: VertexID) =
## Recycle the argument `vid` which is useful after deleting entries from
## the vertex table, in order to keep the `VertexID` type key values small.
##
if LEAST_FREE_VID <= vid.distinctBase:
if db.vGen.len == 0:
db.top.final.vGen = @[vid]
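
The allocate-then-recycle contract shows up later in this commit in the `CoreDb` handlers: a pristine ID is fetched for a new leaf and disposed again if the merge fails. A hedged sketch (`key`, `val` and `accPath` are placeholders):

    let vid = db.vidFetch(pristine = true)   # brand new vertex ID
    if db.merge(vid, key, val, accPath).isErr:
      db.vidDispose vid                      # recycle the unused ID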

View File

@ -13,8 +13,12 @@
import
eth/common,
results,
"../.."/[aristo, aristo/aristo_desc, aristo/aristo_walk],
"../.."/[kvt, kvt/kvt_desc, kvt/kvt_init/memory_only],
../../aristo,
../../aristo/[
aristo_desc, aristo_nearby, aristo_path, aristo_tx, aristo_serialise,
aristo_walk],
../../kvt,
../../kvt/[kvt_desc, kvt_init, kvt_tx, kvt_walk],
".."/[base, base/base_desc],
./aristo_db/[common_desc, handlers_aristo, handlers_kvt]
@ -244,6 +248,14 @@ proc newAristoVoidCoreDbRef*(): CoreDbRef =
# Public helpers for direct backend access
# ------------------------------------------------------------------------------
func toAristoProfData*(
db: CoreDbRef;
): tuple[aristo: AristoDbProfListRef, kvt: KvtDbProfListRef] =
when CoreDbEnableApiProfiling:
if db.isAristo:
result.aristo = db.AristoCoreDbRef.adbBase.api.AristoApiProfRef.data
result.kvt = db.AristoCoreDbRef.kdbBase.api.KvtApiProfRef.data
func toAristo*(be: CoreDbKvtBackendRef): KvtDbRef =
if be.parent.isAristo:
return be.AristoCoreDbKvtBE.kdb
@ -268,7 +280,7 @@ iterator aristoKvtPairs*(dsc: CoreDxKvtRef): (Blob,Blob) {.rlpRaise.} =
iterator aristoMptPairs*(dsc: CoreDxMptRef): (Blob,Blob) {.noRaise.} =
let mpt = dsc.to(AristoDbRef)
for (k,v) in mpt.right LeafTie(root: dsc.rootID):
for (k,v) in mpt.rightPairs LeafTie(root: dsc.rootID):
yield (k.path.pathAsBlob, mpt.serialise(v).valueOr(EmptyBlob))
iterator aristoReplicateMem*(dsc: CoreDxMptRef): (Blob,Blob) {.rlpRaise.} =

View File

@ -17,7 +17,6 @@ import
stew/byteutils,
results,
../../../aristo,
../../../aristo/[aristo_desc, aristo_hike, aristo_vid],
../../base,
../../base/base_desc,
./common_desc
@ -26,6 +25,7 @@ type
AristoBaseRef* = ref object
parent: CoreDbRef ## Opaque top level descriptor
adb: AristoDbRef ## Aristo MPT database
api*: AristoApiRef ## Api functions can be re-directed
gq: seq[AristoChildDbRef] ## Garbage queue, deferred disposal
accCache: CoreDxAccRef ## Pre-configured accounts descriptor to share
mptCache: MptCacheArray ## Pre-configured accounts descriptor to share
@ -112,7 +112,7 @@ func to(address: EthAddress; T: type PathID): T =
# ------------------------------------------------------------------------------
# Auto destructor should appear before constructor
# to prevent cannot bind another `=destroy` error
# to prevent **cannot bind another `=destroy` error**
# ------------------------------------------------------------------------------
proc `=destroy`(cMpt: var AristoChildDbObj) =
@ -244,8 +244,11 @@ proc newTrieCtx(
info: static[string];
): CoreDbRc[AristoCoreDbTrie] =
base.gc()
var trie = AristoCoreDbTrie(trie)
let db = base.parent
var
trie = AristoCoreDbTrie(trie)
let
db = base.parent
api = base.api
# Update `trie` argument, handle default settings
block validateRoot:
@ -271,9 +274,9 @@ proc newTrieCtx(
# Get normalised `saveMode` and `MPT`
let (mode, mpt) = case saveMode:
of TopShot:
(saveMode, ? base.adb.forkTop.toRc(db, info))
(saveMode, ? api.forkTop(base.adb).toRc(db, info))
of Companion:
(saveMode, ? base.adb.fork.toRc(db, info))
(saveMode, ? api.fork(base.adb).toRc(db, info))
of Shared, AutoSave:
if base.adb.backend.isNil:
(Shared, base.adb)
@ -287,8 +290,6 @@ proc newTrieCtx(
break body
# Use cached descriptor
# AristoCoreDxMptRef(base.mptCache[trie.kind])
let ctx = base.mptCache[trie.kind].ctx
if not trie.ctx.isValid:
trie.ctx = ctx
@ -326,6 +327,7 @@ proc getTrieFn(
kind = if LEAST_FREE_VID <= root.distinctBase: StorageTrie
else: CoreDbSubTrie(root)
doAssert kind != StorageTrie or cMpt.accPath.isValid
result = cMpt.base.parent.bless AristoCoreDbTrie(
kind: kind,
root: root,
@ -342,13 +344,14 @@ proc persistent(
let
base = cMpt.base
mpt = cMpt.mpt
api = base.api
db = base.parent
rc = mpt.stow(persistent = true)
rc = api.stow(mpt, persistent = true)
# note that `gc()` may call `persistent()` so there is no `base.gc()` here
if rc.isOk:
ok()
elif mpt.level == 0:
elif api.level(mpt) == 0:
err(rc.error.toError(db, info))
else:
err(rc.error.toError(db, info, cMpt.txError))
@ -367,9 +370,10 @@ proc forget(
if mpt != base.adb:
let
db = base.parent
rc = cMpt.mpt.forget()
api = base.api
rc = api.forget(cMpt.mpt)
if rc.isErr:
let db = base.parent
result = err(rc.error.toError(db, info))
# ------------------------------------------------------------------------------
@ -409,9 +413,10 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
let
mpt = cMpt.mpt
rc = mpt.fetchPayload(cMpt.root, k)
api = cMpt.base.api
rc = api.fetchPayload(mpt, cMpt.root, k)
if rc.isOk:
mpt.serialise(rc.value).toRc(db, info)
api.serialise(mpt, rc.value).toRc(db, info)
elif rc.error[1] != FetchPathNotFound:
err(rc.error.toError(db, info))
else:
@ -425,18 +430,19 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
): CoreDbRc[void] =
let
db = cMpt.base.parent
api = cMpt.base.api
mpt = cMpt.mpt
rootOk = cMpt.root.isValid
# Provide root ID on-the-fly
if not rootOk:
cMpt.root = mpt.vidFetch(pristine=true)
cMpt.root = api.vidFetch(mpt, pristine=true)
let rc = mpt.merge(cMpt.root, k, v, cMpt.accPath)
let rc = api.merge(mpt, cMpt.root, k, v, cMpt.accPath)
if rc.isErr:
# Re-cycle unused ID (prevents leaking IDs)
if not rootOk:
mpt.vidDispose cMpt.root
api.vidDispose(mpt, cMpt.root)
cMpt.root = VoidTrieID
return err(rc.error.toError(db, info))
ok()
@ -448,6 +454,7 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
): CoreDbRc[void] =
let
db = cMpt.base.parent
api = cMpt.base.api
mpt = cMpt.mpt
if not cMpt.root.isValid and cMpt.accPath.isValid:
@ -455,7 +462,7 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
# but no data have been added, yet.
return ok()
let rc = mpt.delete(cMpt.root, k, cMpt.accPath)
let rc = api.delete(mpt, cMpt.root, k, cMpt.accPath)
if rc.isErr:
if rc.error[1] == DelPathNotFound:
return err(rc.error.toError(db, info, MptNotFound))
@ -473,10 +480,11 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
info: static[string];
): CoreDbRc[bool] =
let
db = cMpt.base.parent
mpt = cMpt.mpt
rc = mpt.hasPath(cMpt.root, key)
api = cMpt.base.api
rc = api.hasPath(mpt, cMpt.root, key)
if rc.isErr:
let db = cMpt.base.parent
return err(rc.error.toError(db, info))
ok(rc.value)
@ -545,11 +553,12 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
): CoreDbRc[CoreDbAccount] =
let
db = cAcc.base.parent
api = cAcc.base.api
mpt = cAcc.mpt
pyl = block:
let
key = address.keccakHash.data
rc = mpt.fetchPayload(cAcc.root, key)
rc = api.fetchPayload(mpt, cAcc.root, key)
if rc.isOk:
rc.value
elif rc.error[1] != FetchPathNotFound:
@ -569,10 +578,11 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
): CoreDbRc[void] =
let
db = cAcc.base.parent
api = cAcc.base.api
mpt = cAcc.mpt
key = acc.address.keccakHash.data
val = acc.toPayloadRef()
rc = mpt.merge(cAcc.root, key, val, VOID_PATH_ID)
rc = api.mergePayload(mpt, cAcc.root, key, val)
if rc.isErr:
return err(rc.error.toError(db, info))
ok()
@ -584,9 +594,10 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
): CoreDbRc[void] =
let
db = cAcc.base.parent
api = cAcc.base.api
mpt = cAcc.mpt
key = address.keccakHash.data
rc = mpt.delete(cAcc.root, key, VOID_PATH_ID)
rc = api.delete(mpt, cAcc.root, key, VOID_PATH_ID)
if rc.isErr:
if rc.error[1] == DelPathNotFound:
return err(rc.error.toError(db, info, AccNotFound))
@ -600,16 +611,17 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
): CoreDbRc[void] =
let
db = cAcc.base.parent
api = cAcc.base.api
mpt = cAcc.mpt
key = address.keccakHash.data
pyl = mpt.fetchPayload(cAcc.root, key).valueOr:
pyl = api.fetchPayload(mpt, cAcc.root, key).valueOr:
return ok()
# Use storage ID from account and delete that sub-trie
if pyl.pType == AccountData:
let stoID = pyl.account.storageID
if stoID.isValid:
let rc = mpt.delete(stoID, address.to(PathID))
let rc = api.delTree(mpt, stoID, address.to(PathID))
if rc.isErr:
return err(rc.error.toError(db, info))
ok()
@ -621,9 +633,10 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
): CoreDbRc[bool] =
let
db = cAcc.base.parent
api = cAcc.base.api
mpt = cAcc.mpt
key = address.keccakHash.data
rc = mpt.hasPath(cAcc.root, key)
rc = api.hasPath(mpt, cAcc.root, key)
if rc.isErr:
return err(rc.error.toError(db, info))
ok(rc.value)
@ -700,6 +713,8 @@ proc gc*(base: AristoBaseRef) =
## entry and mostly be empty.
const
info = "gc()"
let
api = base.api
var
resetQ = 0
first = 0
@ -709,7 +724,7 @@ proc gc*(base: AristoBaseRef) =
if base.gq[0].mpt == base.adb:
first = 1
let cMpt = base.gq[0]
if 0 < cMpt.mpt.level:
if 0 < api.level(cMpt.mpt):
resetQ = 1
else:
let rc = cMpt.persistent info
@ -721,7 +736,7 @@ proc gc*(base: AristoBaseRef) =
for n in first ..< base.gq.len:
let cMpt = base.gq[n]
# FIXME: Currently no strategy for `Companion` and `TopShot`
let rc = cMpt.mpt.forget
let rc = base.api.forget(cMpt.mpt)
if rc.isErr:
let error = rc.error.toError(base.parent, info).errorPrint
debug logTxt info, saveMode=cMpt.saveMode, error
@ -740,19 +755,18 @@ func txTop*(
base: AristoBaseRef;
info: static[string];
): CoreDbRc[AristoTxRef] =
base.adb.txTop.toRc(base.parent, info)
base.api.txTop(base.adb).toRc(base.parent, info)
proc txBegin*(
base: AristoBaseRef;
info: static[string];
): CoreDbRc[AristoTxRef] =
base.adb.txBegin.toRc(base.parent, info)
base.api.txBegin(base.adb).toRc(base.parent, info)
# ---------------------
func getLevel*(base: AristoBaseRef): int =
base.adb.level
proc getLevel*(base: AristoBaseRef): int =
base.api.level(base.adb)
proc tryHash*(
base: AristoBaseRef;
@ -767,7 +781,7 @@ proc tryHash*(
if not root.isValid:
return ok(EMPTY_ROOT_HASH)
let rc = trie.ctx.mpt.getKeyRc root
let rc = base.api.getKeyRc(trie.ctx.mpt, root)
if rc.isErr:
return err(rc.error.toError(base.parent, info, HashNotAvailable))
@ -807,10 +821,10 @@ proc rootHash*(
): CoreDbRc[Hash256] =
let
db = base.parent
api = base.api
trie = trie.AristoCoreDbTrie
if not trie.ctx.isValid:
return err(MptContextMissing.toError(db, info, HashNotAvailable))
let
mpt = trie.ctx.mpt
root = trie.to(VertexID)
@ -818,10 +832,10 @@ proc rootHash*(
if not root.isValid:
return ok(EMPTY_ROOT_HASH)
? mpt.hashify.toVoidRc(db, info, HashNotAvailable)
? api.hashify(mpt).toVoidRc(db, info, HashNotAvailable)
let key = block:
let rc = mpt.getKeyRc root
let rc = api.getKeyRc(mpt, root)
if rc.isErr:
doAssert rc.error in {GetKeyNotFound,GetKeyUpdateNeeded}
return err(rc.error.toError(base.parent, info, HashNotAvailable))
@ -832,6 +846,7 @@ proc rootHash*(
proc rootHash*(mpt: CoreDxMptRef): VertexID =
AristoCoreDxMptRef(mpt).ctx.root
proc getTrie*(
base: AristoBaseRef;
kind: CoreDbSubTrie;
@ -842,6 +857,7 @@ proc getTrie*(
let
db = base.parent
adb = base.adb
api = base.api
ethAddr = (if address.isNone: EthAddress.default else: address.unsafeGet)
path = (if address.isNone: VOID_PATH_ID else: ethAddr.to(PathID))
base.gc() # update pending changes
@ -859,14 +875,15 @@ proc getTrie*(
trie.address = ethAddr
return ok(db.bless trie)
? adb.hashify.toVoidRc(db, info, HashNotAvailable)
? api.hashify(adb).toVoidRc(db, info, HashNotAvailable)
# Check whether hash is available as state root on main trie
block:
let rc = adb.getKeyRc VertexID(kind)
let rc = api.getKeyRc(adb, VertexID kind)
if rc.isErr:
doAssert rc.error == GetKeyNotFound
elif rc.value == root.to(HashKey):
doAssert kind != StorageTrie or path.isValid
var trie = AristoCoreDbTrie(
kind: kind,
root: VertexID(kind),
@ -891,7 +908,8 @@ proc verify*(base: AristoBaseRef; trie: CoreDbTrieRef): bool =
return false
if not trie.root.isValid:
return true
if trie.accPath.to(NibblesSeq).hikeUp(AccountsTrieID,base.adb).isOk:
let path = trie.accPath.to(NibblesSeq)
if base.api.hikeUp(path, AccountsTrieID, base.adb).isOk:
return true
false
@ -904,10 +922,12 @@ proc newMptHandler*(
let
trie = ? base.newTrieCtx(trie, saveMode, info)
db = base.parent
api = base.api
if trie.kind == StorageTrie and trie.root.isValid:
let
adb = base.adb
rc = trie.accPath.to(NibblesSeq).hikeUp(AccountsTrieID,adb)
path = trie.accPath.to(NibblesSeq)
rc = api.hikeUp(path, AccountsTrieID, adb)
if rc.isErr:
return err(rc.error[1].toError(db, info, AccNotFound))
if trie.reset:
@ -915,7 +935,8 @@ proc newMptHandler*(
# between `VertexID(2) ..< LEAST_FREE_VID`. At the moment, this applies to
# `GenericTrie` type sub-tries somehow emulating the behaviour of a new
# empty MPT on the legacy database (handle with care, though.)
let rc = trie.ctx.mpt.delete(trie.root, VOID_PATH_ID)
let
rc = api.delTree(trie.ctx.mpt, trie.root, VOID_PATH_ID)
if rc.isErr:
return err(rc.error.toError(db, info, AutoFlushFailed))
trie.reset = false
@ -955,11 +976,19 @@ proc destroy*(base: AristoBaseRef; flush: bool) =
base.gc()
# Close descriptor
base.adb.finish(flush)
base.api.finish(base.adb, flush)
func init*(T: type AristoBaseRef; db: CoreDbRef; adb: AristoDbRef): T =
result = T(parent: db, adb: adb)
result = T(
parent: db,
api: AristoApiRef.init(),
adb: adb)
when CoreDbEnableApiProfiling:
let profApi = AristoApiProfRef.init(result.api, adb.backend)
result.api = profApi
result.adb.backend = profApi.be
# Provide pre-configured handlers to share
for trie in AccountsTrie .. high(CoreDbSubTrie):

View File

@ -15,7 +15,6 @@ import
eth/common,
results,
../../../kvt,
../../../kvt/kvt_desc,
../../base,
../../base/base_desc,
./common_desc
@ -24,6 +23,7 @@ type
KvtBaseRef* = ref object
parent: CoreDbRef ## Opaque top level descriptor
kdb: KvtDbRef ## Key-value table
api*: KvtApiRef ## Api functions can be re-directed
gq: seq[KvtChildDbRef] ## Garbage queue, deferred disposal
cache: CoreDxKvtRef ## Pre-configured descriptor to share
@ -93,7 +93,7 @@ proc `=destroy`(cKvt: var KvtChildDbObj) =
# Do some heuristics to avoid duplicates:
block addToBatchQueue:
if kvt != base.kdb: # not base descriptor?
if kvt.level == 0: # no transaction pending?
if base.api.level(kvt) == 0: # no transaction pending?
break addToBatchQueue # add to destructor queue
else:
break body # ignore `kvt`
@ -129,13 +129,14 @@ proc persistent(
let
base = cKvt.base
kvt = cKvt.kvt
api = base.api
db = base.parent
rc = kvt.stow()
rc = api.stow(kvt)
# Note that `gc()` may call `persistent()` so there is no `base.gc()` here
if rc.isOk:
ok()
elif kvt.level == 0:
elif api.level(kvt) == 0:
err(rc.error.toError(db, info))
else:
err(rc.error.toError(db, info, KvtTxPending))
@ -154,7 +155,7 @@ proc forget(
if kvt != base.kdb:
let
db = base.parent
rc = kvt.forget()
rc = base.api.forget(kvt)
if rc.isErr:
result = err(rc.error.toError(db, info))
@ -186,9 +187,11 @@ proc kvtMethods(cKvt: KvtChildDbRef): CoreDbKvtFns =
info: static[string];
): CoreDbRc[Blob] =
## Member of `CoreDbKvtFns`
let rc = cKvt.kvt.get(k)
let
base = cKvt.base
rc = base.api.get(cKvt.kvt, k)
if rc.isErr:
let db = cKvt.base.parent
let db = base.parent
if rc.error == GetNotFound:
return err(rc.error.toError(db, info, KvtNotFound))
else:
@ -201,9 +204,11 @@ proc kvtMethods(cKvt: KvtChildDbRef): CoreDbKvtFns =
v: openArray[byte];
info: static[string];
): CoreDbRc[void] =
let rc = cKvt.kvt.put(k,v)
let
base = cKvt.base
rc = base.api.put(cKvt.kvt, k,v)
if rc.isErr:
return err(rc.error.toError(cKvt.base.parent, info))
return err(rc.error.toError(base.parent, info))
ok()
proc kvtDel(
@ -211,9 +216,11 @@ proc kvtMethods(cKvt: KvtChildDbRef): CoreDbKvtFns =
k: openArray[byte];
info: static[string];
): CoreDbRc[void] =
let rc = cKvt.kvt.del k
let
base = cKvt.base
rc = base.api.del(cKvt.kvt, k)
if rc.isErr:
return err(rc.error.toError(cKvt.base.parent, info))
return err(rc.error.toError(base.parent, info))
ok()
proc kvtHasKey(
@ -221,9 +228,11 @@ proc kvtMethods(cKvt: KvtChildDbRef): CoreDbKvtFns =
k: openArray[byte];
info: static[string];
): CoreDbRc[bool] =
let rc = cKvt.kvt.hasKey(k)
let
base = cKvt.base
rc = base.api.hasKey(cKvt.kvt, k)
if rc.isErr:
return err(rc.error.toError(cKvt.base.parent, info))
return err(rc.error.toError(base.parent, info))
ok(rc.value)
CoreDbKvtFns(
@ -274,6 +283,7 @@ proc gc*(base: KvtBaseRef) =
## entry and mostly be empty.
const info = "gc()"
var kdbAutoSave = false
let api = base.api
proc saveAndDestroy(cKvt: KvtChildDbRef): CoreDbRc[void] =
if cKvt.kvt != base.kdb:
@ -289,14 +299,14 @@ proc gc*(base: KvtBaseRef) =
# There might be a single queue item left over from the last run
# which can be ignored right away as the body below would not change
# anything.
if base.gq.len != 1 or base.gq[0].kvt.level == 0:
if base.gq.len != 1 or api.level(base.gq[0].kvt) == 0:
var later = KvtChildDbRef(nil)
while 0 < base.gq.len:
var q: seq[KvtChildDbRef]
base.gq.swap q # now `=destroy()` may refill while destructing, below
for cKvt in q:
if 0 < cKvt.kvt.level:
if 0 < api.level(cKvt.kvt):
assert cKvt.kvt == base.kdb and cKvt.saveMode == AutoSave
later = cKvt # do it later when no transaction pending
continue
@ -317,13 +327,13 @@ func txTop*(
base: KvtBaseRef;
info: static[string];
): CoreDbRc[KvtTxRef] =
base.kdb.txTop.toRc(base.parent, info)
base.api.txTop(base.kdb).toRc(base.parent, info)
proc txBegin*(
base: KvtBaseRef;
info: static[string];
): CoreDbRc[KvtTxRef] =
base.kdb.txBegin.toRc(base.parent, info)
base.api.txBegin(base.kdb).toRc(base.parent, info)
# ------------------------------------------------------------------------------
# Public constructors and related
@ -374,11 +384,14 @@ proc destroy*(base: KvtBaseRef; flush: bool) =
base.gc()
# Close descriptor
base.kdb.finish(flush)
base.api.finish(base.kdb, flush)
func init*(T: type KvtBaseRef; db: CoreDbRef; kdb: KvtDbRef): T =
result = T(parent: db, kdb: kdb)
result = T(
parent: db,
api: KvtApiRef.init(),
kdb: kdb)
# Provide pre-configured handlers to share
let cKvt = KvtChildDbRef(
@ -386,6 +399,11 @@ func init*(T: type KvtBaseRef; db: CoreDbRef; kdb: KvtDbRef): T =
kvt: kdb,
saveMode: Shared)
when CoreDbEnableApiProfiling:
let profApi = KvtApiProfRef.init(result.api, kdb.backend)
result.api = profApi
result.kdb.backend = profApi.be
result.cache = db.bless KvtCoreDxKvtRef(
ctx: cKvt,
methods: cKvt.kvtMethods)

View File

@ -14,7 +14,8 @@ import
eth/common,
results,
../../aristo,
../../aristo/[aristo_persistent, aristo_walk/persistent],
../../aristo/[
aristo_desc, aristo_persistent, aristo_walk/persistent, aristo_tx],
../../kvt,
../../kvt/kvt_persistent,
../base,

View File

@ -49,6 +49,7 @@ export
CoreDbKvtBackendRef,
CoreDbMptBackendRef,
CoreDbPersistentTypes,
CoreDbProfListRef,
CoreDbRef,
CoreDbSaveFlags,
CoreDbSubTrie,
@ -59,13 +60,7 @@ export
CoreDxKvtRef,
CoreDxMptRef,
CoreDxPhkRef,
CoreDxTxRef,
# Profiling support
byElapsed,
byMean,
byVisits,
stats
CoreDxTxRef
const
CoreDbProvideLegacyAPI* = ProvideLegacyAPI
@ -81,10 +76,6 @@ when ProvideLegacyAPI:
when AutoValidateDescriptors:
import ./base/validate
when EnableApiTracking and EnableApiProfiling:
var coreDbProfTab*: CoreDbProfFnInx
# More settings
const
logTxt = "CoreDb "
@ -128,7 +119,7 @@ when ProvideLegacyAPI:
## Template with code section that will be discarded if logging is
## disabled at compile time when `EnableApiTracking` is `false`.
when EnableApiTracking:
w.beginLegaApi()
w.beginLegaApi(s)
code
const ctx {.inject,used.} = s
@ -142,8 +133,6 @@ when ProvideLegacyAPI:
template ifTrackLegaApi*(w: CoreDbApiTrackRef; code: untyped) =
when EnableApiTracking:
w.endLegaApiIf:
when EnableApiProfiling:
coreDbProfTab.update(ctx, elapsed)
code
@ -155,7 +144,7 @@ template setTrackNewApi(
## Template with code section that will be discarded if logging is
## disabled at compile time when `EnableApiTracking` is `false`.
when EnableApiTracking:
w.beginNewApi()
w.beginNewApi(s)
code
const ctx {.inject,used.} = s
@ -169,8 +158,6 @@ template setTrackNewApi*(
template ifTrackNewApi*(w: CoreDxApiTrackRef; code: untyped) =
when EnableApiTracking:
w.endNewApiIf:
when EnableApiProfiling:
coreDbProfTab.update(ctx, elapsed)
code
# ---------
@ -212,6 +199,8 @@ proc bless*(db: CoreDbRef): CoreDbRef =
## Verify descriptor
when AutoValidateDescriptors:
db.validate
when CoreDbEnableApiProfiling:
db.profTab = CoreDbProfListRef.init()
db
proc bless*(db: CoreDbRef; trie: CoreDbTrieRef): CoreDbTrieRef =
@ -268,6 +257,13 @@ proc verify*(trie: CoreDbTrieRef): bool =
# Public main descriptor methods
# ------------------------------------------------------------------------------
proc dbProfData*(db: CoreDbRef): CoreDbProfListRef =
## Return profiling data table (only available in profiling mode). If
## available (i.e. non-nil), result data can be organised by the functions
## available with `aristo_profile`.
when CoreDbEnableApiProfiling:
db.profTab
proc dbType*(db: CoreDbRef): CoreDbType =
## Getter, print DB type identifier
##

View File

@ -11,11 +11,11 @@
{.push raises: [].}
import
std/[algorithm, math, sequtils, strformat, strutils, tables, times,
typetraits],
std/[strutils, times, typetraits],
eth/common,
results,
stew/byteutils,
../../aristo/aristo_profile,
"."/[api_new_desc, api_legacy_desc, base_desc]
type
@ -141,77 +141,13 @@ type
TxRollbackFn = "tx/rollback"
TxSaveDisposeFn = "tx/safeDispose"
CoreDbProfFnInx* = array[CoreDbFnInx,(float,float,int)]
CoreDbProfEla* = seq[(Duration,seq[CoreDbFnInx])]
CoreDbProfMean* = seq[(Duration,seq[CoreDbFnInx])]
CoreDbProfCount* = seq[(int,seq[CoreDbFnInx])]
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc toDuration(fl: float): Duration =
## Convert the floating point seconds argument `fl` to a `Duration`.
let (s, ns) = fl.splitDecimal
initDuration(seconds = s.int, nanoseconds = (ns * 1_000_000_000).int)
func toFloat(ela: Duration): float =
## Convert the argument `ela` to a floating point seconds result.
let
elaS = ela.inSeconds
elaNs = (ela - initDuration(seconds=elaS)).inNanoSeconds
elaS.float + elaNs.float / 1_000_000_000
proc updateTotal(t: var CoreDbProfFnInx; fnInx: CoreDbFnInx) =
## Summary update helper
if fnInx == SummaryItem:
t[SummaryItem] = (0.0, 0.0, 0)
else:
t[SummaryItem][0] += t[fnInx][0]
t[SummaryItem][1] += t[fnInx][1]
t[SummaryItem][2] += t[fnInx][2]
# -----------------
func oaToStr(w: openArray[byte]): string =
w.toHex.toLowerAscii
func ppUs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inMicroseconds
let ns = elapsed.inNanoseconds mod 1_000 # fraction of a micro second
if ns != 0:
# round to hundredths of a microsecond
let du = (ns + 5i64) div 10i64
result &= &".{du:02}"
result &= "us"
func ppMs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inMilliseconds
let ns = elapsed.inNanoseconds mod 1_000_000 # fraction of a milli second
if ns != 0:
# round to hundredths of a millisecond
let dm = (ns + 5_000i64) div 10_000i64
result &= &".{dm:02}"
result &= "ms"
func ppSecs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inSeconds
let ns = elapsed.inNanoseconds mod 1_000_000_000 # fraction of a second
if ns != 0:
# round to hundredths of a second
let ds = (ns + 5_000_000i64) div 10_000_000i64
result &= &".{ds:02}"
result &= "s"
func ppMins(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inMinutes
let ns = elapsed.inNanoseconds mod 60_000_000_000 # fraction of a minute
if ns != 0:
# round to whole seconds
let dm = (ns + 500_000_000i64) div 1_000_000_000i64
result &= &":{dm:02}"
result &= "m"
# ------------------------------------------------------------------------------
# Public API logging helpers
# ------------------------------------------------------------------------------
@ -275,26 +211,14 @@ proc toStr*(rc: CoreDbRc[CoreDxCaptRef]): string = rc.toStr "capt"
proc toStr*(rc: CoreDbRc[CoreDxMptRef]): string = rc.toStr "mpt"
proc toStr*(rc: CoreDbRc[CoreDxAccRef]): string = rc.toStr "acc"
func toStr*(elapsed: Duration): string =
try:
if 0 < times.inMinutes(elapsed):
result = elapsed.ppMins
elif 0 < times.inSeconds(elapsed):
result = elapsed.ppSecs
elif 0 < times.inMilliSeconds(elapsed):
result = elapsed.ppMs
elif 0 < times.inMicroSeconds(elapsed):
result = elapsed.ppUs
else:
result = $elapsed.inNanoSeconds & "ns"
except ValueError:
result = $elapsed
func toStr*(ela: Duration): string =
aristo_profile.toStr(ela)
# ------------------------------------------------------------------------------
# Public legacy API logging framework
# ------------------------------------------------------------------------------
template beginLegaApi*(w: CoreDbApiTrackRef) =
template beginLegaApi*(w: CoreDbApiTrackRef; s: static[CoreDbFnInx]) =
when typeof(w) is CoreDbRef:
let db = w
else:
@ -304,7 +228,9 @@ template beginLegaApi*(w: CoreDbApiTrackRef) =
db.trackNewApi = false
defer: db.trackNewApi = save
let blaStart {.inject.} = getTime()
when CoreDbEnableApiProfiling:
const blaCtx {.inject.} = s # Local use only
let blaStart {.inject.} = getTime() # Local use only
template endLegaApiIf*(w: CoreDbApiTrackRef; code: untyped) =
block:
@ -312,16 +238,22 @@ template endLegaApiIf*(w: CoreDbApiTrackRef; code: untyped) =
let db = w
else:
let db = w.distinctBase.parent
if db.trackLegaApi:
when CoreDbEnableApiProfiling:
let elapsed {.inject,used.} = getTime() - blaStart
aristo_profile.update(db.profTab, blaCtx.ord, elapsed)
if db.trackLegaApi:
when not CoreDbEnableApiProfiling: # otherwise use variable above
let elapsed {.inject,used.} = getTime() - blaStart
code
# ------------------------------------------------------------------------------
# Public new API logging framework
# ------------------------------------------------------------------------------
template beginNewApi*(w: CoreDxApiTrackRef) =
let bnaStart {.inject.} = getTime()
template beginNewApi*(w: CoreDxApiTrackRef; s: static[CoreDbFnInx]) =
when CoreDbEnableApiProfiling:
const bnaCtx {.inject.} = s # Local use only
let bnaStart {.inject.} = getTime() # Local use only
template endNewApiIf*(w: CoreDxApiTrackRef; code: untyped) =
block:
@ -330,102 +262,20 @@ template endNewApiIf*(w: CoreDxApiTrackRef; code: untyped) =
else:
if w.isNil: break
let db = w.parent
if db.trackNewApi:
when CoreDbEnableApiProfiling:
let elapsed {.inject,used.} = getTime() - bnaStart
aristo_profile.update(db.profTab, bnaCtx.ord, elapsed)
if db.trackNewApi:
when not CoreDbEnableApiProfiling: # otherwise use variable above
let elapsed {.inject,used.} = getTime() - bnaStart
code
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
proc update*(t: var CoreDbProfFnInx; fn: CoreDbFnInx; ela: Duration) =
## Register time `ela` spent while executing function `fn`
let s = ela.toFloat
t[fn][0] += s
t[fn][1] += s * s
t[fn][2].inc
proc byElapsed*(t: var CoreDbProfFnInx): CoreDbProfEla =
## Collate `CoreDb` function symbols by elapsed times, sorted with largest
## `Duration` first. Zero `Duration` entries are discarded.
var u: Table[Duration,seq[CoreDbFnInx]]
for fn in CoreDbFnInx:
t.updateTotal fn
let (secs,_,count) = t[fn]
if 0 < count:
let ela = secs.toDuration
u.withValue(ela,val):
val[].add fn
do:
u[ela] = @[fn]
result.add (t[SummaryItem][0].toDuration, @[SummaryItem])
for ela in u.keys.toSeq.sorted Descending:
u.withValue(ela,val):
result.add (ela, val[])
proc byMean*(t: var CoreDbProfFnInx): CoreDbProfMean =
## Collate `CoreDb` function symbols by elapsed mean times, sorted with
## largest `Duration` first. Zero `Duration` entries are discarded.
var u: Table[Duration,seq[CoreDbFnInx]]
for fn in CoreDbFnInx:
t.updateTotal fn
let (secs,_,count) = t[fn]
if 0 < count:
let ela = (secs / count.float).toDuration
u.withValue(ela,val):
val[].add fn
do:
u[ela] = @[fn]
result.add (
(t[SummaryItem][0] / t[SummaryItem][2].float).toDuration, @[SummaryItem])
for mean in u.keys.toSeq.sorted Descending:
u.withValue(mean,val):
result.add (mean, val[])
proc byVisits*(t: var CoreDbProfFnInx): CoreDbProfCount =
## Collate `CoreDb` function symbols by number of visits, sorted with
## largest number first.
var u: Table[int,seq[CoreDbFnInx]]
for fn in CoreDbFnInx:
t.updateTotal fn
let (_,_,count) = t[fn]
if 0 < count:
u.withValue(count,val):
val[].add fn
do:
u[count] = @[fn]
result.add (t[SummaryItem][2], @[SummaryItem])
for count in u.keys.toSeq.sorted Descending:
u.withValue(count,val):
result.add (count, val[])
proc stats*(
t: CoreDbProfFnInx;
fnInx: CoreDbFnInx;
): tuple[n: int, mean: Duration, stdDev: Duration, devRatio: float] =
## Return the mean and standard deviation of the timing samples
let data = t[fnInx]
result.n = data[2]
if 0 < result.n:
let
mean = data[0] / result.n.float
sqMean = data[1] / result.n.float
meanSq = mean * mean
# Mathematically, `meanSq <= sqMean` but there might be rounding errors
# if `meanSq` and `sqMean` are approximately the same.
sigma = sqMean - min(meanSq,sqMean)
stdDev = sigma.sqrt
result.mean = mean.toDuration
result.stdDev = stdDev.toDuration
if 0 < mean:
result.devRatio = stdDev / mean
func init*(T: type CoreDbProfListRef): T =
T(list: newSeq[CoreDbProfData](1 + high(CoreDbFnInx).ord))
# ------------------------------------------------------------------------------
# End

View File

@ -12,7 +12,8 @@
import
eth/common,
results
results,
../../aristo/aristo_profile
# Annotation helpers
{.pragma: noRaise, gcsafe, raises: [].}
@ -31,6 +32,12 @@ const
CoreDbPersistentTypes* = {LegacyDbPersistent, AristoDbRocks}
type
CoreDbProfListRef* = AristoDbProfListRef
## Borrowed from `aristo_profile`, only used in profiling mode
CoreDbProfData* = AristoDbProfData
## Borrowed from `aristo_profile`, only used in profiling mode
CoreDbRc*[T] = Result[T,CoreDbErrorRef]
CoreDbAccount* = object
@ -261,11 +268,13 @@ type
# --------------------------------------------------
CoreDbRef* = ref object of RootRef
## Database descriptor
dbType*: CoreDbType ## Type of database backend
trackLegaApi*: bool ## Debugging, support
trackNewApi*: bool ## Debugging, support
trackLedgerApi*: bool ## Debugging, suggestion for subsequent ledger
localDbOnly*: bool ## Debugging, suggestion to ignore async fetch
dbType*: CoreDbType ## Type of database backend
trackLegaApi*: bool ## Debugging, support
trackNewApi*: bool ## Debugging, support
trackLedgerApi*: bool ## Debugging, suggestion for subsequent ledger
localDbOnly*: bool ## Debugging, suggestion to ignore async fetch
profTab*: CoreDbProfListRef ## Profiling data (if any)
ledgerHook*: RootRef ## Debugging/profiling, to be used by ledger
methods*: CoreDbBaseFns
CoreDbErrorRef* = ref object of RootRef

View File

@ -32,6 +32,7 @@ export
# see `aristo_db`
toAristo,
toAristoProfData,
# see `legacy_db`
isLegacy,

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -13,10 +13,17 @@
##
{.push raises: [].}
import kvt/[
kvt_constants, kvt_init, kvt_tx, kvt_utils, kvt_walk]
import
kvt/[kvt_api, kvt_constants]
export
kvt_constants, kvt_init, kvt_tx, kvt_utils, kvt_walk
kvt_api, kvt_constants
import
kvt/kvt_init
export
MemBackendRef,
VoidBackendRef,
init
import
kvt/kvt_desc

nimbus/db/kvt/kvt_api.nim (new file, 258 lines)
View File

@ -0,0 +1,258 @@
# nimbus-eth1
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Stackable API for `Kvt`
## =======================
import
std/times,
eth/common,
results,
../aristo/aristo_profile,
"."/[kvt_desc, kvt_desc/desc_backend, kvt_init, kvt_tx, kvt_utils]
# Annotation helper(s)
{.pragma: noRaise, gcsafe, raises: [].}
type
KvtDbProfListRef* = AristoDbProfListRef
## Borrowed from `aristo_profile`
KvtDbProfData* = AristoDbProfData
## Borrowed from `aristo_profile`
KvtApiCommitFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
KvtApiDelFn* = proc(db: KvtDbRef,
key: openArray[byte]): Result[void,KvtError] {.noRaise.}
KvtApiFinishFn* = proc(db: KvtDbRef, flush = false) {.noRaise.}
KvtApiForgetFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
KvtApiForkFn* = proc(db: KvtDbRef): Result[KvtDbRef,KvtError] {.noRaise.}
KvtApiForkTopFn* = proc(db: KvtDbRef): Result[KvtDbRef,KvtError] {.noRaise.}
KvtApiGetFn* = proc(db: KvtDbRef,
key: openArray[byte]): Result[Blob,KvtError] {.noRaise.}
KvtApiHasKeyFn* = proc(db: KvtDbRef,
key: openArray[byte]): Result[bool,KvtError] {.noRaise.}
KvtApiIsTopFn* = proc(tx: KvtTxRef): bool {.noRaise.}
KvtApiLevelFn* = proc(db: KvtDbRef): int {.noRaise.}
KvtApiNForkedFn* = proc(db: KvtDbRef): int {.noRaise.}
KvtApiPutFn* = proc(db: KvtDbRef,
key, data: openArray[byte]): Result[void,KvtError] {.noRaise.}
KvtApiRollbackFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
KvtApiStowFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
KvtApiTxBeginFn* = proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.}
KvtApiTxTopFn* =
proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.}
KvtApiRef* = ref KvtApiObj
KvtApiObj* = object of RootObj
## Useful set of `Kvt` functions that can be filtered, stacked etc. Note
## that this API is modelled after a subset of the `Aristo` API.
commit*: KvtApiCommitFn
del*: KvtApiDelFn
finish*: KvtApiFinishFn
forget*: KvtApiForgetFn
fork*: KvtApiForkFn
forkTop*: KvtApiForkTopFn
get*: KvtApiGetFn
hasKey*: KvtApiHasKeyFn
isTop*: KvtApiIsTopFn
level*: KvtApiLevelFn
nForked*: KvtApiNForkedFn
put*: KvtApiPutFn
rollback*: KvtApiRollbackFn
stow*: KvtApiStowFn
txBegin*: KvtApiTxBeginFn
txTop*: KvtApiTxTopFn
KvtApiProfNames* = enum
## index/name mapping for profile slots
KvtApiProfTotal = "total"
KvtApiProfCommitFn = "commit"
KvtApiProfDelFn = "del"
KvtApiProfFinishFn = "finish"
KvtApiProfForgetFn = "forget"
KvtApiProfForkFn = "fork"
KvtApiProfForkTopFn = "forkTop"
KvtApiProfGetFn = "get"
KvtApiProfHasKeyFn = "hasKey"
KvtApiProfIsTopFn = "isTop"
KvtApiProfLevelFn = "level"
KvtApiProfNForkedFn = "nForked"
KvtApiProfPutFn = "put"
KvtApiProfRollbackFn = "rollback"
KvtApiProfStowFn = "stow"
KvtApiProfTxBeginFn = "txBegin"
KvtApiProfTxTopFn = "txTop"
KvtApiProfBeGetKvpFn = "be/getKvp"
KvtApiProfBePutEndFn = "be/putEnd"
KvtApiProfRef* = ref object of KvtApiRef
## Profiling API extension of `KvtApiObj`
data*: KvtDbProfListRef
be*: BackendRef
# ------------------------------------------------------------------------------
# Public API constructors
# ------------------------------------------------------------------------------
func init*(api: var KvtApiObj) =
api.commit = commit
api.del = del
api.finish = finish
api.forget = forget
api.fork = fork
api.forkTop = forkTop
api.get = get
api.hasKey = hasKey
api.isTop = isTop
api.level = level
api.nForked = nForked
api.put = put
api.rollback = rollback
api.stow = stow
api.txBegin = txBegin
api.txTop = txTop
func init*(T: type KvtApiRef): T =
result = new T
result[].init()
func dup*(api: KvtApiRef): KvtApiRef =
new result
result[] = api[]
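For orientation, database calls are then routed through the descriptor fields rather than via direct module imports. A minimal sketch, assuming an initialised `KvtDbRef` instance `db`:
  let api = KvtApiRef.init()
  discard api.put(db, @[1.byte], @[10.byte])   # forwards to `kvt_utils.put`
  let rc = api.get(db, @[1.byte])
  if rc.isOk:
    echo "stored value has ", rc.value.len, " byte(s)"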
# ------------------------------------------------------------------------------
# Public profile API constructor
# ------------------------------------------------------------------------------
func init*(
T: type KvtApiProfRef;
api: KvtApiRef;
be = BackendRef(nil);
): T =
## This constructor creates a profiling API descriptor derived from an
## initialised `api` argument descriptor. For profiling the DB backend,
## the field `.be` of the result descriptor must be assigned to the
## `.backend` field of the `KvtDbRef` descriptor.
##
## The argument descriptors `api` and `be` will not be modified and can be
## used to restore the previous set up.
##
let
data = KvtDbProfListRef(
list: newSeq[KvtDbProfData](1 + high(KvtApiProfNames).ord))
profApi = T(data: data)
template profileRunner(n: KvtApiProfNames, code: untyped): untyped =
let start = getTime()
code
data.update(n.ord, getTime() - start)
profApi.commit =
proc(a: KvtTxRef): auto =
KvtApiProfCommitFn.profileRunner:
result = api.commit(a)
profApi.del =
proc(a: KvtDbRef; b: openArray[byte]): auto =
KvtApiProfDelFn.profileRunner:
result = api.del(a, b)
profApi.finish =
proc(a: KvtDbRef; b = false) =
KvtApiProfFinishFn.profileRunner:
api.finish(a, b)
profApi.forget =
proc(a: KvtDbRef): auto =
KvtApiProfForgetFn.profileRunner:
result = api.forget(a)
profApi.fork =
proc(a: KvtDbRef): auto =
KvtApiProfForkFn.profileRunner:
result = api.fork(a)
profApi.forkTop =
proc(a: KvtDbRef): auto =
KvtApiProfForkTopFn.profileRunner:
result = api.forkTop(a)
profApi.get =
proc(a: KvtDbRef, b: openArray[byte]): auto =
KvtApiProfGetFn.profileRunner:
result = api.get(a, b)
profApi.hasKey =
proc(a: KvtDbRef, b: openArray[byte]): auto =
KvtApiProfHasKeyFn.profileRunner:
result = api.hasKey(a, b)
profApi.isTop =
proc(a: KvtTxRef): auto =
KvtApiProfIsTopFn.profileRunner:
result = api.isTop(a)
profApi.level =
proc(a: KvtDbRef): auto =
KvtApiProfLevelFn.profileRunner:
result = api.level(a)
profApi.nForked =
proc(a: KvtDbRef): auto =
KvtApiProfNForkedFn.profileRunner:
result = api.nForked(a)
profApi.put =
proc(a: KvtDbRef; b, c: openArray[byte]): auto =
KvtApiProfPutFn.profileRunner:
result = api.put(a, b, c)
profApi.rollback =
proc(a: KvtTxRef): auto =
KvtApiProfRollbackFn.profileRunner:
result = api.rollback(a)
profApi.stow =
proc(a: KvtDbRef): auto =
KvtApiProfStowFn.profileRunner:
result = api.stow(a)
profApi.txBegin =
proc(a: KvtDbRef): auto =
KvtApiProfTxBeginFn.profileRunner:
result = api.txBegin(a)
profApi.txTop =
proc(a: KvtDbRef): auto =
KvtApiProfTxTopFn.profileRunner:
result = api.txTop(a)
if not be.isNil:
profApi.be = be.dup
profApi.be.getKvpFn =
proc(a: openArray[byte]): auto =
KvtApiProfBeGetKvpFn.profileRunner:
result = be.getKvpFn(a)
profApi.be.putEndFn =
proc(a: PutHdlRef): auto =
KvtApiProfBePutEndFn.profileRunner:
result = be.putEndFn(a)
profApi
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
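The wiring described in the constructor comment above then looks roughly as follows. This is a sketch only, assuming `db: KvtDbRef` exposes the `backend` field referenced there, with imports as in the earlier sketch:
  var api = KvtApiRef.init()
  let profApi = KvtApiProfRef.init(api, db.backend)
  api = profApi                    # route API calls through the profiler
  db.backend = profApi.be          # also profile the wrapped backend handlers
  # ... run the workload, then collate the results:
  for (ela, fns) in profApi.data.byElapsed:
    echo ela.pp, ": ", fns.mapIt($KvtApiProfNames(it)).join(", ")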

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -66,6 +66,15 @@ type
closeFn*: CloseFn ## Generic destructor
func dup*(be: BackendRef): BackendRef =
if not be.isNil:
result = BackendRef(
getKvpFn: be.getKvpFn,
putBegFn: be.putBegFn,
putKvpFn: be.putKvpFn,
putEndFn: be.putEndFn,
closeFn: be.closeFn)
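This shallow copy is what lets a profiler, or any other hook in the spirit of this change, override individual handlers while keeping the original descriptor intact for later restoration. A hypothetical tracer sketch; installing `probe` into the `KvtDbRef` descriptor's `backend` field activates it:
  let probe = be.dup                 # copy the handler closures
  probe.getKvpFn =
    proc(a: openArray[byte]): auto =
      echo "get, key length=", a.len # illustrative trace only
      be.getKvpFn(a)                 # delegate to the original handler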
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -36,28 +36,22 @@ type
ReadOnlyStateDB* = distinct LedgerRef
export
LedgerFnInx,
LedgerProfListRef,
LedgerType,
LedgerRef,
LedgerSpRef,
# Profiling support
byElapsed,
byMean,
byVisits,
stats
LedgerSpRef
const
LedgerEnableApiTracking* = EnableApiTracking
LedgerEnableApiProfiling* = EnableApiTracking and EnableApiProfiling
LedgerApiTxt* = apiTxt
when EnableApiTracking and EnableApiProfiling:
var ledgerProfTab*: LedgerProfFnInx
when AutoValidateDescriptors:
import ./base/validate
proc ldgProfData*(db: CoreDbRef): LedgerProfListRef {.gcsafe.}
# ------------------------------------------------------------------------------
# Logging/tracking helpers (some public)
# ------------------------------------------------------------------------------
@ -81,14 +75,11 @@ when EnableApiTracking:
# Publicly available for API logging
template beginTrackApi*(ldg: LedgerRef; s: LedgerFnInx) =
when EnableApiTracking:
ldg.beginApi
let ctx {.inject.} = s
ldg.beginApi(s)
template ifTrackApi*(ldg: LedgerRef; code: untyped) =
when EnableApiTracking:
ldg.endApiIf:
when EnableApiProfiling:
ledgerProfTab.update(ctx, elapsed)
code
# ------------------------------------------------------------------------------
@ -101,6 +92,8 @@ proc bless*(ldg: LedgerRef; db: CoreDbRef): LedgerRef =
ldg.validate()
when EnableApiTracking:
ldg.trackApi = db.trackLedgerApi
when LedgerEnableApiProfiling:
ldg.profTab = db.ldgProfData()
ldg.ifTrackApi: debug apiTxt, ctx, elapsed, ldgType=ldg.ldgType
ldg
@ -108,6 +101,19 @@ proc bless*(ldg: LedgerRef; db: CoreDbRef): LedgerRef =
# Public methods
# ------------------------------------------------------------------------------
proc ldgProfData*(db: CoreDbRef): LedgerProfListRef =
## Return the profiling data table (only available in profiling mode). If
## available (i.e. non-nil), the result data can be organised using the
## functions provided by `aristo_profile`.
##
## Note that the profiling data may have accumulated over several ledger
## sessions running on the same `CoreDb` instance.
##
when LedgerEnableApiProfiling:
if db.ledgerHook.isNil:
db.ledgerHook = LedgerProfListRef.init()
cast[LedgerProfListRef](db.ledgerHook)
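A minimal retrieval sketch, mirroring how the test harness in this change captures and collates these data (`com.db` being the active `CoreDbRef`; imports as in the earlier sketch):
  let ldgData = com.db.ldgProfData()   # capture before services terminate
  # ... run the ledger workload ...
  if not ldgData.isNil:
    for (count, fns) in ldgData.byVisits:
      echo count, ": ", fns.mapIt($LedgerFnInx(it)).join(", ")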
proc accessList*(ldg: LedgerRef, eAddr: EthAddress) =
ldg.beginTrackApi LdgAccessListFn
ldg.methods.accessListFn eAddr

View File

@ -11,9 +11,10 @@
{.push raises: [].}
import
std/[algorithm, math, sequtils, strformat, strutils, tables, times],
std/[strutils, times],
eth/common,
stew/byteutils,
../../aristo/aristo_profile,
../../core_db,
"."/base_desc
@ -76,77 +77,13 @@ type
LdgPairsIt = "pairs"
LdgStorageIt = "storage"
LedgerProfFnInx* = array[LedgerFnInx,(float,float,int)]
LedgerProfEla* = seq[(Duration,seq[LedgerFnInx])]
LedgerProfMean* = seq[(Duration,seq[LedgerFnInx])]
LedgerProfCount* = seq[(int,seq[LedgerFnInx])]
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc toDuration(fl: float): Duration =
## Convert the argument `fl`, a time value in (fractional) seconds, to a
## `Duration`.
let (s, ns) = fl.splitDecimal
initDuration(seconds = s.int, nanoseconds = (ns * 1_000_000_000).int)
func toFloat(ela: Duration): float =
## Convert the argument `ela` to a floating point seconds result.
let
elaS = ela.inSeconds
elaNs = (ela - initDuration(seconds=elaS)).inNanoSeconds
elaS.float + elaNs.float / 1_000_000_000
proc updateTotal(t: var LedgerProfFnInx; fnInx: LedgerFnInx) =
## Summary update helper
if fnInx == SummaryItem:
t[SummaryItem] = (0.0, 0.0, 0)
else:
t[SummaryItem][0] += t[fnInx][0]
t[SummaryItem][1] += t[fnInx][1]
t[SummaryItem][2] += t[fnInx][2]
# -----------------
func oaToStr(w: openArray[byte]): string =
w.toHex.toLowerAscii
func ppUs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inMicroseconds
let ns = elapsed.inNanoseconds mod 1_000 # fraction of a micro second
if ns != 0:
# round to hundredths of a microsecond
let du = (ns + 5i64) div 10i64
result &= &".{du:02}"
result &= "us"
func ppMs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inMilliseconds
let ns = elapsed.inNanoseconds mod 1_000_000 # fraction of a milli second
if ns != 0:
# to rounded deca milli seconds
let dm = (ns + 5_000i64) div 10_000i64
result &= &".{dm:02}"
result &= "ms"
func ppSecs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inSeconds
let ns = elapsed.inNanoseconds mod 1_000_000_000 # fraction of a second
if ns != 0:
# round to hundredths of a second
let ds = (ns + 5_000_000i64) div 10_000_000i64
result &= &".{ds:02}"
result &= "s"
func ppMins(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
result = $elapsed.inMinutes
let ns = elapsed.inNanoseconds mod 60_000_000_000 # fraction of a minute
if ns != 0:
# round to whole seconds
let dm = (ns + 500_000_000i64) div 1_000_000_000i64
result &= &":{dm:02}"
result &= "m"
# ------------------------------------------------------------------------------
# Public API logging helpers
# ------------------------------------------------------------------------------
@ -167,125 +104,32 @@ func toStr*(w: Blob): string =
func toStr*(w: seq[Log]): string =
"Logs[" & $w.len & "]"
func toStr*(elapsed: Duration): string =
try:
if 0 < times.inMinutes(elapsed):
result = elapsed.ppMins
elif 0 < times.inSeconds(elapsed):
result = elapsed.ppSecs
elif 0 < times.inMilliSeconds(elapsed):
result = elapsed.ppMs
elif 0 < times.inMicroSeconds(elapsed):
result = elapsed.ppUs
else:
result = $elapsed.inNanoSeconds & "ns"
except ValueError:
result = $elapsed
func toStr*(ela: Duration): string =
aristo_profile.toStr(ela)
# ------------------------------------------------------------------------------
# Public API logging framework
# ------------------------------------------------------------------------------
template beginApi*(ldg: LedgerRef) =
let baStart {.inject.} = getTime()
template beginApi*(ldg: LedgerRef; s: static[LedgerFnInx]) =
const ctx {.inject,used.} = s # Generally available
let baStart {.inject.} = getTime() # Local use only
template endApiIf*(ldg: LedgerRef; code: untyped) =
if ldg.trackApi:
when CoreDbEnableApiProfiling:
let elapsed {.inject,used.} = getTime() - baStart
aristo_profile.update(ldg.profTab, ctx.ord, elapsed)
if ldg.trackApi:
when not CoreDbEnableApiProfiling: # otherwise use variable above
let elapsed {.inject,used.} = getTime() - baStart
code
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
proc update*(t: var LedgerProfFnInx; fn: LedgerFnInx; ela: Duration) =
## Register time `ela` spent while executing function `fn`
let s = ela.toFloat
t[fn][0] += s
t[fn][1] += s * s
t[fn][2].inc
proc byElapsed*(t: var LedgerProfFnInx): LedgerProfEla =
## Collate `Ledger` function symbols by elapsed times, sorted with largest
## `Duration` first. Zero `Duration` entries are discarded.
var u: Table[Duration,seq[LedgerFnInx]]
for fn in LedgerFnInx:
t.updateTotal fn
let (secs,_,count) = t[fn]
if 0 < count:
let ela = secs.toDuration
u.withValue(ela,val):
val[].add fn
do:
u[ela] = @[fn]
result.add (t[SummaryItem][0].toDuration, @[SummaryItem])
for ela in u.keys.toSeq.sorted Descending:
u.withValue(ela,val):
result.add (ela, val[])
proc byMean*(t: var LedgerProfFnInx): LedgerProfMean =
## Collate `Ledger` function symbols by elapsed mean times, sorted with
## largest `Duration` first. Zero `Duration` entries are discarded.
var u: Table[Duration,seq[LedgerFnInx]]
for fn in LedgerFnInx:
t.updateTotal fn
let (secs,_,count) = t[fn]
if 0 < count:
let ela = (secs / count.float).toDuration
u.withValue(ela,val):
val[].add fn
do:
u[ela] = @[fn]
result.add (
(t[SummaryItem][0] / t[SummaryItem][2].float).toDuration, @[SummaryItem])
for mean in u.keys.toSeq.sorted Descending:
u.withValue(mean,val):
result.add (mean, val[])
proc byVisits*(t: var LedgerProfFnInx): LedgerProfCount =
## Collate `Ledger` function symbols by number of visits, sorted with
## largest number first.
var u: Table[int,seq[LedgerFnInx]]
for fn in LedgerFnInx:
t.updateTotal fn
let (_,_,count) = t[fn]
if 0 < count:
u.withValue(count,val):
val[].add fn
do:
u[count] = @[fn]
result.add (t[SummaryItem][2], @[SummaryItem])
for count in u.keys.toSeq.sorted Descending:
u.withValue(count,val):
result.add (count, val[])
proc stats*(
t: LedgerProfFnInx;
fnInx: LedgerFnInx;
): tuple[n: int, mean: Duration, stdDev: Duration, devRatio: float] =
## Return the mean and standard deviation of the timing samples
let data = t[fnInx]
result.n = data[2]
if 0 < result.n:
let
mean = data[0] / result.n.float
sqMean = data[1] / result.n.float
meanSq = mean * mean
# Mathematically, `meanSq <= sqMean` but there might be rounding errors
# if `meanSq` and `sqMean` are approximately the same.
sigma = sqMean - min(meanSq,sqMean)
stdDev = sigma.sqrt
result.mean = mean.toDuration
result.stdDev = stdDev.toDuration
if 0 < mean:
result.devRatio = stdDev / mean
func init*(T: type LedgerProfListRef): T =
T(list: newSeq[LedgerProfData](1 + high(LedgerFnInx).ord))
# ------------------------------------------------------------------------------
# End

View File

@ -13,12 +13,19 @@
import
eth/common,
../../core_db,
../../../../stateless/multi_keys
../../../../stateless/multi_keys,
../../aristo/aristo_profile
# Annotation helpers
{.pragma: noRaise, gcsafe, raises: [].}
type
LedgerProfListRef* = AristoDbProfListRef
## Borrowed from `aristo_profile`, only used in profiling mode
LedgerProfData* = AristoDbProfData
## Borrowed from `aristo_profile`, only used in profiling mode
LedgerType* = enum
Ooops = 0
LegacyAccountsCache,
@ -29,9 +36,10 @@ type
LedgerRef* = ref object of RootRef
## Root object with closures
ldgType*: LedgerType ## For debugging
trackApi*: bool ## For debugging
extras*: LedgerExtras ## Support might go away
ldgType*: LedgerType ## For debugging
trackApi*: bool ## For debugging
profTab*: LedgerProfListRef ## Profiling data (if any)
extras*: LedgerExtras ## Support might go away
methods*: LedgerFns
RawRootHashFn* = proc(): Hash256 {.noRaise.}

View File

@ -17,18 +17,18 @@ import
unittest2,
stew/endians2,
../../nimbus/sync/protocol,
../../nimbus/db/aristo,
../../nimbus/db/aristo/[
aristo_blobify,
aristo_debug,
aristo_desc,
aristo_desc/desc_backend,
aristo_get,
aristo_hashify,
aristo_init/memory_db,
aristo_init/rocks_db,
aristo_layers,
aristo_merge,
aristo_persistent,
aristo_blobify,
aristo_tx,
aristo_vid],
../replay/xcheck,
./test_helpers
@ -40,13 +40,6 @@ const
# Private helpers
# ------------------------------------------------------------------------------
when not declared(aristo_hashify.noisy):
proc hashify(
db: AristoDbRef;
noisy: bool;
): Result[void,(VertexID,AristoError)] =
aristo_hashify.hashify(db)
func hash(filter: FilterRef): Hash =
## Unique hash/filter -- cannot use de/blobify as the expressions
## `filter.blobify` and `filter.blobify.value.deblobify.value.blobify` are

View File

@ -17,11 +17,19 @@ import
results,
unittest2,
../../nimbus/db/aristo/[
aristo_check, aristo_debug, aristo_desc, aristo_filter, aristo_get,
aristo_layers, aristo_merge, aristo_persistent, aristo_blobify],
../../nimbus/db/aristo,
../../nimbus/db/aristo/aristo_desc/desc_backend,
../../nimbus/db/aristo/aristo_filter/[filter_fifos, filter_scheduler],
aristo_blobify,
aristo_check,
aristo_debug,
aristo_desc,
aristo_desc/desc_backend,
aristo_filter,
aristo_filter/filter_fifos,
aristo_filter/filter_scheduler,
aristo_get,
aristo_layers,
aristo_merge,
aristo_persistent,
aristo_tx],
../replay/xcheck,
./test_helpers
@ -765,7 +773,7 @@ proc testFilterBacklog*(
if sampleSize < n:
break
block:
let rc = db.merge w
let rc = db.mergeLeaf w
xCheckRc rc.error == 0
block:
let rc = db.stow(persistent=true)

View File

@ -13,7 +13,8 @@ import
eth/common,
rocksdb,
../../nimbus/db/aristo/[
aristo_debug, aristo_desc, aristo_filter/filter_scheduler, aristo_merge],
aristo_debug, aristo_desc, aristo_delete, aristo_filter/filter_scheduler,
aristo_hashify, aristo_hike, aristo_merge],
../../nimbus/db/kvstore_rocksdb,
../../nimbus/sync/protocol/snap/snap_types,
../test_sync_snap/test_types,
@ -46,26 +47,6 @@ func to(a: NodeKey; T: type UInt256): T =
func to(a: NodeKey; T: type PathID): T =
a.to(UInt256).to(T)
when not declared(aristo_merge.noisy):
import ../../nimbus/db/aristo/aristo_hike
proc merge(
db: AristoDbRef;
root: VertexID;
path: openArray[byte];
data: openArray[byte];
accPath: PathID;
noisy: bool;
): Result[bool, AristoError] =
aristo_merge.merge(db, root, path, data, accPath)
proc merge(
db: AristoDbRef;
lty: LeafTie;
pyl: PayloadRef;
accPath: PathID;
noisy: bool;
): Result[Hike, AristoError] =
aristo_merge.merge(db, lty, pyl, accPath)
# ------------------------------------------------------------------------------
# Public pretty printing
# ------------------------------------------------------------------------------
@ -231,6 +212,77 @@ func mapRootVid*(
# Public functions
# ------------------------------------------------------------------------------
proc hashify*(
db: AristoDbRef;
noisy: bool;
): Result[void,(VertexID,AristoError)] =
when declared(aristo_hashify.noisy):
aristo_hashify.exec(aristo_hashify.hashify(db), noisy)
else:
aristo_hashify.hashify(db)
proc delete*(
db: AristoDbRef;
root: VertexID;
path: openArray[byte];
accPath: PathID;
noisy: bool;
): Result[bool,(VertexID,AristoError)] =
when declared(aristo_delete.noisy):
aristo_delete.exec(aristo_delete.delete(db, root, path, accPath), noisy)
else:
aristo_delete.delete(db, root, path, accPath)
proc delete*(
db: AristoDbRef;
lty: LeafTie;
accPath: PathID;
noisy: bool;
): Result[bool,(VertexID,AristoError)] =
when declared(aristo_delete.noisy):
aristo_delete.exec(aristo_delete.delete(db, lty, accPath), noisy)
else:
aristo_delete.delete(db, lty, accPath)
proc delTree*(
db: AristoDbRef;
root: VertexID;
accPath: PathID;
noisy: bool;
): Result[void,(VertexID,AristoError)] =
when declared(aristo_delete.noisy):
aristo_delete.exec(aristo_delete.delTree(db, root, accPath), noisy)
else:
aristo_delete.delTree(db, root, accPath)
proc merge(
db: AristoDbRef;
root: VertexID;
path: openArray[byte];
data: openArray[byte];
accPath: PathID;
noisy: bool;
): Result[bool, AristoError] =
when declared(aristo_merge.noisy):
aristo_merge.exec(aristo_merge.merge(db, root, path, data, accPath), noisy)
else:
aristo_merge.merge(db, root, path, data, accPath)
proc mergePayload*(
db: AristoDbRef;
lty: LeafTie;
pyl: PayloadRef;
accPath: PathID;
noisy: bool;
): Result[Hike,AristoError] =
when declared(aristo_merge.noisy):
aristo_merge.exec(aristo_merge.mergePayload(db, lty, pyl, accPath), noisy)
else:
aristo_merge.mergePayload(db, lty, pyl, accPath)
proc mergeList*(
db: AristoDbRef; # Database, top layer
leafs: openArray[LeafTiePayload]; # Leaf items to add to the database
@ -241,7 +293,7 @@ proc mergeList*(
for n,w in leafs:
noisy.say "*** mergeList",
" n=", n, "/", leafs.len
let rc = db.merge(w.leafTie, w.payload, VOID_PATH_ID, noisy=noisy)
let rc = db.mergePayload(w.leafTie, w.payload, VOID_PATH_ID, noisy=noisy)
noisy.say "*** mergeList",
" n=", n, "/", leafs.len,
" rc=", (if rc.isOk: "ok" else: $rc.error),

View File

@ -17,9 +17,17 @@ import
unittest2,
stew/endians2,
../../nimbus/db/aristo/[
aristo_check, aristo_debug, aristo_delete, aristo_desc, aristo_get,
aristo_hike, aristo_layers, aristo_merge],
../../nimbus/db/[aristo, aristo/aristo_init/persistent],
aristo_check,
aristo_debug,
aristo_delete,
aristo_desc,
aristo_get,
aristo_hike,
aristo_init/persistent,
aristo_layers,
aristo_merge,
aristo_nearby,
aristo_tx],
../replay/xcheck,
./test_helpers
@ -245,7 +253,7 @@ proc fwdWalkVerify(
leftOver = leftOver
last = LeafTie()
n = 0
for (key,_) in db.right low(LeafTie,root):
for (key,_) in db.rightPairs low(LeafTie,root):
xCheck key in leftOver:
noisy.say "*** fwdWalkVerify", "id=", n + (nLeafs + 1) * debugID
leftOver.excl key
@ -277,7 +285,7 @@ proc revWalkVerify(
leftOver = leftOver
last = LeafTie()
n = 0
for (key,_) in db.left high(LeafTie,root):
for (key,_) in db.leftPairs high(LeafTie,root):
xCheck key in leftOver:
noisy.say "*** revWalkVerify", " id=", n + (nLeafs + 1) * debugID
leftOver.excl key
@ -302,7 +310,7 @@ proc mergeRlpData*(
rlpData: openArray[byte]; # RLP encoded payload data
): Result[void,AristoError] =
block body:
discard db.merge(
discard db.mergeLeaf(
LeafTiePayload(
leafTie: LeafTie(
root: VertexID(1),
@ -357,7 +365,7 @@ proc testTxMergeAndDeleteOneByOne*(
# e.g. lst.setLen(min(5,lst.len))
lst
for i,leaf in kvpLeafs:
let rc = db.merge leaf
let rc = db.mergeLeaf leaf
xCheckRc rc.error == 0
# List of all leaf entries that should be on the database
@ -462,7 +470,7 @@ proc testTxMergeAndDeleteSubTree*(
# e.g. lst.setLen(min(5,lst.len))
lst
for i,leaf in kvpLeafs:
let rc = db.merge leaf
let rc = db.mergeLeaf leaf
xCheckRc rc.error == 0
# List of all leaf entries that should be on the database
@ -485,7 +493,7 @@ proc testTxMergeAndDeleteSubTree*(
""
# Delete sub-tree
block:
let rc = db.delete(VertexID(1), VOID_PATH_ID)
let rc = db.delTree(VertexID(1), VOID_PATH_ID)
xCheckRc rc.error == (0,0):
noisy.say "***", "del(2)",
" n=", n, "/", list.len,

View File

@ -9,7 +9,6 @@
# distributed except according to those terms.
import
std/strutils,
eth/common,
../../nimbus/db/core_db,
../../nimbus/common/chain_config
@ -26,111 +25,136 @@ type
numBlocks*: int ## Number of blocks to load
dbType*: CoreDbType ## Use `CoreDbType(0)` for default
func cloneWith(
dsc: CaptureSpecs;
name = "";
network = NetworkId(0);
genesis = "";
files = seq[string].default;
numBlocks = 0;
dbType = CoreDbType(0);
): CaptureSpecs =
result = dsc
if network != NetworkId(0):
result.builtIn = true
result.network = network
elif 0 < genesis.len:
result.builtIn = false
result.genesis = genesis
if 0 < name.len:
if name[0] == '-':
result.name &= name
elif name[0] == '+' and 1 < name.len:
result.name &= name[1 .. ^1]
else:
result.name = name
if 0 < files.len:
result.files = files
if 0 < numBlocks:
result.numBlocks = numBlocks
if dbType != CoreDbType(0):
result.dbType = dbType
# Must not use `const` here, see `//github.com/nim-lang/Nim/issues/23295`
# Waiting for fix `//github.com/nim-lang/Nim/pull/23297` (or similar) to
# land in the locally used `Nim` compiler version.
let
bulkTest0* = CaptureSpecs(
builtIn: true,
name: "goerli-some",
network: GoerliNet,
files: @["goerli68161.txt.gz"],
numBlocks: 1_000)
goerliSample = CaptureSpecs(
builtIn: true,
name: "goerli",
network: GoerliNet,
files: @["goerli68161.txt.gz"]) # lon local replay folder
bulkTest1* = CaptureSpecs(
builtIn: true,
name: "goerli-more",
network: GoerliNet,
files: @["goerli68161.txt.gz"],
numBlocks: high(int))
goerliSampleEx = CaptureSpecs(
builtIn: true,
name: "goerli",
network: GoerliNet,
files: @[
"goerli482304.txt.gz", # on nimbus-eth1-blobs/replay
"goerli482305-504192.txt.gz"])
bulkTest2* = CaptureSpecs(
builtIn: true,
name: "goerli",
network: GoerliNet,
files: @[
"goerli482304.txt.gz", # on nimbus-eth1-blobs/replay
"goerli482305-504192.txt.gz"],
numBlocks: high(int))
bulkTest3* = CaptureSpecs(
builtIn: true,
name: "main",
network: MainNet,
files: @[
mainSampleEx = CaptureSpecs(
builtIn: true,
name: "main",
network: MainNet,
files: @[
"mainnet332160.txt.gz", # on nimbus-eth1-blobs/replay
"mainnet332161-550848.txt.gz",
"mainnet550849-719232.txt.gz",
"mainnet719233-843841.txt.gz"],
numBlocks: high(int))
"mainnet719233-843841.txt.gz"])
# ------------------
bulkTest0* = goerliSample
.cloneWith(
name = "-some",
numBlocks = 1_000)
bulkTest1* = goerliSample
.cloneWith(
name = "-more",
numBlocks = high(int))
bulkTest2* = goerliSampleEx
.cloneWith(
numBlocks = high(int))
bulkTest3* = mainSampleEx
.cloneWith(
numBlocks = high(int))
# Test samples with all the problems one can expect
ariTest0* = CaptureSpecs(
builtIn: true,
name: bulkTest2.name & "-am",
network: bulkTest2.network,
files: bulkTest2.files,
numBlocks: high(int),
dbType: AristoDbMemory)
ariTest0* = goerliSampleEx
.cloneWith(
name = "-am",
numBlocks = high(int),
dbType = AristoDbMemory)
ariTest1* = CaptureSpecs(
builtIn: true,
name: bulkTest2.name & "-ar",
network: bulkTest2.network,
files: bulkTest2.files,
numBlocks: high(int),
dbType: AristoDbRocks)
ariTest1* = goerliSampleEx
.cloneWith(
name = "-ar",
numBlocks = high(int),
dbType = AristoDbRocks)
ariTest2* = CaptureSpecs(
builtIn: true,
name: bulkTest3.name & "-am",
network: bulkTest3.network,
files: bulkTest3.files,
numBlocks: 500_000,
dbType: AristoDbMemory)
ariTest2* = mainSampleEx
.cloneWith(
name = "-am",
numBlocks = 500_000,
dbType = AristoDbMemory)
ariTest3* = CaptureSpecs(
builtIn: true,
name: bulkTest3.name & "-ar",
network: bulkTest3.network,
files: bulkTest3.files,
numBlocks: high(int),
dbType: AristoDbRocks)
ariTest3* = mainSampleEx
.cloneWith(
name = "-ar",
numBlocks = high(int),
dbType = AristoDbRocks)
# To be compared against the proof-of-concept implementation as
# reference
# To be compared against the proof-of-concept implementation as reference
legaTest0* = CaptureSpecs(
builtIn: true,
name: ariTest0.name.replace("-am", "-lm"),
network: ariTest0.network,
files: ariTest0.files,
numBlocks: ariTest0.numBlocks,
dbType: LegacyDbMemory)
legaTest0* = goerliSampleEx
.cloneWith(
name = "-lm",
numBlocks = 500, # high(int),
dbType = LegacyDbMemory)
legaTest1* = CaptureSpecs(
builtIn: true,
name: ariTest1.name.replace("-ar", "-lp"),
network: ariTest1.network,
files: ariTest1.files,
numBlocks: ariTest1.numBlocks,
dbType: LegacyDbPersistent)
legaTest1* = goerliSampleEx
.cloneWith(
name = "-lp",
numBlocks = high(int),
dbType = LegacyDbPersistent)
legaTest2* = CaptureSpecs(
builtIn: true,
name: ariTest2.name.replace("-ar", "-lm"),
network: ariTest2.network,
files: ariTest2.files,
numBlocks: ariTest2.numBlocks,
dbType: LegacyDbMemory)
legaTest2* = mainSampleEx
.cloneWith(
name = "-lm",
numBlocks = 500_000,
dbType = LegacyDbMemory)
legaTest3* = CaptureSpecs(
builtIn: true,
name: ariTest3.name.replace("-ar", "-lp"),
network: ariTest3.network,
files: ariTest3.files,
numBlocks: ariTest3.numBlocks,
dbType: LegacyDbPersistent)
legaTest3* = mainSampleEx
.cloneWith(
name = "-lp",
numBlocks = high(int),
dbType = LegacyDbPersistent)
# ------------------

View File

@ -19,10 +19,25 @@ import
../replay/[pp, undump_blocks, xcheck],
./test_helpers
type StopMoaningAboutLedger {.used.} = LedgerType
type
StopMoaningAboutLedger {.used.} = LedgerType
when CoreDbEnableApiProfiling or LedgerEnableApiProfiling:
import std/[algorithm, sequtils, strutils]
when CoreDbEnableApiProfiling:
import
std/[algorithm, sequtils, strutils],
../../nimbus/db/aristo/[aristo_api, aristo_profile],
../../nimbus/db/kvt/kvt_api
var
aristoProfData: AristoDbProfListRef
kvtProfData: KvtDbProfListRef
cdbProfData: CoreDbProfListRef
when LedgerEnableApiProfiling:
when not CoreDbEnableApiProfiling:
import
std/[algorithm, sequtils, strutils]
var
ldgProfData: LedgerProfListRef
const
EnableExtraLoggingControl = true
@ -99,35 +114,29 @@ template stopLoggingAfter(noisy: bool; code: untyped) =
# --------------
proc coreDbProfResults(info: string; indent = 4): string =
when CoreDbEnableApiProfiling:
let
pfx = indent.toPfx
pfx2 = pfx & " "
result = "CoreDb profiling results" & info & ":"
result &= "\n" & pfx & "by accumulated duration per procedure"
for (ela,w) in coreDbProfTab.byElapsed:
result &= pfx2 & ela.pp & ": " &
w.mapIt($it & coreDbProfTab.stats(it).pp(true)).sorted.join(", ")
result &= "\n" & pfx & "by number of visits"
for (count,w) in coreDbProfTab.byVisits:
result &= pfx2 & $count & ": " &
w.mapIt($it & coreDbProfTab.stats(it).pp).sorted.join(", ")
when CoreDbEnableApiProfiling or
LedgerEnableApiProfiling:
proc profilingPrinter(
data: AristoDbProfListRef;
names: openArray[string];
header: string;
indent = 4;
): string =
if not data.isNil:
let
pfx = indent.toPfx
pfx2 = pfx & " "
result = header & ":"
proc ledgerProfResults(info: string; indent = 4): string =
when LedgerEnableApiProfiling:
let
pfx = indent.toPfx
pfx2 = pfx & " "
result = "Ledger profiling results" & info & ":"
result &= "\n" & pfx & "by accumulated duration per procedure"
for (ela,w) in ledgerProfTab.byElapsed:
result &= pfx2 & ela.pp & ": " &
w.mapIt($it & ledgerProfTab.stats(it).pp(true)).sorted.join(", ")
result &= "\n" & pfx & "by number of visits"
for (count,w) in ledgerProfTab.byVisits:
result &= pfx2 & $count & ": " &
w.mapIt($it & ledgerProfTab.stats(it).pp).sorted.join(", ")
result &= "\n" & pfx & "by accumulated duration per procedure"
for (ela,fns) in data.byElapsed:
result &= pfx2 & ela.pp & ": " & fns.mapIt(
names[it] & data.stats(it).pp(true)).sorted.join(", ")
result &= "\n" & pfx & "by number of visits"
for (count,fns) in data.byVisits:
result &= pfx2 & $count & ": " & fns.mapIt(
names[it] & data.stats(it).pp).sorted.join(", ")
# ------------------------------------------------------------------------------
# Public test function
@ -136,18 +145,33 @@ proc ledgerProfResults(info: string; indent = 4): string =
proc test_chainSyncProfilingPrint*(
noisy = false;
nBlocks: int;
indent = 2;
) =
if noisy:
let info =
if 0 < nBlocks and nBlocks < high(int): " (" & $nBlocks & " blocks)"
else: ""
block:
let s = info.coreDbProfResults()
var blurb: seq[string]
when LedgerEnableApiProfiling:
blurb.add ldgProfData.profilingPrinter(
names = LedgerFnInx.toSeq.mapIt($it),
header = "Ledger profiling results" & info,
indent)
when CoreDbEnableApiProfiling:
blurb.add cdbProfData.profilingPrinter(
names = CoreDbFnInx.toSeq.mapIt($it),
header = "CoreDb profiling results" & info,
indent)
blurb.add aristoProfData.profilingPrinter(
names = AristoApiProfNames.toSeq.mapIt($it),
header = "Aristo backend profiling results" & info,
indent)
blurb.add kvtProfData.profilingPrinter(
names = KvtApiProfNames.toSeq.mapIt($it),
header = "Kvt backend profiling results" & info,
indent)
for s in blurb:
if 0 < s.len: true.say "***", s, "\n"
block:
let s = info.ledgerProfResults()
if 0 < s.len: true.say "***", s, "\n"
proc test_chainSync*(
noisy: bool;
@ -166,6 +190,16 @@ proc test_chainSync*(
noisy.initLogging com
defer: com.finishLogging()
# Profile variables will be non-nil if profiling is available. The profiling
# API data need to be captured so that they remain available after the
# services have terminated.
when CoreDbEnableApiProfiling:
(aristoProfData, kvtProfData) = com.db.toAristoProfData()
cdbProfData = com.db.dbProfData()
when LedgerEnableApiProfiling:
ldgProfData = com.db.ldgProfData()
for w in filePaths.undumpBlocks:
let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)
if fromBlock == 0.u256: