Mirror of https://github.com/status-im/nimbus-eth1.git (synced 2025-02-23 01:08:26 +00:00)
Coredb use stackable api for aristo backend (#2060)
* Aristo/Kvt: Provide function hooks APIs

  why:
    These APIs can be used for installing tracers, profiling functionality,
    and other niceties on the databases.

* Aristo: Provide optional API profiling

  details:
    It basically re-implements the earlier `CoreDb` profiling implementation.

* Kvt: Provide optional API profiling similar to `Aristo`

* CoreDb: Re-implement profiling using `aristo_profile`

* Ledger: Re-implement profiling using `aristo_profile`

* CoreDb: Update unit tests for maintainability

* Update copyright dates
This commit is contained in:
parent 7089226d43
commit 587ca3abbe
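The commit's central device is a table of function pointers (`AristoApiRef`, defined in the new `aristo_api.nim` below) that call sites use instead of calling the `Aristo` modules directly, so a hook can be layered on by copying the table and replacing entries. A minimal sketch of the stacking pattern this enables, assuming the nimbus-eth1 modules are on the import path; `tracingApi` and its logging wrapper are illustrative, not part of the commit:

import nimbus/db/aristo/[aristo_api, aristo_desc]

proc tracingApi(): AristoApiRef =
  ## Stack a tracer on the standard function table (illustrative).
  let plain = AristoApiRef.init()   # every slot holds the plain module function
  result = plain.dup                # shallow copy, safe to modify
  result.level =
    proc(db: AristoDbRef): int =
      let n = plain.level(db)       # delegate to the wrapped function
      debugEcho "aristo nesting level queried: ", n
      n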
@@ -1,5 +1,5 @@
 # nimbus-eth1
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -13,31 +13,26 @@
 ##
 {.push raises: [].}

-import aristo/[
-  aristo_constants, aristo_delete, aristo_fetch, aristo_init, aristo_merge,
-  aristo_nearby, aristo_serialise, aristo_sign, aristo_tx, aristo_utils,
-  aristo_walk]
+import
+  aristo/[aristo_api, aristo_constants, aristo_sign]
 export
-  aristo_constants, aristo_delete, aristo_fetch, aristo_init, aristo_merge,
-  aristo_nearby, aristo_serialise, aristo_sign, aristo_tx, aristo_utils,
-  aristo_walk
+  aristo_api, aristo_constants, aristo_sign

 import
-  aristo/aristo_get
+  aristo/aristo_init
 export
-  getKeyRc
+  MemBackendRef,
+  VoidBackendRef,
+  init

 import
-  aristo/aristo_hashify
+  aristo/aristo_nearby
 export
-  hashify
+  leftPairs, # iterators
+  rightPairs

 import
-  aristo/aristo_path
-export
-  pathAsBlob
-
-import aristo/aristo_desc/[desc_identifiers, desc_structural]
+  aristo/aristo_desc/[desc_identifiers, desc_structural]
+
 export
   AristoAccount,
   PayloadRef,
@@ -53,7 +48,7 @@ export
   AristoError,
   AristoTxRef,
   MerkleSignRef,
-  forget,
+  QidLayoutRef,
   isValid

 # End
nimbus/db/aristo/aristo_api.nim (new file, 623 lines)
@@ -0,0 +1,623 @@
# nimbus-eth1
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Stackable API for `Aristo`
## ==========================

import
  std/times,
  eth/[common, trie/nibbles],
  results,
  "."/[aristo_delete, aristo_desc, aristo_desc/desc_backend, aristo_fetch,
       aristo_get, aristo_hashify, aristo_hike, aristo_init, aristo_merge,
       aristo_path, aristo_profile, aristo_serialise, aristo_tx, aristo_vid]

export
  AristoDbProfListRef

# Annotation helper(s)
{.pragma: noRaise, gcsafe, raises: [].}

type
  AristoApiCommitFn* =
    proc(tx: AristoTxRef;
        ): Result[void,AristoError]
        {.noRaise.}
      ## Given a *top level* handle, this function accepts all database
      ## operations performed through this handle and merges them into the
      ## previous layer. The previous transaction is returned if there
      ## was any.

  AristoApiDeleteFn* =
    proc(db: AristoDbRef;
         root: VertexID;
         path: openArray[byte];
         accPath: PathID;
        ): Result[bool,(VertexID,AristoError)]
        {.noRaise.}
      ## Delete a leaf with path `path` starting at root `root`.
      ##
      ## For a `root` with `VertexID` greater than `LEAST_FREE_VID`, the
      ## sub-tree generated by `payload.root` is considered a storage trie
      ## linked to an account leaf referred to by a valid `accPath` (i.e.
      ## different from `VOID_PATH_ID`.) In that case, an account must
      ## exist. If there is payload of type `AccountData`, its `storageID`
      ## field must be unset or equal to the `root` vertex ID.
      ##
      ## The return code is `true` iff the trie has become empty.

  AristoApiDelTreeFn* =
    proc(db: AristoDbRef;
         root: VertexID;
         accPath: PathID;
        ): Result[void,(VertexID,AristoError)]
        {.noRaise.}
      ## Delete sub-trie below `root`. The maximum supported sub-tree size
      ## is `SUB_TREE_DISPOSAL_MAX`. Larger tries must be disposed by
      ## walk-deleting leaf nodes using `left()` or `right()` traversal
      ## functions.
      ##
      ## For a `root` argument greater than `LEAST_FREE_VID`, the sub-tree
      ## spanned by `root` is considered a storage trie linked to an account
      ## leaf referred to by a valid `accPath` (i.e. different from
      ## `VOID_PATH_ID`.) In that case, an account must exist. If there is
      ## payload of type `AccountData`, its `storageID` field must be unset
      ## or equal to the `hike.root` vertex ID.

  AristoApiFetchPayloadFn* =
    proc(db: AristoDbRef;
         root: VertexID;
         path: openArray[byte];
        ): Result[PayloadRef,(VertexID,AristoError)]
        {.noRaise.}
      ## Cascaded attempt to traverse the `Aristo Trie` and fetch the value
      ## of a leaf vertex. This function is complementary to `mergePayload()`.

  AristoApiFinishFn* =
    proc(db: AristoDbRef;
         flush = false;
        ) {.noRaise.}
      ## Backend destructor. The argument `flush` indicates that a full
      ## database deletion is requested. If set `false` the outcome might
      ## differ depending on the type of backend (e.g. the `BackendMemory`
      ## backend will always flush on close.)
      ##
      ## In case of distributed descriptors accessing the same backend, all
      ## distributed descriptors will be destroyed.
      ##
      ## This destructor may be used on already *destructed* descriptors.

  AristoApiForgetFn* =
    proc(db: AristoDbRef;
        ): Result[void,AristoError]
        {.noRaise.}
      ## Destruct the non centre argument `db` descriptor (see comments on
      ## `reCentre()` for details.)
      ##
      ## A non centre descriptor should always be destructed after use (see
      ## also comments on `fork()`.)

  AristoApiForkFn* =
    proc(db: AristoDbRef;
         rawTopLayer = false;
        ): Result[AristoDbRef,AristoError]
        {.noRaise.}
      ## This function creates a new empty descriptor accessing the same
      ## backend (if any) database as the argument `db`. This new descriptor
      ## joins the list of descriptors accessing the same backend database.
      ##
      ## After use, any unused non centre descriptor should be destructed
      ## via `forget()`. Not doing so will not only hold memory resources
      ## but might also cost computing resources for maintaining and
      ## updating backend filters when writing to the backend database.
      ##
      ## If the argument `rawTopLayer` is set `true` the function will
      ## provide an uninitialised and inconsistent (!) top layer. This
      ## setting avoids some database lookup for cases where the top layer
      ## is redefined anyway.

  AristoApiForkTopFn* =
    proc(db: AristoDbRef;
         dontHashify = false;
        ): Result[AristoDbRef,AristoError]
        {.noRaise.}
      ## Clone a top transaction into a new DB descriptor accessing the same
      ## backend database (if any) as the argument `db`. The new descriptor
      ## is linked to the transaction parent and is fully functional as a
      ## forked instance (see comments on `aristo_desc.reCentre()` for
      ## details.) If there is no active transaction, the top layer state
      ## is cloned.
      ##
      ## Input situation:
      ## ::
      ##   tx -> db0   with tx is top transaction, tx.level > 0
      ##
      ## Output situation:
      ## ::
      ##   tx  -> db0 \
      ##               > share the same backend
      ##   tx1 -> db1 /
      ##
      ## where `tx.level > 0`, `db1.level == 1` and `db1` is returned. The
      ## transaction `tx1` can be retrieved via `db1.txTop()`.
      ##
      ## The new DB descriptor will contain a copy of the argument transaction
      ## `tx` as top layer of level 1 (i.e. this is the only transaction.)
      ## Rolling back will end up at the backend layer (incl. backend filter.)
      ##
      ## If the argument flag `dontHashify` is passed `true`, the clone
      ## descriptor will *NOT* be hashified right after construction.
      ##
      ## Use `aristo_desc.forget()` to clean up this descriptor.

  AristoApiGetKeyRcFn* =
    proc(db: AristoDbRef;
         vid: VertexID;
        ): Result[HashKey,AristoError]
        {.noRaise.}
      ## Cascaded attempt to fetch a Merkle hash from the cache layers or
      ## the backend (if available.)

  AristoApiHashifyFn* =
    proc(db: AristoDbRef;
        ): Result[void,(VertexID,AristoError)]
        {.noRaise.}
      ## Add keys to the `Patricia Trie` so that it becomes a `Merkle
      ## Patricia Tree`.

  AristoApiHasPathFn* =
    proc(db: AristoDbRef;
         root: VertexID;
         path: openArray[byte];
        ): Result[bool,(VertexID,AristoError)]
        {.noRaise.}
      ## Variant of `fetchPayload()` without returning data. It returns
      ## `true` iff the database `db` contains a leaf item with the argument
      ## path.

  AristoApiHikeUpFn* =
    proc(path: NibblesSeq;
         root: VertexID;
         db: AristoDbRef;
        ): Result[Hike,(VertexID,AristoError,Hike)]
        {.noRaise.}
      ## For the argument `path`, find and return the longest possible path
      ## in the argument database `db`.

  AristoApiIsTopFn* =
    proc(tx: AristoTxRef;
        ): bool
        {.noRaise.}
      ## Getter, returns `true` if the argument `tx` refers to the current
      ## top level transaction.

  AristoApiLevelFn* =
    proc(db: AristoDbRef;
        ): int
        {.noRaise.}
      ## Getter, non-negative nesting level (i.e. number of pending
      ## transactions)

  AristoApiNForkedFn* =
    proc(db: AristoDbRef;
        ): int
        {.noRaise.}
      ## Returns the number of non centre descriptors (see comments on
      ## `reCentre()` for details.) This function is a fast version of
      ## `db.forked.toSeq.len`.

  AristoApiMergeFn* =
    proc(db: AristoDbRef;
         root: VertexID;
         path: openArray[byte];
         data: openArray[byte];
         accPath: PathID;
        ): Result[bool,AristoError]
        {.noRaise.}
      ## Variant of `mergePayload()` where the `data` argument will be
      ## converted to a `RawBlob` type `PayloadRef` value.

  AristoApiMergePayloadFn* =
    proc(db: AristoDbRef;
         root: VertexID;
         path: openArray[byte];
         payload: PayloadRef;
         accPath = VOID_PATH_ID;
        ): Result[bool,AristoError]
        {.noRaise.}
      ## Merge the argument key-value-pair `(path,payload)` into the top level
      ## vertex table of the database `db`.
      ##
      ## For a `root` argument with `VertexID` greater than `LEAST_FREE_VID`,
      ## the sub-tree generated by `payload.root` is considered a storage trie
      ## linked to an account leaf referred to by a valid `accPath` (i.e.
      ## different from `VOID_PATH_ID`.) In that case, an account must exist.
      ## If there is payload of type `AccountData`, its `storageID` field must
      ## be unset or equal to the `payload.root` vertex ID.

  AristoApiPathAsBlobFn* =
    proc(tag: PathID;
        ): Blob
        {.noRaise.}
      ## Converts the `tag` argument to a sequence of an even number of
      ## nibbles represented by a `Blob`. If the argument `tag` represents
      ## an odd number of nibbles, a zero nibble is appended.
      ##
      ## This function is useful only if there is a tacit agreement that all
      ## paths used to index database leaf values can be represented as
      ## `Blob`, i.e. `PathID` type paths with an even number of nibbles.

  AristoApiRollbackFn* =
    proc(tx: AristoTxRef;
        ): Result[void,AristoError]
        {.noRaise.}
      ## Given a *top level* handle, this function discards all database
      ## operations performed for this transaction. The previous transaction
      ## is returned if there was any.

  AristoApiSerialiseFn* =
    proc(db: AristoDbRef;
         pyl: PayloadRef;
        ): Result[Blob,(VertexID,AristoError)]
        {.noRaise.}
      ## Encode the data payload of the argument `pyl` as RLP `Blob` if
      ## it is of account type, otherwise pass the data as is.

  AristoApiStowFn* =
    proc(db: AristoDbRef;
         persistent = false;
         chunkedMpt = false;
        ): Result[void,AristoError]
        {.noRaise.}
      ## If there is no backend while the `persistent` argument is set `true`,
      ## the function returns immediately with an error. The same happens if
      ## there is a pending transaction.
      ##
      ## The function then merges the data from the top layer cache into the
      ## backend stage area. After that, the top layer cache is cleared.
      ##
      ## Staging the top layer cache might fail with a partial MPT when it
      ## is set up from partial MPT chunks as it happens with `snap` sync
      ## processing. In this case, the `chunkedMpt` argument must be set
      ## `true` (see also `fwdFilter`.)
      ##
      ## If the argument `persistent` is set `true`, all the staged data are
      ## merged into the physical backend database and the staged data area
      ## is cleared.

  AristoApiTxBeginFn* =
    proc(db: AristoDbRef
        ): Result[AristoTxRef,AristoError]
        {.noRaise.}
      ## Starts a new transaction.
      ##
      ## Example:
      ## ::
      ##   proc doSomething(db: AristoDbRef) =
      ##     let tx = db.begin
      ##     defer: tx.rollback()
      ##     ... continue using db ...
      ##     tx.commit()

  AristoApiTxTopFn* =
    proc(db: AristoDbRef;
        ): Result[AristoTxRef,AristoError]
        {.noRaise.}
      ## Getter, returns top level transaction if there is any.

  AristoApiVidFetchFn* =
    proc(db: AristoDbRef;
         pristine = false;
        ): VertexID
        {.noRaise.}
      ## Recycle or create a new `VertexID`. Reusable vertex *ID*s are kept
      ## in a list where the top entry *ID* has the property that any other
      ## *ID* larger is also not used on the database.
      ##
      ## The function prefers to return recycled vertex *ID*s if there are
      ## any. When the argument `pristine` is set `true`, the function
      ## guarantees to return a non-recycled, brand new vertex *ID* which
      ## is the preferred mode when creating leaf vertices.

  AristoApiVidDisposeFn* =
    proc(db: AristoDbRef;
         vid: VertexID;
        ) {.noRaise.}
      ## Recycle the argument `vtxID` which is useful after deleting entries
      ## from the vertex table to keep the `VertexID` type key values small.

  AristoApiRef* = ref AristoApiObj
  AristoApiObj* = object of RootObj
    ## Useful set of `Aristo` functions that can be filtered, stacked etc.
    commit*: AristoApiCommitFn
    delete*: AristoApiDeleteFn
    delTree*: AristoApiDelTreeFn
    fetchPayload*: AristoApiFetchPayloadFn
    finish*: AristoApiFinishFn
    forget*: AristoApiForgetFn
    fork*: AristoApiForkFn
    forkTop*: AristoApiForkTopFn
    getKeyRc*: AristoApiGetKeyRcFn
    hashify*: AristoApiHashifyFn
    hasPath*: AristoApiHasPathFn
    hikeUp*: AristoApiHikeUpFn
    isTop*: AristoApiIsTopFn
    level*: AristoApiLevelFn
    nForked*: AristoApiNForkedFn
    merge*: AristoApiMergeFn
    mergePayload*: AristoApiMergePayloadFn
    pathAsBlob*: AristoApiPathAsBlobFn
    rollback*: AristoApiRollbackFn
    serialise*: AristoApiSerialiseFn
    stow*: AristoApiStowFn
    txBegin*: AristoApiTxBeginFn
    txTop*: AristoApiTxTopFn
    vidFetch*: AristoApiVidFetchFn
    vidDispose*: AristoApiVidDisposeFn

  AristoApiProfNames* = enum
    ## Index/name mapping for profile slots
    AristoApiProfTotal = "total"

    AristoApiProfCommitFn = "commit"
    AristoApiProfDeleteFn = "delete"
    AristoApiProfDelTreeFn = "delTree"
    AristoApiProfFetchPayloadFn = "fetchPayload"
    AristoApiProfFinishFn = "finish"
    AristoApiProfForgetFn = "forget"
    AristoApiProfForkFn = "fork"
    AristoApiProfForkTopFn = "forkTop"
    AristoApiProfGetKeyRcFn = "getKeyRc"
    AristoApiProfHashifyFn = "hashify"
    AristoApiProfHasPathFn = "hasPath"
    AristoApiProfHikeUpFn = "hikeUp"
    AristoApiProfIsTopFn = "isTop"
    AristoApiProfLevelFn = "level"
    AristoApiProfNForkedFn = "nForked"
    AristoApiProfMergeFn = "merge"
    AristoApiProfMergePayloadFn = "mergePayload"
    AristoApiProfPathAsBlobFn = "pathAsBlob"
    AristoApiProfRollbackFn = "rollback"
    AristoApiProfSerialiseFn = "serialise"
    AristoApiProfStowFn = "stow"
    AristoApiProfTxBeginFn = "txBegin"
    AristoApiProfTxTopFn = "txTop"
    AristoApiProfVidFetchFn = "vidFetch"
    AristoApiProfVidDisposeFn = "vidDispose"

    AristoApiProfBeGetVtxFn = "be/getVtx"
    AristoApiProfBeGetKeyFn = "be/getKey"
    AristoApiProfBePutEndFn = "be/putEnd"

  AristoApiProfRef* = ref object of AristoApiRef
    ## Profiling API extension of `AristoApiObj`
    data*: AristoDbProfListRef
    be*: BackendRef

# ------------------------------------------------------------------------------
# Public API constructors
# ------------------------------------------------------------------------------

func init*(api: var AristoApiObj) =
  ## Initialise an `api` argument descriptor
  ##
  api.commit = commit
  api.delete = delete
  api.delTree = delTree
  api.fetchPayload = fetchPayload
  api.finish = finish
  api.forget = forget
  api.fork = fork
  api.forkTop = forkTop
  api.getKeyRc = getKeyRc
  api.hashify = hashify
  api.hasPath = hasPath
  api.hikeUp = hikeUp
  api.isTop = isTop
  api.level = level
  api.nForked = nForked
  api.merge = merge
  api.mergePayload = mergePayload
  api.pathAsBlob = pathAsBlob
  api.rollback = rollback
  api.serialise = serialise
  api.stow = stow
  api.txBegin = txBegin
  api.txTop = txTop
  api.vidFetch = vidFetch
  api.vidDispose = vidDispose

func init*(T: type AristoApiRef): T =
  new result
  result[].init()

func dup*(api: AristoApiRef): AristoApiRef =
  new result
  result[] = api[]
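Call sites then go through the table rather than the modules, as the handler hunks at the end of this diff show. A sketch of the changed calling convention, assuming the aristo modules above are in scope; `persistSketch` is a hypothetical wrapper, while `stow` and its signature come from the table above:

proc persistSketch(api: AristoApiRef; mpt: AristoDbRef) =
  # Before this commit: `mpt.stow(persistent = true)`, resolved at compile
  # time. Now the call dispatches through the table, so any stacked hook
  # (tracer, profiler) sees it too.
  let rc = api.stow(mpt, persistent = true)
  if rc.isErr:
    debugEcho "stow failed: ", rc.error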
# ------------------------------------------------------------------------------
# Public profile API constructor
# ------------------------------------------------------------------------------

func init*(
    T: type AristoApiProfRef;
    api: AristoApiRef;
    be = BackendRef(nil);
      ): T =
  ## This constructor creates a profiling API descriptor to be derived from
  ## an initialised `api` argument descriptor. For profiling the DB backend,
  ## the field `.be` of the result descriptor must be assigned to the
  ## `.backend` field of the `AristoDbRef` descriptor.
  ##
  ## The argument descriptors `api` and `be` will not be modified and can be
  ## used to restore the previous set up.
  ##
  let
    data = AristoDbProfListRef(
      list: newSeq[AristoDbProfData](1 + high(AristoApiProfNames).ord))
    profApi = T(data: data)

  template profileRunner(n: AristoApiProfNames, code: untyped): untyped =
    let start = getTime()
    code
    data.update(n.ord, getTime() - start)

  profApi.commit =
    proc(a: AristoTxRef): auto =
      AristoApiProfCommitFn.profileRunner:
        result = api.commit(a)

  profApi.delete =
    proc(a: AristoDbRef; b: VertexID; c: openArray[byte]; d: PathID): auto =
      AristoApiProfDeleteFn.profileRunner:
        result = api.delete(a, b, c, d)

  profApi.delTree =
    proc(a: AristoDbRef; b: VertexID; c: PathID): auto =
      AristoApiProfDelTreeFn.profileRunner:
        result = api.delTree(a, b, c)

  profApi.fetchPayload =
    proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto =
      AristoApiProfFetchPayloadFn.profileRunner:
        result = api.fetchPayload(a, b, c)

  profApi.finish =
    proc(a: AristoDbRef; b = false) =
      AristoApiProfFinishFn.profileRunner:
        api.finish(a, b)

  profApi.forget =
    proc(a: AristoDbRef): auto =
      AristoApiProfForgetFn.profileRunner:
        result = api.forget(a)

  profApi.fork =
    proc(a: AristoDbRef; b = false): auto =
      AristoApiProfForkFn.profileRunner:
        result = api.fork(a, b)

  profApi.forkTop =
    proc(a: AristoDbRef; b = false): auto =
      AristoApiProfForkTopFn.profileRunner:
        result = api.forkTop(a, b)

  profApi.getKeyRc =
    proc(a: AristoDbRef; b: VertexID): auto =
      AristoApiProfGetKeyRcFn.profileRunner:
        result = api.getKeyRc(a, b)

  profApi.hashify =
    proc(a: AristoDbRef): auto =
      AristoApiProfHashifyFn.profileRunner:
        result = api.hashify(a)

  profApi.hasPath =
    proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto =
      AristoApiProfHasPathFn.profileRunner:
        result = api.hasPath(a, b, c)

  profApi.hikeUp =
    proc(a: NibblesSeq; b: VertexID; c: AristoDbRef): auto =
      AristoApiProfHikeUpFn.profileRunner:
        result = api.hikeUp(a, b, c)

  profApi.isTop =
    proc(a: AristoTxRef): auto =
      AristoApiProfIsTopFn.profileRunner:
        result = api.isTop(a)

  profApi.level =
    proc(a: AristoDbRef): auto =
      AristoApiProfLevelFn.profileRunner:
        result = api.level(a)

  profApi.nForked =
    proc(a: AristoDbRef): auto =
      AristoApiProfNForkedFn.profileRunner:
        result = api.nForked(a)

  profApi.merge =
    proc(a: AristoDbRef; b: VertexID; c,d: openArray[byte]; e: PathID): auto =
      AristoApiProfMergeFn.profileRunner:
        result = api.merge(a, b, c, d, e)

  profApi.mergePayload =
    proc(a: AristoDbRef; b: VertexID; c: openArray[byte]; d: PayloadRef;
         e = VOID_PATH_ID): auto =
      AristoApiProfMergePayloadFn.profileRunner:
        result = api.mergePayload(a, b, c, d, e)

  profApi.pathAsBlob =
    proc(a: PathID): auto =
      AristoApiProfPathAsBlobFn.profileRunner:
        result = api.pathAsBlob(a)

  profApi.rollback =
    proc(a: AristoTxRef): auto =
      AristoApiProfRollbackFn.profileRunner:
        result = api.rollback(a)

  profApi.serialise =
    proc(a: AristoDbRef; b: PayloadRef): auto =
      AristoApiProfSerialiseFn.profileRunner:
        result = api.serialise(a, b)

  profApi.stow =
    proc(a: AristoDbRef; b = false; c = false): auto =
      AristoApiProfStowFn.profileRunner:
        result = api.stow(a, b, c)

  profApi.txBegin =
    proc(a: AristoDbRef): auto =
      AristoApiProfTxBeginFn.profileRunner:
        result = api.txBegin(a)

  profApi.txTop =
    proc(a: AristoDbRef): auto =
      AristoApiProfTxTopFn.profileRunner:
        result = api.txTop(a)

  profApi.vidFetch =
    proc(a: AristoDbRef; b = false): auto =
      AristoApiProfVidFetchFn.profileRunner:
        result = api.vidFetch(a, b)

  profApi.vidDispose =
    proc(a: AristoDbRef; b: VertexID) =
      AristoApiProfVidDisposeFn.profileRunner:
        api.vidDispose(a, b)

  if not be.isNil:
    profApi.be = be.dup

    profApi.be.getVtxFn =
      proc(a: VertexID): auto =
        AristoApiProfBeGetVtxFn.profileRunner:
          result = be.getVtxFn(a)

    profApi.be.getKeyFn =
      proc(a: VertexID): auto =
        AristoApiProfBeGetKeyFn.profileRunner:
          result = be.getKeyFn(a)

    profApi.be.putEndFn =
      proc(a: PutHdlRef): auto =
        AristoApiProfBePutEndFn.profileRunner:
          result = be.putEndFn(a)

  profApi

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
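Putting the profiler to work is then a matter of swapping the table. A sketch, assuming the aristo modules are imported, the caller owns the `AristoDbRef`, and `AristoDbRef` exposes the assignable `.backend` field the constructor's documentation refers to (the stats helpers come from the new `aristo_profile` module further down):

proc profiledSketch(db: AristoDbRef) =
  let
    plain = AristoApiRef.init()
    prof  = AristoApiProfRef.init(plain, db.backend)
  db.backend = prof.be              # route backend calls through the
                                    # profiled clone, as documented above

  discard prof.level(db)            # every call updates a timing slot

  for (ela, slots) in prof.data.byElapsed:
    debugEcho ela.toStr, " elapsed in slot(s) ", slots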
@@ -16,7 +16,7 @@ import
   stew/interval_set,
   ../../aristo,
   ../aristo_walk/persistent,
-  ".."/[aristo_desc, aristo_get, aristo_layers]
+  ".."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise]

 const
   Vid2 = @[VertexID(LEAST_FREE_VID)].toHashSet
@@ -385,16 +385,17 @@ proc deleteImpl(
   of Leaf:
     ? db.collapseLeaf(hike, nibble.byte, nxt.vtx)

+  let emptySubTreeOk = not db.getVtx(hike.root).isValid
+
   # Squeeze list of recycled vertex IDs
   db.top.final.vGen = db.vGen.vidReorg()
-
-  ok(not db.getVtx(hike.root).isValid)
+  ok(emptySubTreeOk)

 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------

-proc delete*(
+proc delTree*(
     db: AristoDbRef;                   # Database, top layer
     root: VertexID;                    # Root vertex
     accPath: PathID;                   # Needed for real storage tries
@@ -1,5 +1,5 @@
 # nimbus-eth1
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -120,6 +120,24 @@ type
     closeFn*: CloseFn                  ## Generic destructor

+func dup*(be: BackendRef): BackendRef =
+  if not be.isNil:
+    result = BackendRef(
+      filters:  be.filters,
+      getVtxFn: be.getVtxFn,
+      getKeyFn: be.getKeyFn,
+      getFilFn: be.getFilFn,
+      getIdgFn: be.getIdgFn,
+      getFqsFn: be.getFqsFn,
+      putBegFn: be.putBegFn,
+      putVtxFn: be.putVtxFn,
+      putKeyFn: be.putKeyFn,
+      putFilFn: be.putFilFn,
+      putIdgFn: be.putIdgFn,
+      putFqsFn: be.putFqsFn,
+      putEndFn: be.putEndFn,
+      closeFn:  be.closeFn)
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
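The same copy-then-override pattern the profiler uses works for any backend hook. A sketch reusing the `dup` helper added in this hunk; the read counter and `countingBackend` are illustrative, and the `getVtxFn` closure field is as declared in the backend descriptor:

proc countingBackend(be: BackendRef): BackendRef =
  ## Illustrative: count vertex reads by wrapping one backend closure.
  if be.isNil:
    return nil
  var nReads = 0
  result = be.dup                   # shallow copy of all function pointers
  result.getVtxFn =
    proc(vid: VertexID): auto =
      inc nReads                    # bump the read counter, then delegate
      be.getVtxFn(vid)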
@@ -47,7 +47,7 @@ proc fetchPayload*(
     key: LeafTie;
       ): Result[PayloadRef,(VertexID,AristoError)] =
   ## Cascaded attempt to traverse the `Aristo Trie` and fetch the value of a
-  ## leaf vertex. This function is complementary to `merge()`.
+  ## leaf vertex. This function is complementary to `mergePayload()`.
   ##
   key.hikeUp(db).fetchPayloadImpl

@@ -309,8 +309,7 @@ proc hashify*(
     db: AristoDbRef;                   # Database, top layer
       ): Result[void,(VertexID,AristoError)] =
   ## Add keys to the `Patricia Trie` so that it becomes a `Merkle Patricia
-  ## Tree`. If successful, the function returns the keys (aka Merkle hash) of
-  ## the root vertices.
+  ## Tree`.
   ##
   if 0 < db.dirty.len:
     # Set up width-first traversal schedule

@@ -549,7 +549,7 @@ proc mergeNodeImpl(
 # Public functions
 # ------------------------------------------------------------------------------

-proc merge*(
+proc mergePayload*(
     db: AristoDbRef;                   # Database, top layer
     leafTie: LeafTie;                  # Leaf item to add to the database
     payload: PayloadRef;               # Payload value

@@ -612,27 +612,17 @@ proc merge*(
   ok okHike

-proc merge*(
+proc mergePayload*(
     db: AristoDbRef;                   # Database, top layer
     root: VertexID;                    # MPT state root
     path: openArray[byte];             # Even nibbled byte path
     payload: PayloadRef;               # Payload value
-    accPath: PathID;                   # Needed for accounts payload
+    accPath = VOID_PATH_ID;            # Needed for accounts payload
      ): Result[bool,AristoError] =
   ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`
   ## object.
   let lty = LeafTie(root: root, path: ? path.pathToTag)
-  db.merge(lty, payload, accPath).to(typeof result)
-
-proc merge*(
-    db: AristoDbRef;                   # Database, top layer
-    path: openArray[byte];             # Even nibbled byte path
-    payload: PayloadRef;               # Payload value
-      ): Result[bool,AristoError] =
-  ## Variant of `merge()` for `(VertexID(1),path)` arguments instead of a
-  ## `LeafTie` object.
-  let lty = LeafTie(root: VertexID(1), path: ? path.pathToTag)
-  db.merge(lty, payload, VOID_PATH_ID).to(typeof result)
+  db.mergePayload(lty, payload, accPath).to(typeof result)

 proc merge*(

@@ -645,9 +635,9 @@ proc merge*(
   ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`.
   ## The argument `data` is stored as-is as a `RawData` payload value.
   let pyl = PayloadRef(pType: RawData, rawBlob: @data)
-  db.merge(root, path, pyl, accPath)
+  db.mergePayload(root, path, pyl, accPath)

-proc merge*(
+proc mergeAccount*(
     db: AristoDbRef;                   # Database, top layer
     path: openArray[byte];             # Leaf item to add to the database
     data: openArray[byte];             # Raw data payload value

@@ -656,26 +646,17 @@ proc merge*(
   ## `LeafTie`. The argument `data` is stored as-is as a `RawData` payload
   ## value.
   let pyl = PayloadRef(pType: RawData, rawBlob: @data)
-  db.merge(VertexID(1), path, pyl, VOID_PATH_ID)
+  db.mergePayload(VertexID(1), path, pyl, VOID_PATH_ID)

-proc merge*(
+proc mergeLeaf*(
     db: AristoDbRef;                   # Database, top layer
     leaf: LeafTiePayload;              # Leaf item to add to the database
-    accPath: PathID;                   # Needed for accounts payload
+    accPath = VOID_PATH_ID;            # Needed for accounts payload
      ): Result[bool,AristoError] =
   ## Variant of `merge()`. This function will not indicate if the leaf
   ## was cached already.
-  db.merge(leaf.leafTie, leaf.payload, accPath).to(typeof result)
-
-proc merge*(
-    db: AristoDbRef;                   # Database, top layer
-    leaf: LeafTiePayload;              # Leaf item to add to the database
-      ): Result[bool,AristoError] =
-  ## Variant of `merge()`, shortcut for `db.merge(leaf, VOID_PATH_ID)`. Note
-  ## that this function fails unless `leaf.root == VertexID(1)`.
-  db.merge(leaf.leafTie, leaf.payload, VOID_PATH_ID).to(typeof result)
+  db.mergePayload(leaf.leafTie, leaf.payload, accPath).to(typeof result)

 # ---------------------
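For callers, the rename makes the entry points explicit; the removed implicit-root overloads fold into `mergePayload` with `VertexID(1)` spelled out. A sketch of the equivalent call after this change, mirroring the removed variant's body (`storeRawSketch` is a hypothetical helper and assumes the usual aristo imports):

proc storeRawSketch(
    db: AristoDbRef;
    path: openArray[byte];             # Even nibbled byte path
    data: openArray[byte];             # Raw data payload value
      ): Result[bool,AristoError] =
  ## What the removed `db.merge(path, payload)` overload used to do.
  let pyl = PayloadRef(pType: RawData, rawBlob: @data)
  # `VertexID(1)` is the root of the main trie; no account path applies here
  db.mergePayload(VertexID(1), path, pyl, VOID_PATH_ID)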
@@ -1,5 +1,5 @@
 # nimbus-eth1
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -411,7 +411,7 @@ proc right*(
     root: lty.root,
     path: ? lty.nearbyNextLeafTie(db, 64, moveRight=true))

-iterator right*(
+iterator rightPairs*(
     db: AristoDbRef;                   # Database layer
     start = low(LeafTie);              # Before or at first value
       ): (LeafTie,PayloadRef) =
@@ -472,7 +472,7 @@ proc left*(
     root: lty.root,
     path: ? lty.nearbyNextLeafTie(db, 64, moveRight=false))

-iterator left*(
+iterator leftPairs*(
     db: AristoDbRef;                   # Database layer
     start = high(LeafTie);             # Before or at first value
       ): (LeafTie,PayloadRef) =
nimbus/db/aristo/aristo_profile.nim (new file, 202 lines)
@@ -0,0 +1,202 @@
# Nimbus
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

{.push raises: [].}

import
  std/[algorithm, math, sequtils, strformat, strutils, tables, times],
  eth/common

type
  AristoDbProfData* = tuple[sum: float, sqSum: float, count: int]

  AristoDbProfListRef* = ref object of RootRef
    ## Statistic table synced with name indexes from `AristoDbProfNames`. Here
    ## a `ref` is used so it can be modified when part of another object.
    ##
    list*: seq[AristoDbProfData]

  AristoDbProfEla* = seq[(Duration,seq[uint])]
  AristoDbProfMean* = seq[(Duration,seq[uint])]
  AristoDbProfCount* = seq[(int,seq[uint])]

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc toDuration(fl: float): Duration =
  ## Convert the seconds argument `fl` to a `Duration`.
  let (s, ns) = fl.splitDecimal
  initDuration(seconds = s.int, nanoseconds = (ns * 1_000_000_000).int)

func toFloat(ela: Duration): float =
  ## Convert the argument `ela` to a floating point seconds result.
  let
    elaS = ela.inSeconds
    elaNs = (ela - initDuration(seconds=elaS)).inNanoSeconds
  elaS.float + elaNs.float / 1_000_000_000

proc updateTotal(t: AristoDbProfListRef; fnInx: uint) =
  ## Summary update helper
  if fnInx == 0:
    t.list[0] = (0.0, 0.0, 0)
  else:
    t.list[0][0] += t.list[fnInx][0]
    t.list[0][1] += t.list[fnInx][1]
    t.list[0][2] += t.list[fnInx][2]

# ---------------------

func ppUs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
  result = $elapsed.inMicroseconds
  let ns = elapsed.inNanoseconds mod 1_000 # fraction of a micro second
  if ns != 0:
    # to rounded hundredths of a micro second
    let du = (ns + 5i64) div 10i64
    result &= &".{du:02}"
  result &= "us"

func ppMs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
  result = $elapsed.inMilliseconds
  let ns = elapsed.inNanoseconds mod 1_000_000 # fraction of a milli second
  if ns != 0:
    # to rounded hundredths of a milli second
    let dm = (ns + 5_000i64) div 10_000i64
    result &= &".{dm:02}"
  result &= "ms"

func ppSecs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
  result = $elapsed.inSeconds
  let ns = elapsed.inNanoseconds mod 1_000_000_000 # fraction of a second
  if ns != 0:
    # to rounded hundredths of a second
    let ds = (ns + 5_000_000i64) div 10_000_000i64
    result &= &".{ds:02}"
  result &= "s"

func ppMins(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
  result = $elapsed.inMinutes
  let ns = elapsed.inNanoseconds mod 60_000_000_000 # fraction of a minute
  if ns != 0:
    # to rounded seconds
    let dm = (ns + 500_000_000i64) div 1_000_000_000i64
    result &= &":{dm:02}"
  result &= "m"

# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------

func toStr*(elapsed: Duration): string =
  try:
    if 0 < times.inMinutes(elapsed):
      result = elapsed.ppMins
    elif 0 < times.inSeconds(elapsed):
      result = elapsed.ppSecs
    elif 0 < times.inMilliSeconds(elapsed):
      result = elapsed.ppMs
    elif 0 < times.inMicroSeconds(elapsed):
      result = elapsed.ppUs
    else:
      result = $elapsed.inNanoSeconds & "ns"
  except ValueError:
    result = $elapsed

proc update*(t: AristoDbProfListRef; inx: uint; ela: Duration) =
  ## Register time `ela` spent while executing the function with slot
  ## index `inx`
  let s = ela.toFloat
  t.list[inx].sum += s
  t.list[inx].sqSum += s * s
  t.list[inx].count.inc

proc byElapsed*(t: AristoDbProfListRef): AristoDbProfEla =
  ## Collate `CoreDb` function symbols by elapsed times, sorted with largest
  ## `Duration` first. Zero `Duration` entries are discarded.
  var u: Table[Duration,seq[uint]]
  for inx in 0u ..< t.list.len.uint:
    t.updateTotal inx
    let (secs,_,count) = t.list[inx]
    if 0 < count:
      let ela = secs.toDuration
      u.withValue(ela,val):
        val[].add inx
      do:
        u[ela] = @[inx]
  result.add (t.list[0u].sum.toDuration, @[0u])
  for ela in u.keys.toSeq.sorted Descending:
    u.withValue(ela,val):
      result.add (ela, val[])

proc byMean*(t: AristoDbProfListRef): AristoDbProfMean =
  ## Collate `CoreDb` function symbols by elapsed mean times, sorted with
  ## largest `Duration` first. Zero `Duration` entries are discarded.
  var u: Table[Duration,seq[uint]]
  for inx in 0u ..< t.list.len.uint:
    t.updateTotal inx
    let (secs,_,count) = t.list[inx]
    if 0 < count:
      let ela = (secs / count.float).toDuration
      u.withValue(ela,val):
        val[].add inx
      do:
        u[ela] = @[inx]
  result.add ((t.list[0u].sum / t.list[0u].count.float).toDuration, @[0u])
  for mean in u.keys.toSeq.sorted Descending:
    u.withValue(mean,val):
      result.add (mean, val[])

proc byVisits*(t: AristoDbProfListRef): AristoDbProfCount =
  ## Collate `CoreDb` function symbols by number of visits, sorted with
  ## largest number first.
  var u: Table[int,seq[uint]]
  for fnInx in 0 ..< t.list.len:
    t.updateTotal fnInx.uint
    let (_,_,count) = t.list[fnInx]
    if 0 < count:
      u.withValue(count,val):
        val[].add fnInx.uint
      do:
        u[count] = @[fnInx.uint]
  result.add (t.list[0u].count, @[0u])
  for count in u.keys.toSeq.sorted Descending:
    u.withValue(count,val):
      result.add (count, val[])

func stats*(
    t: AristoDbProfListRef;
    inx: uint;
      ): tuple[n: int, mean: Duration, stdDev: Duration, devRatio: float] =
  ## Return mean and standard deviation of the timing samples for slot `inx`
  let data = t.list[inx]
  result.n = data.count
  if 0 < result.n:
    let
      mean = data.sum / result.n.float
      sqMean = data.sqSum / result.n.float
      meanSq = mean * mean

      # Mathematically, `meanSq <= sqMean` but there might be rounding errors
      # if `meanSq` and `sqMean` are approximately the same.
      sigma = sqMean - min(meanSq,sqMean)
      stdDev = sigma.sqrt

    result.mean = mean.toDuration
    result.stdDev = stdDev.toDuration

    if 0 < mean:
      result.devRatio = stdDev / mean

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
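The profile list is usable on its own, since the module depends only on the standard library plus `eth/common`. A short worked example; note that slot 0 is reserved for the running total, as `updateTotal` above assumes:

import std/times
import nimbus/db/aristo/aristo_profile

let t = AristoDbProfListRef(list: newSeq[AristoDbProfData](3))
t.update(1, initDuration(milliseconds = 250))
t.update(1, initDuration(milliseconds = 750))
t.update(2, initDuration(seconds = 2))

let (n, mean, _, _) = t.stats(1)
debugEcho n, " samples, mean ", mean.toStr    # => 2 samples, mean 500ms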
@@ -22,14 +22,15 @@ import
 # ------------------------------------------------------------------------------

 proc vidFetch*(db: AristoDbRef; pristine = false): VertexID =
-  ## Create a new `VertexID`. Reusable vertex *ID*s are kept in a list where
-  ## the top entry *ID* has the property that any other *ID* larger is also not
-  ## not used on the database.
+  ## Recycle or create a new `VertexID`. Reusable vertex *ID*s are kept in a
+  ## list where the top entry *ID* has the property that any other *ID* larger
+  ## is also not used on the database.
   ##
   ## The function prefers to return recycled vertex *ID*s if there are any.
   ## When the argument `pristine` is set `true`, the function guarantees to
   ## return a non-recycled, brand new vertex *ID* which is the preferred mode
   ## when creating leaf vertices.
+  ##
   if db.vGen.len == 0:
     # Note that `VertexID(1)` is the root of the main trie
     db.top.final.vGen = @[VertexID(LEAST_FREE_VID+1)]

@@ -47,6 +48,7 @@ proc vidFetch*(db: AristoDbRef; pristine = false): VertexID =
 proc vidPeek*(db: AristoDbRef): VertexID =
   ## Like `new()` without consuming this *ID*. It will return the *ID* that
   ## would be returned by the `new()` function.
+  ##
   case db.vGen.len:
   of 0:
     VertexID(LEAST_FREE_VID)

@@ -59,6 +61,7 @@ proc vidPeek*(db: AristoDbRef): VertexID =
 proc vidDispose*(db: AristoDbRef; vid: VertexID) =
   ## Recycle the argument `vtxID` which is useful after deleting entries from
   ## the vertex table to keep the `VertexID` type key values small.
+  ##
   if LEAST_FREE_VID <= vid.distinctBase:
     if db.vGen.len == 0:
       db.top.final.vGen = @[vid]
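The recycling contract in one round trip, as a sketch that follows the documentation above (hypothetical helper, assuming `aristo_vid` is imported):

proc vidRoundTrip(db: AristoDbRef): VertexID =
  let vid = db.vidFetch()           # prefers a recycled ID if any exists
  db.vidDispose vid                 # hand it back to the free list
  db.vidFetch(pristine = true)      # guaranteed brand new, never recycled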
@@ -13,8 +13,12 @@
 import
   eth/common,
   results,
-  "../.."/[aristo, aristo/aristo_desc, aristo/aristo_walk],
-  "../.."/[kvt, kvt/kvt_desc, kvt/kvt_init/memory_only],
+  ../../aristo,
+  ../../aristo/[
+    aristo_desc, aristo_nearby, aristo_path, aristo_tx, aristo_serialise,
+    aristo_walk],
+  ../../kvt,
+  ../../kvt/[kvt_desc, kvt_init, kvt_tx, kvt_walk],
   ".."/[base, base/base_desc],
   ./aristo_db/[common_desc, handlers_aristo, handlers_kvt]

@@ -244,6 +248,14 @@ proc newAristoVoidCoreDbRef*(): CoreDbRef =
 # Public helpers for direct backend access
 # ------------------------------------------------------------------------------

+func toAristoProfData*(
+    db: CoreDbRef;
+      ): tuple[aristo: AristoDbProfListRef, kvt: KvtDbProfListRef] =
+  when CoreDbEnableApiProfiling:
+    if db.isAristo:
+      result.aristo = db.AristoCoreDbRef.adbBase.api.AristoApiProfRef.data
+      result.kvt = db.AristoCoreDbRef.kdbBase.api.KvtApiProfRef.data
+
 func toAristo*(be: CoreDbKvtBackendRef): KvtDbRef =
   if be.parent.isAristo:
     return be.AristoCoreDbKvtBE.kdb

@@ -268,7 +280,7 @@ iterator aristoKvtPairs*(dsc: CoreDxKvtRef): (Blob,Blob) {.rlpRaise.} =

 iterator aristoMptPairs*(dsc: CoreDxMptRef): (Blob,Blob) {.noRaise.} =
   let mpt = dsc.to(AristoDbRef)
-  for (k,v) in mpt.right LeafTie(root: dsc.rootID):
+  for (k,v) in mpt.rightPairs LeafTie(root: dsc.rootID):
     yield (k.path.pathAsBlob, mpt.serialise(v).valueOr(EmptyBlob))

 iterator aristoReplicateMem*(dsc: CoreDxMptRef): (Blob,Blob) {.rlpRaise.} =
@ -17,7 +17,6 @@ import
|
|||||||
stew/byteutils,
|
stew/byteutils,
|
||||||
results,
|
results,
|
||||||
../../../aristo,
|
../../../aristo,
|
||||||
../../../aristo/[aristo_desc, aristo_hike, aristo_vid],
|
|
||||||
../../base,
|
../../base,
|
||||||
../../base/base_desc,
|
../../base/base_desc,
|
||||||
./common_desc
|
./common_desc
|
||||||
@ -26,6 +25,7 @@ type
|
|||||||
AristoBaseRef* = ref object
|
AristoBaseRef* = ref object
|
||||||
parent: CoreDbRef ## Opaque top level descriptor
|
parent: CoreDbRef ## Opaque top level descriptor
|
||||||
adb: AristoDbRef ## Aristo MPT database
|
adb: AristoDbRef ## Aristo MPT database
|
||||||
|
api*: AristoApiRef ## Api functions can be re-directed
|
||||||
gq: seq[AristoChildDbRef] ## Garbage queue, deferred disposal
|
gq: seq[AristoChildDbRef] ## Garbage queue, deferred disposal
|
||||||
accCache: CoreDxAccRef ## Pre-configured accounts descriptor to share
|
accCache: CoreDxAccRef ## Pre-configured accounts descriptor to share
|
||||||
mptCache: MptCacheArray ## Pre-configured accounts descriptor to share
|
mptCache: MptCacheArray ## Pre-configured accounts descriptor to share
|
||||||
@@ -112,7 +112,7 @@ func to(address: EthAddress; T: type PathID): T =
 
 # ------------------------------------------------------------------------------
 # Auto destructor should appear before constructor
-# to prevent cannot bind another `=destroy` error
+# to prevent **cannot bind another `=destroy` error**
 # ------------------------------------------------------------------------------
 
 proc `=destroy`(cMpt: var AristoChildDbObj) =
@@ -244,8 +244,11 @@ proc newTrieCtx(
     info: static[string];
       ): CoreDbRc[AristoCoreDbTrie] =
   base.gc()
-  var trie = AristoCoreDbTrie(trie)
-  let db = base.parent
+  var
+    trie = AristoCoreDbTrie(trie)
+  let
+    db = base.parent
+    api = base.api
 
   # Update `trie` argument, handle default settings
   block validateRoot:
@@ -271,9 +274,9 @@ proc newTrieCtx(
   # Get normalised `svaeMode` and `MPT`
   let (mode, mpt) = case saveMode:
     of TopShot:
-      (saveMode, ? base.adb.forkTop.toRc(db, info))
+      (saveMode, ? api.forkTop(base.adb).toRc(db, info))
     of Companion:
-      (saveMode, ? base.adb.fork.toRc(db, info))
+      (saveMode, ? api.fork(base.adb).toRc(db, info))
     of Shared, AutoSave:
       if base.adb.backend.isNil:
         (Shared, base.adb)
@@ -287,8 +290,6 @@ proc newTrieCtx(
         break body
 
       # Use cached descriptor
-      # AristoCoreDxMptRef(base.mptCache[trie.kind])
-
       let ctx = base.mptCache[trie.kind].ctx
       if not trie.ctx.isValid:
         trie.ctx = ctx
@@ -326,6 +327,7 @@ proc getTrieFn(
     kind = if LEAST_FREE_VID <= root.distinctBase: StorageTrie
            else: CoreDbSubTrie(root)
 
+  doAssert kind != StorageTrie or cMpt.accPath.isValid
   result = cMpt.base.parent.bless AristoCoreDbTrie(
     kind: kind,
     root: root,
@@ -342,13 +344,14 @@ proc persistent(
   let
     base = cMpt.base
     mpt = cMpt.mpt
+    api = base.api
     db = base.parent
-    rc = mpt.stow(persistent = true)
+    rc = api.stow(mpt, persistent = true)
 
   # note that `gc()` may call `persistent()` so there is no `base.gc()` here
   if rc.isOk:
     ok()
-  elif mpt.level == 0:
+  elif api.level(mpt) == 0:
     err(rc.error.toError(db, info))
   else:
     err(rc.error.toError(db, info, cMpt.txError))
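
A pattern visible in `persistent()` above and repeated throughout this file: a low-level error code comes back inside a `Result`, and at the `CoreDb` boundary it is lifted onto the outer error type via `toError(db, info)`, tagging it with the name of the failing function. A reduced self-contained sketch of that lifting step (all type names here are illustrative, not the production ones):

type
  LowError = enum
    NoError, TxPending
  HighError = object
    ctx: string        # e.g. the `info` string of the failing function
    code: LowError
  Rc = object
    ok: bool
    error: LowError

func toError(e: LowError; info: string): HighError =
  HighError(ctx: info, code: e)

func stowish(level: int): Rc =
  # stands in for a call like `api.stow(mpt, ...)`
  if level == 0: Rc(ok: true)
  else: Rc(ok: false, error: TxPending)

when isMainModule:
  doAssert stowish(0).error == NoError
  let rc = stowish(1)
  if not rc.ok:
    let e = rc.error.toError("persistent()")
    doAssert e.code == TxPending and e.ctx == "persistent()"
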
@@ -367,9 +370,10 @@ proc forget(
 
   if mpt != base.adb:
     let
-      db = base.parent
-      rc = cMpt.mpt.forget()
+      api = base.api
+      rc = api.forget(cMpt.mpt)
     if rc.isErr:
+      let db = base.parent
       result = err(rc.error.toError(db, info))
 
 # ------------------------------------------------------------------------------
@@ -409,9 +413,10 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
 
     let
      mpt = cMpt.mpt
-      rc = mpt.fetchPayload(cMpt.root, k)
+      api = cMpt.base.api
+      rc = api.fetchPayload(mpt, cMpt.root, k)
    if rc.isOk:
-      mpt.serialise(rc.value).toRc(db, info)
+      api.serialise(mpt, rc.value).toRc(db, info)
    elif rc.error[1] != FetchPathNotFound:
      err(rc.error.toError(db, info))
    else:
@@ -425,18 +430,19 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
       ): CoreDbRc[void] =
    let
      db = cMpt.base.parent
+      api = cMpt.base.api
      mpt = cMpt.mpt
      rootOk = cMpt.root.isValid
 
    # Provide root ID on-the-fly
    if not rootOk:
-      cMpt.root = mpt.vidFetch(pristine=true)
+      cMpt.root = api.vidFetch(mpt, pristine=true)
 
-    let rc = mpt.merge(cMpt.root, k, v, cMpt.accPath)
+    let rc = api.merge(mpt, cMpt.root, k, v, cMpt.accPath)
    if rc.isErr:
      # Re-cycle unused ID (prevents from leaking IDs)
      if not rootOk:
-        mpt.vidDispose cMpt.root
+        api.vidDispose(mpt, cMpt.root)
        cMpt.root = VoidTrieID
      return err(rc.error.toError(db, info))
    ok()
@@ -448,6 +454,7 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
       ): CoreDbRc[void] =
    let
      db = cMpt.base.parent
+      api = cMpt.base.api
      mpt = cMpt.mpt
 
    if not cMpt.root.isValid and cMpt.accPath.isValid:
@@ -455,7 +462,7 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
      # but no data have been added, yet.
      return ok()
 
-    let rc = mpt.delete(cMpt.root, k, cMpt.accPath)
+    let rc = api.delete(mpt, cMpt.root, k, cMpt.accPath)
    if rc.isErr:
      if rc.error[1] == DelPathNotFound:
        return err(rc.error.toError(db, info, MptNotFound))
@@ -473,10 +480,11 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
      info: static[string];
        ): CoreDbRc[bool] =
    let
-      db = cMpt.base.parent
      mpt = cMpt.mpt
-      rc = mpt.hasPath(cMpt.root, key)
+      api = cMpt.base.api
+      rc = api.hasPath(mpt, cMpt.root, key)
    if rc.isErr:
+      let db = cMpt.base.parent
      return err(rc.error.toError(db, info))
    ok(rc.value)
 
@@ -545,11 +553,12 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
      ): CoreDbRc[CoreDbAccount] =
    let
      db = cAcc.base.parent
+      api = cAcc.base.api
      mpt = cAcc.mpt
      pyl = block:
        let
          key = address.keccakHash.data
-          rc = mpt.fetchPayload(cAcc.root, key)
+          rc = api.fetchPayload(mpt, cAcc.root, key)
        if rc.isOk:
          rc.value
        elif rc.error[1] != FetchPathNotFound:
@@ -569,10 +578,11 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
      ): CoreDbRc[void] =
    let
      db = cAcc.base.parent
+      api = cAcc.base.api
      mpt = cAcc.mpt
      key = acc.address.keccakHash.data
      val = acc.toPayloadRef()
-      rc = mpt.merge(cAcc.root, key, val, VOID_PATH_ID)
+      rc = api.mergePayload(mpt, cAcc.root, key, val)
    if rc.isErr:
      return err(rc.error.toError(db, info))
    ok()
@@ -584,9 +594,10 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
      ): CoreDbRc[void] =
    let
      db = cAcc.base.parent
+      api = cAcc.base.api
      mpt = cAcc.mpt
      key = address.keccakHash.data
-      rc = mpt.delete(cAcc.root, key, VOID_PATH_ID)
+      rc = api.delete(mpt, cAcc.root, key, VOID_PATH_ID)
    if rc.isErr:
      if rc.error[1] == DelPathNotFound:
        return err(rc.error.toError(db, info, AccNotFound))
@@ -600,16 +611,17 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
      ): CoreDbRc[void] =
    let
      db = cAcc.base.parent
+      api = cAcc.base.api
      mpt = cAcc.mpt
      key = address.keccakHash.data
-      pyl = mpt.fetchPayload(cAcc.root, key).valueOr:
+      pyl = api.fetchPayload(mpt, cAcc.root, key).valueOr:
        return ok()
 
    # Use storage ID from account and delete that sub-trie
    if pyl.pType == AccountData:
      let stoID = pyl.account.storageID
      if stoID.isValid:
-        let rc = mpt.delete(stoID, address.to(PathID))
+        let rc = api.delTree(mpt, stoID, address.to(PathID))
        if rc.isErr:
          return err(rc.error.toError(db, info))
    ok()
@@ -621,9 +633,10 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
      ): CoreDbRc[bool] =
    let
      db = cAcc.base.parent
+      api = cAcc.base.api
      mpt = cAcc.mpt
      key = address.keccakHash.data
-      rc = mpt.hasPath(cAcc.root, key)
+      rc = api.hasPath(mpt, cAcc.root, key)
    if rc.isErr:
      return err(rc.error.toError(db, info))
    ok(rc.value)
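
The `mptMethods`/`accMethods` constructors patched above each return a record of closures that capture their child descriptor, so every method shares the same `cMpt`/`cAcc` state without exposing it. A reduced sketch of that shape, using illustrative stand-in names rather than the production `CoreDbMptFns` fields:

type
  Child = ref object
    data: seq[string]          # stands in for the child descriptor state
  MptFns = object
    putFn: proc(v: string)
    lenFn: proc(): int

proc mptMethodsSketch(c: Child): MptFns =
  # nested procs close over `c`, mirroring the style used above
  proc put(v: string) =
    c.data.add v
  proc length(): int =
    c.data.len
  MptFns(putFn: put, lenFn: length)

when isMainModule:
  let
    c = Child()
    fns = c.mptMethodsSketch
  fns.putFn("hello")
  fns.putFn("world")
  doAssert fns.lenFn() == 2
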
@@ -700,6 +713,8 @@ proc gc*(base: AristoBaseRef) =
   ## entry and mostly be empty.
   const
     info = "gc()"
+  let
+    api = base.api
   var
     resetQ = 0
     first = 0
@@ -709,7 +724,7 @@ proc gc*(base: AristoBaseRef) =
     if base.gq[0].mpt == base.adb:
       first = 1
       let cMpt = base.gq[0]
-      if 0 < cMpt.mpt.level:
+      if 0 < api.level(cMpt.mpt):
         resetQ = 1
       else:
         let rc = cMpt.persistent info
@@ -721,7 +736,7 @@ proc gc*(base: AristoBaseRef) =
     for n in first ..< base.gq.len:
       let cMpt = base.gq[n]
       # FIXME: Currently no strategy for `Companion` and `TopShot`
-      let rc = cMpt.mpt.forget
+      let rc = base.api.forget(cMpt.mpt)
       if rc.isErr:
         let error = rc.error.toError(base.parent, info).errorPrint
         debug logTxt info, saveMode=cMpt.saveMode, error
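
The `gq` "garbage queue" drained by `gc()` above exists because `=destroy` cannot safely run database code, so doomed descriptors are parked on a queue and disposed of on the next explicit `gc()` call, skipping any that still have a transaction pending. A minimal self-contained sketch of that deferred-disposal loop (names illustrative):

type
  Desc = ref object
    level: int             # pending transaction depth
    closed: bool
  Base = ref object
    gq: seq[Desc]          # garbage queue, refilled by destructors

proc gcSketch(base: Base) =
  var later: seq[Desc]
  while 0 < base.gq.len:
    var q: seq[Desc]
    base.gq.swap q         # the queue may refill while we work on `q`
    for d in q:
      if 0 < d.level:
        later.add d        # transaction pending, retry on a later run
      else:
        d.closed = true
  base.gq = later

when isMainModule:
  let base = Base(gq: @[Desc(level: 0), Desc(level: 1)])
  base.gcSketch()
  doAssert base.gq.len == 1 and not base.gq[0].closed
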
@@ -740,19 +755,18 @@ func txTop*(
     base: AristoBaseRef;
     info: static[string];
       ): CoreDbRc[AristoTxRef] =
-  base.adb.txTop.toRc(base.parent, info)
+  base.api.txTop(base.adb).toRc(base.parent, info)
 
 proc txBegin*(
     base: AristoBaseRef;
     info: static[string];
       ): CoreDbRc[AristoTxRef] =
-  base.adb.txBegin.toRc(base.parent, info)
+  base.api.txBegin(base.adb).toRc(base.parent, info)
 
 # ---------------------
 
-func getLevel*(base: AristoBaseRef): int =
-  base.adb.level
-
+proc getLevel*(base: AristoBaseRef): int =
+  base.api.level(base.adb)
 
 proc tryHash*(
     base: AristoBaseRef;
@@ -767,7 +781,7 @@ proc tryHash*(
   if not root.isValid:
     return ok(EMPTY_ROOT_HASH)
 
-  let rc = trie.ctx.mpt.getKeyRc root
+  let rc = base.api.getKeyRc(trie.ctx.mpt, root)
   if rc.isErr:
     return err(rc.error.toError(base.parent, info, HashNotAvailable))
 
@@ -807,10 +821,10 @@ proc rootHash*(
       ): CoreDbRc[Hash256] =
   let
     db = base.parent
+    api = base.api
     trie = trie.AristoCoreDbTrie
   if not trie.ctx.isValid:
     return err(MptContextMissing.toError(db, info, HashNotAvailable))
 
   let
     mpt = trie.ctx.mpt
     root = trie.to(VertexID)
@@ -818,10 +832,10 @@ proc rootHash*(
   if not root.isValid:
     return ok(EMPTY_ROOT_HASH)
 
-  ? mpt.hashify.toVoidRc(db, info, HashNotAvailable)
+  ? api.hashify(mpt).toVoidRc(db, info, HashNotAvailable)
 
   let key = block:
-    let rc = mpt.getKeyRc root
+    let rc = api.getKeyRc(mpt, root)
     if rc.isErr:
       doAssert rc.error in {GetKeyNotFound,GetKeyUpdateNeeded}
       return err(rc.error.toError(base.parent, info, HashNotAvailable))
@@ -832,6 +846,7 @@ proc rootHash*(
 proc rootHash*(mpt: CoreDxMptRef): VertexID =
   AristoCoreDxMptRef(mpt).ctx.root
 
+
 proc getTrie*(
     base: AristoBaseRef;
     kind: CoreDbSubTrie;
@@ -842,6 +857,7 @@ proc getTrie*(
   let
     db = base.parent
     adb = base.adb
+    api = base.api
     ethAddr = (if address.isNone: EthAddress.default else: address.unsafeGet)
     path = (if address.isNone: VOID_PATH_ID else: ethAddr.to(PathID))
   base.gc() # update pending changes
@@ -859,14 +875,15 @@ proc getTrie*(
       trie.address = ethAddr
     return ok(db.bless trie)
 
-  ? adb.hashify.toVoidRc(db, info, HashNotAvailable)
+  ? api.hashify(adb).toVoidRc(db, info, HashNotAvailable)
 
   # Check whether hash is available as state root on main trie
   block:
-    let rc = adb.getKeyRc VertexID(kind)
+    let rc = api.getKeyRc(adb, VertexID kind)
     if rc.isErr:
       doAssert rc.error == GetKeyNotFound
     elif rc.value == root.to(HashKey):
+      doAssert kind != StorageTrie or path.isValid
       var trie = AristoCoreDbTrie(
         kind: kind,
         root: VertexID(kind),
@@ -891,7 +908,8 @@ proc verify*(base: AristoBaseRef; trie: CoreDbTrieRef): bool =
     return false
   if not trie.root.isValid:
     return true
-  if trie.accPath.to(NibblesSeq).hikeUp(AccountsTrieID,base.adb).isOk:
+  let path = trie.accPath.to(NibblesSeq)
+  if base.api.hikeUp(path, AccountsTrieID, base.adb).isOk:
     return true
   false
 
@@ -904,10 +922,12 @@ proc newMptHandler*(
   let
     trie = ? base.newTrieCtx(trie, saveMode, info)
     db = base.parent
+    api = base.api
   if trie.kind == StorageTrie and trie.root.isValid:
     let
       adb = base.adb
-      rc = trie.accPath.to(NibblesSeq).hikeUp(AccountsTrieID,adb)
+      path = trie.accPath.to(NibblesSeq)
+      rc = api.hikeUp(path, AccountsTrieID, adb)
     if rc.isErr:
       return err(rc.error[1].toError(db, info, AccNotFound))
   if trie.reset:
@@ -915,7 +935,8 @@ proc newMptHandler*(
     # beween `VertexID(2) ..< LEAST_FREE_VID`. At the moment, this applies to
     # `GenericTrie` type sub-tries somehow emulating the behaviour of a new
     # empty MPT on the legacy database (handle with care, though.)
-    let rc = trie.ctx.mpt.delete(trie.root, VOID_PATH_ID)
+    let
+      rc = api.delTree(trie.ctx.mpt, trie.root, VOID_PATH_ID)
     if rc.isErr:
       return err(rc.error.toError(db, info, AutoFlushFailed))
     trie.reset = false
@@ -955,11 +976,19 @@ proc destroy*(base: AristoBaseRef; flush: bool) =
   base.gc()
 
   # Close descriptor
-  base.adb.finish(flush)
+  base.api.finish(base.adb, flush)
 
 
 func init*(T: type AristoBaseRef; db: CoreDbRef; adb: AristoDbRef): T =
-  result = T(parent: db, adb: adb)
+  result = T(
+    parent: db,
+    api: AristoApiRef.init(),
+    adb: adb)
 
+  when CoreDbEnableApiProfiling:
+    let profApi = AristoApiProfRef.init(result.api, adb.backend)
+    result.api = profApi
+    result.adb.backend = profApi.be
+
   # Provide pre-configured handlers to share
   for trie in AccountsTrie .. high(CoreDbSubTrie):
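
The `init()` hunk above shows where the stackable API pays off: under a compile-time switch, the freshly built API table is replaced by an instrumented copy before anything else sees it. A self-contained sketch of that hook-up pattern (the names `EnableApiProfiling`, `Api`, `Base` are illustrative stand-ins, not the production symbols):

const EnableApiProfiling = true   # stand-in for `CoreDbEnableApiProfiling`

type
  Fn = proc()
  Api = ref object
    ping: Fn
  Base = ref object
    api: Api

var calls = 0                     # one counter stands in for the profile table

proc instrumented(api: Api): Api =
  let inner = api.ping            # keep the original entry restorable
  proc wrapped() =
    inc calls
    inner()
  Api(ping: wrapped)

proc initBase(): Base =
  proc noop() = discard
  result = Base(api: Api(ping: noop))
  when EnableApiProfiling:
    result.api = result.api.instrumented

when isMainModule:
  let base = initBase()
  base.api.ping()
  doAssert calls == 1
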
@@ -15,7 +15,6 @@ import
   eth/common,
   results,
   ../../../kvt,
-  ../../../kvt/kvt_desc,
   ../../base,
   ../../base/base_desc,
   ./common_desc
@@ -24,6 +23,7 @@ type
   KvtBaseRef* = ref object
     parent: CoreDbRef           ## Opaque top level descriptor
     kdb: KvtDbRef               ## Key-value table
+    api*: KvtApiRef             ## Api functions can be re-directed
     gq: seq[KvtChildDbRef]      ## Garbage queue, deferred disposal
     cache: CoreDxKvtRef         ## Pre-configured descriptor to share
 
@@ -93,7 +93,7 @@ proc `=destroy`(cKvt: var KvtChildDbObj) =
     # Do some heuristics to avoid duplicates:
     block addToBatchQueue:
       if kvt != base.kdb:                # not base descriptor?
-        if kvt.level == 0:               # no transaction pending?
+        if base.api.level(kvt) == 0:     # no transaction pending?
           break addToBatchQueue          # add to destructor queue
         else:
           break body                     # ignore `kvt`
@@ -129,13 +129,14 @@ proc persistent(
   let
     base = cKvt.base
     kvt = cKvt.kvt
+    api = base.api
     db = base.parent
-    rc = kvt.stow()
+    rc = api.stow(kvt)
 
   # Note that `gc()` may call `persistent()` so there is no `base.gc()` here
   if rc.isOk:
     ok()
-  elif kvt.level == 0:
+  elif api.level(kvt) == 0:
     err(rc.error.toError(db, info))
   else:
     err(rc.error.toError(db, info, KvtTxPending))
@@ -154,7 +155,7 @@ proc forget(
   if kvt != base.kdb:
     let
       db = base.parent
-      rc = kvt.forget()
+      rc = base.api.forget(kvt)
     if rc.isErr:
       result = err(rc.error.toError(db, info))
 
@@ -186,9 +187,11 @@ proc kvtMethods(cKvt: KvtChildDbRef): CoreDbKvtFns =
       info: static[string];
        ): CoreDbRc[Blob] =
     ## Member of `CoreDbKvtFns`
-    let rc = cKvt.kvt.get(k)
+    let
+      base = cKvt.base
+      rc = base.api.get(cKvt.kvt, k)
     if rc.isErr:
-      let db = cKvt.base.parent
+      let db = base.parent
       if rc.error == GetNotFound:
         return err(rc.error.toError(db, info, KvtNotFound))
       else:
@@ -201,9 +204,11 @@ proc kvtMethods(cKvt: KvtChildDbRef): CoreDbKvtFns =
       v: openArray[byte];
       info: static[string];
        ): CoreDbRc[void] =
-    let rc = cKvt.kvt.put(k,v)
+    let
+      base = cKvt.base
+      rc = base.api.put(cKvt.kvt, k,v)
     if rc.isErr:
-      return err(rc.error.toError(cKvt.base.parent, info))
+      return err(rc.error.toError(base.parent, info))
     ok()
 
   proc kvtDel(
@@ -211,9 +216,11 @@ proc kvtMethods(cKvt: KvtChildDbRef): CoreDbKvtFns =
       k: openArray[byte];
       info: static[string];
        ): CoreDbRc[void] =
-    let rc = cKvt.kvt.del k
+    let
+      base = cKvt.base
+      rc = base.api.del(cKvt.kvt, k)
     if rc.isErr:
-      return err(rc.error.toError(cKvt.base.parent, info))
+      return err(rc.error.toError(base.parent, info))
     ok()
 
   proc kvtHasKey(
@@ -221,9 +228,11 @@ proc kvtMethods(cKvt: KvtChildDbRef): CoreDbKvtFns =
       k: openArray[byte];
       info: static[string];
        ): CoreDbRc[bool] =
-    let rc = cKvt.kvt.hasKey(k)
+    let
+      base = cKvt.base
+      rc = base.api.hasKey(cKvt.kvt, k)
     if rc.isErr:
-      return err(rc.error.toError(cKvt.base.parent, info))
+      return err(rc.error.toError(base.parent, info))
     ok(rc.value)
 
   CoreDbKvtFns(
@@ -274,6 +283,7 @@ proc gc*(base: KvtBaseRef) =
   ## entry and mostly be empty.
   const info = "gc()"
   var kdbAutoSave = false
+  let api = base.api
 
   proc saveAndDestroy(cKvt: KvtChildDbRef): CoreDbRc[void] =
     if cKvt.kvt != base.kdb:
@@ -289,14 +299,14 @@ proc gc*(base: KvtBaseRef) =
   # There might be a single queue item left over from the last run
   # which can be ignored right away as the body below would not change
   # anything.
-  if base.gq.len != 1 or base.gq[0].kvt.level == 0:
+  if base.gq.len != 1 or api.level(base.gq[0].kvt) == 0:
     var later = KvtChildDbRef(nil)
 
     while 0 < base.gq.len:
       var q: seq[KvtChildDbRef]
       base.gq.swap q # now `=destroy()` may refill while destructing, below
       for cKvt in q:
-        if 0 < cKvt.kvt.level:
+        if 0 < api.level(cKvt.kvt):
           assert cKvt.kvt == base.kdb and cKvt.saveMode == AutoSave
           later = cKvt # do it later when no transaction pending
           continue
@@ -317,13 +327,13 @@ func txTop*(
     base: KvtBaseRef;
     info: static[string];
       ): CoreDbRc[KvtTxRef] =
-  base.kdb.txTop.toRc(base.parent, info)
+  base.api.txTop(base.kdb).toRc(base.parent, info)
 
 proc txBegin*(
     base: KvtBaseRef;
     info: static[string];
       ): CoreDbRc[KvtTxRef] =
-  base.kdb.txBegin.toRc(base.parent, info)
+  base.api.txBegin(base.kdb).toRc(base.parent, info)
 
 # ------------------------------------------------------------------------------
 # Public constructors and related
@@ -374,11 +384,14 @@ proc destroy*(base: KvtBaseRef; flush: bool) =
   base.gc()
 
   # Close descriptor
-  base.kdb.finish(flush)
+  base.api.finish(base.kdb, flush)
 
 
 func init*(T: type KvtBaseRef; db: CoreDbRef; kdb: KvtDbRef): T =
-  result = T(parent: db, kdb: kdb)
+  result = T(
+    parent: db,
+    api: KvtApiRef.init(),
+    kdb: kdb)
 
   # Provide pre-configured handlers to share
   let cKvt = KvtChildDbRef(
@@ -386,6 +399,11 @@ func init*(T: type KvtBaseRef; db: CoreDbRef; kdb: KvtDbRef): T =
     kvt: kdb,
     saveMode: Shared)
 
+  when CoreDbEnableApiProfiling:
+    let profApi = KvtApiProfRef.init(result.api, kdb.backend)
+    result.api = profApi
+    result.kdb.backend = profApi.be
+
   result.cache = db.bless KvtCoreDxKvtRef(
     ctx: cKvt,
     methods: cKvt.kvtMethods)
@@ -14,7 +14,8 @@
   eth/common,
   results,
   ../../aristo,
-  ../../aristo/[aristo_persistent, aristo_walk/persistent],
+  ../../aristo/[
+    aristo_desc, aristo_persistent, aristo_walk/persistent, aristo_tx],
   ../../kvt,
   ../../kvt/kvt_persistent,
   ../base,
@@ -49,6 +49,7 @@ export
   CoreDbKvtBackendRef,
   CoreDbMptBackendRef,
   CoreDbPersistentTypes,
+  CoreDbProfListRef,
   CoreDbRef,
   CoreDbSaveFlags,
   CoreDbSubTrie,
@@ -59,13 +60,7 @@ export
   CoreDxKvtRef,
   CoreDxMptRef,
   CoreDxPhkRef,
-  CoreDxTxRef,
-
-  # Profiling support
-  byElapsed,
-  byMean,
-  byVisits,
-  stats
+  CoreDxTxRef
 
 const
   CoreDbProvideLegacyAPI* = ProvideLegacyAPI
@@ -81,10 +76,6 @@ when ProvideLegacyAPI:
 when AutoValidateDescriptors:
   import ./base/validate
 
-when EnableApiTracking and EnableApiProfiling:
-  var coreDbProfTab*: CoreDbProfFnInx
-
-
 # More settings
 const
   logTxt = "CoreDb "
@@ -128,7 +119,7 @@ when ProvideLegacyAPI:
     ## Template with code section that will be discarded if logging is
     ## disabled at compile time when `EnableApiTracking` is `false`.
     when EnableApiTracking:
-      w.beginLegaApi()
+      w.beginLegaApi(s)
       code
     const ctx {.inject,used.} = s
 
@@ -142,8 +133,6 @@ when ProvideLegacyAPI:
   template ifTrackLegaApi*(w: CoreDbApiTrackRef; code: untyped) =
     when EnableApiTracking:
       w.endLegaApiIf:
-        when EnableApiProfiling:
-          coreDbProfTab.update(ctx, elapsed)
         code
 
 
@@ -155,7 +144,7 @@ template setTrackNewApi(
     ## Template with code section that will be discarded if logging is
     ## disabled at compile time when `EnableApiTracking` is `false`.
     when EnableApiTracking:
-      w.beginNewApi()
+      w.beginNewApi(s)
       code
     const ctx {.inject,used.} = s
 
@@ -169,8 +158,6 @@ template setTrackNewApi*(
 template ifTrackNewApi*(w: CoreDxApiTrackRef; code: untyped) =
   when EnableApiTracking:
     w.endNewApiIf:
-      when EnableApiProfiling:
-        coreDbProfTab.update(ctx, elapsed)
       code
 
 # ---------
@@ -212,6 +199,8 @@ proc bless*(db: CoreDbRef): CoreDbRef =
   ## Verify descriptor
   when AutoValidateDescriptors:
     db.validate
+  when CoreDbEnableApiProfiling:
+    db.profTab = CoreDbProfListRef.init()
   db
 
 proc bless*(db: CoreDbRef; trie: CoreDbTrieRef): CoreDbTrieRef =
@@ -268,6 +257,13 @@ proc verify*(trie: CoreDbTrieRef): bool =
 # Public main descriptor methods
 # ------------------------------------------------------------------------------
 
+proc dbProfData*(db: CoreDbRef): CoreDbProfListRef =
+  ## Return profiling data table (only available in profiling mode). If
+  ## available (i.e. non-nil), result data can be organised by the functions
+  ## available with `aristo_profile`.
+  when CoreDbEnableApiProfiling:
+    db.profTab
+
 proc dbType*(db: CoreDbRef): CoreDbType =
   ## Getter, print DB type identifier
   ##
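
Note how `dbProfData` above only assigns `result` inside the `when` branch; outside profiling mode the implicit result stays nil, so callers must guard on that. A stand-alone sketch of the same compile-time gating (assuming a toy `ProfListRef`, not the production type):

const ProfilingOn = false       # stand-in for `CoreDbEnableApiProfiling`

type ProfListRef = ref object
  list: seq[float]

var profTab = when ProfilingOn: ProfListRef(list: @[])
              else: ProfListRef(nil)

proc dbProfDataSketch(): ProfListRef =
  when ProfilingOn:
    profTab                     # only branch that ever assigns `result`

when isMainModule:
  let prof = dbProfDataSketch()
  doAssert prof.isNil == (not ProfilingOn)
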
@@ -11,11 +11,11 @@
 {.push raises: [].}
 
 import
-  std/[algorithm, math, sequtils, strformat, strutils, tables, times,
-       typetraits],
+  std/[strutils, times, typetraits],
   eth/common,
   results,
   stew/byteutils,
+  ../../aristo/aristo_profile,
   "."/[api_new_desc, api_legacy_desc, base_desc]
 
 type
@@ -141,77 +141,13 @@ type
     TxRollbackFn = "tx/rollback"
     TxSaveDisposeFn = "tx/safeDispose"
 
-  CoreDbProfFnInx* = array[CoreDbFnInx,(float,float,int)]
-  CoreDbProfEla* = seq[(Duration,seq[CoreDbFnInx])]
-  CoreDbProfMean* = seq[(Duration,seq[CoreDbFnInx])]
-  CoreDbProfCount* = seq[(int,seq[CoreDbFnInx])]
-
 # ------------------------------------------------------------------------------
 # Private helpers
 # ------------------------------------------------------------------------------
 
-proc toDuration(fl: float): Duration =
-  ## Convert the nanoseconds argument `ns` to a `Duration`.
-  let (s, ns) = fl.splitDecimal
-  initDuration(seconds = s.int, nanoseconds = (ns * 1_000_000_000).int)
-
-func toFloat(ela: Duration): float =
-  ## Convert the argument `ela` to a floating point seconds result.
-  let
-    elaS = ela.inSeconds
-    elaNs = (ela - initDuration(seconds=elaS)).inNanoSeconds
-  elaS.float + elaNs.float / 1_000_000_000
-
-proc updateTotal(t: var CoreDbProfFnInx; fnInx: CoreDbFnInx) =
-  ## Summary update helper
-  if fnInx == SummaryItem:
-    t[SummaryItem] = (0.0, 0.0, 0)
-  else:
-    t[SummaryItem][0] += t[fnInx][0]
-    t[SummaryItem][1] += t[fnInx][1]
-    t[SummaryItem][2] += t[fnInx][2]
-
-# -----------------
-
 func oaToStr(w: openArray[byte]): string =
   w.toHex.toLowerAscii
 
-func ppUs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
-  result = $elapsed.inMicroseconds
-  let ns = elapsed.inNanoseconds mod 1_000 # fraction of a micro second
-  if ns != 0:
-    # to rounded deca milli seconds
-    let du = (ns + 5i64) div 10i64
-    result &= &".{du:02}"
-  result &= "us"
-
-func ppMs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
-  result = $elapsed.inMilliseconds
-  let ns = elapsed.inNanoseconds mod 1_000_000 # fraction of a milli second
-  if ns != 0:
-    # to rounded deca milli seconds
-    let dm = (ns + 5_000i64) div 10_000i64
-    result &= &".{dm:02}"
-  result &= "ms"
-
-func ppSecs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
-  result = $elapsed.inSeconds
-  let ns = elapsed.inNanoseconds mod 1_000_000_000 # fraction of a second
-  if ns != 0:
-    # round up
-    let ds = (ns + 5_000_000i64) div 10_000_000i64
-    result &= &".{ds:02}"
-  result &= "s"
-
-func ppMins(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
-  result = $elapsed.inMinutes
-  let ns = elapsed.inNanoseconds mod 60_000_000_000 # fraction of a minute
-  if ns != 0:
-    # round up
-    let dm = (ns + 500_000_000i64) div 1_000_000_000i64
-    result &= &":{dm:02}"
-  result &= "m"
-
 # ------------------------------------------------------------------------------
 # Public API logging helpers
 # ------------------------------------------------------------------------------
@@ -275,26 +211,14 @@ proc toStr*(rc: CoreDbRc[CoreDxCaptRef]): string = rc.toStr "capt"
 proc toStr*(rc: CoreDbRc[CoreDxMptRef]): string = rc.toStr "mpt"
 proc toStr*(rc: CoreDbRc[CoreDxAccRef]): string = rc.toStr "acc"
 
-func toStr*(elapsed: Duration): string =
-  try:
-    if 0 < times.inMinutes(elapsed):
-      result = elapsed.ppMins
-    elif 0 < times.inSeconds(elapsed):
-      result = elapsed.ppSecs
-    elif 0 < times.inMilliSeconds(elapsed):
-      result = elapsed.ppMs
-    elif 0 < times.inMicroSeconds(elapsed):
-      result = elapsed.ppUs
-    else:
-      result = $elapsed.inNanoSeconds & "ns"
-  except ValueError:
-    result = $elapsed
+func toStr*(ela: Duration): string =
+  aristo_profile.toStr(ela)
 
 # ------------------------------------------------------------------------------
 # Public legacy API logging framework
 # ------------------------------------------------------------------------------
 
-template beginLegaApi*(w: CoreDbApiTrackRef) =
+template beginLegaApi*(w: CoreDbApiTrackRef; s: static[CoreDbFnInx]) =
   when typeof(w) is CoreDbRef:
     let db = w
   else:
@@ -304,7 +228,9 @@ template beginLegaApi*(w: CoreDbApiTrackRef) =
     db.trackNewApi = false
     defer: db.trackNewApi = save
 
-  let blaStart {.inject.} = getTime()
+  when CoreDbEnableApiProfiling:
+    const blaCtx {.inject.} = s         # Local use only
+  let blaStart {.inject.} = getTime()   # Local use only
 
 template endLegaApiIf*(w: CoreDbApiTrackRef; code: untyped) =
   block:
@@ -312,16 +238,22 @@ template endLegaApiIf*(w: CoreDbApiTrackRef; code: untyped) =
       let db = w
     else:
       let db = w.distinctBase.parent
-    if db.trackLegaApi:
+    when CoreDbEnableApiProfiling:
       let elapsed {.inject,used.} = getTime() - blaStart
+      aristo_profile.update(db.profTab, blaCtx.ord, elapsed)
+    if db.trackLegaApi:
+      when not CoreDbEnableApiProfiling: # otherwise use variable above
+        let elapsed {.inject,used.} = getTime() - blaStart
       code
 
 # ------------------------------------------------------------------------------
 # Public new API logging framework
 # ------------------------------------------------------------------------------
 
-template beginNewApi*(w: CoreDxApiTrackRef) =
-  let bnaStart {.inject.} = getTime()
+template beginNewApi*(w: CoreDxApiTrackRef; s: static[CoreDbFnInx]) =
+  when CoreDbEnableApiProfiling:
+    const bnaCtx {.inject.} = s         # Local use only
+  let bnaStart {.inject.} = getTime()   # Local use only
 
 template endNewApiIf*(w: CoreDxApiTrackRef; code: untyped) =
   block:
|
|||||||
else:
|
else:
|
||||||
if w.isNil: break
|
if w.isNil: break
|
||||||
let db = w.parent
|
let db = w.parent
|
||||||
if db.trackNewApi:
|
when CoreDbEnableApiProfiling:
|
||||||
let elapsed {.inject,used.} = getTime() - bnaStart
|
let elapsed {.inject,used.} = getTime() - bnaStart
|
||||||
|
aristo_profile.update(db.profTab, bnaCtx.ord, elapsed)
|
||||||
|
if db.trackNewApi:
|
||||||
|
when not CoreDbEnableApiProfiling: # otherwise use variable above
|
||||||
|
let elapsed {.inject,used.} = getTime() - bnaStart
|
||||||
code
|
code
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public helpers
|
# Public helpers
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc update*(t: var CoreDbProfFnInx; fn: CoreDbFnInx; ela: Duration) =
|
func init*(T: type CoreDbProfListRef): T =
|
||||||
## Register time `ela` spent while executing function `fn`
|
T(list: newSeq[CoreDbProfData](1 + high(CoreDbFnInx).ord))
|
||||||
let s = ela.toFloat
|
|
||||||
t[fn][0] += s
|
|
||||||
t[fn][1] += s * s
|
|
||||||
t[fn][2].inc
|
|
||||||
|
|
||||||
|
|
||||||
proc byElapsed*(t: var CoreDbProfFnInx): CoreDbProfEla =
|
|
||||||
## Collate `CoreDb` function symbols by elapsed times, sorted with largest
|
|
||||||
## `Duration` first. Zero `Duration` entries are discarded.
|
|
||||||
var u: Table[Duration,seq[CoreDbFnInx]]
|
|
||||||
for fn in CoreDbFnInx:
|
|
||||||
t.updateTotal fn
|
|
||||||
let (secs,_,count) = t[fn]
|
|
||||||
if 0 < count:
|
|
||||||
let ela = secs.toDuration
|
|
||||||
u.withValue(ela,val):
|
|
||||||
val[].add fn
|
|
||||||
do:
|
|
||||||
u[ela] = @[fn]
|
|
||||||
result.add (t[SummaryItem][0].toDuration, @[SummaryItem])
|
|
||||||
for ela in u.keys.toSeq.sorted Descending:
|
|
||||||
u.withValue(ela,val):
|
|
||||||
result.add (ela, val[])
|
|
||||||
|
|
||||||
|
|
||||||
proc byMean*(t: var CoreDbProfFnInx): CoreDbProfMean =
|
|
||||||
## Collate `CoreDb` function symbols by elapsed mean times, sorted with
|
|
||||||
## largest `Duration` first. Zero `Duration` entries are discarded.
|
|
||||||
var u: Table[Duration,seq[CoreDbFnInx]]
|
|
||||||
for fn in CoreDbFnInx:
|
|
||||||
t.updateTotal fn
|
|
||||||
let (secs,_,count) = t[fn]
|
|
||||||
if 0 < count:
|
|
||||||
let ela = (secs / count.float).toDuration
|
|
||||||
u.withValue(ela,val):
|
|
||||||
val[].add fn
|
|
||||||
do:
|
|
||||||
u[ela] = @[fn]
|
|
||||||
result.add (
|
|
||||||
(t[SummaryItem][0] / t[SummaryItem][2].float).toDuration, @[SummaryItem])
|
|
||||||
for mean in u.keys.toSeq.sorted Descending:
|
|
||||||
u.withValue(mean,val):
|
|
||||||
result.add (mean, val[])
|
|
||||||
|
|
||||||
|
|
||||||
proc byVisits*(t: var CoreDbProfFnInx): CoreDbProfCount =
|
|
||||||
## Collate `CoreDb` function symbols by number of visits, sorted with
|
|
||||||
## largest number first.
|
|
||||||
var u: Table[int,seq[CoreDbFnInx]]
|
|
||||||
for fn in CoreDbFnInx:
|
|
||||||
t.updateTotal fn
|
|
||||||
let (_,_,count) = t[fn]
|
|
||||||
if 0 < count:
|
|
||||||
u.withValue(count,val):
|
|
||||||
val[].add fn
|
|
||||||
do:
|
|
||||||
u[count] = @[fn]
|
|
||||||
result.add (t[SummaryItem][2], @[SummaryItem])
|
|
||||||
for count in u.keys.toSeq.sorted Descending:
|
|
||||||
u.withValue(count,val):
|
|
||||||
result.add (count, val[])
|
|
||||||
|
|
||||||
|
|
||||||
proc stats*(
|
|
||||||
t: CoreDbProfFnInx;
|
|
||||||
fnInx: CoreDbFnInx;
|
|
||||||
): tuple[n: int, mean: Duration, stdDev: Duration, devRatio: float] =
|
|
||||||
## Print mean and strandard deviation of timing
|
|
||||||
let data = t[fnInx]
|
|
||||||
result.n = data[2]
|
|
||||||
if 0 < result.n:
|
|
||||||
let
|
|
||||||
mean = data[0] / result.n.float
|
|
||||||
sqMean = data[1] / result.n.float
|
|
||||||
meanSq = mean * mean
|
|
||||||
|
|
||||||
# Mathematically, `meanSq <= sqMean` but there might be rounding errors
|
|
||||||
# if `meanSq` and `sqMean` are approximately the same.
|
|
||||||
sigma = sqMean - min(meanSq,sqMean)
|
|
||||||
stdDev = sigma.sqrt
|
|
||||||
|
|
||||||
result.mean = mean.toDuration
|
|
||||||
result.stdDev = stdDev.sqrt.toDuration
|
|
||||||
|
|
||||||
if 0 < mean:
|
|
||||||
result.devRatio = stdDev / mean
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# End
|
# End
|
||||||
|
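
The `update()`/`stats()` pair deleted above (now re-implemented in `aristo_profile`) shows the bookkeeping: each function slot accumulates the running sums (Σx, Σx², n), from which mean and standard deviation fall out without storing individual samples. A minimal self-contained version of that arithmetic:

import std/math

type Slot = tuple[sum, sqSum: float, count: int]

proc update(t: var Slot; seconds: float) =
  t.sum += seconds
  t.sqSum += seconds * seconds
  t.count.inc

proc stats(t: Slot): tuple[mean, stdDev: float] =
  if 0 < t.count:
    let
      mean = t.sum / t.count.float
      sqMean = t.sqSum / t.count.float
      meanSq = mean * mean
      # rounding may leave `sqMean` marginally below `meanSq`
      sigma = sqMean - min(meanSq, sqMean)
    result = (mean: mean, stdDev: sigma.sqrt)

when isMainModule:
  var s: Slot
  for x in [0.1, 0.2, 0.3]:
    s.update x
  let (mean, dev) = s.stats
  doAssert abs(mean - 0.2) < 1e-9
  doAssert abs(dev - sqrt(0.02 / 3.0)) < 1e-9
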
@@ -12,7 +12,8 @@
 import
   eth/common,
-  results
+  results,
+  ../../aristo/aristo_profile
 
 # Annotation helpers
 {.pragma: noRaise, gcsafe, raises: [].}
@@ -31,6 +32,12 @@ const
   CoreDbPersistentTypes* = {LegacyDbPersistent, AristoDbRocks}
 
 type
+  CoreDbProfListRef* = AristoDbProfListRef
+    ## Borrowed from `aristo_profile`, only used in profiling mode
+
+  CoreDbProfData* = AristoDbProfData
+    ## Borrowed from `aristo_profile`, only used in profiling mode
+
   CoreDbRc*[T] = Result[T,CoreDbErrorRef]
 
   CoreDbAccount* = object
@@ -261,11 +268,13 @@ type
   # --------------------------------------------------
   CoreDbRef* = ref object of RootRef
     ## Database descriptor
     dbType*: CoreDbType         ## Type of database backend
     trackLegaApi*: bool         ## Debugging, support
     trackNewApi*: bool          ## Debugging, support
     trackLedgerApi*: bool       ## Debugging, suggestion for subsequent ledger
     localDbOnly*: bool          ## Debugging, suggestion to ignore async fetch
+    profTab*: CoreDbProfListRef ## Profiling data (if any)
+    ledgerHook*: RootRef        ## Debugging/profiling, to be used by ledger
     methods*: CoreDbBaseFns
 
   CoreDbErrorRef* = ref object of RootRef
@@ -32,6 +32,7 @@ export
 
   # see `aristo_db`
   toAristo,
+  toAristoProfData,
 
   # see `legacy_db`
   isLegacy,
@@ -1,5 +1,5 @@
 # nimbus-eth1
-# Copyright (c) 2021 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
 # Licensed under either of
 # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #   http://www.apache.org/licenses/LICENSE-2.0)
@@ -13,10 +13,17 @@
 ##
 {.push raises: [].}
 
-import kvt/[
-  kvt_constants, kvt_init, kvt_tx, kvt_utils, kvt_walk]
+import
+  kvt/[kvt_api, kvt_constants]
 export
-  kvt_constants, kvt_init, kvt_tx, kvt_utils, kvt_walk
+  kvt_api, kvt_constants
+
+import
+  kvt/kvt_init
+export
+  MemBackendRef,
+  VoidBackendRef,
+  init
 
 import
   kvt/kvt_desc
258
nimbus/db/kvt/kvt_api.nim
Normal file
258
nimbus/db/kvt/kvt_api.nim
Normal file
@ -0,0 +1,258 @@
|
|||||||
|
# nimbus-eth1
|
||||||
|
# Copyright (c) 2024 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||||
|
# http://opensource.org/licenses/MIT)
|
||||||
|
# at your option. This file may not be copied, modified, or distributed
|
||||||
|
# except according to those terms.
|
||||||
|
|
||||||
|
## Stackable API for `Kvt`
|
||||||
|
## =======================
|
||||||
|
|
||||||
|
import
|
||||||
|
std/times,
|
||||||
|
eth/common,
|
||||||
|
results,
|
||||||
|
../aristo/aristo_profile,
|
||||||
|
"."/[kvt_desc, kvt_desc/desc_backend, kvt_init, kvt_tx, kvt_utils]
|
||||||
|
|
||||||
|
# Annotation helper(s)
|
||||||
|
{.pragma: noRaise, gcsafe, raises: [].}
|
||||||
|
|
||||||
|
type
|
||||||
|
KvtDbProfListRef* = AristoDbProfListRef
|
||||||
|
## Borrowed from `aristo_profile`
|
||||||
|
|
||||||
|
KvtDbProfData* = AristoDbProfData
|
||||||
|
## Borrowed from `aristo_profile`
|
||||||
|
|
||||||
|
KvtApiCommitFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
|
||||||
|
KvtApiDelFn* = proc(db: KvtDbRef,
|
||||||
|
key: openArray[byte]): Result[void,KvtError] {.noRaise.}
|
||||||
|
KvtApiFinishFn* = proc(db: KvtDbRef, flush = false) {.noRaise.}
|
||||||
|
KvtApiForgetFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
|
||||||
|
KvtApiForkFn* = proc(db: KvtDbRef): Result[KvtDbRef,KvtError] {.noRaise.}
|
||||||
|
KvtApiForkTopFn* = proc(db: KvtDbRef): Result[KvtDbRef,KvtError] {.noRaise.}
|
||||||
|
KvtApiGetFn* = proc(db: KvtDbRef,
|
||||||
|
key: openArray[byte]): Result[Blob,KvtError] {.noRaise.}
|
||||||
|
KvtApiHasKeyFn* = proc(db: KvtDbRef,
|
||||||
|
key: openArray[byte]): Result[bool,KvtError] {.noRaise.}
|
||||||
|
KvtApiIsTopFn* = proc(tx: KvtTxRef): bool {.noRaise.}
|
||||||
|
KvtApiLevelFn* = proc(db: KvtDbRef): int {.noRaise.}
|
||||||
|
KvtApiNForkedFn* = proc(db: KvtDbRef): int {.noRaise.}
|
||||||
|
KvtApiPutFn* = proc(db: KvtDbRef,
|
||||||
|
key, data: openArray[byte]): Result[void,KvtError] {.noRaise.}
|
||||||
|
KvtApiRollbackFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
|
||||||
|
KvtApiStowFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
|
||||||
|
KvtApiTxBeginFn* = proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.}
|
||||||
|
KvtApiTxTopFn* =
|
||||||
|
proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.}
|
||||||
|
|
||||||
|
KvtApiRef* = ref KvtApiObj
|
||||||
|
KvtApiObj* = object of RootObj
|
||||||
|
## Useful set of `Kvt` fuctions that can be filtered, stacked etc. Note
|
||||||
|
## that this API is modelled after a subset of the `Aristo` API.
|
||||||
|
commit*: KvtApiCommitFn
|
||||||
|
del*: KvtApiDelFn
|
||||||
|
finish*: KvtApiFinishFn
|
||||||
|
forget*: KvtApiForgetFn
|
||||||
|
fork*: KvtApiForkFn
|
||||||
|
forkTop*: KvtApiForkTopFn
|
||||||
|
get*: KvtApiGetFn
|
||||||
|
hasKey*: KvtApiHasKeyFn
|
||||||
|
isTop*: KvtApiIsTopFn
|
||||||
|
level*: KvtApiLevelFn
|
||||||
|
nForked*: KvtApiNForkedFn
|
||||||
|
put*: KvtApiPutFn
|
||||||
|
rollback*: KvtApiRollbackFn
|
||||||
|
stow*: KvtApiStowFn
|
||||||
|
txBegin*: KvtApiTxBeginFn
|
||||||
|
txTop*: KvtApiTxTopFn
|
||||||
|
|
||||||
|
|
||||||
|
KvtApiProfNames* = enum
|
||||||
|
## index/name mapping for profile slots
|
||||||
|
KvtApiProfTotal = "total"
|
||||||
|
|
||||||
|
KvtApiProfCommitFn = "commit"
|
||||||
|
KvtApiProfDelFn = "del"
|
||||||
|
KvtApiProfFinishFn = "finish"
|
||||||
|
KvtApiProfForgetFn = "forget"
|
||||||
|
KvtApiProfForkFn = "fork"
|
||||||
|
KvtApiProfForkTopFn = "forkTop"
|
||||||
|
KvtApiProfGetFn = "get"
|
||||||
|
KvtApiProfHasKeyFn = "hasKey"
|
||||||
|
KvtApiProfIsTopFn = "isTop"
|
||||||
|
KvtApiProfLevelFn = "level"
|
||||||
|
KvtApiProfNForkedFn = "nForked"
|
||||||
|
KvtApiProfPutFn = "put"
|
||||||
|
KvtApiProfRollbackFn = "rollback"
|
||||||
|
KvtApiProfStowFn = "stow"
|
||||||
|
KvtApiProfTxBeginFn = "txBegin"
|
||||||
|
KvtApiProfTxTopFn = "txTop"
|
||||||
|
|
||||||
|
KvtApiProfBeGetKvpFn = "be/getKvp"
|
||||||
|
KvtApiProfBePutEndFn = "be/putEnd"
|
||||||
|
|
||||||
|
KvtApiProfRef* = ref object of KvtApiRef
|
||||||
|
## Profiling API extension of `KvtApiObj`
|
||||||
|
data*: KvtDbProfListRef
|
||||||
|
be*: BackendRef
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public API constuctors
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
func init*(api: var KvtApiObj) =
|
||||||
|
  api.commit = commit
  api.del = del
  api.finish = finish
  api.forget = forget
  api.fork = fork
  api.forkTop = forkTop
  api.get = get
  api.hasKey = hasKey
  api.isTop = isTop
  api.level = level
  api.nForked = nForked
  api.put = put
  api.rollback = rollback
  api.stow = stow
  api.txBegin = txBegin
  api.txTop = txTop

func init*(T: type KvtApiRef): T =
  result = new T
  result[].init()

func dup*(api: KvtApiRef): KvtApiRef =
  new result
  result[] = api[]

# ------------------------------------------------------------------------------
# Public profile API constructor
# ------------------------------------------------------------------------------

func init*(
    T: type KvtApiProfRef;
    api: KvtApiRef;
    be = BackendRef(nil);
      ): T =
  ## This constructor creates a profiling API descriptor to be derived from
  ## an initialised `api` argument descriptor. For profiling the DB backend,
  ## the field `.be` of the result descriptor must be assigned to the
  ## `.backend` field of the `KvtDbRef` descriptor.
  ##
  ## The argument descriptors `api` and `be` will not be modified and can be
  ## used to restore the previous setup.
  ##
  let
    data = KvtDbProfListRef(
      list: newSeq[KvtDbProfData](1 + high(KvtApiProfNames).ord))
    profApi = T(data: data)

  template profileRunner(n: KvtApiProfNames, code: untyped): untyped =
    let start = getTime()
    code
    data.update(n.ord, getTime() - start)

  profApi.commit =
    proc(a: KvtTxRef): auto =
      KvtApiProfCommitFn.profileRunner:
        result = api.commit(a)

  profApi.del =
    proc(a: KvtDbRef; b: openArray[byte]): auto =
      KvtApiProfDelFn.profileRunner:
        result = api.del(a, b)

  profApi.finish =
    proc(a: KvtDbRef; b = false) =
      KvtApiProfFinishFn.profileRunner:
        api.finish(a, b)

  profApi.forget =
    proc(a: KvtDbRef): auto =
      KvtApiProfForgetFn.profileRunner:
        result = api.forget(a)

  profApi.fork =
    proc(a: KvtDbRef): auto =
      KvtApiProfForkFn.profileRunner:
        result = api.fork(a)

  profApi.forkTop =
    proc(a: KvtDbRef): auto =
      KvtApiProfForkTopFn.profileRunner:
        result = api.forkTop(a)

  profApi.get =
    proc(a: KvtDbRef, b: openArray[byte]): auto =
      KvtApiProfGetFn.profileRunner:
        result = api.get(a, b)

  profApi.hasKey =
    proc(a: KvtDbRef, b: openArray[byte]): auto =
      KvtApiProfHasKeyFn.profileRunner:
        result = api.hasKey(a, b)

  profApi.isTop =
    proc(a: KvtTxRef): auto =
      KvtApiProfIsTopFn.profileRunner:
        result = api.isTop(a)

  profApi.level =
    proc(a: KvtDbRef): auto =
      KvtApiProfLevelFn.profileRunner:
        result = api.level(a)

  profApi.nForked =
    proc(a: KvtDbRef): auto =
      KvtApiProfNForkedFn.profileRunner:
        result = api.nForked(a)

  profApi.put =
    proc(a: KvtDbRef; b, c: openArray[byte]): auto =
      KvtApiProfPutFn.profileRunner:
        result = api.put(a, b, c)

  profApi.rollback =
    proc(a: KvtTxRef): auto =
      KvtApiProfRollbackFn.profileRunner:
        result = api.rollback(a)

  profApi.stow =
    proc(a: KvtDbRef): auto =
      KvtApiProfStowFn.profileRunner:
        result = api.stow(a)

  profApi.txBegin =
    proc(a: KvtDbRef): auto =
      KvtApiProfTxBeginFn.profileRunner:
        result = api.txBegin(a)

  profApi.txTop =
    proc(a: KvtDbRef): auto =
      KvtApiProfTxTopFn.profileRunner:
        result = api.txTop(a)

  if not be.isNil:
    profApi.be = be.dup

    profApi.be.getKvpFn =
      proc(a: openArray[byte]): auto =
        KvtApiProfBeGetKvpFn.profileRunner:
          result = be.getKvpFn(a)

    profApi.be.putEndFn =
      proc(a: PutHdlRef): auto =
        KvtApiProfBePutEndFn.profileRunner:
          result = be.putEndFn(a)

  profApi

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
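For orientation, the profiler is wired in roughly as follows. This is a minimal sketch, not code from this commit: the `db.api` and `db.backend` field names are assumptions taken from the constructor's doc comment above.

let
  baseApi = KvtApiRef.init()                        # plain function table
  profApi = KvtApiProfRef.init(baseApi, db.backend) # timing wrappers

db.api = profApi            # route API calls through the profiler
db.backend = profApi.be     # also profile backend I/O (see `be.dup` below)

# Afterwards, the per-function timings accumulated in `profApi.data`
# (a `KvtDbProfListRef`) can be sorted with the `aristo_profile` helpers.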
@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
@ -66,6 +66,15 @@ type
    closeFn*: CloseFn             ## Generic destructor

func dup*(be: BackendRef): BackendRef =
  if not be.isNil:
    result = BackendRef(
      getKvpFn: be.getKvpFn,
      putBegFn: be.putBegFn,
      putKvpFn: be.putKvpFn,
      putEndFn: be.putEndFn,
      closeFn:  be.closeFn)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
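A side note on `dup` (sketch, same assumed field names as above): because it copies the closure fields into a fresh `BackendRef`, the unwrapped backend survives and the previous setup can be restored once profiling is done.

let saved = db.backend      # keep the unwrapped backend
db.backend = profApi.be     # profiled copy created via `be.dup`
# ... run the measured workload ...
db.backend = saved          # restore the previous setup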
@ -36,28 +36,22 @@ type
  ReadOnlyStateDB* = distinct LedgerRef

export
  LedgerFnInx,
  LedgerProfListRef,
  LedgerType,
  LedgerRef,
  LedgerSpRef,
  LedgerSpRef

  # Profiling support
  byElapsed,
  byMean,
  byVisits,
  stats

const
  LedgerEnableApiTracking* = EnableApiTracking
  LedgerEnableApiProfiling* = EnableApiTracking and EnableApiProfiling
  LedgerApiTxt* = apiTxt

when EnableApiTracking and EnableApiProfiling:
  var ledgerProfTab*: LedgerProfFnInx

when AutoValidateDescriptors:
  import ./base/validate

proc ldgProfData*(db: CoreDbRef): LedgerProfListRef {.gcsafe.}

# ------------------------------------------------------------------------------
# Logging/tracking helpers (some public)
# ------------------------------------------------------------------------------
@ -81,14 +75,11 @@ when EnableApiTracking:
  # Publicly available for API logging
  template beginTrackApi*(ldg: LedgerRef; s: LedgerFnInx) =
    when EnableApiTracking:
      ldg.beginApi
      ldg.beginApi(s)
      let ctx {.inject.} = s

  template ifTrackApi*(ldg: LedgerRef; code: untyped) =
    when EnableApiTracking:
      ldg.endApiIf:
        when EnableApiProfiling:
          ledgerProfTab.update(ctx, elapsed)
        code

# ------------------------------------------------------------------------------
@ -101,6 +92,8 @@ proc bless*(ldg: LedgerRef; db: CoreDbRef): LedgerRef =
    ldg.validate()
  when EnableApiTracking:
    ldg.trackApi = db.trackLedgerApi
  when LedgerEnableApiProfiling:
    ldg.profTab = db.ldgProfData()
  ldg.ifTrackApi: debug apiTxt, ctx, elapsed, ldgType=ldg.ldgType
  ldg
@ -108,6 +101,19 @@ proc bless*(ldg: LedgerRef; db: CoreDbRef): LedgerRef =
# Public methods
# ------------------------------------------------------------------------------

proc ldgProfData*(db: CoreDbRef): LedgerProfListRef =
  ## Return profiling data table (only available in profiling mode). If
  ## available (i.e. non-nil), result data can be organised by the functions
  ## available with `aristo_profile`.
  ##
  ## Note that the profiling data accumulate over several ledger sessions
  ## running on the same `CoreDb` instance.
  ##
  when LedgerEnableApiProfiling:
    if db.ledgerHook.isNil:
      db.ledgerHook = LedgerProfListRef.init()
    cast[LedgerProfListRef](db.ledgerHook)

proc accessList*(ldg: LedgerRef, eAddr: EthAddress) =
  ldg.beginTrackApi LdgAccessListFn
  ldg.methods.accessListFn eAddr
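As a usage sketch (assuming a profiling build and `db: CoreDbRef`): the returned table can be fed straight into the `aristo_profile` sorters, e.g. to list the most expensive ledger functions first.

let tab = db.ldgProfData()
if not tab.isNil:                  # nil unless profiling is compiled in
  for (ela, fnOrds) in tab.byElapsed:
    echo ela.pp, ": ", fnOrds      # ordinals into `LedgerFnInx`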
@ -11,9 +11,10 @@
{.push raises: [].}

import
  std/[algorithm, math, sequtils, strformat, strutils, tables, times],
  std/[strutils, times],
  eth/common,
  stew/byteutils,
  ../../aristo/aristo_profile,
  ../../core_db,
  "."/base_desc
@ -76,77 +77,13 @@ type
    LdgPairsIt = "pairs"
    LdgStorageIt = "storage"

  LedgerProfFnInx* = array[LedgerFnInx,(float,float,int)]
  LedgerProfEla* = seq[(Duration,seq[LedgerFnInx])]
  LedgerProfMean* = seq[(Duration,seq[LedgerFnInx])]
  LedgerProfCount* = seq[(int,seq[LedgerFnInx])]

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc toDuration(fl: float): Duration =
  ## Convert the nanoseconds argument `ns` to a `Duration`.
  let (s, ns) = fl.splitDecimal
  initDuration(seconds = s.int, nanoseconds = (ns * 1_000_000_000).int)

func toFloat(ela: Duration): float =
  ## Convert the argument `ela` to a floating point seconds result.
  let
    elaS = ela.inSeconds
    elaNs = (ela - initDuration(seconds=elaS)).inNanoSeconds
  elaS.float + elaNs.float / 1_000_000_000

proc updateTotal(t: var LedgerProfFnInx; fnInx: LedgerFnInx) =
  ## Summary update helper
  if fnInx == SummaryItem:
    t[SummaryItem] = (0.0, 0.0, 0)
  else:
    t[SummaryItem][0] += t[fnInx][0]
    t[SummaryItem][1] += t[fnInx][1]
    t[SummaryItem][2] += t[fnInx][2]

# -----------------

func oaToStr(w: openArray[byte]): string =
  w.toHex.toLowerAscii

func ppUs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
  result = $elapsed.inMicroseconds
  let ns = elapsed.inNanoseconds mod 1_000 # fraction of a micro second
  if ns != 0:
    # to rounded deca milli seconds
    let du = (ns + 5i64) div 10i64
    result &= &".{du:02}"
  result &= "us"

func ppMs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
  result = $elapsed.inMilliseconds
  let ns = elapsed.inNanoseconds mod 1_000_000 # fraction of a milli second
  if ns != 0:
    # to rounded deca milli seconds
    let dm = (ns + 5_000i64) div 10_000i64
    result &= &".{dm:02}"
  result &= "ms"

func ppSecs(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
  result = $elapsed.inSeconds
  let ns = elapsed.inNanoseconds mod 1_000_000_000 # fraction of a second
  if ns != 0:
    # round up
    let ds = (ns + 5_000_000i64) div 10_000_000i64
    result &= &".{ds:02}"
  result &= "s"

func ppMins(elapsed: Duration): string {.gcsafe, raises: [ValueError].} =
  result = $elapsed.inMinutes
  let ns = elapsed.inNanoseconds mod 60_000_000_000 # fraction of a minute
  if ns != 0:
    # round up
    let dm = (ns + 500_000_000i64) div 1_000_000_000i64
    result &= &":{dm:02}"
  result &= "m"

# ------------------------------------------------------------------------------
# Public API logging helpers
# ------------------------------------------------------------------------------
@ -167,125 +104,32 @@ func toStr*(w: Blob): string =
func toStr*(w: seq[Log]): string =
  "Logs[" & $w.len & "]"

func toStr*(elapsed: Duration): string =
  try:
    if 0 < times.inMinutes(elapsed):
      result = elapsed.ppMins
    elif 0 < times.inSeconds(elapsed):
      result = elapsed.ppSecs
    elif 0 < times.inMilliSeconds(elapsed):
      result = elapsed.ppMs
    elif 0 < times.inMicroSeconds(elapsed):
      result = elapsed.ppUs
    else:
      result = $elapsed.inNanoSeconds & "ns"
  except ValueError:
    result = $elapsed
func toStr*(ela: Duration): string =
  aristo_profile.toStr(ela)

# ------------------------------------------------------------------------------
# Public API logging framework
# ------------------------------------------------------------------------------

template beginApi*(ldg: LedgerRef) =
  let baStart {.inject.} = getTime()
template beginApi*(ldg: LedgerRef; s: static[LedgerFnInx]) =
  const ctx {.inject,used.} = s      # Generally available
  let baStart {.inject.} = getTime() # Local use only

template endApiIf*(ldg: LedgerRef; code: untyped) =
  if ldg.trackApi:
    let elapsed {.inject,used.} = getTime() - baStart
    code
template endApiIf*(ldg: LedgerRef; code: untyped) =
  when CoreDbEnableApiProfiling:
    let elapsed {.inject,used.} = getTime() - baStart
    aristo_profile.update(ldg.profTab, ctx.ord, elapsed)
  if ldg.trackApi:
    when not CoreDbEnableApiProfiling: # otherwise use variable above
      let elapsed {.inject,used.} = getTime() - baStart
    code

# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------

proc update*(t: var LedgerProfFnInx; fn: LedgerFnInx; ela: Duration) =
  ## Register time `ela` spent while executing function `fn`
  let s = ela.toFloat
  t[fn][0] += s
  t[fn][1] += s * s
  t[fn][2].inc
func init*(T: type LedgerProfListRef): T =
  T(list: newSeq[LedgerProfData](1 + high(LedgerFnInx).ord))

proc byElapsed*(t: var LedgerProfFnInx): LedgerProfEla =
  ## Collate `Ledger` function symbols by elapsed times, sorted with largest
  ## `Duration` first. Zero `Duration` entries are discarded.
  var u: Table[Duration,seq[LedgerFnInx]]
  for fn in LedgerFnInx:
    t.updateTotal fn
    let (secs,_,count) = t[fn]
    if 0 < count:
      let ela = secs.toDuration
      u.withValue(ela,val):
        val[].add fn
      do:
        u[ela] = @[fn]
  result.add (t[SummaryItem][0].toDuration, @[SummaryItem])
  for ela in u.keys.toSeq.sorted Descending:
    u.withValue(ela,val):
      result.add (ela, val[])

proc byMean*(t: var LedgerProfFnInx): LedgerProfMean =
  ## Collate `Ledger` function symbols by elapsed mean times, sorted with
  ## largest `Duration` first. Zero `Duration` entries are discarded.
  var u: Table[Duration,seq[LedgerFnInx]]
  for fn in LedgerFnInx:
    t.updateTotal fn
    let (secs,_,count) = t[fn]
    if 0 < count:
      let ela = (secs / count.float).toDuration
      u.withValue(ela,val):
        val[].add fn
      do:
        u[ela] = @[fn]
  result.add (
    (t[SummaryItem][0] / t[SummaryItem][2].float).toDuration, @[SummaryItem])
  for mean in u.keys.toSeq.sorted Descending:
    u.withValue(mean,val):
      result.add (mean, val[])

proc byVisits*(t: var LedgerProfFnInx): LedgerProfCount =
  ## Collate `Ledger` function symbols by number of visits, sorted with
  ## largest number first.
  var u: Table[int,seq[LedgerFnInx]]
  for fn in LedgerFnInx:
    t.updateTotal fn
    let (_,_,count) = t[fn]
    if 0 < count:
      u.withValue(count,val):
        val[].add fn
      do:
        u[count] = @[fn]
  result.add (t[SummaryItem][2], @[SummaryItem])
  for count in u.keys.toSeq.sorted Descending:
    u.withValue(count,val):
      result.add (count, val[])

proc stats*(
    t: LedgerProfFnInx;
    fnInx: LedgerFnInx;
      ): tuple[n: int, mean: Duration, stdDev: Duration, devRatio: float] =
  ## Print mean and standard deviation of timing
  let data = t[fnInx]
  result.n = data[2]
  if 0 < result.n:
    let
      mean = data[0] / result.n.float
      sqMean = data[1] / result.n.float
      meanSq = mean * mean

      # Mathematically, `meanSq <= sqMean` but there might be rounding errors
      # if `meanSq` and `sqMean` are approximately the same.
      sigma = sqMean - min(meanSq,sqMean)
      stdDev = sigma.sqrt

    result.mean = mean.toDuration
    result.stdDev = stdDev.sqrt.toDuration

    if 0 < mean:
      result.devRatio = stdDev / mean

# ------------------------------------------------------------------------------
# End
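The two templates above co-operate through injected symbols: `beginApi` plants `ctx` and `baStart` in the caller's scope, and `endApiIf` picks them up again. A self-contained toy version of the same pattern (names invented for illustration):

import std/times

template beginApi(s: static[string]) =
  const ctx {.inject, used.} = s       # survives into the matching end-template
  let baStart {.inject.} = getTime()

template endApiIf(track: bool; code: untyped) =
  let elapsed {.inject, used.} = getTime() - baStart
  if track:
    code                               # `ctx` and `elapsed` are usable here

proc demo() =
  beginApi("demoFn")
  # ... the traced work would go here ...
  endApiIf(true):
    echo ctx, " took ", elapsed

demo()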
@ -13,12 +13,19 @@
import
  eth/common,
  ../../core_db,
  ../../../../stateless/multi_keys
  ../../../../stateless/multi_keys,
  ../../aristo/aristo_profile

# Annotation helpers
{.pragma: noRaise, gcsafe, raises: [].}

type
  LedgerProfListRef* = AristoDbProfListRef
    ## Borrowed from `aristo_profile`, only used in profiling mode

  LedgerProfData* = AristoDbProfData
    ## Borrowed from `aristo_profile`, only used in profiling mode

  LedgerType* = enum
    Ooops = 0
    LegacyAccountsCache,
@ -29,9 +36,10 @@ type

  LedgerRef* = ref object of RootRef
    ## Root object with closures
    ldgType*: LedgerType        ## For debugging
    trackApi*: bool             ## For debugging
    extras*: LedgerExtras       ## Support might go away
    profTab*: LedgerProfListRef ## Profiling data (if any)
    extras*: LedgerExtras       ## Support might go away
    methods*: LedgerFns

  RawRootHashFn* = proc(): Hash256 {.noRaise.}
@ -17,18 +17,18 @@
  unittest2,
  stew/endians2,
  ../../nimbus/sync/protocol,
  ../../nimbus/db/aristo,
  ../../nimbus/db/aristo/[
    aristo_blobify,
    aristo_debug,
    aristo_desc,
    aristo_desc/desc_backend,
    aristo_get,
    aristo_hashify,
    aristo_init/memory_db,
    aristo_init/rocks_db,
    aristo_layers,
    aristo_merge,
    aristo_persistent,
    aristo_blobify,
    aristo_tx,
    aristo_vid],
  ../replay/xcheck,
  ./test_helpers
@ -40,13 +40,6 @@ const
# Private helpers
# ------------------------------------------------------------------------------

when not declared(aristo_hashify.noisy):
  proc hashify(
      db: AristoDbRef;
      noisy: bool;
        ): Result[void,(VertexID,AristoError)] =
    aristo_hashify.hashify(db)

func hash(filter: FilterRef): Hash =
  ## Unique hash/filter -- cannot use de/blobify as the expressions
  ## `filter.blobify` and `filter.blobify.value.deblobify.value.blobify` are
|
@ -17,11 +17,19 @@ import
|
|||||||
results,
|
results,
|
||||||
unittest2,
|
unittest2,
|
||||||
../../nimbus/db/aristo/[
|
../../nimbus/db/aristo/[
|
||||||
aristo_check, aristo_debug, aristo_desc, aristo_filter, aristo_get,
|
aristo_blobify,
|
||||||
aristo_layers, aristo_merge, aristo_persistent, aristo_blobify],
|
aristo_check,
|
||||||
../../nimbus/db/aristo,
|
aristo_debug,
|
||||||
../../nimbus/db/aristo/aristo_desc/desc_backend,
|
aristo_desc,
|
||||||
../../nimbus/db/aristo/aristo_filter/[filter_fifos, filter_scheduler],
|
aristo_desc/desc_backend,
|
||||||
|
aristo_filter,
|
||||||
|
aristo_filter/filter_fifos,
|
||||||
|
aristo_filter/filter_scheduler,
|
||||||
|
aristo_get,
|
||||||
|
aristo_layers,
|
||||||
|
aristo_merge,
|
||||||
|
aristo_persistent,
|
||||||
|
aristo_tx],
|
||||||
../replay/xcheck,
|
../replay/xcheck,
|
||||||
./test_helpers
|
./test_helpers
|
||||||
|
|
||||||
@ -765,7 +773,7 @@ proc testFilterBacklog*(
    if sampleSize < n:
      break
    block:
      let rc = db.merge w
      let rc = db.mergeLeaf w
      xCheckRc rc.error == 0
    block:
      let rc = db.stow(persistent=true)
@ -13,7 +13,8 @@
  eth/common,
  rocksdb,
  ../../nimbus/db/aristo/[
  aristo_debug, aristo_desc, aristo_filter/filter_scheduler, aristo_merge],
  aristo_debug, aristo_desc, aristo_delete, aristo_filter/filter_scheduler,
  aristo_hashify, aristo_hike, aristo_merge],
  ../../nimbus/db/kvstore_rocksdb,
  ../../nimbus/sync/protocol/snap/snap_types,
  ../test_sync_snap/test_types,
@ -46,26 +47,6 @@ func to(a: NodeKey; T: type UInt256): T =
func to(a: NodeKey; T: type PathID): T =
  a.to(UInt256).to(T)

when not declared(aristo_merge.noisy):
  import ../../nimbus/db/aristo/aristo_hike
  proc merge(
      db: AristoDbRef;
      root: VertexID;
      path: openArray[byte];
      data: openArray[byte];
      accPath: PathID;
      noisy: bool;
        ): Result[bool, AristoError] =
    aristo_merge.merge(db, root, path, data, accPath)
  proc merge(
      db: AristoDbRef;
      lty: LeafTie;
      pyl: PayloadRef;
      accPath: PathID;
      noisy: bool;
        ): Result[Hike, AristoError] =
    aristo_merge.merge(db, lty, pyl, accPath)

# ------------------------------------------------------------------------------
# Public pretty printing
# ------------------------------------------------------------------------------
@ -231,6 +212,77 @@ func mapRootVid*(
# Public functions
# ------------------------------------------------------------------------------

proc hashify*(
    db: AristoDbRef;
    noisy: bool;
      ): Result[void,(VertexID,AristoError)] =
  when declared(aristo_hashify.noisy):
    aristo_hashify.exec(aristo_hashify.hashify(db), noisy)
  else:
    aristo_hashify.hashify(db)

proc delete*(
    db: AristoDbRef;
    root: VertexID;
    path: openArray[byte];
    accPath: PathID;
    noisy: bool;
      ): Result[bool,(VertexID,AristoError)] =
  when declared(aristo_delete.noisy):
    aristo_delete.exec(aristo_delete.delete(db, root, path, accPath), noisy)
  else:
    aristo_delete.delete(db, root, path, accPath)

proc delete*(
    db: AristoDbRef;
    lty: LeafTie;
    accPath: PathID;
    noisy: bool;
      ): Result[bool,(VertexID,AristoError)] =
  when declared(aristo_delete.noisy):
    aristo_delete.exec(aristo_delete.delete(db, lty, accPath), noisy)
  else:
    aristo_delete.delete(db, lty, accPath)

proc delTree*(
    db: AristoDbRef;
    root: VertexID;
    accPath: PathID;
    noisy: bool;
      ): Result[void,(VertexID,AristoError)] =
  when declared(aristo_delete.noisy):
    aristo_delete.exec(aristo_delete.delTree(db, root, accPath), noisy)
  else:
    aristo_delete.delTree(db, root, accPath)

proc merge(
    db: AristoDbRef;
    root: VertexID;
    path: openArray[byte];
    data: openArray[byte];
    accPath: PathID;
    noisy: bool;
      ): Result[bool, AristoError] =
  when declared(aristo_merge.noisy):
    aristo_merge.exec(aristo_merge.merge(db, root, path, data, accPath), noisy)
  else:
    aristo_merge.merge(db, root, path, data, accPath)

proc mergePayload*(
    db: AristoDbRef;
    lty: LeafTie;
    pyl: PayloadRef;
    accPath: PathID;
    noisy: bool;
      ): Result[Hike,AristoError] =
  when declared(aristo_merge.noisy):
    aristo_merge.exec(aristo_merge.mergePayload(db, lty, pyl, accPath), noisy)
  else:
    aristo_merge.mergePayload(db, lty, pyl, accPath)

proc mergeList*(
    db: AristoDbRef;                  # Database, top layer
    leafs: openArray[LeafTiePayload]; # Leaf items to add to the database
@ -241,7 +293,7 @@ proc mergeList*(
  for n,w in leafs:
    noisy.say "*** mergeList",
      " n=", n, "/", leafs.len
    let rc = db.merge(w.leafTie, w.payload, VOID_PATH_ID, noisy=noisy)
    let rc = db.mergePayload(w.leafTie, w.payload, VOID_PATH_ID, noisy=noisy)
    noisy.say "*** mergeList",
      " n=", n, "/", leafs.len,
      " rc=", (if rc.isOk: "ok" else: $rc.error),
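All of the wrappers above follow the same `when declared(...)` dispatch: if the underlying module exposes a `noisy` marker, route through its verbose `exec` variant, otherwise fall back to the plain call. A stand-alone toy version of the dispatch (marker and proc names invented):

proc plainRun(x: int): int = x + 1

const noisy = true                     # stand-in for the module-level marker

when declared(noisy):
  proc run(x: int; verbose: bool): int =
    if verbose: echo "run(", x, ")"
    plainRun(x)
else:
  proc run(x: int; verbose: bool): int =
    plainRun(x)

doAssert run(1, verbose = false) == 2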
|
@ -17,9 +17,17 @@ import
|
|||||||
unittest2,
|
unittest2,
|
||||||
stew/endians2,
|
stew/endians2,
|
||||||
../../nimbus/db/aristo/[
|
../../nimbus/db/aristo/[
|
||||||
aristo_check, aristo_debug, aristo_delete, aristo_desc, aristo_get,
|
aristo_check,
|
||||||
aristo_hike, aristo_layers, aristo_merge],
|
aristo_debug,
|
||||||
../../nimbus/db/[aristo, aristo/aristo_init/persistent],
|
aristo_delete,
|
||||||
|
aristo_desc,
|
||||||
|
aristo_get,
|
||||||
|
aristo_hike,
|
||||||
|
aristo_init/persistent,
|
||||||
|
aristo_layers,
|
||||||
|
aristo_merge,
|
||||||
|
aristo_nearby,
|
||||||
|
aristo_tx],
|
||||||
../replay/xcheck,
|
../replay/xcheck,
|
||||||
./test_helpers
|
./test_helpers
|
||||||
|
|
||||||
@ -245,7 +253,7 @@ proc fwdWalkVerify(
    leftOver = leftOver
    last = LeafTie()
    n = 0
  for (key,_) in db.right low(LeafTie,root):
  for (key,_) in db.rightPairs low(LeafTie,root):
    xCheck key in leftOver:
      noisy.say "*** fwdWalkVerify", "id=", n + (nLeafs + 1) * debugID
    leftOver.excl key
@ -277,7 +285,7 @@ proc revWalkVerify(
    leftOver = leftOver
    last = LeafTie()
    n = 0
  for (key,_) in db.left high(LeafTie,root):
  for (key,_) in db.leftPairs high(LeafTie,root):
    xCheck key in leftOver:
      noisy.say "*** revWalkVerify", " id=", n + (nLeafs + 1) * debugID
    leftOver.excl key
@ -302,7 +310,7 @@ proc mergeRlpData*(
    rlpData: openArray[byte];         # RLP encoded payload data
      ): Result[void,AristoError] =
  block body:
    discard db.merge(
    discard db.mergeLeaf(
      LeafTiePayload(
        leafTie: LeafTie(
          root: VertexID(1),
@ -357,7 +365,7 @@ proc testTxMergeAndDeleteOneByOne*(
    # e.g. lst.setLen(min(5,lst.len))
    lst
  for i,leaf in kvpLeafs:
    let rc = db.merge leaf
    let rc = db.mergeLeaf leaf
    xCheckRc rc.error == 0

  # List of all leaf entries that should be on the database
@ -462,7 +470,7 @@ proc testTxMergeAndDeleteSubTree*(
    # e.g. lst.setLen(min(5,lst.len))
    lst
  for i,leaf in kvpLeafs:
    let rc = db.merge leaf
    let rc = db.mergeLeaf leaf
    xCheckRc rc.error == 0

  # List of all leaf entries that should be on the database
@ -485,7 +493,7 @@ proc testTxMergeAndDeleteSubTree*(
    ""
  # Delete sub-tree
  block:
    let rc = db.delete(VertexID(1), VOID_PATH_ID)
    let rc = db.delTree(VertexID(1), VOID_PATH_ID)
    xCheckRc rc.error == (0,0):
      noisy.say "***", "del(2)",
        " n=", n, "/", list.len,
@ -9,7 +9,6 @@
# distributed except according to those terms.

import
  std/strutils,
  eth/common,
  ../../nimbus/db/core_db,
  ../../nimbus/common/chain_config
@ -26,111 +25,136 @@ type
    numBlocks*: int     ## Number of blocks to load
    dbType*: CoreDbType ## Use `CoreDbType(0)` for default

func cloneWith(
    dsc: CaptureSpecs;
    name = "";
    network = NetworkId(0);
    genesis = "";
    files = seq[string].default;
    numBlocks = 0;
    dbType = CoreDbType(0);
      ): CaptureSpecs =
  result = dsc
  if network != NetworkId(0):
    result.builtIn = true
    result.network = network
  elif 0 < genesis.len:
    result.builtIn = false
    result.genesis = genesis
  if 0 < name.len:
    if name[0] == '-':
      result.name &= name
    elif name[0] == '+' and 1 < name.len:
      result.name &= name[1 .. ^1]
    else:
      result.name = name
  if 0 < files.len:
    result.files = files
  if 0 < numBlocks:
    result.numBlocks = numBlocks
  if dbType != CoreDbType(0):
    result.dbType = dbType

# Must not use `const` here, see `//github.com/nim-lang/Nim/issues/23295`
# Waiting for fix `//github.com/nim-lang/Nim/pull/23297` (or similar) to
# appear on local `Nim` compiler version.
let
  bulkTest0* = CaptureSpecs(
    builtIn: true,
    name: "goerli-some",
    network: GoerliNet,
    files: @["goerli68161.txt.gz"],
    numBlocks: 1_000)
  goerliSample = CaptureSpecs(
    builtIn: true,
    name: "goerli",
    network: GoerliNet,
    files: @["goerli68161.txt.gz"]) # on local replay folder

  bulkTest1* = CaptureSpecs(
    builtIn: true,
    name: "goerli-more",
    network: GoerliNet,
    files: @["goerli68161.txt.gz"],
    numBlocks: high(int))
  goerliSampleEx = CaptureSpecs(
    builtIn: true,
    name: "goerli",
    network: GoerliNet,
    files: @[
      "goerli482304.txt.gz",          # on nimbus-eth1-blobs/replay
      "goerli482305-504192.txt.gz"])

  bulkTest2* = CaptureSpecs(
    builtIn: true,
    name: "goerli",
    network: GoerliNet,
    files: @[
      "goerli482304.txt.gz",          # on nimbus-eth1-blobs/replay
      "goerli482305-504192.txt.gz"],
    numBlocks: high(int))

  bulkTest3* = CaptureSpecs(
    builtIn: true,
    name: "main",
    network: MainNet,
    files: @[
      "mainnet332160.txt.gz",         # on nimbus-eth1-blobs/replay
      "mainnet332161-550848.txt.gz",
      "mainnet550849-719232.txt.gz",
      "mainnet719233-843841.txt.gz"],
    numBlocks: high(int))
  mainSampleEx = CaptureSpecs(
    builtIn: true,
    name: "main",
    network: MainNet,
    files: @[
      "mainnet332160.txt.gz",         # on nimbus-eth1-blobs/replay
      "mainnet332161-550848.txt.gz",
      "mainnet550849-719232.txt.gz",
      "mainnet719233-843841.txt.gz"])

  # ------------------

  bulkTest0* = goerliSample
    .cloneWith(
      name = "-some",
      numBlocks = 1_000)

  bulkTest1* = goerliSample
    .cloneWith(
      name = "-more",
      numBlocks = high(int))

  bulkTest2* = goerliSampleEx
    .cloneWith(
      numBlocks = high(int))

  bulkTest3* = mainSampleEx
    .cloneWith(
      numBlocks = high(int))

  # Test samples with all the problems one can expect
  ariTest0* = CaptureSpecs(
    builtIn: true,
    name: bulkTest2.name & "-am",
    network: bulkTest2.network,
    files: bulkTest2.files,
    numBlocks: high(int),
    dbType: AristoDbMemory)
  ariTest0* = goerliSampleEx
    .cloneWith(
      name = "-am",
      numBlocks = high(int),
      dbType = AristoDbMemory)

  ariTest1* = CaptureSpecs(
    builtIn: true,
    name: bulkTest2.name & "-ar",
    network: bulkTest2.network,
    files: bulkTest2.files,
    numBlocks: high(int),
    dbType: AristoDbRocks)
  ariTest1* = goerliSampleEx
    .cloneWith(
      name = "-ar",
      numBlocks = high(int),
      dbType = AristoDbRocks)

  ariTest2* = CaptureSpecs(
    builtIn: true,
    name: bulkTest3.name & "-am",
    network: bulkTest3.network,
    files: bulkTest3.files,
    numBlocks: 500_000,
    dbType: AristoDbMemory)
  ariTest2* = mainSampleEx
    .cloneWith(
      name = "-am",
      numBlocks = 500_000,
      dbType = AristoDbMemory)

  ariTest3* = CaptureSpecs(
    builtIn: true,
    name: bulkTest3.name & "-ar",
    network: bulkTest3.network,
    files: bulkTest3.files,
    numBlocks: high(int),
    dbType: AristoDbRocks)
  ariTest3* = mainSampleEx
    .cloneWith(
      name = "-ar",
      numBlocks = high(int),
      dbType = AristoDbRocks)

  # To be compared against the proof-of-concept implementation as reference
  # To be compared against the proof-of-concept implementation as
  # reference
  legaTest0* = CaptureSpecs(
    builtIn: true,
    name: ariTest0.name.replace("-am", "-lm"),
    network: ariTest0.network,
    files: ariTest0.files,
    numBlocks: ariTest0.numBlocks,
    dbType: LegacyDbMemory)
  legaTest0* = goerliSampleEx
    .cloneWith(
      name = "-lm",
      numBlocks = 500, # high(int),
      dbType = LegacyDbMemory)

  legaTest1* = CaptureSpecs(
    builtIn: true,
    name: ariTest1.name.replace("-ar", "-lp"),
    network: ariTest1.network,
    files: ariTest1.files,
    numBlocks: ariTest1.numBlocks,
    dbType: LegacyDbPersistent)
  legaTest1* = goerliSampleEx
    .cloneWith(
      name = "-lp",
      numBlocks = high(int),
      dbType = LegacyDbPersistent)

  legaTest2* = CaptureSpecs(
    builtIn: true,
    name: ariTest2.name.replace("-ar", "-lm"),
    network: ariTest2.network,
    files: ariTest2.files,
    numBlocks: ariTest2.numBlocks,
    dbType: LegacyDbMemory)
  legaTest2* = mainSampleEx
    .cloneWith(
      name = "-lm",
      numBlocks = 500_000,
      dbType = LegacyDbMemory)

  legaTest3* = CaptureSpecs(
    builtIn: true,
    name: ariTest3.name.replace("-ar", "-lp"),
    network: ariTest3.network,
    files: ariTest3.files,
    numBlocks: ariTest3.numBlocks,
    dbType: LegacyDbPersistent)
  legaTest3* = mainSampleEx
    .cloneWith(
      name = "-lp",
      numBlocks = high(int),
      dbType = LegacyDbPersistent)

  # ------------------
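One detail of `cloneWith` worth spelling out: a leading '-' in `name` appends to the inherited name, a leading '+' appends without the marker character, and anything else replaces the name outright. The expected values below follow directly from the code above (sketch, same module scope):

# With goerliSample.name == "goerli" as defined above:
doAssert goerliSample.cloneWith(name = "-some").name == "goerli-some"
doAssert goerliSample.cloneWith(name = "+X").name    == "goerliX"
doAssert goerliSample.cloneWith(name = "fresh").name == "fresh"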
@ -19,10 +19,25 @@
  ../replay/[pp, undump_blocks, xcheck],
  ./test_helpers

type StopMoaningAboutLedger {.used.} = LedgerType
type
  StopMoaningAboutLedger {.used.} = LedgerType

when CoreDbEnableApiProfiling or LedgerEnableApiProfiling:
when CoreDbEnableApiProfiling:
  import std/[algorithm, sequtils, strutils]
  import
    std/[algorithm, sequtils, strutils],
    ../../nimbus/db/aristo/[aristo_api, aristo_profile],
    ../../nimbus/db/kvt/kvt_api
  var
    aristoProfData: AristoDbProfListRef
    kvtProfData: KvtDbProfListRef
    cdbProfData: CoreDbProfListRef

when LedgerEnableApiProfiling:
  when not CoreDbEnableApiProfiling:
    import
      std/[algorithm, sequtils, strutils]
  var
    ldgProfData: LedgerProfListRef

const
  EnableExtraLoggingControl = true
@ -99,35 +114,29 @@ template stopLoggingAfter(noisy: bool; code: untyped) =

# --------------

proc coreDbProfResults(info: string; indent = 4): string =
  when CoreDbEnableApiProfiling:
    let
      pfx = indent.toPfx
      pfx2 = pfx & "  "
    result = "CoreDb profiling results" & info & ":"
    result &= "\n" & pfx & "by accumulated duration per procedure"
    for (ela,w) in coreDbProfTab.byElapsed:
      result &= pfx2 & ela.pp & ": " &
        w.mapIt($it & coreDbProfTab.stats(it).pp(true)).sorted.join(", ")
    result &= "\n" & pfx & "by number of visits"
    for (count,w) in coreDbProfTab.byVisits:
      result &= pfx2 & $count & ": " &
        w.mapIt($it & coreDbProfTab.stats(it).pp).sorted.join(", ")

proc ledgerProfResults(info: string; indent = 4): string =
  when LedgerEnableApiProfiling:
    let
      pfx = indent.toPfx
      pfx2 = pfx & "  "
    result = "Ledger profiling results" & info & ":"
    result &= "\n" & pfx & "by accumulated duration per procedure"
    for (ela,w) in ledgerProfTab.byElapsed:
      result &= pfx2 & ela.pp & ": " &
        w.mapIt($it & ledgerProfTab.stats(it).pp(true)).sorted.join(", ")
    result &= "\n" & pfx & "by number of visits"
    for (count,w) in ledgerProfTab.byVisits:
      result &= pfx2 & $count & ": " &
        w.mapIt($it & ledgerProfTab.stats(it).pp).sorted.join(", ")

when CoreDbEnableApiProfiling or
     LedgerEnableApiProfiling:
  proc profilingPrinter(
      data: AristoDbProfListRef;
      names: openArray[string];
      header: string;
      indent = 4;
        ): string =
    if not data.isNil:
      let
        pfx = indent.toPfx
        pfx2 = pfx & "  "
      result = header & ":"

      result &= "\n" & pfx & "by accumulated duration per procedure"
      for (ela,fns) in data.byElapsed:
        result &= pfx2 & ela.pp & ": " & fns.mapIt(
          names[it] & data.stats(it).pp(true)).sorted.join(", ")

      result &= "\n" & pfx & "by number of visits"
      for (count,fns) in data.byVisits:
        result &= pfx2 & $count & ": " & fns.mapIt(
          names[it] & data.stats(it).pp).sorted.join(", ")

# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
@ -136,18 +145,33 @@ proc ledgerProfResults(info: string; indent = 4): string =
proc test_chainSyncProfilingPrint*(
    noisy = false;
    nBlocks: int;
    indent = 2;
      ) =
  if noisy:
    let info =
      if 0 < nBlocks and nBlocks < high(int): " (" & $nBlocks & " blocks)"
      else: ""
    block:
      let s = info.coreDbProfResults()
      if 0 < s.len: true.say "***", s, "\n"
    block:
      let s = info.ledgerProfResults()
      if 0 < s.len: true.say "***", s, "\n"
    var blurb: seq[string]
    when LedgerEnableApiProfiling:
      blurb.add ldgProfData.profilingPrinter(
        names = LedgerFnInx.toSeq.mapIt($it),
        header = "Ledger profiling results" & info,
        indent)
    when CoreDbEnableApiProfiling:
      blurb.add cdbProfData.profilingPrinter(
        names = CoreDbFnInx.toSeq.mapIt($it),
        header = "CoreDb profiling results" & info,
        indent)
      blurb.add aristoProfData.profilingPrinter(
        names = AristoApiProfNames.toSeq.mapIt($it),
        header = "Aristo backend profiling results" & info,
        indent)
      blurb.add kvtProfData.profilingPrinter(
        names = KvtApiProfNames.toSeq.mapIt($it),
        header = "Kvt backend profiling results" & info,
        indent)
    for s in blurb:
      if 0 < s.len: true.say "***", s, "\n"
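The `names` arguments above rely on enums with string values: `toSeq` over the enum type yields the members in declaration order, so indexing by ordinal recovers each function's display name. A self-contained illustration:

import std/sequtils

type DemoFnInx = enum
  SummaryItem = "total"
  DemoGetFn   = "get"
  DemoPutFn   = "put"

# Mirrors the `AristoApiProfNames.toSeq.mapIt($it)` calls above.
let names = DemoFnInx.toSeq.mapIt($it)
doAssert names == @["total", "get", "put"]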
|
|
||||||
proc test_chainSync*(
|
proc test_chainSync*(
|
||||||
noisy: bool;
|
noisy: bool;
|
||||||
@ -166,6 +190,16 @@ proc test_chainSync*(
|
|||||||
noisy.initLogging com
|
noisy.initLogging com
|
||||||
defer: com.finishLogging()
|
defer: com.finishLogging()
|
||||||
|
|
||||||
|
# Profile variables will be non-nil if profiling is available. The profiling
|
||||||
|
# API data need to be captured so it will be available after the services
|
||||||
|
# have terminated.
|
||||||
|
when CoreDbEnableApiProfiling:
|
||||||
|
# terminated.
|
||||||
|
(aristoProfData, kvtProfData) = com.db.toAristoProfData()
|
||||||
|
cdbProfData = com.db.dbProfData()
|
||||||
|
when LedgerEnableApiProfiling:
|
||||||
|
ldgProfData = com.db.ldgProfData()
|
||||||
|
|
||||||
for w in filePaths.undumpBlocks:
|
for w in filePaths.undumpBlocks:
|
||||||
let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)
|
let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)
|
||||||
if fromBlock == 0.u256:
|
if fromBlock == 0.u256:
|
||||||
|