Core db and aristo updates for destructor and tx logic (#1894)
* Disable `TransactionID` related functions from `state_db.nim`
why:
The functions `getCommittedStorage()` and `updateOriginalRoot()` from
the `state_db` module are used nowhere. Emulating the legacy
`TransactionID` type functionality would be administratively expensive
for `Aristo` to provide (the legacy DB version is only partially
implemented, anyway).
As there is no other place where `TransactionID`s are used, they will
not be provided by the `Aristo` variant of the `CoreDb`. For the
legacy DB API, nothing will change.
* Fix copyright headers in source code
* Get rid of compiler warning
* Update Aristo code, remove unused `merge()` variant, export `hashify()`
why:
Adapt to upcoming `CoreDb` wrapper
* Remove synced tx feature from `Aristo`
why:
+ This feature allowed synchronising transaction methods like begin,
commit, and rollback for a group of descriptors.
+ The feature is over-engineered and not needed for `CoreDb`, nor is
it complete (some convergence features are missing.)
* Add debugging helpers to `Kvt`
also:
Update the database iterator and add a count variable to the yield
arguments, similar to `Aristo`.
* Provide optional destructors for `CoreDb` API
why:
For the upcoming Aristo wrapper, this allows controlling when certain
smart destruction and update steps can take place. The auto destructor
works fine in general when the storage/cache strategy is known and
acceptable when creating descriptors.
* Add update option for `CoreDb` API function `hash()`
why:
The hash function is typically used to get the state root of the MPT.
Due to lazy hashing, this might not be available on the `Aristo` DB.
So the `update` option requests re-hashing the current state changes
if needed, as sketched below.
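
A minimal sketch of the idea (toy types and names, not the actual
`CoreDb` signatures): a lazily hashed MPT tracks pending changes with
a dirty flag, and the `update` option decides whether `hash()` may
re-hash before reporting the state root.

    import results

    type ToyMpt = object
      dirty: bool            # unsynchronised changes pending?
      root: string           # cached state root

    proc rehash(mpt: var ToyMpt) =
      mpt.root = "0xabcd"    # stand-in for real Merkle re-hashing
      mpt.dirty = false

    proc hash(mpt: var ToyMpt; update: bool): Result[string,string] =
      if mpt.dirty:
        if not update:
          return err("state root not ready, hashify first")
        mpt.rehash()         # lazy hashing happens here, on demand
      ok(mpt.root)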
* Update API tracking log mode: `info` => `debug`
* Use shared `Kvt` descriptor in new Ledger API
why:
No need to create a new descriptor all the time
2023-11-16 19:35:03 +00:00
Core db update storage root management for sub tries (#1964)
* Aristo: Re-phrase `LayerDelta` and `LayerFinal` as object references
why:
Avoids copying in some cases
* Fix copyright header
* Aristo: Verify `leafTie.root` function argument for `merge()` proc
why:
A zero root would lead to an inconsistent DB entry
* Aristo: Update failure condition for the hash labels compiler `hashify()`
why:
A node need not be rejected as long as its links are on the schedule. In
that case, `redo[]` is to become `wff.base[]` at a later stage.
This amends an earlier fix, part of #1952, by also testing against
the target nodes of the `wff.base[]` sets.
* Aristo: Add storage root glue record to `hashify()` schedule
why:
An account leaf node might refer to a non-resolvable storage root ID.
Storage root node chains will end up at the storage root. So the link
`storage-root->account-leaf` needs an extra item in the schedule.
* Aristo: fix error code returned by `fetchPayload()`
details:
The final error code is implied by the error code from the `hikeUp()`
function.
* CoreDb: Discard `createOk` argument in API `getRoot()` function
why:
Not needed for the legacy DB. For the `Aristo` DB, a lazy approach is
implemented where a storage root node is created on-the-fly.
* CoreDb: Prevent `$$` logging in some cases
why:
Logging the function `$$` is not useful when it is called internally,
i.e. when merely retrieving an error text for logging.
* CoreDb: Add `tryHashFn()` to API for pretty printing
why:
Pretty printing must not change the hashification status for the
`Aristo` DB. So there is an independent API wrapper for getting the
node hash which never updates the hashes.
* CoreDb: Discard `update` argument in API `hash()` function
why:
When calling the API function `hash()`, the latest state is always
wanted. For a version that uses the current state as-is without checking,
the function `tryHash()` was added to the backend.
* CoreDb: Update opaque vertex ID objects for the `Aristo` backend
why:
For `Aristo`, vID objects encapsulate a numeric `VertexID`
referencing a vertex (rather than a node hash as used on the
legacy backend.) For storage sub-tries, there might be no initial
vertex known when the descriptor is created. So opaque vertex ID
objects are supported without a valid `VertexID`, which will be
initialised on-the-fly when the first item is merged, as sketched
below.
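
A minimal sketch of the pattern (toy declarations, not the actual
`Aristo` types):

    type
      VertexID = distinct uint64   # numeric vertex ID, 0 meaning "unset"
      ToyRootRef = ref object
        vid: VertexID              # no valid storage root yet while 0

    var nextVid = 0'u64

    proc newVid(): VertexID =
      inc nextVid
      VertexID(nextVid)

    proc merge(root: ToyRootRef; key, val: string) =
      if root.vid.uint64 == 0:     # initialise on-the-fly when the
        root.vid = newVid()        # first item is merged
      discard key; discard val     # merge (key,val) under root.vid here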
* CoreDb: Add pretty printer for opaque vertex ID objects
* Cosmetics, printing profiling data
* CoreDb: Fix segfault in `Aristo` backend when creating MPT descriptor
why:
Missing initialisation error
* CoreDb: Allow MPT to inherit shared context on `Aristo` backend
why:
Creates descriptors with different storage roots for the same
shared `Aristo` DB descriptor.
* Cosmetics, update diagnostic message items for `Aristo` backend
* Fix Copyright year
2024-01-11 19:11:38 +00:00

# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.

import
  std/[algorithm, os, sequtils, strutils],
  eth/common,
  results,
  ../../nimbus/utils/prettify,
  ../../nimbus/db/aristo/aristo_profile,
  ../replay/pp

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

func pp(
    w: AristoDbProfStats,
    spaced = false;
    count = true;
      ): string =
  result = "("
  if w.count < 2:
    result &= w.mean.pp
  else:
    let space = if spaced: " " else: ""
    if count:
      result &= $w.count
    else:
      result &= w.total.pp
    result &= "," & space & w.mean.pp
    if w.devRatio != 0.0: # when all items are the same
      let dr = if 0.2 < w.devRatio: w.devRatio.toPC(0) else: w.devRatio.toPC(1)
      result &= space & "±" & space & dr
  result &= ")"
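
# Illustration (assumed sample formatting): a stats entry renders as
# "(<count>,<mean>[±<dev>])", or "(<total>,<mean>[±<dev>])" when called
# with `count=false`, and as just "(<mean>)" for fewer than two samples.
# Three samples with a 10ms mean and a 25% deviation ratio would print
# something like "(3,10.00ms±25%)".
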
# ------------------------------------------------------------------------------
# Public pretty printing
# ------------------------------------------------------------------------------

proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  if noisy:
    if args.len == 0:
      echo "*** ", pfx
    elif 0 < pfx.len and pfx[^1] != ' ':
      echo pfx, " ", args.toSeq.join
    else:
      echo pfx, args.toSeq.join
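
# Example: `say` prints only when `noisy` is true. Note that the first
# positional argument binds to `pfx`, hence the explicit prefix below.
when isMainModule:
  let noisy = true
  noisy.say "***", "current state: ", 42   # => *** current state: 42
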
proc whisper*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  if noisy:
    if args.len == 0:
      stdout.write("*** ", pfx)
    elif 0 < pfx.len and pfx[^1] != ' ':
      stdout.write(pfx, " ", args.toSeq.join)
    else:
      stdout.write(pfx, args.toSeq.join)
    stdout.flushFile()

proc toPfx*(indent: int): string =
  "\n" & " ".repeat(indent)

# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------

proc findFilePathHelper*(
    file: string;
    baseDir: openArray[string];
    repoDir: openArray[string];
    subDir: openArray[string];
      ): Result[string,void] =
  for dir in baseDir:
    if dir.dirExists:
      for repo in repoDir:
        if (dir / repo).dirExists:
          for sub in subDir:
            if (dir / repo / sub).dirExists:
              let path = dir / repo / sub / file
              if path.fileExists:
                return ok(path)
  echo "*** File not found \"", file, "\"."
  err()
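
# Example (directory names illustrative): this would look for the file as
# <base>/<repo>/<sub>/special.json below each candidate root in turn.
when isMainModule:
  let rc = "special.json".findFilePathHelper(
    baseDir = [".", ".."],
    repoDir = ["tests"],
    subDir = ["fixtures"])
  if rc.isOk:
    echo "using ", rc.value
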
proc profilingPrinter*(
    data: AristoDbProfListRef;
    names: openArray[string];
    header: string;
    indent = 4;
      ): string =
  if not data.isNil:
    let
      pfx = indent.toPfx
      pfx2 = pfx & " "
    result = header & ":"

    let names = @names

    proc pp(w: uint, spaced: bool): string =
      let (a,z) = (if data.list[w].masked: ("[","]") else: ("",""))
      a & names[w] & data.stats(w).pp(spaced=spaced) & z

    result &= "\n" & pfx & "by accumulated duration per procedure"
    for (ela,fns) in data.byElapsed:
      result &= pfx2 & ela.pp & ": " & fns.mapIt(it.pp true).sorted.join(", ")

    result &= "\n" & pfx & "by number of visits"
    for (count,fns) in data.byVisits:
      result &= pfx2 & $count & ": " & fns.mapIt(it.pp false).sorted.join(", ")
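
# Example (illustrative): a nil profile list renders as an empty string,
# while a populated `AristoDbProfListRef` lists the tracked functions by
# accumulated duration and by number of visits.
when isMainModule:
  doAssert AristoDbProfListRef(nil).profilingPrinter(
    names = ["merge", "fetch"], header = "profiling") == ""
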
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------