Core n lega db update tracer api (#2063)

* Aristo: Remove cruft

* Prettifying profile statistics printing & source code cosmetics

* Aristo/Kvt: API tools update

* CoreDb: Corrections, mostly API related

* CoreDb: Changed the tracer `logDb()` result

why:
  Only the key-value table is used from the logger database

* CoreDb: Update legacy tracer

why:
  The `contains()` directive did not correspond to `0 < get().len`

also:
  Copy `CoreDb` meta settings like `trackLegaApi`, `trackNewApi`, etc. to
  overlay tracer descriptor

* CoreDb: Extend/update tracer API

why:
  Get ready for accommodating `Aristo` tracer

* Fix missing import

why:
  Some CI compilers might have (cached?) a different NIM patch level

* Ditto
This commit is contained in:
Jordan Hrycaj 2024-03-07 19:24:05 +00:00 committed by GitHub
parent 88a93beb26
commit 3e1e493368
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
21 changed files with 429 additions and 170 deletions

View File

@ -23,6 +23,10 @@ import
export
AristoDbProfListRef
const
AutoValidateApiHooks = defined(release).not
## No validation needed for the production suite.
# Annotation helper(s)
{.pragma: noRaise, gcsafe, raises: [].}
@ -158,6 +162,14 @@ type
##
## Use `aristo_desc.forget()` to clean up this descriptor.
AristoApiGetKeyFn* =
proc(db: AristoDbRef;
vid: VertexID;
): HashKey
{.noRaise.}
## Simplified version of `getKey()` (see below) returns `VOID_HASH_KEY`
## also on fetch errors.
AristoApiGetKeyRcFn* =
proc(db: AristoDbRef;
vid: VertexID;
@ -346,6 +358,7 @@ type
forget*: AristoApiForgetFn
fork*: AristoApiForkFn
forkTop*: AristoApiForkTopFn
getKey*: AristoApiGetKeyFn
getKeyRc*: AristoApiGetKeyRcFn
hashify*: AristoApiHashifyFn
hasPath*: AristoApiHasPathFn
@ -377,6 +390,7 @@ type
AristoApiProfForgetFn = "forget"
AristoApiProfForkFn = "fork"
AristoApiProfForkTopFn = "forkTop"
AristoApiProfGetKeyFn = "getKey"
AristoApiProfGetKeyRcFn = "getKeyRc"
AristoApiProfHashifyFn = "hashify"
AristoApiProfHasPathFn = "hasPath"
@ -404,6 +418,45 @@ type
data*: AristoDbProfListRef
be*: BackendRef
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
when AutoValidateApiHooks:
proc validate(api: AristoApiObj|AristoApiRef) =
doAssert not api.commit.isNil
doAssert not api.delete.isNil
doAssert not api.delTree.isNil
doAssert not api.fetchPayload.isNil
doAssert not api.finish.isNil
doAssert not api.forget.isNil
doAssert not api.fork.isNil
doAssert not api.forkTop.isNil
doAssert not api.getKey.isNil
doAssert not api.getKeyRc.isNil
doAssert not api.hashify.isNil
doAssert not api.hasPath.isNil
doAssert not api.hikeUp.isNil
doAssert not api.isTop.isNil
doAssert not api.level.isNil
doAssert not api.nForked.isNil
doAssert not api.merge.isNil
doAssert not api.mergePayload.isNil
doAssert not api.pathAsBlob.isNil
doAssert not api.rollback.isNil
doAssert not api.serialise.isNil
doAssert not api.stow.isNil
doAssert not api.txBegin.isNil
doAssert not api.txTop.isNil
doAssert not api.vidFetch.isNil
doAssert not api.vidDispose.isNil
proc validate(prf: AristoApiProfRef; be: BackendRef) =
prf.AristoApiRef.validate
doAssert not prf.data.isNil
if not be.isNil:
doAssert not prf.be.isNil
# ------------------------------------------------------------------------------
# Public API constructors
# ------------------------------------------------------------------------------
@ -411,6 +464,8 @@ type
func init*(api: var AristoApiObj) =
## Initialise an `api` argument descriptor
##
when AutoValidateApiHooks:
api.reset
api.commit = commit
api.delete = delete
api.delTree = delTree
@ -419,6 +474,7 @@ func init*(api: var AristoApiObj) =
api.forget = forget
api.fork = fork
api.forkTop = forkTop
api.getKey = getKey
api.getKeyRc = getKeyRc
api.hashify = hashify
api.hasPath = hasPath
@ -436,14 +492,43 @@ func init*(api: var AristoApiObj) =
api.txTop = txTop
api.vidFetch = vidFetch
api.vidDispose = vidDispose
when AutoValidateApiHooks:
api.validate
func init*(T: type AristoApiRef): T =
new result
result[].init()
func dup*(api: AristoApiRef): AristoApiRef =
new result
result[] = api[]
result = AristoApiRef(
commit: api.commit,
delete: api.delete,
delTree: api.delTree,
fetchPayload: api.fetchPayload,
finish: api.finish,
forget: api.forget,
fork: api.fork,
forkTop: api.forkTop,
getKey: api.getKey,
getKeyRc: api.getKeyRc,
hashify: api.hashify,
hasPath: api.hasPath,
hikeUp: api.hikeUp,
isTop: api.isTop,
level: api.level,
nForked: api.nForked,
merge: api.merge,
mergePayload: api.mergePayload,
pathAsBlob: api.pathAsBlob,
rollback: api.rollback,
serialise: api.serialise,
stow: api.stow,
txBegin: api.txBegin,
txTop: api.txTop,
vidFetch: api.vidFetch,
vidDispose: api.vidDispose)
when AutoValidateApiHooks:
api.validate
# ------------------------------------------------------------------------------
# Public profile API constructor
@ -512,6 +597,11 @@ func init*(
AristoApiProfForkTopFn.profileRunner:
result = api.forkTop(a, b)
profApi.getKey =
proc(a: AristoDbRef; b: VertexID): auto =
AristoApiProfGetKeyFn.profileRunner:
result = api.getKey(a, b)
profApi.getKeyRc =
proc(a: AristoDbRef; b: VertexID): auto =
AristoApiProfGetKeyRcFn.profileRunner:
@ -616,6 +706,9 @@ func init*(
AristoApiProfBePutEndFn.profileRunner:
result = be.putEndFn(a)
when AutoValidateApiHooks:
profApi.validate be
profApi
# ------------------------------------------------------------------------------

View File

@ -216,14 +216,6 @@ proc collapseLeaf(
par.vtx.bVid[hike.legs[^3].nibble] = lf.vid
db.layersPutVtx(hike.root, par.vid, par.vtx)
db.layersPutVtx(hike.root, lf.vid, lf.vtx)
# Make sure that there is a cache entry in case the leaf was pulled from
# the backend.
let
lfPath = hike.legsTo(hike.legs.len - 2, NibblesSeq) & lf.vtx.lPfx
lfPath.pathToTag.isOkOr:
return err((lf.vid,error))
return ok()
of Extension: # (2) or (3)
@ -240,14 +232,6 @@ proc collapseLeaf(
gpr.vtx.bVid[hike.legs[^4].nibble] = lf.vid
db.layersPutVtx(hike.root, gpr.vid, gpr.vtx)
db.layersPutVtx(hike.root, lf.vid, lf.vtx)
# Make sure that there is a cache entry in case the leaf was pulled from
# the backend.
let
lfPath = hike.legsTo(hike.legs.len - 3, NibblesSeq) & lf.vtx.lPfx
lfPath.pathToTag.isOKOr:
return err((lf.vid,error))
return ok()
# No grandparent, so ^3 is root vertex # (3)

View File

@ -26,6 +26,12 @@ type
AristoDbProfEla* = seq[(Duration,seq[uint])]
AristoDbProfMean* = seq[(Duration,seq[uint])]
AristoDbProfCount* = seq[(int,seq[uint])]
AristoDbProfStats* = tuple
count: int
total: Duration
mean: Duration
stdDev: Duration
devRatio: float
# ------------------------------------------------------------------------------
# Private helpers
@ -176,14 +182,14 @@ proc byVisits*(t: AristoDbProfListRef): AristoDbProfCount =
func stats*(
t: AristoDbProfListRef;
inx: uint;
): tuple[n: int, mean: Duration, stdDev: Duration, devRatio: float] =
): AristoDbProfStats =
## Print mean and standard deviation of timing
let data = t.list[inx]
result.n = data.count
if 0 < result.n:
result.count = data.count
if 0 < result.count:
let
mean = data.sum / result.n.float
sqMean = data.sqSum / result.n.float
mean = data.sum / result.count.float
sqMean = data.sqSum / result.count.float
meanSq = mean * mean
# Mathematically, `meanSq <= sqMean` but there might be rounding errors
@ -191,6 +197,7 @@ func stats*(
sigma = sqMean - min(meanSq,sqMean)
stdDev = sigma.sqrt
result.total = data.sum.toDuration
result.mean = mean.toDuration
result.stdDev = stdDev.sqrt.toDuration

View File

@ -13,20 +13,17 @@
import
eth/common,
results,
../../aristo,
../../aristo/[
aristo_desc, aristo_nearby, aristo_path, aristo_tx, aristo_serialise,
aristo_walk],
../../kvt,
../../kvt/[kvt_desc, kvt_init, kvt_tx, kvt_walk],
"../.."/[aristo, aristo/aristo_walk],
"../.."/[kvt, kvt/kvt_init/memory_only, kvt/kvt_walk],
".."/[base, base/base_desc],
./aristo_db/[common_desc, handlers_aristo, handlers_kvt]
import
../../aristo/aristo_init/memory_only as aristo_memory_only
include
./aristo_db/aristo_replicate
# Caveat:
# additional direct include(s) -- not import(s) -- is placed near
# the end of this source file
# Annotation helper(s)
{.pragma: noRaise, gcsafe, raises: [].}
@ -44,6 +41,8 @@ type
AristoCoreDbBE = ref object of CoreDbBackendRef
proc newAristoVoidCoreDbRef*(): CoreDbRef {.noRaise.}
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
@ -72,28 +71,33 @@ proc txMethods(
commitFn: proc(ignore: bool): CoreDbRc[void] =
const info = "commitFn()"
? aTx.commit.toVoidRc(db, info)
? kTx.commit.toVoidRc(db, info)
? db.adbBase.api.commit(aTx).toVoidRc(db, info)
? db.kdbBase.api.commit(kTx).toVoidRc(db, info)
ok(),
rollbackFn: proc(): CoreDbRc[void] =
const info = "rollbackFn()"
? aTx.rollback.toVoidRc(db, info)
? kTx.rollback.toVoidRc(db, info)
? db.adbBase.api.rollback(aTx).toVoidRc(db, info)
? db.kdbBase.api.rollback(kTx).toVoidRc(db, info)
ok(),
disposeFn: proc(): CoreDbRc[void] =
const info = "disposeFn()"
if aTx.isTop: ? aTx.rollback.toVoidRc(db, info)
if kTx.isTop: ? kTx.rollback.toVoidRc(db, info)
if db.adbBase.api.isTop(aTx):
? db.adbBase.api.rollback(aTx).toVoidRc(db, info)
if db.kdbBase.api.isTop(kTx):
? db.kdbBase.api.rollback(kTx).toVoidRc(db, info)
ok(),
safeDisposeFn: proc(): CoreDbRc[void] =
const info = "safeDisposeFn()"
if aTx.isTop: ? aTx.rollback.toVoidRc(db, info)
if kTx.isTop: ? kTx.rollback.toVoidRc(db, info)
if db.adbBase.api.isTop(aTx):
? db.adbBase.api.rollback(aTx).toVoidRc(db, info)
if db.kdbBase.api.isTop(kTx):
? db.kdbBase.api.rollback(kTx).toVoidRc(db, info)
ok())
proc baseMethods(
db: AristoCoreDbRef;
A: typedesc;
@ -165,7 +169,7 @@ proc baseMethods(
getIdFn: proc(): CoreDbRc[CoreDxTxID] =
CoreDxTxID.notImplemented(db, "getIdFn()"),
captureFn: proc(flags: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] =
newCaptureFn: proc(flags: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] =
CoreDxCaptRef.notImplemented(db, "capture()"))
# ------------------------------------------------------------------------------
@ -245,7 +249,7 @@ proc newAristoVoidCoreDbRef*(): CoreDbRef =
AristoDbVoid.init(kvt.VoidBackendRef, aristo.VoidBackendRef)
# ------------------------------------------------------------------------------
# Public helpers for direct backend access
# Public helpers, e.g. for direct backend access
# ------------------------------------------------------------------------------
func toAristoProfData*(
@ -256,6 +260,18 @@ func toAristoProfData*(
result.aristo = db.AristoCoreDbRef.adbBase.api.AristoApiProfRef.data
result.kvt = db.AristoCoreDbRef.kdbBase.api.KvtApiProfRef.data
func toAristoApi*(dsc: CoreDxKvtRef): KvtApiRef =
doAssert not dsc.parent.isNil
doAssert dsc.parent.isAristo
if dsc.parent.isAristo:
return AristoCoreDbRef(dsc.parent).kdbBase.api
func toAristoApi*(dsc: CoreDxMptRef): AristoApiRef =
doAssert not dsc.parent.isNil
doAssert dsc.parent.isAristo
if dsc.parent.isAristo:
return AristoCoreDbRef(dsc.parent).adbBase.api
func toAristo*(be: CoreDbKvtBackendRef): KvtDbRef =
if be.parent.isAristo:
return be.AristoCoreDbKvtBE.kdb
@ -272,16 +288,33 @@ func toAristo*(be: CoreDbAccBackendRef): AristoDbRef =
# Public aristo iterators
# ------------------------------------------------------------------------------
iterator aristoKvtPairs*(dsc: CoreDxKvtRef): (Blob,Blob) {.rlpRaise.} =
let p = dsc.to(KvtDbRef).forkTop.valueOrApiError "aristoKvtPairs()"
defer: discard p.forget()
include
./aristo_db/aristo_replicate
# ------------------------
iterator aristoKvtPairsVoid*(dsc: CoreDxKvtRef): (Blob,Blob) {.rlpRaise.} =
let
api = dsc.toAristoApi()
p = api.forkTop(dsc.to(KvtDbRef)).valueOrApiError "aristoKvtPairs()"
defer: discard api.forget(p)
for (k,v) in kvt.VoidBackendRef.walkPairs p:
yield (k,v)
iterator aristoKvtPairsMem*(dsc: CoreDxKvtRef): (Blob,Blob) {.rlpRaise.} =
let
api = dsc.toAristoApi()
p = api.forkTop(dsc.to(KvtDbRef)).valueOrApiError "aristoKvtPairs()"
defer: discard api.forget(p)
for (k,v) in kvt.MemBackendRef.walkPairs p:
yield (k,v)
iterator aristoMptPairs*(dsc: CoreDxMptRef): (Blob,Blob) {.noRaise.} =
let mpt = dsc.to(AristoDbRef)
let
api = dsc.toAristoApi()
mpt = dsc.to(AristoDbRef)
for (k,v) in mpt.rightPairs LeafTie(root: dsc.rootID):
yield (k.path.pathAsBlob, mpt.serialise(v).valueOr(EmptyBlob))
yield (api.pathAsBlob(k.path), api.serialise(mpt, v).valueOr(EmptyBlob))
iterator aristoReplicateMem*(dsc: CoreDxMptRef): (Blob,Blob) {.rlpRaise.} =
## Instantiation for `MemBackendRef`

View File

@ -27,9 +27,10 @@ iterator aristoReplicate[T](
##
let
root = dsc.rootID
mpt = dsc.to(AristoDbRef)
p = mpt.forkTop.valueOrApiError "aristoReplicate()"
defer: discard p.forget()
mpt = dsc.to(AristoDbRef)
api = dsc.toAristoApi()
p = api.forkTop(mpt).valueOrApiError "aristoReplicate()"
defer: discard api.forget(p)
for (vid,key,vtx,node) in T.replicate(p):
if key.len == 32:
yield (@key, node.encode)

View File

@ -35,7 +35,7 @@ type
# ------------------------------------------------------------------------------
func isAristo*(be: CoreDbRef): bool =
be.dbType in {AristoDbMemory, AristoDbRocks}
be.dbType in {AristoDbMemory, AristoDbRocks, AristoDbVoid}
func toStr*(n: VertexID): string =
result = "$"

View File

@ -471,7 +471,6 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
if rc.value:
# Trie has become empty
cMpt.root = VoidTrieID
ok()
proc mptHasPath(

View File

@ -87,13 +87,14 @@ proc `=destroy`(cKvt: var KvtChildDbObj) =
## Auto destructor
let
base = cKvt.base
api = base.api
kvt = cKvt.kvt
if not kvt.isNil:
block body:
# Do some heuristics to avoid duplicates:
block addToBatchQueue:
if kvt != base.kdb: # not base descriptor?
if base.api.level(kvt) == 0: # no transaction pending?
if api.level(kvt) == 0: # no transaction pending?
break addToBatchQueue # add to destructor queue
else:
break body # ignore `kvt`
@ -132,7 +133,6 @@ proc persistent(
api = base.api
db = base.parent
rc = api.stow(kvt)
# Note that `gc()` may call `persistent()` so there is no `base.gc()` here
if rc.isOk:
ok()
@ -155,7 +155,8 @@ proc forget(
if kvt != base.kdb:
let
db = base.parent
rc = base.api.forget(kvt)
api = base.api
rc = api.forget(kvt)
if rc.isErr:
result = err(rc.error.toError(db, info))
@ -206,7 +207,7 @@ proc kvtMethods(cKvt: KvtChildDbRef): CoreDbKvtFns =
): CoreDbRc[void] =
let
base = cKvt.base
rc = base.api.put(cKvt.kvt, k,v)
rc = base.api.put(cKvt.kvt, k, v)
if rc.isErr:
return err(rc.error.toError(base.parent, info))
ok()
@ -348,12 +349,13 @@ proc newKvtHandler*(
let
db = base.parent
api = base.api
(mode, kvt) = case saveMode:
of TopShot:
(saveMode, ? base.kdb.forkTop.toRc(db, info))
(saveMode, ? api.forkTop(base.kdb).toRc(db, info))
of Companion:
(saveMode, ? base.kdb.fork.toRc(db, info))
(saveMode, ? api.fork(base.kdb).toRc(db, info))
of Shared, AutoSave:
if base.kdb.backend.isNil:
(Shared, base.kdb)

View File

@ -11,6 +11,7 @@
{.push raises: [].}
import
std/tables,
eth/[common, rlp, trie/db, trie/hexary],
stew/byteutils,
results,
@ -27,6 +28,7 @@ type
kvt: CoreDxKvtRef ## Cache, no need to rebuild methods descriptor
tdb: TrieDatabaseRef ## Descriptor reference copy captured with closures
top: LegacyCoreDxTxRef ## Top transaction (if any)
level: int ## Debugging
LegacyDbClose* = proc() {.gcsafe, raises: [].}
## Custom destructor
@ -46,7 +48,7 @@ type
RecorderRef = ref object of RootRef
flags: set[CoreDbCaptFlags]
parent: TrieDatabaseRef
logger: LegacyDbRef
logger: TableRef[Blob,Blob]
appDb: LegacyDbRef
LegacyCoreDbTrie* = ref object of CoreDbTrieRef
@ -184,40 +186,50 @@ proc toAccount(
proc get(db: RecorderRef, key: openArray[byte]): Blob =
## Mixin for `trieDB()`
result = db.logger.tdb.get(key)
result = db.logger.getOrDefault @key
if result.len == 0:
result = db.parent.get(key)
if result.len != 0:
db.logger.tdb.put(key, result)
db.logger[@key] = result
proc put(db: RecorderRef, key, value: openArray[byte]) =
## Mixin for `trieDB()`
db.logger.tdb.put(key, value)
db.logger[@key] = @value
if PersistPut in db.flags:
db.parent.put(key, value)
proc contains(db: RecorderRef, key: openArray[byte]): bool =
## Mixin for `trieDB()`
result = db.parent.contains(key)
doAssert(db.logger.tdb.contains(key) == result)
if db.logger.hasKey @key:
return true
if db.parent.contains key:
return true
proc del(db: RecorderRef, key: openArray[byte]) =
## Mixin for `trieDB()`
db.logger.tdb.del(key)
db.logger.del @key
if PersistDel in db.flags:
db.parent.del(key)
db.parent.del key
proc newRecorderRef(
tdb: TrieDatabaseRef;
dbType: CoreDbType,
db: LegacyDbRef;
flags: set[CoreDbCaptFlags];
): RecorderRef =
## Capture constructor, uses `mixin` values from above
result = RecorderRef(
flags: flags,
parent: tdb,
logger: LegacyDbRef().init(LegacyDbMemory, newMemoryDB()).LegacyDbRef)
result.appDb = LegacyDbRef().init(dbType, trieDB result).LegacyDbRef
flags: flags,
parent: db.tdb,
logger: newTable[Blob,Blob]())
let newDb = LegacyDbRef(
level: db.level+1,
trackLegaApi: db.trackLegaApi,
trackNewApi: db.trackNewApi,
trackLedgerApi: db.trackLedgerApi,
localDbOnly: db.localDbOnly,
profTab: db.profTab,
ledgerHook: db.ledgerHook)
# Note: the **mixin** magic happens in `trieDB()`
result.appDb = newDb.init(db.dbType, trieDB result).LegacyDbRef
# ------------------------------------------------------------------------------
# Private database method function tables
@ -399,16 +411,19 @@ proc tidMethods(tid: TransactionID; tdb: TrieDatabaseRef): CoreDbTxIdFns =
tdb.shortTimeReadOnly(tid, action())
ok())
proc cptMethods(cpt: RecorderRef): CoreDbCaptFns =
proc cptMethods(cpt: RecorderRef; db: LegacyDbRef): CoreDbCaptFns =
CoreDbCaptFns(
recorderFn: proc(): CoreDbRc[CoreDbRef] =
ok(cpt.appDb),
recorderFn: proc(): CoreDbRef =
cpt.appDb,
logDbFn: proc(): CoreDbRc[CoreDbRef] =
ok(cpt.logger),
logDbFn: proc(): TableRef[Blob,Blob] =
cpt.logger,
getFlagsFn: proc(): set[CoreDbCaptFlags] =
cpt.flags)
cpt.flags,
forgetFn: proc(): CoreDbRc[void] =
err(db.bless(NotImplemented, LegacyCoreDbError(ctx: "disposeFn()"))))
# ------------------------------------------------------------------------------
# Private base methods (including constructors)
@ -513,8 +528,8 @@ proc baseMethods(
db.top.methods = db.top.txMethods()
ok(db.bless db.top),
captureFn: proc(flgs: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] =
let fns = newRecorderRef(tdb, dbType, flgs).cptMethods
newCaptureFn: proc(flgs: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] =
let fns = db.newRecorderRef(flgs).cptMethods(db)
ok(db.bless CoreDxCaptRef(methods: fns)))
# ------------------------------------------------------------------------------

View File

@ -990,28 +990,65 @@ proc newCapture*(
db: CoreDbRef;
flags: set[CoreDbCaptFlags] = {};
): CoreDbRc[CoreDxCaptRef] =
## Constructor
db.setTrackNewApi BaseCaptureFn
result = db.methods.captureFn flags
## Trace constructor providing an overlay on top of the argument database
## `db`. This overlay provides a replacement database handle that can be
## retrieved via `db.recorder()` (which can in turn be overlaid.) While
## running the overlay stores data in a log-table which can be retrieved
## via `db.logDb()`.
##
## Caveat:
## The original database argument `db` should not be used while the tracer
## is active (i.e. exists as overlay). The behaviour for this situation
## is undefined and depends on the backend implementation of the tracer.
##
db.setTrackNewApi BaseNewCaptureFn
result = db.methods.newCaptureFn flags
db.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result
proc recorder*(cp: CoreDxCaptRef): CoreDbRc[CoreDbRef] =
## Getter
cp.setTrackNewApi CptRecorderFn
result = cp.methods.recorderFn()
cp.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result
proc recorder*(cpt: CoreDxCaptRef): CoreDbRef =
## Getter, returns a tracer replacement handle to be used as new database.
## It records every action like fetch, store, hasKey, hasPath and delete.
## This descriptor can be superseded by a new overlay tracer (using
## `newCapture()`, again.)
##
## Caveat:
## Unless the descriptor `cpt` refers to the top level overlay tracer, the
## result is undefined and depends on the backend implementation of the
## tracer.
##
cpt.setTrackNewApi CptRecorderFn
result = cpt.methods.recorderFn()
cpt.ifTrackNewApi: debug newApiTxt, ctx, elapsed
proc logDb*(cp: CoreDxCaptRef): CoreDbRc[CoreDbRef] =
proc logDb*(cp: CoreDxCaptRef): TableRef[Blob,Blob] =
## Getter, returns the logger table for the overlay tracer database.
##
## Caveat:
## Unless the descriptor `cpt` refers to the top level overlay tracer, the
## result is undefined and depends on the backend implementation of the
## tracer.
##
cp.setTrackNewApi CptLogDbFn
result = cp.methods.logDbFn()
cp.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result
cp.ifTrackNewApi: debug newApiTxt, ctx, elapsed
proc flags*(cp: CoreDxCaptRef): set[CoreDbCaptFlags] =
proc flags*(cp: CoreDxCaptRef):set[CoreDbCaptFlags] =
## Getter
cp.setTrackNewApi CptFlagsFn
result = cp.methods.getFlagsFn()
cp.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result
proc forget*(cp: CoreDxCaptRef): CoreDbRc[void] =
## Explicitly stop recording the current tracer instance. If this call was
## successful, the database argument `db` used when starting the trace
## with `newCapture()` will be fully operational, again. This will also
## implicitly take place when the `NIM` garbage collector recycles an
## abandoned capture descriptor.
##
cp.setTrackNewApi CptForgetFn
result = cp.methods.forgetFn()
cp.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result
# ------------------------------------------------------------------------------
# Public methods, legacy API
# ------------------------------------------------------------------------------
@ -1241,12 +1278,12 @@ when ProvideLegacyAPI:
proc recorder*(cp: CoreDbCaptRef): CoreDbRef =
cp.setTrackLegaApi LegaCptRecorderFn
result = cp.distinctBase.recorder().expect $ctx
result = cp.distinctBase.recorder()
cp.ifTrackLegaApi: debug legaApiTxt, ctx, elapsed
proc logDb*(cp: CoreDbCaptRef): CoreDbRef =
proc logDb*(cp: CoreDbCaptRef): TableRef[Blob,Blob] =
cp.setTrackLegaApi LegaCptLogDbFn
result = cp.distinctBase.logDb().expect $ctx
result = cp.distinctBase.logDb()
cp.ifTrackLegaApi: debug legaApiTxt, ctx, elapsed
proc flags*(cp: CoreDbCaptRef): set[CoreDbCaptFlags] =

View File

@ -40,13 +40,13 @@ type
AnyBackendFn = "any/backend"
AnyIsPruningFn = "any/isPruning"
BaseCaptureFn = "newCapture"
BaseDbTypeFn = "dbType"
BaseFinishFn = "finish"
BaseGetTrieFn = "getTrie"
BaseLegacySetupFn = "compensateLegacySetup"
BaseLevelFn = "level"
BaseNewAccFn = "newAccMpt"
BaseNewCaptureFn = "newCapture"
BaseNewKvtFn = "newKvt"
BaseNewMptFn = "newMpt"
BaseNewTxFn = "newTransaction"
@ -54,6 +54,7 @@ type
CptFlagsFn = "cpt/flags"
CptLogDbFn = "cpt/logDb"
CptRecorderFn = "cpt/recorder"
CptForgetFn = "cpt/forget"
ErrorPrintFn = "$$"
EthAccRecastFn = "recast"
@ -197,6 +198,9 @@ proc toStr*(rc: CoreDbRc[Hash256]): string =
proc toStr*(rc: CoreDbRc[CoreDbTrieRef]): string =
if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")"
proc toStr*(rc: CoreDbRc[set[CoreDbCaptFlags]]): string =
if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")"
proc toStr*(rc: CoreDbRc[Account]): string =
if rc.isOk: "ok(Account)" else: "err(" & rc.error.toStr & ")"
@ -205,6 +209,7 @@ proc toStr[T](rc: CoreDbRc[T]; ifOk: static[string]): string =
proc toStr*(rc: CoreDbRc[CoreDbRef]): string = rc.toStr "db"
proc toStr*(rc: CoreDbRc[CoreDbAccount]): string = rc.toStr "acc"
proc toStr*(rc: CoreDbRc[CoreDxKvtRef]): string = rc.toStr "kvt"
proc toStr*(rc: CoreDbRc[CoreDxTxID]): string = rc.toStr "txId"
proc toStr*(rc: CoreDbRc[CoreDxTxRef]): string = rc.toStr "tx"
proc toStr*(rc: CoreDbRc[CoreDxCaptRef]): string = rc.toStr "capt"

View File

@ -11,6 +11,7 @@
{.push raises: [].}
import
std/tables,
eth/common,
results,
../../aristo/aristo_profile
@ -65,6 +66,7 @@ type
HashNotAvailable
TrieLocked
StorageFailed
NotImplemented
CoreDbSubTrie* = enum
StorageTrie = 0
@ -110,8 +112,9 @@ type
): CoreDbRc[CoreDxAccRef] {.noRaise.}
CoreDbBaseTxGetIdFn* = proc(): CoreDbRc[CoreDxTxID] {.noRaise.}
CoreDbBaseTxBeginFn* = proc(): CoreDbRc[CoreDxTxRef] {.noRaise.}
CoreDbBaseCaptFn* =
CoreDbBaseNewCaptFn* =
proc(flgs: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] {.noRaise.}
CoreDbBaseGetCaptFn* = proc(): CoreDbRc[CoreDxCaptRef] {.noRaise.}
CoreDbBaseFns* = object
verifyFn*: CoreDbBaseVerifyFn
@ -137,7 +140,7 @@ type
beginFn*: CoreDbBaseTxBeginFn
# capture/tracer constructors
captureFn*: CoreDbBaseCaptFn
newCaptureFn*: CoreDbBaseNewCaptFn
# --------------------------------------------------
@ -254,14 +257,16 @@ type
# --------------------------------------------------
# Sub-descriptor: capture recorder methods
# --------------------------------------------------
CoreDbCaptRecorderFn* = proc(): CoreDbRc[CoreDbRef] {.noRaise.}
CoreDbCaptLogDbFn* = proc(): CoreDbRc[CoreDbRef] {.noRaise.}
CoreDbCaptRecorderFn* = proc(): CoreDbRef {.noRaise.}
CoreDbCaptLogDbFn* = proc(): TableRef[Blob,Blob] {.noRaise.}
CoreDbCaptFlagsFn* = proc(): set[CoreDbCaptFlags] {.noRaise.}
CoreDbCaptForgetFn* = proc(): CoreDbRc[void] {.noRaise.}
CoreDbCaptFns* = object
recorderFn*: CoreDbCaptRecorderFn
logDbFn*: CoreDbCaptLogDbFn
getFlagsFn*: CoreDbCaptFlagsFn
forgetFn*: CoreDbCaptForgetFn
# --------------------------------------------------
# Production descriptors

View File

@ -45,7 +45,7 @@ proc validateMethodsDesc(base: CoreDbBaseFns) =
doAssert not base.newAccFn.isNil
doAssert not base.getIdFn.isNil
doAssert not base.beginFn.isNil
doAssert not base.captureFn.isNil
doAssert not base.newCaptureFn.isNil
proc validateMethodsDesc(kvt: CoreDbKvtFns) =
doAssert not kvt.backendFn.isNil
@ -121,6 +121,7 @@ proc validateMethodsDesc(cpt: CoreDxCaptRef) =
doAssert not cpt.parent.isNil
doAssert not cpt.methods.recorderFn.isNil
doAssert not cpt.methods.getFlagsFn.isNil
doAssert not cpt.methods.forgetFn.isNil
proc validateMethodsDesc(tx: CoreDxTxRef) =
doAssert not tx.isNil

View File

@ -45,7 +45,10 @@ iterator pairs*(kvt: CoreDxKvtRef): (Blob, Blob) {.apiRaise.} =
for k,v in kvt.legaKvtPairs():
yield (k,v)
of AristoDbMemory:
for k,v in kvt.aristoKvtPairs():
for k,v in kvt.aristoKvtPairsMem():
yield (k,v)
of AristoDbVoid:
for k,v in kvt.aristoKvtPairsVoid():
yield (k,v)
else:
raiseAssert: "Unsupported database type: " & $kvt.parent.dbType

View File

@ -17,7 +17,7 @@ import
./backend/[aristo_db, legacy_db]
import
#./core_apps_legacy as core_apps
#./core_apps_legacy as core_apps -- avoid
./core_apps_newapi as core_apps
import
./base except bless

View File

@ -18,6 +18,10 @@ import
../aristo/aristo_profile,
"."/[kvt_desc, kvt_desc/desc_backend, kvt_init, kvt_tx, kvt_utils]
const
AutoValidateApiHooks = defined(release).not
## No validation needed for the production suite.
# Annotation helper(s)
{.pragma: noRaise, gcsafe, raises: [].}
@ -101,35 +105,85 @@ type
data*: KvtDbProfListRef
be*: BackendRef
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
when AutoValidateApiHooks:
proc validate(api: KvtApiObj|KvtApiRef) =
doAssert not api.commit.isNil
doAssert not api.del.isNil
doAssert not api.finish.isNil
doAssert not api.forget.isNil
doAssert not api.fork.isNil
doAssert not api.forkTop.isNil
doAssert not api.get.isNil
doAssert not api.hasKey.isNil
doAssert not api.isTop.isNil
doAssert not api.level.isNil
doAssert not api.nForked.isNil
doAssert not api.put.isNil
doAssert not api.rollback.isNil
doAssert not api.stow.isNil
doAssert not api.txBegin.isNil
doAssert not api.txTop.isNil
proc validate(prf: KvtApiProfRef; be: BackendRef) =
prf.KvtApiRef.validate
doAssert not prf.data.isNil
if not be.isNil:
doAssert not prf.be.isNil
# ------------------------------------------------------------------------------
# Public API constructors
# ------------------------------------------------------------------------------
func init*(api: var KvtApiObj) =
api.commit = commit
api.del = del
api.finish = finish
api.forget = forget
api.fork = fork
api.forkTop = forkTop
api.get = get
api.hasKey = hasKey
api.isTop = isTop
api.level = level
api.nForked = nForked
api.put = put
api.rollback = rollback
api.stow = stow
api.txBegin = txBegin
api.txTop = txTop
when AutoValidateApiHooks:
api.reset
api.commit = commit
api.del = del
api.finish = finish
api.forget = forget
api.fork = fork
api.forkTop = forkTop
api.get = get
api.hasKey = hasKey
api.isTop = isTop
api.level = level
api.nForked = nForked
api.put = put
api.rollback = rollback
api.stow = stow
api.txBegin = txBegin
api.txTop = txTop
when AutoValidateApiHooks:
api.validate
func init*(T: type KvtApiRef): T =
result = new T
result[].init()
func dup*(api: KvtApiRef): KvtApiRef =
new result
result[] = api[]
result = KvtApiRef(
commit: api.commit,
del: api.del,
finish: api.finish,
forget: api.forget,
fork: api.fork,
forkTop: api.forkTop,
get: api.get,
hasKey: api.hasKey,
isTop: api.isTop,
level: api.level,
nForked: api.nForked,
put: api.put,
rollback: api.rollback,
stow: api.stow,
txBegin: api.txBegin,
txTop: api.txTop)
when AutoValidateApiHooks:
api.validate
# ------------------------------------------------------------------------------
# Public profile API constructor
@ -251,6 +305,9 @@ func init*(
KvtApiProfBePutEndFn.profileRunner:
result = be.putEndFn(a)
when AutoValidateApiHooks:
profApi.validate be
profApi
# ------------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -8,6 +8,13 @@
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## Populates the tracer API methods
## ================================
##
## The module name `legacy_tracer` is probably a misnomer as it also works
## with the new APIs for `CoreDb` and `Ledger`.
##
import
std/[json, sets, strutils, hashes],
eth/common/eth_types,

View File

@ -86,6 +86,12 @@ proc dumpMemoryDB*(node: JsonNode, db: CoreDbRef) =
n[k.toHex(false)] = %v
node["state"] = n
proc dumpMemoryDB*(node: JsonNode, kvt: TableRef[Blob,Blob]) =
var n = newJObject()
for k, v in kvt:
n[k.toHex(false)] = %v
node["state"] = n
proc dumpMemoryDB*(node: JsonNode, capture: CoreDbCaptRef) =
node.dumpMemoryDB capture.logDb
@ -149,7 +155,7 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
break
# internal transactions:
var stateBefore = AccountsCache.init(capture.recorder, beforeRoot, com.pruneTrie)
var stateBefore = AccountsLedgerRef.init(capture.recorder, beforeRoot, com.pruneTrie)
for idx, acc in tracedAccountsPairs(tracerInst):
before.captureAccount(stateBefore, acc, internalTxName & $idx)
@ -180,7 +186,7 @@ proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpS
var
before = newJArray()
after = newJArray()
stateBefore = AccountsCache.init(capture.recorder, parent.stateRoot, com.pruneTrie)
stateBefore = AccountsLedgerRef.init(capture.recorder, parent.stateRoot, com.pruneTrie)
for idx, tx in body.transactions:
let sender = tx.getSender

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -9,8 +9,9 @@
# according to those terms.
import
std/[os, sequtils, strformat, strutils, tables],
std/[os, strformat, strutils, tables],
chronicles,
stew/byteutils,
../nimbus/db/ledger,
../nimbus/common/common,
../nimbus/core/chain,
@ -62,14 +63,14 @@ proc findFilePath(file: string): string =
return path
proc pp*(a: EthAddress): string =
a.mapIt(it.toHex(2)).join[32 .. 39].toLowerAscii
a.toHex[32 .. 39].toLowerAscii
proc pp*(tx: Transaction): string =
  ## Abbreviated "(<sender>,<nonce>)" rendering of a transaction.
  # Alternative rendering kept for reference:
  # "(" & tx.ecRecover.value.pp & "," & $tx.nonce & ")"
  let sender = tx.getSender.pp
  result = "(" & sender & "," & $tx.nonce & ")"
proc pp*(h: KeccakHash): string =
h.data.mapIt(it.toHex(2)).join[52 .. 63].toLowerAscii
h.data.toHex[52 .. 63].toLowerAscii
proc pp*(tx: Transaction; vmState: BaseVMState): string =
let address = tx.getSender

View File

@ -24,7 +24,7 @@ type
when CoreDbEnableApiProfiling:
import
std/[algorithm, sequtils, strutils],
std/sequtils,
../../nimbus/db/aristo/[aristo_api, aristo_profile],
../../nimbus/db/kvt/kvt_api
var
@ -35,7 +35,7 @@ when CoreDbEnableApiProfiling:
when LedgerEnableApiProfiling:
when not CoreDbEnableApiProfiling:
import
std/[algorithm, sequtils, strutils]
std/sequtils
var
ldgProfData: LedgerProfListRef
@ -112,32 +112,6 @@ template stopLoggingAfter(noisy: bool; code: untyped) =
defer: noisy.stopLogging()
code
# --------------
when CoreDbEnableApiProfiling or
LedgerEnableApiProfiling:
proc profilingPrinter(
    data: AristoDbProfListRef;
    names: openArray[string];
    header: string;
    indent = 4;
      ): string =
  ## Render collected API profiling data as a multi-line report: first the
  ## entries grouped by accumulated elapsed duration per procedure, then
  ## grouped by number of visits. Returns an empty string when `data` is nil.
  ##
  ## `names` maps the numeric function indices stored in `data` to readable
  ## procedure names.
  if not data.isNil:
    let
      pfx = indent.toPfx    # newline followed by `indent` spaces
      pfx2 = pfx & " "      # item lines get one extra space of indentation
    result = header & ":"
    result &= "\n" & pfx & "by accumulated duration per procedure"
    for (ela,fns) in data.byElapsed:
      result &= pfx2 & ela.pp & ": " & fns.mapIt(
        names[it] & data.stats(it).pp(true)).sorted.join(", ")
    result &= "\n" & pfx & "by number of visits"
    for (count,fns) in data.byVisits:
      result &= pfx2 & $count & ": " & fns.mapIt(
        names[it] & data.stats(it).pp).sorted.join(", ")
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------

View File

@ -9,16 +9,37 @@
# distributed except according to those terms.
import
std/[os, sequtils, times],
std/[algorithm, os, sequtils],
eth/common,
results,
../../nimbus/utils/prettify,
../../nimbus/db/aristo/aristo_profile,
../replay/pp
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
func pp(
    w: AristoDbProfStats,
    spaced = false;
    count = true;
      ): string =
  ## Pretty print a profiling statistics record as
  ## "(<count-or-total>,<mean>[±<deviation>])". A record with fewer than two
  ## samples prints only its mean.
  if w.count < 2:
    return "(" & w.mean.pp & ")"
  let
    blank = if spaced: " " else: ""
    lead = if count: $w.count else: w.total.pp
  result = "(" & lead & "," & blank & w.mean.pp
  if w.devRatio != 0.0: # zero when all samples are identical
    let dev =
      if 0.2 < w.devRatio: w.devRatio.toPC(0)
      else: w.devRatio.toPC(1)
    result &= blank & "±" & blank & dev
  result &= ")"
# ------------------------------------------------------------------------------
# Public pretty printing
# ------------------------------------------------------------------------------
@ -35,21 +56,6 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
proc toPfx*(indent: int): string =
  ## Line-break prefix: a newline followed by `indent` spaces.
  "\n" & spaces(indent)
func pp*(
    w: tuple[n: int, mean: Duration, stdDev: Duration, devRatio: float];
    spaced = false;
      ): string =
  ## Pretty print a statistics tuple as "(<n>,<mean>[±<deviation>])"; with
  ## fewer than two samples only the mean is printed.
  if w.n < 2:
    return "(" & w.mean.pp & ")"
  let blank = if spaced: " " else: ""
  result = "(" & $w.n & "," & blank & w.mean.pp
  if w.devRatio != 0.0: # zero when all samples are identical
    let dev =
      if 0.2 < w.devRatio: w.devRatio.toPC(0)
      else: w.devRatio.toPC(1)
    result &= blank & "±" & blank & dev
  result &= ")"
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
@ -72,6 +78,29 @@ proc findFilePathHelper*(
echo "*** File not found \"", file, "\"."
err()
proc profilingPrinter*(
    data: AristoDbProfListRef;
    names: openArray[string];
    header: string;
    indent = 4;
      ): string =
  ## Produce a multi-line profiling report: entries sorted by accumulated
  ## elapsed duration per procedure, followed by entries sorted by number
  ## of visits. An empty string is returned when `data` is nil.
  ##
  ## `names` maps the numeric function indices stored in `data` to readable
  ## procedure names.
  if data.isNil:
    return
  let
    linePfx = indent.toPfx   # newline plus `indent` spaces
    itemPfx = linePfx & " "  # item lines indented one extra space
  result = header & ":"
  result &= "\n" & linePfx & "by accumulated duration per procedure"
  for (elapsed, fnInxList) in data.byElapsed:
    let items = fnInxList.mapIt(names[it] & data.stats(it).pp(spaced=true))
    result &= itemPfx & elapsed.pp & ": " & items.sorted.join(", ")
  result &= "\n" & linePfx & "by number of visits"
  for (visits, fnInxList) in data.byVisits:
    let items = fnInxList.mapIt(names[it] & data.stats(it).pp(count=false))
    result &= itemPfx & $visits & ": " & items.sorted.join(", ")
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------