Core n lega db update tracer api (#2063)

* Aristo: Remove cruft

* Prettifying profile statistics printing & source code cosmetics

* Aristo/Kvt: API tools update

* CoreDb: Corrections, mostly API related

* CoreDb: Changed the tracer `logDb()` result

why:
  Only the key-value table is used from the logger database

* CoreDb: Update legacy tracer

why:
  The `contains()` directive did not correspond to `0 < get().len`

also:
  Copy `CoreDb` meta settings like `trackLegaApi`, `trackNewApi`, etc. to
  overlay tracer descriptor

* CoreDb: Extend/update tracer API

why:
  Get ready for accommodating `Aristo` tracer

* Fix missing import

why:
  Some CI compilers might have (cached?) a different NIM patch level

* Ditto
This commit is contained in:
Jordan Hrycaj 2024-03-07 19:24:05 +00:00 committed by GitHub
parent 88a93beb26
commit 3e1e493368
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
21 changed files with 429 additions and 170 deletions

View File

@ -23,6 +23,10 @@ import
export export
AristoDbProfListRef AristoDbProfListRef
const
AutoValidateApiHooks = defined(release).not
## No validatinon needed for production suite.
# Annotation helper(s) # Annotation helper(s)
{.pragma: noRaise, gcsafe, raises: [].} {.pragma: noRaise, gcsafe, raises: [].}
@ -158,6 +162,14 @@ type
## ##
## Use `aristo_desc.forget()` to clean up this descriptor. ## Use `aristo_desc.forget()` to clean up this descriptor.
AristoApiGetKeyFn* =
proc(db: AristoDbRef;
vid: VertexID;
): HashKey
{.noRaise.}
## Simplified version of `getKey(0` (see below) returns `VOID_HASH_KEY`
## also on fetch errors.
AristoApiGetKeyRcFn* = AristoApiGetKeyRcFn* =
proc(db: AristoDbRef; proc(db: AristoDbRef;
vid: VertexID; vid: VertexID;
@ -346,6 +358,7 @@ type
forget*: AristoApiForgetFn forget*: AristoApiForgetFn
fork*: AristoApiForkFn fork*: AristoApiForkFn
forkTop*: AristoApiForkTopFn forkTop*: AristoApiForkTopFn
getKey*: AristoApiGetKeyFn
getKeyRc*: AristoApiGetKeyRcFn getKeyRc*: AristoApiGetKeyRcFn
hashify*: AristoApiHashifyFn hashify*: AristoApiHashifyFn
hasPath*: AristoApiHasPathFn hasPath*: AristoApiHasPathFn
@ -377,6 +390,7 @@ type
AristoApiProfForgetFn = "forget" AristoApiProfForgetFn = "forget"
AristoApiProfForkFn = "fork" AristoApiProfForkFn = "fork"
AristoApiProfForkTopFn = "forkTop" AristoApiProfForkTopFn = "forkTop"
AristoApiProfGetKeyFn = "getKey"
AristoApiProfGetKeyRcFn = "getKeyRc" AristoApiProfGetKeyRcFn = "getKeyRc"
AristoApiProfHashifyFn = "hashify" AristoApiProfHashifyFn = "hashify"
AristoApiProfHasPathFn = "hasPath" AristoApiProfHasPathFn = "hasPath"
@ -404,6 +418,45 @@ type
data*: AristoDbProfListRef data*: AristoDbProfListRef
be*: BackendRef be*: BackendRef
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
when AutoValidateApiHooks:
proc validate(api: AristoApiObj|AristoApiRef) =
doAssert not api.commit.isNil
doAssert not api.delete.isNil
doAssert not api.delTree.isNil
doAssert not api.fetchPayload.isNil
doAssert not api.finish.isNil
doAssert not api.forget.isNil
doAssert not api.fork.isNil
doAssert not api.forkTop.isNil
doAssert not api.getKey.isNil
doAssert not api.getKeyRc.isNil
doAssert not api.hashify.isNil
doAssert not api.hasPath.isNil
doAssert not api.hikeUp.isNil
doAssert not api.isTop.isNil
doAssert not api.level.isNil
doAssert not api.nForked.isNil
doAssert not api.merge.isNil
doAssert not api.mergePayload.isNil
doAssert not api.pathAsBlob.isNil
doAssert not api.rollback.isNil
doAssert not api.serialise.isNil
doAssert not api.stow.isNil
doAssert not api.txBegin.isNil
doAssert not api.txTop.isNil
doAssert not api.vidFetch.isNil
doAssert not api.vidDispose.isNil
proc validate(prf: AristoApiProfRef; be: BackendRef) =
prf.AristoApiRef.validate
doAssert not prf.data.isNil
if not be.isNil:
doAssert not prf.be.isNil
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public API constuctors # Public API constuctors
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -411,6 +464,8 @@ type
func init*(api: var AristoApiObj) = func init*(api: var AristoApiObj) =
## Initialise an `api` argument descriptor ## Initialise an `api` argument descriptor
## ##
when AutoValidateApiHooks:
api.reset
api.commit = commit api.commit = commit
api.delete = delete api.delete = delete
api.delTree = delTree api.delTree = delTree
@ -419,6 +474,7 @@ func init*(api: var AristoApiObj) =
api.forget = forget api.forget = forget
api.fork = fork api.fork = fork
api.forkTop = forkTop api.forkTop = forkTop
api.getKey = getKey
api.getKeyRc = getKeyRc api.getKeyRc = getKeyRc
api.hashify = hashify api.hashify = hashify
api.hasPath = hasPath api.hasPath = hasPath
@ -436,14 +492,43 @@ func init*(api: var AristoApiObj) =
api.txTop = txTop api.txTop = txTop
api.vidFetch = vidFetch api.vidFetch = vidFetch
api.vidDispose = vidDispose api.vidDispose = vidDispose
when AutoValidateApiHooks:
api.validate
func init*(T: type AristoApiRef): T = func init*(T: type AristoApiRef): T =
new result new result
result[].init() result[].init()
func dup*(api: AristoApiRef): AristoApiRef = func dup*(api: AristoApiRef): AristoApiRef =
new result result = AristoApiRef(
result[] = api[] commit: api.commit,
delete: api.delete,
delTree: api.delTree,
fetchPayload: api.fetchPayload,
finish: api.finish,
forget: api.forget,
fork: api.fork,
forkTop: api.forkTop,
getKey: api.getKey,
getKeyRc: api.getKeyRc,
hashify: api.hashify,
hasPath: api.hasPath,
hikeUp: api.hikeUp,
isTop: api.isTop,
level: api.level,
nForked: api.nForked,
merge: api.merge,
mergePayload: api.mergePayload,
pathAsBlob: api.pathAsBlob,
rollback: api.rollback,
serialise: api.serialise,
stow: api.stow,
txBegin: api.txBegin,
txTop: api.txTop,
vidFetch: api.vidFetch,
vidDispose: api.vidDispose)
when AutoValidateApiHooks:
api.validate
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public profile API constuctor # Public profile API constuctor
@ -512,6 +597,11 @@ func init*(
AristoApiProfForkTopFn.profileRunner: AristoApiProfForkTopFn.profileRunner:
result = api.forkTop(a, b) result = api.forkTop(a, b)
profApi.getKey =
proc(a: AristoDbRef; b: VertexID): auto =
AristoApiProfGetKeyFn.profileRunner:
result = api.getKey(a, b)
profApi.getKeyRc = profApi.getKeyRc =
proc(a: AristoDbRef; b: VertexID): auto = proc(a: AristoDbRef; b: VertexID): auto =
AristoApiProfGetKeyRcFn.profileRunner: AristoApiProfGetKeyRcFn.profileRunner:
@ -616,6 +706,9 @@ func init*(
AristoApiProfBePutEndFn.profileRunner: AristoApiProfBePutEndFn.profileRunner:
result = be.putEndFn(a) result = be.putEndFn(a)
when AutoValidateApiHooks:
profApi.validate be
profApi profApi
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -216,14 +216,6 @@ proc collapseLeaf(
par.vtx.bVid[hike.legs[^3].nibble] = lf.vid par.vtx.bVid[hike.legs[^3].nibble] = lf.vid
db.layersPutVtx(hike.root, par.vid, par.vtx) db.layersPutVtx(hike.root, par.vid, par.vtx)
db.layersPutVtx(hike.root, lf.vid, lf.vtx) db.layersPutVtx(hike.root, lf.vid, lf.vtx)
# Make sure that there is a cache enty in case the leaf was pulled from
# the backend.
let
lfPath = hike.legsTo(hike.legs.len - 2, NibblesSeq) & lf.vtx.lPfx
lfPath.pathToTag.isOkOr:
return err((lf.vid,error))
return ok() return ok()
of Extension: # (2) or (3) of Extension: # (2) or (3)
@ -240,14 +232,6 @@ proc collapseLeaf(
gpr.vtx.bVid[hike.legs[^4].nibble] = lf.vid gpr.vtx.bVid[hike.legs[^4].nibble] = lf.vid
db.layersPutVtx(hike.root, gpr.vid, gpr.vtx) db.layersPutVtx(hike.root, gpr.vid, gpr.vtx)
db.layersPutVtx(hike.root, lf.vid, lf.vtx) db.layersPutVtx(hike.root, lf.vid, lf.vtx)
# Make sure that there is a cache enty in case the leaf was pulled from
# the backend.
let
lfPath = hike.legsTo(hike.legs.len - 3, NibblesSeq) & lf.vtx.lPfx
lfPath.pathToTag.isOKOr:
return err((lf.vid,error))
return ok() return ok()
# No grandparent, so ^3 is root vertex # (3) # No grandparent, so ^3 is root vertex # (3)

View File

@ -26,6 +26,12 @@ type
AristoDbProfEla* = seq[(Duration,seq[uint])] AristoDbProfEla* = seq[(Duration,seq[uint])]
AristoDbProfMean* = seq[(Duration,seq[uint])] AristoDbProfMean* = seq[(Duration,seq[uint])]
AristoDbProfCount* = seq[(int,seq[uint])] AristoDbProfCount* = seq[(int,seq[uint])]
AristoDbProfStats* = tuple
count: int
total: Duration
mean: Duration
stdDev: Duration
devRatio: float
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private helpers # Private helpers
@ -176,14 +182,14 @@ proc byVisits*(t: AristoDbProfListRef): AristoDbProfCount =
func stats*( func stats*(
t: AristoDbProfListRef; t: AristoDbProfListRef;
inx: uint; inx: uint;
): tuple[n: int, mean: Duration, stdDev: Duration, devRatio: float] = ): AristoDbProfStats =
## Print mean and strandard deviation of timing ## Print mean and strandard deviation of timing
let data = t.list[inx] let data = t.list[inx]
result.n = data.count result.count = data.count
if 0 < result.n: if 0 < result.count:
let let
mean = data.sum / result.n.float mean = data.sum / result.count.float
sqMean = data.sqSum / result.n.float sqMean = data.sqSum / result.count.float
meanSq = mean * mean meanSq = mean * mean
# Mathematically, `meanSq <= sqMean` but there might be rounding errors # Mathematically, `meanSq <= sqMean` but there might be rounding errors
@ -191,6 +197,7 @@ func stats*(
sigma = sqMean - min(meanSq,sqMean) sigma = sqMean - min(meanSq,sqMean)
stdDev = sigma.sqrt stdDev = sigma.sqrt
result.total = data.sum.toDuration
result.mean = mean.toDuration result.mean = mean.toDuration
result.stdDev = stdDev.sqrt.toDuration result.stdDev = stdDev.sqrt.toDuration

View File

@ -13,20 +13,17 @@
import import
eth/common, eth/common,
results, results,
../../aristo, "../.."/[aristo, aristo/aristo_walk],
../../aristo/[ "../.."/[kvt, kvt/kvt_init/memory_only, kvt/kvt_walk],
aristo_desc, aristo_nearby, aristo_path, aristo_tx, aristo_serialise,
aristo_walk],
../../kvt,
../../kvt/[kvt_desc, kvt_init, kvt_tx, kvt_walk],
".."/[base, base/base_desc], ".."/[base, base/base_desc],
./aristo_db/[common_desc, handlers_aristo, handlers_kvt] ./aristo_db/[common_desc, handlers_aristo, handlers_kvt]
import import
../../aristo/aristo_init/memory_only as aristo_memory_only ../../aristo/aristo_init/memory_only as aristo_memory_only
include # Caveat:
./aristo_db/aristo_replicate # additional direct include(s) -- not import(s) -- is placed near
# the end of this source file
# Annotation helper(s) # Annotation helper(s)
{.pragma: noRaise, gcsafe, raises: [].} {.pragma: noRaise, gcsafe, raises: [].}
@ -44,6 +41,8 @@ type
AristoCoreDbBE = ref object of CoreDbBackendRef AristoCoreDbBE = ref object of CoreDbBackendRef
proc newAristoVoidCoreDbRef*(): CoreDbRef {.noRaise.}
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private helpers # Private helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -72,28 +71,33 @@ proc txMethods(
commitFn: proc(ignore: bool): CoreDbRc[void] = commitFn: proc(ignore: bool): CoreDbRc[void] =
const info = "commitFn()" const info = "commitFn()"
? aTx.commit.toVoidRc(db, info) ? db.adbBase.api.commit(aTx).toVoidRc(db, info)
? kTx.commit.toVoidRc(db, info) ? db.kdbBase.api.commit(kTx).toVoidRc(db, info)
ok(), ok(),
rollbackFn: proc(): CoreDbRc[void] = rollbackFn: proc(): CoreDbRc[void] =
const info = "rollbackFn()" const info = "rollbackFn()"
? aTx.rollback.toVoidRc(db, info) ? db.adbBase.api.rollback(aTx).toVoidRc(db, info)
? kTx.rollback.toVoidRc(db, info) ? db.kdbBase.api.rollback(kTx).toVoidRc(db, info)
ok(), ok(),
disposeFn: proc(): CoreDbRc[void] = disposeFn: proc(): CoreDbRc[void] =
const info = "disposeFn()" const info = "disposeFn()"
if aTx.isTop: ? aTx.rollback.toVoidRc(db, info) if db.adbBase.api.isTop(aTx):
if kTx.isTop: ? kTx.rollback.toVoidRc(db, info) ? db.adbBase.api.rollback(aTx).toVoidRc(db, info)
if db.kdbBase.api.isTop(kTx):
? db.kdbBase.api.rollback(kTx).toVoidRc(db, info)
ok(), ok(),
safeDisposeFn: proc(): CoreDbRc[void] = safeDisposeFn: proc(): CoreDbRc[void] =
const info = "safeDisposeFn()" const info = "safeDisposeFn()"
if aTx.isTop: ? aTx.rollback.toVoidRc(db, info) if db.adbBase.api.isTop(aTx):
if kTx.isTop: ? kTx.rollback.toVoidRc(db, info) ? db.adbBase.api.rollback(aTx).toVoidRc(db, info)
if db.kdbBase.api.isTop(kTx):
? db.kdbBase.api.rollback(kTx).toVoidRc(db, info)
ok()) ok())
proc baseMethods( proc baseMethods(
db: AristoCoreDbRef; db: AristoCoreDbRef;
A: typedesc; A: typedesc;
@ -165,7 +169,7 @@ proc baseMethods(
getIdFn: proc(): CoreDbRc[CoreDxTxID] = getIdFn: proc(): CoreDbRc[CoreDxTxID] =
CoreDxTxID.notImplemented(db, "getIdFn()"), CoreDxTxID.notImplemented(db, "getIdFn()"),
captureFn: proc(flags: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] = newCaptureFn: proc(flags: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] =
CoreDxCaptRef.notImplemented(db, "capture()")) CoreDxCaptRef.notImplemented(db, "capture()"))
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -245,7 +249,7 @@ proc newAristoVoidCoreDbRef*(): CoreDbRef =
AristoDbVoid.init(kvt.VoidBackendRef, aristo.VoidBackendRef) AristoDbVoid.init(kvt.VoidBackendRef, aristo.VoidBackendRef)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public helpers for direct backend access # Public helpers, e.g. for direct backend access
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
func toAristoProfData*( func toAristoProfData*(
@ -256,6 +260,18 @@ func toAristoProfData*(
result.aristo = db.AristoCoreDbRef.adbBase.api.AristoApiProfRef.data result.aristo = db.AristoCoreDbRef.adbBase.api.AristoApiProfRef.data
result.kvt = db.AristoCoreDbRef.kdbBase.api.KvtApiProfRef.data result.kvt = db.AristoCoreDbRef.kdbBase.api.KvtApiProfRef.data
func toAristoApi*(dsc: CoreDxKvtRef): KvtApiRef =
doAssert not dsc.parent.isNil
doAssert dsc.parent.isAristo
if dsc.parent.isAristo:
return AristoCoreDbRef(dsc.parent).kdbBase.api
func toAristoApi*(dsc: CoreDxMptRef): AristoApiRef =
doAssert not dsc.parent.isNil
doAssert dsc.parent.isAristo
if dsc.parent.isAristo:
return AristoCoreDbRef(dsc.parent).adbBase.api
func toAristo*(be: CoreDbKvtBackendRef): KvtDbRef = func toAristo*(be: CoreDbKvtBackendRef): KvtDbRef =
if be.parent.isAristo: if be.parent.isAristo:
return be.AristoCoreDbKvtBE.kdb return be.AristoCoreDbKvtBE.kdb
@ -272,16 +288,33 @@ func toAristo*(be: CoreDbAccBackendRef): AristoDbRef =
# Public aristo iterators # Public aristo iterators
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
iterator aristoKvtPairs*(dsc: CoreDxKvtRef): (Blob,Blob) {.rlpRaise.} = include
let p = dsc.to(KvtDbRef).forkTop.valueOrApiError "aristoKvtPairs()" ./aristo_db/aristo_replicate
defer: discard p.forget()
# ------------------------
iterator aristoKvtPairsVoid*(dsc: CoreDxKvtRef): (Blob,Blob) {.rlpRaise.} =
let
api = dsc.toAristoApi()
p = api.forkTop(dsc.to(KvtDbRef)).valueOrApiError "aristoKvtPairs()"
defer: discard api.forget(p)
for (k,v) in kvt.VoidBackendRef.walkPairs p:
yield (k,v)
iterator aristoKvtPairsMem*(dsc: CoreDxKvtRef): (Blob,Blob) {.rlpRaise.} =
let
api = dsc.toAristoApi()
p = api.forkTop(dsc.to(KvtDbRef)).valueOrApiError "aristoKvtPairs()"
defer: discard api.forget(p)
for (k,v) in kvt.MemBackendRef.walkPairs p: for (k,v) in kvt.MemBackendRef.walkPairs p:
yield (k,v) yield (k,v)
iterator aristoMptPairs*(dsc: CoreDxMptRef): (Blob,Blob) {.noRaise.} = iterator aristoMptPairs*(dsc: CoreDxMptRef): (Blob,Blob) {.noRaise.} =
let mpt = dsc.to(AristoDbRef) let
api = dsc.toAristoApi()
mpt = dsc.to(AristoDbRef)
for (k,v) in mpt.rightPairs LeafTie(root: dsc.rootID): for (k,v) in mpt.rightPairs LeafTie(root: dsc.rootID):
yield (k.path.pathAsBlob, mpt.serialise(v).valueOr(EmptyBlob)) yield (api.pathAsBlob(k.path), api.serialise(mpt, v).valueOr(EmptyBlob))
iterator aristoReplicateMem*(dsc: CoreDxMptRef): (Blob,Blob) {.rlpRaise.} = iterator aristoReplicateMem*(dsc: CoreDxMptRef): (Blob,Blob) {.rlpRaise.} =
## Instantiation for `MemBackendRef` ## Instantiation for `MemBackendRef`

View File

@ -27,9 +27,10 @@ iterator aristoReplicate[T](
## ##
let let
root = dsc.rootID root = dsc.rootID
mpt = dsc.to(AristoDbRef) mpt = dsc.to(AristoDbRef)
p = mpt.forkTop.valueOrApiError "aristoReplicate()" api = dsc.toAristoApi()
defer: discard p.forget() p = api.forkTop(mpt).valueOrApiError "aristoReplicate()"
defer: discard api.forget(p)
for (vid,key,vtx,node) in T.replicate(p): for (vid,key,vtx,node) in T.replicate(p):
if key.len == 32: if key.len == 32:
yield (@key, node.encode) yield (@key, node.encode)

View File

@ -35,7 +35,7 @@ type
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
func isAristo*(be: CoreDbRef): bool = func isAristo*(be: CoreDbRef): bool =
be.dbType in {AristoDbMemory, AristoDbRocks} be.dbType in {AristoDbMemory, AristoDbRocks, AristoDbVoid}
func toStr*(n: VertexID): string = func toStr*(n: VertexID): string =
result = "$" result = "$"

View File

@ -471,7 +471,6 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
if rc.value: if rc.value:
# Trie has become empty # Trie has become empty
cMpt.root = VoidTrieID cMpt.root = VoidTrieID
ok() ok()
proc mptHasPath( proc mptHasPath(

View File

@ -87,13 +87,14 @@ proc `=destroy`(cKvt: var KvtChildDbObj) =
## Auto destructor ## Auto destructor
let let
base = cKvt.base base = cKvt.base
api = base.api
kvt = cKvt.kvt kvt = cKvt.kvt
if not kvt.isNil: if not kvt.isNil:
block body: block body:
# Do some heuristics to avoid duplicates: # Do some heuristics to avoid duplicates:
block addToBatchQueue: block addToBatchQueue:
if kvt != base.kdb: # not base descriptor? if kvt != base.kdb: # not base descriptor?
if base.api.level(kvt) == 0: # no transaction pending? if api.level(kvt) == 0: # no transaction pending?
break addToBatchQueue # add to destructor queue break addToBatchQueue # add to destructor queue
else: else:
break body # ignore `kvt` break body # ignore `kvt`
@ -132,7 +133,6 @@ proc persistent(
api = base.api api = base.api
db = base.parent db = base.parent
rc = api.stow(kvt) rc = api.stow(kvt)
# Note that `gc()` may call `persistent()` so there is no `base.gc()` here # Note that `gc()` may call `persistent()` so there is no `base.gc()` here
if rc.isOk: if rc.isOk:
ok() ok()
@ -155,7 +155,8 @@ proc forget(
if kvt != base.kdb: if kvt != base.kdb:
let let
db = base.parent db = base.parent
rc = base.api.forget(kvt) api = base.api
rc = api.forget(kvt)
if rc.isErr: if rc.isErr:
result = err(rc.error.toError(db, info)) result = err(rc.error.toError(db, info))
@ -206,7 +207,7 @@ proc kvtMethods(cKvt: KvtChildDbRef): CoreDbKvtFns =
): CoreDbRc[void] = ): CoreDbRc[void] =
let let
base = cKvt.base base = cKvt.base
rc = base.api.put(cKvt.kvt, k,v) rc = base.api.put(cKvt.kvt, k, v)
if rc.isErr: if rc.isErr:
return err(rc.error.toError(base.parent, info)) return err(rc.error.toError(base.parent, info))
ok() ok()
@ -348,12 +349,13 @@ proc newKvtHandler*(
let let
db = base.parent db = base.parent
api = base.api
(mode, kvt) = case saveMode: (mode, kvt) = case saveMode:
of TopShot: of TopShot:
(saveMode, ? base.kdb.forkTop.toRc(db, info)) (saveMode, ? api.forkTop(base.kdb).toRc(db, info))
of Companion: of Companion:
(saveMode, ? base.kdb.fork.toRc(db, info)) (saveMode, ? api.fork(base.kdb).toRc(db, info))
of Shared, AutoSave: of Shared, AutoSave:
if base.kdb.backend.isNil: if base.kdb.backend.isNil:
(Shared, base.kdb) (Shared, base.kdb)

View File

@ -11,6 +11,7 @@
{.push raises: [].} {.push raises: [].}
import import
std/tables,
eth/[common, rlp, trie/db, trie/hexary], eth/[common, rlp, trie/db, trie/hexary],
stew/byteutils, stew/byteutils,
results, results,
@ -27,6 +28,7 @@ type
kvt: CoreDxKvtRef ## Cache, no need to rebuild methods descriptor kvt: CoreDxKvtRef ## Cache, no need to rebuild methods descriptor
tdb: TrieDatabaseRef ## Descriptor reference copy captured with closures tdb: TrieDatabaseRef ## Descriptor reference copy captured with closures
top: LegacyCoreDxTxRef ## Top transaction (if any) top: LegacyCoreDxTxRef ## Top transaction (if any)
level: int ## Debugging
LegacyDbClose* = proc() {.gcsafe, raises: [].} LegacyDbClose* = proc() {.gcsafe, raises: [].}
## Custom destructor ## Custom destructor
@ -46,7 +48,7 @@ type
RecorderRef = ref object of RootRef RecorderRef = ref object of RootRef
flags: set[CoreDbCaptFlags] flags: set[CoreDbCaptFlags]
parent: TrieDatabaseRef parent: TrieDatabaseRef
logger: LegacyDbRef logger: TableRef[Blob,Blob]
appDb: LegacyDbRef appDb: LegacyDbRef
LegacyCoreDbTrie* = ref object of CoreDbTrieRef LegacyCoreDbTrie* = ref object of CoreDbTrieRef
@ -184,40 +186,50 @@ proc toAccount(
proc get(db: RecorderRef, key: openArray[byte]): Blob = proc get(db: RecorderRef, key: openArray[byte]): Blob =
## Mixin for `trieDB()` ## Mixin for `trieDB()`
result = db.logger.tdb.get(key) result = db.logger.getOrDefault @key
if result.len == 0: if result.len == 0:
result = db.parent.get(key) result = db.parent.get(key)
if result.len != 0: if result.len != 0:
db.logger.tdb.put(key, result) db.logger[@key] = result
proc put(db: RecorderRef, key, value: openArray[byte]) = proc put(db: RecorderRef, key, value: openArray[byte]) =
## Mixin for `trieDB()` ## Mixin for `trieDB()`
db.logger.tdb.put(key, value) db.logger[@key] = @value
if PersistPut in db.flags: if PersistPut in db.flags:
db.parent.put(key, value) db.parent.put(key, value)
proc contains(db: RecorderRef, key: openArray[byte]): bool = proc contains(db: RecorderRef, key: openArray[byte]): bool =
## Mixin for `trieDB()` ## Mixin for `trieDB()`
result = db.parent.contains(key) if db.logger.hasKey @key:
doAssert(db.logger.tdb.contains(key) == result) return true
if db.parent.contains key:
return true
proc del(db: RecorderRef, key: openArray[byte]) = proc del(db: RecorderRef, key: openArray[byte]) =
## Mixin for `trieDB()` ## Mixin for `trieDB()`
db.logger.tdb.del(key) db.logger.del @key
if PersistDel in db.flags: if PersistDel in db.flags:
db.parent.del(key) db.parent.del key
proc newRecorderRef( proc newRecorderRef(
tdb: TrieDatabaseRef; db: LegacyDbRef;
dbType: CoreDbType,
flags: set[CoreDbCaptFlags]; flags: set[CoreDbCaptFlags];
): RecorderRef = ): RecorderRef =
## Capture constuctor, uses `mixin` values from above ## Capture constuctor, uses `mixin` values from above
result = RecorderRef( result = RecorderRef(
flags: flags, flags: flags,
parent: tdb, parent: db.tdb,
logger: LegacyDbRef().init(LegacyDbMemory, newMemoryDB()).LegacyDbRef) logger: newTable[Blob,Blob]())
result.appDb = LegacyDbRef().init(dbType, trieDB result).LegacyDbRef let newDb = LegacyDbRef(
level: db.level+1,
trackLegaApi: db.trackLegaApi,
trackNewApi: db.trackNewApi,
trackLedgerApi: db.trackLedgerApi,
localDbOnly: db.localDbOnly,
profTab: db.profTab,
ledgerHook: db.ledgerHook)
# Note: the **mixin** magic happens in `trieDB()`
result.appDb = newDb.init(db.dbType, trieDB result).LegacyDbRef
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private database method function tables # Private database method function tables
@ -399,16 +411,19 @@ proc tidMethods(tid: TransactionID; tdb: TrieDatabaseRef): CoreDbTxIdFns =
tdb.shortTimeReadOnly(tid, action()) tdb.shortTimeReadOnly(tid, action())
ok()) ok())
proc cptMethods(cpt: RecorderRef): CoreDbCaptFns = proc cptMethods(cpt: RecorderRef; db: LegacyDbRef): CoreDbCaptFns =
CoreDbCaptFns( CoreDbCaptFns(
recorderFn: proc(): CoreDbRc[CoreDbRef] = recorderFn: proc(): CoreDbRef =
ok(cpt.appDb), cpt.appDb,
logDbFn: proc(): CoreDbRc[CoreDbRef] = logDbFn: proc(): TableRef[Blob,Blob] =
ok(cpt.logger), cpt.logger,
getFlagsFn: proc(): set[CoreDbCaptFlags] = getFlagsFn: proc(): set[CoreDbCaptFlags] =
cpt.flags) cpt.flags,
forgetFn: proc(): CoreDbRc[void] =
err(db.bless(NotImplemented, LegacyCoreDbError(ctx: "disposeFn()"))))
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private base methods (including constructors) # Private base methods (including constructors)
@ -513,8 +528,8 @@ proc baseMethods(
db.top.methods = db.top.txMethods() db.top.methods = db.top.txMethods()
ok(db.bless db.top), ok(db.bless db.top),
captureFn: proc(flgs: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] = newCaptureFn: proc(flgs: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] =
let fns = newRecorderRef(tdb, dbType, flgs).cptMethods let fns = db.newRecorderRef(flgs).cptMethods(db)
ok(db.bless CoreDxCaptRef(methods: fns))) ok(db.bless CoreDxCaptRef(methods: fns)))
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -990,28 +990,65 @@ proc newCapture*(
db: CoreDbRef; db: CoreDbRef;
flags: set[CoreDbCaptFlags] = {}; flags: set[CoreDbCaptFlags] = {};
): CoreDbRc[CoreDxCaptRef] = ): CoreDbRc[CoreDxCaptRef] =
## Constructor ## Trace constructor providing an overlay on top of the argument database
db.setTrackNewApi BaseCaptureFn ## `db`. This overlay provides a replacement database handle that can be
result = db.methods.captureFn flags ## retrieved via `db.recorder()` (which can in turn be ovelayed.) While
## running the overlay stores data in a log-table which can be retrieved
## via `db.logDb()`.
##
## Caveat:
## The original database argument `db` should not be used while the tracer
## is active (i.e. exists as overlay). The behaviour for this situation
## is undefined and depends on the backend implementation of the tracer.
##
db.setTrackNewApi BaseNewCaptureFn
result = db.methods.newCaptureFn flags
db.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result db.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result
proc recorder*(cp: CoreDxCaptRef): CoreDbRc[CoreDbRef] = proc recorder*(cpt: CoreDxCaptRef): CoreDbRef =
## Getter ## Getter, returns a tracer replacement handle to be used as new database.
cp.setTrackNewApi CptRecorderFn ## It records every action like fetch, store, hasKey, hasPath and delete.
result = cp.methods.recorderFn() ## This descriptor can be superseded by a new overlay tracer (using
cp.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result ## `newCapture()`, again.)
##
## Caveat:
## Unless the desriptor `cpt` referes to the top level overlay tracer, the
## result is undefined and depends on the backend implementation of the
## tracer.
##
cpt.setTrackNewApi CptRecorderFn
result = cpt.methods.recorderFn()
cpt.ifTrackNewApi: debug newApiTxt, ctx, elapsed
proc logDb*(cp: CoreDxCaptRef): CoreDbRc[CoreDbRef] = proc logDb*(cp: CoreDxCaptRef): TableRef[Blob,Blob] =
## Getter, returns the logger table for the overlay tracer database.
##
## Caveat:
## Unless the desriptor `cpt` referes to the top level overlay tracer, the
## result is undefined and depends on the backend implementation of the
## tracer.
##
cp.setTrackNewApi CptLogDbFn cp.setTrackNewApi CptLogDbFn
result = cp.methods.logDbFn() result = cp.methods.logDbFn()
cp.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result cp.ifTrackNewApi: debug newApiTxt, ctx, elapsed
proc flags*(cp: CoreDxCaptRef): set[CoreDbCaptFlags] = proc flags*(cp: CoreDxCaptRef):set[CoreDbCaptFlags] =
## Getter ## Getter
cp.setTrackNewApi CptFlagsFn cp.setTrackNewApi CptFlagsFn
result = cp.methods.getFlagsFn() result = cp.methods.getFlagsFn()
cp.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result cp.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result
proc forget*(cp: CoreDxCaptRef): CoreDbRc[void] =
## Explicitely stop recording the current tracer instance. If this call was
## successful, the the database argument `db` used when starting the trace
## with `newCapture()` will be fully operational, again. This will also
## implicitely take place when the`NIM` garbage collector recycles an
## abondoned capture descriptor.
##
cp.setTrackNewApi CptForgetFn
result = cp.methods.forgetFn()
cp.ifTrackNewApi: debug newApiTxt, ctx, elapsed, result
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public methods, legacy API # Public methods, legacy API
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -1241,12 +1278,12 @@ when ProvideLegacyAPI:
proc recorder*(cp: CoreDbCaptRef): CoreDbRef = proc recorder*(cp: CoreDbCaptRef): CoreDbRef =
cp.setTrackLegaApi LegaCptRecorderFn cp.setTrackLegaApi LegaCptRecorderFn
result = cp.distinctBase.recorder().expect $ctx result = cp.distinctBase.recorder()
cp.ifTrackLegaApi: debug legaApiTxt, ctx, elapsed cp.ifTrackLegaApi: debug legaApiTxt, ctx, elapsed
proc logDb*(cp: CoreDbCaptRef): CoreDbRef = proc logDb*(cp: CoreDbCaptRef): TableRef[Blob,Blob] =
cp.setTrackLegaApi LegaCptLogDbFn cp.setTrackLegaApi LegaCptLogDbFn
result = cp.distinctBase.logDb().expect $ctx result = cp.distinctBase.logDb()
cp.ifTrackLegaApi: debug legaApiTxt, ctx, elapsed cp.ifTrackLegaApi: debug legaApiTxt, ctx, elapsed
proc flags*(cp: CoreDbCaptRef): set[CoreDbCaptFlags] = proc flags*(cp: CoreDbCaptRef): set[CoreDbCaptFlags] =

View File

@ -40,13 +40,13 @@ type
AnyBackendFn = "any/backend" AnyBackendFn = "any/backend"
AnyIsPruningFn = "any/isPruning" AnyIsPruningFn = "any/isPruning"
BaseCaptureFn = "newCapture"
BaseDbTypeFn = "dbType" BaseDbTypeFn = "dbType"
BaseFinishFn = "finish" BaseFinishFn = "finish"
BaseGetTrieFn = "getTrie" BaseGetTrieFn = "getTrie"
BaseLegacySetupFn = "compensateLegacySetup" BaseLegacySetupFn = "compensateLegacySetup"
BaseLevelFn = "level" BaseLevelFn = "level"
BaseNewAccFn = "newAccMpt" BaseNewAccFn = "newAccMpt"
BaseNewCaptureFn = "newCapture"
BaseNewKvtFn = "newKvt" BaseNewKvtFn = "newKvt"
BaseNewMptFn = "newMpt" BaseNewMptFn = "newMpt"
BaseNewTxFn = "newTransaction" BaseNewTxFn = "newTransaction"
@ -54,6 +54,7 @@ type
CptFlagsFn = "cpt/flags" CptFlagsFn = "cpt/flags"
CptLogDbFn = "cpt/logDb" CptLogDbFn = "cpt/logDb"
CptRecorderFn = "cpt/recorder" CptRecorderFn = "cpt/recorder"
CptForgetFn = "cpt/forget"
ErrorPrintFn = "$$" ErrorPrintFn = "$$"
EthAccRecastFn = "recast" EthAccRecastFn = "recast"
@ -197,6 +198,9 @@ proc toStr*(rc: CoreDbRc[Hash256]): string =
proc toStr*(rc: CoreDbRc[CoreDbTrieRef]): string = proc toStr*(rc: CoreDbRc[CoreDbTrieRef]): string =
if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")" if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")"
proc toStr*(rc: CoreDbRc[set[CoreDbCaptFlags]]): string =
if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")"
proc toStr*(rc: CoreDbRc[Account]): string = proc toStr*(rc: CoreDbRc[Account]): string =
if rc.isOk: "ok(Account)" else: "err(" & rc.error.toStr & ")" if rc.isOk: "ok(Account)" else: "err(" & rc.error.toStr & ")"
@ -205,6 +209,7 @@ proc toStr[T](rc: CoreDbRc[T]; ifOk: static[string]): string =
proc toStr*(rc: CoreDbRc[CoreDbRef]): string = rc.toStr "db" proc toStr*(rc: CoreDbRc[CoreDbRef]): string = rc.toStr "db"
proc toStr*(rc: CoreDbRc[CoreDbAccount]): string = rc.toStr "acc" proc toStr*(rc: CoreDbRc[CoreDbAccount]): string = rc.toStr "acc"
proc toStr*(rc: CoreDbRc[CoreDxKvtRef]): string = rc.toStr "kvt"
proc toStr*(rc: CoreDbRc[CoreDxTxID]): string = rc.toStr "txId" proc toStr*(rc: CoreDbRc[CoreDxTxID]): string = rc.toStr "txId"
proc toStr*(rc: CoreDbRc[CoreDxTxRef]): string = rc.toStr "tx" proc toStr*(rc: CoreDbRc[CoreDxTxRef]): string = rc.toStr "tx"
proc toStr*(rc: CoreDbRc[CoreDxCaptRef]): string = rc.toStr "capt" proc toStr*(rc: CoreDbRc[CoreDxCaptRef]): string = rc.toStr "capt"

View File

@ -11,6 +11,7 @@
{.push raises: [].} {.push raises: [].}
import import
std/tables,
eth/common, eth/common,
results, results,
../../aristo/aristo_profile ../../aristo/aristo_profile
@ -65,6 +66,7 @@ type
HashNotAvailable HashNotAvailable
TrieLocked TrieLocked
StorageFailed StorageFailed
NotImplemented
CoreDbSubTrie* = enum CoreDbSubTrie* = enum
StorageTrie = 0 StorageTrie = 0
@ -110,8 +112,9 @@ type
): CoreDbRc[CoreDxAccRef] {.noRaise.} ): CoreDbRc[CoreDxAccRef] {.noRaise.}
CoreDbBaseTxGetIdFn* = proc(): CoreDbRc[CoreDxTxID] {.noRaise.} CoreDbBaseTxGetIdFn* = proc(): CoreDbRc[CoreDxTxID] {.noRaise.}
CoreDbBaseTxBeginFn* = proc(): CoreDbRc[CoreDxTxRef] {.noRaise.} CoreDbBaseTxBeginFn* = proc(): CoreDbRc[CoreDxTxRef] {.noRaise.}
CoreDbBaseCaptFn* = CoreDbBaseNewCaptFn* =
proc(flgs: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] {.noRaise.} proc(flgs: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] {.noRaise.}
CoreDbBaseGetCaptFn* = proc(): CoreDbRc[CoreDxCaptRef] {.noRaise.}
CoreDbBaseFns* = object CoreDbBaseFns* = object
verifyFn*: CoreDbBaseVerifyFn verifyFn*: CoreDbBaseVerifyFn
@ -137,7 +140,7 @@ type
beginFn*: CoreDbBaseTxBeginFn beginFn*: CoreDbBaseTxBeginFn
# capture/tracer constructors # capture/tracer constructors
captureFn*: CoreDbBaseCaptFn newCaptureFn*: CoreDbBaseNewCaptFn
# -------------------------------------------------- # --------------------------------------------------
@ -254,14 +257,16 @@ type
# -------------------------------------------------- # --------------------------------------------------
# Sub-descriptor: capture recorder methods # Sub-descriptor: capture recorder methods
# -------------------------------------------------- # --------------------------------------------------
CoreDbCaptRecorderFn* = proc(): CoreDbRc[CoreDbRef] {.noRaise.} CoreDbCaptRecorderFn* = proc(): CoreDbRef {.noRaise.}
CoreDbCaptLogDbFn* = proc(): CoreDbRc[CoreDbRef] {.noRaise.} CoreDbCaptLogDbFn* = proc(): TableRef[Blob,Blob] {.noRaise.}
CoreDbCaptFlagsFn* = proc(): set[CoreDbCaptFlags] {.noRaise.} CoreDbCaptFlagsFn* = proc(): set[CoreDbCaptFlags] {.noRaise.}
CoreDbCaptForgetFn* = proc(): CoreDbRc[void] {.noRaise.}
CoreDbCaptFns* = object CoreDbCaptFns* = object
recorderFn*: CoreDbCaptRecorderFn recorderFn*: CoreDbCaptRecorderFn
logDbFn*: CoreDbCaptLogDbFn logDbFn*: CoreDbCaptLogDbFn
getFlagsFn*: CoreDbCaptFlagsFn getFlagsFn*: CoreDbCaptFlagsFn
forgetFn*: CoreDbCaptForgetFn
# -------------------------------------------------- # --------------------------------------------------
# Production descriptors # Production descriptors

View File

@ -45,7 +45,7 @@ proc validateMethodsDesc(base: CoreDbBaseFns) =
doAssert not base.newAccFn.isNil doAssert not base.newAccFn.isNil
doAssert not base.getIdFn.isNil doAssert not base.getIdFn.isNil
doAssert not base.beginFn.isNil doAssert not base.beginFn.isNil
doAssert not base.captureFn.isNil doAssert not base.newCaptureFn.isNil
proc validateMethodsDesc(kvt: CoreDbKvtFns) = proc validateMethodsDesc(kvt: CoreDbKvtFns) =
doAssert not kvt.backendFn.isNil doAssert not kvt.backendFn.isNil
@ -121,6 +121,7 @@ proc validateMethodsDesc(cpt: CoreDxCaptRef) =
doAssert not cpt.parent.isNil doAssert not cpt.parent.isNil
doAssert not cpt.methods.recorderFn.isNil doAssert not cpt.methods.recorderFn.isNil
doAssert not cpt.methods.getFlagsFn.isNil doAssert not cpt.methods.getFlagsFn.isNil
doAssert not cpt.methods.forgetFn.isNil
proc validateMethodsDesc(tx: CoreDxTxRef) = proc validateMethodsDesc(tx: CoreDxTxRef) =
doAssert not tx.isNil doAssert not tx.isNil

View File

@ -45,7 +45,10 @@ iterator pairs*(kvt: CoreDxKvtRef): (Blob, Blob) {.apiRaise.} =
for k,v in kvt.legaKvtPairs(): for k,v in kvt.legaKvtPairs():
yield (k,v) yield (k,v)
of AristoDbMemory: of AristoDbMemory:
for k,v in kvt.aristoKvtPairs(): for k,v in kvt.aristoKvtPairsMem():
yield (k,v)
of AristoDbVoid:
for k,v in kvt.aristoKvtPairsVoid():
yield (k,v) yield (k,v)
else: else:
raiseAssert: "Unsupported database type: " & $kvt.parent.dbType raiseAssert: "Unsupported database type: " & $kvt.parent.dbType

View File

@ -17,7 +17,7 @@ import
./backend/[aristo_db, legacy_db] ./backend/[aristo_db, legacy_db]
import import
#./core_apps_legacy as core_apps #./core_apps_legacy as core_apps -- avoid
./core_apps_newapi as core_apps ./core_apps_newapi as core_apps
import import
./base except bless ./base except bless

View File

@ -18,6 +18,10 @@ import
../aristo/aristo_profile, ../aristo/aristo_profile,
"."/[kvt_desc, kvt_desc/desc_backend, kvt_init, kvt_tx, kvt_utils] "."/[kvt_desc, kvt_desc/desc_backend, kvt_init, kvt_tx, kvt_utils]
const
AutoValidateApiHooks = defined(release).not
## No validation needed for production suite.
# Annotation helper(s) # Annotation helper(s)
{.pragma: noRaise, gcsafe, raises: [].} {.pragma: noRaise, gcsafe, raises: [].}
@ -101,35 +105,85 @@ type
data*: KvtDbProfListRef data*: KvtDbProfListRef
be*: BackendRef be*: BackendRef
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
when AutoValidateApiHooks:
proc validate(api: KvtApiObj|KvtApiRef) =
doAssert not api.commit.isNil
doAssert not api.del.isNil
doAssert not api.finish.isNil
doAssert not api.forget.isNil
doAssert not api.fork.isNil
doAssert not api.forkTop.isNil
doAssert not api.get.isNil
doAssert not api.hasKey.isNil
doAssert not api.isTop.isNil
doAssert not api.level.isNil
doAssert not api.nForked.isNil
doAssert not api.put.isNil
doAssert not api.rollback.isNil
doAssert not api.stow.isNil
doAssert not api.txBegin.isNil
doAssert not api.txTop.isNil
proc validate(prf: KvtApiProfRef; be: BackendRef) =
prf.KvtApiRef.validate
doAssert not prf.data.isNil
if not be.isNil:
doAssert not prf.be.isNil
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public API constructors # Public API constructors
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
func init*(api: var KvtApiObj) = func init*(api: var KvtApiObj) =
api.commit = commit when AutoValidateApiHooks:
api.del = del api.reset
api.finish = finish api.commit = commit
api.forget = forget api.del = del
api.fork = fork api.finish = finish
api.forkTop = forkTop api.forget = forget
api.get = get api.fork = fork
api.hasKey = hasKey api.forkTop = forkTop
api.isTop = isTop api.get = get
api.level = level api.hasKey = hasKey
api.nForked = nForked api.isTop = isTop
api.put = put api.level = level
api.rollback = rollback api.nForked = nForked
api.stow = stow api.put = put
api.txBegin = txBegin api.rollback = rollback
api.txTop = txTop api.stow = stow
api.txBegin = txBegin
api.txTop = txTop
when AutoValidateApiHooks:
api.validate
func init*(T: type KvtApiRef): T = func init*(T: type KvtApiRef): T =
result = new T result = new T
result[].init() result[].init()
func dup*(api: KvtApiRef): KvtApiRef = func dup*(api: KvtApiRef): KvtApiRef =
new result result = KvtApiRef(
result[] = api[] commit: api.commit,
del: api.del,
finish: api.finish,
forget: api.forget,
fork: api.fork,
forkTop: api.forkTop,
get: api.get,
hasKey: api.hasKey,
isTop: api.isTop,
level: api.level,
nForked: api.nForked,
put: api.put,
rollback: api.rollback,
stow: api.stow,
txBegin: api.txBegin,
txTop: api.txTop)
when AutoValidateApiHooks:
api.validate
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public profile API constructor # Public profile API constructor
@ -251,6 +305,9 @@ func init*(
KvtApiProfBePutEndFn.profileRunner: KvtApiProfBePutEndFn.profileRunner:
result = be.putEndFn(a) result = be.putEndFn(a)
when AutoValidateApiHooks:
profApi.validate be
profApi profApi
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
# Nimbus # Nimbus
# Copyright (c) 2023 Status Research & Development GmbH # Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)
@ -8,6 +8,13 @@
# at your option. This file may not be copied, modified, or distributed except # at your option. This file may not be copied, modified, or distributed except
# according to those terms. # according to those terms.
## Populates the tracer API methods
## ================================
##
## The module name `legacy_tracer` is probably a misnomer as it also works
## with the new APIs for `CoreDb` and `Ledger`.
##
import import
std/[json, sets, strutils, hashes], std/[json, sets, strutils, hashes],
eth/common/eth_types, eth/common/eth_types,

View File

@ -86,6 +86,12 @@ proc dumpMemoryDB*(node: JsonNode, db: CoreDbRef) =
n[k.toHex(false)] = %v n[k.toHex(false)] = %v
node["state"] = n node["state"] = n
proc dumpMemoryDB*(node: JsonNode, kvt: TableRef[Blob,Blob]) =
var n = newJObject()
for k, v in kvt:
n[k.toHex(false)] = %v
node["state"] = n
proc dumpMemoryDB*(node: JsonNode, capture: CoreDbCaptRef) = proc dumpMemoryDB*(node: JsonNode, capture: CoreDbCaptRef) =
node.dumpMemoryDB capture.logDb node.dumpMemoryDB capture.logDb
@ -149,7 +155,7 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
break break
# internal transactions: # internal transactions:
var stateBefore = AccountsCache.init(capture.recorder, beforeRoot, com.pruneTrie) var stateBefore = AccountsLedgerRef.init(capture.recorder, beforeRoot, com.pruneTrie)
for idx, acc in tracedAccountsPairs(tracerInst): for idx, acc in tracedAccountsPairs(tracerInst):
before.captureAccount(stateBefore, acc, internalTxName & $idx) before.captureAccount(stateBefore, acc, internalTxName & $idx)
@ -180,7 +186,7 @@ proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpS
var var
before = newJArray() before = newJArray()
after = newJArray() after = newJArray()
stateBefore = AccountsCache.init(capture.recorder, parent.stateRoot, com.pruneTrie) stateBefore = AccountsLedgerRef.init(capture.recorder, parent.stateRoot, com.pruneTrie)
for idx, tx in body.transactions: for idx, tx in body.transactions:
let sender = tx.getSender let sender = tx.getSender

View File

@ -1,5 +1,5 @@
# Nimbus # Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH # Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)
@ -9,8 +9,9 @@
# according to those terms. # according to those terms.
import import
std/[os, sequtils, strformat, strutils, tables], std/[os, strformat, strutils, tables],
chronicles, chronicles,
stew/byteutils,
../nimbus/db/ledger, ../nimbus/db/ledger,
../nimbus/common/common, ../nimbus/common/common,
../nimbus/core/chain, ../nimbus/core/chain,
@ -62,14 +63,14 @@ proc findFilePath(file: string): string =
return path return path
proc pp*(a: EthAddress): string = proc pp*(a: EthAddress): string =
a.mapIt(it.toHex(2)).join[32 .. 39].toLowerAscii a.toHex[32 .. 39].toLowerAscii
proc pp*(tx: Transaction): string = proc pp*(tx: Transaction): string =
# "(" & tx.ecRecover.value.pp & "," & $tx.nonce & ")" # "(" & tx.ecRecover.value.pp & "," & $tx.nonce & ")"
"(" & tx.getSender.pp & "," & $tx.nonce & ")" "(" & tx.getSender.pp & "," & $tx.nonce & ")"
proc pp*(h: KeccakHash): string = proc pp*(h: KeccakHash): string =
h.data.mapIt(it.toHex(2)).join[52 .. 63].toLowerAscii h.data.toHex[52 .. 63].toLowerAscii
proc pp*(tx: Transaction; vmState: BaseVMState): string = proc pp*(tx: Transaction; vmState: BaseVMState): string =
let address = tx.getSender let address = tx.getSender

View File

@ -24,7 +24,7 @@ type
when CoreDbEnableApiProfiling: when CoreDbEnableApiProfiling:
import import
std/[algorithm, sequtils, strutils], std/sequtils,
../../nimbus/db/aristo/[aristo_api, aristo_profile], ../../nimbus/db/aristo/[aristo_api, aristo_profile],
../../nimbus/db/kvt/kvt_api ../../nimbus/db/kvt/kvt_api
var var
@ -35,7 +35,7 @@ when CoreDbEnableApiProfiling:
when LedgerEnableApiProfiling: when LedgerEnableApiProfiling:
when not CoreDbEnableApiProfiling: when not CoreDbEnableApiProfiling:
import import
std/[algorithm, sequtils, strutils] std/sequtils
var var
ldgProfData: LedgerProfListRef ldgProfData: LedgerProfListRef
@ -112,32 +112,6 @@ template stopLoggingAfter(noisy: bool; code: untyped) =
defer: noisy.stopLogging() defer: noisy.stopLogging()
code code
# --------------
when CoreDbEnableApiProfiling or
LedgerEnableApiProfiling:
proc profilingPrinter(
data: AristoDbProfListRef;
names: openArray[string];
header: string;
indent = 4;
): string =
if not data.isNil:
let
pfx = indent.toPfx
pfx2 = pfx & " "
result = header & ":"
result &= "\n" & pfx & "by accumulated duration per procedure"
for (ela,fns) in data.byElapsed:
result &= pfx2 & ela.pp & ": " & fns.mapIt(
names[it] & data.stats(it).pp(true)).sorted.join(", ")
result &= "\n" & pfx & "by number of visits"
for (count,fns) in data.byVisits:
result &= pfx2 & $count & ": " & fns.mapIt(
names[it] & data.stats(it).pp).sorted.join(", ")
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public test function # Public test function
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -9,16 +9,37 @@
# distributed except according to those terms. # distributed except according to those terms.
import import
std/[os, sequtils, times], std/[algorithm, os, sequtils],
eth/common, eth/common,
results, results,
../../nimbus/utils/prettify, ../../nimbus/utils/prettify,
../../nimbus/db/aristo/aristo_profile,
../replay/pp ../replay/pp
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private helpers # Private helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
func pp(
w: AristoDbProfStats,
spaced = false;
count = true;
): string =
result = "("
if w.count < 2:
result &= w.mean.pp
else:
let space = if spaced: " " else: ""
if count:
result &= $w.count
else:
result &= w.total.pp
result &= "," & space & w.mean.pp
if w.devRatio != 0.0: # when all items are the same
let dr = if 0.2 < w.devRatio: w.devRatio.toPC(0) else: w.devRatio.toPC(1)
result &= space & "±" & space & dr
result &= ")"
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public pretty printing # Public pretty printing
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -35,21 +56,6 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
proc toPfx*(indent: int): string = proc toPfx*(indent: int): string =
"\n" & " ".repeat(indent) "\n" & " ".repeat(indent)
func pp*(
w: tuple[n: int, mean: Duration, stdDev: Duration, devRatio: float];
spaced = false;
): string =
result = "("
if w.n < 2:
result &= w.mean.pp
else:
let space = if spaced: " " else: ""
result &= $w.n & "," & space & w.mean.pp
if w.devRatio != 0.0: # when all items are the same
let dr = if 0.2 < w.devRatio: w.devRatio.toPC(0) else: w.devRatio.toPC(1)
result &= space & "±" & space & dr
result &= ")"
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public helpers # Public helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -72,6 +78,29 @@ proc findFilePathHelper*(
echo "*** File not found \"", file, "\"." echo "*** File not found \"", file, "\"."
err() err()
proc profilingPrinter*(
data: AristoDbProfListRef;
names: openArray[string];
header: string;
indent = 4;
): string =
if not data.isNil:
let
pfx = indent.toPfx
pfx2 = pfx & " "
result = header & ":"
result &= "\n" & pfx & "by accumulated duration per procedure"
for (ela,fns) in data.byElapsed:
result &= pfx2 & ela.pp & ": " & fns.mapIt(
names[it] & data.stats(it).pp(spaced=true)).sorted.join(", ")
result &= "\n" & pfx & "by number of visits"
for (count,fns) in data.byVisits:
result &= pfx2 & $count & ": " & fns.mapIt(
names[it] & data.stats(it).pp(count=false)).sorted.join(", ")
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------