Core db+aristo provides tracer functionality (#2089)
* Aristo: Provide descriptor fork based on search in transaction stack
  details: Try to find the tx that has a particular pair `(vertex-id,hash-key)`,
  and by extension try the filter and backend if the former fails.
* Cleanup & docu
* CoreDb+Aristo: Implement context re-position to earlier in-memory state
  why: It is an easy way to explore how there can be concurrent access to the
  same backend storage DB with different view states. This one can access an
  earlier state from the transaction stack.
* CoreDb+Aristo: Populate tracer stubs with real functionality
* Update `tracer.nim` to new API
  why: Legacy API does not sufficiently support `Aristo`
* Fix logging problems in tracer
  details: Debug logging turned off by default
* Fix function prototypes
* Add Copyright header
* Add tables import
  why: For older compiler versions on CI
This commit is contained in:
parent
30277be1f3
commit
8ed40c78e0
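
In outline, the new tracer is driven through the `CoreDb` capture API. A
minimal sketch of the intended usage (hypothetical helper; it leans on the
`newCapture`/`logDb`/`forget` methods wired up in this diff and assumes a
`newCapture` overload with default flags):

    import std/tables, stew/byteutils

    proc traceSomething(db: CoreDbRef) =
      # Start recording: all KVT/MPT traffic is mirrored into a log table.
      let cpt = db.newCapture().valueOr:
        raiseAssert "no tracer: " & $$error
      # ... run block processing or queries against `db` here ...
      for key, val in cpt.logDb().pairs:
        echo key.toHex, " -> ", val.toHex   # inspect recorded kv pairs
      cpt.forget()                          # stop recording, restore APIs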
@@ -76,8 +76,12 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
     toBlock = headers[^1].blockNumber

   for i in 0 ..< headers.len:
-    let
-      (header, body) = (headers[i], bodies[i])
+    let (header, body) = (headers[i], bodies[i])
+
+    # This transaction keeps the current state open for inspection
+    # if an error occurs (as needed for `Aristo`).
+    let lapTx = c.db.beginTransaction()
+    defer: lapTx.dispose()

     c.com.hardForkTransition(header)

@@ -111,11 +115,8 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
     when not defined(release):
       if validationResult == ValidationResult.Error and
          body.transactions.calcTxRoot == header.txRoot:
-        if c.com.ledgerType == LegacyAccountsCache:
-          dumpDebuggingMetaData(c.com, header, body, vmState)
-          warn "Validation error. Debugging metadata dumped."
-        else:
-          warn "Validation error", blockNumber=header.blockNumber
+        vmState.dumpDebuggingMetaData(header, body)
+        warn "Validation error. Debugging metadata dumped."

     if validationResult != ValidationResult.OK:
       return validationResult

@@ -168,6 +169,9 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
       # between eth_blockNumber and eth_syncing
       c.com.syncCurrent = header.blockNumber

+    # Done with this block
+    lapTx.commit()
+
   dbTx.commit()

 # ------------------------------------------------------------------------------
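The lap transaction bracket above, in isolation (sketch of the design choice:
`dispose()` is assumed to be harmless after `commit()`, so an early `return`
rolls the current block back while the success path keeps it):

    let lapTx = c.db.beginTransaction()
    defer: lapTx.dispose()   # rollback on any early exit
    # ... process and validate one block ...
    lapTx.commit()           # keep the block's changes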
@@ -131,6 +131,23 @@ type
       ##
       ## Use `aristo_desc.forget()` to clean up this descriptor.

+  AristoApiForkWithFn* =
+    proc(db: AristoDbRef;
+         vid: VertexID;
+         key: HashKey;
+         dontHashify = false;
+        ): Result[AristoDbRef,AristoError]
+      {.noRaise.}
+      ## Find the transaction where the vertex with ID `vid` exists and has
+      ## the Merkle hash key `key`. If there is no transaction available,
+      ## search in the filter and then in the backend.
+      ##
+      ## If the above procedure succeeds, a new descriptor is forked with
+      ## exactly one transaction which contains all the bottom layers up
+      ## until the layer where the `(vid,key)` pair is found. In case the
+      ## pair was found on the filter or the backend, this transaction is
+      ## empty.

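A sketch of how this hook is meant to be exercised through the function table
(`api`, `db` and `stateKey` are assumed to be set up elsewhere):

    # Fork a one-transaction view positioned at the layer where the pair
    # (VertexID(1), stateKey) is found; dispose of it again with forget().
    let clone = api.forkWith(db, VertexID(1), stateKey).valueOr:
      raiseAssert "no layer with this (vid,key) pair: " & $error
    # ... read-only queries against `clone` ...
    api.forget(clone).isOkOr:
      raiseAssert "forget() failed: " & $error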
  AristoApiGetKeyFn* =
    proc(db: AristoDbRef;
         vid: VertexID;

@@ -203,8 +220,8 @@ type
         accPath: PathID;
        ): Result[bool,AristoError]
      {.noRaise.}
      ## Variant of `mergePayload()` where the `data` argument will be
      ## converted to a `RawBlob` type `PayloadRef` value.

  AristoApiMergePayloadFn* =
    proc(db: AristoDbRef;

@@ -214,27 +231,43 @@ type
         accPath = VOID_PATH_ID;
        ): Result[bool,AristoError]
      {.noRaise.}
      ## Merge the argument key-value-pair `(path,payload)` into the top level
      ## vertex table of the database `db`.
      ##
      ## For a `root` argument with `VertexID` greater than `LEAST_FREE_VID`,
      ## the sub-tree generated by `payload.root` is considered a storage trie
      ## linked to an account leaf referred to by a valid `accPath` (i.e.
      ## different from `VOID_PATH_ID`.) In that case, an account must exist.
      ## If there is payload of type `AccountData`, its `storageID` field must
      ## be unset or equal to the `payload.root` vertex ID.

  AristoApiPathAsBlobFn* =
    proc(tag: PathID;
        ): Blob
      {.noRaise.}
      ## Converts the `tag` argument to a sequence of an even number of
      ## nibbles represented by a `Blob`. If the argument `tag` represents
      ## an odd number of nibbles, a zero nibble is appended.
      ##
      ## This function is useful only if there is a tacit agreement that all
      ## paths used to index database leaf values can be represented as
      ## `Blob`, i.e. `PathID` type paths with an even number of nibbles.

  AristoApiReCentreFn* =
    proc(db: AristoDbRef;
        ) {.noRaise.}
      ## Re-focus the `db` argument descriptor so that it becomes the centre.
      ## Nothing is done if the `db` descriptor is the centre already.
      ##
      ## With several descriptors accessing the same backend database there is
      ## a single one that has write permission for the backend (regardless
      ## whether there is a backend, at all.) The descriptor entity with write
      ## permission is called *the centre*.
      ##
      ## After invoking `reCentre()`, the argument database `db` can only be
      ## destructed by `finish()` which also destructs all other descriptors
      ## accessing the same backend database. Descriptors where `isCentre()`
      ## returns `false` must be destructed individually with `forget()`.

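The intended write-permission hand-over, sketched (assumed usage; `db` is the
current centre and `clone` a fork of the same backend):

    api.reCentre(clone)          # `clone` takes over backend write permission
    # ... updates through `clone` may now be persisted ...
    api.reCentre(db)             # hand write permission back to `db`
    api.forget(clone).isOkOr:    # non-centre forks are disposed individually
      raiseAssert "forget() failed: " & $error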
  AristoApiRollbackFn* =
    proc(tx: AristoTxRef;

@@ -326,6 +359,7 @@ type
    finish*: AristoApiFinishFn
    forget*: AristoApiForgetFn
    forkTop*: AristoApiForkTopFn
+   forkWith*: AristoApiForkWithFn
    getKey*: AristoApiGetKeyFn
    getKeyRc*: AristoApiGetKeyRcFn
    hashify*: AristoApiHashifyFn
@@ -337,6 +371,7 @@ type
    merge*: AristoApiMergeFn
    mergePayload*: AristoApiMergePayloadFn
    pathAsBlob*: AristoApiPathAsBlobFn
+   reCentre*: AristoApiReCentreFn
    rollback*: AristoApiRollbackFn
    serialise*: AristoApiSerialiseFn
    stow*: AristoApiStowFn
@@ -357,6 +392,7 @@ type
    AristoApiProfFinishFn = "finish"
    AristoApiProfForgetFn = "forget"
    AristoApiProfForkTopFn = "forkTop"
+   AristoApiProfForkWithFn = "forkWith"
    AristoApiProfGetKeyFn = "getKey"
    AristoApiProfGetKeyRcFn = "getKeyRc"
    AristoApiProfHashifyFn = "hashify"
@@ -368,6 +404,7 @@ type
    AristoApiProfMergeFn = "merge"
    AristoApiProfMergePayloadFn = "mergePayload"
    AristoApiProfPathAsBlobFn = "pathAsBlob"
+   AristoApiProfReCentreFn = "reCentre"
    AristoApiProfRollbackFn = "rollback"
    AristoApiProfSerialiseFn = "serialise"
    AristoApiProfStowFn = "stow"
@@ -398,6 +435,7 @@ when AutoValidateApiHooks:
    doAssert not api.finish.isNil
    doAssert not api.forget.isNil
    doAssert not api.forkTop.isNil
+   doAssert not api.forkWith.isNil
    doAssert not api.getKey.isNil
    doAssert not api.getKeyRc.isNil
    doAssert not api.hashify.isNil
@@ -409,6 +447,7 @@ when AutoValidateApiHooks:
    doAssert not api.merge.isNil
    doAssert not api.mergePayload.isNil
    doAssert not api.pathAsBlob.isNil
+   doAssert not api.reCentre.isNil
    doAssert not api.rollback.isNil
    doAssert not api.serialise.isNil
    doAssert not api.stow.isNil
@@ -449,6 +488,7 @@ func init*(api: var AristoApiObj) =
   api.finish = finish
   api.forget = forget
   api.forkTop = forkTop
+  api.forkWith = forkWith
   api.getKey = getKey
   api.getKeyRc = getKeyRc
   api.hashify = hashify
@@ -460,6 +500,7 @@ func init*(api: var AristoApiObj) =
   api.merge = merge
   api.mergePayload = mergePayload
   api.pathAsBlob = pathAsBlob
+  api.reCentre = reCentre
   api.rollback = rollback
   api.serialise = serialise
   api.stow = stow
@@ -483,6 +524,7 @@ func dup*(api: AristoApiRef): AristoApiRef =
    finish: api.finish,
    forget: api.forget,
    forkTop: api.forkTop,
+   forkWith: api.forkWith,
    getKey: api.getKey,
    getKeyRc: api.getKeyRc,
    hashify: api.hashify,
@@ -494,6 +536,7 @@ func dup*(api: AristoApiRef): AristoApiRef =
    merge: api.merge,
    mergePayload: api.mergePayload,
    pathAsBlob: api.pathAsBlob,
+   reCentre: api.reCentre,
    rollback: api.rollback,
    serialise: api.serialise,
    stow: api.stow,
@@ -566,6 +609,11 @@ func init*(
       AristoApiProfForkTopFn.profileRunner:
         result = api.forkTop(a, b)

+  profApi.forkWith =
+    proc(a: AristoDbRef; b: VertexID; c: HashKey; d = false): auto =
+      AristoApiProfForkWithFn.profileRunner:
+        result = api.forkWith(a, b, c, d)
+
   profApi.getKey =
     proc(a: AristoDbRef; b: VertexID): auto =
       AristoApiProfGetKeyFn.profileRunner:
@@ -622,6 +670,11 @@ func init*(
       AristoApiProfPathAsBlobFn.profileRunner:
         result = api.pathAsBlob(a)

+  profApi.reCentre =
+    proc(a: AristoDbRef) =
+      AristoApiProfReCentreFn.profileRunner:
+        api.reCentre(a)
+
   profApi.rollback =
     proc(a: AristoTxRef): auto =
       AristoApiProfRollbackFn.profileRunner:
@@ -249,13 +249,15 @@ type

    # Transaction wrappers
    TxArgStaleTx
+   TxArgsUseless
    TxBackendNotWritable
    TxGarbledSpan
    TxNoPendingTx
+   TxNotFound
    TxNotTopTx
    TxPendingTx
    TxStackGarbled
    TxStackUnderflow

    # Functions from `aristo_desc.nim`
    MustBeOnCentre
@@ -38,7 +38,7 @@ type
    RlpData         ## Marked RLP encoded
    AccountData     ## `Aristo account` with vertex IDs links

-  PayloadRef* = ref object
+  PayloadRef* = ref object of RootRef
    case pType*: PayloadType
    of RawData:
      rawBlob*: Blob   ## Opaque data, default value
@@ -55,17 +55,9 @@ logScope:
 # Private helpers
 # ------------------------------------------------------------------------------

-when false:
-  template logTxt(info: static[string]): static[string] =
-    "Hashify " & info
-
 func getOrVoid(tab: Table[VertexID,VertexID]; vid: VertexID): VertexID =
   tab.getOrDefault(vid, VertexID(0))

-when false:
-  func contains(wff: WidthFirstForest; vid: VertexID): bool =
-    vid in wff.base or vid in wff.pool or vid in wff.root
-
 # ------------------------------------------------------------------------------
 # Private functions
 # ------------------------------------------------------------------------------
@@ -158,10 +158,7 @@ proc insertBranch(
       debug "Branch link leaf path garbled", linkID, path
       return err(MergeBranchLinkLeafGarbled)

-    let
-      local = db.vidFetch(pristine = true)
-      # lty = LeafTie(root: hike.root, path: rc.value)
-
+    let local = db.vidFetch(pristine = true)
     db.setVtxAndKey(hike.root, local, linkVtx)
     linkVtx.lPfx = linkVtx.lPfx.slice(1+n)
     forkVtx.bVid[linkInx] = local
@@ -14,7 +14,7 @@
 {.push raises: [].}

 import
-  std/tables,
+  std/[sets, tables],
   results,
   "."/[aristo_desc, aristo_filter, aristo_get, aristo_layers, aristo_hashify]

@@ -40,6 +40,57 @@ proc getTxUid(db: AristoDbRef): uint =
   db.txUidGen.inc
   db.txUidGen

+proc txGet(
+    db: AristoDbRef;
+    vid: VertexID;
+    key: HashKey;
+      ): Result[AristoTxRef,AristoError] =
+  ## Getter, returns the transaction where the vertex with ID `vid` exists and
+  ## has the Merkle hash key `key`.
+  ##
+  var tx = db.txRef
+  if tx.isNil:
+    return err(TxNoPendingTx)
+  if tx.level != db.stack.len or
+     tx.txUid != db.top.txUid:
+    return err(TxStackGarbled)
+
+  # Check the top level
+  if db.top.final.dirty.len == 0 and
+     db.top.delta.kMap.getOrVoid(vid) == key:
+    let rc = db.getVtxRc vid
+    if rc.isOk:
+      return ok(tx)
+    if rc.error != GetVtxNotFound:
+      return err(rc.error) # oops
+
+  # Walk down the transaction stack
+  for level in (tx.level-1).countDown(1):
+    tx = tx.parent
+    if tx.isNil or tx.level != level:
+      return err(TxStackGarbled)
+
+    let layer = db.stack[level]
+    if tx.txUid != layer.txUid:
+      return err(TxStackGarbled)
+
+    if layer.final.dirty.len == 0 and
+       layer.delta.kMap.getOrVoid(vid) == key:
+
+      # Need to check validity on lower layers
+      for n in level.countDown(0):
+        if db.stack[n].delta.sTab.getOrVoid(vid).isValid:
+          return ok(tx)
+
+      # Not found, check whether the key exists on the backend
+      let rc = db.getVtxBE vid
+      if rc.isOk:
+        return ok(tx)
+      if rc.error != GetVtxNotFound:
+        return err(rc.error) # oops
+
+  err(TxNotFound)
+
 # ------------------------------------------------------------------------------
 # Public functions, getters
 # ------------------------------------------------------------------------------
@@ -149,6 +200,54 @@ proc forkTx*(
   ok(txClone)


+proc forkWith*(
+    db: AristoDbRef;
+    vid: VertexID;                    # Pivot vertex (typically `VertexID(1)`)
+    key: HashKey;                     # Hash key of pivot vertex
+    dontHashify = false;              # Process/fix MPT hashes
+      ): Result[AristoDbRef,AristoError] =
+  ## Find the transaction where the vertex with ID `vid` exists and has the
+  ## Merkle hash key `key`. If there is no transaction available, search in
+  ## the filter and then in the backend.
+  ##
+  ## If the above procedure succeeds, a new descriptor is forked with exactly
+  ## one transaction which contains all the bottom layers up until the
+  ## layer where the `(vid,key)` pair is found. In case the pair was found on
+  ## the filter or the backend, this transaction is empty.
+  ##
+  if not vid.isValid or
+     not key.isValid:
+    return err(TxArgsUseless)
+
+  # Find `(vid,key)` on transaction layers
+  block:
+    let rc = db.txGet(vid, key)
+    if rc.isOk:
+      return rc.value.forkTx(dontHashify)
+    if rc.error notin {TxNotFound,GetVtxNotFound}:
+      return err(rc.error)
+
+  # Try filter
+  if not db.roFilter.isNil:
+    let roKey = db.roFilter.kMap.getOrVoid vid
+    if roKey == key:
+      let rc = db.fork(noFilter = false)
+      if rc.isOk:
+        discard rc.value.txBegin
+      return rc
+
+  # Try backend alone
+  block:
+    let beKey = db.getKeyUBE(vid).valueOr: VOID_HASH_KEY
+    if beKey == key:
+      let rc = db.fork(noFilter = true)
+      if rc.isOk:
+        discard rc.value.txBegin
+      return rc
+
+  err(TxNotFound)
+
+
 proc forkTop*(
     db: AristoDbRef;
     dontHashify = false;              # Process/fix MPT hashes
@@ -43,24 +43,26 @@ Usage of the replacement wrapper

 ### Objects pedigree:

         CoreDbRef -- base descriptor
          | | |
          | | +--- CoreDbCtxRef -- MPT context descriptor
          | |       | |
          | |       | +-- CoreDbMptRef -- hexary trie instance
          | |       | |    : :
          | |       | +-- CoreDbMptRef -- hexary trie instance
          | |       |
          | |       +---- CoreDbPhkRef -- pre-hashed key hexary trie instance
          | |       |      : :
          | |       +---- CoreDbPhkRef -- pre-hashed key hexary trie instance
          | |
          | +------ CoreDbKvtRef -- single static key-value table
          |
          +-------- CoreDbCaptRef -- tracer support descriptor

-### Instantiating standard database object descriptors works as follows:
+### Instantiating legacy standard database object descriptors works as follows:

     let
       db = newCoreDbRef(..)         # new base descriptor
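With the context layer added above, trie instances are obtained via the
context descriptor, e.g. (sketch extending the snippet; method names per the
`CoreDbCtxFns` table of this commit, `trie` being some `CoreDbTrieRef`):

    let
      db  = newCoreDbRef(..)          # new base descriptor (as above)
      ctx = db.ctx                    # currently active MPT context
      mpt = ctx.getMpt(trie, false)   # hexary trie instance below `ctx`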
@@ -11,12 +11,13 @@
 {.push raises: [].}

 import
+  std/tables,
   eth/common,
   results,
   "../.."/[aristo, aristo/aristo_walk],
   "../.."/[kvt, kvt/kvt_init/memory_only, kvt/kvt_walk],
   ".."/[base, base/base_desc],
-  ./aristo_db/[common_desc, handlers_aristo, handlers_kvt]
+  ./aristo_db/[common_desc, handlers_aristo, handlers_kvt, handlers_trace]

 import
   ../../aristo/aristo_init/memory_only as aristo_memory_only

@@ -38,23 +39,16 @@ type
     ## Main descriptor
     kdbBase: KvtBaseRef        ## Kvt subsystem
     adbBase: AristoBaseRef     ## Aristo subsystem
+    tracer: AristoTracerRef    ## Currently active recorder
+
+  AristoTracerRef = ref object of TraceRecorderRef
+    ## Sub-handle for tracer
+    parent: AristoCoreDbRef

   AristoCoreDbBE = ref object of CoreDbBackendRef

 proc newAristoVoidCoreDbRef*(): CoreDbRef {.noRaise.}

 # ------------------------------------------------------------------------------
 # Private helpers
 # ------------------------------------------------------------------------------

-func notImplemented[T](
-    _: typedesc[T];
-    db: AristoCoreDbRef;
-    info: string;
-      ): CoreDbRc[T] {.gcsafe.} =
-  ## Applies only to `Aristo` methods
-  err((VertexID(0),aristo.NotImplemented).toError(db.adbBase, info))
-
 # ------------------------------------------------------------------------------
 # Private tx and base methods
 # ------------------------------------------------------------------------------

@@ -100,12 +94,48 @@ proc txMethods(
       if kdbApi.isTop(kTx): ? kdbApi.rollback(kTx).toVoidRc(kdbBase, info)
       ok())

+proc cptMethods(
+    tracer: AristoTracerRef;
+      ): CoreDbCaptFns =
+  let
+    tracer = tracer         # So it can safely be captured
+    db = tracer.parent      # Will not change and can be captured
+    log = tracer.topInst()  # Ditto
+
+  CoreDbCaptFns(
+    recorderFn: proc(): CoreDbRef =
+      db,
+
+    logDbFn: proc(): TableRef[Blob,Blob] =
+      log.kLog,
+
+    getFlagsFn: proc(): set[CoreDbCaptFlags] =
+      log.flags,
+
+    forgetFn: proc() =
+      if tracer.pop():
+        tracer.restore())
+
+
 proc baseMethods(
     db: AristoCoreDbRef;
     A: typedesc;
     K: typedesc;
       ): CoreDbBaseFns =

+  proc tracerSetup(
+      db: AristoCoreDbRef;
+      flags: set[CoreDbCaptFlags];
+        ): CoreDxCaptRef =
+    let dx = db.adbBase.ctx.mpt
+    if db.tracer.isNil:
+      db.tracer = AristoTracerRef(parent: db)
+      db.tracer.init(db.kdbBase, db.adbBase, flags)
+    else:
+      db.tracer.push(flags)
+    CoreDxCaptRef(methods: db.tracer.cptMethods)
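Nested tracer sessions stack log instances on top of each other, as wired up
above (sketch; `PersistPut` is one of the capture flags honoured by the trace
handlers):

    let cp1 = db.newCapture().expect "capture"              # init(): level 1
    let cp2 = db.newCapture({PersistPut}).expect "capture"  # push(): level 2
    # ... reads/writes are now logged in the stacked instances ...
    cp2.forget()   # unwind the overlay instance
    cp1.forget()   # unwind/restore the base instance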
   CoreDbBaseFns(
     backendFn: proc(): CoreDbBackendRef =
       db.bless(AristoCoreDbBE()),

@@ -132,9 +162,15 @@ proc baseMethods(
     newKvtFn: proc(sharedTable: bool): CoreDbRc[CoreDxKvtRef] =
       db.kdbBase.newKvtHandler(sharedTable, "newKvtFn()"),

-    getCtxFn: proc(): CoreDbCtxRef =
+    newCtxFn: proc(): CoreDbCtxRef =
       db.adbBase.ctx,

+    newCtxFromTxFn: proc(r: Hash256; k: CoreDbSubTrie): CoreDbRc[CoreDbCtxRef] =
+      CoreDbCtxRef.init(db.adbBase, r, k),
+
+    swapCtxFn: proc(ctx: CoreDbCtxRef): CoreDbCtxRef =
+      db.adbBase.swapCtx(ctx),
+
     beginFn: proc(): CoreDbRc[CoreDxTxRef] =
       const info = "beginFn()"
       let
@@ -143,7 +179,7 @@ proc baseMethods(
       ok(db.bless CoreDxTxRef(methods: db.txMethods(aTx, kTx))),

     newCaptureFn: proc(flags: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] =
-      CoreDxCaptRef.notImplemented(db, "capture()"))
+      ok(db.bless db.tracerSetup(flags)))

 # ------------------------------------------------------------------------------
 # Private constructor helpers
@@ -27,9 +27,9 @@ type
     api*: AristoApiRef         ## Api functions can be re-directed
     ctx*: AristoCoreDbCtxRef   ## Currently active context

-  AristoCoreDbCtxRef = ref object of CoreDbCtxRef
+  AristoCoreDbCtxRef* = ref object of CoreDbCtxRef
     base: AristoBaseRef        ## Local base descriptor
-    mpt: AristoDbRef           ## Aristo MPT database
+    mpt*: AristoDbRef          ## Aristo MPT database

   AristoCoreDxAccRef = ref object of CoreDxAccRef
     base: AristoBaseRef        ## Local base descriptor

@@ -58,13 +58,10 @@ type

 const
   VoidTrieID = VertexID(0)
+  # StorageTrieID = VertexID(StorageTrie) -- currently unused
   AccountsTrieID = VertexID(AccountsTrie)
   GenericTrieID = VertexID(GenericTrie)

-when false:
-  const
-    StorageTrieID = VertexID(StorageTrie)
-
 logScope:
   topics = "aristo-hdl"

@@ -179,27 +176,6 @@ func toVoidRc[T](
     return ok()
   err rc.error.toError(base, info, error)

-# -------------------------------
-
-proc tryHash(
-    base: AristoBaseRef;
-    trie: CoreDbTrieRef;
-    info: static[string];
-      ): CoreDbRc[Hash256] =
-  let trie = trie.AristoCoreDbTrie
-  if not trie.isValid:
-    return err(TrieInvalid.toError(base, info, HashNotAvailable))
-
-  let root = trie.to(VertexID)
-  if not root.isValid:
-    return ok(EMPTY_ROOT_HASH)
-
-  let rc = base.api.getKeyRc(trie.base.ctx.mpt, root)
-  if rc.isErr:
-    return err(rc.error.toError(base, info, HashNotAvailable))
-
-  ok rc.value.to(Hash256)
-
 # ------------------------------------------------------------------------------
 # Private `MPT` call back functions
 # ------------------------------------------------------------------------------

@@ -475,18 +451,19 @@ proc accMethods(cAcc: AristoCoreDxAccRef): CoreDbAccFns =

 proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
   let
     cCtx = cCtx       # So it can safely be captured
     base = cCtx.base  # Will not change and can be captured
     db = base.parent  # Ditto
     api = base.api    # Ditto
     mpt = cCtx.mpt    # Ditto

   proc ctxNewTrie(
       kind: CoreDbSubTrie;
       root: Hash256;
       address: Option[EthAddress];
-      info: static[string];
         ): CoreDbRc[CoreDbTrieRef] =
+    const info = "newTrieFn()"

     let trie = AristoCoreDbTrie(
       base: base,
       kind: kind)

@@ -519,10 +496,9 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
       err(aristo.GenericError.toError(base, info, RootNotFound))


-  proc ctxGetMpt(
-      trie: CoreDbTrieRef;
-      info: static[string];
-        ): CoreDbRc[CoreDxMptRef] =
+  proc ctxGetMpt(trie: CoreDbTrieRef): CoreDbRc[CoreDxMptRef] =
+    const
+      info = "getMptFn()"
     let
       trie = AristoCoreDbTrie(trie)
     var

@@ -563,14 +539,11 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =

     newMpt.base = base
     newMpt.methods = newMpt.mptMethods()

     ok(db.bless newMpt)

-  proc ctxGetAcc(
-      trie: CoreDbTrieRef;
-      info: static[string];
-        ): CoreDbRc[CoreDxAccRef] =
+  proc ctxGetAcc(trie: CoreDbTrieRef): CoreDbRc[CoreDxAccRef] =
+    const info = "getAccFn()"
     let trie = AristoCoreDbTrie(trie)
     if trie.kind != AccountsTrie:
       let error = (AccountsTrieID, AccRootUnacceptable)

@@ -581,32 +554,27 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =

     ok(db.bless acc)

+  proc ctxForget() =
+    api.forget(mpt).isOkOr:
+      raiseAssert "forgetFn(): " & $error
+

   CoreDbCtxFns(
-    fromTxFn: proc(root: Hash256; kind: CoreDbSubTrie): CoreDbRc[CoreDbCtxRef] =
-      const info = "fromTxFn()"
-      err(aristo.NotImplemented.toError(base, info, base_desc.NotImplemented)),
-
-    swapFn: proc(cty: CoreDbCtxRef): CoreDbCtxRef =
-      doAssert not cty.isNil
-      base.ctx.swap(AristoCoreDbCtxRef(cty)),
-
     newTrieFn: proc(
         trie: CoreDbSubTrie;
         root: Hash256;
         address: Option[EthAddress];
           ): CoreDbRc[CoreDbTrieRef] =
-      ctxNewTrie(trie, root, address, "newTrieFn()"),
+      ctxNewTrie(trie, root, address),

     getMptFn: proc(trie: CoreDbTrieRef; prune: bool): CoreDbRc[CoreDxMptRef] =
-      ctxGetMpt(trie, "newMptFn()"),
+      ctxGetMpt(trie),

     getAccFn: proc(trie: CoreDbTrieRef; prune: bool): CoreDbRc[CoreDxAccRef] =
-      ctxGetAcc(trie, "newAccFn()"),
+      ctxGetAcc(trie),

     forgetFn: proc() =
-      api.forget(mpt).isOkOr:
-        raiseAssert "forgetFn(): " & $error
-      discard)
+      ctxForget())

 # ------------------------------------------------------------------------------
 # Public handlers and helpers
@@ -654,11 +622,11 @@ proc txBegin*(
       ): CoreDbRc[AristoTxRef] =
   base.api.txBegin(base.ctx.mpt).toRc(base, info)

+# ---------------------
+
+proc getLevel*(base: AristoBaseRef): int =
+  base.api.level(base.ctx.mpt)
+
 # ---------------------

 proc triePrint*(
     base: AristoBaseRef;
     trie: CoreDbTrieRef;
@@ -666,18 +634,28 @@ proc triePrint*(
   if trie.isValid:
     let
       trie = trie.AristoCoreDbTrie
-      rc = base.tryHash(trie, "triePrint()")
+      root = trie.to(VertexID)

     result = "(" & $trie.kind
     if trie.kind == StorageTrie:
       result &= trie.stoRoot.toStr
       if trie.stoAddr != EthAddress.default:
         result &= ",%" & $trie.stoAddr.toHex
     else:
-      result &= VertexID(trie.kind).toStr
-    if rc.isErr:
-      result &= "," & $rc.error.AristoCoreDbError.aErr
+      result &= "," & VertexID(trie.kind).toStr

+    # Do the Merkle hash key
+    if not root.isValid:
+      result &= ",£ø"
     else:
-      result &= ",£" & (if rc.value.isValid: rc.value.data.toHex else: "ø")
+      let rc = base.api.getKeyRc(trie.base.ctx.mpt, root)
+      if rc.isErr:
+        result &= "," & $rc.error
+      elif rc.value.isValid:
+        result &= ",£" & rc.value.to(Hash256).data.toHex
+      else:
+        result &= ",£ø"

     result &= ")"
   elif not trie.isNil:
     result &= "$?"
@@ -710,6 +688,15 @@ proc rootHash*(

   ok key.to(Hash256)


+proc swapCtx*(base: AristoBaseRef; ctx: CoreDbCtxRef): CoreDbCtxRef =
+  doAssert not ctx.isNil
+  result = base.ctx
+
+  # Set read-write access and install
+  base.ctx = AristoCoreDbCtxRef(ctx)
+  base.api.reCentre(base.ctx.mpt)
+
 # ------------------------------------------------------------------------------
 # Public constructors and related
 # ------------------------------------------------------------------------------

@@ -735,6 +722,36 @@ func init*(T: type AristoBaseRef; db: CoreDbRef; adb: AristoDbRef): T =
       result.api = profApi
       result.ctx.mpt.backend = profApi.be


+proc init*(
+    T: type CoreDbCtxRef;
+    base: AristoBaseRef;
+    root: Hash256;
+    kind: CoreDbSubTrie;
+      ): CoreDbRc[CoreDbCtxRef] =
+  const info = "fromTxFn()"
+
+  if kind.ord == 0:
+    return err(aristo.GenericError.toError(base, info, SubTrieUnacceptable))
+
+  let
+    api = base.api
+    vid = VertexID(kind)
+    key = root.to(HashKey)
+
+    newMpt = block:
+      let rc = api.forkWith(base.ctx.mpt, vid, key)
+      if rc.isErr:
+        return err(rc.error.toError(base, info))
+      rc.value
+
+  # Create new context
+  let ctx = AristoCoreDbCtxRef(
+    base: base,
+    mpt: newMpt)
+  ctx.methods = ctx.ctxMethods
+  ok( base.parent.bless ctx)
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
nimbus/db/core_db/backend/aristo_db/handlers_trace.nim (new file, 470 lines)

@@ -0,0 +1,470 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

{.push raises: [].}

import
  std/[strutils, tables],
  eth/common,
  stew/byteutils,
  results,
  ../../../aristo as use_aristo,
  ../../../aristo/aristo_path,
  ../../../kvt as use_kvt,
  ../../base,
  ../../base/base_desc,
  "."/[handlers_kvt, handlers_aristo]

const
  EnableDebugLog = CoreDbEnableApiTracking

type
  TraceKdbRecorder = object
    base: KvtBaseRef            ## Restore position
    savedApi: KvtApiRef         ## Restore data

  TraceAdbRecorder = object
    base: AristoBaseRef
    savedApi: AristoApiRef

  TracerLogInstRef* = ref object
    ## Logger instance
    level*: uint8
    flags*: set[CoreDbCaptFlags]
    kLog*: TableRef[Blob,Blob]
    mLog*: TableRef[LeafTie,CoreDbPayloadRef]

  TraceRecorderRef* = ref object of RootRef
    inst: seq[TracerLogInstRef] ## Production stack for log database
    kdb: TraceKdbRecorder       ## Contains restore information
    adb: TraceAdbRecorder       ## Contains restore information

when EnableDebugLog:
  import chronicles

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

func toStr(key: openArray[byte]): string =
  key.toHex

func `$`(root: VertexID): string =
  let vid = root.uint64
  if 0 < vid:
    "$" & vid.toHex.strip(leading=true, trailing=false, chars={'0'})
  else:
    "$ø"

func `$`(pyl: PayloadRef): string =
  case pyl.pType:
  of RawData:
    pyl.rawBlob.toStr
  of RlpData:
    pyl.rlpBlob.toStr
  of AccountData:
    "<AccountData>"

func `$`(pyl: CoreDbPayloadRef): string =
  if 0 < pyl.blob.len:
    pyl.blob.toStr
  else:
    $pyl

func `$`(data: Blob): string =
  data.toStr

func `$`(lty: LeafTie): string =
  $lty.root & ":" & $lty.path

# -------------------------

func getOrVoid(tab: TableRef[Blob,Blob]; w: openArray[byte]): Blob =
  tab.getOrDefault(@w, EmptyBlob)

func getOrVoid(
    tab: TableRef[LeafTie,CoreDbPayloadRef];
    lty: LeafTie;
      ): CoreDbPayloadRef =
  tab.getOrDefault(lty, CoreDbPayloadRef(nil))

func leafTie(
    root: VertexID;
    path: openArray[byte];
      ): Result[LeafTie,(VertexID,AristoError)] =
  let tag = path.pathToTag.valueOr:
    return err((VertexID(root), error))
  ok LeafTie(root: root, path: tag)

func to(pyl: PayloadRef; T: type CoreDbPayloadRef): T =
  case pyl.pType:
  of RawData:
    T(pType: RawData, rawBlob: pyl.rawBlob)
  of RlpData:
    T(pType: RlpData, rlpBlob: pyl.rlpBlob)
  of AccountData:
    T(pType: AccountData, account: pyl.account)

func to(data: openArray[byte]; T: type CoreDbPayloadRef): T =
  T(pType: RawData, rawBlob: @data)

proc update(
    pyl: CoreDbPayloadRef;
    api: AristoApiRef;
    mpt: AristoDbRef;
      ): Result[CoreDbPayloadRef,(VertexID,AristoError)] =
  if pyl.pType == AccountData:
    pyl.blob = block:
      let rc = api.serialise(mpt, pyl)
      if rc.isOk:
        rc.value
      else:
        ? api.hashify(mpt)
        ? api.serialise(mpt, pyl)
  ok(pyl)

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc traceRecorder(
    tr: TraceRecorderRef;
    base: KvtBaseRef;
      ): TraceKdbRecorder =
  let
    api = base.api
    tracerApi = api.dup

  # Update production api
  tracerApi.get =
    proc(kvt: KvtDbRef; key: openArray[byte]): Result[Blob,KvtError] =
      when EnableDebugLog:
        const logTxt = "trace get"

      # Try to fetch data from the stacked logger instances
      var (data, pos) = (EmptyBlob, -1)
      for level in (tr.inst.len-1).countDown(0):
        data = tr.inst[level].kLog.getOrVoid key
        if 0 < data.len:
          when EnableDebugLog:
            debug logTxt, level, log="get()", key=key.toStr, result=data.toStr
          pos = level
          break

      # Alternatively fetch data from the production DB instance
      if pos < 0:
        data = api.get(kvt, key).valueOr:
          when EnableDebugLog:
            debug logTxt, key=key.toStr, error
          return err(error) # No way

      # Data available, store in all top level instances
      for level in pos+1 ..< tr.inst.len:
        tr.inst[level].kLog[@key] = data
        when EnableDebugLog:
          debug logTxt, level, log="put()", key=key.toStr, result=data.toStr

      ok(data)

  tracerApi.del =
    proc(kvt: KvtDbRef; key: openArray[byte]): Result[void,KvtError] =
      when EnableDebugLog:
        const logTxt = "trace del"

      # Delete data on the stacked logger instances
      for level in (tr.inst.len-1).countDown(0):
        let flags = tr.inst[level].flags
        tr.inst[level].kLog.del @key
        when EnableDebugLog:
          debug logTxt, level, log="del()", flags, key=key.toStr
        if PersistDel notin flags:
          return ok()

      when EnableDebugLog:
        debug logTxt, key=key.toStr
      api.del(kvt, key)

  tracerApi.put =
    proc(kvt: KvtDbRef; key, data: openArray[byte]): Result[void,KvtError] =
      when EnableDebugLog:
        const logTxt = "trace put"

      # Store data on the stacked logger instances
      for level in (tr.inst.len-1).countDown(0):
        let flags = tr.inst[level].flags
        tr.inst[level].kLog[@key] = @data
        when EnableDebugLog:
          debug logTxt, level, log="put()",
            flags, key=key.toStr, data=data.toStr
        if PersistPut notin flags:
          return ok()

      when EnableDebugLog:
        debug logTxt, key=key.toStr, data=data.toStr
      api.put(kvt, key, data)

  tracerApi.hasKey =
    proc(kvt: KvtDbRef; key: openArray[byte]): Result[bool,KvtError] =
      when EnableDebugLog:
        const logTxt = "trace hasKey"

      # Try to fetch data from the stacked logger instances
      for level in (tr.inst.len-1).countDown(0):
        if tr.inst[level].kLog.hasKey @key:
          when EnableDebugLog:
            debug logTxt, level, log="get()", key=key.toStr, result=true
          return ok(true)

      # Alternatively fetch data from the production DB instance
      when EnableDebugLog:
        debug logTxt, key=key.toStr
      api.hasKey(kvt, key)

  result = TraceKdbRecorder(
    base: base,
    savedApi: api)
  base.api = tracerApi
  assert result.savedApi != base.api


proc traceRecorder(
    tr: TraceRecorderRef;
    base: AristoBaseRef;
      ): TraceAdbRecorder =
  let
    api = base.api
    tracerApi = api.dup

  tracerApi.fetchPayload =
    proc(mpt: AristoDbRef;
         root: VertexID;
         path: openArray[byte];
           ): Result[PayloadRef,(VertexID,AristoError)] =
      when EnableDebugLog:
        const logTxt = "trace fetchPayload"

      let key = leafTie(root, path).valueOr:
        when EnableDebugLog:
          debug logTxt, root, path=path.toStr, error=error[1]
        return err(error)

      # Try to fetch data from the stacked logger instances
      var (pyl, pos) = (CoreDbPayloadRef(nil), -1)
      for level in (tr.inst.len-1).countDown(0):
        pyl = tr.inst[level].mLog.getOrVoid key
        if not pyl.isNil:
          pos = level
          when EnableDebugLog:
            debug logTxt, level, key, result=($pyl)
          break

      # Alternatively fetch data from the production DB instance
      if pyl.isNil:
        pyl = block:
          let rc = api.fetchPayload(mpt, root, path)
          if rc.isErr:
            when EnableDebugLog:
              debug logTxt, level=0, key, error=rc.error[1]
            return err(rc.error)
          rc.value.to(CoreDbPayloadRef)

        # For accounts payload serialise the data
        pyl = pyl.update(api, mpt).valueOr:
          when EnableDebugLog:
            debug logTxt, key, pyl, error=(error[1])
          return err(error)

      # Data and payload available, store in all top level instances
      for level in pos+1 ..< tr.inst.len:
        tr.inst[level].mLog[key] = pyl
        when EnableDebugLog:
          debug logTxt, level, log="put()", key, result=($pyl)

      ok(pyl)

  tracerApi.delete =
    proc(mpt: AristoDbRef;
         root: VertexID;
         path: openArray[byte];
         accPath: PathID;
           ): Result[bool,(VertexID,AristoError)] =
      when EnableDebugLog:
        const logTxt = "trace delete"

      let key = leafTie(root, path).valueOr:
        when EnableDebugLog:
          debug logTxt, root, path=path.toStr, error=error[1]
        return err(error)

      # Delete data on the stacked logger instances
      for level in (tr.inst.len-1).countDown(0):
        let flags = tr.inst[level].flags
        tr.inst[level].mLog.del key
        when EnableDebugLog:
          debug logTxt, level, log="del()", flags, key
        if PersistDel notin flags:
          return ok(false)

      when EnableDebugLog:
        debug logTxt, key, accPath
      api.delete(mpt, root, path, accPath)

  tracerApi.merge =
    proc(mpt: AristoDbRef;
         root: VertexID;
         path, data: openArray[byte];
         accPath: PathID;
           ): Result[bool,AristoError] =
      when EnableDebugLog:
        const logTxt = "trace merge"

      let key = leafTie(root, path).valueOr:
        when EnableDebugLog:
          debug logTxt, root, path=path.toStr, error=error[1]
        return err(error[1])

      # Store data on the stacked logger instances
      let pyl = data.to(CoreDbPayloadRef)
      for level in (tr.inst.len-1).countDown(0):
        let flags = tr.inst[level].flags
        tr.inst[level].mLog[key] = pyl
        when EnableDebugLog:
          debug logTxt, level, log="put()", flags, key, data=($pyl)
        if PersistPut notin flags:
          return ok(false)

      when EnableDebugLog:
        debug logTxt, key, data=($pyl), accPath
      api.merge(mpt, root, path, data, accPath)

  tracerApi.mergePayload =
    proc(mpt: AristoDbRef;
         root: VertexID;
         path: openArray[byte];
         pyl: PayloadRef;
         accPath = VOID_PATH_ID;
           ): Result[bool,AristoError] =
      when EnableDebugLog:
        const logTxt = "trace mergePayload"

      let key = leafTie(root, path).valueOr:
        when EnableDebugLog:
          debug logTxt, root, path=path.toStr, error=error[1]
        return err(error[1])

      # For accounts payload add serialised version of the data to `pyl`
      var pyl = pyl.to(CoreDbPayloadRef).update(api, mpt).valueOr:
        when EnableDebugLog:
          debug logTxt, key, pyl, error=(error[1])
        return err(error[1])

      # Store data on the stacked logger instances
      for level in (tr.inst.len-1).countDown(0):
        let flags = tr.inst[level].flags
        tr.inst[level].mLog[key] = pyl
        when EnableDebugLog:
          debug logTxt, level, log="put()", flags, key, pyl
        if PersistPut notin flags:
          return ok(false)

      when EnableDebugLog:
        debug logTxt, key, pyl
      api.mergePayload(mpt, root, path, pyl, accPath)

  tracerApi.hasPath =
    proc(mpt: AristoDbRef;
         root: VertexID;
         path: openArray[byte];
           ): Result[bool,(VertexID,AristoError)] =
      when EnableDebugLog:
        const logTxt = "trace hasPath"

      let key = leafTie(root, path).valueOr:
        when EnableDebugLog:
          debug logTxt, root, path=path.toStr, error=error[1]
        return err(error)

      # Try to fetch data from the stacked logger instances
      for level in (tr.inst.len-1).countDown(0):
        if tr.inst[level].mLog.hasKey key:
          when EnableDebugLog:
            debug logTxt, level, log="get()", key, result=true
          return ok(true)

      # Alternatively fetch data from the production DB instance
      when EnableDebugLog:
        debug logTxt, key
      api.hasPath(mpt, root, path)

  result = TraceAdbRecorder(
    base: base,
    savedApi: api)
  base.api = tracerApi
  assert result.savedApi != base.api

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc topInst*(tr: TraceRecorderRef): TracerLogInstRef =
  ## Get top level KVT logger
  if not tr.isNil and 0 < tr.inst.len:
    result = tr.inst[^1]

proc pop*(tr: TraceRecorderRef): bool =
  ## Reduce logger stack, returns `true` on success. There will always be
  ## at least one logger left on stack.
  if 1 < tr.inst.len: # Always leave one instance on stack
    tr.inst.setLen(tr.inst.len - 1)
    return true

proc push*(
    tr: TraceRecorderRef;
    flags: set[CoreDbCaptFlags];
      ) =
  ## Push overlay logger instance
  if not tr.isNil and 0 < tr.inst.len:
    let stackLen = tr.inst.len.uint8
    doAssert stackLen < 254 # so length can be safely held as a `uint8`
    tr.inst.add TracerLogInstRef(
      level: stackLen + 1u8,
      kLog: newTable[Blob,Blob](),
      mLog: newTable[LeafTie,CoreDbPayloadRef](),
      flags: flags)

# ------------------------------------------------------------------------------
# Public constructor/destructor
# ------------------------------------------------------------------------------

proc init*(
    db: TraceRecorderRef;          # Recorder desc to initialise
    kBase: KvtBaseRef;             # `Kvt` base descriptor
    aBase: AristoBaseRef;          # `Aristo` base descriptor
    flags: set[CoreDbCaptFlags];
      ) =
  ## Constructor, create initial/base tracer descriptor
  db.inst = @[TracerLogInstRef(
    level: 1,
    kLog: newTable[Blob,Blob](),
    mLog: newTable[LeafTie,CoreDbPayloadRef](),
    flags: flags)]
  db.kdb = db.traceRecorder kBase
  db.adb = db.traceRecorder aBase

proc restore*(db: TraceRecorderRef) =
  ## Restore production API, might be called directly or be invoked from the
  ## call-back handler.
  if 0 < db.inst.len:
    db.kdb.base.api = db.kdb.savedApi
    db.adb.base.api = db.adb.savedApi

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
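Taken together, the life cycle of a recorder looks as follows (sketch; this is
normally driven via the `CoreDb` capture API rather than directly, and
`kBase`/`aBase` are assumed to be live base descriptors):

    let tr = TraceRecorderRef()
    tr.init(kBase, aBase, {})          # hook in tracer APIs, level-1 logs
    tr.push {PersistPut, PersistDel}   # overlay a second log instance
    # ... all KVT/MPT traffic is mirrored into tr.topInst() ...
    echo "logged keys: ", tr.topInst().kLog.len
    discard tr.pop()                   # drop the overlay again
    tr.restore()                       # re-install the production APIs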
@@ -378,20 +378,6 @@ proc ctxMethods(ctx: LegacyCoreDbCtxRef): CoreDbCtxFns =
     tdb = db.tdb

   CoreDbCtxFns(
-    fromTxFn: proc(
-        root: Hash256;
-        kind: CoreDbSubTrie;
-          ): CoreDbRc[CoreDbCtxRef] =
-      # This is not 100% on the tx layer but should work anyway with
-      # the application as it emulates sort of `Aristo` behaviour.
-      if db.tdb.contains root.data:
-        return ok(ctx)
-      err(db.bless(CtxNotFound, LegacyCoreDbError(ctx: "fromTxFn()"))),
-
-    swapFn: proc(cty: CoreDbCtxRef): CoreDbCtxRef =
-      doAssert cty == ctx
-      ctx,
-
     newTrieFn: proc(
         kind: CoreDbSubTrie;
         root: Hash256;

@@ -475,8 +461,8 @@ proc cptMethods(cpt: RecorderRef; db: LegacyDbRef): CoreDbCaptFns =
     getFlagsFn: proc(): set[CoreDbCaptFlags] =
       cpt.flags,

-    forgetFn: proc(): CoreDbRc[void] =
-      err(db.bless(NotImplemented, LegacyCoreDbError(ctx: "disposeFn()"))))
+    forgetFn: proc() =
+      discard)

 # ------------------------------------------------------------------------------
 # Private base methods (including constructors)

@@ -487,7 +473,7 @@ proc baseMethods(
     dbType: CoreDbType;
     closeDb: LegacyDbClose;
       ): CoreDbBaseFns =
-  let tdb = db.tdb
+  let db = db
   CoreDbBaseFns(
     backendFn: proc(): CoreDbBackendRef =
       db.bless(LegacyCoreDbBE(base: db)),

@@ -514,12 +500,26 @@ proc baseMethods(
     newKvtFn: proc(sharedTable = true): CoreDbRc[CoreDxKvtRef] =
       ok(db.kvt),

-    getCtxFn: proc(): CoreDbCtxRef =
+    newCtxFn: proc(): CoreDbCtxRef =
       db.ctx,

+    swapCtxFn: proc(ctx: CoreDbCtxRef): CoreDbCtxRef =
+      doAssert CoreDbCtxRef(db.ctx) == ctx
+      ctx,
+
+    newCtxFromTxFn: proc(
+        root: Hash256;
+        kind: CoreDbSubTrie;
+          ): CoreDbRc[CoreDbCtxRef] =
+      # This is not 100% on the tx layer but should work anyway with
+      # the application as it emulates sort of `Aristo` behaviour.
+      if db.tdb.contains root.data:
+        return ok(db.ctx)
+      err(db.bless(CtxNotFound, LegacyCoreDbError(ctx: "fromTxFn()"))),
+
     beginFn: proc(): CoreDbRc[CoreDxTxRef] =
       db.top = LegacyCoreDxTxRef(
-        ltx: tdb.beginTransaction,
+        ltx: db.tdb.beginTransaction,
         level: (if db.top.isNil: 1 else: db.top.level + 1),
         back: db.top)
       db.top.methods = db.top.txMethods()
@@ -19,7 +19,7 @@ import
   ./base/[api_new_desc, api_tracking, base_desc]

 from ../aristo
-  import EmptyBlob, isValid
+  import EmptyBlob, PayloadRef, isValid

 const
   ProvideLegacyAPI = true

@@ -49,6 +49,7 @@ export
   CoreDbFnInx,
   CoreDbKvtBackendRef,
   CoreDbMptBackendRef,
+  CoreDbPayloadRef,
   CoreDbPersistentTypes,
   CoreDbProfListRef,
   CoreDbRef,
@@ -60,7 +61,8 @@ export
   CoreDxKvtRef,
   CoreDxMptRef,
   CoreDxPhkRef,
-  CoreDxTxRef
+  CoreDxTxRef,
+  PayloadRef

 const
   CoreDbProvideLegacyAPI* = ProvideLegacyAPI

@@ -410,35 +412,42 @@ proc forget*(kvt: CoreDxKvtRef): CoreDbRc[void] {.discardable.} =
   kvt.ifTrackNewApi: debug newApiTxt, api, elapsed, result

 # ------------------------------------------------------------------------------
-# Public Merkle Patricia Tree context administration
+# Public Merkle Patricia Tree context constructors and administration
 # ------------------------------------------------------------------------------

 proc ctx*(db: CoreDbRef): CoreDbCtxRef =
   ## Get currently active context.
   ##
-  db.setTrackNewApi BaseGetCtxFn
-  result = db.methods.getCtxFn()
+  db.setTrackNewApi BaseNewCtxFn
+  result = db.methods.newCtxFn()
   db.ifTrackNewApi: debug newApiTxt, api, elapsed

-proc fromTx*(
-    ctx: CoreDbCtxRef;
+proc ctxFromTx*(
+    db: CoreDbRef;
     root: Hash256;
     kind = AccountsTrie;
       ): CoreDbRc[CoreDbCtxRef] =
   ## Create new context derived from matching transaction of the currently
-  ## active context.
+  ## active context. For the legacy backend, this function always returns
+  ## the currently active context (i.e. the same as `db.ctx()`.)
   ##
-  ctx.setTrackNewApi CtxFromTxFn
-  result = ctx.methods.fromTxFn(root, kind)
-  ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, result
+  db.setTrackNewApi BaseNewCtxFromTxFn
+  result = db.methods.newCtxFromTxFn(root, kind)
+  db.ifTrackNewApi: debug newApiTxt, api, elapsed, result

-proc swap*(ctx: CoreDbCtxRef; other: CoreDbCtxRef): CoreDbCtxRef =
-  ## Activate argument context `other` and return the previously active
-  ## context.
+proc swapCtx*(db: CoreDbRef; ctx: CoreDbCtxRef): CoreDbCtxRef =
+  ## Activate argument context `ctx` and return the previously active context.
+  ## This function goes typically together with `forget()`. A valid scenario
+  ## might look like
+  ## ::
+  ##   proc doSomething(db: CoreDbRef; ctx: CoreDbCtxRef) =
+  ##     let saved = db.swapCtx ctx
+  ##     defer: db.swapCtx(saved).forget()
+  ##     ...
   ##
-  ctx.setTrackNewApi CtxSwapFn
-  result = ctx.methods.swapFn(other)
-  ctx.ifTrackNewApi: debug newApiTxt, api, elapsed
+  db.setTrackNewApi BaseSwapCtxFn
+  result = db.methods.swapCtxFn ctx
+  db.ifTrackNewApi: debug newApiTxt, api, elapsed

 proc forget*(ctx: CoreDbCtxRef) =
   ## Dispose context and all related MPT views.

@@ -1007,20 +1016,18 @@ proc logDb*(cp: CoreDxCaptRef): TableRef[Blob,Blob] =

 proc flags*(cp: CoreDxCaptRef): set[CoreDbCaptFlags] =
   ## Getter
   ##
   cp.setTrackNewApi CptFlagsFn
   result = cp.methods.getFlagsFn()
   cp.ifTrackNewApi: debug newApiTxt, api, elapsed, result

-proc forget*(cp: CoreDxCaptRef): CoreDbRc[void] =
-  ## Explicitly stop recording the current tracer instance. If this call was
-  ## successful, the database argument `db` used when starting the trace
-  ## with `newCapture()` will be fully operational, again. This will also
-  ## implicitly take place when the `NIM` garbage collector recycles an
-  ## abandoned capture descriptor.
+proc forget*(cp: CoreDxCaptRef) =
+  ## Explicitly stop recording the current tracer instance and reset to
+  ## previous level.
   ##
   cp.setTrackNewApi CptForgetFn
-  result = cp.methods.forgetFn()
-  cp.ifTrackNewApi: debug newApiTxt, api, elapsed, result
+  cp.methods.forgetFn()
+  cp.ifTrackNewApi: debug newApiTxt, api, elapsed

 # ------------------------------------------------------------------------------
 # Public methods, legacy API
@@ -42,10 +42,12 @@ type

     BaseDbTypeFn = "dbType"
     BaseFinishFn = "finish"
-    BaseGetCtxFn = "ctx"
     BaseLegacySetupFn = "compensateLegacySetup"
     BaseLevelFn = "level"
     BaseNewCaptureFn = "newCapture"
+    BaseNewCtxFn = "ctx"
+    BaseNewCtxFromTxFn = "ctxFromTx"
+    BaseSwapCtxFn = "swapCtx"
     BaseNewKvtFn = "newKvt"
     BaseNewTxFn = "newTransaction"

@@ -55,12 +57,10 @@ type
     CptForgetFn = "cpt/forget"

     CtxForgetFn = "ctx/forget"
-    CtxFromTxFn = "ctx/fromTx"
     CtxGetAccFn = "ctx/getAcc"
     CtxGetAccMptFn = "ctx/getAccMpt"
     CtxGetMptFn = "ctx/getMpt"
     CtxNewTrieFn = "ctx/newTrie"
-    CtxSwapFn = "ctx/swap"

     ErrorPrintFn = "$$"
     EthAccRecastFn = "recast"
@ -16,6 +16,9 @@ import
|
||||
results,
|
||||
../../aristo/aristo_profile
|
||||
|
||||
from ../../aristo
|
||||
import PayloadRef
|
||||
|
||||
# Annotation helpers
|
||||
{.pragma: noRaise, gcsafe, raises: [].}
|
||||
{.pragma: apiRaise, gcsafe, raises: [CoreDbApiError].}
|
||||
@ -49,6 +52,10 @@ type
|
||||
stoTrie*: CoreDbTrieRef ## Implies storage root sub-MPT
|
||||
codeHash*: Hash256
|
||||
|
||||
CoreDbPayloadRef* = ref object of PayloadRef
|
||||
## Extension of `Aristo` payload used in the tracer
|
||||
blob*: Blob ## Serialised version for accounts data
|
||||
|
||||
CoreDbErrorCode* = enum
|
||||
Unset = 0
|
||||
Unspecified
|
||||
@ -68,6 +75,7 @@ type
|
||||
RootNotFound
|
||||
RootUnacceptable
|
||||
StorageFailed
|
||||
SubTrieUnacceptable
|
||||
TrieLocked
|
||||
|
||||
CoreDbSubTrie* = enum
|
||||
@ -95,32 +103,37 @@ type
  CoreDbBaseLevelFn* = proc(): int {.noRaise.}
  CoreDbBaseNewKvtFn* =
    proc(sharedTable: bool): CoreDbRc[CoreDxKvtRef] {.noRaise.}
  CoreDbBaseGetCtxFn* = proc(): CoreDbCtxRef {.noRaise.}
  CoreDbBaseNewCtxFn* = proc(): CoreDbCtxRef {.noRaise.}
  CoreDbBaseNewCtxFromTxFn* = proc(
    root: Hash256; kind: CoreDbSubTrie;): CoreDbRc[CoreDbCtxRef] {.noRaise.}
  CoreDbBaseSwapCtxFn* = proc(ctx: CoreDbCtxRef): CoreDbCtxRef {.noRaise.}
  CoreDbBaseTxBeginFn* = proc(): CoreDbRc[CoreDxTxRef] {.noRaise.}
  CoreDbBaseNewCaptFn* =
    proc(flgs: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] {.noRaise.}
  CoreDbBaseGetCaptFn* = proc(): CoreDbRc[CoreDxCaptRef] {.noRaise.}

  CoreDbBaseFns* = object
    backendFn*: CoreDbBaseBackendFn
    destroyFn*: CoreDbBaseDestroyFn
    rootHashFn*: CoreDbBaseRootHashFn
    triePrintFn*: CoreDbBaseTriePrintFn
    errorPrintFn*: CoreDbBaseErrorPrintFn
    legacySetupFn*: CoreDbBaseInitLegaSetupFn
    levelFn*: CoreDbBaseLevelFn

    # Kvt constructor
    newKvtFn*: CoreDbBaseNewKvtFn

    # MPT context constructor
    getCtxFn*: CoreDbBaseGetCtxFn
    newCtxFn*: CoreDbBaseNewCtxFn
    newCtxFromTxFn*: CoreDbBaseNewCtxFromTxFn
    swapCtxFn*: CoreDbBaseSwapCtxFn

    # Transactions constructors
    beginFn*: CoreDbBaseTxBeginFn

    # capture/tracer constructors
    newCaptureFn*: CoreDbBaseNewCaptFn

  # --------------------------------------------------
@ -150,7 +163,6 @@ type
  # --------------------------------------------------
  CoreDbCtxFromTxFn* =
    proc(root: Hash256; kind: CoreDbSubTrie): CoreDbRc[CoreDbCtxRef] {.noRaise.}
  CoreDbCtxSwapFn* = proc(ctx: CoreDbCtxRef): CoreDbCtxRef {.noRaise.}
  CoreDbCtxNewTrieFn* = proc(
    trie: CoreDbSubTrie; root: Hash256; address: Option[EthAddress];
    ): CoreDbRc[CoreDbTrieRef] {.noRaise.}
@ -162,8 +174,6 @@ type

  CoreDbCtxFns* = object
    ## Methods for context manipulation
    fromTxFn*: CoreDbCtxFromTxFn
    swapFn*: CoreDbCtxSwapFn
    newTrieFn*: CoreDbCtxNewTrieFn
    getMptFn*: CoreDbCtxGetMptFn
    getAccFn*: CoreDbCtxGetAccFn
@ -257,7 +267,7 @@ type
  CoreDbCaptRecorderFn* = proc(): CoreDbRef {.noRaise.}
  CoreDbCaptLogDbFn* = proc(): TableRef[Blob,Blob] {.noRaise.}
  CoreDbCaptFlagsFn* = proc(): set[CoreDbCaptFlags] {.noRaise.}
  CoreDbCaptForgetFn* = proc(): CoreDbRc[void] {.noRaise.}
  CoreDbCaptForgetFn* = proc() {.noRaise.}

  CoreDbCaptFns* = object
    recorderFn*: CoreDbCaptRecorderFn
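Taken together, a capture descriptor exposes a recorder database, a key/value log of everything written through it, its flag set, and a destructor. A minimal usage sketch, assuming a `CoreDbRef` named `db`, the `newCapture`/`logDb`/`forget` methods wired up by this patch, and `std/tables` plus `stew/byteutils` in scope:

  # Run database work against the capture's recorder, then inspect
  # what was written. `newCapture` returns a Result, hence `valueOr`.
  let cpt = db.newCapture.valueOr:
    raiseAssert "capture setup failed: " & $$error
  # ... execute code against cpt.recorder ...
  for k, v in cpt.logDb.pairs:
    echo k.toHex, " => ", v.toHex
  cpt.forget()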
@ -38,7 +38,9 @@ proc validateMethodsDesc(base: CoreDbBaseFns) =
  doAssert not base.legacySetupFn.isNil
  doAssert not base.levelFn.isNil
  doAssert not base.newKvtFn.isNil
  doAssert not base.getCtxFn.isNil
  doAssert not base.newCtxFn.isNil
  doAssert not base.newCtxFromTxFn.isNil
  doAssert not base.swapCtxFn.isNil
  doAssert not base.beginFn.isNil
  doAssert not base.newCaptureFn.isNil
@ -52,8 +54,6 @@ proc validateMethodsDesc(kvt: CoreDbKvtFns) =
  doAssert not kvt.forgetFn.isNil

proc validateMethodsDesc(ctx: CoreDbCtxFns) =
  doAssert not ctx.fromTxFn.isNil
  doAssert not ctx.swapFn.isNil
  doAssert not ctx.newTrieFn.isNil
  doAssert not ctx.getMptFn.isNil
  doAssert not ctx.getAccFn.isNil
@ -71,6 +71,7 @@ proc validateMethodsDesc(fns: CoreDbMptFns) =

proc validateMethodsDesc(fns: CoreDbAccFns) =
  doAssert not fns.backendFn.isNil
  doAssert not fns.getMptFn.isNil
  doAssert not fns.fetchFn.isNil
  doAssert not fns.deleteFn.isNil
  doAssert not fns.stoFlushFn.isNil
@ -30,6 +30,19 @@ else:
proc getParentHeader(self: CoreDbRef, header: BlockHeader): BlockHeader =
  self.getBlockHeader(header.parentHash)

proc setParentCtx(com: CommonRef, header: BlockHeader): CoreDbCtxRef =
  ## Adjust state root (mainly for `Aristo`)
  let
    parent = com.db.getParentHeader(header)
    ctx = com.db.ctxFromTx(parent.stateRoot).valueOr:
      raiseAssert "setParentCtx: " & $$error
  com.db.swapCtx ctx

proc reset(com: CommonRef, saveCtx: CoreDbCtxRef) =
  ## Reset context
  com.db.swapCtx(saveCtx).forget()
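These two helpers bracket every tracer run below: `setParentCtx` forks a context positioned at the parent block's state root and makes it current, returning the previous context, while `reset` swaps the saved context back in and discards the fork. A minimal sketch of the intended call pattern, assuming a `CommonRef` named `com` and a block `header`:

  # Re-position the database to the parent state, guaranteeing the
  # original context is restored whatever happens in between.
  let saveCtx = com.setParentCtx(header)
  defer: com.reset saveCtx
  # ... replay the block against the parent state here ...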
proc `%`(x: openArray[byte]): JsonNode =
  result = %toHex(x, false)
@ -92,7 +105,7 @@ proc dumpMemoryDB*(node: JsonNode, kvt: TableRef[common.Blob, common.Blob]) =
    n[k.toHex(false)] = %v
  node["state"] = n

proc dumpMemoryDB*(node: JsonNode, capture: CoreDbCaptRef) =
proc dumpMemoryDB*(node: JsonNode, capture: CoreDbCaptRef|CoreDxCaptRef) =
  node.dumpMemoryDB capture.logDb
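The overload now accepts both the legacy `CoreDbCaptRef` and the new `CoreDxCaptRef` descriptor, so the JSON dump works unchanged with either capture API. A hedged usage sketch, assuming `std/json` is imported and `capture` is either descriptor type:

  # Collect everything the capture recorded under the "state" key.
  var node = newJObject()
  node.dumpMemoryDB(capture)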
const

@ -105,13 +118,16 @@ const
proc traceTransaction*(com: CommonRef, header: BlockHeader,
    body: BlockBody, txIndex: int, tracerFlags: set[TracerFlags] = {}): JsonNode =
  let
    # parent = com.db.getParentHeader(header) -- notused
    # we add a memory layer between backend/lower layer db
    # and capture state db snapshot during transaction execution
    capture = com.db.capture()
    saveCtx = com.setParentCtx(header)
    capture = com.db.newCapture.value
    tracerInst = newLegacyTracer(tracerFlags)
    captureCom = com.clone(capture.recorder)
    vmState = BaseVMState.new(header, captureCom)
  defer:
    capture.forget
    com.reset saveCtx

  var stateDb = vmState.stateDB
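For orientation, a hypothetical call site tracing the first transaction of a block might look like this (assuming `com`, `header` and `body` are at hand; `pretty` comes from `std/json`):

  # traceTransaction returns a JsonNode with the execution trace.
  let txTrace = com.traceTransaction(header, body, txIndex = 0)
  echo txTrace.pretty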
@ -175,13 +191,17 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpState = false): JsonNode =
  let
    parent = com.db.getParentHeader(header)
    capture = com.db.capture()
    saveCtx = com.setParentCtx(header)
    capture = com.db.newCapture.value
    captureCom = com.clone(capture.recorder)
    # we only need stack dump if we want to scan for internal transaction address
    # we only need a stack dump when scanning for internal transaction address
    captureFlags = {DisableMemory, DisableStorage, EnableAccount}
    tracerInst = newLegacyTracer(captureFlags)
    vmState = BaseVMState.new(header, captureCom, tracerInst)
    miner = vmState.coinbase()
  defer:
    capture.forget
    com.reset saveCtx

  var
    before = newJArray()
@ -231,11 +251,14 @@ proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpS

proc traceBlock*(com: CommonRef, header: BlockHeader, body: BlockBody, tracerFlags: set[TracerFlags] = {}): JsonNode =
  let
    # parent = com.db.getParentHeader(header) -- notused
    capture = com.db.capture()
    saveCtx = com.setParentCtx(header)
    capture = com.db.newCapture.value
    captureCom = com.clone(capture.recorder)
    tracerInst = newLegacyTracer(tracerFlags)
    vmState = BaseVMState.new(header, captureCom, tracerInst)
  defer:
    capture.forget
    com.reset saveCtx

  if header.txRoot == EMPTY_ROOT_HASH: return newJNull()
  doAssert(body.transactions.calcTxRoot == header.txRoot)
@ -261,15 +284,17 @@ proc traceTransactions*(com: CommonRef, header: BlockHeader, blockBody: BlockBod
  for i in 0 ..< blockBody.transactions.len:
    result.add traceTransaction(com, header, blockBody, i, {DisableState})

proc dumpDebuggingMetaData*(com: CommonRef, header: BlockHeader,
    blockBody: BlockBody, vmState: BaseVMState, launchDebugger = true) =
  let
    blockNumber = header.blockNumber

  var
    capture = com.db.capture()
proc dumpDebuggingMetaData*(vmState: BaseVMState, header: BlockHeader,
    blockBody: BlockBody, launchDebugger = true) =
  let
    com = vmState.com
    blockNumber = header.blockNumber
    capture = com.db.newCapture.value
    captureCom = com.clone(capture.recorder)
    bloom = createBloom(vmState.receipts)
  defer:
    capture.forget()

  let blockSummary = %{
    "receiptsRoot": %("0x" & toHex(calcReceiptRoot(vmState.receipts).data)),
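The signature change flips the proc into method-call style on the VM state and drops the redundant `com` parameter, which is recoverable as `vmState.com`. Call sites migrate as in this sketch, following the pattern used in the tools below:

  # Old convention (removed by this patch):
  #   dumpDebuggingMetaData(com, header, body, vmState, false)
  # New convention:
  vmState.dumpDebuggingMetaData(header, body, launchDebugger = false)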
@ -1,3 +1,13 @@
# Nimbus
# Copyright (c) 2020-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

import
  std/[json, os],
  stew/byteutils,
@ -37,7 +47,7 @@ proc executeBlock(blockEnv: JsonNode, memoryDB: CoreDbRef, blockNumber: UInt256)
  info "block validation success", validationResult, blockNumber

  transaction.rollback()
  dumpDebuggingMetaData(com, header, body, vmState, false)
  vmState.dumpDebuggingMetaData(header, body, false)
  let
    fileName = "debug" & $blockNumber & ".json"
    nimbus = json.parseFile(fileName)
@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2020-2023 Status Research & Development GmbH
# Copyright (c) 2020-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)

@ -43,7 +43,7 @@ proc dumpDebug(com: CommonRef, blockNumber: UInt256) =
  discard vmState.processBlock(header, body)

  transaction.rollback()
  dumpDebuggingMetaData(captureCom, header, body, vmState, false)
  vmState.dumpDebuggingMetaData(header, body, false)

proc main() {.used.} =
  let conf = getConfiguration()
@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2020-2023 Status Research & Development GmbH
# Copyright (c) 2020-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)

@ -117,7 +117,7 @@ proc huntProblematicBlock(blockNumber: UInt256): ValidationResult =
  if validationResult != ValidationResult.OK:
    transaction.rollback()
    putAncestorsIntoDB(vmState, com.db)
    dumpDebuggingMetaData(com, thisBlock.header, thisBlock.body, vmState, false)
    vmState.dumpDebuggingMetaData(thisBlock.header, thisBlock.body, false)

  result = validationResult
@ -260,6 +260,8 @@ proc coreDbMain*(noisy = defined(debug)) =
    noisy.chainSyncRunner(ldgType=LedgerCache)

when isMainModule:
  import
    std/times
  const
    noisy = defined(debug) or true
  var