Core db+aristo update tracer for non trivial operations (#2102)

* Remove cruft

* Docu/code cosmetics

* Aristo: Update `forkBase()`

why:
  Was not up to the job

* Update/correct tracer for running against `Aristo`

details:
  This patch makes sure that, before a new `BaseVMState` is created, the
  `CoreDb` context is adjusted to accommodate the state root that is
  passed to the `BaseVMState` constructor (see the sketch after this
  list).

* CoreDb+legacy: Always return the current context with `ctxFromTx()`

why:
  There was an experimental setting trying to find the node with the
  proper state root in the KVT (not the hexary trie layer), which
  currently does not work reliably, probably due to `Ledger` caching
  effects.
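
A minimal sketch of the adjustment described in the tracer item above,
assuming the helpers introduced by this patch (`SaveCtxEnv`, `newCtx`,
`setCtx`) plus a `com: CommonRef` and `header: BlockHeader` in scope;
passing `com` straight to `BaseVMState.new()` is likewise an assumption:

  let
    parent  = com.db.getParentHeader(header)
    # Fork a `CoreDb` context positioned at the parent state root ...
    saveCtx = setCtx com.newCtx(parent.stateRoot)
    # ... so the constructor below finds the state root it is given.
    vmState = BaseVMState.new(header, com)
  defer:
    # Swap the previous context back in and dispose of the forked one.
    saveCtx.setCtx().ctx.forget()
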
Jordan Hrycaj 2024-03-22 17:31:56 +00:00 committed by GitHub
parent eb67e78fde
commit 889a1165b0
12 changed files with 211 additions and 223 deletions


@ -126,7 +126,7 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
if c.com.consensus == ConsensusType.POA:
var parent = if 0 < i: @[headers[i-1]] else: @[]
let rc = c.clique.cliqueVerify(c.com, header,parent)
let rc = c.clique.cliqueVerify(c.com, header, parent)
if rc.isOk:
# mark it off so it would not auto-restore previous state
c.clique.cliqueDispose(cliqueState)
@ -169,7 +169,7 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
# between eth_blockNumber and eth_syncing
c.com.syncCurrent = header.blockNumber
# Done with this bllock
# Done with this block
lapTx.commit()
dbTx.commit()


@ -148,14 +148,6 @@ type
## pair was found on the filter or the backend, this transaction is
## empty.
AristoApiGetKeyFn* =
proc(db: AristoDbRef;
vid: VertexID;
): HashKey
{.noRaise.}
## Simplified version of `getKey(0` (see below) returns `VOID_HASH_KEY`
## also on fetch errors.
AristoApiGetKeyRcFn* =
proc(db: AristoDbRef;
vid: VertexID;
@ -360,7 +352,6 @@ type
forget*: AristoApiForgetFn
forkTop*: AristoApiForkTopFn
forkWith*: AristoApiForkWithFn
getKey*: AristoApiGetKeyFn
getKeyRc*: AristoApiGetKeyRcFn
hashify*: AristoApiHashifyFn
hasPath*: AristoApiHasPathFn
@ -393,7 +384,6 @@ type
AristoApiProfForgetFn = "forget"
AristoApiProfForkTopFn = "forkTop"
AristoApiProfForkWithFn = "forkWith"
AristoApiProfGetKeyFn = "getKey"
AristoApiProfGetKeyRcFn = "getKeyRc"
AristoApiProfHashifyFn = "hashify"
AristoApiProfHasPathFn = "hasPath"
@ -436,7 +426,6 @@ when AutoValidateApiHooks:
doAssert not api.forget.isNil
doAssert not api.forkTop.isNil
doAssert not api.forkWith.isNil
doAssert not api.getKey.isNil
doAssert not api.getKeyRc.isNil
doAssert not api.hashify.isNil
doAssert not api.hasPath.isNil
@ -489,7 +478,6 @@ func init*(api: var AristoApiObj) =
api.forget = forget
api.forkTop = forkTop
api.forkWith = forkWith
api.getKey = getKey
api.getKeyRc = getKeyRc
api.hashify = hashify
api.hasPath = hasPath
@ -525,7 +513,6 @@ func dup*(api: AristoApiRef): AristoApiRef =
forget: api.forget,
forkTop: api.forkTop,
forkWith: api.forkWith,
getKey: api.getKey,
getKeyRc: api.getKeyRc,
hashify: api.hashify,
hasPath: api.hasPath,
@ -614,11 +601,6 @@ func init*(
AristoApiProfForkWithFn.profileRunner:
result = api.forkWith(a, b, c, d)
profApi.getKey =
proc(a: AristoDbRef; b: VertexID): auto =
AristoApiProfGetKeyFn.profileRunner:
result = api.getKey(a, b)
profApi.getKeyRc =
proc(a: AristoDbRef; b: VertexID): auto =
AristoApiProfGetKeyRcFn.profileRunner:


@ -190,7 +190,7 @@ proc ppKey(key: HashKey; db: AristoDbRef; pfx = true): string =
return (vids, "+")
if pfx:
result = "£"
if key.len == 0 or key.to(Hash256) == Hash256():
if key.to(Hash256) == Hash256():
result &= "©"
elif not key.isValid:
result &= "ø"
@ -751,27 +751,30 @@ proc pp*(
indent = 4;
backendOk = false;
filterOk = true;
topOk = true;
stackOk = true;
): string =
result = db.layersCc.pp(db, indent=indent) & indent.toPfx
if 0 < db.stack.len:
result &= " level=" & $db.stack.len
when false: # or true:
let layers = @[db.top] & db.stack.reversed
var lStr = ""
for n,w in layers:
let
m = layers.len - n - 1
l = db.layersCc m
a = w.delta.kMap.values.toSeq.filterIt(not it.isValid).len
c = l.delta.kMap.values.toSeq.filterIt(not it.isValid).len
result &= " (" & $(w.delta.kMap.len - a) & "," & $a
lStr &= " " & $m & "=(" & $(l.delta.kMap.len - c) & "," & $c
result &= " --" & lStr
result &= indent.toPfx
if topOk:
result = db.layersCc.pp(db, indent=indent)
let stackOnlyOk = stackOk and not (topOk or filterOk or backendOk)
if not stackOnlyOk:
result &= indent.toPfx & " level=" & $db.stack.len
if (stackOk and 0 < db.stack.len) or stackOnlyOk:
let layers = @[db.top] & db.stack.reversed
var lStr = ""
for n,w in layers:
let
m = layers.len - n - 1
l = db.layersCc m
a = w.delta.kMap.values.toSeq.filterIt(not it.isValid).len
c = l.delta.kMap.values.toSeq.filterIt(not it.isValid).len
result &= "(" & $(w.delta.kMap.len - a) & "," & $a & ")"
lStr &= " " & $m & "=(" & $(l.delta.kMap.len - c) & "," & $c & ")"
result &= " =>" & lStr
if backendOk:
result &= db.backend.pp(db)
result &= indent.toPfx & db.backend.pp(db)
elif filterOk:
result &= db.roFilter.ppFilter(db, indent+1)
result &= indent.toPfx & db.roFilter.ppFilter(db, indent+1)
proc pp*(sdb: MerkleSignRef; indent = 4): string =
"count=" & $sdb.count &


@ -131,7 +131,8 @@ proc getVtx*(db: AristoDbRef; vid: VertexID): VertexRef =
proc getKeyRc*(db: AristoDbRef; vid: VertexID): Result[HashKey,AristoError] =
## Cascaded attempt to fetch a Merkle hash from the cache layers or the
## backend.
## backend. This function will never return a `VOID_HASH_KEY` but rather
## some `GetKeyNotFound` or `GetKeyUpdateNeeded` error.
##
block body:
let key = db.layersGetKey(vid).valueOr:
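
A caller-side sketch of the guarantee documented above; the `db` and `vid`
values are assumed to exist, while `getKeyRc()` and the two error codes are
taken from this patch:

  let rc = db.getKeyRc vid
  if rc.isOk:
    doAssert rc.value.isValid          # never VOID_HASH_KEY on success
  else:
    doAssert rc.error in {GetKeyNotFound, GetKeyUpdateNeeded}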


@ -14,7 +14,7 @@
{.push raises: [].}
import
std/[sets, tables],
std/tables,
results,
"."/[aristo_desc, aristo_filter, aristo_get, aristo_layers, aristo_hashify]
@ -40,56 +40,34 @@ proc getTxUid(db: AristoDbRef): uint =
db.txUidGen.inc
db.txUidGen
proc txGet(
db: AristoDbRef;
vid: VertexID;
key: HashKey;
): Result[AristoTxRef,AristoError] =
## Getter, returns the transaction where the vertex with ID `vid` exists and
## has the Merkle hash key `key`.
##
var tx = db.txRef
if tx.isNil:
return err(TxNoPendingTx)
if tx.level != db.stack.len or
tx.txUid != db.top.txUid:
return err(TxStackGarbled)
iterator txWalk(tx: AristoTxRef): (AristoTxRef,LayerRef,AristoError) =
## Walk down the transaction chain.
let db = tx.db
var tx = tx
# Check the top level
if db.top.final.dirty.len == 0 and
db.top.delta.kMap.getOrVoid(vid) == key:
let rc = db.getVtxRc vid
if rc.isOk:
return ok(tx)
if rc.error != GetVtxNotFound:
return err(rc.error) # oops
block body:
# Start at top layer if tx refers to that
if tx.level == db.stack.len:
if tx.txUid != db.top.txUid:
yield (tx,db.top,TxStackGarbled)
break body
# Walk down the transaction stack
for level in (tx.level-1).countDown(1):
tx = tx.parent
if tx.isNil or tx.level != level:
return err(TxStackGarbled)
# Yield the top level
yield (tx,db.top,AristoError(0))
let layer = db.stack[level]
if tx.txUid != layer.txUid:
return err(TxStackGarbled)
# Walk down the transaction stack
for level in (tx.level-1).countDown(1):
tx = tx.parent
if tx.isNil or tx.level != level:
yield (tx,LayerRef(nil),TxStackGarbled)
break body
if layer.final.dirty.len == 0 and
layer.delta.kMap.getOrVoid(vid) == key:
var layer = db.stack[level]
if tx.txUid != layer.txUid:
yield (tx,layer,TxStackGarbled)
break body
# Need to check validity on lower layers
for n in level.countDown(0):
if db.stack[n].delta.sTab.getOrVoid(vid).isValid:
return ok(tx)
# Not found, check whether the key exists on the backend
let rc = db.getVtxBE vid
if rc.isOk:
return ok(tx)
if rc.error != GetVtxNotFound:
return err(rc.error) # oops
err(TxNotFound)
yield (tx,layer,AristoError(0))
# ------------------------------------------------------------------------------
# Public functions, getters
@ -200,11 +178,63 @@ proc forkTx*(
ok(txClone)
proc forkTop*(
db: AristoDbRef;
dontHashify = false; # Process/fix MPT hashes
): Result[AristoDbRef,AristoError] =
## Variant of `forkTx()` for the top transaction if there is any. Otherwise
## the top layer is cloned, and an empty transaction is set up. After
## successful fork the returned descriptor has transaction level 1.
##
## Use `aristo_desc.forget()` to clean up this descriptor.
##
if db.txRef.isNil:
let dbClone = ? db.fork(noToplayer=true, noFilter=false)
dbClone.top = db.layersCc # Is a deep copy
if not dontHashify:
dbClone.hashify().isOkOr:
discard dbClone.forget()
return err(error[1])
discard dbClone.txBegin
return ok(dbClone)
# End if()
db.txRef.forkTx dontHashify
proc forkBase*(
db: AristoDbRef;
dontHashify = false; # Process/fix MPT hashes
): Result[AristoDbRef,AristoError] =
## Variant of `forkTx()`, sort of the opposite of `forkTop()`. This is the
equivalent of top layer forking after all transactions have been rolled
## back.
##
## Use `aristo_desc.forget()` to clean up this descriptor.
##
if not db.txRef.isNil:
let dbClone = ? db.fork(noToplayer=true, noFilter=false)
dbClone.top = db.layersCc 0
if not dontHashify:
dbClone.hashify().isOkOr:
discard dbClone.forget()
return err(error[1])
discard dbClone.txBegin
return ok(dbClone)
# End if()
db.forkTop dontHashify
proc forkWith*(
db: AristoDbRef;
vid: VertexID; # Pivot vertex (typically `VertexID(1)`)
key: HashKey; # Hash key of pivot vertex
dontHashify = false; # Process/fix MPT hashes
dontHashify = true; # Process/fix MPT hashes
): Result[AristoDbRef,AristoError] =
## Find the transaction where the vertex with ID `vid` exists and has the
## Merkle hash key `key`. If there is no transaction available, search in
@ -219,15 +249,26 @@ proc forkWith*(
not key.isValid:
return err(TxArgsUseless)
# Find `(vid,key)` on transaction layers
block:
let rc = db.txGet(vid, key)
if rc.isOk:
return rc.value.forkTx(dontHashify)
if rc.error notin {TxNotFound,GetVtxNotFound}:
return err(rc.error)
if db.txRef.isNil:
# Try `(vid,key)` on top layer
let topKey = db.top.delta.kMap.getOrVoid vid
if topKey == key:
return db.forkTop dontHashify
# Try filter
else:
# Find `(vid,key)` on transaction layers
for (tx,layer,error) in db.txRef.txWalk:
if error != AristoError(0):
return err(error)
if layer.delta.kMap.getOrVoid(vid) == key:
return tx.forkTx dontHashify
# Try bottom layer
let botKey = db.stack[0].delta.kMap.getOrVoid vid
if botKey == key:
return db.forkBase dontHashify
# Try `(vid,key)` on filter
if not db.roFilter.isNil:
let roKey = db.roFilter.kMap.getOrVoid vid
if roKey == key:
@ -236,7 +277,7 @@ proc forkWith*(
discard rc.value.txBegin
return rc
# Try backend alone
# Try `(vid,key)` on unfiltered backend
block:
let beKey = db.getKeyUBE(vid).valueOr: VOID_HASH_KEY
if beKey == key:
@ -247,32 +288,6 @@ proc forkWith*(
err(TxNotFound)
proc forkTop*(
db: AristoDbRef;
dontHashify = false; # Process/fix MPT hashes
): Result[AristoDbRef,AristoError] =
## Variant of `forkTx()` for the top transaction if there is any. Otherwise
## the top layer is cloned, and an empty transaction is set up. After
## successful fork the returned descriptor has transaction level 1.
##
## Use `aristo_desc.forget()` to clean up this descriptor.
##
if db.txRef.isNil:
let dbClone = ? db.fork(noToplayer = true, noFilter = false)
dbClone.top = db.layersCc # Is a deep copy
if not dontHashify:
dbClone.hashify().isOkOr:
discard dbClone.forget()
return err(error[1])
discard dbClone.txBegin
return ok(dbClone)
# End if()
db.txRef.forkTx dontHashify
# ------------------------------------------------------------------------------
# Public functions: Transaction frame
# ------------------------------------------------------------------------------
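
A hedged usage sketch for the fork functions above; the descriptor `db` and
the state root `root: Hash256` are assumptions, while `forkWith()`, the
`VertexID(1)` pivot and `forget()` come from this module:

  # Fork a descriptor whose accounts trie (pivot `VertexID(1)`) matches a
  # known state root, use it read-only, then discard it again.
  let dbClone = db.forkWith(VertexID(1), root.to(HashKey)).valueOr:
    raiseAssert "no layer, filter, or backend state found for " & $root
  defer:
    discard dbClone.forget()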


@ -127,7 +127,6 @@ proc baseMethods(
db: AristoCoreDbRef;
flags: set[CoreDbCaptFlags];
): CoreDxCaptRef =
let dx = db.adbBase.ctx.mpt
if db.tracer.isNil:
db.tracer = AristoTracerRef(parent: db)
db.tracer.init(db.kdbBase, db.adbBase, flags)


@ -205,7 +205,6 @@ proc mptMethods(cMpt: AristoCoreDxMptRef): CoreDbMptFns =
AristoCoreDbTrie(
base: base,
kind: CoreDbSubTrie(cMpt.root))
db.bless trie
proc mptPersistent(): CoreDbRc[void] =
@ -261,7 +260,6 @@ proc mptMethods(cMpt: AristoCoreDxMptRef): CoreDbMptFns =
# This is insane but legit. A storage trie was announced for an account
# but no data have been added, yet.
return ok()
let rc = api.delete(mpt, cMpt.root, key, cMpt.accPath)
if rc.isErr:
if rc.error[1] == DelPathNotFound:
@ -329,7 +327,7 @@ proc accMethods(cAcc: AristoCoreDxAccRef): CoreDbAccFns =
kind: AccountsTrie)
proc accPersistent(): CoreDbRc[void] =
const info = "persistentFn()"
const info = "acc/persistentFn()"
let rc = api.stow(mpt, persistent = true)
if rc.isOk:
@ -345,7 +343,7 @@ proc accMethods(cAcc: AristoCoreDxAccRef): CoreDbAccFns =
root: AccountsTrieID))
proc accFetch(address: EthAddress): CoreDbRc[CoreDbAccount] =
const info = "fetchFn()"
const info = "acc/fetchFn()"
let pyl = block:
let
@ -364,7 +362,7 @@ proc accMethods(cAcc: AristoCoreDxAccRef): CoreDbAccFns =
ok cAcc.toCoreDbAccount(pyl.account, address)
proc accMerge(account: CoreDbAccount): CoreDbRc[void] =
const info = "mergeFn()"
const info = "acc/mergeFn()"
let
key = account.address.keccakHash.data
@ -375,7 +373,7 @@ proc accMethods(cAcc: AristoCoreDxAccRef): CoreDbAccFns =
ok()
proc accDelete(address: EthAddress): CoreDbRc[void] =
const info = "deleteFn()"
const info = "acc/deleteFn()"
let
key = address.keccakHash.data
@ -462,7 +460,7 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
root: Hash256;
address: Option[EthAddress];
): CoreDbRc[CoreDbTrieRef] =
const info = "newTrieFn()"
const info = "ctx/newTrieFn()"
let trie = AristoCoreDbTrie(
base: base,
@ -492,13 +490,12 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
doAssert rc.error == GetKeyNotFound
elif rc.value == root.to(HashKey):
return ok(db.bless trie)
err(aristo.GenericError.toError(base, info, RootNotFound))
proc ctxGetMpt(trie: CoreDbTrieRef): CoreDbRc[CoreDxMptRef] =
const
info = "getMptFn()"
info = "ctx/getMptFn()"
let
trie = AristoCoreDbTrie(trie)
var
@ -635,14 +632,15 @@ proc triePrint*(
let
trie = trie.AristoCoreDbTrie
root = trie.to(VertexID)
result = "(" & $trie.kind & ","
result = "(" & $trie.kind
# Do vertex ID and address/hash
if trie.kind == StorageTrie:
result &= trie.stoRoot.toStr
if trie.stoAddr != EthAddress.default:
result &= ",%" & $trie.stoAddr.toHex
else:
result &= "," & VertexID(trie.kind).toStr
result &= VertexID(trie.kind).toStr
# Do the Merkle hash key
if not root.isValid:
@ -685,7 +683,6 @@ proc rootHash*(
doAssert rc.error in {GetKeyNotFound,GetKeyUpdateNeeded}
return err(rc.error.toError(base, info, HashNotAvailable))
rc.value
ok key.to(Hash256)
@ -729,28 +726,28 @@ proc init*(
root: Hash256;
kind: CoreDbSubTrie;
): CoreDbRc[CoreDbCtxRef] =
const info = "fromTxFn()"
const info = "fromTxFn()"
if kind.ord == 0:
return err(aristo.GenericError.toError(base, info, SubTrieUnacceptable))
if kind.ord == 0:
return err(aristo.GenericError.toError(base, info, SubTrieUnacceptable))
let
api = base.api
vid = VertexID(kind)
key = root.to(HashKey)
let
api = base.api
vid = VertexID(kind)
key = root.to(HashKey)
# Fork MPT descriptor that provides `(vid,key)`
newMpt = block:
let rc = api.forkWith(base.ctx.mpt, vid, key)
if rc.isErr:
return err(rc.error.toError(base, info))
rc.value
newMpt = block:
let rc = api.forkWith(base.ctx.mpt, vid, key)
if rc.isErr:
return err(rc.error.toError(base, info))
rc.value
# Create new context
let ctx = AristoCoreDbCtxRef(
base: base,
mpt: newMpt)
ctx.methods = ctx.ctxMethods
ok( base.parent.bless ctx)
# Create new context
let ctx = AristoCoreDbCtxRef(
base: base,
mpt: newMpt)
ctx.methods = ctx.ctxMethods
ok( base.parent.bless ctx)
# ------------------------------------------------------------------------------
# End


@ -511,11 +511,7 @@ proc baseMethods(
root: Hash256;
kind: CoreDbSubTrie;
): CoreDbRc[CoreDbCtxRef] =
# This is not 100% on the tx layer but should work anyway with
# the application as it emulates sort of `Aristo` behaviour.
if db.tdb.contains root.data:
return ok(db.ctx)
err(db.bless(CtxNotFound, LegacyCoreDbError(ctx: "fromTxFn()"))),
ok(db.ctx),
beginFn: proc(): CoreDbRc[CoreDxTxRef] =
db.top = LegacyCoreDxTxRef(


@ -428,7 +428,7 @@ proc ctxFromTx*(
kind = AccountsTrie;
): CoreDbRc[CoreDbCtxRef] =
## Create new context derived from matching transaction of the currently
## active context. Fir the legacy backend, this function always returns
## active context. For the legacy backend, this function always returns
## the currently active context (i.e. the same as `db.ctx()`.)
##
db.setTrackNewApi BaseNewCtxFromTxFn
@ -487,7 +487,7 @@ proc newTrie*(
## let trie = db.ctx.newTrie(AccountsTrie, root).valueOr:
## # some error handling
## return
## db.getAccMpt trie
## db.getAcc trie
##
ctx.setTrackNewApi CtxNewTrieFn
result = ctx.methods.newTrieFn(kind, root, address)
@ -602,7 +602,7 @@ proc getMpt*(acc: CoreDxAccRef): CoreDxMptRef =
debug newApiTxt, api, elapsed, root
proc getAccMpt*(
proc getAcc*(
ctx: CoreDbCtxRef;
trie: CoreDbTrieRef;
prune = true;
@ -613,7 +613,7 @@ proc getAccMpt*(
## Example:
## ::
## let trie = db.getTrie(AccountsTrie,<some-hash>).valueOr:
## ... # No node with <some-hash>
## ... # No node available with <some-hash>
## return
##
## let acc = db.getAccMpt(trie)
@ -625,38 +625,14 @@ proc getAccMpt*(
## recommended using this particular constructor for accounts because it
## provides its own subset of methods to handle accounts.
##
ctx.setTrackNewApi CtxGetAccMptFn
ctx.setTrackNewApi CtxGetAccFn
result = ctx.methods.getAccFn(trie, prune)
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, trie, prune, result
proc getAccMpt*(
ctx: CoreDbCtxRef;
root = EMPTY_ROOT_HASH;
prune = true;
): CoreDxAccRef =
## Simplified version of `getAccMpt()` where the `CoreDbTrieRef` argument is
## replaced by a `root` hash argument. This function is sort of a shortcut
## for:
## ::
## let trie = db.getTrie(AccountsTrie, root).value
## result = db.getAccMpt(trie, prune).value
##
## and will throw an exception if something goes wrong. The result reference
## will alwye be non `nil`.
##
ctx.setTrackNewApi CtxGetAccMptFn
let trie = ctx.methods.newTrieFn(
AccountsTrie, root, none(EthAddress)).valueOr:
raiseAssert error.prettyText()
result = ctx.methods.getAccFn(trie, prune).valueOr:
raiseAssert error.prettyText()
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, prune
proc toMpt*(phk: CoreDxPhkRef): CoreDxMptRef =
## Replaces the pre-hashed argument trie `phk` by the non pre-hashed *MPT*.
## Note that this does not apply to an accounts trie that was created by
## `getAccMpt()`.
## `getAcc()`.
##
phk.setTrackNewApi PhkToMptFn
result = phk.fromMpt


@ -58,7 +58,6 @@ type
CtxForgetFn = "ctx/forget"
CtxGetAccFn = "ctx/getAcc"
CtxGetAccMptFn = "ctx/getAccMpt"
CtxGetMptFn = "ctx/getMpt"
CtxNewTrieFn = "ctx/newTrie"


@ -71,7 +71,10 @@ proc db*(led: SomeLedger): CoreDbRef =
led.distinctBase.parent
proc rootHash*(led: SomeLedger): Hash256 =
const info = "SomeLedger/rootHash(): "
when SomeLedger is AccountLedger:
const info = "AccountLedger/rootHash(): "
else:
const info = "StorageLedger/rootHash(): "
let rc = led.distinctBase.getTrie().rootHash()
if rc.isErr:
raiseAssert info & $$rc.error
@ -90,14 +93,21 @@ proc init*(
root: Hash256;
pruneOk = true;
): T =
db.ctx.getAccMpt(root, pruneOk).T
proc init*(
T: type AccountLedger;
db: CoreDbRef;
pruneOk = true;
): T =
db.newAccMpt(EMPTY_ROOT_HASH, pruneOk).AccountLedger
const
info = "AccountLedger.init(): "
let
ctx = db.ctx
trie = block:
let rc = ctx.newTrie(AccountsTrie, root)
if rc.isErr:
raiseAssert info & $$rc.error
rc.value
mpt = block:
let rc = ctx.getAcc(trie)
if rc.isErr:
raiseAssert info & $$rc.error
rc.value
mpt.T
proc fetch*(al: AccountLedger; eAddr: EthAddress): Result[CoreDbAccount,void] =
## Using `fetch()` for trie data retrieval
@ -150,10 +160,10 @@ proc init*(
## https://github.com/status-im/nimbus-eth1/issues/932.)
const
info = "StorageLedger/init(): "
noisy = true
let
db = al.distinctBase.parent
stt = account.stoTrie
if not stt.isNil and reHashOk:
let rc = al.distinctBase.getTrie.rootHash
if rc.isErr:


@ -30,17 +30,18 @@ else:
proc getParentHeader(self: CoreDbRef, header: BlockHeader): BlockHeader =
self.getBlockHeader(header.parentHash)
proc setParentCtx(com: CommonRef, header: BlockHeader): CoreDbCtxRef =
## Adjust state root (mainly for `Aristo`)
let
parent = com.db.getParentHeader(header)
ctx = com.db.ctxFromTx(parent.stateRoot).valueOr:
raiseAssert "setParentCtx: " & $$error
com.db.swapCtx ctx
type
SaveCtxEnv = object
db: CoreDbRef
ctx: CoreDbCtxRef
proc reset(com: CommonRef, saveCtx: CoreDbCtxRef) =
## Reset context
com.db.swapCtx(saveCtx).forget()
proc newCtx(com: CommonRef; root: eth_types.Hash256): SaveCtxEnv =
let ctx = com.db.ctxFromTx(root).valueOr:
raiseAssert "setParentCtx: " & $$error
SaveCtxEnv(db: com.db, ctx: ctx)
proc setCtx(saveCtx: SaveCtxEnv): SaveCtxEnv =
SaveCtxEnv(db: saveCtx.db, ctx: saveCtx.db.swapCtx saveCtx.ctx)
proc `%`(x: openArray[byte]): JsonNode =
@ -120,16 +121,16 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
let
# we add a memory layer between backend/lower layer db
# and capture state db snapshot during transaction execution
saveCtx = com.setParentCtx(header)
capture = com.db.newCapture.value
tracerInst = newLegacyTracer(tracerFlags)
captureCom = com.clone(capture.recorder)
vmState = BaseVMState.new(header, captureCom)
defer:
capture.forget
com.reset saveCtx
var stateDb = vmState.stateDB
saveCtx = setCtx com.newCtx(com.db.getParentHeader(header).stateRoot)
vmState = BaseVMState.new(header, captureCom)
stateDb = vmState.stateDB
defer:
saveCtx.setCtx().ctx.forget()
capture.forget()
if header.txRoot == EMPTY_ROOT_HASH: return newJNull()
doAssert(body.transactions.calcTxRoot == header.txRoot)
@ -141,6 +142,7 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
after = newJArray()
stateDiff = %{"before": before, "after": after}
beforeRoot: common.Hash256
beforeCtx: SaveCtxEnv
let
miner = vmState.coinbase()
@ -157,6 +159,7 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
stateDb.persist()
stateDiff["beforeRoot"] = %($stateDb.rootHash)
beforeRoot = stateDb.rootHash
beforeCtx = com.newCtx beforeRoot
let rc = vmState.processTransaction(tx, sender, header)
gasUsed = if rc.isOk: rc.value else: 0
@ -171,7 +174,12 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
break
# internal transactions:
var stateBefore = AccountsLedgerRef.init(capture.recorder, beforeRoot, com.pruneTrie)
let
saveCtxBefore = setCtx beforeCtx
stateBefore = AccountsLedgerRef.init(capture.recorder, beforeRoot, com.pruneTrie)
defer:
saveCtxBefore.setCtx().ctx.forget()
for idx, acc in tracedAccountsPairs(tracerInst):
before.captureAccount(stateBefore, acc, internalTxName & $idx)
@ -191,17 +199,18 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpState = false): JsonNode =
let
parent = com.db.getParentHeader(header)
saveCtx = com.setParentCtx(header)
capture = com.db.newCapture.value
captureCom = com.clone(capture.recorder)
# we only need a stack dump when scanning for internal transaction address
captureFlags = {DisableMemory, DisableStorage, EnableAccount}
tracerInst = newLegacyTracer(captureFlags)
saveCtx = setCtx com.newCtx(parent.stateRoot)
vmState = BaseVMState.new(header, captureCom, tracerInst)
miner = vmState.coinbase()
defer:
capture.forget
com.reset saveCtx
saveCtx.setCtx().ctx.forget()
capture.forget()
var
before = newJArray()
@ -251,14 +260,15 @@ proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpS
proc traceBlock*(com: CommonRef, header: BlockHeader, body: BlockBody, tracerFlags: set[TracerFlags] = {}): JsonNode =
let
saveCtx = com.setParentCtx(header)
capture = com.db.newCapture.value
captureCom = com.clone(capture.recorder)
tracerInst = newLegacyTracer(tracerFlags)
saveCtx = setCtx com.newCtx(com.db.getParentHeader(header).stateRoot)
vmState = BaseVMState.new(header, captureCom, tracerInst)
defer:
capture.forget
com.reset saveCtx
saveCtx.setCtx().ctx.forget()
capture.forget()
if header.txRoot == EMPTY_ROOT_HASH: return newJNull()
doAssert(body.transactions.calcTxRoot == header.txRoot)