Mirror of https://github.com/status-im/nimbus-eth1.git, synced 2025-02-04 16:25:10 +00:00
Revive json tracer unit tests (#2538)
* Some `Aristo` clean-ups/updates
* Re-implemented core-db tracer functionality
* Rename nimbus tracer `no-tracer.nim` => `tracer.nim`
  why: Restore original name for easy diff tracking with upcoming update
* Update nimbus tracer using new core-db tracer functionality
* Updating json tracer unit tests
* Enable json tracer unit tests

parent e331c9e9b7
commit 01b5c08763
@@ -14,3 +14,5 @@
 of proof nodes is rather small. Also, a right boundary leaf node is
 typically cleared. This needs to be re-checked when writing the `proof`
 function mentioned above.
+
+* `aristo_nearby` also qualifies for a re-write, now
@@ -559,7 +559,7 @@ proc pp*(
   ): string =
   sTab.ppXTab(db.orDefault)

-proc pp*(root: VertexID, leg: Leg; db = AristoDbRef(nil)): string =
+proc pp*(leg: Leg; root: VertexID; db = AristoDbRef(nil)): string =
   let db = db.orDefault()
   result = "(" & leg.wp.vid.ppVid & ","
   block:
@@ -583,7 +583,7 @@ proc pp*(hike: Hike; db = AristoDbRef(nil); indent = 4): string =
   else:
     if hike.legs[0].wp.vid != hike.root:
       result &= "(" & hike.root.ppVid & ")" & pfx
-    result &= hike.legs.mapIt(pp(hike.root, it, db)).join(pfx)
+    result &= hike.legs.mapIt(it.pp(hike.root, db)).join(pfx)
     result &= pfx & "(" & hike.tail.ppPathPfx & ")"
   result &= "]"

@@ -152,8 +152,10 @@ type


     # Part/proof node errors
+    PartArgNotInCore
     PartArgNotGenericRoot
     PartArgRootAlreadyUsed
+    PartArgRootAlreadyOnDatabase
     PartChkChangedKeyNotInKeyTab
     PartChkChangedVtxMissing
     PartChkCoreKeyLookupFailed
@@ -188,7 +190,6 @@ type
     PartRlpPayloadException
     PartRootKeysDontMatch
     PartRootVidsDontMatch
-    PartRootAlreadyOnDatabase
     PartVtxSlotWasModified
     PartVtxSlotWasNotModified

@@ -329,6 +329,11 @@ func to*(n: UInt256; T: type PathID): T =
   ## Representation of a scalar as `PathID` (preserving full information)
   T(pfx: n, length: 64)

+func to*(a: PathID; T: type UInt256): T =
+  if not a.pfx.isZero:
+    assert a.length < 64 # debugging only
+    result = a.pfx shr (4 * (64 - a.length))
+
 # ------------------------------------------------------------------------------
 # Public helpers: Miscellaneous mappings
 # ------------------------------------------------------------------------------
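The new reverse conversion drops the unused trailing nibbles of a `PathID`: with 4 bits per nibble, a path of `length` nibbles left-aligned in a 256-bit word comes back as a scalar after a right shift by `4 * (64 - length)` bits. A minimal sketch of the intended round trip (the concrete values are illustrative only, not taken from the diff):

    # Hypothetical check: a 2-nibble path 0xAB stored left-aligned
    # converts back to the scalar 0xAB (shift right by 4*(64-2) = 248 bits).
    let p = PathID(pfx: 0xAB.u256 shl 248, length: 2)
    doAssert p.to(UInt256) == 0xAB.u256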
@@ -151,11 +151,11 @@ proc zeroAdjust(
   if 0 < hike.legs.len:
     return ok(hike)

-  let root = db.getVtx (hike.root, hike.root)
-  if root.isValid:
+  let rootVtx = db.getVtx (hike.root, hike.root)
+  if rootVtx.isValid:
     block fail:
       var pfx: NibblesBuf
-      case root.vType:
+      case rootVtx.vType:
       of Branch:
         # Find first non-dangling link and assign it
         let nibbleID = block:
@@ -166,18 +166,28 @@ proc zeroAdjust(
           if hike.tail.len == 0:
             break fail
           hike.tail[0].int8
-        let n = root.branchBorderNibble nibbleID
+        let n = rootVtx.branchBorderNibble nibbleID
         if n < 0:
           # Before or after the database range
           return err((hike.root,NearbyBeyondRange))
-        pfx = root.ePfx & NibblesBuf.nibble(n.byte)
+        pfx = rootVtx.ePfx & NibblesBuf.nibble(n.byte)

       of Leaf:
-        pfx = root.lPfx
+        pfx = rootVtx.lPfx
         if not hike.accept pfx:
           # Before or after the database range
           return err((hike.root,NearbyBeyondRange))

+        # Pathological case: matching `rootVtx` which is a leaf
+        if hike.legs.len == 0 and hike.tail.len == 0:
+          return ok(Hike(
+            root: hike.root,
+            legs: @[Leg(
+              nibble: -1,
+              wp: VidVtxPair(
+                vid: hike.root,
+                vtx: rootVtx))]))
+
       var newHike = pfx.toHike(hike.root, db)
       if 0 < newHike.legs.len:
         return ok(newHike)
@@ -268,10 +278,6 @@ proc nearbyNext(
   # Some easy cases
   let hike = ? hike.zeroAdjust(db, doLeast=moveRight)

-  # if hike.legs[^1].wp.vtx.vType == Extension:
-  #   let vid = hike.legs[^1].wp.vtx.eVid
-  #   return hike.complete(vid, db, hikeLenMax, doLeast=moveRight)
-
   var
     uHike = hike
     start = true
@@ -159,6 +159,16 @@ proc partPut*(
   ok()


+proc partGetSubTree*(ps: PartStateRef; rootHash: Hash256): VertexID =
+  ## For the argument `roothash` retrieve the root vertex ID of a particular
+  ## sub tree from the partial state descriptor argument `ps`. The function
+  ## returns `VertexID(0)` if there is no match.
+  ##
+  for vid in ps.core.keys:
+    if ps[vid].to(Hash256) == rootHash:
+      return vid
+
+
 proc partReRoot*(
     ps: PartStateRef;
     frRoot: VertexID;
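The added lookup scans the `core` table linearly and uses `VertexID(0)` as its "not found" sentinel. A minimal usage sketch, assuming a populated `PartStateRef` descriptor `ps` and a candidate sub-tree root hash `rootHash`:

    # Illustrative only: resolve a sub-tree root vertex by its hash.
    let vid = ps.partGetSubTree rootHash
    if not vid.isValid:   # VertexID(0) => no matching sub tree
      echo "no sub-tree for the given root hash"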
@@ -166,9 +176,11 @@ proc partReRoot*(
     ): Result[void,AristoError] =
   ## Realign a generic root vertex (i.e `$2`..`$(LEAST_FREE_VID-1)`) for a
   ## `proof` state to a new root vertex.
-  if frRoot notin ps.core or frRoot == toRoot:
+  if frRoot == toRoot:
     return ok() # nothing to do

+  if frRoot notin ps.core:
+    return err(PartArgNotInCore)
   if frRoot < VertexID(2) or LEAST_FREE_VID <= frRoot.ord or
      toRoot < VertexID(2) or LEAST_FREE_VID <= toRoot.ord:
     return err(PartArgNotGenericRoot)
@@ -176,7 +188,7 @@ proc partReRoot*(
   if toRoot in ps.core:
     return err(PartArgRootAlreadyUsed)
   if ps.db.getVtx((toRoot,toRoot)).isValid:
-    return err(PartRootAlreadyOnDatabase)
+    return err(PartArgRootAlreadyOnDatabase)

   # Migrate
   for key in ps.byKey.keys:
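With the re-ordered guards, a missing core root is now reported explicitly instead of being silently treated as a no-op. A hedged sketch of the resulting error surface (the call pattern is illustrative only):

    # Assuming `ps` holds a proof state with generic root $2:
    ps.partReRoot(VertexID(2), VertexID(3)).isOkOr:
      case error
      of PartArgNotInCore:             discard # `frRoot` is not a core root
      of PartArgNotGenericRoot:        discard # outside $2..$(LEAST_FREE_VID-1)
      of PartArgRootAlreadyUsed:       discard # `toRoot` is a core root already
      of PartArgRootAlreadyOnDatabase: discard # `toRoot` vertex exists on the DB
      else:                            discard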
@@ -128,7 +128,8 @@ proc pp*(
   let
     pfx0 = indent.toPfx()
     pfx1 = indent.toPfx(1)
+    pfx2 = indent.toPfx(2)
   var pfx = ""
   if dbOk:
     result &= pfx & "<db>" & pfx1 & ps.db.pp(
@@ -147,9 +147,10 @@ proc pp*(
   if 0 < len:
     var qfx = ""
     result &= pfx1 & "{"
-    for (vid,vLst) in ps.core.pairs:
+    for vid in ps.core.keys.toSeq.sorted:
+      let vLst = ps.core.getOrDefault vid
       result &= qfx & "(" & vid.pp & ":" & vLst.pp(ps) & ")"
-      qfx = pfx1
+      qfx = pfx2
     result &= "}"
     pfx = pfx0
   if byKeyOk:
@@ -11,14 +11,16 @@
 {.push raises: [].}

 import
+  eth/common,
   ../../aristo as use_ari,
+  ../../aristo/aristo_desc/desc_identifiers,
   ../../aristo/[aristo_init/memory_only, aristo_walk],
   ../../kvt as use_kvt,
   ../../kvt/[kvt_init/memory_only, kvt_walk],
   ../base/[base_config, base_desc, base_helpers]

 # ------------------------------------------------------------------------------
-# Public constructor and helper
+# Public constructors
 # ------------------------------------------------------------------------------

 proc create*(dbType: CoreDbType; kvt: KvtDbRef; mpt: AristoDbRef): CoreDbRef =
@@ -51,6 +53,41 @@ proc newAristoVoidCoreDbRef*(): CoreDbRef =
     KvtDbRef.init(use_kvt.VoidBackendRef),
     AristoDbRef.init(use_ari.VoidBackendRef))

+proc newCtxByKey*(
+    ctx: CoreDbCtxRef;
+    key: Hash256;
+    info: static[string];
+      ): CoreDbRc[CoreDbCtxRef] =
+  const
+    rvid: RootedVertexID = (VertexID(1),VertexID(1))
+  let
+    db = ctx.parent
+
+    # Find `(vid,key)` on transaction stack
+    inx = block:
+      let rc = db.ariApi.call(findTx, ctx.mpt, rvid, key.to(HashKey))
+      if rc.isErr:
+        return err(rc.error.toError info)
+      rc.value
+
+    # Fork MPT descriptor that provides `(vid,key)`
+    newMpt = block:
+      let rc = db.ariApi.call(forkTx, ctx.mpt, inx)
+      if rc.isErr:
+        return err(rc.error.toError info)
+      rc.value
+
+    # Fork KVT descriptor parallel to `newMpt`
+    newKvt = block:
+      let rc = db.kvtApi.call(forkTx, ctx.kvt, inx)
+      if rc.isErr:
+        discard db.ariApi.call(forget, newMpt)
+        return err(rc.error.toError info)
+      rc.value
+
+  # Create new context
+  ok(db.bless CoreDbCtxRef(kvt: newKvt, mpt: newMpt))
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
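The new backend constructor forks both the MPT and the KVT descriptor at the transaction that carries the requested state key, and rolls the MPT fork back if the KVT fork fails. A minimal call sketch, assuming a context `ctx` and a known state root `stateRoot` (the `info` string is normally supplied by the public wrapper in `base.nim`):

    # Illustrative only; errors surface via CoreDbRc[CoreDbCtxRef].
    let newCtx = ctx.newCtxByKey(stateRoot, "newCtxByKey").valueOr:
      raiseAssert "no matching transaction: " & $$error
    # ... use `newCtx`, then release it with forget() when done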
nimbus/db/core_db/backend/aristo_trace.nim (new file, 956 lines)
@@ -0,0 +1,956 @@
+# Nimbus
+# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#    http://www.apache.org/licenses/LICENSE-2.0)
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#    http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified, or distributed except
+# according to those terms.
+
+##
+## Database Backend Tracer
+## =======================
+##
+
+{.push raises: [].}
+
+import
+  std/[sequtils, tables, typetraits],
+  eth/common,
+  results,
+  ../../aristo as use_aristo,
+  ../../aristo/aristo_desc,
+  ../../kvt as use_kvt,
+  ../../kvt/kvt_desc,
+  ../base/[base_config, base_desc]
+
+const
+  LogJournalMax = 1_000_000
+    ## Maximal size of a journal (organised as LRU)
+
+type
+  TracePfx = enum
+    TrpOops = 0
+    TrpKvt
+    TrpAccounts
+    TrpGeneric
+    TrpStorage
+
+  TraceRequest* = enum
+    TrqOops = 0
+    TrqFind
+    TrqAdd
+    TrqModify
+    TrqDelete
+
+  TraceDataType* = enum
+    TdtOops = 0
+    TdtBlob    ## Kvt and Aristo
+    TdtError   ## Kvt and Aristo
+    TdtVoid    ## Kvt and Aristo
+    TdtAccount ## Aristo only
+    TdtBigNum  ## Aristo only
+    TdtHash    ## Aristo only
+
+  TraceDataItemRef* = ref object
+    ## Log journal entry
+    pfx*: TracePfx     ## DB storage prefix
+    info*: int         ## `KvtApiProfNames` or `AristoApiProfNames`
+    req*: TraceRequest ## Logged action request
+    case kind*: TraceDataType
+    of TdtBlob:
+      blob*: Blob
+    of TdtError:
+      error*: int      ## `KvtError` or `AristoError`
+    of TdtAccount:
+      account*: AristoAccount
+    of TdtBigNum:
+      bigNum*: UInt256
+    of TdtHash:
+      hash*: Hash256
+    of TdtVoid, TdtOops:
+      discard
+
+  TraceLogInstRef* = ref object
+    ## Logger instance
+    base: TraceRecorderRef
+    level: int
+    truncated: bool
+    journal: KeyedQueue[Blob,TraceDataItemRef]
+
+  TraceRecorderRef* = ref object of RootRef
+    log: seq[TraceLogInstRef] ## Production stack for log database
+    db: CoreDbRef
+    kvtSave: KvtApiRef        ## Restore `KVT` data
+    ariSave: AristoApiRef     ## Restore `Aristo` data
+
+doAssert LEAST_FREE_VID <= 256 # needed for journal key byte prefix
+
+# ------------------------------------------------------------------------------
+# Private helpers
+# ------------------------------------------------------------------------------
+
+when CoreDbNoisyCaptJournal:
+  import
+    std/strutils,
+    chronicles,
+    stew/byteutils
+
+  func squeezeHex(s: string; ignLen = false): string =
+    result = if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. ^1]
+    if not ignLen:
+      let n = (s.len + 1) div 2
+      result &= "[" & (if 0 < n: "#" & $n else: "") & "]"
+
+  func stripZeros(a: string; toExp = false): string =
+    if 0 < a.len:
+      result = a.toLowerAscii.strip(leading=true, trailing=false, chars={'0'})
+      if result.len == 0:
+        result = "0"
+      elif result[^1] == '0' and toExp:
+        var n = 0
+        while result[^1] == '0':
+          let w = result.len
+          result.setLen(w-1)
+          n.inc
+        if n == 1:
+          result &= "0"
+        elif n == 2:
+          result &= "00"
+        elif 2 < n:
+          result &= "↑" & $n
+
+  func `$$`(w: openArray[byte]): string =
+    w.toHex.squeezeHex
+
+  func `$`(w: Blob): string =
+    w.toHex.squeezeHex
+
+  func `$`(w: UInt256): string =
+    "#" & w.toHex.stripZeros.squeezeHex
+
+  func `$`(w: Hash256): string =
+    "£" & w.data.toHex.squeezeHex
+
+  func `$`(w: VertexID): string =
+    if 0 < w.uint64: "$" & w.uint64.toHex.stripZeros else: "$ø"
+
+  func `$`(w: AristoAccount): string =
+    "(" & $w.nonce & "," & $w.balance & "," & $w.codeHash & ")"
+
+  func `$`(ti: TraceDataItemRef): string =
+    result = "(" &
+      (if ti.pfx == TrpKvt: $KvtApiProfNames(ti.info)
+       elif ti.pfx == TrpOops: "<oops>"
+       else: $AristoApiProfNames(ti.info))
+
+    result &= "," & (
+      case ti.req:
+      of TrqOops: "<oops>"
+      of TrqFind: ""
+      of TrqModify: "="
+      of TrqDelete: "-"
+      of TrqAdd: "+")
+
+    result &= (
+      case ti.kind:
+      of TdtOops: "<oops>"
+      of TdtBlob: $ti.blob
+      of TdtBigNum: $ti.bigNum
+      of TdtHash: $ti.hash
+      of TdtVoid: "ø"
+      of TdtError: (if ti.pfx == TrpKvt: $KvtError(ti.error)
+                    elif ti.pfx == TrpOops: "<oops>"
+                    else: $AristoError(ti.error))
+      of TdtAccount: $ti.account)
+
+    result &= ")"
+
+  func toStr(pfx: TracePfx, key: openArray[byte]): string =
+    case pfx:
+    of TrpOops:
+      "<oops>"
+    of TrpKvt:
+      $$(key.toOpenArray(0, key.len - 1))
+    of TrpAccounts:
+      "1:" & $$(key.toOpenArray(0, key.len - 1))
+    of TrpGeneric:
+      $key[0] & ":" & $$(key.toOpenArray(1, key.len - 1))
+    of TrpStorage:
+      "1:" & $$(key.toOpenArray(0, min(31, key.len - 1))) & ":" &
+        (if 32 < key.len: $$(key.toOpenArray(32, key.len - 1)) else: "")
+
+  func `$`(key: openArray[byte]; ti: TraceDataItemRef): string =
+    "(" &
+      TracePfx(key[0]).toStr(key.toOpenArray(1, key.len - 1)) & "," &
+      $ti & ")"
+
+# -------------------------------
+
+template logTxt(info: static[string]): static[string] =
+  "trace " & info
+
+func topLevel(tr: TraceRecorderRef): int =
+  tr.log.len - 1
+
+# --------------------
+
+proc jLogger(
+    tr: TraceRecorderRef;
+    key: openArray[byte];
+    ti: TraceDataItemRef;
+      ) =
+  ## Add or update journal entry. The `tr.pfx` argument indicates the key type:
+  ##
+  ## * `TrpKvt`: followed by KVT key
+  ## * `TrpAccounts`: followed by <account-path>
+  ## * `TrpGeneric`: followed by <root-ID> + <path>
+  ## * `TrpStorage`: followed by <account-path> + <storage-path>
+  ##
+  doAssert ti.pfx != TrpOops
+  let
+    pfx = @[ti.pfx.byte]
+    lRec = tr.log[^1].journal.lruFetch(pfx & @key).valueOr:
+      if LogJournalMax <= tr.log[^1].journal.len:
+        tr.log[^1].truncated = true
+      discard tr.log[^1].journal.lruAppend(pfx & @key, ti, LogJournalMax)
+      return
+  if ti.req != TrqFind:
+    lRec[] = ti[]
+
+proc jLogger(
+    tr: TraceRecorderRef;
+    accPath: Hash256;
+    ti: TraceDataItemRef;
+      ) =
+  tr.jLogger(accPath.data.toSeq, ti)
+
+proc jLogger(
+    tr: TraceRecorderRef;
+    ti: TraceDataItemRef;
+      ) =
+  tr.jLogger(EmptyBlob, ti)
+
+proc jLogger(
+    tr: TraceRecorderRef;
+    root: VertexID;
+    path: openArray[byte];
+    ti: TraceDataItemRef;
+      ) =
+  tr.jLogger(@[root.byte] & @path, ti)
+
+proc jLogger(
+    tr: TraceRecorderRef;
+    root: VertexID;
+    ti: TraceDataItemRef;
+      ) =
+  tr.jLogger(@[root.byte], ti)
+
+proc jLogger(
+    tr: TraceRecorderRef;
+    accPath: Hash256;
+    stoPath: Hash256;
+    ti: TraceDataItemRef;
+      ) =
+  tr.jLogger(accPath.data.toSeq & stoPath.data.toSeq, ti)
+
+# --------------------
+
+func to(w: AristoApiProfNames; T: type TracePfx): T =
+  case w:
+  of AristoApiProfFetchAccountRecordFn,
+     AristoApiProfFetchAccountStateFn,
+     AristoApiProfDeleteAccountRecordFn,
+     AristoApiProfMergeAccountRecordFn:
+    return TrpAccounts
+  of AristoApiProfFetchGenericDataFn,
+     AristoApiProfFetchGenericStateFn,
+     AristoApiProfDeleteGenericDataFn,
+     AristoApiProfDeleteGenericTreeFn,
+     AristoApiProfMergeGenericDataFn:
+    return TrpGeneric
+  of AristoApiProfFetchStorageDataFn,
+     AristoApiProfFetchStorageStateFn,
+     AristoApiProfDeleteStorageDataFn,
+     AristoApiProfDeleteStorageTreeFn,
+     AristoApiProfMergeStorageDataFn:
+    return TrpStorage
+  else:
+    discard
+  raiseAssert "Unsupported AristoApiProfNames: " & $w
+
+func to(w: KvtApiProfNames; T: type TracePfx): T =
+  TrpKvt
+
+# --------------------
+
+func logRecord(
+    info: KvtApiProfNames | AristoApiProfNames;
+    req: TraceRequest;
+    data: openArray[byte];
+      ): TraceDataItemRef =
+  TraceDataItemRef(
+    pfx: info.to(TracePfx),
+    info: info.ord,
+    req: req,
+    kind: TdtBlob,
+    blob: @data)
+
+func logRecord(
+    info: KvtApiProfNames | AristoApiProfNames;
+    req: TraceRequest;
+    error: KvtError | AristoError;
+      ): TraceDataItemRef =
+  TraceDataItemRef(
+    pfx: info.to(TracePfx),
+    info: info.ord,
+    req: req,
+    kind: TdtError,
+    error: error.ord)
+
+func logRecord(
+    info: KvtApiProfNames | AristoApiProfNames;
+    req: TraceRequest;
+      ): TraceDataItemRef =
+  TraceDataItemRef(
+    pfx: info.to(TracePfx),
+    info: info.ord,
+    req: req,
+    kind: TdtVoid)
+
+# --------------------
+
+func logRecord(
+    info: AristoApiProfNames;
+    req: TraceRequest;
+    accRec: AristoAccount;
+      ): TraceDataItemRef =
+  TraceDataItemRef(
+    pfx: info.to(TracePfx),
+    info: info.ord,
+    req: req,
+    kind: TdtAccount,
+    account: accRec)
+
+func logRecord(
+    info: AristoApiProfNames;
+    req: TraceRequest;
+    state: Hash256;
+      ): TraceDataItemRef =
+  TraceDataItemRef(
+    pfx: info.to(TracePfx),
+    info: info.ord,
+    req: req,
+    kind: TdtHash,
+    hash: state)
+
+func logRecord(
+    info: AristoApiProfNames;
+    req: TraceRequest;
+    sto: Uint256;
+      ): TraceDataItemRef =
+  TraceDataItemRef(
+    pfx: info.to(TracePfx),
+    info: info.ord,
+    req: req,
+    kind: TdtBigNum,
+    bigNum: sto)
+
+# ------------------------------------------------------------------------------
+# Private functions
+# ------------------------------------------------------------------------------
+
+proc kvtTraceRecorder(tr: TraceRecorderRef) =
+  let
+    api = tr.db.kvtApi
+    tracerApi = api.dup
+
+  # Set up new production api `tracerApi` and save the old one
+  tr.kvtSave = api
+  tr.db.kvtApi = tracerApi
+
+  # Update production api
+  tracerApi.get =
+    proc(kvt: KvtDbRef; key: openArray[byte]): Result[Blob,KvtError] =
+      const info = KvtApiProfGetFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      let data = api.get(kvt, key).valueOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, key=($$key), error
+        tr.jLogger(key, logRecord(info, TrqFind, error))
+        return err(error) # No way
+
+      tr.jLogger(key, logRecord(info, TrqFind, data))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, key=($$key), data=($$data)
+      ok(data)
+
+  tracerApi.del =
+    proc(kvt: KvtDbRef; key: openArray[byte]): Result[void,KvtError] =
+      const info = KvtApiProfDelFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB (for comprehensive log record)
+      let tiRec = block:
+        let rc = api.get(kvt, key)
+        if rc.isOk:
+          logRecord(info, TrqDelete, rc.value)
+        elif rc.error == GetNotFound:
+          logRecord(info, TrqDelete)
+        else:
+          when CoreDbNoisyCaptJournal:
+            debug logTxt $info, level, key=($$key), error=rc.error
+          tr.jLogger(key, logRecord(info, TrqDelete, rc.error))
+          return err(rc.error)
+
+      # Delete from DB
+      api.del(kvt, key).isOkOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, key=($$key), error
+        tr.jLogger(key, logRecord(info, TrqDelete, error))
+        return err(error)
+
+      # Log on journal
+      tr.jLogger(key, tiRec)
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, key=($$key)
+      ok()
+
+  tracerApi.put =
+    proc(kvt: KvtDbRef; key, data: openArray[byte]): Result[void,KvtError] =
+      const info = KvtApiProfPutFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB
+      let
+        hasKey = api.hasKey(kvt, key).valueOr:
+          when CoreDbNoisyCaptJournal:
+            debug logTxt $info, level, key=($$key), error
+          tr.jLogger(key, logRecord(info, TrqAdd, error))
+          return err(error)
+        mode = if hasKey: TrqModify else: TrqAdd
+
+      # Store on DB
+      api.put(kvt, key, data).isOkOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, key=($$key), data=($$data)
+        tr.jLogger(key, logRecord(info, mode, error))
+        return err(error)
+
+      tr.jLogger(key, logRecord(info, mode, data))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, key=($$key), data=($$data)
+      ok()
+
+  assert tr.kvtSave != tr.db.kvtApi
+  assert tr.kvtSave.del != tr.db.kvtApi.del
+  assert tr.kvtSave.hasKey == tr.db.kvtApi.hasKey
+
+
+proc ariTraceRecorder(tr: TraceRecorderRef) =
+  let
+    api = tr.db.ariApi
+    tracerApi = api.dup
+
+  # Set up new production api `tracerApi` and save the old one
+  tr.ariSave = api
+  tr.db.ariApi = tracerApi
+
+  tracerApi.fetchAccountRecord =
+    proc(mpt: AristoDbRef;
+         accPath: Hash256;
+           ): Result[AristoAccount,AristoError] =
+      const info = AristoApiProfFetchAccountRecordFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB
+      let accRec = api.fetchAccountRecord(mpt, accPath).valueOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, accPath, error
+        tr.jLogger(accPath, logRecord(info, TrqFind, error))
+        return err(error)
+
+      tr.jLogger(accPath, logRecord(info, TrqFind, accRec))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, accPath, accRec
+      ok accRec
+
+  tracerApi.fetchAccountState =
+    proc(mpt: AristoDbRef;
+         updateOk: bool;
+           ): Result[Hash256,AristoError] =
+      const info = AristoApiProfFetchAccountStateFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB
+      let state = api.fetchAccountState(mpt, updateOk).valueOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, updateOk, error
+        tr.jLogger logRecord(info, TrqFind, error)
+        return err(error)
+
+      tr.jLogger logRecord(info, TrqFind, state)
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, updateOk, state
+      ok state
+
+  tracerApi.fetchGenericData =
+    proc(mpt: AristoDbRef;
+         root: VertexID;
+         path: openArray[byte];
+           ): Result[Blob,AristoError] =
+      const info = AristoApiProfFetchGenericDataFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB
+      let data = api.fetchGenericData(mpt, root, path).valueOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, root, path=($$path), error
+        tr.jLogger(root, path, logRecord(info, TrqFind, error))
+        return err(error)
+
+      tr.jLogger(root, path, logRecord(info, TrqFind, data))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, root, path=($$path), data
+      ok data
+
+  tracerApi.fetchGenericState =
+    proc(mpt: AristoDbRef;
+         root: VertexID;
+         updateOk: bool;
+           ): Result[Hash256,AristoError] =
+      const info = AristoApiProfFetchGenericStateFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB
+      let state = api.fetchAccountState(mpt, updateOk).valueOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, root, updateOk, error
+        tr.jLogger(root, logRecord(info, TrqFind, error))
+        return err(error)
+
+      tr.jLogger(root, logRecord(info, TrqFind, state))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, root, updateOk, state
+      ok state
+
+  tracerApi.fetchStorageData =
+    proc(mpt: AristoDbRef;
+         accPath: Hash256;
+         stoPath: Hash256;
+           ): Result[Uint256,AristoError] =
+      const info = AristoApiProfFetchStorageDataFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB
+      let stoData = api.fetchStorageData(mpt, accPath, stoPath).valueOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, accPath, stoPath, error
+        tr.jLogger(accPath, stoPath, logRecord(info, TrqFind, error))
+        return err(error)
+
+      tr.jLogger(accPath, stoPath, logRecord(info, TrqFind, stoData))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, accPath, stoPath, stoData
+      ok stoData
+
+  tracerApi.fetchStorageState =
+    proc(mpt: AristoDbRef;
+         accPath: Hash256;
+         updateOk: bool;
+           ): Result[Hash256,AristoError] =
+      const info = AristoApiProfFetchStorageStateFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB
+      let state = api.fetchStorageState(mpt, accPath, updateOk).valueOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, accPath, updateOk, error
+        tr.jLogger(accPath, logRecord(info, TrqFind, error))
+        return err(error)
+
+      tr.jLogger(accPath, logRecord(info, TrqFind, state))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, accPath, updateOk, state
+      ok state
+
+  tracerApi.deleteAccountRecord =
+    proc(mpt: AristoDbRef;
+         accPath: Hash256;
+           ): Result[void,AristoError] =
+      const info = AristoApiProfDeleteAccountRecordFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB (for comprehensive log record)
+      let tiRec = block:
+        let rc = api.fetchAccountRecord(mpt, accPath)
+        if rc.isOk:
+          logRecord(info, TrqDelete, rc.value)
+        elif rc.error == FetchPathNotFound:
+          logRecord(info, TrqDelete)
+        else:
+          when CoreDbNoisyCaptJournal:
+            debug logTxt $info, level, accPath, error=rc.error
+          tr.jLogger(accPath, logRecord(info, TrqDelete, rc.error))
+          return err(rc.error)
+
+      # Delete from DB
+      api.deleteAccountRecord(mpt, accPath).isOkOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, accPath, error
+        tr.jLogger(accPath, logRecord(info, TrqDelete, error))
+        return err(error)
+
+      # Log on journal
+      tr.jLogger(accPath, tiRec)
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, accPath
+      ok()
+
+  tracerApi.deleteGenericData =
+    proc(mpt: AristoDbRef;
+         root: VertexID;
+         path: openArray[byte];
+           ): Result[bool,AristoError] =
+      const info = AristoApiProfDeleteGenericDataFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB (for comprehensive log record)
+      let tiRec = block:
+        let rc = api.fetchGenericData(mpt, root, path)
+        if rc.isOk:
+          logRecord(info, TrqDelete, rc.value)
+        elif rc.error == FetchPathNotFound:
+          logRecord(info, TrqDelete)
+        else:
+          when CoreDbNoisyCaptJournal:
+            debug logTxt $info, level, root, path=($$path), error=rc.error
+          tr.jLogger(root, path, logRecord(info, TrqDelete, rc.error))
+          return err(rc.error)
+
+      # Delete from DB
+      let emptyTrie = api.deleteGenericData(mpt, root, path).valueOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, root, path=($$path), error
+        tr.jLogger(root, path, logRecord(info, TrqDelete, error))
+        return err(error)
+
+      # Log on journal
+      tr.jLogger(root, path, tiRec)
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, root, path=($$path), emptyTrie
+      ok emptyTrie
+
+  tracerApi.deleteGenericTree =
+    proc(mpt: AristoDbRef;
+         root: VertexID;
+           ): Result[void,AristoError] =
+      const info = AristoApiProfDeleteGenericTreeFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Delete from DB
+      api.deleteGenericTree(mpt, root).isOkOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, root, error
+        tr.jLogger(root, logRecord(info, TrqDelete, error))
+        return err(error)
+
+      # Log on journal
+      tr.jLogger(root, logRecord(info, TrqDelete))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, root
+      ok()
+
+  tracerApi.deleteStorageData =
+    proc(mpt: AristoDbRef;
+         accPath: Hash256;
+         stoPath: Hash256;
+           ): Result[bool,AristoError] =
+      const info = AristoApiProfDeleteStorageDataFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB (for comprehensive log record)
+      let tiRec = block:
+        let rc = api.fetchStorageData(mpt, accPath, stoPath)
+        if rc.isOk:
+          logRecord(info, TrqDelete, rc.value)
+        elif rc.error == FetchPathNotFound:
+          logRecord(info, TrqDelete)
+        else:
+          when CoreDbNoisyCaptJournal:
+            debug logTxt $info, level, accPath, stoPath, error=rc.error
+          tr.jLogger(accPath, stoPath, logRecord(info, TrqDelete, rc.error))
+          return err(rc.error)
+
+      let emptyTrie = api.deleteStorageData(mpt, accPath, stoPath).valueOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, accPath, stoPath, error
+        tr.jLogger(accPath, stoPath, logRecord(info, TrqDelete, error))
+        return err(error)
+
+      # Log on journal
+      tr.jLogger(accPath, stoPath, tiRec)
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, accPath, stoPath, emptyTrie
+      ok emptyTrie
+
+  tracerApi.deleteStorageTree =
+    proc(mpt: AristoDbRef;
+         accPath: Hash256;
+           ): Result[void,AristoError] =
+      const info = AristoApiProfDeleteStorageTreeFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Delete from DB
+      api.deleteStorageTree(mpt, accPath).isOkOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, accPath, error
+        tr.jLogger(accPath, logRecord(info, TrqDelete, error))
+        return err(error)
+
+      # Log on journal
+      tr.jLogger(accPath, logRecord(info, TrqDelete))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, accPath
+      ok()
+
+  tracerApi.mergeAccountRecord =
+    proc(mpt: AristoDbRef;
+         accPath: Hash256;
+         accRec: AristoAccount;
+           ): Result[bool,AristoError] =
+      const info = AristoApiProfMergeAccountRecordFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB (for comprehensive log record)
+      let
+        hadPath = api.hasPathAccount(mpt, accPath).valueOr:
+          when CoreDbNoisyCaptJournal:
+            debug logTxt $info, level, accPath, error
+          tr.jLogger(accPath, logRecord(info, TrqAdd, error))
+          return err(error)
+        mode = if hadPath: TrqModify else: TrqAdd
+
+      # Do the merge
+      let updated = api.mergeAccountRecord(mpt, accPath, accRec).valueOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, accPath, hadPath, error
+        tr.jLogger(accPath, logRecord(info, mode, error))
+        return err(error)
+
+      # Log on journal
+      tr.jLogger(accPath, logRecord(info, mode, accRec))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, accPath, accRec, hadPath, updated
+      ok updated
+
+  tracerApi.mergeGenericData =
+    proc(mpt: AristoDbRef;
+         root: VertexID;
+         path: openArray[byte];
+         data: openArray[byte];
+           ): Result[bool,AristoError] =
+      const info = AristoApiProfMergeGenericDataFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB (for comprehensive log record)
+      let
+        hadPath = api.hasPathGeneric(mpt, root, path).valueOr:
+          when CoreDbNoisyCaptJournal:
+            debug logTxt $info, level, root, path, error
+          tr.jLogger(root, path, logRecord(info, TrqAdd, error))
+          return err(error)
+        mode = if hadPath: TrqModify else: TrqAdd
+
+      # Do the merge
+      let updated = api.mergeGenericData(mpt, root, path, data).valueOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, root, path, error
+        tr.jLogger(root, path, logRecord(info, mode, error))
+        return err(error)
+
+      # Log on journal
+      tr.jLogger(root, path, logRecord(info, mode, data))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, root, path, data=($$data), hadPath, updated
+      ok updated
+
+  tracerApi.mergeStorageData =
+    proc(mpt: AristoDbRef;
+         accPath: Hash256;
+         stoPath: Hash256;
+         stoData: UInt256;
+           ): Result[void,AristoError] =
+      const info = AristoApiProfMergeStorageDataFn
+
+      when CoreDbNoisyCaptJournal:
+        let level = tr.topLevel()
+
+      # Find entry on DB (for comprehensive log record)
+      let
+        hadPath = api.hasPathStorage(mpt, accPath, stoPath).valueOr:
+          when CoreDbNoisyCaptJournal:
+            debug logTxt $info, level, accPath, stoPath, error
+          tr.jLogger(accPath, stoPath, logRecord(info, TrqAdd, error))
+          return err(error)
+        mode = if hadPath: TrqModify else: TrqAdd
+
+      # Do the merge
+      api.mergeStorageData(mpt, accPath, stoPath, stoData).isOkOr:
+        when CoreDbNoisyCaptJournal:
+          debug logTxt $info, level, accPath, stoPath, error
+        tr.jLogger(accPath, stoPath, logRecord(info, mode, error))
+        return err(error)
+
+      # Log on journal
+      tr.jLogger(accPath, stoPath, logRecord(info, mode, stoData))
+
+      when CoreDbNoisyCaptJournal:
+        debug logTxt $info, level, accPath, stoPath, stoData, hadPath
+      ok()
+
+  assert tr.ariSave != tr.db.ariApi
+  assert tr.ariSave.deleteAccountRecord != tr.db.ariApi.deleteAccountRecord
+  assert tr.ariSave.hasPathAccount == tr.db.ariApi.hasPathAccount
+
+# ------------------------------------------------------------------------------
+# Public functions
+# ------------------------------------------------------------------------------
+
+proc topInst*(tr: TraceRecorderRef): TraceLogInstRef =
+  ## Get top level logger
+  tr.log[^1]
+
+func truncated*(log: TraceLogInstRef): bool =
+  ## True if journal was truncated due to collecting too many entries
+  log.truncated
+
+func level*(log: TraceLogInstRef): int =
+  ## Non-negative stack level of this log instance.
+  log.level
+
+func journal*(log: TraceLogInstRef): KeyedQueue[Blob,TraceDataItemRef] =
+  ## Get the journal
+  log.journal
+
+func db*(log: TraceLogInstRef): CoreDbRef =
+  ## Get database
+  log.base.db
+
+iterator kvtLog*(log: TraceLogInstRef): (Blob,TraceDataItemRef) =
+  ## Extract `Kvt` journal
+  for p in log.journal.nextPairs:
+    let pfx = TracePfx(p.key[0])
+    if pfx == TrpKvt:
+      yield (p.key[1..^1], p.data)
+
+proc kvtLogBlobs*(log: TraceLogInstRef): seq[(Blob,Blob)] =
+  log.kvtLog.toSeq
+    .filterIt(it[1].kind==TdtBlob)
+    .mapIt((it[0],it[1].blob))
+
+iterator ariLog*(log: TraceLogInstRef): (VertexID,Blob,TraceDataItemRef) =
+  ## Extract `Aristo` journal
+  for p in log.journal.nextPairs:
+    let
+      pfx = TracePfx(p.key[0])
+      (root, key) = block:
+        case pfx:
+        of TrpAccounts,TrpStorage:
+          (VertexID(1), p.key[1..^1])
+        of TrpGeneric:
+          (VertexID(p.key[1]), p.key[2..^1])
+        else:
+          continue
+    yield (root, key, p.data)
+
+proc pop*(log: TraceLogInstRef): bool =
+  ## Reduce logger stack by the argument descriptor `log` which must be the
+  ## top entry on the stack. The function returns `true` if the descriptor
+  ## `log` was not the only one on stack and the stack was reduced by the
+  ## top entry. Otherwise nothing is done and `false` returned.
+  ##
+  let tr = log.base
+  doAssert log.level == tr.topLevel()
+  if 1 < tr.log.len: # Always leave one instance on stack
+    tr.log.setLen(tr.log.len - 1)
+    return true
+
+proc push*(tr: TraceRecorderRef) =
+  ## Push overlay logger instance
+  tr.log.add TraceLogInstRef(base: tr, level: tr.log.len)
+
+# ------------------------------------------------------------------------------
+# Public constructor/destructor
+# ------------------------------------------------------------------------------
+
+proc init*(
+    T: type TraceRecorderRef;   # Recorder desc to instantiate
+    db: CoreDbRef;              # Database
+      ): T =
+  ## Constructor, create initial/base tracer descriptor
+  result = T(db: db)
+  result.push()
+  result.kvtTraceRecorder()
+  result.ariTraceRecorder()
+
+proc restore*(tr: TraceRecorderRef) =
+  ## Restore production API.
+  tr.db.kvtApi = tr.kvtSave
+  tr.db.ariApi = tr.ariSave
+  tr[].reset
+
+# ------------------------------------------------------------------------------
+# End
+# ------------------------------------------------------------------------------
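Taken together, this file installs the tracer by hooking duplicated `KvtApiRef`/`AristoApiRef` function tables into the database descriptor: `init` pushes the base journal, `push`/`pop` nest overlay journals, and `restore` puts the original API back. A hedged end-to-end sketch, assuming an instantiated `CoreDbRef` named `db`:

    # Illustrative lifecycle only (names as introduced in this file).
    let tr = TraceRecorderRef.init(db)   # hook tracer, push base journal
    tr.push()                            # nest an overlay journal
    # ... run database operations; get/put/del etc. are now journalled ...
    for (key, data) in tr.topInst().kvtLogBlobs():
      discard (key, data)                # inspect captured KVT writes
    discard tr.topInst().pop()           # drop the overlay journal
    tr.restore()                         # uninstall tracer, restore API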
@@ -15,6 +15,7 @@ import
   eth/common,
   "../.."/[constants, errors],
   ".."/[kvt, aristo],
+  ./backend/aristo_db,
   ./base/[api_tracking, base_config, base_desc, base_helpers]

 export
@@ -44,7 +45,7 @@ when CoreDbEnableProfiling:
     CoreDbFnInx,
     CoreDbProfListRef

-when CoreDbEnableCaptJournal and false:
+when CoreDbEnableCaptJournal:
   import
     ./backend/aristo_trace
   type
@@ -69,33 +70,69 @@ proc ctx*(db: CoreDbRef): CoreDbCtxRef =
   ##
   db.defCtx

-proc swapCtx*(db: CoreDbRef; ctx: CoreDbCtxRef): CoreDbCtxRef =
-  ## Activate argument context `ctx` as default and return the previously
-  ## active context. This function goes typically together with `forget()`. A
-  ## valid scenario might look like
-  ## ::
-  ##   proc doSomething(db: CoreDbRef; ctx: CoreDbCtxRef) =
-  ##     let saved = db.swapCtx ctx
-  ##     defer: db.swapCtx(saved).forget()
-  ##     ...
+proc newCtxByKey*(ctx: CoreDbCtxRef; root: Hash256): CoreDbRc[CoreDbCtxRef] =
+  ## Create new context derived from a matching transaction of the currently
+  ## active context. If successful, the resulting context has the following
+  ## properties:
+  ##
+  ## * Transaction level is 1
+  ## * The state of the accounts column is equal to the argument `root`
+  ##
+  ## If successful, the resulting descriptor **must** be manually released
+  ## with `forget()` when it is not used, anymore.
+  ##
+  ## Note:
+  ##   The underlying `Aristo` backend uses lazy hashing so this function
+  ##   might fail simply because there is no computed state when nesting
+  ##   the next transaction. If the previous transaction needs to be found,
+  ##   then it must be called like this:
+  ## ::
+  ##   let db = ..                          # Instantiate CoreDb handle
+  ##   ...
+  ##   discard db.ctx.getAccounts.state()   # Compute state hash
+  ##   db.ctx.newTransaction()              # Enter new transaction
+  ##   ...
+  ##
+  ## However, remember that unused hash computations are costly relative
+  ## to processing time.
+  ##
+  ctx.setTrackNewApi CtxNewCtxByKeyFn
+  result = ctx.newCtxByKey(root, $api)
+  ctx.ifTrackNewApi: debug logTxt, api, elapsed, root=($$root), result
+
+proc swapCtx*(ctx: CoreDbCtxRef; db: CoreDbRef): CoreDbCtxRef =
+  ## Activate argument context `ctx` as default and return the previously
+  ## active context. This function goes typically together with `forget()`.
+  ## A valid scenario might look like
+  ## ::
+  ##   let db = ..                              # Instantiate CoreDb handle
+  ##   ...
+  ##   let ctx = newCtxByKey(..).expect "ctx"   # Create new context
+  ##   let saved = db.swapCtx ctx               # Swap context handles
+  ##   defer: db.swapCtx(saved).forget()        # Restore
+  ##   ...
   ##
   doAssert not ctx.isNil
-  db.setTrackNewApi BaseSwapCtxFn
+  assert db.defCtx != ctx # debugging only
+  db.setTrackNewApi CtxSwapCtxFn
+
+  # Swap default context with argument `ctx`
   result = db.defCtx
+  db.defCtx = ctx
+
   # Set read-write access and install
   CoreDbAccRef(ctx).call(reCentre, db.ctx.mpt).isOkOr:
     raiseAssert $api & " failed: " & $error
   CoreDbKvtRef(ctx).call(reCentre, db.ctx.kvt).isOkOr:
     raiseAssert $api & " failed: " & $error
-  db.defCtx = ctx
+  doAssert db.defCtx != result
   db.ifTrackNewApi: debug logTxt, api, elapsed

 proc forget*(ctx: CoreDbCtxRef) =
   ## Dispose `ctx` argument context and related columns created with this
-  ## context. This function fails if `ctx` is the default context.
+  ## context. This function throws an exception if `ctx` is the default
+  ## context.
   ##
   ctx.setTrackNewApi CtxForgetFn
+  doAssert ctx != ctx.parent.defCtx
   CoreDbAccRef(ctx).call(forget, ctx.mpt).isOkOr:
     raiseAssert $api & ": " & $error
   CoreDbKvtRef(ctx).call(forget, ctx.kvt).isOkOr:
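The documented flow now splits context creation (`newCtxByKey`) from activation (`swapCtx`). A compact sketch of the scenario described in the docstrings above, assuming an instantiated handle `db` and a historical state root `root` (argument order per the new `swapCtx` signature; illustrative only):

    # Make sure the state hash is computed before looking up the transaction.
    discard db.ctx.getAccounts.state()
    let ctx = db.ctx.newCtxByKey(root).expect "ctx"  # create new context
    let saved = ctx.swapCtx db                       # activate, keep default
    defer: saved.swapCtx(db).forget()                # restore and dispose
    # ... read-only work against the historical state ...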
@@ -713,66 +750,54 @@ proc dispose*(tx: CoreDbTxRef) =
 # Public tracer methods
 # ------------------------------------------------------------------------------

-when CoreDbEnableCaptJournal and false: # currently disabled
-  proc newCapture*(
-      db: CoreDbRef;
-        ): CoreDbRc[CoreDbCaptRef] =
-    ## Trace constructor providing an overlay on top of the argument database
-    ## `db`. This overlay provides a replacement database handle that can be
-    ## retrieved via `db.recorder()` (which can in turn be ovelayed.) While
-    ## running the overlay stores data in a log-table which can be retrieved
-    ## via `db.logDb()`.
-    ##
-    ## Caveat:
-    ##   The original database argument `db` should not be used while the tracer
-    ##   is active (i.e. exists as overlay). The behaviour for this situation
-    ##   is undefined and depends on the backend implementation of the tracer.
-    ##
-    db.setTrackNewApi BaseNewCaptureFn
-    result = db.methods.newCaptureFn flags
+when CoreDbEnableCaptJournal:
+  proc pushCapture*(db: CoreDbRef): CoreDbCaptRef =
+    ## ..
+    ##
+    db.setTrackNewApi BasePushCaptureFn
+    if db.tracerHook.isNil:
+      db.tracerHook = TraceRecorderRef.init(db)
+    else:
+      TraceRecorderRef(db.tracerHook).push()
+    result = TraceRecorderRef(db.tracerHook).topInst().CoreDbCaptRef
     db.ifTrackNewApi: debug logTxt, api, elapsed, result

-  proc recorder*(cpt: CoreDbCaptRef): CoreDbRef =
-    ## Getter, returns a tracer replacement handle to be used as new database.
-    ## It records every action like fetch, store, hasKey, hasPath and delete.
-    ## This descriptor can be superseded by a new overlay tracer (using
-    ## `newCapture()`, again.)
-    ##
-    ## Caveat:
-    ##   Unless the desriptor `cpt` referes to the top level overlay tracer, the
-    ##   result is undefined and depends on the backend implementation of the
-    ##   tracer.
-    ##
-    cpt.setTrackNewApi CptRecorderFn
-    result = cpt.methods.recorderFn()
-    cpt.ifTrackNewApi: debug logTxt, api, elapsed
+  proc level*(cpt: CoreDbCaptRef): int =
+    ## Getter, returns the positive number of stacked instances.
+    ##
+    let log = cpt.distinctBase
+    log.db.setTrackNewApi CptLevelFn
+    result = log.level()
+    log.db.ifTrackNewApi: debug logTxt, api, elapsed, result

-  proc logDb*(cp: CoreDbCaptRef): TableRef[Blob,Blob] =
-    ## Getter, returns the logger table for the overlay tracer database.
-    ##
-    ## Caveat:
-    ##   Unless the desriptor `cpt` referes to the top level overlay tracer, the
-    ##   result is undefined and depends on the backend implementation of the
-    ##   tracer.
-    ##
-    cp.setTrackNewApi CptLogDbFn
-    result = cp.methods.logDbFn()
-    cp.ifTrackNewApi: debug logTxt, api, elapsed
+  proc kvtLog*(cpt: CoreDbCaptRef): seq[(Blob,Blob)] =
+    ## Getter, returns the `Kvt` logger list for the argument instance.
+    ##
+    let log = cpt.distinctBase
+    log.db.setTrackNewApi CptKvtLogFn
+    result = log.kvtLogBlobs()
+    log.db.ifTrackNewApi: debug logTxt, api, elapsed

-  proc flags*(cp: CoreDbCaptRef):set[CoreDbCaptFlags] =
-    ## Getter
-    ##
-    cp.setTrackNewApi CptFlagsFn
-    result = cp.methods.getFlagsFn()
-    cp.ifTrackNewApi: debug logTxt, api, elapsed, result
-
-  proc forget*(cp: CoreDbCaptRef) =
+  proc pop*(cpt: CoreDbCaptRef) =
     ## Explicitely stop recording the current tracer instance and reset to
     ## previous level.
     ##
-    cp.setTrackNewApi CptForgetFn
-    cp.methods.forgetFn()
-    cp.ifTrackNewApi: debug logTxt, api, elapsed
+    let db = cpt.distinctBase.db
+    db.setTrackNewApi CptPopFn
+    if not cpt.distinctBase.pop():
+      TraceRecorderRef(db.tracerHook).restore()
+      db.tracerHook = TraceRecorderRef(nil)
+    db.ifTrackNewApi: debug logTxt, api, elapsed, cpt
+
+  proc stopCapture*(db: CoreDbRef) =
+    ## Discard capture instances. This function is equivalent to `pop()`-ing
+    ## all instances.
+    ##
+    db.setTrackNewApi CptStopCaptureFn
+    if not db.tracerHook.isNil:
+      TraceRecorderRef(db.tracerHook).restore()
+      db.tracerHook = TraceRecorderRef(nil)
+    db.ifTrackNewApi: debug logTxt, api, elapsed

 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
@ -50,20 +50,21 @@ type
|
|||||||
|
|
||||||
BaseFinishFn = "finish"
|
BaseFinishFn = "finish"
|
||||||
BaseLevelFn = "level"
|
BaseLevelFn = "level"
|
||||||
BaseNewCaptureFn = "newCapture"
|
BasePushCaptureFn = "pushCapture"
|
||||||
BaseNewCtxFromTxFn = "ctxFromTx"
|
|
||||||
BaseNewTxFn = "newTransaction"
|
BaseNewTxFn = "newTransaction"
|
||||||
BasePersistentFn = "persistent"
|
BasePersistentFn = "persistent"
|
||||||
BaseStateBlockNumberFn = "stateBlockNumber"
|
BaseStateBlockNumberFn = "stateBlockNumber"
|
||||||
BaseSwapCtxFn = "swapCtx"
|
|
||||||
|
|
||||||
CptLogDbFn = "cpt/logDb"
|
CptKvtLogFn = "kvtLog"
|
||||||
CptRecorderFn = "cpt/recorder"
|
CptLevelFn = "level"
|
||||||
CptForgetFn = "cpt/forget"
|
CptPopFn = "pop"
|
||||||
|
CptStopCaptureFn = "stopCapture"
|
||||||
|
|
||||||
CtxForgetFn = "ctx/forget"
|
CtxForgetFn = "ctx/forget"
|
||||||
CtxGetAccountsFn = "getAccounts"
|
CtxGetAccountsFn = "getAccounts"
|
||||||
CtxGetGenericFn = "getGeneric"
|
CtxGetGenericFn = "getGeneric"
|
||||||
|
CtxNewCtxByKeyFn = "newCtxByKey"
|
||||||
|
CtxSwapCtxFn = "swapCtx"
|
||||||
|
|
||||||
KvtDelFn = "del"
|
KvtDelFn = "del"
|
||||||
KvtGetFn = "get"
|
KvtGetFn = "get"
|
||||||
|
@ -8,39 +8,93 @@
|
|||||||
# at your option. This file may not be copied, modified, or distributed except
|
# at your option. This file may not be copied, modified, or distributed except
|
||||||
# according to those terms.
|
# according to those terms.
|
||||||
|
|
||||||
# TODO: CoreDb module needs to be updated
|
{.push raises: [].}
|
||||||
|
|
||||||
import
|
import
|
||||||
std/[strutils, json],
|
std/[strutils, json],
|
||||||
./common/common,
|
|
||||||
./db/[core_db, ledger],
|
|
||||||
./utils/utils,
|
|
||||||
./evm/tracer/legacy_tracer,
|
|
||||||
./constants,
|
|
||||||
./transaction,
|
|
||||||
./core/executor,
|
|
||||||
./evm/[state, types],
|
|
||||||
nimcrypto/utils as ncrutils,
|
nimcrypto/utils as ncrutils,
|
||||||
web3/conversions, ./launcher,
|
|
||||||
results,
|
results,
|
||||||
./beacon/web3_eth_conv
|
web3/conversions,
|
||||||
|
./beacon/web3_eth_conv,
|
||||||
|
./common/common,
|
||||||
|
./constants,
|
||||||
|
./core/executor,
|
||||||
|
./db/[core_db, ledger],
|
||||||
|
./evm/[code_bytes, state, types],
|
||||||
|
./evm/tracer/legacy_tracer,
|
||||||
|
./launcher,
|
||||||
|
./transaction,
|
||||||
|
./utils/utils
|
||||||
|
|
||||||
proc getParentHeader(self: CoreDbRef, header: BlockHeader): BlockHeader =
|
when not CoreDbEnableCaptJournal:
|
||||||
self.getBlockHeader(header.parentHash)
|
{.error: "Compiler flag missing for tracer, try -d:dbjapi_enabled".}
|
||||||
|
|
||||||
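
The `when not CoreDbEnableCaptJournal` guard above fails the build early instead of producing a half-working tracer. A generic sketch of the same pattern, using a hypothetical `myFeature` define that is not part of nimbus-eth1:

const myFeature {.booldefine.} = false   # set with -d:myFeature

when not myFeature:
  {.warning: "myFeature disabled, rebuild with -d:myFeature".}
else:
  proc featureOnlyProc*() = discard      # only compiled when the flag is set
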
type
|
type
|
||||||
SaveCtxEnv = object
|
CaptCtxRef = ref object
|
||||||
db: CoreDbRef
|
db: CoreDbRef # not `nil`
|
||||||
ctx: CoreDbCtxRef
|
root: common.Hash256
|
||||||
|
ctx: CoreDbCtxRef # not `nil`
|
||||||
|
cpt: CoreDbCaptRef # not `nil`
|
||||||
|
restore: CoreDbCtxRef # `nil` unless `ctx` activated
|
||||||
|
|
||||||
proc newCtx(com: CommonRef; root: eth_types.Hash256): SaveCtxEnv =
|
const
|
||||||
let ctx = com.db.ctxFromTx(root).valueOr:
|
senderName = "sender"
|
||||||
raiseAssert "setParentCtx: " & $$error
|
recipientName = "recipient"
|
||||||
SaveCtxEnv(db: com.db, ctx: ctx)
|
minerName = "miner"
|
||||||
|
uncleName = "uncle"
|
||||||
|
internalTxName = "internalTx"
|
||||||
|
|
||||||
proc setCtx(saveCtx: SaveCtxEnv): SaveCtxEnv =
|
proc dumpMemoryDB*(node: JsonNode, cpt: CoreDbCaptRef) {.gcsafe.}
|
||||||
SaveCtxEnv(db: saveCtx.db, ctx: saveCtx.db.swapCtx saveCtx.ctx)
|
proc toJson*(receipts: seq[Receipt]): JsonNode {.gcsafe.}
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Private helpers
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
template safeTracer(info: string; code: untyped) =
|
||||||
|
try:
|
||||||
|
code
|
||||||
|
except CatchableError as e:
|
||||||
|
raiseAssert info & " name=" & $e.name & " msg=" & e.msg
|
||||||
|
|
||||||
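
A hedged usage sketch of the `safeTracer` template above (the `demo` proc is hypothetical): the string argument merely labels the `Defect` produced when the wrapped code raises, which keeps the public wrappers further below free of tracked exceptions.

proc demo() {.raises: [].} =
  "demo".safeTracer:
    # any CatchableError raised here resurfaces as a Defect labelled "demo"
    let n = 6 * 7
    doAssert n == 42
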
|
# -------------------
|
||||||
|
|
||||||
|
proc init(
|
||||||
|
T: type CaptCtxRef;
|
||||||
|
com: CommonRef;
|
||||||
|
root: common.Hash256;
|
||||||
|
): T
|
||||||
|
{.raises: [CatchableError].} =
|
||||||
|
let ctx = block:
|
||||||
|
let rc = com.db.ctx.newCtxByKey(root)
|
||||||
|
if rc.isErr:
|
||||||
|
raiseAssert "newCptCtx: " & $$rc.error
|
||||||
|
rc.value
|
||||||
|
T(db: com.db, root: root, cpt: com.db.pushCapture(), ctx: ctx)
|
||||||
|
|
||||||
|
proc init(
|
||||||
|
T: type CaptCtxRef;
|
||||||
|
com: CommonRef;
|
||||||
|
topHeader: BlockHeader;
|
||||||
|
): T
|
||||||
|
{.raises: [CatchableError].} =
|
||||||
|
T.init(com, com.db.getBlockHeader(topHeader.parentHash).stateRoot)
|
||||||
|
|
||||||
|
proc activate(cc: CaptCtxRef): CaptCtxRef {.discardable.} =
|
||||||
|
## Install/activate new context `cc.ctx`, old one in `cc.restore`
|
||||||
|
doAssert not cc.isNil
|
||||||
|
doAssert cc.restore.isNil # otherwise activated, already
|
||||||
|
cc.restore = cc.ctx.swapCtx cc.db
|
||||||
|
cc
|
||||||
|
|
||||||
|
proc release(cc: CaptCtxRef) =
|
||||||
|
if not cc.restore.isNil: # switch to original context (if any)
|
||||||
|
let ctx = cc.restore.swapCtx(cc.db)
|
||||||
|
doAssert ctx == cc.ctx
|
||||||
|
cc.ctx.forget() # dispose
|
||||||
|
cc.cpt.pop() # discard top layer of actions tracer
|
||||||
|
|
||||||
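
Together, `init`/`activate`/`release` give a scoped context switch over the parent block's state. A hedged sketch of the pattern used by the `*Impl()` procs below (`withParentState` is a hypothetical helper, `com` an initialised `CommonRef`):

proc withParentState(com: CommonRef; header: BlockHeader)
    {.raises: [CatchableError].} =
  let cc = activate CaptCtxRef.init(com, header)  # ctx at the parent state root
  defer: cc.release()  # swap the original ctx back in, drop the capture layer
  # ... CoreDb reads here see the parent block's state and are journalled
  # via `cc.cpt`, e.g. for a later `dumpMemoryDB()` ...
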
|
# -------------------
|
||||||
|
|
||||||
proc `%`(x: openArray[byte]): JsonNode =
|
proc `%`(x: openArray[byte]): JsonNode =
|
||||||
result = %toHex(x, false)
|
result = %toHex(x, false)
|
||||||
@ -57,17 +111,25 @@ proc toJson(receipt: Receipt): JsonNode =
|
|||||||
else:
|
else:
|
||||||
result["status"] = %receipt.status
|
result["status"] = %receipt.status
|
||||||
|
|
||||||
proc dumpReceipts*(chainDB: CoreDbRef, header: BlockHeader): JsonNode =
|
proc dumpReceiptsImpl(
|
||||||
|
chainDB: CoreDbRef;
|
||||||
|
header: BlockHeader;
|
||||||
|
): JsonNode
|
||||||
|
{.raises: [CatchableError].} =
|
||||||
result = newJArray()
|
result = newJArray()
|
||||||
for receipt in chainDB.getReceipts(header.receiptsRoot):
|
for receipt in chainDB.getReceipts(header.receiptsRoot):
|
||||||
result.add receipt.toJson
|
result.add receipt.toJson
|
||||||
|
|
||||||
proc toJson*(receipts: seq[Receipt]): JsonNode =
|
# ------------------------------------------------------------------------------
|
||||||
result = newJArray()
|
# Private functions
|
||||||
for receipt in receipts:
|
# ------------------------------------------------------------------------------
|
||||||
result.add receipt.toJson
|
|
||||||
|
|
||||||
proc captureAccount(n: JsonNode, db: LedgerRef, address: EthAddress, name: string) =
|
proc captureAccount(
|
||||||
|
n: JsonNode;
|
||||||
|
db: LedgerRef;
|
||||||
|
address: EthAddress;
|
||||||
|
name: string;
|
||||||
|
) =
|
||||||
var jaccount = newJObject()
|
var jaccount = newJObject()
|
||||||
jaccount["name"] = %name
|
jaccount["name"] = %name
|
||||||
jaccount["address"] = %("0x" & $address)
|
jaccount["address"] = %("0x" & $address)
|
||||||
@ -82,7 +144,7 @@ proc captureAccount(n: JsonNode, db: LedgerRef, address: EthAddress, name: strin
|
|||||||
|
|
||||||
let code = db.getCode(address)
|
let code = db.getCode(address)
|
||||||
jaccount["codeHash"] = %("0x" & ($codeHash).toLowerAscii)
|
jaccount["codeHash"] = %("0x" & ($codeHash).toLowerAscii)
|
||||||
jaccount["code"] = %("0x" & toHex(code, true))
|
jaccount["code"] = %("0x" & code.bytes.toHex(true))
|
||||||
jaccount["storageRoot"] = %("0x" & ($storageRoot).toLowerAscii)
|
jaccount["storageRoot"] = %("0x" & ($storageRoot).toLowerAscii)
|
||||||
|
|
||||||
var storage = newJObject()
|
var storage = newJObject()
|
||||||
@ -92,48 +154,26 @@ proc captureAccount(n: JsonNode, db: LedgerRef, address: EthAddress, name: strin
|
|||||||
|
|
||||||
n.add jaccount
|
n.add jaccount
|
||||||
|
|
||||||
proc dumpMemoryDB*(node: JsonNode, db: CoreDbRef) =
|
|
||||||
var n = newJObject()
|
|
||||||
for k, v in db.ctx.getKvt():
|
|
||||||
n[k.toHex(false)] = %v
|
|
||||||
node["state"] = n
|
|
||||||
|
|
||||||
proc dumpMemoryDB*(node: JsonNode, kvt: TableRef[common.Blob, common.Blob]) =
|
proc traceTransactionImpl(
|
||||||
var n = newJObject()
|
com: CommonRef;
|
||||||
for k, v in kvt:
|
header: BlockHeader;
|
||||||
n[k.toHex(false)] = %v
|
transactions: openArray[Transaction];
|
||||||
node["state"] = n
|
txIndex: uint64;
|
||||||
|
tracerFlags: set[TracerFlags] = {};
|
||||||
|
): JsonNode
|
||||||
|
{.raises: [CatchableError].} =
|
||||||
|
if header.txRoot == EMPTY_ROOT_HASH:
|
||||||
|
return newJNull()
|
||||||
|
|
||||||
proc dumpMemoryDB*(node: JsonNode, capture: CoreDbCaptRef) =
|
|
||||||
node.dumpMemoryDB capture.logDb
|
|
||||||
|
|
||||||
const
|
|
||||||
senderName = "sender"
|
|
||||||
recipientName = "recipient"
|
|
||||||
minerName = "miner"
|
|
||||||
uncleName = "uncle"
|
|
||||||
internalTxName = "internalTx"
|
|
||||||
|
|
||||||
proc traceTransaction*(com: CommonRef, header: BlockHeader,
|
|
||||||
transactions: openArray[Transaction], txIndex: uint64,
|
|
||||||
tracerFlags: set[TracerFlags] = {}): JsonNode =
|
|
||||||
let
|
let
|
||||||
# we add a memory layer between backend/lower layer db
|
|
||||||
# and capture state db snapshot during transaction execution
|
|
||||||
capture = com.db.newCapture.value
|
|
||||||
tracerInst = newLegacyTracer(tracerFlags)
|
tracerInst = newLegacyTracer(tracerFlags)
|
||||||
captureCom = com.clone(capture.recorder)
|
cc = activate CaptCtxRef.init(com, header)
|
||||||
|
vmState = BaseVMState.new(header, com).valueOr: return newJNull()
|
||||||
saveCtx = setCtx com.newCtx(com.db.getParentHeader(header).stateRoot)
|
|
||||||
vmState = BaseVMState.new(header, captureCom).valueOr:
|
|
||||||
return newJNull()
|
|
||||||
stateDb = vmState.stateDB
|
stateDb = vmState.stateDB
|
||||||
|
|
||||||
defer:
|
defer: cc.release()
|
||||||
saveCtx.setCtx().ctx.forget()
|
|
||||||
capture.forget()
|
|
||||||
|
|
||||||
if header.txRoot == EMPTY_ROOT_HASH: return newJNull()
|
|
||||||
doAssert(transactions.calcTxRoot == header.txRoot)
|
doAssert(transactions.calcTxRoot == header.txRoot)
|
||||||
doAssert(transactions.len != 0)
|
doAssert(transactions.len != 0)
|
||||||
|
|
||||||
@ -142,8 +182,7 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
|
|||||||
before = newJArray()
|
before = newJArray()
|
||||||
after = newJArray()
|
after = newJArray()
|
||||||
stateDiff = %{"before": before, "after": after}
|
stateDiff = %{"before": before, "after": after}
|
||||||
beforeRoot: common.Hash256
|
stateCtx = CaptCtxRef(nil)
|
||||||
beforeCtx: SaveCtxEnv
|
|
||||||
|
|
||||||
let
|
let
|
||||||
miner = vmState.coinbase()
|
miner = vmState.coinbase()
|
||||||
@ -159,13 +198,14 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
|
|||||||
before.captureAccount(stateDb, miner, minerName)
|
before.captureAccount(stateDb, miner, minerName)
|
||||||
stateDb.persist()
|
stateDb.persist()
|
||||||
stateDiff["beforeRoot"] = %($stateDb.rootHash)
|
stateDiff["beforeRoot"] = %($stateDb.rootHash)
|
||||||
beforeRoot = stateDb.rootHash
|
discard com.db.ctx.getAccounts.state(updateOk=true) # lazy hashing!
|
||||||
beforeCtx = com.newCtx beforeRoot
|
stateCtx = CaptCtxRef.init(com, stateDb.rootHash)
|
||||||
|
|
||||||
let rc = vmState.processTransaction(tx, sender, header)
|
let rc = vmState.processTransaction(tx, sender, header)
|
||||||
gasUsed = if rc.isOk: rc.value else: 0
|
gasUsed = if rc.isOk: rc.value else: 0
|
||||||
|
|
||||||
if idx.uint64 == txIndex:
|
if idx.uint64 == txIndex:
|
||||||
|
discard com.db.ctx.getAccounts.state(updateOk=true) # lazy hashing!
|
||||||
after.captureAccount(stateDb, sender, senderName)
|
after.captureAccount(stateDb, sender, senderName)
|
||||||
after.captureAccount(stateDb, recipient, recipientName)
|
after.captureAccount(stateDb, recipient, recipientName)
|
||||||
after.captureAccount(stateDb, miner, minerName)
|
after.captureAccount(stateDb, miner, minerName)
|
||||||
@ -176,13 +216,12 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
|
|||||||
|
|
||||||
# internal transactions:
|
# internal transactions:
|
||||||
let
|
let
|
||||||
saveCtxBefore = setCtx beforeCtx
|
cx = activate stateCtx
|
||||||
stateBefore = LedgerRef.init(capture.recorder, beforeRoot)
|
ldgBefore = LedgerRef.init(com.db, cx.root)
|
||||||
defer:
|
defer: cx.release()
|
||||||
saveCtxBefore.setCtx().ctx.forget()
|
|
||||||
|
|
||||||
for idx, acc in tracedAccountsPairs(tracerInst):
|
for idx, acc in tracedAccountsPairs(tracerInst):
|
||||||
before.captureAccount(stateBefore, acc, internalTxName & $idx)
|
before.captureAccount(ldgBefore, acc, internalTxName & $idx)
|
||||||
|
|
||||||
for idx, acc in tracedAccountsPairs(tracerInst):
|
for idx, acc in tracedAccountsPairs(tracerInst):
|
||||||
after.captureAccount(stateDb, acc, internalTxName & $idx)
|
after.captureAccount(stateDb, acc, internalTxName & $idx)
|
||||||
@ -195,30 +234,34 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
|
|||||||
|
|
||||||
# now we dump captured state db
|
# now we dump captured state db
|
||||||
if TracerFlags.DisableState notin tracerFlags:
|
if TracerFlags.DisableState notin tracerFlags:
|
||||||
result.dumpMemoryDB(capture)
|
result.dumpMemoryDB(cx.cpt)
|
||||||
|
|
||||||
proc dumpBlockState*(com: CommonRef, blk: EthBlock, dumpState = false): JsonNode =
|
|
||||||
|
proc dumpBlockStateImpl(
|
||||||
|
com: CommonRef;
|
||||||
|
blk: EthBlock;
|
||||||
|
dumpState = false;
|
||||||
|
): JsonNode
|
||||||
|
{.raises: [CatchableError].} =
|
||||||
template header: BlockHeader = blk.header
|
template header: BlockHeader = blk.header
|
||||||
|
|
||||||
let
|
let
|
||||||
parent = com.db.getParentHeader(header)
|
cc = activate CaptCtxRef.init(com, header)
|
||||||
capture = com.db.newCapture.value
|
parent = com.db.getBlockHeader(header.parentHash)
|
||||||
captureCom = com.clone(capture.recorder)
|
|
||||||
# we only need a stack dump when scanning for internal transaction address
|
# only need a stack dump when scanning for internal transaction address
|
||||||
captureFlags = {DisableMemory, DisableStorage, EnableAccount}
|
captureFlags = {DisableMemory, DisableStorage, EnableAccount}
|
||||||
tracerInst = newLegacyTracer(captureFlags)
|
tracerInst = newLegacyTracer(captureFlags)
|
||||||
|
vmState = BaseVMState.new(header, com, tracerInst).valueOr:
|
||||||
saveCtx = setCtx com.newCtx(parent.stateRoot)
|
return newJNull()
|
||||||
vmState = BaseVMState.new(header, captureCom, tracerInst).valueOr:
|
|
||||||
return newJNull()
|
|
||||||
miner = vmState.coinbase()
|
miner = vmState.coinbase()
|
||||||
defer:
|
|
||||||
saveCtx.setCtx().ctx.forget()
|
defer: cc.release()
|
||||||
capture.forget()
|
|
||||||
|
|
||||||
var
|
var
|
||||||
before = newJArray()
|
before = newJArray()
|
||||||
after = newJArray()
|
after = newJArray()
|
||||||
stateBefore = LedgerRef.init(capture.recorder, parent.stateRoot)
|
stateBefore = LedgerRef.init(com.db, parent.stateRoot)
|
||||||
|
|
||||||
for idx, tx in blk.transactions:
|
for idx, tx in blk.transactions:
|
||||||
let sender = tx.getSender
|
let sender = tx.getSender
|
||||||
@ -259,22 +302,24 @@ proc dumpBlockState*(com: CommonRef, blk: EthBlock, dumpState = false): JsonNode
|
|||||||
result = %{"before": before, "after": after}
|
result = %{"before": before, "after": after}
|
||||||
|
|
||||||
if dumpState:
|
if dumpState:
|
||||||
result.dumpMemoryDB(capture)
|
result.dumpMemoryDB(cc.cpt)
|
||||||
|
|
||||||
proc traceBlock*(com: CommonRef, blk: EthBlock, tracerFlags: set[TracerFlags] = {}): JsonNode =
|
|
||||||
|
proc traceBlockImpl(
|
||||||
|
com: CommonRef;
|
||||||
|
blk: EthBlock;
|
||||||
|
tracerFlags: set[TracerFlags] = {};
|
||||||
|
): JsonNode
|
||||||
|
{.raises: [CatchableError].} =
|
||||||
template header: BlockHeader = blk.header
|
template header: BlockHeader = blk.header
|
||||||
|
|
||||||
let
|
let
|
||||||
capture = com.db.newCapture.value
|
cc = activate CaptCtxRef.init(com, header)
|
||||||
captureCom = com.clone(capture.recorder)
|
|
||||||
tracerInst = newLegacyTracer(tracerFlags)
|
tracerInst = newLegacyTracer(tracerFlags)
|
||||||
|
vmState = BaseVMState.new(header, com, tracerInst).valueOr:
|
||||||
|
return newJNull()
|
||||||
|
|
||||||
saveCtx = setCtx com.newCtx(com.db.getParentHeader(header).stateRoot)
|
defer: cc.release()
|
||||||
vmState = BaseVMState.new(header, captureCom, tracerInst).valueOr:
|
|
||||||
return newJNull()
|
|
||||||
|
|
||||||
defer:
|
|
||||||
saveCtx.setCtx().ctx.forget()
|
|
||||||
capture.forget()
|
|
||||||
|
|
||||||
if header.txRoot == EMPTY_ROOT_HASH: return newJNull()
|
if header.txRoot == EMPTY_ROOT_HASH: return newJNull()
|
||||||
doAssert(blk.transactions.calcTxRoot == header.txRoot)
|
doAssert(blk.transactions.calcTxRoot == header.txRoot)
|
||||||
@ -293,24 +338,33 @@ proc traceBlock*(com: CommonRef, blk: EthBlock, tracerFlags: set[TracerFlags] =
|
|||||||
result["gas"] = %gasUsed
|
result["gas"] = %gasUsed
|
||||||
|
|
||||||
if TracerFlags.DisableState notin tracerFlags:
|
if TracerFlags.DisableState notin tracerFlags:
|
||||||
result.dumpMemoryDB(capture)
|
result.dumpMemoryDB(cc.cpt)
|
||||||
|
|
||||||
proc traceTransactions*(com: CommonRef, header: BlockHeader, transactions: openArray[Transaction]): JsonNode =
|
proc traceTransactionsImpl(
|
||||||
|
com: CommonRef;
|
||||||
|
header: BlockHeader;
|
||||||
|
transactions: openArray[Transaction];
|
||||||
|
): JsonNode
|
||||||
|
{.raises: [CatchableError].} =
|
||||||
result = newJArray()
|
result = newJArray()
|
||||||
for i in 0 ..< transactions.len:
|
for i in 0 ..< transactions.len:
|
||||||
result.add traceTransaction(com, header, transactions, i.uint64, {DisableState})
|
result.add traceTransactionImpl(
|
||||||
|
com, header, transactions, i.uint64, {DisableState})
|
||||||
|
|
||||||
|
|
||||||
proc dumpDebuggingMetaData*(vmState: BaseVMState, blk: EthBlock, launchDebugger = true) =
|
proc dumpDebuggingMetaDataImpl(
|
||||||
|
vmState: BaseVMState;
|
||||||
|
blk: EthBlock;
|
||||||
|
launchDebugger = true;
|
||||||
|
) {.raises: [CatchableError].} =
|
||||||
template header: BlockHeader = blk.header
|
template header: BlockHeader = blk.header
|
||||||
|
|
||||||
let
|
let
|
||||||
com = vmState.com
|
cc = activate CaptCtxRef.init(vmState.com, header)
|
||||||
blockNumber = header.number
|
blockNumber = header.number
|
||||||
capture = com.db.newCapture.value
|
|
||||||
captureCom = com.clone(capture.recorder)
|
|
||||||
bloom = createBloom(vmState.receipts)
|
bloom = createBloom(vmState.receipts)
|
||||||
defer:
|
|
||||||
capture.forget()
|
defer: cc.release()
|
||||||
|
|
||||||
let blockSummary = %{
|
let blockSummary = %{
|
||||||
"receiptsRoot": %("0x" & toHex(calcReceiptsRoot(vmState.receipts).data)),
|
"receiptsRoot": %("0x" & toHex(calcReceiptsRoot(vmState.receipts).data)),
|
||||||
@ -320,17 +374,82 @@ proc dumpDebuggingMetaData*(vmState: BaseVMState, blk: EthBlock, launchDebugger
|
|||||||
|
|
||||||
var metaData = %{
|
var metaData = %{
|
||||||
"blockNumber": %blockNumber.toHex,
|
"blockNumber": %blockNumber.toHex,
|
||||||
"txTraces": traceTransactions(captureCom, header, blk.transactions),
|
"txTraces": traceTransactionsImpl(vmState.com, header, blk.transactions),
|
||||||
"stateDump": dumpBlockState(captureCom, blk),
|
"stateDump": dumpBlockStateImpl(vmState.com, blk),
|
||||||
"blockTrace": traceBlock(captureCom, blk, {DisableState}),
|
"blockTrace": traceBlockImpl(vmState.com, blk, {DisableState}),
|
||||||
"receipts": toJson(vmState.receipts),
|
"receipts": toJson(vmState.receipts),
|
||||||
"block": blockSummary
|
"block": blockSummary
|
||||||
}
|
}
|
||||||
|
|
||||||
metaData.dumpMemoryDB(capture)
|
metaData.dumpMemoryDB(cc.cpt)
|
||||||
|
|
||||||
let jsonFileName = "debug" & $blockNumber & ".json"
|
let jsonFileName = "debug" & $blockNumber & ".json"
|
||||||
if launchDebugger:
|
if launchDebugger:
|
||||||
launchPremix(jsonFileName, metaData)
|
launchPremix(jsonFileName, metaData)
|
||||||
else:
|
else:
|
||||||
writeFile(jsonFileName, metaData.pretty())
|
writeFile(jsonFileName, metaData.pretty())
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public functions
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc traceBlock*(
|
||||||
|
com: CommonRef;
|
||||||
|
blk: EthBlock;
|
||||||
|
tracerFlags: set[TracerFlags] = {};
|
||||||
|
): JsonNode =
|
||||||
|
"traceBlock".safeTracer:
|
||||||
|
result = com.traceBlockImpl(blk, tracerFlags)
|
||||||
|
|
||||||
|
proc toJson*(receipts: seq[Receipt]): JsonNode =
|
||||||
|
result = newJArray()
|
||||||
|
for receipt in receipts:
|
||||||
|
result.add receipt.toJson
|
||||||
|
|
||||||
|
proc dumpMemoryDB*(node: JsonNode, cpt: CoreDbCaptRef) =
|
||||||
|
var n = newJObject()
|
||||||
|
for (k,v) in cpt.kvtLog:
|
||||||
|
n[k.toHex(false)] = %v
|
||||||
|
node["state"] = n
|
||||||
|
|
||||||
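
For orientation, a hedged sketch of the JSON shape this produces: every journalled key/value pair becomes one hex-string field under "state" (made-up data; exact casing and prefix depend on `toHex` and the local `%` overload):

import std/json

let example = %*{
  "state": {
    "6b6579": "76616c"   # hex of a journalled key mapped to hex of its value
  }
}
doAssert example["state"].len == 1
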
|
proc dumpReceipts*(chainDB: CoreDbRef, header: BlockHeader): JsonNode =
|
||||||
|
"dumpReceipts".safeTracer:
|
||||||
|
result = chainDB.dumpReceiptsImpl header
|
||||||
|
|
||||||
|
proc traceTransaction*(
|
||||||
|
com: CommonRef;
|
||||||
|
header: BlockHeader;
|
||||||
|
txs: openArray[Transaction];
|
||||||
|
txIndex: uint64;
|
||||||
|
tracerFlags: set[TracerFlags] = {};
|
||||||
|
): JsonNode =
|
||||||
|
"traceTransaction".safeTracer:
|
||||||
|
result = com.traceTransactionImpl(header, txs, txIndex, tracerFlags)
|
||||||
|
|
||||||
|
proc dumpBlockState*(
|
||||||
|
com: CommonRef;
|
||||||
|
blk: EthBlock;
|
||||||
|
dumpState = false;
|
||||||
|
): JsonNode =
|
||||||
|
"dumpBlockState".safeTracer:
|
||||||
|
result = com.dumpBlockStateImpl(blk, dumpState)
|
||||||
|
|
||||||
|
proc traceTransactions*(
|
||||||
|
com: CommonRef;
|
||||||
|
header: BlockHeader;
|
||||||
|
transactions: openArray[Transaction];
|
||||||
|
): JsonNode =
|
||||||
|
"traceTransactions".safeTracer:
|
||||||
|
result = com.traceTransactionsImpl(header, transactions)
|
||||||
|
|
||||||
|
proc dumpDebuggingMetaData*(
|
||||||
|
vmState: BaseVMState;
|
||||||
|
blk: EthBlock;
|
||||||
|
launchDebugger = true;
|
||||||
|
) =
|
||||||
|
"dumpDebuggingMetaData".safeTracer:
|
||||||
|
vmState.dumpDebuggingMetaDataImpl(blk, launchDebugger)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# End
|
||||||
|
# ------------------------------------------------------------------------------
|
@ -16,7 +16,7 @@ cliBuilder:
|
|||||||
./test_genesis,
|
./test_genesis,
|
||||||
./test_precompiles,
|
./test_precompiles,
|
||||||
./test_generalstate_json,
|
./test_generalstate_json,
|
||||||
#./test_tracer_json, -- temporarily disabled
|
./test_tracer_json,
|
||||||
#./test_persistblock_json, -- fails
|
#./test_persistblock_json, -- fails
|
||||||
#./test_rpc, -- fails
|
#./test_rpc, -- fails
|
||||||
./test_filters,
|
./test_filters,
|
||||||
|
@ -66,7 +66,7 @@ func pp*(h: BlockHeader; sep = " "): string =
|
|||||||
&"receiptsRoot={h.receiptsRoot.pp}{sep}" &
|
&"receiptsRoot={h.receiptsRoot.pp}{sep}" &
|
||||||
&"stateRoot={h.stateRoot.pp}{sep}" &
|
&"stateRoot={h.stateRoot.pp}{sep}" &
|
||||||
&"baseFee={h.baseFeePerGas}{sep}" &
|
&"baseFee={h.baseFeePerGas}{sep}" &
|
||||||
&"withdrawalsRoot={h.withdrawalsRoot.get(EMPTY_ROOT_HASH)}{sep}" &
|
&"withdrawalsRoot={h.withdrawalsRoot.get(EMPTY_ROOT_HASH).pp}{sep}" &
|
||||||
&"blobGasUsed={h.blobGasUsed.get(0'u64)}{sep}" &
|
&"blobGasUsed={h.blobGasUsed.get(0'u64)}{sep}" &
|
||||||
&"excessBlobGas={h.excessBlobGas.get(0'u64)}"
|
&"excessBlobGas={h.excessBlobGas.get(0'u64)}"
|
||||||
|
|
||||||
|
@ -9,16 +9,16 @@
|
|||||||
# or distributed except according to those terms.
|
# or distributed except according to those terms.
|
||||||
|
|
||||||
import
|
import
|
||||||
std/[json, os, sets, tables, strutils],
|
std/[json, os, tables, strutils],
|
||||||
stew/byteutils,
|
stew/byteutils,
|
||||||
chronicles,
|
chronicles,
|
||||||
unittest2,
|
unittest2,
|
||||||
results,
|
results,
|
||||||
./test_helpers,
|
./test_helpers,
|
||||||
../nimbus/sync/protocol/snap/snap_types,
|
|
||||||
../nimbus/db/aristo/aristo_merge,
|
|
||||||
../nimbus/db/kvt/kvt_utils,
|
|
||||||
../nimbus/db/aristo,
|
../nimbus/db/aristo,
|
||||||
|
../nimbus/db/aristo/[aristo_desc, aristo_layers, aristo_nearby, aristo_part],
|
||||||
|
../nimbus/db/aristo/aristo_part/part_debug,
|
||||||
|
../nimbus/db/kvt/kvt_utils,
|
||||||
../nimbus/[tracer, evm/types],
|
../nimbus/[tracer, evm/types],
|
||||||
../nimbus/common/common
|
../nimbus/common/common
|
||||||
|
|
||||||
@ -28,14 +28,17 @@ proc setErrorLevel {.used.} =
|
|||||||
|
|
||||||
proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
|
proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
|
||||||
## Hack for `Aristo` pre-loading using the `snap` protocol proof-loader
|
## Hack for `Aristo` pre-loading using the `snap` protocol proof-loader
|
||||||
|
const
|
||||||
|
info = "preLoadAristoDb"
|
||||||
var
|
var
|
||||||
proof: seq[SnapProof] # for pre-loading MPT
|
proof: seq[Blob] # for pre-loading MPT
|
||||||
predRoot: Hash256 # from predecessor header
|
predRoot: Hash256 # from predecessor header
|
||||||
txRoot: Hash256 # header with block number `num`
|
txRoot: Hash256 # header with block number `num`
|
||||||
rcptRoot: Hash256 # ditto
|
rcptRoot: Hash256 # ditto
|
||||||
let
|
let
|
||||||
adb = cdb.mpt
|
adb = cdb.ctx.mpt # `Aristo` db
|
||||||
kdb = cdb.kvt
|
kdb = cdb.ctx.kvt # `Kvt` db
|
||||||
|
ps = PartStateRef.init adb # Partial DB descriptor
|
||||||
|
|
||||||
# Fill KVT and collect `proof` data
|
# Fill KVT and collect `proof` data
|
||||||
for (k,v) in jKvp.pairs:
|
for (k,v) in jKvp.pairs:
|
||||||
@ -45,7 +48,7 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
|
|||||||
if key.len == 32:
|
if key.len == 32:
|
||||||
doAssert key == val.keccakHash.data
|
doAssert key == val.keccakHash.data
|
||||||
if val != @[0x80u8]: # Exclude empty item
|
if val != @[0x80u8]: # Exclude empty item
|
||||||
proof.add SnapProof(val)
|
proof.add val
|
||||||
else:
|
else:
|
||||||
if key[0] == 0:
|
if key[0] == 0:
|
||||||
try:
|
try:
|
||||||
@ -60,19 +63,62 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
|
|||||||
discard
|
discard
|
||||||
check kdb.put(key, val).isOk
|
check kdb.put(key, val).isOk
|
||||||
|
|
||||||
# TODO: `getColumn(CtXyy)` does not exists anymore. There is only the generic
|
|
||||||
# `MPT` left that can be retrieved with `getGeneric()`, optionally with
|
|
||||||
# argument `clearData=true`
|
|
||||||
|
|
||||||
# Install sub-trie roots onto production db
|
|
||||||
if txRoot.isValid:
|
|
||||||
doAssert adb.mergeProof(txRoot, VertexID(CtTxs)).isOk
|
|
||||||
if rcptRoot.isValid:
|
|
||||||
doAssert adb.mergeProof(rcptRoot, VertexID(CtReceipts)).isOk
|
|
||||||
doAssert adb.mergeProof(predRoot, VertexID(CtAccounts)).isOk
|
|
||||||
|
|
||||||
# Set up production MPT
|
# Set up production MPT
|
||||||
doAssert adb.mergeProof(proof).isOk
|
ps.partPut(proof, AutomaticPayload).isOkOr:
|
||||||
|
raiseAssert info & ": partPut => " & $error
|
||||||
|
|
||||||
|
# Handle transaction sub-tree
|
||||||
|
if txRoot.isValid:
|
||||||
|
var txs: seq[Transaction]
|
||||||
|
for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree txRoot):
|
||||||
|
let
|
||||||
|
inx = key.path.to(UInt256).truncate(uint)
|
||||||
|
tx = rlp.decode(pyl.rawBlob, Transaction)
|
||||||
|
#
|
||||||
|
# FIXME: Might this be a bug in the test data?
|
||||||
|
#
|
||||||
|
# The single item test key is always `128`. For non-single test
|
||||||
|
# lists, the keys are `1`, `2`, .., `N`, `128` (for some single-digit
|
||||||
|
# number `N`).
|
||||||
|
#
|
||||||
|
# Unless the `128` item value is put at the start of the argument
|
||||||
|
# list `txs[]` for `persistTransactions()`, the `tracer` module
|
||||||
|
# will throw an exception at
|
||||||
|
# `doAssert(transactions.calcTxRoot == header.txRoot)` in the
|
||||||
|
# function `traceTransactionImpl()`.
|
||||||
|
#
|
||||||
|
if (inx and 0x80) != 0:
|
||||||
|
txs = @[tx] & txs
|
||||||
|
else:
|
||||||
|
txs.add tx
|
||||||
|
cdb.persistTransactions(num, txRoot, txs)
|
||||||
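
A worked miniature of the reordering above, with string stand-ins for the decoded transactions (hypothetical data, same `0x80` test): the leaf keys arrive in order `1`, `2`, `128`, and the `128` item is moved to the front.

var txs: seq[string]
for (inx, tx) in [(1u, "tx-a"), (2u, "tx-b"), (128u, "tx-c")]:
  if (inx and 0x80) != 0:
    txs = @[tx] & txs      # the `128` item goes to the front
  else:
    txs.add tx
doAssert txs == @["tx-c", "tx-a", "tx-b"]
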
|
|
||||||
|
# Handle receipts sub-tree
|
||||||
|
if rcptRoot.isValid:
|
||||||
|
var rcpts: seq[Receipt]
|
||||||
|
for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree rcptRoot):
|
||||||
|
let
|
||||||
|
inx = key.path.to(UInt256).truncate(uint)
|
||||||
|
rcpt = rlp.decode(pyl.rawBlob, Receipt)
|
||||||
|
# FIXME: See comment at `txRoot` section.
|
||||||
|
if (inx and 0x80) != 0:
|
||||||
|
rcpts = @[rcpt] & rcpts
|
||||||
|
else:
|
||||||
|
rcpts.add rcpt
|
||||||
|
cdb.persistReceipts(rcptRoot, rcpts)
|
||||||
|
|
||||||
|
# Save keys to database
|
||||||
|
for (rvid,key) in ps.vkPairs:
|
||||||
|
adb.layersPutKey(rvid, key)
|
||||||
|
|
||||||
|
ps.check().isOkOr:
|
||||||
|
raiseAssert info & ": check => " & $error
|
||||||
|
|
||||||
|
#echo ">>> preLoadAristoDb (9)",
|
||||||
|
# "\n ps\n ", ps.pp(byKeyOk=false,byVidOk=false),
|
||||||
|
# ""
|
||||||
|
# -----------
|
||||||
|
#if true: quit()
|
||||||
|
|
||||||
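
Condensed, the preload flow above is: feed the collected RLP proof nodes into a partial-state descriptor, then copy the computed Merkle keys into the backing db and verify. A hedged recap using the same calls as the proc body:

var proof: seq[Blob]                           # filled as in the loop above
let
  adb = newCoreDbRef(AristoDbMemory).ctx.mpt   # `Aristo` MPT handle
  ps  = PartStateRef.init adb                  # partial DB descriptor
ps.partPut(proof, AutomaticPayload).isOkOr:
  raiseAssert "partPut failed: " & $error
for (rvid, key) in ps.vkPairs:                 # persist computed Merkle keys
  adb.layersPutKey(rvid, key)
ps.check().isOkOr:
  raiseAssert "check failed: " & $error
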
# use tracerTestGen.nim to generate additional test data
|
# use tracerTestGen.nim to generate additional test data
|
||||||
proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) =
|
proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) =
|
||||||
@ -98,15 +144,25 @@ proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: C
|
|||||||
let stateDump = dumpBlockState(com, blk)
|
let stateDump = dumpBlockState(com, blk)
|
||||||
let blockTrace = traceBlock(com, blk, {DisableState})
|
let blockTrace = traceBlock(com, blk, {DisableState})
|
||||||
|
|
||||||
|
# Fix hex representation
|
||||||
|
for inx in 0 ..< node["txTraces"].len:
|
||||||
|
for key in ["beforeRoot", "afterRoot"]:
|
||||||
|
# Here, `node["txTraces"]` stores a string while `txTraces` uses a
|
||||||
|
# `Hash256` which might expand to a different upper/lower case.
|
||||||
|
var strHash = txTraces[inx]["stateDiff"][key].getStr.toUpperAscii
|
||||||
|
if strHash.len < 64:
|
||||||
|
strHash = '0'.repeat(64 - strHash.len) & strHash
|
||||||
|
txTraces[inx]["stateDiff"][key] = %(strHash)
|
||||||
|
|
||||||
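
The padding above restores leading zeros lost when a root hash round-trips through its string form; in miniature (hypothetical short value):

import std/strutils

var strHash = "ABC"                  # stands in for a trimmed hash string
if strHash.len < 64:
  strHash = '0'.repeat(64 - strHash.len) & strHash
doAssert strHash.len == 64 and strHash.endsWith "ABC"
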
check node["txTraces"] == txTraces
|
check node["txTraces"] == txTraces
|
||||||
check node["stateDump"] == stateDump
|
check node["stateDump"] == stateDump
|
||||||
check node["blockTrace"] == blockTrace
|
check node["blockTrace"] == blockTrace
|
||||||
|
|
||||||
for i in 0 ..< receipts.len:
|
for i in 0 ..< receipts.len:
|
||||||
let receipt = receipts[i]
|
let receipt = receipts[i]
|
||||||
let stateDiff = txTraces[i]["stateDiff"]
|
let stateDiff = txTraces[i]["stateDiff"]
|
||||||
check receipt["root"].getStr().toLowerAscii() == stateDiff["afterRoot"].getStr().toLowerAscii()
|
check receipt["root"].getStr().toLowerAscii() == stateDiff["afterRoot"].getStr().toLowerAscii()
|
||||||
|
|
||||||
|
|
||||||
proc testFixtureAristo(node: JsonNode, testStatusIMPL: var TestStatus) =
|
proc testFixtureAristo(node: JsonNode, testStatusIMPL: var TestStatus) =
|
||||||
node.testFixtureImpl(testStatusIMPL, newCoreDbRef AristoDbMemory)
|
node.testFixtureImpl(testStatusIMPL, newCoreDbRef AristoDbMemory)
|
||||||
|
|
||||||
|