Core db+aristo re org tracer (#2123)

* Kvt: Update API hooks

* Aristo: Generalised merging of snap proofs, now for multiple state roots

why:
  This accommodates pre-loading partial tries for unit tests

* Aristo: Update some unit tests

* CoreDb+Aristo: Re-factor tracer

why:
  Was bonkers anyway. The main change is that the trace journal is now
  kept in a way similar to a transaction layer so that it can predictably
  interact with DB transactions (see the sketch after this list).

* Ledger: Debugging helper

* Update tracer unit test so that it is applicable for `Aristo`

* Fix copyright year

* Disable `dump()` function as compile time default

why:
  This needs to pull in the `rocks_db` library at compile time.
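Regarding the tracer re-factoring above: the journal can be pictured as a stack of layers that is pushed and popped in lock-step with DB transactions, so that rolling back a transaction also discards whatever the tracer recorded inside it. A minimal sketch with hypothetical type and field names, not the actual implementation:

import std/tables

type
  TraceLayer = object
    journal: Table[seq[byte], seq[byte]]   # captured key-value accesses
  TraceJournal = object
    stack: seq[TraceLayer]                 # one layer per open DB transaction

proc push(t: var TraceJournal) =
  ## Mirrors `txBegin()`: open a fresh journal layer.
  t.stack.add TraceLayer()

proc pop(t: var TraceJournal): bool =
  ## Mirrors `commit()`/`rollback()`: drop the topmost layer and report
  ## whether any layers remain (compare `tracer.pop()` in the diff below).
  if 0 < t.stack.len:
    t.stack.setLen(t.stack.len - 1)
  0 < t.stack.len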
Jordan Hrycaj 2024-04-03 15:48:35 +00:00 committed by GitHub
parent 6c2c99a9ae
commit 1502014e36
18 changed files with 1064 additions and 366 deletions


@ -444,24 +444,43 @@ proc ppBe[T](be: T; db: AristoDbRef; indent: int): string =
pfx1 = indent.toPfx(1)
pfx2 = indent.toPfx(2)
result = "<" & $be.kind & ">"
result &= pfx & "vGen" & pfx1 & "[" &
be.getIdgFn().get(otherwise = EmptyVidSeq).mapIt(it.ppVid).join(",") & "]"
var (dump,dataOk) = ("",false)
dump &= pfx & "vGen"
block:
result &= pfx & "sTab" & pfx1 & "{"
var n = 0
let q = be.getIdgFn().get(otherwise = EmptyVidSeq).mapIt(it.ppVid)
dump &= "(" & $q.len & ")"
if 0 < q.len:
dataOk = true
dump &= pfx1
dump &= "[" & q.join(",") & "]"
block:
dump &= pfx & "sTab"
var (n, data) = (0, "")
for (vid,vtx) in be.walkVtx:
if 0 < n: result &= pfx2
if 0 < n: data &= pfx2
n.inc
result &= $n & "(" & vid.ppVid & "," & vtx.ppVtx(db,vid) & ")"
result &= "}"
data &= $n & "(" & vid.ppVid & "," & vtx.ppVtx(db,vid) & ")"
dump &= "(" & $n & ")"
if 0 < n:
dataOk = true
dump &= pfx1
dump &= "{" & data & "}"
block:
result &= pfx & "kMap" & pfx1 & "{"
var n = 0
dump &= pfx & "kMap"
var (n, data) = (0, "")
for (vid,key) in be.walkKey:
if 0 < n: result &= pfx2
if 0 < n: data &= pfx2
n.inc
result &= $n & "(" & vid.ppVid & "," & key.ppKey(db) & ")"
result &= "}"
data &= $n & "(" & vid.ppVid & "," & key.ppKey(db) & ")"
dump &= "(" & $n & ")"
if 0 < n:
dataOk = true
dump &= pfx1
dump &= "{" & data & "}"
if dataOk:
result &= dump
else:
result &= "[]"
proc ppLayer(
layer: LayerRef;
@ -532,8 +551,10 @@ proc ppLayer(
# Public functions
# ------------------------------------------------------------------------------
proc pp*(w: Hash256): string =
if w == EMPTY_ROOT_HASH:
proc pp*(w: Hash256; codeHashOk = false): string =
if codeHashOk:
w.ppCodeHash
elif w == EMPTY_ROOT_HASH:
"EMPTY_ROOT_HASH"
elif w == Hash256():
"Hash256()"
@ -546,6 +567,9 @@ proc pp*(w: HashKey; sig: MerkleSignRef): string =
proc pp*(w: HashKey; db = AristoDbRef(nil)): string =
w.ppKey(db.orDefault)
proc pp*(w: openArray[HashKey]; db = AristoDbRef(nil)): string =
"[" & @w.mapIt(it.ppKey(db.orDefault)).join(",") & "]"
proc pp*(lty: LeafTie, db = AristoDbRef(nil)): string =
lty.ppLeafTie(db.orDefault)
@ -753,9 +777,11 @@ proc pp*(
filterOk = true;
topOk = true;
stackOk = true;
kMapOk = true;
): string =
if topOk:
result = db.layersCc.pp(db, indent=indent)
result = db.layersCc.pp(
db, xTabOk=true, kMapOk=kMapOk, other=true, indent=indent)
let stackOnlyOk = stackOk and not (topOk or filterOk or backendOk)
if not stackOnlyOk:
result &= indent.toPfx & " level=" & $db.stack.len


@ -92,6 +92,7 @@ type
MergeLeafGarbledHike
MergeLeafPathCachedAlready
MergeLeafPathOnBackendAlready
MergeLeafProofModeLock
MergeNonBranchProofModeLock
MergeRootBranchLinkBusy
MergeRootMissing
@ -100,11 +101,15 @@ type
MergeHashKeyInvalid
MergeHashKeyDiffersFromCached
MergeHashKeyRevLookUpGarbled
MergeRootVidInvalid
MergeRootKeyInvalid
MergeRootKeyNotInProof
MergeRootKeysMissing
MergeRootKeysOverflow
MergeProofInitMissing
MergeRevVidMustHaveBeenCached
MergeNodeVtxDiffersFromExisting
MergeNodeVidMissing
MergeNodeAccountPayloadError
MergeRootKeyDiffersForVid
MergeNodeVtxDuplicates
MergeRootKeyMissing


@ -313,6 +313,13 @@ func to*(pid: PathID; T: type NibblesSeq): T =
else:
nibbles
func `@`*(pid: PathID): Blob =
## Representation of a `PathID` as a `Blob`. The result is left padded
## by a zero LSB if the path length was odd.
result = pid.pfx.toBytesBE.toSeq
if pid.length < 63:
result.setLen((pid.length + 1) shr 1)
func to*(lid: HashKey; T: type Hash256): T =
## Returns the `Hash256` key if available, otherwise the Keccak hash of
## the `Blob` version.
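For orientation, the nibble-to-byte arithmetic behind the new `@` converter above: a path of `length` nibbles occupies `(length + 1) shr 1` bytes, so an odd-length path leaves a zero filler nibble in its last byte. A self-contained sketch of the rounding (plain integers, no `PathID` needed):

for length in [1, 2, 3, 63, 64]:
  echo length, " nibble(s) -> ", (length + 1) shr 1, " byte(s)"
# prints: 1 -> 1, 2 -> 1, 3 -> 2, 63 -> 32, 64 -> 32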


@ -74,6 +74,42 @@ proc to(
else:
err(rc.error)
proc differ(
db: AristoDbRef; # Database, top layer
p1, p2: PayloadRef; # Payload values
): bool =
## Check whether payloads differ on the database.
## If `p1` is `RLP` serialised and `p2` is a raw blob, compare serialisations.
## If `p1` is of account type and `p2` is serialised, translate `p2`
## to an account type and compare.
##
if p1 == p2:
return false
# Adjust and check for divergent types.
if p1.pType != p2.pType:
if p1.pType == AccountData:
try:
let
blob = (if p2.pType == RlpData: p2.rlpBlob else: p2.rawBlob)
acc = rlp.decode(blob, Account)
if acc.nonce == p1.account.nonce and
acc.balance == p1.account.balance and
acc.codeHash == p1.account.codeHash and
acc.storageRoot.isValid == p1.account.storageID.isValid:
if not p1.account.storageID.isValid or
acc.storageRoot.to(HashKey) == db.getKey p1.account.storageID:
return false
except RlpError:
discard
elif p1.pType == RlpData:
if p2.pType == RawData and p1.rlpBlob == p2.rawBlob:
return false
true
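# In words: the `differ()` function above treats two payloads as equal when
# they decode to the same content, even if their representations diverge.
# Hedged illustration with hypothetical values:
#   p1 = PayloadRef(pType: AccountData, account: acc)   # decoded form
#   p2 = PayloadRef(pType: RlpData, rlpBlob: blob)      # RLP-encoded form
#   db.differ(p1, p2) == false   # when both describe the same account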
# -----------
proc clearMerkleKeys(
@ -437,15 +473,16 @@ proc updatePayload(
let leafLeg = hike.legs[^1]
# Update payloads if they differ
if leafLeg.wp.vtx.lData != payload:
if db.differ(leafLeg.wp.vtx.lData, payload):
let vid = leafLeg.wp.vid
if vid in db.pPrf:
return err(MergeLeafProofModeLock)
# Update vertex and hike
let
vid = leafLeg.wp.vid
vtx = VertexRef(
vType: Leaf,
lPfx: leafLeg.wp.vtx.lPfx,
lData: payload)
let vtx = VertexRef(
vType: Leaf,
lPfx: leafLeg.wp.vtx.lPfx,
lData: payload)
var hike = hike
hike.legs[^1].wp.vtx = vtx
@ -515,27 +552,57 @@ proc mergeNodeImpl(
# The `vertexID <-> hashKey` mappings need to be set up now (if any)
case node.vType:
of Leaf:
discard
# Check whether the payload needs to be converted to an `Account` payload
if rootVid == VertexID(1) and newVtxFromNode:
try:
let
# `aristo_serialise.read()` always decodes raw data payload
acc = rlp.decode(node.lData.rawBlob, Account)
pyl = PayloadRef(
pType: AccountData,
account: AristoAccount(
nonce: acc.nonce,
balance: acc.balance,
codeHash: acc.codeHash))
if acc.storageRoot.isValid:
var sid = db.layerGetProofVidOrVoid acc.storageRoot.to(HashKey)
if not sid.isValid:
sid = db.vidFetch
db.layersPutProof(sid, acc.storageRoot.to(HashKey))
pyl.account.storageID = sid
vtx.lData = pyl
except RlpError:
return err(MergeNodeAccountPayloadError)
of Extension:
if node.key[0].isValid:
let eKey = node.key[0]
if newVtxFromNode:
# Brand new reverse lookup link for this vertex
vtx.eVid = db.vidFetch
db.layersPutProof(vtx.eVid, eKey)
vtx.eVid = db.layerGetProofVidOrVoid eKey
if not vtx.eVid.isValid:
# Brand new reverse lookup link for this vertex
vtx.eVid = db.vidFetch
elif not vtx.eVid.isValid:
return err(MergeNodeVtxDiffersFromExisting)
return err(MergeNodeVidMissing)
else:
let yEke = db.getKey vtx.eVid
if yEke.isValid and eKey != yEke:
return err(MergeNodeVtxDiffersFromExisting)
db.layersPutProof(vtx.eVid, eKey)
of Branch:
for n in 0..15:
if node.key[n].isValid:
let bKey = node.key[n]
if newVtxFromNode:
# Brand new reverse lookup link for this vertex
vtx.bVid[n] = db.vidFetch
db.layersPutProof(vtx.bVid[n], bKey)
vtx.bVid[n] = db.layerGetProofVidOrVoid bKey
if not vtx.bVid[n].isValid:
# Brand new reverse lookup link for this vertex
vtx.bVid[n] = db.vidFetch
elif not vtx.bVid[n].isValid:
return err(MergeNodeVtxDiffersFromExisting)
return err(MergeNodeVidMissing)
else:
let yEkb = db.getKey vtx.bVid[n]
if yEkb.isValid and yEkb != bKey:
return err(MergeNodeVtxDiffersFromExisting)
db.layersPutProof(vtx.bVid[n], bKey)
# Store and lock vertex
@ -660,13 +727,17 @@ proc mergeLeaf*(
proc merge*(
db: AristoDbRef; # Database, top layer
proof: openArray[SnapProof]; # RLP encoded node records
rootVid: VertexID; # Current sub-trie
rootVid = VertexID(0); # Current sub-trie
): Result[int, AristoError]
{.gcsafe, raises: [RlpError].} =
## The function merges the argument `proof` list of RLP encoded node records
## into the `Aristo Trie` database. This function is intended to be used with
## the proof nodes as returned by `snap/1` messages.
##
## If there is no root vertex ID passed, the function tries to find out what
## the root hashes are and allocates new vertices with static IDs `$2`, `$3`,
## etc.
##
## Caveat:
## Proof of concept, not in production yet.
##
@ -675,7 +746,8 @@ proc merge*(
todo: var KeyedQueueNV[NodeRef];
key: HashKey;
) {.gcsafe, raises: [RlpError].} =
## Check for embedded nodes, i.e. fully encoded node instead of a hash
## Check for embedded nodes, i.e. fully encoded nodes instead of hashes.
## They need to be treated as full nodes here.
if key.isValid and key.len < 32:
let lid = @key.digestTo(HashKey)
if not seen.hasKey lid:
@ -683,17 +755,22 @@ proc merge*(
discard todo.append node
seen[lid] = node
if not rootVid.isValid:
return err(MergeRootVidInvalid)
let rootKey = db.getKey rootVid
if not rootKey.isValid:
return err(MergeRootKeyInvalid)
# Make sure that the reverse lookup for the root vertex key is available.
if not db.layerGetProofVidOrVoid(rootKey).isValid:
return err(MergeProofInitMissing)
let rootKey = block:
if rootVid.isValid:
let vidKey = db.getKey rootVid
if not vidKey.isValid:
return err(MergeRootKeyInvalid)
# Make sure that the reverse lookup for the root vertex key is available.
if not db.layerGetProofVidOrVoid(vidKey).isValid:
return err(MergeProofInitMissing)
vidKey
else:
VOID_HASH_KEY
# Expand and collect hash keys and nodes
var nodeTab: Table[HashKey,NodeRef]
# Expand and collect hash keys and nodes, and record root-node candidates
var
nodeTab: Table[HashKey,NodeRef]
rootKeys: HashSet[HashKey] # Potential root node hashes
for w in proof:
let
key = w.Blob.digestTo(HashKey)
@ -701,8 +778,10 @@ proc merge*(
if node.error != AristoError(0):
return err(node.error)
nodeTab[key] = node
rootKeys.incl key
# Check for embedded nodes, i.e. fully encoded node instead of a hash
# Check for embedded nodes, i.e. fully encoded nodes instead of hashes.
# They will be added as full nodes to the `nodeTab[]`.
var embNodes: KeyedQueueNV[NodeRef]
discard embNodes.append node
while true:
@ -727,6 +806,7 @@ proc merge*(
of Extension:
if nodeTab.hasKey node.key[0]:
backLink[node.key[0]] = key
rootKeys.excl node.key[0] # predecessor => not root
else:
blindNodes.incl key
of Branch:
@ -735,13 +815,45 @@ proc merge*(
if nodeTab.hasKey node.key[n]:
isBlind = false
backLink[node.key[n]] = key
rootKeys.excl node.key[n] # predecessor => not root
if isBlind:
blindNodes.incl key
# If a root vertex ID was passed, its key must be in the candidate set
# `rootKeys` for the proof to apply.
var roots: Table[HashKey,VertexID]
if rootVid.isValid:
if rootKey notin rootKeys:
return err(MergeRootKeyNotInProof)
roots[rootKey] = rootVid
elif rootKeys.len == 0:
return err(MergeRootKeysMissing)
else:
# Add static root keys different from VertexID(1)
var count = 2
for key in rootKeys.items:
while true:
# Check for already allocated nodes
let vid1 = db.layerGetProofVidOrVoid key
if vid1.isValid:
roots[key] = vid1
break
# Use the next free static vertex ID
let vid2 = VertexID(count)
count.inc
if not db.getKey(vid2).isValid:
db.layersPutProof(vid2, key)
roots[key] = vid2
break
if LEAST_FREE_VID <= count:
return err(MergeRootKeysOverflow)
# Run over blind nodes and build chains from a blind/bottom level node up
# to a root node. Select only chains that end up at one of the pre-defined
# root nodes.
var chains: seq[seq[HashKey]]
var
accounts: seq[seq[HashKey]] # This one separated, to be processed last
chains: seq[seq[HashKey]]
for w in blindNodes:
# Build a chain of nodes up to the root node
var
@ -750,8 +862,11 @@ proc merge*(
while nodeKey.isValid and nodeTab.hasKey nodeKey:
chain.add nodeKey
nodeKey = backLink.getOrVoid nodeKey
if 0 < chain.len and chain[^1] == rootKey:
chains.add chain
if 0 < chain.len and chain[^1] in roots:
if roots.getOrVoid(chain[^1]) == VertexID(1):
accounts.add chain
else:
chains.add chain
# Process over chains in reverse mode starting with the root node. This
# allows the algorithm to find existing nodes on the backend.
@ -759,11 +874,13 @@ proc merge*(
seen: HashSet[HashKey]
merged = 0
# Process each chain, starting with its respective root ID
for chain in chains:
for chain in chains & accounts:
let chainRootVid = roots.getOrVoid chain[^1]
for key in chain.reversed:
if key notin seen:
seen.incl key
db.mergeNodeImpl(key, nodeTab.getOrVoid key, rootVid).isOkOr:
let node = nodeTab.getOrVoid key
db.mergeNodeImpl(key, node, chainRootVid).isOkOr:
return err(error)
merged.inc
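A hedged usage sketch of the relaxed `merge()` entry point above: with the new `rootVid = VertexID(0)` default, root hashes are inferred from the proof set itself and bound to static vertex IDs from `VertexID(2)` upward (`db` and `proofNodes` are assumed from context):

let rc = db.merge(proofNodes)      # no root ID: roots are auto-detected
if rc.isOk:
  echo "merged ", rc.value, " proof nodes"

Passing an explicit `rootVid` keeps the previous behaviour and fails with `MergeRootKeyNotInProof` if that root is not among the proof's root candidates.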


@ -437,9 +437,12 @@ proc stow*(
# Merge `top` layer into `roFilter`
db.merge(fwd).isOkOr:
return err(error[1])
let final =
if chunkedMpt: LayerFinalRef(fRpp: db.top.final.fRpp)
else: LayerFinalRef()
db.top = LayerRef(
delta: LayerDeltaRef(),
final: LayerFinalRef())
final: final)
if db.roFilter.isValid:
db.top.final.vGen = db.roFilter.vGen
else:
@ -456,9 +459,12 @@ proc stow*(
db.roFilter = FilterRef(nil)
# Delete/clear top
let final =
if chunkedMpt: LayerFinalRef(vGen: db.vGen, fRpp: db.top.final.fRpp)
else: LayerFinalRef(vGen: db.vGen)
db.top = LayerRef(
delta: LayerDeltaRef(),
final: LayerFinalRef(vGen: db.vGen),
final: final,
txUid: db.top.txUid)
ok()


@ -32,7 +32,8 @@ import
export
AristoApiRlpError,
AristoCoreDbKvtBE
AristoCoreDbKvtBE,
isAristo
type
AristoCoreDbRef* = ref object of CoreDbRef
@ -98,9 +99,9 @@ proc cptMethods(
tracer: AristoTracerRef;
): CoreDbCaptFns =
let
tracer = tracer # So it can safely be captured
db = tracer.parent # Will not change and can be captured
log = tracer.topInst() # Ditto
tr = tracer # So it can safely be captured
db = tr.parent # Will not change and can be captured
log = tr.topInst() # Ditto
CoreDbCaptFns(
recorderFn: proc(): CoreDbRef =
@ -113,8 +114,9 @@ proc cptMethods(
log.flags,
forgetFn: proc() =
if tracer.pop():
tracer.restore())
if not tracer.pop():
tr.parent.tracer = AristoTracerRef(nil)
tr.restore())
proc baseMethods(


@ -88,6 +88,12 @@ func to(trie: CoreDbTrieRef; T: type VertexID): T =
func to(address: EthAddress; T: type PathID): T =
HashKey.fromBytes(address.keccakHash.data).value.to(T)
func resetTrie(kind: CoreDbSubTrie): bool =
## Check whether to reset some non-dynamic trie when instantiating. It
## emulates the behaviour of a new empty MPT on the legacy database.
kind == GenericTrie or
(high(CoreDbSubTrie) < kind and kind.ord < LEAST_FREE_VID)
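In effect, `resetTrie()` selects the `GenericTrie` slot plus any static root slot whose ordinal lies beyond the last named sub-trie but below `LEAST_FREE_VID`. A hedged illustration (enum layout assumed):

# resetTrie(GenericTrie)       -> true
# resetTrie(AccountsTrie)      -> false, the account trie is dynamic
# resetTrie(CoreDbSubTrie(9))  -> true, if 9 exceeds high(CoreDbSubTrie)
#                                 and lies below LEAST_FREE_VID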
# -------------------------------
func toCoreDbAccount(
@ -475,11 +481,9 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
if not root.isValid:
return ok(db.bless trie)
# Reset non-dynamic trie when instantiating. This applies to root IDs between
# `VertexID(2) .. LEAST_FREE_VID`. It emulates the behaviour of a new empty
# MPT on the legacy database.
if AccountsTrie < kind and kind.ord < LEAST_FREE_VID:
trie.reset = true
# Reset some non-dynamic trie when instantiating. It emulates the behaviour
# of a new empty MPT on the legacy database.
trie.reset = kind.resetTrie()
# Update hashes in order to verify the trie state root.
? api.hashify(mpt).toVoidRc(base, info, HashNotAvailable)
@ -521,7 +525,7 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
if rc.isErr:
return err(rc.error[1].toError(base, info, AccNotFound))
else:
reset = AccountsTrie < trie.kind
reset = trie.kind.resetTrie()
newMpt = AristoCoreDxMptRef(
root: VertexID(trie.kind),
accPath: VOID_PATH_ID)
@ -747,7 +751,7 @@ proc init*(
base: base,
mpt: newMpt)
ctx.methods = ctx.ctxMethods
ok( base.parent.bless ctx)
ok(base.parent.bless ctx)
# ------------------------------------------------------------------------------
# End

File diff suppressed because it is too large


@ -31,6 +31,7 @@ export
core_apps,
# see `aristo_db`
isAristo,
toAristo,
toAristoProfData,


@ -61,6 +61,7 @@ type
KvtApiReCentreFn* = proc(db: KvtDbRef) {.noRaise.}
KvtApiRollbackFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
KvtApiStowFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
KvtApiToKvtDbRefFn* = proc(tx: KvtTxRef): KvtDbRef {.noRaise.}
KvtApiTxBeginFn* = proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.}
KvtApiTxTopFn* =
proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.}
@ -85,6 +86,7 @@ type
reCentre*: KvtApiReCentreFn
rollback*: KvtApiRollbackFn
stow*: KvtApiStowFn
toKvtDbRef*: KvtApiToKvtDbRefFn
txBegin*: KvtApiTxBeginFn
txTop*: KvtApiTxTopFn
@ -109,6 +111,7 @@ type
KvtApiProfReCentreFn = "reCentre"
KvtApiProfRollbackFn = "rollback"
KvtApiProfStowFn = "stow"
KvtApiProfToKvtDbRefFn = "toKvtDbRef"
KvtApiProfTxBeginFn = "txBegin"
KvtApiProfTxTopFn = "txTop"
@ -142,6 +145,7 @@ when AutoValidateApiHooks:
doAssert not api.reCentre.isNil
doAssert not api.rollback.isNil
doAssert not api.stow.isNil
doAssert not api.toKvtDbRef.isNil
doAssert not api.txBegin.isNil
doAssert not api.txTop.isNil
@ -184,6 +188,7 @@ func init*(api: var KvtApiObj) =
api.reCentre = reCentre
api.rollback = rollback
api.stow = stow
api.toKvtDbRef = toKvtDbRef
api.txBegin = txBegin
api.txTop = txTop
when AutoValidateApiHooks:
@ -195,24 +200,25 @@ func init*(T: type KvtApiRef): T =
func dup*(api: KvtApiRef): KvtApiRef =
result = KvtApiRef(
commit: api.commit,
del: api.del,
finish: api.finish,
forget: api.forget,
fork: api.fork,
forkTop: api.forkTop,
get: api.get,
hasKey: api.hasKey,
isCentre: api.isCentre,
isTop: api.isTop,
level: api.level,
nForked: api.nForked,
put: api.put,
reCentre: api.reCentre,
rollback: api.rollback,
stow: api.stow,
txBegin: api.txBegin,
txTop: api.txTop)
commit: api.commit,
del: api.del,
finish: api.finish,
forget: api.forget,
fork: api.fork,
forkTop: api.forkTop,
get: api.get,
hasKey: api.hasKey,
isCentre: api.isCentre,
isTop: api.isTop,
level: api.level,
nForked: api.nForked,
put: api.put,
reCentre: api.reCentre,
rollback: api.rollback,
stow: api.stow,
toKvtDbRef: api.toKvtDbRef,
txBegin: api.txBegin,
txTop: api.txTop)
when AutoValidateApiHooks:
api.validate
@ -323,6 +329,11 @@ func init*(
KvtApiProfStowFn.profileRunner:
result = api.stow(a)
profApi.toKvtDbRef =
proc(a: KvtTxRef): auto =
KvtApiProfToKvtDbRefFn.profileRunner:
result = api.toKvtDbRef(a)
profApi.txBegin =
proc(a: KvtDbRef): auto =
KvtApiProfTxBeginFn.profileRunner:


@ -74,6 +74,10 @@ func to*(tx: KvtTxRef; T: type[KvtDbRef]): T =
## Getter, retrieves the parent database descriptor from argument `tx`
tx.db
func toKvtDbRef*(tx: KvtTxRef): KvtDbRef =
## Same as `.to(KvtDbRef)`
tx.db
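Presumably the alias exists because the generic `to(T: type)` overload cannot be assigned to the `KvtApiToKvtDbRefFn` hook field, while a plain proc can. Both calls are equivalent (`tx` assumed from context):

doAssert tx.to(KvtDbRef) == tx.toKvtDbRef   # hook-friendly alias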
proc forkTx*(tx: KvtTxRef): Result[KvtDbRef,KvtError] =
## Clone a transaction into a new DB descriptor accessing the same backend
## (if any) database as the argument `db`. The new descriptor is linked to


@ -36,6 +36,12 @@ type
StorageLedger* = distinct CoreDxPhkRef
SomeLedger* = AccountLedger | StorageLedger
const
EnableMptDump = false # or true
## Provide database dumper. Note that the dump function needs to link
## against the `rocksdb` library. The dependency lies in the import of
## `aristo_debug`.
# ------------------------------------------------------------------------------
# Public debugging helpers
# ------------------------------------------------------------------------------
@ -63,6 +69,46 @@ proc toSvp*(sl: StorageLedger): seq[(UInt256,UInt256)] =
proc toStr*(w: seq[(UInt256,UInt256)]): string =
"[" & w.mapIt("(" & it[0].toHex & "," & it[1].toHex & ")").join(", ") & "]"
when EnableMptDump:
import
eth/trie,
stew/byteutils,
../aristo,
../aristo/aristo_debug
proc dump*(led: SomeLedger): string =
## Dump database (beware of large backend)
let db = led.distinctBase.parent
if db.dbType notin CoreDbPersistentTypes:
# Memory based storage only
let be = led.distinctBase.backend
if db.isAristo:
let adb = be.toAristo()
if not adb.isNil:
return adb.pp(kMapOk=false,backendOK=true)
if db.isLegacy:
let ldb = be.toLegacy()
var blurb: seq[string]
blurb.add "level=" & $db.level
try:
for (k,v) in ldb.pairs:
let key = HashKey.fromBytes(k).value
if key.isValid:
let acc = rlp.decode(v, Account)
blurb.add "(" & key.pp & ",(" &
$acc.nonce & "," &
$acc.balance & "," &
acc.storageRoot.pp & "," &
acc.codeHash.pp(codeHashOk=true) & "))"
except RlpError as e:
raiseAssert "dump: " & $e.name & " - " & e.msg
return blurb.join("\n ")
# Oops
"<" & $db.dbType & ">"
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -12,7 +12,7 @@
## ----------------------------------------------------
import
std/tables,
std/[tables, typetraits],
eth/common,
stew/byteutils,
../../nimbus/common/chain_config,
@ -25,19 +25,30 @@ export
# Public functions, pretty printer
# ------------------------------------------------------------------------------
proc pp*(b: Blob): string =
func pp*(b: Blob): string =
b.toHex.pp(hex = true)
proc pp*(a: EthAddress): string =
func pp*(a: EthAddress): string =
a.toHex[32 .. 39]
proc pp*(a: openArray[EthAddress]): string =
func pp*(a: Option[EthAddress]): string =
if a.isSome: a.unsafeGet.pp else: "n/a"
func pp*(a: openArray[EthAddress]): string =
"[" & a.mapIt(it.pp).join(" ") & "]"
proc pp*(a: BlockNonce): string =
func pp*(a: BlockNonce): string =
a.toHex
proc pp*(h: BlockHeader; sep = " "): string =
func pp*(a: NetworkPayload): string =
if a.isNil:
"n/a"
else:
"([#" & $a.blobs.len & "],[#" &
$a.commitments.len & "],[#" &
$a.proofs.len & "])"
func pp*(h: BlockHeader; sep = " "): string =
"" &
&"hash={h.blockHash.pp}{sep}" &
&"blockNumber={h.blockNumber}{sep}" &
@ -56,10 +67,10 @@ proc pp*(h: BlockHeader; sep = " "): string =
&"stateRoot={h.stateRoot.pp}{sep}" &
&"baseFee={h.baseFee}{sep}" &
&"withdrawalsRoot={h.withdrawalsRoot.get(EMPTY_ROOT_HASH)}{sep}" &
&"blobGasUsed={h.blobGasUsed.get(0'u64)}" &
&"blobGasUsed={h.blobGasUsed.get(0'u64)}{sep}" &
&"excessBlobGas={h.excessBlobGas.get(0'u64)}"
proc pp*(g: Genesis; sep = " "): string =
func pp*(g: Genesis; sep = " "): string =
"" &
&"nonce={g.nonce.pp}{sep}" &
&"timestamp={g.timestamp}{sep}" &
@ -74,6 +85,25 @@ proc pp*(g: Genesis; sep = " "): string =
&"parentHash={g.parentHash.pp}{sep}" &
&"baseFeePerGas={g.baseFeePerGas}"
func pp*(t: Transaction; sep = " "): string =
"" &
&"txType={t.txType}{sep}" &
&"chainId={t.chainId.distinctBase}{sep}" &
&"nonce={t.nonce}{sep}" &
&"gasPrice={t.gasPrice}{sep}" &
&"maxPriorityFee={t.maxPriorityFee}{sep}" &
&"maxFee={t.maxFee}{sep}" &
&"gasLimit={t.gasLimit}{sep}" &
&"to={t.to.pp}{sep}" &
&"value={t.value}{sep}" &
&"payload={t.payload.pp}{sep}" &
&"accessList=[#{t.accessList.len}]{sep}" &
&"maxFeePerBlobGas={t.maxFeePerBlobGas}{sep}" &
&"versionedHashes=[#{t.versionedHashes.len}]{sep}" &
&"networkPayload={t.networkPayload.pp}{sep}" &
&"V={t.V}{sep}" &
&"R={t.R}{sep}" &
&"S={t.S}{sep}"
proc pp*(h: BlockHeader; indent: int): string =
h.pp("\n" & " ".repeat(max(1,indent)))


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -134,7 +134,7 @@ func pp*(a: MDigest[256]; collapse = true): string =
elif a == ZERO_HASH256:
"ZERO_HASH256"
else:
a.data.toHex.join[56 .. 63]
"£" & a.data.toHex.join[0..6] & ".." & a.data.toHex.join[56..63]
func pp*(a: openArray[MDigest[256]]; collapse = true): string =
"@[" & a.toSeq.mapIt(it.pp).join(" ") & "]"


@ -192,9 +192,9 @@ when isMainModule:
setErrorLevel()
when true: # and false:
# Verify problem with the persistent database
noisy.accountsRunner()
when true and false:
# Verify problem with the database for production test
noisy.accountsRunner(persistent=false)
when true: # and false:
noisy.miscRunner(qidSampleSize = 1_000)


@ -218,10 +218,10 @@ proc mergeData(
rc.value
let nMerged = block:
let rc = db.merge(proof, root) # , noisy=noisy)
let rc = db.merge(proof, root)
xCheckRc rc.error == 0
rc.value
discard nMerged
discard nMerged # Result is currently unused
let merged = db.mergeList(leafs, noisy=noisy)
xCheck merged.error in {AristoError(0), MergeLeafPathCachedAlready}
@ -292,6 +292,8 @@ proc testBackendConsistency*(
xCheck ndb.backend.isNil
xCheck not mdb.backend.isNil
xCheck ndb.vGen == mdb.vGen
xCheck ndb.top.final.fRpp.len == mdb.top.final.fRpp.len
when true and false:
noisy.say "***", "beCon(1) <", n, "/", list.len-1, ">",
@ -350,6 +352,9 @@ proc testBackendConsistency*(
let rc = rdb.stow(persistent=true, chunkedMpt=true)
xCheckRc rc.error == 0
xCheck ndb.vGen == mdb.vGen
xCheck ndb.top.final.fRpp.len == mdb.top.final.fRpp.len
block:
ndb.top.final.pPrf.clear # let it look like mdb/rdb
xCheck mdb.pPrf.len == 0


@ -589,8 +589,10 @@ proc testTxMergeProofAndKvpList*(
return
when true and false:
noisy.say "***", "proofs(9) <", n, "/", list.len-1, ">",
" groups=", count, " proved=", proved, " merged=", merged
noisy.say "***", "testTxMergeProofAndKvpList (1)",
" <", n, "/", list.len-1, ">",
" runID=", runID,
" groups=", count, " merged=", merged
true


@ -1,29 +1,94 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified,
# or distributed except according to those terms.
import
std/[json, os, tables, strutils],
std/[json, os, sets, tables, strutils],
chronicles,
unittest2,
stew/byteutils,
results,
./test_helpers,
../nimbus/sync/protocol/snap/snap_types,
../nimbus/db/aristo/aristo_merge,
../nimbus/db/kvt/kvt_utils,
../nimbus/db/aristo,
../nimbus/[tracer, vm_types],
../nimbus/common/common
proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus)
proc setErrorLevel {.used.} =
when defined(chronicles_runtime_filtering) and loggingEnabled:
setLogLevel(LogLevel.ERROR)
proc preLoadLegaDb(cdb: CoreDbRef; jKvp: JsonNode) =
# Just a hack: MPT and KVT share the same base table
for k, v in jKvp:
let key = hexToSeqByte(k)
let value = hexToSeqByte(v.getStr())
cdb.kvt.put(key, value)
proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
## Hack for `Aristo` pre-loading using the `snap` protocol proof-loader
var
proof: seq[SnapProof] # for pre-loading MPT
predRoot: Hash256 # from predecessor header
txRoot: Hash256 # header with block number `num`
rcptRoot: Hash256 # ditto
let
adb = cdb.ctx.getMpt(GenericTrie).backend.toAristo
kdb = cdb.newKvt.backend.toAristo
# Fill KVT and collect `proof` data
for (k,v) in jKvp.pairs:
let
key = hexToSeqByte(k)
val = hexToSeqByte(v.getStr())
if key.len == 32:
doAssert key == val.keccakHash.data
if val != @[0x80u8]: # Exclude empty item
proof.add SnapProof(val)
else:
if key[0] == 0:
try:
# Pull out particular header fields (if possible)
let header = rlp.decode(val, BlockHeader)
if header.blockNumber == num:
txRoot = header.txRoot
rcptRoot = header.receiptRoot
elif header.blockNumber == num-1:
predRoot = header.stateRoot
except RlpError:
discard
check kdb.put(key, val).isOk
# Install sub-trie roots onto production db
if txRoot.isValid:
doAssert adb.merge(txRoot, VertexID(TxTrie)).isOk
if rcptRoot.isValid:
doAssert adb.merge(rcptRoot, VertexID(ReceiptsTrie)).isOk
doAssert adb.merge(predRoot, VertexID(AccountsTrie)).isOk
# Set up production MPT
doAssert adb.merge(proof).isOk
# Remove locks so that hashify can re-assign changed nodes
adb.top.final.pPrf.clear
adb.top.final.fRpp.clear
proc tracerJsonMain*() =
suite "tracer json tests":
jsonTest("TracerTests", testFixture)
# use tracerTestGen.nim to generate additional test data
proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus) =
proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) =
setErrorLevel()
var
blockNumber = UInt256.fromHex(node["blockNumber"].getStr())
memoryDB = newCoreDbRef LegacyDbMemory
com = CommonRef.new(memoryDB, chainConfigForNetwork(MainNet))
state = node["state"]
receipts = node["receipts"]
@ -31,10 +96,13 @@ proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus) =
# disable POS/post Merge feature
com.setTTD none(DifficultyInt)
for k, v in state:
let key = hexToSeqByte(k)
let value = hexToSeqByte(v.getStr())
memoryDB.kvt.put(key, value)
# Import raw data into database
if memoryDB.dbType in {LegacyDbMemory,LegacyDbPersistent}:
# Just a hack: MPT and KVT share the same base table
memoryDB.preLoadLegaDb state
else:
# Another hack for `Aristo` using the `snap` protocol proof-loader
memoryDB.preLoadAristoDb(state, blockNumber)
var header = com.db.getBlockHeader(blockNumber)
var headerHash = header.blockHash
@ -52,5 +120,18 @@ proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus) =
let stateDiff = txTraces[i]["stateDiff"]
check receipt["root"].getStr().toLowerAscii() == stateDiff["afterRoot"].getStr().toLowerAscii()
proc testFixtureLega(node: JsonNode, testStatusIMPL: var TestStatus) =
node.testFixtureImpl(testStatusIMPL, newCoreDbRef LegacyDbMemory)
proc testFixtureAristo(node: JsonNode, testStatusIMPL: var TestStatus) =
node.testFixtureImpl(testStatusIMPL, newCoreDbRef AristoDbMemory)
proc tracerJsonMain*() =
suite "tracer json tests for legacy DB":
jsonTest("TracerTests", testFixtureLega)
suite "tracer json tests for Aristo DB":
jsonTest("TracerTests", testFixtureAristo)
when isMainModule:
tracerJsonMain()