diff --git a/nimbus/db/aristo/aristo_debug.nim b/nimbus/db/aristo/aristo_debug.nim
index b93e85257..39976750c 100644
--- a/nimbus/db/aristo/aristo_debug.nim
+++ b/nimbus/db/aristo/aristo_debug.nim
@@ -444,24 +444,43 @@ proc ppBe[T](be: T; db: AristoDbRef; indent: int): string =
     pfx1 = indent.toPfx(1)
     pfx2 = indent.toPfx(2)
   result = "<" & $be.kind & ">"
-  result &= pfx & "vGen" & pfx1 & "[" &
-    be.getIdgFn().get(otherwise = EmptyVidSeq).mapIt(it.ppVid).join(",") & "]"
+  var (dump,dataOk) = ("",false)
+  dump &= pfx & "vGen"
   block:
-    result &= pfx & "sTab" & pfx1 & "{"
-    var n = 0
+    let q = be.getIdgFn().get(otherwise = EmptyVidSeq).mapIt(it.ppVid)
+    dump &= "(" & $q.len & ")"
+    if 0 < q.len:
+      dataOk = true
+      dump &= pfx1
+    dump &= "[" & q.join(",") & "]"
+  block:
+    dump &= pfx & "sTab"
+    var (n, data) = (0, "")
     for (vid,vtx) in be.walkVtx:
-      if 0 < n: result &= pfx2
+      if 0 < n: data &= pfx2
       n.inc
-      result &= $n & "(" & vid.ppVid & "," & vtx.ppVtx(db,vid) & ")"
-    result &= "}"
+      data &= $n & "(" & vid.ppVid & "," & vtx.ppVtx(db,vid) & ")"
+    dump &= "(" & $n & ")"
+    if 0 < n:
+      dataOk = true
+      dump &= pfx1
+    dump &= "{" & data & "}"
   block:
-    result &= pfx & "kMap" & pfx1 & "{"
-    var n = 0
+    dump &= pfx & "kMap"
+    var (n, data) = (0, "")
     for (vid,key) in be.walkKey:
-      if 0 < n: result &= pfx2
+      if 0 < n: data &= pfx2
       n.inc
-      result &= $n & "(" & vid.ppVid & "," & key.ppKey(db) & ")"
-    result &= "}"
+      data &= $n & "(" & vid.ppVid & "," & key.ppKey(db) & ")"
+    dump &= "(" & $n & ")"
+    if 0 < n:
+      dataOk = true
+      dump &= pfx1
+    dump &= "{" & data & "}"
+  if dataOk:
+    result &= dump
+  else:
+    result &= "[]"
 
 proc ppLayer(
     layer: LayerRef;
@@ -532,8 +551,10 @@ proc ppLayer(
 # Public functions
 # ------------------------------------------------------------------------------
 
-proc pp*(w: Hash256): string =
-  if w == EMPTY_ROOT_HASH:
+proc pp*(w: Hash256; codeHashOk = false): string =
+  if codeHashOk:
+    w.ppCodeHash
+  elif w == EMPTY_ROOT_HASH:
     "EMPTY_ROOT_HASH"
   elif w == Hash256():
     "Hash256()"
@@ -546,6 +567,9 @@ proc pp*(w: HashKey; sig: MerkleSignRef): string =
 proc pp*(w: HashKey; db = AristoDbRef(nil)): string =
   w.ppKey(db.orDefault)
 
+proc pp*(w: openArray[HashKey]; db = AristoDbRef(nil)): string =
+  "[" & @w.mapIt(it.ppKey(db.orDefault)).join(",") & "]"
+
 proc pp*(lty: LeafTie, db = AristoDbRef(nil)): string =
   lty.ppLeafTie(db.orDefault)
 
@@ -753,9 +777,11 @@ proc pp*(
     filterOk = true;
     topOk = true;
     stackOk = true;
+    kMapOk = true;
       ): string =
   if topOk:
-    result = db.layersCc.pp(db, indent=indent)
+    result = db.layersCc.pp(
+      db, xTabOk=true, kMapOk=kMapOk, other=true, indent=indent)
   let stackOnlyOk = stackOk and not (topOk or filterOk or backendOk)
   if not stackOnlyOk:
     result &= indent.toPfx & " level=" & $db.stack.len
diff --git a/nimbus/db/aristo/aristo_desc/desc_error.nim b/nimbus/db/aristo/aristo_desc/desc_error.nim
index 3055c1e56..4c6eb2631 100644
--- a/nimbus/db/aristo/aristo_desc/desc_error.nim
+++ b/nimbus/db/aristo/aristo_desc/desc_error.nim
@@ -92,6 +92,7 @@ type
     MergeLeafGarbledHike
     MergeLeafPathCachedAlready
     MergeLeafPathOnBackendAlready
+    MergeLeafProofModeLock
     MergeNonBranchProofModeLock
     MergeRootBranchLinkBusy
     MergeRootMissing
@@ -100,11 +101,15 @@ type
     MergeHashKeyInvalid
     MergeHashKeyDiffersFromCached
     MergeHashKeyRevLookUpGarbled
-    MergeRootVidInvalid
     MergeRootKeyInvalid
+    MergeRootKeyNotInProof
+    MergeRootKeysMissing
+    MergeRootKeysOverflow
     MergeProofInitMissing
     MergeRevVidMustHaveBeenCached
     MergeNodeVtxDiffersFromExisting
+    MergeNodeVidMissing
+    MergeNodeAccountPayloadError
     MergeRootKeyDiffersForVid
     MergeNodeVtxDuplicates
     MergeRootKeyMissing
diff --git a/nimbus/db/aristo/aristo_desc/desc_identifiers.nim b/nimbus/db/aristo/aristo_desc/desc_identifiers.nim
index fff90a79f..a18b359ff 100644
--- a/nimbus/db/aristo/aristo_desc/desc_identifiers.nim
+++ b/nimbus/db/aristo/aristo_desc/desc_identifiers.nim
@@ -313,6 +313,13 @@ func to*(pid: PathID; T: type NibblesSeq): T =
   else:
     nibbles
 
+func `@`*(pid: PathID): Blob =
+  ## Representation of a `PathID` as a `Blob`. The result is left padded
+  ## by a zero LSB if the path length was odd.
+  result = pid.pfx.toBytesBE.toSeq
+  if pid.length < 63:
+    result.setLen((pid.length + 1) shr 1)
+
 func to*(lid: HashKey; T: type Hash256): T =
   ## Returns the `Hash256` key if available, otherwise the Keccak hash of
   ## the `Blob` version.
diff --git a/nimbus/db/aristo/aristo_merge.nim b/nimbus/db/aristo/aristo_merge.nim
index 500d43b78..e6e93d836 100644
--- a/nimbus/db/aristo/aristo_merge.nim
+++ b/nimbus/db/aristo/aristo_merge.nim
@@ -74,6 +74,42 @@ proc to(
   else:
     err(rc.error)
 
+
+proc differ(
+    db: AristoDbRef;                   # Database, top layer
+    p1, p2: PayloadRef;                # Payload values
+      ): bool =
+  ## Check whether payloads differ on the database.
+  ## If `p1` is `RLP` serialised and `p2` is a raw blob, compare serialisations.
+  ## If `p1` is of account type and `p2` is serialised, translate `p2`
+  ## to an account type and compare.
+  ##
+  if p1 == p2:
+    return false
+
+  # Adjust and check for divergent types.
+  if p1.pType != p2.pType:
+    if p1.pType == AccountData:
+      try:
+        let
+          blob = (if p2.pType == RlpData: p2.rlpBlob else: p2.rawBlob)
+          acc = rlp.decode(blob, Account)
+        if acc.nonce == p1.account.nonce and
+           acc.balance == p1.account.balance and
+           acc.codeHash == p1.account.codeHash and
+           acc.storageRoot.isValid == p1.account.storageID.isValid:
+          if not p1.account.storageID.isValid or
+             acc.storageRoot.to(HashKey) == db.getKey p1.account.storageID:
+            return false
+      except RlpError:
+        discard
+
+    elif p1.pType == RlpData:
+      if p2.pType == RawData and p1.rlpBlob == p2.rawBlob:
+        return false
+
+  true
+
 # -----------
 
 proc clearMerkleKeys(
@@ -437,15 +473,16 @@ proc updatePayload(
   let leafLeg = hike.legs[^1]
 
   # Update payloads if they differ
-  if leafLeg.wp.vtx.lData != payload:
+  if db.differ(leafLeg.wp.vtx.lData, payload):
+    let vid = leafLeg.wp.vid
+    if vid in db.pPrf:
+      return err(MergeLeafProofModeLock)
 
     # Update vertex and hike
-    let
-      vid = leafLeg.wp.vid
-      vtx = VertexRef(
-        vType: Leaf,
-        lPfx: leafLeg.wp.vtx.lPfx,
-        lData: payload)
+    let vtx = VertexRef(
+      vType: Leaf,
+      lPfx: leafLeg.wp.vtx.lPfx,
+      lData: payload)
     var hike = hike
     hike.legs[^1].wp.vtx = vtx
@@ -515,27 +552,57 @@ proc mergeNodeImpl(
   # The `vertexID <-> hashKey` mappings need to be set up now (if any)
   case node.vType:
   of Leaf:
-    discard
+    # Check whether there is a need to convert the payload to `Account` payload
+    if rootVid == VertexID(1) and newVtxFromNode:
+      try:
+        let
+          # `aristo_serialise.read()` always decodes raw data payload
+          acc = rlp.decode(node.lData.rawBlob, Account)
+          pyl = PayloadRef(
+            pType: AccountData,
+            account: AristoAccount(
+              nonce: acc.nonce,
+              balance: acc.balance,
+              codeHash: acc.codeHash))
+        if acc.storageRoot.isValid:
+          var sid = db.layerGetProofVidOrVoid acc.storageRoot.to(HashKey)
+          if not sid.isValid:
+            sid = db.vidFetch
+            db.layersPutProof(sid, acc.storageRoot.to(HashKey))
+          pyl.account.storageID = sid
+        vtx.lData = pyl
+      except RlpError:
+        return err(MergeNodeAccountPayloadError)
   of Extension:
     if node.key[0].isValid:
       let eKey = node.key[0]
      if newVtxFromNode:
-        # Brand new reverse lookup link for this vertex
-        vtx.eVid = db.vidFetch
-        db.layersPutProof(vtx.eVid, eKey)
+        vtx.eVid = db.layerGetProofVidOrVoid eKey
+        if not vtx.eVid.isValid:
+          # Brand new reverse lookup link for this vertex
+          vtx.eVid = db.vidFetch
       elif not vtx.eVid.isValid:
-        return err(MergeNodeVtxDiffersFromExisting)
+        return err(MergeNodeVidMissing)
+      else:
+        let yEke = db.getKey vtx.eVid
+        if yEke.isValid and eKey != yEke:
+          return err(MergeNodeVtxDiffersFromExisting)
       db.layersPutProof(vtx.eVid, eKey)
   of Branch:
     for n in 0..15:
       if node.key[n].isValid:
         let bKey = node.key[n]
         if newVtxFromNode:
-          # Brand new reverse lookup link for this vertex
-          vtx.bVid[n] = db.vidFetch
-          db.layersPutProof(vtx.bVid[n], bKey)
+          vtx.bVid[n] = db.layerGetProofVidOrVoid bKey
+          if not vtx.bVid[n].isValid:
+            # Brand new reverse lookup link for this vertex
+            vtx.bVid[n] = db.vidFetch
         elif not vtx.bVid[n].isValid:
-          return err(MergeNodeVtxDiffersFromExisting)
+          return err(MergeNodeVidMissing)
+        else:
+          let yEkb = db.getKey vtx.bVid[n]
+          if yEkb.isValid and yEkb != bKey:
+            return err(MergeNodeVtxDiffersFromExisting)
         db.layersPutProof(vtx.bVid[n], bKey)
 
   # Store and lock vertex
@@ -660,13 +727,17 @@ proc mergeLeaf*(
 proc merge*(
     db: AristoDbRef;                   # Database, top layer
     proof: openArray[SnapProof];       # RLP encoded node records
-    rootVid: VertexID;                 # Current sub-trie
+    rootVid = VertexID(0);             # Current sub-trie
       ): Result[int, AristoError]
       {.gcsafe, raises: [RlpError].} =
   ## The function merges the argument `proof` list of RLP encoded node records
   ## into the `Aristo Trie` database. This function is intended to be used with
   ## the proof nodes as returned by `snap/1` messages.
   ##
+  ## If there is no root vertex ID passed, the function tries to find out what
+  ## the root hashes are and allocates new vertices with static IDs `$2`, `$3`,
+  ## etc.
+  ##
   ## Caveat:
   ##   Proof of concept, not in production yet.
   ##
@@ -675,7 +746,8 @@ proc merge*(
       todo: var KeyedQueueNV[NodeRef];
       key: HashKey;
         ) {.gcsafe, raises: [RlpError].} =
-    ## Check for embedded nodes, i.e. fully encoded node instead of a hash
+    ## Check for embedded nodes, i.e. fully encoded node instead of a hash.
+    ## They need to be treated as full nodes here.
    if key.isValid and key.len < 32:
      let lid = @key.digestTo(HashKey)
      if not seen.hasKey lid:
        discard todo.append node
        seen[lid] = node
 
-  if not rootVid.isValid:
-    return err(MergeRootVidInvalid)
-  let rootKey = db.getKey rootVid
-  if not rootKey.isValid:
-    return err(MergeRootKeyInvalid)
-  # Make sure that the reverse lookup for the root vertex key is available.
-  if not db.layerGetProofVidOrVoid(rootKey).isValid:
-    return err(MergeProofInitMissing)
+  let rootKey = block:
+    if rootVid.isValid:
+      let vidKey = db.getKey rootVid
+      if not vidKey.isValid:
+        return err(MergeRootKeyInvalid)
+      # Make sure that the reverse lookup for the root vertex key is available.
+      if not db.layerGetProofVidOrVoid(vidKey).isValid:
+        return err(MergeProofInitMissing)
+      vidKey
+    else:
+      VOID_HASH_KEY
 
-  # Expand and collect hash keys and nodes
-  var nodeTab: Table[HashKey,NodeRef]
+  # Expand and collect hash keys and nodes and parent indicator
+  var
+    nodeTab: Table[HashKey,NodeRef]
+    rootKeys: HashSet[HashKey]         # Potential root node hashes
 
   for w in proof:
     let key = w.Blob.digestTo(HashKey)
     if node.error != AristoError(0):
       return err(node.error)
     nodeTab[key] = node
+    rootKeys.incl key
 
-    # Check for embedded nodes, i.e. fully encoded node instead of a hash
+    # Check for embedded nodes, i.e. fully encoded node instead of a hash.
+    # They will be added as full nodes to the `nodeTab[]`.
     var embNodes: KeyedQueueNV[NodeRef]
     discard embNodes.append node
     while true:
@@ -727,6 +806,7 @@ proc merge*(
     of Extension:
       if nodeTab.hasKey node.key[0]:
         backLink[node.key[0]] = key
+        rootKeys.excl node.key[0]      # predecessor => not root
       else:
        blindNodes.incl key
     of Branch:
@@ -735,13 +815,45 @@ proc merge*(
       var isBlind = true
       for n in 0..15:
         if nodeTab.hasKey node.key[n]:
           isBlind = false
           backLink[node.key[n]] = key
+          rootKeys.excl node.key[n]    # predecessor => not root
       if isBlind:
         blindNodes.incl key
 
+  # If it exists, the root key must be in the set `rootKeys` in order
+  # to work.
+  var roots: Table[HashKey,VertexID]
+  if rootVid.isValid:
+    if rootKey notin rootKeys:
+      return err(MergeRootKeyNotInProof)
+    roots[rootKey] = rootVid
+  elif rootKeys.len == 0:
+    return err(MergeRootKeysMissing)
+  else:
+    # Add static root keys different from VertexID(1)
+    var count = 2
+    for key in rootKeys.items:
+      while true:
+        # Check for already allocated nodes
+        let vid1 = db.layerGetProofVidOrVoid key
+        if vid1.isValid:
+          roots[key] = vid1
+          break
+        # Use the next static free vertex ID
+        let vid2 = VertexID(count)
+        count.inc
+        if not db.getKey(vid2).isValid:
+          db.layersPutProof(vid2, key)
+          roots[key] = vid2
+          break
+      if LEAST_FREE_VID <= count:
+        return err(MergeRootKeysOverflow)
+
   # Run over blind nodes and build chains from a blind/bottom level node up
   # to the root node. Select only chains that end up at the pre-defined root
   # node.
-  var chains: seq[seq[HashKey]]
+  var
+    accounts: seq[seq[HashKey]]        # This one separated, to be processed last
+    chains: seq[seq[HashKey]]
   for w in blindNodes:
     # Build a chain of nodes up to the root node
     var
       chain: seq[HashKey]
       nodeKey = w
     while nodeKey.isValid and nodeTab.hasKey nodeKey:
       chain.add nodeKey
       nodeKey = backLink.getOrVoid nodeKey
-    if 0 < chain.len and chain[^1] == rootKey:
-      chains.add chain
+    if 0 < chain.len and chain[^1] in roots:
+      if roots.getOrVoid(chain[0]) == VertexID(1):
+        accounts.add chain
+      else:
+        chains.add chain
 
   # Process over chains in reverse mode starting with the root node. This
   # allows the algorithm to find existing nodes on the backend.
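[Aside: the chain-building step above, which walks `backLink` entries from each blind node towards a root and keeps only chains that end at a known root, is easier to see in isolation. Below is a minimal, self-contained sketch of the same idea using plain string keys instead of `HashKey`/`NodeRef`; all names in this snippet are illustrative and not part of the patch.]

  import std/[sets, tables]

  proc buildChains(backLink: Table[string,string];
                   blindNodes: HashSet[string];
                   roots: HashSet[string]): seq[seq[string]] =
    ## Walk predecessor links from each bottom-level (blind) node towards
    ## a root; keep only chains that actually end at a known root.
    for start in blindNodes:
      var
        chain: seq[string]
        key = start
      while key in backLink:
        chain.add key
        key = backLink[key]   # step towards the root
      chain.add key           # the last key has no predecessor
      if chain[^1] in roots:
        result.add chain

  when isMainModule:
    let
      backLink = {"leaf": "branch", "branch": "root"}.toTable
      blind = ["leaf"].toHashSet
      roots = ["root"].toHashSet
    doAssert buildChains(backLink, blind, roots) ==
      @[@["leaf", "branch", "root"]]

[The real code then processes the collected chains in reverse, root first, so that vertices already present on the backend are found before new ones are allocated.]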
@@ -759,11 +874,13 @@ proc merge*(
     seen: HashSet[HashKey]
     merged = 0
   # Process the root ID which is common to all chains
-  for chain in chains:
+  for chain in chains & accounts:
+    let chainRootVid = roots.getOrVoid chain[^1]
     for key in chain.reversed:
       if key notin seen:
         seen.incl key
-        db.mergeNodeImpl(key, nodeTab.getOrVoid key, rootVid).isOkOr:
+        let node = nodeTab.getOrVoid key
+        db.mergeNodeImpl(key, node, chainRootVid).isOkOr:
           return err(error)
         merged.inc
diff --git a/nimbus/db/aristo/aristo_tx.nim b/nimbus/db/aristo/aristo_tx.nim
index 46da1db15..f049a735a 100644
--- a/nimbus/db/aristo/aristo_tx.nim
+++ b/nimbus/db/aristo/aristo_tx.nim
@@ -437,9 +437,12 @@ proc stow*(
     # Merge `top` layer into `roFilter`
     db.merge(fwd).isOkOr:
       return err(error[1])
+    let final =
+      if chunkedMpt: LayerFinalRef(fRpp: db.top.final.fRpp)
+      else: LayerFinalRef()
     db.top = LayerRef(
       delta: LayerDeltaRef(),
-      final: LayerFinalRef())
+      final: final)
     if db.roFilter.isValid:
       db.top.final.vGen = db.roFilter.vGen
     else:
@@ -456,9 +459,12 @@ proc stow*(
     db.roFilter = FilterRef(nil)
 
   # Delete/clear top
+  let final =
+    if chunkedMpt: LayerFinalRef(vGen: db.vGen, fRpp: db.top.final.fRpp)
+    else: LayerFinalRef(vGen: db.vGen)
   db.top = LayerRef(
     delta: LayerDeltaRef(),
-    final: LayerFinalRef(vGen: db.vGen),
+    final: final,
     txUid: db.top.txUid)
 
   ok()
diff --git a/nimbus/db/core_db/backend/aristo_db.nim b/nimbus/db/core_db/backend/aristo_db.nim
index fc896a156..e1f80b4f9 100644
--- a/nimbus/db/core_db/backend/aristo_db.nim
+++ b/nimbus/db/core_db/backend/aristo_db.nim
@@ -32,7 +32,8 @@ import
 
 export
   AristoApiRlpError,
-  AristoCoreDbKvtBE
+  AristoCoreDbKvtBE,
+  isAristo
 
 type
   AristoCoreDbRef* = ref object of CoreDbRef
@@ -98,9 +99,9 @@ proc cptMethods(
     tracer: AristoTracerRef;
       ): CoreDbCaptFns =
   let
-    tracer = tracer             # So it can savely be captured
-    db = tracer.parent          # Will not change and can be captured
-    log = tracer.topInst()      # Ditto
+    tr = tracer                 # So it can safely be captured
+    db = tr.parent              # Will not change and can be captured
+    log = tr.topInst()          # Ditto
 
   CoreDbCaptFns(
     recorderFn: proc(): CoreDbRef =
@@ -113,8 +114,9 @@ proc cptMethods(
       log.flags,
 
     forgetFn: proc() =
-      if tracer.pop():
-        tracer.restore())
+      if not tracer.pop():
+        tr.parent.tracer = AristoTracerRef(nil)
+      tr.restore())
 
 
 proc baseMethods(
diff --git a/nimbus/db/core_db/backend/aristo_db/handlers_aristo.nim b/nimbus/db/core_db/backend/aristo_db/handlers_aristo.nim
index 2e3bb02cc..d649ac147 100644
--- a/nimbus/db/core_db/backend/aristo_db/handlers_aristo.nim
+++ b/nimbus/db/core_db/backend/aristo_db/handlers_aristo.nim
@@ -88,6 +88,12 @@ func to(trie: CoreDbTrieRef; T: type VertexID): T =
 func to(address: EthAddress; T: type PathID): T =
   HashKey.fromBytes(address.keccakHash.data).value.to(T)
 
+func resetTrie(kind: CoreDbSubTrie): bool =
+  ## Check whether to reset some non-dynamic trie when instantiating. It
+  ## emulates the behaviour of a new empty MPT on the legacy database.
+  kind == GenericTrie or
+    (high(CoreDbSubTrie) < kind and kind.ord < LEAST_FREE_VID)
+
 # -------------------------------
 
 func toCoreDbAccount(
@@ -475,11 +481,9 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
       if not root.isValid:
         return ok(db.bless trie)
 
-      # Reset non-dynamic trie when instantiating. This applies to root IDs beween
-      # `VertexID(2) .. LEAST_FREE_VID`. It emulates the behaviour of a new empty
-      # MPT on the legacy database.
-      if AccountsTrie < kind and kind.ord < LEAST_FREE_VID:
-        trie.reset = true
+      # Reset some non-dynamic trie when instantiating. It emulates the behaviour
+      # of a new empty MPT on the legacy database.
+      trie.reset = kind.resetTrie()
 
       # Update hashes in order to verify the trie state root.
       ? api.hashify(mpt).toVoidRc(base, info, HashNotAvailable)
@@ -521,7 +525,7 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
         if rc.isErr:
           return err(rc.error[1].toError(base, info, AccNotFound))
       else:
-        reset = AccountsTrie < trie.kind
+        reset = trie.kind.resetTrie()
         newMpt = AristoCoreDxMptRef(
           root: VertexID(trie.kind),
           accPath: VOID_PATH_ID)
@@ -747,7 +751,7 @@ proc init*(
     base: base,
     mpt: newMpt)
   ctx.methods = ctx.ctxMethods
-  ok( base.parent.bless ctx)
+  ok(base.parent.bless ctx)
 
 # ------------------------------------------------------------------------------
 # End
diff --git a/nimbus/db/core_db/backend/aristo_db/handlers_trace.nim b/nimbus/db/core_db/backend/aristo_db/handlers_trace.nim
index 7f39bf996..b2f89abe0 100644
--- a/nimbus/db/core_db/backend/aristo_db/handlers_trace.nim
+++ b/nimbus/db/core_db/backend/aristo_db/handlers_trace.nim
@@ -11,13 +11,13 @@
 {.push raises: [].}
 
 import
-  std/[strutils, tables],
+  std/tables,
   eth/common,
-  stew/byteutils,
   results,
   ../../../aristo as use_aristo,
-  ../../../aristo/aristo_path,
+  ../../../aristo/[aristo_desc, aristo_path],
   ../../../kvt as use_kvt,
+  ../../../kvt/kvt_desc,
   ../../base,
   ../../base/base_desc,
   "."/[handlers_kvt, handlers_aristo]
@@ -34,67 +34,116 @@ type
     base: AristoBaseRef
     savedApi: AristoApiRef
 
+  TracerBlobRef* = ref object
+    ## `Kvt` journal entry
+    blind: bool                        ## Marked `true` for `get()` logs
+    old: Blob
+    cur: Blob
+
+  TracerPylRef* = ref object
+    ## `Aristo` journal entry
+    blind: bool                        ## Marked `true` for `fetch()` logs
+    accPath: PathID                    ## Account path needed for storage data
+    old: PayloadRef                    ## Deleted or just cached payload version
+    cur: PayloadRef                    ## Updated/current or just cached
+    curBlob: Blob                      ## Serialised version for `cur` accounts data
+
+  TracerBlobTabRef* =
+    TableRef[Blob,TracerBlobRef]
+
+  TracerPylTabRef* =
+    TableRef[LeafTie,TracerPylRef]
+
   TracerLogInstRef* = ref object
     ## Logger instance
-    level*: uint8
+    txLevel*: int
     flags*: set[CoreDbCaptFlags]
-    kLog*: TableRef[Blob,Blob]
-    mLog*: TableRef[LeafTie,CoreDbPayloadRef]
+    kvtJournal*: TableRef[KvtDbRef,TracerBlobTabRef]
+    mptJournal*: TableRef[AristoDbRef,TracerPylTabRef]
 
   TraceRecorderRef* = ref object of RootRef
     inst: seq[TracerLogInstRef]        ## Production stack for log database
     kdb: TraceKdbRecorder              ## Contains restore information
     adb: TraceAdbRecorder              ## Contains restore information
 
-when EnableDebugLog:
-  import chronicles
+proc push*(tr: TraceRecorderRef; flags: set[CoreDbCaptFlags]) {.gcsafe.}
 
 # ------------------------------------------------------------------------------
 # Private helpers
 # ------------------------------------------------------------------------------
 
-func toStr(key: openArray[byte]): string =
-  key.toHex
+when EnableDebugLog:
+  import
+    std/strutils,
+    chronicles,
+    stew/byteutils
 
-func `$`(root: VertexID): string =
-  let vid = root.uint64
-  if 0 < vid:
-    "$" & vid.toHex.strip(leading=true, trailing=false, chars={'0'})
-  else:
-    "$ø"
+  func toStr(key: openArray[byte]): string =
+    key.toHex
 
-func `$`(pyl: PayloadRef): string =
-  case pyl.pType:
-  of RawData:
-    pyl.rawBlob.toStr
-  of RlpData:
-    pyl.rlpBlob.toStr
-  of AccountData:
-    ""
+  func `$`(data: Blob): string =
+    data.toStr
 
-func `$`(pyl: CoreDbPayloadRef): string =
-  if 0 < pyl.blob.len:
-    pyl.blob.toStr
-  else:
-    $pyl
+  func `$`(lty: LeafTie): string =
+    $lty.root & ":" & $lty.path
 
-func `$`(data: Blob): string =
-  data.toStr
+  func `$`(root: VertexID): string =
+    let vid = root.uint64
+    if 0 < vid:
+      "$" & vid.toHex.strip(leading=true, trailing=false, chars={'0'})
+    else:
+      "$ø"
 
-func `$`(lty: LeafTie): string =
-  $lty.root & ":" & $lty.path
+  func `$`(pyl: PayloadRef): string =
+    case pyl.pType:
+    of RawData:
+      pyl.rawBlob.toStr
+    of RlpData:
+      pyl.rlpBlob.toStr
+    of AccountData:
+      ""
+
+  func `$`(tpl: TracerPylRef): string =
+    result = "("
+    if tpl.blind:
+      result &= "Touched"
+    elif tpl.cur.isNil:
+      result &= "Deleted"
+    elif tpl.old.isNil:
+      result &= "Added"
+    else:
+      result &= "Update"
+    result &= ","
+
+    if tpl.accPath.isValid:
+      result &= $tpl.accPath
+    else:
+      result &= "ø"
+    result &= ","
+
+    if 0 < tpl.curBlob.len:
+      result &= tpl.curBlob.toStr
+    else:
+      result &= $tpl.cur
+    result &= ")"
+
+  func `$`(tbl: TracerBlobRef): string =
+    result = "("
+    if tbl.blind:
+      result &= "Touched"
+    elif tbl.cur.len == 0:
+      result &= "Deleted"
+    elif tbl.old.len == 0:
+      result &= "Added"
+    else:
+      result &= "Update"
+    result &= "," & tbl.cur.toStr & ")"
 
 # -------------------------
 
 func getOrVoid(tab: TableRef[Blob,Blob]; w: openArray[byte]): Blob =
   tab.getOrDefault(@w, EmptyBlob)
 
-func getOrVoid(
-    tab: TableRef[LeafTie,CoreDbPayloadRef];
-    lty: LeafTie;
-      ): CoreDbPayloadRef =
-  tab.getOrDefault(lty, CoreDbPayloadRef(nil))
-
 func leafTie(
     root: VertexID;
     path: openArray[byte];
@@ -103,32 +152,166 @@ func leafTie(
     return err((VertexID(root), error))
   ok LeafTie(root: root, path: tag)
 
-func to(pyl: PayloadRef; T: type CoreDbPayloadRef): T =
-  case pyl.pType:
-  of RawData:
-    T(pType: RawData, rawBlob: pyl.rawBlob)
-  of RlpData:
-    T(pType: RlpData, rlpBlob: pyl.rlpBlob)
-  of AccountData:
-    T(pType: AccountData, account: pyl.account)
-
-func to(data: openArray[byte]; T: type CoreDbPayloadRef): T =
-  T(pType: RawData, rawBlob: @data)
-
-proc update(
-    pyl: CoreDbPayloadRef;
+proc blobify(
+    pyl: PayloadRef;
     api: AristoApiRef;
     mpt: AristoDbRef;
-      ): Result[CoreDbPayloadRef,(VertexID,AristoError)] =
+      ): Result[Blob,(VertexID,AristoError)] =
+  var blob = EmptyBlob
   if pyl.pType == AccountData:
-    pyl.blob = block:
+    blob = block:
       let rc = api.serialise(mpt, pyl)
       if rc.isOk:
         rc.value
       else:
         ? api.hashify(mpt)
         ? api.serialise(mpt, pyl)
-  ok(pyl)
+  ok(blob)
+
+# -------------------------------
+
+proc kvtJournalPut(
+    tr: TraceRecorderRef;
+    kvt: KvtDbRef;
+    key: openArray[byte];
+    tbl: TracerBlobRef;
+      ) =
+  var byKvt = tr.inst[^1].kvtJournal.getOrDefault kvt
+  if byKvt.isNil:
+    byKvt = newTable[Blob,TracerBlobRef]()
+    tr.inst[^1].kvtJournal[kvt] = byKvt
+  byKvt[@key] = tbl
+
+proc kvtJournalDel(
+    tr: TraceRecorderRef;
+    kvt: KvtDbRef;
+    key: openArray[byte];
+      ) =
+  var byKvt = tr.inst[^1].kvtJournal.getOrDefault kvt
+  if not byKvt.isNil:
+    byKvt.del @key
+    if byKvt.len == 0:
+      tr.inst[^1].kvtJournal.del kvt
+
+proc kvtJournalGet(
+    tr: TraceRecorderRef;
+    kvt: KvtDbRef;
+    key: openArray[byte];
+    modOnly = true;
+      ): TracerBlobRef =
+  var byKvt = tr.inst[^1].kvtJournal.getOrDefault kvt
+  if not byKvt.isNil:
+    let tbl = byKvt.getOrDefault @key
+    if not modOnly or tbl.isNil or not tbl.blind:
+      return tbl
+
+
+proc mptJournalPut(
+    tr: TraceRecorderRef;
+    mpt: AristoDbRef;
+    key: LeafTie;
+    tpl: TracerPylRef;
+      ) =
+  var byMpt = tr.inst[^1].mptJournal.getOrDefault mpt
+  if byMpt.isNil:
+    byMpt = newTable[LeafTie,TracerPylRef]()
+    tr.inst[^1].mptJournal[mpt] = byMpt
+  byMpt[key] = tpl
+
+proc mptJournalDel(
+    tr: TraceRecorderRef;
+    mpt: AristoDbRef;
+    key: LeafTie;
+      ) =
+  let byMpt = tr.inst[^1].mptJournal.getOrDefault mpt
+  if not byMpt.isNil:
+    byMpt.del key
+    if byMpt.len == 0:
+      tr.inst[^1].mptJournal.del mpt
+
+proc mptJournalGet(
+    tr: TraceRecorderRef;
+    mpt: AristoDbRef;
+    key: LeafTie;
+    modOnly = true;
+      ): TracerPylRef =
+  let byMpt = tr.inst[^1].mptJournal.getOrDefault mpt
+  if not byMpt.isNil:
+    let pyl = byMpt.getOrDefault key
+    if not modOnly or pyl.isNil or not pyl.blind:
+      return pyl
+
+
+proc popDiscard(tr: TraceRecorderRef) =
+  ## Pop top journal.
+  doAssert 0 < tr.inst.len
+  tr.inst.setLen(tr.inst.len - 1)
+
+proc popRestore(tr: TraceRecorderRef) =
+  ## Undo journals and remove/pop top entry.
+  doAssert 0 < tr.inst.len
+
+  let inst = tr.inst[^1]
+  tr.inst.setLen(tr.inst.len - 1) # pop
+
+  let mApi = tr.adb.savedApi
+  for (mpt,mptTab) in inst.mptJournal.pairs:
+    for (key,tpl) in mptTab.pairs:
+      if not tpl.blind:
+        let (root, path, accPath) = (key.root, @(key.path), tpl.accPath)
+        if tpl.old.isNil:
+          if PersistPut notin inst.flags:
+            doAssert mApi.delete(mpt, root, path, accPath).isOk
+        else:
+          if PersistDel notin inst.flags:
+            doAssert mApi.mergePayload(mpt, root, path, tpl.old, accPath).isOk
+
+  let kApi = tr.kdb.savedApi
+  for (kvt,kvtTab) in inst.kvtJournal.pairs:
+    for (key,tbl) in kvtTab.pairs:
+      if not tbl.blind:
+        if tbl.old.len == 0:
+          if PersistPut notin inst.flags:
+            doAssert kApi.del(kvt, key).isOk
+        else:
+          if PersistDel notin inst.flags:
+            doAssert kApi.put(kvt, key, tbl.old).isOk
+
+proc popMerge(tr: TraceRecorderRef) =
+  ## Merge top journal into layer below. The function requires at least
+  ## two stack entries.
+  doAssert 1 < tr.inst.len
+
+  let inst = tr.inst[^1]
+  tr.inst.setLen(tr.inst.len - 1) # pop
+
+  for (mpt,mptTab) in inst.mptJournal.pairs:
+    for (key,tpl) in mptTab.pairs:
+      let jrn = tr.mptJournalGet(mpt, key)
+      if not jrn.isNil:
+        if jrn.old != tpl.cur:
+          tpl.old = jrn.old
+        else:
+          tpl.blind = true
+      tr.mptJournalPut(mpt, key, tpl)
+
+  for (kvt,kvtTab) in inst.kvtJournal.pairs:
+    for (key,tbl) in kvtTab.pairs:
+      let jrn = tr.kvtJournalGet(kvt, key)
+      if not jrn.isNil:
+        if jrn.old != tbl.cur:
+          tbl.old = jrn.old
+        else:
+          tbl.blind = true
+      tr.kvtJournalPut(kvt, key, tbl)
+
+
+proc pushNew(tr: TraceRecorderRef; flags: set[CoreDbCaptFlags]) =
+  ## Add a new journal
+  tr.inst.add TracerLogInstRef(
+    kvtJournal: newTable[KvtDbRef,TracerBlobTabRef](),
+    mptJournal: newTable[AristoDbRef,TracerPylTabRef](),
+    flags: flags)
 
 # ------------------------------------------------------------------------------
 # Private functions
 # ------------------------------------------------------------------------------
@@ -146,92 +329,203 @@ proc traceRecorder(
   tracerApi.get =
     proc(kvt: KvtDbRef; key: openArray[byte]): Result[Blob,KvtError] =
       when EnableDebugLog:
-        const logTxt = "trace get"
+        const
+          logTxt = "trace get"
+        let
+          level = tr.inst.len - 1
+          flags = tr.inst[^1].flags
 
-      # Try to fetch data from the stacked logger instances
-      var (data, pos) = (EmptyBlob, -1)
-      for level in (tr.inst.len-1).countDown(0):
-        data = tr.inst[level].kLog.getOrVoid key
-        if 0 < data.len:
-          when EnableDebugLog:
-            debug logTxt, level, log="get()", key=key.toStr, result=data.toStr
-          pos = level
-          break
+      # Use journal entry if available
+      let jrn = tr.kvtJournalGet(kvt, key, modOnly=false)
+      if not jrn.isNil:
+        when EnableDebugLog:
+          debug logTxt, level, flags, key=key.toStr, log="get()", data=($jrn)
+        if jrn.cur.len == 0:
+          return err(use_kvt.GetNotFound)
+        else:
+          return ok jrn.cur
 
-      # Alternatively fetch data from the production DB instance
-      if pos < 0:
+      let
+        # Find entry on DB
         data = api.get(kvt, key).valueOr:
           when EnableDebugLog:
-            debug logTxt, key=key.toStr, error
+            debug logTxt, level, flags, key=key.toStr, error
           return err(error) # No way
 
-      # Data available, store in all top level instances
-      for level in pos+1 ..< tr.inst.len:
-        tr.inst[level].kLog[@key] = data
-        when EnableDebugLog:
-          debug logTxt, level, log="put()", key=key.toStr, result=data.toStr
+        # Journal entry
+        tbl = TracerBlobRef(blind: true, cur: data)
+
+      # Update journal
+      tr.kvtJournalPut(kvt, key, tbl)
+      when EnableDebugLog:
+        debug logTxt, level, flags, key=key.toStr, data=($tbl)
 
       ok(data)
 
   tracerApi.del =
     proc(kvt: KvtDbRef; key: openArray[byte]): Result[void,KvtError] =
       when EnableDebugLog:
-        const logTxt = "trace del"
-
-      # Delete data on the stacked logger instances
-      for level in (tr.inst.len-1).countDown(0):
-        let flags = tr.inst[level].flags
-        tr.inst[level].kLog.del @key
-        when EnableDebugLog:
-          debug logTxt, level, log="del()", flags, key=key.toStr
-        if PersistDel notin flags:
+        const
+          logTxt = "trace del"
+        let
+          level = tr.inst.len - 1
+          flags = tr.inst[^1].flags
+
+      let
+        # Find entry on DB
+        data = api.get(kvt, key).valueOr:
+          when EnableDebugLog:
+            debug logTxt, level, flags, key=key.toStr, error
+          if error != use_kvt.GetNotFound:
+            return err(error)
           return ok()
 
-      when EnableDebugLog:
-        debug logTxt, key=key.toStr
-      api.del(kvt, key)
+      # Delete from DB
+      api.del(kvt, key).isOkOr:
+        when EnableDebugLog:
+          debug logTxt, level, flags, key=key.toStr, error
+        return err(error)
+
+      # Update journal
+      let jrn = tr.kvtJournalGet(kvt, key)
+      if jrn.isNil:
+        let tbl = TracerBlobRef(old: data)
+        tr.kvtJournalPut(kvt, key, tbl)
+        when EnableDebugLog:
+          debug logTxt, level, flags, key=key.toStr, log="put()", data=($tbl)
+
+      elif jrn.old.len == 0:
+        # Was just added earlier
+        tr.kvtJournalDel(kvt, key) # Undo earlier stuff
+        when EnableDebugLog:
+          debug logTxt, level, flags, key=key.toStr, log="del()"
+
+      else:
+        # Was modified earlier
+        let tbl = TracerBlobRef(old: jrn.old)
+        tr.kvtJournalPut(kvt, key, tbl)
+        when EnableDebugLog:
+          debug logTxt, level, flags, key=key.toStr, log="put()", data=($tbl)
+
+      ok()
 
   tracerApi.put =
     proc(kvt: KvtDbRef; key, data: openArray[byte]): Result[void,KvtError] =
       when EnableDebugLog:
-        const logTxt = "trace put"
+        const
+          logTxt = "trace put"
+        let
+          level = tr.inst.len - 1
+          flags = tr.inst[^1].flags
+
+      let
+        # Create journal entry
+        tbl = TracerBlobRef(cur: @data)
 
-      # Store data on the stacked logger instances
-      for level in (tr.inst.len-1).countDown(0):
-        let flags = tr.inst[level].flags
-        tr.inst[level].kLog[@key] = @data
-        when EnableDebugLog:
-          debug logTxt, level, log="put()",
-            flags, key=key.toStr, data=data.toStr
-        if PersistPut notin flags:
-          return ok()
-
-      when EnableDebugLog:
-        debug logTxt, key=key.toStr, data=data.toStr
-      api.put(kvt, key, data)
-
-  tracerApi.hasKey =
-    proc(kvt: KvtDbRef; key: openArray[byte]): Result[bool,KvtError] =
-      when EnableDebugLog:
-        const logTxt = "trace hasKey"
-
-      # Try to fetch data from the stacked logger instances
-      for level in (tr.inst.len-1).countDown(0):
-        if tr.inst[level].kLog.hasKey @key:
+      # Update journal entry so that previous state is saved
+      let jrn = tr.kvtJournalGet(kvt, key)
+      if jrn.isNil:
+        # Find current entry on the DB
+        let rc = api.get(kvt, key)
+        if rc.isOk:
+          tbl.old = rc.value
+        elif rc.error != use_kvt.GetNotFound:
           when EnableDebugLog:
-            debug logTxt, level, log="get()", key=key.toStr, result=true
-          return ok(true)
+            debug logTxt, level, flags, key=key.toStr, error=rc.error
+          return err(rc.error)
+      elif 0 < jrn.old.len:
+        tbl.old = jrn.old
 
-      # Alternatively fetch data from the production DB instance
+      # Store on DB
+      api.put(kvt, key, data).isOkOr:
+        when EnableDebugLog:
+          debug logTxt, level, flags, key=key.toStr, data=data.toStr
+        return err(error)
+
+      tr.kvtJournalPut(kvt, key, tbl)
       when EnableDebugLog:
-        debug logTxt, key=key.toStr
-      api.hasKey(kvt, key)
+        debug logTxt, level, flags, key=key.toStr, data=($tbl)
+
+      ok()
+
+  # It is enough to catch transactions on the `Kvt` tracer only
+  tracerApi.txBegin =
+    proc(kvt: KvtDbRef): Result[KvtTxRef,KvtError] =
+      when EnableDebugLog:
+        const
+          logTxt = "trace txBegin"
+        let
+          level = tr.inst.len - 1
+          txLevel = tr.inst[^1].txLevel
+
+      let
+        flags = tr.inst[^1].flags
+        tx = api.txBegin(kvt).valueOr:
+          when EnableDebugLog:
+            debug logTxt, level, txLevel, flags, error
+          return err(error)
+
+      tr.push flags
+      tr.inst[^1].txLevel = api.level kvt
+      doAssert 0 < tr.inst[^1].txLevel
+      when EnableDebugLog:
+        debug logTxt, level=(level+1), txLevel=tr.inst[^1].txLevel, flags
+
+      ok tx
+
+  tracerApi.commit =
+    proc(tx: KvtTxRef): Result[void,KvtError] =
+      when EnableDebugLog:
+        const
+          logTxt = "trace commit"
+        let
+          level = tr.inst.len - 1
+          flags = tr.inst[^1].flags
+          txLevel = tr.inst[^1].txLevel
+
+      when EnableDebugLog:
+        debug logTxt, level, txLevel, flags
+
+      # Make sure that the system is properly nested
+      doAssert tr.inst[^1].txLevel == api.level api.toKvtDbRef(tx)
+      tr.popMerge()
+
+      api.commit(tx).isOkOr:
+        when EnableDebugLog:
+          debug logTxt, level, txLevel, flags, error
+        return err(error)
+
+      ok()
+
+  tracerApi.rollback =
+    proc(tx: KvtTxRef): Result[void,KvtError] =
+      when EnableDebugLog:
+        const
+          logTxt = "trace rollback"
+        let
+          level = tr.inst.len - 1
+          flags = tr.inst[^1].flags
+          txLevel = tr.inst[^1].txLevel
+
+      when EnableDebugLog:
+        debug logTxt, level, txLevel, flags
+
+      # Make sure that the system is properly nested
+      doAssert tr.inst[^1].txLevel == api.level api.toKvtDbRef(tx)
+      tr.popDiscard()
+
+      api.rollback(tx).isOkOr:
+        when EnableDebugLog:
+          debug logTxt, level, txLevel, flags, error
+        return err(error)
+
+      ok()
 
   result = TraceKdbRecorder(
     base: base,
     savedApi: api)
   base.api = tracerApi
+  assert result.savedApi != base.api
+  assert result.savedApi.del != base.api.del
+  assert result.savedApi.hasKey == base.api.hasKey
 
 
 proc traceRecorder(
@@ -248,46 +542,49 @@ proc traceRecorder(
       path: openArray[byte];
         ): Result[PayloadRef,(VertexID,AristoError)] =
       when EnableDebugLog:
-        const logTxt = "trace fetchPayload"
-
-      let key = leafTie(root, path).valueOr:
-        when EnableDebugLog:
-          debug logTxt, root, path=path.toStr, error=error[1]
-        return err(error)
-
-      # Try to fetch data from the stacked logger instances
-      var (pyl, pos) = (CoreDbPayloadRef(nil), -1)
-      for level in (tr.inst.len-1).countDown(0):
-        pyl = tr.inst[level].mLog.getOrVoid key
-        if not pyl.isNil:
-          pos = level
-          when EnableDebugLog:
-            debug logTxt, level, key, result=($pyl)
-          break
-
-      # Alternatively fetch data from the production DB instance
-      if pyl.isNil:
-        pyl = block:
-          let rc = api.fetchPayload(mpt, root, path)
-          if rc.isErr:
-            when EnableDebugLog:
-              debug logTxt, level=0, key, error=rc.error[1]
-            return err(rc.error)
-          rc.value.to(CoreDbPayloadRef)
-
-      # For accounts payload serialise the data
-      pyl = pyl.update(api, mpt).valueOr:
-        when EnableDebugLog:
-          debug logTxt, key, pyl, error=(error[1])
-        return err(error)
-
-      # Data and payload available, store in all top level instances
-      for level in pos+1 ..< tr.inst.len:
-        tr.inst[level].mLog[key] = pyl
+        const
+          logTxt = "trace fetchPayload"
+        let
+          level = tr.inst.len - 1
+          flags = tr.inst[^1].flags
+      let
+        key = leafTie(root, path).valueOr:
+          when EnableDebugLog:
+            debug logTxt, level, flags, root, path=path.toStr, error=error[1]
+          return err(error)
+
+      # Use journal entry if available
+      let jrn = tr.mptJournalGet(mpt, key, modOnly=false)
+      if not jrn.isNil:
         when EnableDebugLog:
-          debug logTxt, level, log="put()", key, result=($pyl)
+          debug logTxt, level, flags, key, log="get()", data=($jrn)
+        if jrn.cur.isNil:
+          return err((VertexID(0),FetchPathNotFound))
+        else:
+          return ok jrn.cur
 
-      ok(pyl)
+      let
+        # Find on DB
+        pyl = api.fetchPayload(mpt, root, path).valueOr:
+          when EnableDebugLog:
+            debug logTxt, level, flags, key, error=error[1]
+          return err(error)
+
+        # Serialise (if needed)
+        blob = pyl.blobify(api, mpt).valueOr:
+          when EnableDebugLog:
+            debug logTxt, level, flags, key, error=error[1]
+          return err(error)
+
+        # Journal entry
+        tPyl = TracerPylRef(blind: true, cur: pyl, curBlob: blob)
+
+      # Update journal
+      tr.mptJournalPut(mpt, key, tPyl)
+      when EnableDebugLog:
+        debug logTxt, level, flags, key, log="put()", data=($tPyl)
+
+      ok pyl
 
   tracerApi.delete =
     proc(mpt: AristoDbRef;
@@ -296,53 +593,105 @@ proc traceRecorder(
         accPath: PathID;
           ): Result[bool,(VertexID,AristoError)] =
       when EnableDebugLog:
-        const logTxt = "trace delete"
+        const
+          logTxt = "trace delete"
+        let
+          level = tr.inst.len - 1
+          flags = tr.inst[^1].flags
+      let
+        key = leafTie(root, path).valueOr:
+          when EnableDebugLog:
+            debug logTxt, level, flags, root, path=path.toStr, error=error[1]
+          return err(error)
 
-      let key = leafTie(root, path).valueOr:
+        # Find entry on the DB
+        pyl = api.fetchPayload(mpt, root, path).valueOr:
+          when EnableDebugLog:
+            debug logTxt, level, flags, key, error=error[1]
+          if error[1] == FetchPathNotFound:
+            return err((error[0], DelPathNotFound))
+          return err(error)
+
+        # Delete from DB
+        deleted = api.delete(mpt, root, path, accPath).valueOr:
+          when EnableDebugLog:
+            debug logTxt, level, flags, key, error
+          return err(error)
+
+      # Update journal
+      let jrn = tr.mptJournalGet(mpt, key)
+      if jrn.isNil:
+        let tpl = TracerPylRef(old: pyl, accPath: accPath)
+        tr.mptJournalPut(mpt, key, tpl)
         when EnableDebugLog:
-          debug logTxt, root, path=path.toStr, error=error[1]
-        return err(error)
+          debug logTxt, level, flags, key, log="put()", data=($tpl)
 
-      # Delete data on the stacked logger instances
-      for level in (tr.inst.len-1).countDown(0):
-        let flags = tr.inst[level].flags
-        tr.inst[level].mLog.del key
+      elif jrn.old.isNil:
+        # Was just added earlier
+        tr.mptJournalDel(mpt, key) # Undo earlier stuff
+        when EnableDebugLog:
+          debug logTxt, level, flags, key, log="del()"
+
+      else:
+        # Was modified earlier
+        let tpl = TracerPylRef(old: jrn.old, accPath: jrn.accPath)
+        tr.mptJournalPut(mpt, key, tpl)
         when EnableDebugLog:
-          debug logTxt, level, log="del()", flags, key
-        if PersistDel notin flags:
-          return ok(false)
+          debug logTxt, level, flags, key, log="put()", data=($tpl)
 
-      when EnableDebugLog:
-        debug logTxt, key, accPath
-      api.delete(mpt, root, path, accPath)
+      ok deleted
 
   tracerApi.merge =
     proc(mpt: AristoDbRef;
         root: VertexID;
-        path, data: openArray[byte];
+        path: openArray[byte];
+        data: openArray[byte];
         accPath: PathID;
           ): Result[bool,AristoError] =
       when EnableDebugLog:
-        const logTxt = "trace merge"
+        const
+          logTxt = "trace merge"
+        let
+          level = tr.inst.len - 1
+          flags = tr.inst[^1].flags
+      let
+        key = leafTie(root, path).valueOr:
+          when EnableDebugLog:
+            debug logTxt, level, flags, root, path=path.toStr, error=error[1]
+          return err(error[1])
 
-      let key = leafTie(root, path).valueOr:
+        # Create journal entry, `pType` same as generated by `merge()`
+        tpl = TracerPylRef(
+          accPath: accPath,
+          cur: PayloadRef(pType: RawData, rawBlob: @data))
+
+      # Update journal
+      let jrn = tr.mptJournalGet(mpt, key)
+      if jrn.isNil:
+        # Find current entry on the DB
+        let rc = api.fetchPayload(mpt, root, path)
+        if rc.isOk:
+          tpl.old = rc.value
+          tpl.accPath = accPath
+        elif rc.error[1] != FetchPathNotFound:
+          when EnableDebugLog:
+            debug logTxt, level, flags, key, error=rc.error[1]
+          return err(rc.error[1])
+      elif not jrn.old.isNil:
+        tpl.old = jrn.old
+        tpl.accPath = jrn.accPath
+
+      # Merge on DB
+      let merged = api.merge(mpt, root, path, data, accPath).valueOr:
         when EnableDebugLog:
-          debug logTxt, root, path=path.toStr, error=error[1]
-        return err(error[1])
-
-      # Store data on the stacked logger instances
-      let pyl = data.to(CoreDbPayloadRef)
-      for level in (tr.inst.len-1).countDown(0):
-        let flags = tr.inst[level].flags
-        tr.inst[level].mLog[key] = pyl
-        when EnableDebugLog:
-          debug logTxt, level, log="put()", flags, key, data=($pyl)
-        if PersistPut notin flags:
-          return ok(false)
+          debug logTxt, level, flags, key, accPath, error
+        return err(error)
 
+      tr.mptJournalPut(mpt, key, tpl)
       when EnableDebugLog:
-        debug logTxt, key, data=($pyl), accPath
-      api.merge(mpt, root, path, data, accPath)
+        debug logTxt, level, flags, key, accPath, log="put()", data=($tpl)
+
+      ok merged
 
   tracerApi.mergePayload =
     proc(mpt: AristoDbRef;
@@ -352,62 +701,65 @@ proc traceRecorder(
         accPath = VOID_PATH_ID;
           ): Result[bool,AristoError] =
       when EnableDebugLog:
-        const logTxt = "trace mergePayload"
+        const
+          logTxt = "trace mergePayload"
+        let
+          level = tr.inst.len - 1
+          flags = tr.inst[^1].flags
+      let
+        key = leafTie(root, path).valueOr:
+          when EnableDebugLog:
+            debug logTxt, level, flags, root, path=path.toStr, error=error[1]
+          return err(error[1])
 
-      let key = leafTie(root, path).valueOr:
+        # Create serialised payload
+        blob = pyl.blobify(api, mpt).valueOr:
+          when EnableDebugLog:
+            debug logTxt, level, flags, key, error=error[1]
+          return err(error[1])
+
+        # Create journal entry
+        tpl = TracerPylRef(
+          accPath: accPath,
+          cur: pyl,
+          curBlob: blob)
+
+      # Update journal
+      let jrn = tr.mptJournalGet(mpt, key)
+      if jrn.isNil:
+        # Find current entry on the DB
+        let rc = api.fetchPayload(mpt, root, path)
+        if rc.isOk:
+          tpl.old = rc.value
+          tpl.accPath = accPath
+        elif rc.error[1] != FetchPathNotFound:
+          when EnableDebugLog:
+            debug logTxt, level, flags, key, error=rc.error[1]
+          return err(rc.error[1])
+      elif not jrn.old.isNil:
+        tpl.old = jrn.old
+        tpl.accPath = jrn.accPath
+
+      # Merge on DB
+      let merged = api.mergePayload(mpt, root, path, pyl, accPath).valueOr:
         when EnableDebugLog:
-          debug logTxt, root, path=path.toStr, error=error[1]
-        return err(error[1])
-
-      # For accounts payload add serialised version of the data to `pyl`
-      var pyl = pyl.to(CoreDbPayloadRef).update(api, mpt).valueOr:
-        when EnableDebugLog:
-          debug logTxt, key, pyl, error=(error[1])
-        return err(error[1])
-
-      # Store data on the stacked logger instances
-      for level in (tr.inst.len-1).countDown(0):
-        let flags = tr.inst[level].flags
-        tr.inst[level].mLog[key] = pyl
-        when EnableDebugLog:
-          debug logTxt, level, log="put()", flags, key, pyl
-        if PersistPut notin flags:
-          return ok(false)
-
-      when EnableDebugLog:
-        debug logTxt, key, pyl
-      api.mergePayload(mpt, root, path, pyl, accPath)
-
-  tracerApi.hasPath =
-    proc(mpt: AristoDbRef;
-        root: VertexID;
-        path: openArray[byte];
-          ): Result[bool,(VertexID,AristoError)] =
-      when EnableDebugLog:
-        const logTxt = "trace hasPath"
-
-      let key = leafTie(root, path).valueOr:
-        when EnableDebugLog:
-          debug logTxt, root, path=path.toStr, error=error[1]
+          debug logTxt, level, flags, key, accPath, error
         return err(error)
 
-      # Try to fetch data from the stacked logger instances
-      for level in (tr.inst.len-1).countDown(0):
-        if tr.inst[level].mLog.hasKey key:
-          when EnableDebugLog:
-            debug logTxt, level, log="get()", key, result=true
-          return ok(true)
-
-      # Alternatively fetch data from the production DB instance
+      tr.mptJournalPut(mpt, key, tpl)
       when EnableDebugLog:
-        debug logTxt, key
-      api.hasPath(mpt, root, path)
+        debug logTxt, level, flags, key, accPath, log="put()", data=($tpl)
+
+      ok merged
 
   result = TraceAdbRecorder(
     base: base,
     savedApi: api)
   base.api = tracerApi
+  assert result.savedApi != base.api
+  assert result.savedApi.delete != base.api.delete
+  assert result.savedApi.commit == base.api.commit
 
 # ------------------------------------------------------------------------------
 # Public functions
@@ -415,14 +767,22 @@ proc traceRecorder(
 
 proc topInst*(tr: TraceRecorderRef): TracerLogInstRef =
   ## Get top level KVT logger
-  if not tr.isNil and 0 < tr.inst.len:
-    result = tr.inst[^1]
+  doAssert 0 < tr.inst.len
+  tr.inst[^1]
+
+func kLog*(inst: TracerLogInstRef): TableRef[Blob,Blob] =
+  ## Export `Kvt` journal
+  result = newTable[Blob,Blob]()
+  for (kvt,kvtTab) in inst.kvtJournal.pairs:
+    for (key,tbl) in kvtTab.pairs:
+      if tbl.cur.len != 0:
+        result[key] = tbl.cur
 
 proc pop*(tr: TraceRecorderRef): bool =
  ## Reduce logger stack, returns `true` on success. There will always be
  ## at least one logger left on stack.
   if 1 < tr.inst.len: # Always leave one instance on stack
-    tr.inst.setLen(tr.inst.len - 1)
+    tr.popRestore()
     return true
 
 proc push*(
@@ -430,40 +790,31 @@ proc push*(
     flags: set[CoreDbCaptFlags];
       ) =
   ## Push overlay logger instance
-  if not tr.isNil and 0 < tr.inst.len:
-    let stackLen = tr.inst.len.uint8
-    doAssert stackLen < 254 # so length can be securely held as a `uint8`
-    tr.inst.add TracerLogInstRef(
-      level: stackLen + 1u8,
-      kLog: newTable[Blob,Blob](),
-      mLog: newTable[LeafTie,CoreDbPayloadRef](),
-      flags: flags)
+  doAssert 0 < tr.inst.len
+  tr.pushNew flags
 
 # ------------------------------------------------------------------------------
 # Public constructor/destructor
 # ------------------------------------------------------------------------------
 
 proc init*(
-    db: TraceRecorderRef;          # Recorder desc to initialise
+    tr: TraceRecorderRef;          # Recorder desc to initialise
     kBase: KvtBaseRef;             # `Kvt` base descriptor
     aBase: AristoBaseRef;          # `Aristo` base descriptor
     flags: set[CoreDbCaptFlags];
       ) =
   ## Constructor, create initial/base tracer descriptor
-  db.inst = @[TracerLogInstRef(
-    level: 1,
-    kLog: newTable[Blob,Blob](),
-    mLog: newTable[LeafTie,CoreDbPayloadRef](),
-    flags: flags)]
-  db.kdb = db.traceRecorder kBase
-  db.adb = db.traceRecorder aBase
+  tr.inst.setLen(0)
+  tr.pushNew flags
+  tr.kdb = tr.traceRecorder kBase
+  tr.adb = tr.traceRecorder aBase
 
-proc restore*(db: TraceRecorderRef) =
-  ## Restore production API, might be called directly or be invoked from the
-  ## call-back handler.
-  if 0 < db.inst.len:
-    db.kdb.base.api = db.kdb.savedApi
-    db.adb.base.api = db.adb.savedApi
+proc restore*(tr: TraceRecorderRef) =
+  ## Restore production API.
+  while 0 < tr.inst.len:
+    tr.popRestore()
+  tr.kdb.base.api = tr.kdb.savedApi
+  tr.adb.base.api = tr.adb.savedApi
 
 # ------------------------------------------------------------------------------
 # End
diff --git a/nimbus/db/core_db/memory_only.nim b/nimbus/db/core_db/memory_only.nim
index ab2ed12f7..7d99fe73f 100644
--- a/nimbus/db/core_db/memory_only.nim
+++ b/nimbus/db/core_db/memory_only.nim
@@ -31,6 +31,7 @@ export
   core_apps,
 
   # see `aristo_db`
+  isAristo,
   toAristo,
   toAristoProfData,
diff --git a/nimbus/db/kvt/kvt_api.nim b/nimbus/db/kvt/kvt_api.nim
index e7e1dcdfd..0e8eba236 100644
--- a/nimbus/db/kvt/kvt_api.nim
+++ b/nimbus/db/kvt/kvt_api.nim
@@ -61,6 +61,7 @@ type
   KvtApiReCentreFn* = proc(db: KvtDbRef) {.noRaise.}
   KvtApiRollbackFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
   KvtApiStowFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
+  KvtApiToKvtDbRefFn* = proc(tx: KvtTxRef): KvtDbRef {.noRaise.}
   KvtApiTxBeginFn* = proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.}
   KvtApiTxTopFn* = proc(db: KvtDbRef): Result[KvtTxRef,KvtError] {.noRaise.}
 
@@ -85,6 +86,7 @@ type
     reCentre*: KvtApiReCentreFn
     rollback*: KvtApiRollbackFn
     stow*: KvtApiStowFn
+    toKvtDbRef*: KvtApiToKvtDbRefFn
     txBegin*: KvtApiTxBeginFn
     txTop*: KvtApiTxTopFn
 
@@ -109,6 +111,7 @@ type
     KvtApiProfReCentreFn = "reCentre"
     KvtApiProfRollbackFn = "rollback"
     KvtApiProfStowFn = "stow"
+    KvtApiProfToKvtDbRefFn = "toKvtDbRef"
     KvtApiProfTxBeginFn = "txBegin"
     KvtApiProfTxTopFn = "txTop"
 
@@ -142,6 +145,7 @@ when AutoValidateApiHooks:
     doAssert not api.reCentre.isNil
     doAssert not api.rollback.isNil
     doAssert not api.stow.isNil
+    doAssert not api.toKvtDbRef.isNil
    doAssert not api.txBegin.isNil
    doAssert not api.txTop.isNil
 
@@ -184,6 +188,7 @@ func init*(api: var KvtApiObj) =
   api.reCentre = reCentre
   api.rollback = rollback
   api.stow = stow
+  api.toKvtDbRef = toKvtDbRef
   api.txBegin = txBegin
   api.txTop = txTop
   when AutoValidateApiHooks:
@@ -195,24 +200,25 @@ func init*(T: type KvtApiRef): T =
 
 func dup*(api: KvtApiRef): KvtApiRef =
   result = KvtApiRef(
-    commit: api.commit,
-    del: api.del,
-    finish: api.finish,
-    forget: api.forget,
-    fork: api.fork,
-    forkTop: api.forkTop,
-    get: api.get,
-    hasKey: api.hasKey,
-    isCentre: api.isCentre,
-    isTop: api.isTop,
-    level: api.level,
-    nForked: api.nForked,
-    put: api.put,
-    reCentre: api.reCentre,
-    rollback: api.rollback,
-    stow: api.stow,
-    txBegin: api.txBegin,
-    txTop: api.txTop)
+    commit:     api.commit,
+    del:        api.del,
+    finish:     api.finish,
+    forget:     api.forget,
+    fork:       api.fork,
+    forkTop:    api.forkTop,
+    get:        api.get,
+    hasKey:     api.hasKey,
+    isCentre:   api.isCentre,
+    isTop:      api.isTop,
+    level:      api.level,
+    nForked:    api.nForked,
+    put:        api.put,
+    reCentre:   api.reCentre,
+    rollback:   api.rollback,
+    stow:       api.stow,
+    toKvtDbRef: api.toKvtDbRef,
+    txBegin:    api.txBegin,
+    txTop:      api.txTop)
   when AutoValidateApiHooks:
     api.validate
 
@@ -323,6 +329,11 @@ func init*(
       KvtApiProfStowFn.profileRunner:
         result = api.stow(a)
 
+  profApi.toKvtDbRef =
+    proc(a: KvtTxRef): auto =
+      KvtApiProfToKvtDbRefFn.profileRunner:
+        result = api.toKvtDbRef(a)
+
   profApi.txBegin =
     proc(a: KvtDbRef): auto =
       KvtApiProfTxBeginFn.profileRunner:
diff --git a/nimbus/db/kvt/kvt_tx.nim b/nimbus/db/kvt/kvt_tx.nim
index 21f016b06..3537d7a67 100644
--- a/nimbus/db/kvt/kvt_tx.nim
+++ b/nimbus/db/kvt/kvt_tx.nim
@@ -74,6 +74,10 @@ func to*(tx: KvtTxRef; T: type[KvtDbRef]): T =
   ## Getter, retrieves the parent database descriptor from argument `tx`
   tx.db
 
+func toKvtDbRef*(tx: KvtTxRef): KvtDbRef =
+  ## Same as `.to(KvtDbRef)`
+  tx.db
+
 proc forkTx*(tx: KvtTxRef): Result[KvtDbRef,KvtError] =
   ## Clone a transaction into a new DB descriptor accessing the same backend
   ## (if any) database as the argument `db`. The new descriptor is linked to
diff --git a/nimbus/db/ledger/distinct_ledgers.nim b/nimbus/db/ledger/distinct_ledgers.nim
index 5ab1d1e8e..f72869e2b 100644
--- a/nimbus/db/ledger/distinct_ledgers.nim
+++ b/nimbus/db/ledger/distinct_ledgers.nim
@@ -36,6 +36,12 @@ type
   StorageLedger* = distinct CoreDxPhkRef
   SomeLedger* = AccountLedger | StorageLedger
 
+const
+  EnableMptDump = false # or true
+    ## Provide database dumper. Note that the dump function needs to link
+    ## against the `rocksdb` library. The dependency lies in the import of
+    ## `aristo_debug`.
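[Aside: `EnableMptDump` is a compile-time switch, so the dumper and its heavy import chain are only compiled when explicitly enabled. A minimal sketch of the same gating pattern follows; module and proc names here are illustrative, not from the patch.]

  const EnableFancyDump = false # or true

  when EnableFancyDump:
    # Optional/heavy dependencies are only pulled in when the switch is on.
    import std/[sequtils, strutils]

    proc dump(xs: seq[int]): string =
      ## Debug-only helper; never compiled into regular builds.
      "[" & xs.mapIt($it).join(",") & "]"

  when isMainModule:
    when EnableFancyDump:
      echo dump(@[1, 2, 3])
    else:
      echo "dump support not compiled in"

[Since the check happens at compile time, disabled builds carry no code for the dumper and no link dependency on its libraries.]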
+
 # ------------------------------------------------------------------------------
 # Public debugging helpers
 # ------------------------------------------------------------------------------
@@ -63,6 +69,46 @@ proc toSvp*(sl: StorageLedger): seq[(UInt256,UInt256)] =
 proc toStr*(w: seq[(UInt256,UInt256)]): string =
   "[" & w.mapIt("(" & it[0].toHex & "," & it[1].toHex & ")").join(", ") & "]"
 
+when EnableMptDump:
+  import
+    eth/trie,
+    stew/byteutils,
+    ../aristo,
+    ../aristo/aristo_debug
+
+  proc dump*(led: SomeLedger): string =
+    ## Dump database (beware of large backend)
+    let db = led.distinctBase.parent
+    if db.dbType notin CoreDbPersistentTypes:
+      # Memory based storage only
+      let be = led.distinctBase.backend
+
+      if db.isAristo:
+        let adb = be.toAristo()
+        if not adb.isNil:
+          return adb.pp(kMapOk=false,backendOK=true)
+
+      if db.isLegacy:
+        let ldb = be.toLegacy()
+        var blurb: seq[string]
+        blurb.add "level=" & $db.level
+        try:
+          for (k,v) in ldb.pairs:
+            let key = HashKey.fromBytes(k).value
+            if key.isValid:
+              let acc = rlp.decode(v, Account)
+              blurb.add "(" & key.pp & ",(" &
+                $acc.nonce & "," &
+                $acc.balance & "," &
+                acc.storageRoot.pp & "," &
+                acc.codeHash.pp(codeHashOk=true) & "))"
+        except RlpError as e:
+          raiseAssert "dump: " & $e.name & " - " & e.msg
+        return blurb.join("\n  ")
+
+    # Oops
+    "<" & $db.dbType & ">"
+
 # ------------------------------------------------------------------------------
 # Public helpers
 # ------------------------------------------------------------------------------
diff --git a/tests/replay/pp.nim b/tests/replay/pp.nim
index cc4cc6d9c..c2f7f7663 100644
--- a/tests/replay/pp.nim
+++ b/tests/replay/pp.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2022-2023 Status Research & Development GmbH
+# Copyright (c) 2022-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -12,7 +12,7 @@
 ## ----------------------------------------------------
 
 import
-  std/tables,
+  std/[tables, typetraits],
   eth/common,
   stew/byteutils,
   ../../nimbus/common/chain_config,
@@ -25,19 +25,30 @@ export
 # Public functions, pretty printer
 # ------------------------------------------------------------------------------
 
-proc pp*(b: Blob): string =
+func pp*(b: Blob): string =
   b.toHex.pp(hex = true)
 
-proc pp*(a: EthAddress): string =
+func pp*(a: EthAddress): string =
   a.toHex[32 .. 39]
 
-proc pp*(a: openArray[EthAddress]): string =
+func pp*(a: Option[EthAddress]): string =
+  if a.isSome: a.unsafeGet.pp else: "n/a"
+
+func pp*(a: openArray[EthAddress]): string =
   "[" & a.mapIt(it.pp).join(" ") & "]"
 
-proc pp*(a: BlockNonce): string =
+func pp*(a: BlockNonce): string =
   a.toHex
 
-proc pp*(h: BlockHeader; sep = " "): string =
+func pp*(a: NetworkPayload): string =
+  if a.isNil:
+    "n/a"
+  else:
+    "([#" & $a.blobs.len & "],[#" &
+      $a.commitments.len & "],[#" &
+      $a.proofs.len & "])"
+
+func pp*(h: BlockHeader; sep = " "): string =
   "" &
     &"hash={h.blockHash.pp}{sep}" &
     &"blockNumber={h.blockNumber}{sep}" &
@@ -56,10 +67,10 @@ proc pp*(h: BlockHeader; sep = " "): string =
     &"stateRoot={h.stateRoot.pp}{sep}" &
     &"baseFee={h.baseFee}{sep}" &
     &"withdrawalsRoot={h.withdrawalsRoot.get(EMPTY_ROOT_HASH)}{sep}" &
-    &"blobGasUsed={h.blobGasUsed.get(0'u64)}" &
+    &"blobGasUsed={h.blobGasUsed.get(0'u64)}{sep}" &
     &"excessBlobGas={h.excessBlobGas.get(0'u64)}"
 
-proc pp*(g: Genesis; sep = " "): string =
+func pp*(g: Genesis; sep = " "): string =
   "" &
     &"nonce={g.nonce.pp}{sep}" &
     &"timestamp={g.timestamp}{sep}" &
@@ -74,6 +85,25 @@ proc pp*(g: Genesis; sep = " "): string =
     &"parentHash={g.parentHash.pp}{sep}" &
     &"baseFeePerGas={g.baseFeePerGas}"
 
+func pp*(t: Transaction; sep = " "): string =
+  "" &
+    &"txType={t.txType}{sep}" &
+    &"chainId={t.chainId.distinctBase}{sep}" &
+    &"nonce={t.nonce}{sep}" &
+    &"gasPrice={t.gasPrice}{sep}" &
+    &"maxPriorityFee={t.maxPriorityFee}{sep}" &
+    &"maxFee={t.maxFee}{sep}" &
+    &"gasLimit={t.gasLimit}{sep}" &
+    &"to={t.to.pp}{sep}" &
+    &"value={t.value}{sep}" &
+    &"payload={t.payload.pp}{sep}" &
+    &"accessList=[#{t.accessList.len}]{sep}" &
+    &"maxFeePerBlobGas={t.maxFeePerBlobGas}{sep}" &
+    &"versionedHashes=[#{t.versionedHashes.len}]{sep}" &
+    &"networkPayload={t.networkPayload.pp}{sep}" &
+    &"V={t.V}{sep}" &
+    &"R={t.R}{sep}" &
+    &"S={t.S}{sep}"
+
 proc pp*(h: BlockHeader; indent: int): string =
   h.pp("\n" & " ".repeat(max(1,indent)))
diff --git a/tests/replay/pp_light.nim b/tests/replay/pp_light.nim
index 568120a9e..100e06d65 100644
--- a/tests/replay/pp_light.nim
+++ b/tests/replay/pp_light.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2022-2023 Status Research & Development GmbH
+# Copyright (c) 2022-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -134,7 +134,7 @@ func pp*(a: MDigest[256]; collapse = true): string =
   elif a == ZERO_HASH256:
     "ZERO_HASH256"
   else:
-    a.data.toHex.join[56 .. 63]
+    "£" & a.data.toHex.join[0..6] & ".." & a.data.toHex.join[56..63]
 
 func pp*(a: openArray[MDigest[256]]; collapse = true): string =
   "@[" & a.toSeq.mapIt(it.pp).join(" ") & "]"
diff --git a/tests/test_aristo.nim b/tests/test_aristo.nim
index b5c34bcb0..b715d30a5 100644
--- a/tests/test_aristo.nim
+++ b/tests/test_aristo.nim
@@ -192,9 +192,9 @@ when isMainModule:
 
   setErrorLevel()
 
-  when true: # and false:
-    # Verify Problem with the persisten database
-    noisy.accountsRunner()
+  when true and false:
+    # Verify problem with the database for production test
+    noisy.accountsRunner(persistent=false)
 
   when true: # and false:
     noisy.miscRunner(qidSampleSize = 1_000)
diff --git a/tests/test_aristo/test_backend.nim b/tests/test_aristo/test_backend.nim
index 1dd4df24f..fff1a34c2 100644
--- a/tests/test_aristo/test_backend.nim
+++ b/tests/test_aristo/test_backend.nim
@@ -218,10 +218,10 @@ proc mergeData(
       rc.value
 
   let nMerged = block:
-    let rc = db.merge(proof, root) # , noisy=noisy)
+    let rc = db.merge(proof, root)
     xCheckRc rc.error == 0
     rc.value
-  discard nMerged
+  discard nMerged # Result is currently unused
 
   let merged = db.mergeList(leafs, noisy=noisy)
   xCheck merged.error in {AristoError(0), MergeLeafPathCachedAlready}
@@ -292,6 +292,8 @@ proc testBackendConsistency*(
     xCheck ndb.backend.isNil
     xCheck not mdb.backend.isNil
+    xCheck ndb.vGen == mdb.vGen
+    xCheck ndb.top.final.fRpp.len == mdb.top.final.fRpp.len
 
     when true and false:
       noisy.say "***", "beCon(1) <", n, "/", list.len-1, ">",
@@ -350,6 +352,9 @@ proc testBackendConsistency*(
         let rc = rdb.stow(persistent=true, chunkedMpt=true)
         xCheckRc rc.error == 0
 
+      xCheck ndb.vGen == mdb.vGen
+      xCheck ndb.top.final.fRpp.len == mdb.top.final.fRpp.len
+
       block:
         ndb.top.final.pPrf.clear # let it look like mdb/rdb
         xCheck mdb.pPrf.len == 0
diff --git a/tests/test_aristo/test_tx.nim b/tests/test_aristo/test_tx.nim
index 3471011d1..2a85f0b03 100644
--- a/tests/test_aristo/test_tx.nim
+++ b/tests/test_aristo/test_tx.nim
@@ -589,8 +589,10 @@ proc testTxMergeProofAndKvpList*(
       return
 
     when true and false:
-      noisy.say "***", "proofs(9) <", n, "/", list.len-1, ">",
-        " groups=", count, " proved=", proved, " merged=", merged
+      noisy.say "***", "testTxMergeProofAndKvpList (1)",
+        " <", n, "/", list.len-1, ">",
+        " runID=", runID,
+        " groups=", count, " merged=", merged
 
   true
diff --git a/tests/test_tracer_json.nim b/tests/test_tracer_json.nim
index 9da0923dc..35d08723a 100644
--- a/tests/test_tracer_json.nim
+++ b/tests/test_tracer_json.nim
@@ -1,29 +1,94 @@
 # Nimbus
-# Copyright (c) 2018-2023 Status Research & Development GmbH
+# Copyright (c) 2018-2024 Status Research & Development GmbH
 # Licensed under either of
-#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
-#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
-# at your option. This file may not be copied, modified, or distributed except according to those terms.
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#    http://www.apache.org/licenses/LICENSE-2.0)
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#    http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified,
+# or distributed except according to those terms.
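[Aside: the test module below is reorganised so a single fixture body receives the database as a parameter, while thin wrappers bind it to the legacy and Aristo backends. The injection pattern in isolation looks like the sketch below; types and names here are stand-ins, not the real `CoreDbRef` API.]

  type
    DbKind = enum dbLegacy, dbAristo
    MockDb = object
      kind: DbKind

  proc newMockDb(kind: DbKind): MockDb =
    MockDb(kind: kind)

  proc testFixtureImpl(db: MockDb): bool =
    ## Shared test body; the backend is chosen by the caller.
    db.kind in {dbLegacy, dbAristo} # a real test would exercise `db` here

  when isMainModule:
    for kind in [dbLegacy, dbAristo]:
      doAssert newMockDb(kind).testFixtureImpl

[This keeps the JSON fixtures themselves backend-agnostic; only the constructor call differs between the two suites.]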
 
 import
-  std/[json, os, tables, strutils],
+  std/[json, os, sets, tables, strutils],
+  chronicles,
   unittest2,
   stew/byteutils,
+  results,
   ./test_helpers,
+  ../nimbus/sync/protocol/snap/snap_types,
+  ../nimbus/db/aristo/aristo_merge,
+  ../nimbus/db/kvt/kvt_utils,
+  ../nimbus/db/aristo,
   ../nimbus/[tracer, vm_types],
   ../nimbus/common/common
 
-proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus)
+proc setErrorLevel {.used.} =
+  when defined(chronicles_runtime_filtering) and loggingEnabled:
+    setLogLevel(LogLevel.ERROR)
+
+
+proc preLoadLegaDb(cdb: CoreDbRef; jKvp: JsonNode) =
+  # Just a hack: MPT and KVT share the same base table
+  for k, v in jKvp:
+    let key = hexToSeqByte(k)
+    let value = hexToSeqByte(v.getStr())
+    cdb.kvt.put(key, value)
+
+proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
+  ## Hack for `Aristo` pre-loading using the `snap` protocol proof-loader
+  var
+    proof: seq[SnapProof]              # for pre-loading MPT
+    predRoot: Hash256                  # from predecessor header
+    txRoot: Hash256                    # header with block number `num`
+    rcptRoot: Hash256                  # ditto
+  let
+    adb = cdb.ctx.getMpt(GenericTrie).backend.toAristo
+    kdb = cdb.newKvt.backend.toAristo
+
+  # Fill KVT and collect `proof` data
+  for (k,v) in jKvp.pairs:
+    let
+      key = hexToSeqByte(k)
+      val = hexToSeqByte(v.getStr())
+    if key.len == 32:
+      doAssert key == val.keccakHash.data
+      if val != @[0x80u8]: # Exclude empty item
+        proof.add SnapProof(val)
+    else:
+      if key[0] == 0:
+        try:
+          # Pull our particular header fields (if possible)
+          let header = rlp.decode(val, BlockHeader)
+          if header.blockNumber == num:
+            txRoot = header.txRoot
+            rcptRoot = header.receiptRoot
+          elif header.blockNumber == num-1:
+            predRoot = header.stateRoot
+        except RlpError:
+          discard
+      check kdb.put(key, val).isOk
+
+  # Install sub-trie roots onto production db
+  if txRoot.isValid:
+    doAssert adb.merge(txRoot, VertexID(TxTrie)).isOk
+  if rcptRoot.isValid:
+    doAssert adb.merge(rcptRoot, VertexID(ReceiptsTrie)).isOk
+  doAssert adb.merge(predRoot, VertexID(AccountsTrie)).isOk
+
+  # Set up production MPT
+  doAssert adb.merge(proof).isOk
+
+  # Remove locks so that hashify can re-assign changed nodes
+  adb.top.final.pPrf.clear
+  adb.top.final.fRpp.clear
 
-proc tracerJsonMain*() =
-  suite "tracer json tests":
-    jsonTest("TracerTests", testFixture)
 
 # use tracerTestGen.nim to generate additional test data
-proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus) =
+proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) =
+  setErrorLevel()
+
   var
     blockNumber = UInt256.fromHex(node["blockNumber"].getStr())
-    memoryDB = newCoreDbRef LegacyDbMemory
     com = CommonRef.new(memoryDB, chainConfigForNetwork(MainNet))
     state = node["state"]
     receipts = node["receipts"]
 
   # disable POS/post Merge feature
   com.setTTD none(DifficultyInt)
 
-  for k, v in state:
-    let key = hexToSeqByte(k)
-    let value = hexToSeqByte(v.getStr())
-    memoryDB.kvt.put(key, value)
+  # Import raw data into database
+  if memoryDB.dbType in {LegacyDbMemory,LegacyDbPersistent}:
+    # Just a hack: MPT and KVT share the same base table
+    memoryDB.preLoadLegaDb state
+  else:
+    # Another hack for `Aristo` using the `snap` protocol proof-loader
+    memoryDB.preLoadAristoDb(state, blockNumber)
 
   var header = com.db.getBlockHeader(blockNumber)
   var headerHash = header.blockHash
@@ -52,5 +120,18 @@ proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) =
     let stateDiff = txTraces[i]["stateDiff"]
     check receipt["root"].getStr().toLowerAscii() == stateDiff["afterRoot"].getStr().toLowerAscii()
 
+
+proc testFixtureLega(node: JsonNode, testStatusIMPL: var TestStatus) =
+  node.testFixtureImpl(testStatusIMPL, newCoreDbRef LegacyDbMemory)
+
+proc testFixtureAristo(node: JsonNode, testStatusIMPL: var TestStatus) =
+  node.testFixtureImpl(testStatusIMPL, newCoreDbRef AristoDbMemory)
+
+proc tracerJsonMain*() =
+  suite "tracer json tests for legacy DB":
+    jsonTest("TracerTests", testFixtureLega)
+
+  suite "tracer json tests for Aristo DB":
+    jsonTest("TracerTests", testFixtureAristo)
+
 when isMainModule:
   tracerJsonMain()
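[Aside: the reworked tracer in handlers_trace.nim keeps one journal per stacked logger instance, recording an `old`/`cur` pair per key, so a transaction can either fold its journal into the layer below (`popMerge`, on commit) or replay it backwards against the database (`popRestore`, on rollback). Below is a minimal sketch of that bookkeeping with plain string tables, independent of the `Kvt`/`Aristo` types; all names are illustrative.]

  import std/[options, tables]

  type
    Entry = object
      old, cur: Option[string]   # value before/after this layer's writes
    Journal = Table[string,Entry]

  proc record(j: var Journal; key: string; old, cur: Option[string]) =
    if key in j:
      j[key].cur = cur           # keep the oldest `old`, newest `cur`
    else:
      j[key] = Entry(old: old, cur: cur)

  proc popMerge(stack: var seq[Journal]) =
    ## Fold the top journal into the one below (transaction commit).
    let top = stack.pop
    for key, e in top:
      stack[^1].record(key, e.old, e.cur)

  proc popRestore(stack: var seq[Journal]; db: var Table[string,string]) =
    ## Undo the top journal against the backing table (rollback).
    let top = stack.pop
    for key, e in top:
      if e.old.isSome: db[key] = e.old.get
      else: db.del key

  when isMainModule:
    var
      db = {"a": "0"}.toTable
      stack = @[Journal(), Journal()]
    db["a"] = "1"; stack[^1].record("a", some "0", some "1")
    db["b"] = "x"; stack[^1].record("b", none string, some "x")
    popRestore(stack, db)       # roll the writes back
    doAssert db == {"a": "0"}.toTable

[One nuance the sketch simplifies: the real `popMerge` additionally marks an entry blind when the lower layer's `old` already equals the entry's `cur`; keeping the oldest `old` and the newest `cur`, as done here, preserves the same restore semantics.]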