From 8dd038144b67d05ddcc1ff3c7d0b0b57ab273acb Mon Sep 17 00:00:00 2001
From: Jordan Hrycaj
Date: Fri, 28 Jun 2024 18:43:04 +0000
Subject: [PATCH] Some cleanups (#2428)

* Remove `dirty` set from structural objects

why:
  Not used anymore, the tree is dirty by default.

* Rename `aristo_hashify` -> `aristo_compute`

* Remove cruft, update comments, cosmetics, etc.

* Simplify `SavedState` object

why:
  The key chaining has become obsolete after extra lazy hashing. There
  is some space available for a state hash to be maintained in the future.

details:
  Accept the legacy `SavedState` object serialisation format for a
  while (which will be overwritten by the new format.)
---
 nimbus/db/aristo/README.md | 6 +-
 nimbus/db/aristo/aristo_blobify.nim | 35 ++++------
 nimbus/db/aristo/aristo_check.nim | 8 +--
 nimbus/db/aristo/aristo_check/check_be.nim | 9 +--
 nimbus/db/aristo/aristo_check/check_top.nim | 2 +-
 ...{aristo_hashify.nim => aristo_compute.nim} | 3 -
 nimbus/db/aristo/aristo_debug.nim | 6 --
 nimbus/db/aristo/aristo_delete.nim | 4 +-
 nimbus/db/aristo/aristo_delta.nim | 3 +-
 nimbus/db/aristo/aristo_delta/delta_merge.nim | 26 --------
 .../db/aristo/aristo_delta/delta_reverse.nim | 2 +-
 nimbus/db/aristo/aristo_desc/desc_error.nim | 8 ---
 .../db/aristo/aristo_desc/desc_structural.nim | 10 +--
 nimbus/db/aristo/aristo_fetch.nim | 2 +-
 nimbus/db/aristo/aristo_layers.nim | 8 ---
 nimbus/db/aristo/aristo_merge.nim | 4 +-
 nimbus/db/aristo/aristo_sign.nim | 3 +-
 nimbus/db/aristo/aristo_tx/tx_frame.nim | 2 +-
 nimbus/db/aristo/aristo_tx/tx_stow.nim | 5 +-
 nimbus/db/aristo/aristo_utils.nim | 61 +------------------
 nimbus/db/core_db/backend/aristo_db.nim | 5 +-
 nimbus/db/core_db/core_apps.nim | 28 +++++----
 tests/test_aristo/test_filter.nim | 5 +-
 tests/test_aristo/test_tx.nim | 12 ----
 tests/test_coredb/coredb_test_xx.nim | 2 +-
 25 files changed, 55 insertions(+), 204 deletions(-)
 rename nimbus/db/aristo/{aristo_hashify.nim => aristo_compute.nim} (98%)

diff --git a/nimbus/db/aristo/README.md b/nimbus/db/aristo/README.md
index ad47ec9bd..ee20d869a 100644
--- a/nimbus/db/aristo/README.md
+++ b/nimbus/db/aristo/README.md
@@ -358,12 +358,10 @@ assumed, i.e. the list with the single vertex ID *1*.
 ### 4.7 Serialisation of a last saved state record

         0 +--+--+--+--+--+ .. --+--+ .. --+
-          |                               | -- 32 bytes source state hash
+          |                               | -- 32 bytes state hash
        32 +--+--+--+--+--+ .. --+--+ .. --+
-          |                               | -- 32 bytes target state hash
-       64 +--+--+--+--+--+ .. --+--+ .. --+
           |                               | -- state number/block number
-       72 +--+--+--+--+--+--+--+--+
+       40 +--+--+--+--+--+--+--+--+
           |                               | -- marker(8), 0x7f
           +--+

diff --git a/nimbus/db/aristo/aristo_blobify.nim b/nimbus/db/aristo/aristo_blobify.nim
index c86bfbcf1..592c6c334 100644
--- a/nimbus/db/aristo/aristo_blobify.nim
+++ b/nimbus/db/aristo/aristo_blobify.nim
@@ -152,21 +152,7 @@ proc blobify*(tuv: VertexID): Blob =

 proc blobifyTo*(lSst: SavedState; data: var Blob): Result[void,AristoError] =
   ## Serialise a last saved state record
-  case lSst.src.len:
-  of 0:
-    data.setLen(32)
-  of 32:
-    data.setLen(0)
-    data.add lSst.src.data
-  else:
-    return err(BlobifyStateSrcLenGarbled)
-  case lSst.trg.len:
-  of 0:
-    data.setLen(64)
-  of 32:
-    data.add lSst.trg.data
-  else:
-    return err(BlobifyStateTrgLenGarbled)
+  data.add lSst.key.data
   data.add lSst.serial.toBytesBE
   data.add @[0x7fu8]
   ok()
@@ -352,17 +338,20 @@ proc deblobifyTo*(
   ): Result[void,AristoError] =
   ## De-serialise the last saved state data record previously encoded with
   ## `blobify()`.
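For reference, the new record is 41 bytes overall: a 32 byte state hash, an
8 byte big-endian serial (block) number, and a trailing `0x7f` marker; the
73 byte legacy records remain readable, as the code below shows. A minimal
standalone Nim sketch of the new round trip, where `MiniSavedState` and the
helper names are illustrative rather than the module's API:

    import stew/endians2

    type MiniSavedState = object
      key: array[32, byte]     # state hash
      serial: uint64           # state/block number

    func encode(s: MiniSavedState): seq[byte] =
      # 32 byte hash + 8 byte big-endian serial + 0x7f marker = 41 bytes
      result.add s.key
      result.add s.serial.toBytesBE
      result.add 0x7fu8

    func decode(data: openArray[byte]): MiniSavedState =
      doAssert data.len == 41 and data[^1] == 0x7f
      for i in 0 ..< 32:
        result.key[i] = data[i]
      result.serial = uint64.fromBytesBE data.toOpenArray(32, 39)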
-  if data.len != 73:
+  # Accept the legacy format for a while
+  if data.len == 73:
+    if data[^1] != 0x7f:
+      return err(DeblobWrongType)
+    lSst.key = EMPTY_ROOT_HASH
+    lSst.serial = uint64.fromBytesBE data.toOpenArray(64, 71)
+    return ok()
+  # -----
+  if data.len != 41:
     return err(DeblobWrongSize)
   if data[^1] != 0x7f:
     return err(DeblobWrongType)
-  func loadHashKey(data: openArray[byte]): Result[HashKey,AristoError] =
-    var w = HashKey.fromBytes(data).valueOr:
-      return err(DeblobHashKeyExpected)
-    ok move(w)
-  lSst.src = ? data.toOpenArray(0, 31).loadHashKey()
-  lSst.trg = ? data.toOpenArray(32, 63).loadHashKey()
-  lSst.serial = uint64.fromBytesBE data.toOpenArray(64, 71)
+  (addr lSst.key.data[0]).copyMem(unsafeAddr data[0], 32)
+  lSst.serial = uint64.fromBytesBE data.toOpenArray(32, 39)
   ok()

 proc deblobify*(
diff --git a/nimbus/db/aristo/aristo_check.nim b/nimbus/db/aristo/aristo_check.nim
index e8909f21a..a764a64d4 100644
--- a/nimbus/db/aristo/aristo_check.nim
+++ b/nimbus/db/aristo/aristo_check.nim
@@ -14,7 +14,7 @@
 {.push raises: [].}

 import
-  std/[algorithm, sequtils, sets, tables],
+  std/[algorithm, sequtils, tables],
   eth/common,
   stew/interval_set,
   results,
@@ -31,9 +31,9 @@ proc checkTop*(
     proofMode = false;                 # Has proof nodes
   ): Result[void,(VertexID,AristoError)] =
   ## Verify that the cache structure is correct as it would be after `merge()`
-  ## and `hashify()` operations. Unless `proofMode` is set `true` it would not
-  ## fully check against the backend, which is typically not applicable after
-  ## `delete()` operations.
+  ## operations. Unless `proofMode` is set `true` it would not fully check
+  ## against the backend, which is typically not applicable after `delete()`
+  ## operations.
   ##
   ## The following is verified:
   ##
diff --git a/nimbus/db/aristo/aristo_check/check_be.nim b/nimbus/db/aristo/aristo_check/check_be.nim
index aa49d6b13..3d7b7c888 100644
--- a/nimbus/db/aristo/aristo_check/check_be.nim
+++ b/nimbus/db/aristo/aristo_check/check_be.nim
@@ -135,14 +135,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
   # Check layer cache against backend
   if cache:
     var topVidCache = VertexID(0)
-
-    let checkKeysOk = block:
-      if db.dirty.len == 0:
-        true
-      elif relax:
-        false
-      else:
-        return err((VertexID(0),CheckBeCacheIsDirty))
+    let checkKeysOk = true

     # Check structural table
     for (vid,vtx) in db.layersWalkVtx:
diff --git a/nimbus/db/aristo/aristo_check/check_top.nim b/nimbus/db/aristo/aristo_check/check_top.nim
index b8c3c7ed2..e7c7547a3 100644
--- a/nimbus/db/aristo/aristo_check/check_top.nim
+++ b/nimbus/db/aristo/aristo_check/check_top.nim
@@ -42,7 +42,7 @@ proc checkTopStrict*(
       if key != node.digestTo(HashKey):
         return err((vid,CheckStkVtxKeyMismatch))

-    elif db.dirty.len == 0 or db.layersGetKey(vid).isErr:
+    elif db.layersGetKey(vid).isErr:
       # So `vtx` exists but not `key`, so cache is supposed dirty and the
       # vertex has a zero entry.
return err((vid,CheckStkVtxKeyMissing)) diff --git a/nimbus/db/aristo/aristo_hashify.nim b/nimbus/db/aristo/aristo_compute.nim similarity index 98% rename from nimbus/db/aristo/aristo_hashify.nim rename to nimbus/db/aristo/aristo_compute.nim index ee434e1a9..4960f1bb4 100644 --- a/nimbus/db/aristo/aristo_hashify.nim +++ b/nimbus/db/aristo/aristo_compute.nim @@ -11,13 +11,10 @@ {.push raises: [].} import - chronicles, eth/common, results, "."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise] -logScope: - topics = "aristo-hashify" proc computeKey*( db: AristoDbRef; # Database, top layer diff --git a/nimbus/db/aristo/aristo_debug.nim b/nimbus/db/aristo/aristo_debug.nim index 796064de5..fbe6b3337 100644 --- a/nimbus/db/aristo/aristo_debug.nim +++ b/nimbus/db/aristo/aristo_debug.nim @@ -401,7 +401,6 @@ proc ppFilter( if fl.isNil: result &= " n/a" return - result &= pfx & "src=" & fl.src.ppKey(db) result &= pfx & "vTop=" & fl.vTop.ppVid result &= pfx & "sTab" & pfx1 & "{" for n,vid in fl.sTab.sortedKeys: @@ -521,11 +520,6 @@ proc ppLayer( tLen = layer.final.fRpp.len info = "fRpp(" & $tLen & ")" result &= info.doPrefix(0 < tLen) & layer.final.fRpp.ppFRpp(db,indent+2) - if 0 < nOKs: - let - info = if layer.final.dirty.len == 0: "clean" - else: "dirty" & layer.final.dirty.ppVids - result &= info.doPrefix(false) # ------------------------------------------------------------------------------ # Public functions diff --git a/nimbus/db/aristo/aristo_delete.nim b/nimbus/db/aristo/aristo_delete.nim index a1e11ef82..90641d5b5 100644 --- a/nimbus/db/aristo/aristo_delete.nim +++ b/nimbus/db/aristo/aristo_delete.nim @@ -429,7 +429,7 @@ proc deleteStorageData*( return err(DelPathNotFound) return err(error[1]) - # Mark account path for update for `hashify()` + # Mark account path Merkle keys for update db.updateAccountForHasher accHike db.deleteImpl(stoHike).isOkOr: @@ -464,7 +464,7 @@ proc deleteStorageTree*( if not stoID.isValid: return err(DelStoRootMissing) - # Mark account path for update for `hashify()` + # Mark account path Merkle keys for update db.updateAccountForHasher accHike ? db.delSubTreeImpl stoID diff --git a/nimbus/db/aristo/aristo_delta.nim b/nimbus/db/aristo/aristo_delta.nim index 0ae96f4ee..764fa363d 100644 --- a/nimbus/db/aristo/aristo_delta.nim +++ b/nimbus/db/aristo/aristo_delta.nim @@ -69,8 +69,7 @@ proc deltaPersistent*( defer: updateSiblings.rollback() let lSst = SavedState( - src: db.balancer.src, - trg: db.balancer.kMap.getOrVoid(VertexID 1), + key: EMPTY_ROOT_HASH, # placeholder for more serial: nxtFid) # Store structural single trie entries diff --git a/nimbus/db/aristo/aristo_delta/delta_merge.nim b/nimbus/db/aristo/aristo_delta/delta_merge.nim index 29f7487c7..22357caf9 100644 --- a/nimbus/db/aristo/aristo_delta/delta_merge.nim +++ b/nimbus/db/aristo/aristo_delta/delta_merge.nim @@ -30,43 +30,22 @@ proc deltaMerge*( ## stacked and the database access is `upper -> lower -> backend` whereas ## the `src/trg` matching logic goes the other way round. ## - ## The resuting filter has no `FilterID` set. 
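A note on the `aristo_hashify` -> `aristo_compute` rename above: Merkle keys
are now computed lazily on demand and memoised per vertex, rather than by a
separate hashify pass over a `dirty` set. A conceptual, self-contained Nim
sketch of that memoisation pattern (toy types, not Aristo's actual vertex
structures):

    import std/[hashes, tables]

    type MiniTrie = object
      children: Table[int, seq[int]]   # vertex id -> child vertex ids
      cache: Table[int, Hash]          # memoised per-vertex keys

    proc lazyKey(t: var MiniTrie, vid: int): Hash =
      # Reuse a cached key when available, otherwise fold the
      # children's keys and remember the result.
      if vid in t.cache:
        return t.cache[vid]
      var h = hash(vid)
      for c in t.children.getOrDefault(vid):
        h = h !& t.lazyKey(c)
      result = !$h
      t.cache[vid] = result

Clearing a vertex's cached key (as `layersResKey()` does) is then all it
takes to force recomputation along that path on the next access.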
- ## - ## Comparing before and after merge - ## :: - ## arguments | merged result - ## --------------------------------+------------------------------------ - ## (src2==trg1) --> upper --> trg2 | - ## | (src1==trg0) --> newFilter --> trg2 - ## (src1==trg0) --> lower --> trg1 | - ## | - ## beStateRoot --> trg0 | - ## # Degenerate case: `upper` is void if lower.isNil: if upper.isNil: # Even more degenerate case when both filters are void return ok LayerDeltaRef(nil) - if upper.src != beStateRoot: - return err((VertexID(1),FilStateRootMismatch)) return ok(upper) # Degenerate case: `upper` is non-trivial and `lower` is void if upper.isNil: - if lower.src != beStateRoot: - return err((VertexID(0), FilStateRootMismatch)) return ok(lower) # Verify stackability let lowerTrg = lower.kMap.getOrVoid VertexID(1) - if upper.src != lowerTrg: - return err((VertexID(0), FilTrgSrcMismatch)) - if lower.src != beStateRoot: - return err((VertexID(0), FilStateRootMismatch)) # There is no need to deep copy table vertices as they will not be modified. let newFilter = LayerDeltaRef( - src: lower.src, sTab: lower.sTab, kMap: lower.kMap, vTop: upper.vTop) @@ -95,11 +74,6 @@ proc deltaMerge*( else: return err((vid,rc.error)) - # # Check consistency - # if (newFilter.src == newFilter.kMap.getOrVoid(VertexID 1)) != - # (newFilter.sTab.len == 0 and newFilter.kMap.len == 0): - # return err((VertexID(0),FilSrcTrgInconsistent)) - ok newFilter # ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_delta/delta_reverse.nim b/nimbus/db/aristo/aristo_delta/delta_reverse.nim index 0946f42fd..934b0e6d5 100644 --- a/nimbus/db/aristo/aristo_delta/delta_reverse.nim +++ b/nimbus/db/aristo/aristo_delta/delta_reverse.nim @@ -29,7 +29,7 @@ proc revFilter*( ## backend (excluding optionally installed read-only filter.) ## # Register MPT state roots for reverting back - let rev = LayerDeltaRef(src: filter.kMap.getOrVoid(VertexID 1)) + let rev = LayerDeltaRef() # Get vid generator state on backend block: diff --git a/nimbus/db/aristo/aristo_desc/desc_error.nim b/nimbus/db/aristo/aristo_desc/desc_error.nim index 6a6684122..aeb14329f 100644 --- a/nimbus/db/aristo/aristo_desc/desc_error.nim +++ b/nimbus/db/aristo/aristo_desc/desc_error.nim @@ -23,8 +23,6 @@ type BlobifyLeafPathOverflow BlobifyNilFilter BlobifyNilVertex - BlobifyStateSrcLenGarbled - BlobifyStateTrgLenGarbled # Cache checker `checkCache()` @@ -157,12 +155,6 @@ type GetVtxNotFound - # Update `Merkle` hashes `hashify()` - HashifyVtxUnresolved - HashifyRootVtxUnresolved - HashifyProofHashMismatch - - # Path function `hikeUp()` HikeBranchMissingEdge HikeBranchTailEmpty diff --git a/nimbus/db/aristo/aristo_desc/desc_structural.nim b/nimbus/db/aristo/aristo_desc/desc_structural.nim index 22c6f1509..6efcd761a 100644 --- a/nimbus/db/aristo/aristo_desc/desc_structural.nim +++ b/nimbus/db/aristo/aristo_desc/desc_structural.nim @@ -82,9 +82,8 @@ type SavedState* = object ## Last saved state - src*: HashKey ## Previous state hash - trg*: HashKey ## Last state hash - serial*: uint64 ## Generic identifier froom application + key*: Hash256 ## Some state hash (if any) + serial*: uint64 ## Generic identifier from application LayerDeltaRef* = ref object ## Delta layers are stacked implying a tables hierarchy. Table entries on @@ -109,7 +108,6 @@ type ## tables. So a corresponding zero value or missing entry produces an ## inconsistent state that must be resolved. 
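With the `src`/`trg` chaining gone, `deltaMerge()` above boils down to
overlaying the newer layer's tables on the older one's, applied alike to
`sTab` and `kMap`, with `vTop` taken from the upper layer. A toy Nim sketch
of the overlay rule (generic helper, illustrative only):

    import std/tables

    proc overlay[K, V](lower, upper: Table[K, V]): Table[K, V] =
      # Start from the older (lower) layer and let entries from the
      # newer (upper) layer win on key collisions.
      result = lower
      for k, v in upper:
        result[k] = v

Lookups traverse the same hierarchy in the opposite direction: the top layer
is probed first, then the stack of parent layers, so the most recent write
always wins (cf. `layersGetKey()` further below).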
## - src*: HashKey ## Only needed when used as a filter sTab*: Table[VertexID,VertexRef] ## Structural vertex table kMap*: Table[VertexID,HashKey] ## Merkle hash key mapping vTop*: VertexID ## Last used vertex ID @@ -124,7 +122,6 @@ type ## pPrf*: HashSet[VertexID] ## Locked vertices (proof nodes) fRpp*: Table[HashKey,VertexID] ## Key lookup for `pPrf[]` (proof nodes) - dirty*: HashSet[VertexID] ## Start nodes to re-hashiy from LayerRef* = ref LayerObj LayerObj* = object @@ -276,8 +273,7 @@ func dup*(final: LayerFinalRef): LayerFinalRef = ## Duplicate final layer. LayerFinalRef( pPrf: final.pPrf, - fRpp: final.fRpp, - dirty: final.dirty) + fRpp: final.fRpp) func dup*(wp: VidVtxPair): VidVtxPair = ## Safe copy of `wp` argument diff --git a/nimbus/db/aristo/aristo_fetch.nim b/nimbus/db/aristo/aristo_fetch.nim index 0dc10113e..9cfb78eb2 100644 --- a/nimbus/db/aristo/aristo_fetch.nim +++ b/nimbus/db/aristo/aristo_fetch.nim @@ -17,7 +17,7 @@ import std/typetraits, eth/common, results, - "."/[aristo_desc, aristo_get, aristo_hashify, aristo_hike] + "."/[aristo_compute, aristo_desc, aristo_get, aristo_hike] # ------------------------------------------------------------------------------ # Private functions diff --git a/nimbus/db/aristo/aristo_layers.nim b/nimbus/db/aristo/aristo_layers.nim index d4630c346..9cf3762e9 100644 --- a/nimbus/db/aristo/aristo_layers.nim +++ b/nimbus/db/aristo/aristo_layers.nim @@ -29,9 +29,6 @@ func dup(sTab: Table[VertexID,VertexRef]): Table[VertexID,VertexRef] = # Public getters: lazy value lookup for read only versions # ------------------------------------------------------------------------------ -func dirty*(db: AristoDbRef): lent HashSet[VertexID] = - db.top.final.dirty - func pPrf*(db: AristoDbRef): lent HashSet[VertexID] = db.top.final.pPrf @@ -85,13 +82,10 @@ func layersGetKey*(db: AristoDbRef; vid: VertexID): Opt[HashKey] = ## hash key if it is stored on the cache that way. ## db.top.delta.kMap.withValue(vid, item): - # This is ok regardless of the `dirty` flag. If this vertex has become - # dirty, there is an empty `kMap[]` entry on this layer. return Opt.some(item[]) for w in db.rstack: w.delta.kMap.withValue(vid, item): - # Same reasoning as above regarding the `dirty` flag. 
return ok(item[]) Opt.none(HashKey) @@ -126,7 +120,6 @@ func layersPutVtx*( ) = ## Store a (potentally empty) vertex on the top layer db.top.delta.sTab[vid] = vtx - # db.top.final.dirty.incl root func layersResVtx*( db: AristoDbRef; @@ -146,7 +139,6 @@ func layersPutKey*( ) = ## Store a (potentally void) hash key on the top layer db.top.delta.kMap[vid] = key - # db.top.final.dirty.incl root # Modified top cache layers => hashify func layersResKey*(db: AristoDbRef; root: VertexID; vid: VertexID) = diff --git a/nimbus/db/aristo/aristo_merge.nim b/nimbus/db/aristo/aristo_merge.nim index f2f45453d..94b9fbf88 100644 --- a/nimbus/db/aristo/aristo_merge.nim +++ b/nimbus/db/aristo/aristo_merge.nim @@ -126,7 +126,7 @@ proc mergeStorageData*( rc = db.mergePayloadImpl(useID, stoPath, pyl) if rc.isOk: - # Mark account path for update for `hashify()` + # Mark account path Merkle keys for update db.updateAccountForHasher accHike if stoID.isValid: @@ -144,7 +144,7 @@ proc mergeStorageData*( assert stoID.isValid # debugging only return ok() - # Error: mark account path for update for `hashify()` + # Error: mark account path Merkle keys for update db.updateAccountForHasher accHike err(rc.error) diff --git a/nimbus/db/aristo/aristo_sign.nim b/nimbus/db/aristo/aristo_sign.nim index 173ace82c..b139c335f 100644 --- a/nimbus/db/aristo/aristo_sign.nim +++ b/nimbus/db/aristo/aristo_sign.nim @@ -16,8 +16,7 @@ import eth/common, results, - "."/[aristo_constants, aristo_desc, aristo_hashify, aristo_init, - aristo_merge] + "."/[aristo_compute, aristo_constants, aristo_desc, aristo_init, aristo_merge] # ------------------------------------------------------------------------------ # Public functions, signature generator diff --git a/nimbus/db/aristo/aristo_tx/tx_frame.nim b/nimbus/db/aristo/aristo_tx/tx_frame.nim index 11fbed1ab..7f9791aaa 100644 --- a/nimbus/db/aristo/aristo_tx/tx_frame.nim +++ b/nimbus/db/aristo/aristo_tx/tx_frame.nim @@ -16,7 +16,7 @@ import std/tables, results, - ".."/[aristo_desc, aristo_layers, aristo_hashify] + ".."/[aristo_desc, aristo_layers] func txFrameIsTop*(tx: AristoTxRef): bool diff --git a/nimbus/db/aristo/aristo_tx/tx_stow.nim b/nimbus/db/aristo/aristo_tx/tx_stow.nim index 1d5a2ad46..16186bf39 100644 --- a/nimbus/db/aristo/aristo_tx/tx_stow.nim +++ b/nimbus/db/aristo/aristo_tx/tx_stow.nim @@ -17,7 +17,7 @@ import std/tables, results, ../aristo_delta/delta_merge, - ".."/[aristo_desc, aristo_get, aristo_delta, aristo_layers, aristo_hashify] + ".."/[aristo_desc, aristo_get, aristo_delta, aristo_layers] # ------------------------------------------------------------------------------ # Private functions @@ -70,9 +70,6 @@ proc topMerge(db: AristoDbRef; src: HashKey): Result[void,AristoError] = else: return err(rc.error) - # Update layer for merge call - db.top.delta.src = src - # This one will return the `db.top.delta` if `db.balancer.isNil` db.balancer = db.deltaMerge(db.top.delta, db.balancer, ubeRoot).valueOr: return err(error[1]) diff --git a/nimbus/db/aristo/aristo_utils.nim b/nimbus/db/aristo/aristo_utils.nim index 54f44e81e..43a1999fd 100644 --- a/nimbus/db/aristo/aristo_utils.nim +++ b/nimbus/db/aristo/aristo_utils.nim @@ -23,58 +23,6 @@ import # Public functions, converters # ------------------------------------------------------------------------------ -proc toAccount*( - payload: PayloadRef; - db: AristoDbRef; - ): Result[Account,AristoError] = - ## Converts the argument `payload` to an `Account` type. 
If the implied - ## account das a storage slots system associated, the database `db` must - ## contain the Merkle hash key of the root vertex. - if payload.pType == AccountData: - var acc = Account( - nonce: payload.account.nonce, - balance: payload.account.balance, - codeHash: payload.account.codeHash, - storageRoot: EMPTY_ROOT_HASH) - if payload.stoID.isValid: - acc.storageRoot = (? db.getKeyRc payload.stoID).to(Hash256) - return ok(acc) - - err UtilsPayloadTypeUnsupported - -proc toAccount*( - vtx: VertexRef; - db: AristoDbRef; - ): Result[Account,AristoError] = - ## Variant of `toAccount()` for a `Leaf` vertex. - if vtx.isValid and vtx.vType == Leaf: - return vtx.lData.toAccount db - err UtilsAccVtxUnsupported - -proc toAccount*( - node: NodeRef; - ): Result[Account,AristoError] = - ## Variant of `toAccount()` for a `Leaf` node which must be complete (i.e. - ## a potential Merkle hash key must have been initialised.) - if node.isValid and node.vType == Leaf: - if node.lData.pType == AccountData: - var acc = Account( - nonce: node.lData.account.nonce, - balance: node.lData.account.balance, - codeHash: node.lData.account.codeHash, - storageRoot: EMPTY_ROOT_HASH) - if node.lData.stoID.isValid: - if not node.key[0].isValid: - return err(UtilsAccStorageKeyMissing) - acc.storageRoot = node.key[0].to(Hash256) - return ok(acc) - else: - return err(UtilsPayloadTypeUnsupported) - - err UtilsAccNodeUnsupported - -# --------------------- - proc toNode*( vtx: VertexRef; # Vertex to convert db: AristoDbRef; # Database, top layer @@ -176,18 +124,13 @@ proc updateAccountForHasher*( db: AristoDbRef; # Database hike: Hike; # Return value from `retrieveStorageID()` ) = - ## For a successful run of `retrieveStoAccHike()`, the argument `hike` is - ## used to mark/reset the keys along the `accPath` for being re-calculated - ## by `hashify()`. + ## The argument `hike` is used to mark/reset the keys along the implied + ## vertex path for being re-calculated. 
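Resetting cached keys along a vertex path pairs with the lazy computation
sketched earlier: a cleared entry is simply recomputed on the next access. A
toy sketch of such an invalidation walk, reusing the hypothetical `MiniTrie`
shape from the lazy-key sketch above:

    # Assumes the MiniTrie type from the lazy-key sketch.
    proc resetPath(t: var MiniTrie, path: openArray[int]) =
      # Drop the memoised key of every vertex on the path so that the
      # next lazyKey() call recomputes them.
      for vid in path:
        t.cache.del vid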
  ##
  # Clear Merkle keys so that `hashify()` can calculate the re-hash forest/tree
  for w in hike.legs.mapIt(it.wp.vid):
    db.layersResKey(hike.root, w)

-  # Signal to `hashify()` where to start rebuilding Merkel hashes
-  # db.top.final.dirty.incl hike.root
-  # db.top.final.dirty.incl hike.legs[^1].wp.vid
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
diff --git a/nimbus/db/core_db/backend/aristo_db.nim b/nimbus/db/core_db/backend/aristo_db.nim
index 76341e08a..a406b2a14 100644
--- a/nimbus/db/core_db/backend/aristo_db.nim
+++ b/nimbus/db/core_db/backend/aristo_db.nim
@@ -243,12 +243,11 @@ func toAristo*(mBe: CoreDbAccBackendRef): AristoDbRef =

 proc toAristoSavedStateBlockNumber*(
     mBe: CoreDbMptBackendRef;
-  ): tuple[stateRoot: Hash256, blockNumber: BlockNumber] =
+  ): BlockNumber =
   if not mBe.isNil and mBe.parent.isAristo:
     let rc = mBe.parent.AristoCoreDbRef.adbBase.getSavedState()
     if rc.isOk:
-      return (rc.value.src.to(Hash256), rc.value.serial.BlockNumber)
-  (EMPTY_ROOT_HASH, 0.BlockNumber)
+      return rc.value.serial.BlockNumber

 # ------------------------------------------------------------------------------
 # Public aristo iterators
diff --git a/nimbus/db/core_db/core_apps.nim b/nimbus/db/core_db/core_apps.nim
index 33770e200..08099b9ce 100644
--- a/nimbus/db/core_db/core_apps.nim
+++ b/nimbus/db/core_db/core_apps.nim
@@ -334,20 +334,24 @@ proc getSavedStateBlockNumber*(
   ##
   ## This function verifies the state consistency of the database and throws
   ## an assert exception if that fails. So the function will only apply to a
-  ## finalised (aka hashified) database state. For an an opportunistic use,
-  ## the `relax` argument can be set `true` so this function also returns
-  ## zero if the state consistency check fails.
+  ## saved database state. For an opportunistic use, the `relax` argument
+  ## can be set `true` so this function also returns the block number if the
+  ## state consistency check fails.
   ##
   const info = "getSavedStateBlockNumber(): "
-  var header: BlockHeader
-  let st = db.ctx.getColumn(CtGeneric).backend.toAristoSavedStateBlockNumber()
-  if db.getBlockHeader(st.blockNumber, header):
-    let state = db.ctx.getAccounts.state.valueOr:
-      raiseAssert info & $$error
-    if state == header.stateRoot:
-      return st.blockNumber
-    if not relax:
-      raiseAssert info & ": state mismatch at " & "#" & $st.blockNumber
+  # FIXME: The following construct will be replaced by a proper
+  #        `CoreDb` method.
+  let bn = db.ctx.getColumn(CtGeneric).backend.toAristoSavedStateBlockNumber()
+  if relax:
+    return bn
+  else:
+    var header: BlockHeader
+    if db.getBlockHeader(bn, header):
+      let state = db.ctx.getAccounts.state(updateOk=true).valueOr:
+        raiseAssert info & $$error
+      if state != header.stateRoot:
+        raiseAssert info & ": state mismatch at " & "#" & $bn
+      return bn

 proc getBlockHeader*(
     db: CoreDbRef;
diff --git a/tests/test_aristo/test_filter.nim b/tests/test_aristo/test_filter.nim
index 13001c7b6..bda882e7d 100644
--- a/tests/test_aristo/test_filter.nim
+++ b/tests/test_aristo/test_filter.nim
@@ -12,7 +12,6 @@
 ##

 import
-  std/sets,
   eth/common,
   results,
   unittest2,
@@ -23,7 +22,6 @@ import
     aristo_debug,
     aristo_desc,
     aristo_get,
-    aristo_layers,
     aristo_persistent,
     aristo_tx],
   ../replay/xcheck,
@@ -170,8 +168,7 @@ proc isDbEq(a, b: LayerDeltaRef; db: AristoDbRef; noisy = true): bool =
     if b.isNil:
       return false
     if unsafeAddr(a[]) != unsafeAddr(b[]):
-      if a.src != b.src or
-         a.kMap.getOrVoid(testRootVid) != b.kMap.getOrVoid(testRootVid) or
+      if a.kMap.getOrVoid(testRootVid) != b.kMap.getOrVoid(testRootVid) or
          a.vTop != b.vTop:
         return false
diff --git a/tests/test_aristo/test_tx.nim b/tests/test_aristo/test_tx.nim
index c4ca07fae..3878848e9 100644
--- a/tests/test_aristo/test_tx.nim
+++ b/tests/test_aristo/test_tx.nim
@@ -155,9 +155,6 @@ proc saveToBackend(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0
-
   block:
     let rc = db.txTop()
     xCheckRc rc.error == 0
@@ -175,9 +172,6 @@ proc saveToBackend(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0
-
   block:
     let rc = db.txTop()
     xCheckErr rc.value.level < 0 # force error
@@ -213,9 +207,6 @@ proc saveToBackendWithOops(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0
-
   block:
     let rc = db.txTop()
     xCheckRc rc.error == 0
@@ -229,9 +220,6 @@ proc saveToBackendWithOops(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0
-
   block:
     let rc = db.txTop()
     xCheckErr rc.value.level < 0 # force error
diff --git a/tests/test_coredb/coredb_test_xx.nim b/tests/test_coredb/coredb_test_xx.nim
index 97103a56c..36b20be72 100644
--- a/tests/test_coredb/coredb_test_xx.nim
+++ b/tests/test_coredb/coredb_test_xx.nim
@@ -87,7 +87,7 @@ let
   mainTest0m* = mainSample
     .cloneWith(
       name = "-am-some",
-      numBlocks = 5) # 1_000)
+      numBlocks = 1_000)

   mainTest1m* = mainSample
     .cloneWith(
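The reworked `getSavedStateBlockNumber()` above takes the saved block number
at face value in `relax` mode and otherwise asserts that the stored account
state matches the block header's state root. A standalone Nim restatement of
that rule (toy types and names, purely illustrative):

    type Root = array[32, byte]   # stand-in for a state root hash

    proc savedBlockNumber(bn: uint64; savedRoot, headerRoot: Root;
                          relax: bool): uint64 =
      # Relaxed callers accept the number as is; strict callers treat
      # a root mismatch as a fatal database inconsistency.
      if not relax and savedRoot != headerRoot:
        raiseAssert "getSavedStateBlockNumber(): state mismatch at #" & $bn
      bn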