Aristo code cosmetics and tests update (#2434)

* Update some docs

* Resolve obsolete compile-time option

why:
  It is not optional anymore

* Update checks

why:
  The notion of what constitutes a valid `Aristo` db has changed due to
  (even more) lazily calculated Merkle hash keys.

* Disable redundant unit test for production
Jordan Hrycaj 2024-07-01 10:59:18 +00:00 committed by GitHub
parent 740882d8ce
commit 2c87fd1636
10 changed files with 43 additions and 119 deletions

View File

@@ -1,14 +1,18 @@
* Check whether `HashKey` can be reduced to a simple 32 byte array (see
  *desc_identifiers.nim*)
* Remove the `RlpData` accounts payload type. It is not needed as a separate
  data type. An application must know the layout. So it can be subsumed
  under `RawData` (which could be renamed `PlainData`.)
* Currently, the data save/store logic only works when there is a VertexID(1)
  root. In tests without a `VertexID(1)` a dummy vertex is set up.
* Re-visit `delTree()`. Suggestion is deleting small trees on the memory layer,
  otherwise only deleting the root vertex (so it becomes inaccessible) and
  remember the follow-up vertices which can travel through the tx-layers
  to be picked up by the backend store.
* Consider changing fetch/merge/delete prototypes for account and storage. At
  the moment they all use `openArray[]` for strictly 32 byte arrays (which is
  only implicitly enforced at run time -- i.e. it would fail otherwise.) See
  the sketch after this list.
* Mental note: For *proof-mode* with pre-allocated locked vertices and Merkle
  keys, verification of a partial tree must be done by computing sub-tree keys
  at the relative roots and comparing them with the pre-allocated Merkle keys.
* Remove legacy state format import from `deblobifyTo()` after a while (last
  updated 28/06/24).
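Regarding the fetch/merge/delete prototype item above, a minimal sketch of the idea, with made-up names (`Hash32`, `fetchAccount`) rather than the actual Aristo API: a fixed-size array type moves the 32 byte length check from run time to compile time.

```nim
type
  Hash32 = array[32, byte]    # hypothetical fixed-size path/key type

proc fetchAccount(path: Hash32): string =
  ## Stand-in for a fetch prototype; only the signature matters here.
  "account at path starting with byte " & $(path[0])

var path: Hash32              # exactly 32 bytes, enforced by the compiler
path[0] = 0xab
echo fetchAccount(path)

# With an `openArray[byte]` parameter the length can only be checked at run
# time; with `Hash32` a wrongly sized argument is rejected at compile time:
#   echo fetchAccount(@[byte 1, 2, 3])   # does not compile
```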

View File

@@ -19,7 +19,7 @@ import
  stew/interval_set,
  results,
  ./aristo_walk/persistent,
  "."/[aristo_desc, aristo_get, aristo_init, aristo_utils],
  "."/[aristo_desc, aristo_get, aristo_init],
  ./aristo_check/[check_be, check_top]

# ------------------------------------------------------------------------------

@@ -51,9 +51,6 @@ proc checkTop*(

proc checkBE*(
    db: AristoDbRef;                   # Database, top layer
    relax = true;                      # Not re-compiling hashes if `true`
    cache = true;                      # Also verify against top layer cache
    fifos = false;                     # Also verify cascaded filter fifos
      ): Result[void,(VertexID,AristoError)] =
  ## Verify database backend structure. If the argument `relax` is set `false`,
  ## all necessary Merkle hashes are compiled and verified. If the argument

@@ -73,11 +70,11 @@ proc checkBE*(
  ##
  case db.backend.kind:
  of BackendMemory:
    return MemBackendRef.checkBE(db, cache=cache, relax=relax)
    return MemBackendRef.checkBE db
  of BackendRocksDB, BackendRdbHosting:
    return RdbBackendRef.checkBE(db, cache=cache, relax=relax)
    return RdbBackendRef.checkBE db
  of BackendVoid:
    return VoidBackendRef.checkBE(db, cache=cache, relax=relax)
    return VoidBackendRef.checkBE db

proc check*(

@@ -88,7 +85,7 @@ proc check*(
      ): Result[void,(VertexID,AristoError)] =
  ## Shortcut for running `checkTop()` followed by `checkBE()`
  ? db.checkTop(proofMode = proofMode)
  ? db.checkBE(relax = relax, cache = cache)
  ? db.checkBE()
  ok()

# ------------------------------------------------------------------------------

View File

@@ -17,7 +17,7 @@ import
  stew/interval_set,
  ../../aristo,
  ../aristo_walk/persistent,
  ".."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise]
  ".."/[aristo_desc, aristo_get, aristo_layers]

# ------------------------------------------------------------------------------
# Private helper

@@ -69,9 +69,6 @@ proc toNodeBE(

proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
    _: type T;
    db: AristoDbRef;                   # Database, top layer
    relax: bool;                       # Not compiling hashes if `true`
    cache: bool;                       # Also verify against top layer cache
    fifos = true;                      # Also verify cascaded filter fifos
      ): Result[void,(VertexID,AristoError)] =
  ## Make sure that each vertex has a Merkle hash and vice versa. Also check
  ## the vertex ID generator state.
@@ -82,9 +79,6 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
      topVidBe = vid
    if not vtx.isValid:
      return err((vid,CheckBeVtxInvalid))
    let rc = db.getKeyBE vid
    if rc.isErr or not rc.value.isValid:
      return err((vid,CheckBeKeyMissing))
    case vtx.vType:
    of Leaf:
      discard

@@ -104,16 +98,8 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
  for (vid,key) in T.walkKeyBe db:
    if topVidBe < vid:
      topVidBe = vid
    if not key.isValid:
      return err((vid,CheckBeKeyInvalid))
    let vtx = db.getVtxBE(vid).valueOr:
      return err((vid,CheckBeVtxMissing))
    let node = vtx.toNodeBE(db).valueOr:  # backend links only
      return err((vid,CheckBeKeyCantCompile))
    if not relax:
      let expected = node.digestTo(HashKey)
      if expected != key:
        return err((vid,CheckBeKeyMismatch))

  # Compare calculated `vTop` against database state
  if topVidBe.isValid:
@@ -133,30 +119,18 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
        return err((vid,CheckBeGarbledVTop))

  # Check layer cache against backend
  if cache:
  block:
    var topVidCache = VertexID(0)
    let checkKeysOk = true

    # Check structural table
    for (vid,vtx) in db.layersWalkVtx:
      if vtx.isValid and topVidCache < vid:
        topVidCache = vid
      let key = block:
        let rc = db.layersGetKey(vid)
        if rc.isOk:
          rc.value
        elif checkKeysOk:
          # A `kMap[]` entry must exist.
          return err((vid,CheckBeCacheKeyMissing))
        else:
          VOID_HASH_KEY
      let key = db.layersGetKey(vid).valueOr: VOID_HASH_KEY
      if not vtx.isValid:
        # Some vertex is to be deleted, the key must be empty
        if checkKeysOk and key.isValid:
        if key.isValid:
          return err((vid,CheckBeCacheKeyNonEmpty))
        # There must be a representation on the backend DB unless in a TX
        if db.getVtxBE(vid).isErr and db.stack.len == 0:
          return err((vid,CheckBeCacheVidUnsynced))

    # Check key table
    var list: seq[VertexID]
@@ -167,15 +141,6 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
      let vtx = db.getVtx vid
      if db.layersGetVtx(vid).isErr and not vtx.isValid:
        return err((vid,CheckBeCacheKeyDangling))
      if not key.isValid or relax:
        continue
      if not vtx.isValid:
        return err((vid,CheckBeCacheVtxDangling))
      let node = vtx.toNode(db).valueOr:  # compile cache first
        return err((vid,CheckBeCacheKeyCantCompile))
      let expected = node.digestTo(HashKey)
      if expected != key:
        return err((vid,CheckBeCacheKeyMismatch))

    # Check vTop
    if topVidCache.isValid and topVidCache != db.vTop:

@@ -185,7 +150,6 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
        if db.layersGetVtxOrVoid(vid).isValid or
           db.layersGetKeyOrVoid(vid).isValid:
          return err((db.vTop,CheckBeCacheGarbledVTop))

  ok()

# ------------------------------------------------------------------------------
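The rewritten cache check above relies on the `valueOr` template from the `results` package to substitute `VOID_HASH_KEY` when no key is cached. A minimal, self-contained sketch of that idiom, with invented names and values standing in for the Aristo ones:

```nim
import results

const VOID_VALUE = 0          # stand-in for `VOID_HASH_KEY`

proc lookup(tab: seq[int], i: int): Result[int, string] =
  if 0 <= i and i < tab.len: ok(tab[i])
  else: err("no entry")

let tab = @[10, 20, 30]

# `valueOr` unwraps the value on success, otherwise it evaluates the
# fallback branch -- the same pattern used for the cached key above.
let a = tab.lookup(1).valueOr: VOID_VALUE   # => 20
let b = tab.lookup(9).valueOr: VOID_VALUE   # => 0 (fallback)

echo a, " ", b
```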

View File

@@ -41,18 +41,8 @@
  ## functions with fixed assignments of the type of a state root (e.g. for
  ## a receipt or a transaction root.)

  LOOSE_STORAGE_TRIE_COUPLING* = true
    ## With the `LOOSE_STORAGE_TRIE_COUPLING` flag enabled, a sub-trie is
    ## considered empty if the root vertex ID is zero or at least
    ## `LEAST_FREE_VID` and there is no vertex available. If the vertex ID is
    ## not zero, whether it is considered as such affects calculating the
    ## Merkle hash node key for an account leaf of payload type `AccountData`.
    ##
    ## Setting this flag `true` might be helpful for running an API supporting
    ## both a legacy and the `Aristo` database backend.

static:
  doAssert 1 < LEAST_FREE_VID        # must stay away from `VertexID(1)`
  # must stay away from `VertexID(1)` and `VertexID(2)`
  doAssert 2 < LEAST_FREE_VID

# End
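The retained guard above is a compile-time check. For reference, a minimal sketch of the `static:`/`doAssert` idiom, with an invented constant value standing in for the real `LEAST_FREE_VID`:

```nim
const LEAST_FREE_VID = 100    # invented value for the sketch

static:
  # Evaluated while compiling; a violation aborts compilation instead of
  # surfacing as a run-time error.
  doAssert 2 < LEAST_FREE_VID, "LEAST_FREE_VID must stay above VertexID(2)"

echo "compiles only when the assertion holds"
```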

View File

@@ -37,21 +37,9 @@ type
    CheckAnyVTopUnset

    CheckBeCacheGarbledVTop
    CheckBeCacheIsDirty
    CheckBeCacheKeyCantCompile
    CheckBeCacheKeyDangling
    CheckBeCacheKeyMismatch
    CheckBeCacheKeyMissing
    CheckBeCacheKeyNonEmpty
    CheckBeCacheVidUnsynced
    CheckBeCacheVtxDangling
    CheckBeFifoSrcTrgMismatch
    CheckBeFifoTrgNotStateRoot
    CheckBeGarbledVTop
    CheckBeKeyCantCompile
    CheckBeKeyInvalid
    CheckBeKeyMismatch
    CheckBeKeyMissing
    CheckBeVtxBranchLinksMissing
    CheckBeVtxExtPfxMissing
    CheckBeVtxInvalid

@@ -62,7 +50,6 @@ type
    CheckStkVtxKeyMismatch
    CheckStkVtxKeyMissing

    CheckRlxVidVtxMismatch
    CheckRlxVtxIncomplete
    CheckRlxVtxKeyMissing
    CheckRlxVtxKeyMismatch

View File

@@ -63,15 +63,7 @@ proc toNode*(
      if vid.isValid:
        let key = db.getKey vid
        if not key.isValid:
          block looseCoupling:
            when LOOSE_STORAGE_TRIE_COUPLING:
              # Stale storage trie?
              if LEAST_FREE_VID <= vid.distinctBase and
                 not db.getVtx(vid).isValid:
                node.lData.stoID = VertexID(0)
                break looseCoupling
            # Otherwise this is a stale storage trie.
            return err(@[vid])
          return err(@[vid])
        node.key[0] = key
    return ok node
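With the loose-coupling branch gone, a missing storage key above is simply reported through the error channel as `err(@[vid])`. A self-contained sketch of that error convention -- a `Result` whose error lists the vertex IDs that could not be resolved -- using invented types (`VertexID`, `Node`, and `toNode` here are not the Aristo definitions):

```nim
import results

type
  VertexID = distinct uint64
  Node = object
    refs: seq[uint64]         # resolved references

func `$`(v: VertexID): string = "VertexID(" & $(uint64(v)) & ")"

proc toNode(ids: openArray[VertexID]): Result[Node, seq[VertexID]] =
  ## Resolve all IDs or report the offending ones in the error.
  var node: Node
  var missing: seq[VertexID]
  for id in ids:
    if uint64(id) mod 2 == 0:     # stand-in for "Merkle key available"
      node.refs.add uint64(id)
    else:
      missing.add id              # remember what could not be resolved
  if missing.len > 0:
    return err(missing)
  ok(node)

let rc = toNode([VertexID(2), VertexID(3)])
if rc.isErr:
  echo "unresolved: ", rc.error   # => unresolved: @[VertexID(3)]
```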

View File

@@ -3,6 +3,9 @@ Core database replacement wrapper object
This wrapper replaces the *TrieDatabaseRef* and its derivatives by the new
object *CoreDbRef*.

# **out of date**

Relations to current *TrieDatabaseRef* implementation
-----------------------------------------------------
Here are some incomplete translations for objects and constructors.

View File

@@ -151,7 +151,6 @@ proc storagesRunner(
proc aristoMain*(noisy = defined(debug)) =
  noisy.miscRunner()
  noisy.accountsRunner()
  noisy.storagesRunner()

when isMainModule:

@@ -164,13 +163,6 @@ when isMainModule:
  # Verify Problem with the database for production test
  noisy.aristoMain()

  # This one uses dumps from the external `nimbus-eth1-blob` repo
  when true and false:
    import ./test_sync_snap/snap_other_xx
    noisy.showElapsed("@snap_other_xx"):
      for n,sam in snapOtherList:
        noisy.accountsRunner(sam, resetDb=true)

  when true: # and false:
    let persistent = false or true
    noisy.showElapsed("@snap_test_list"):

View File

@@ -238,20 +238,15 @@ proc isDbEq(a, b: LayerDeltaRef; db: AristoDbRef; noisy = true): bool =

proc checkBeOk(
    dx: DbTriplet;
    relax = false;
    forceCache = false;
    fifos = true;
    noisy = true;
      ): bool =
  ## ..
  #for n in 0 ..< dx.len:
  #  let cache = if forceCache: true else: dx[n].dirty.len == 0
  #  block:
  #    let rc = dx[n].checkBE(relax=relax, cache=cache, fifos=fifos)
  #    xCheckRc rc.error == (0,0):
  #      noisy.say "***", "db checkBE failed",
  #        " n=", n, "/", dx.len-1,
  #        " cache=", cache
  for n in 0 ..< dx.len:
    let rc = dx[n].checkBE()
    xCheckRc rc.error == (0,0):
      noisy.say "***", "db checkBE failed",
        " n=", n, "/", dx.len-1
  true

# ------------------------------------------------------------------------------

@@ -317,7 +312,7 @@ proc testDistributedAccess*(
    # Check/verify backends
    block:
      let ok = dx.checkBeOk(noisy=noisy,fifos=true)
      let ok = dx.checkBeOk(noisy=noisy)
      xCheck ok:
        noisy.say "*** testDistributedAccess (4)", "n=", n, "db3".dump db3

@@ -375,7 +370,7 @@ proc testDistributedAccess*(
        ""
    # Check/verify backends
    block:
      let ok = dy.checkBeOk(noisy=noisy,fifos=true)
      let ok = dy.checkBeOk(noisy=noisy)
      xCheck ok

  when false: # or true:

View File

@@ -146,9 +146,9 @@ proc saveToBackend(
  # Verify context: nesting level must be 2 (i.e. two transactions)
  xCheck tx.level == 2

  #block:
  #  let rc = db.checkTop()
  #  xCheckRc rc.error == (0,0)
  block:
    let rc = db.checkTop()
    xCheckRc rc.error == (0,0)

  # Commit and hashify the current layer
  block:

@@ -163,9 +163,9 @@
  # Verify context: nesting level must be 1 (i.e. one transaction)
  xCheck tx.level == 1

  #block:
  #  let rc = db.checkBE(relax=true)
  #  xCheckRc rc.error == (0,0)
  block:
    let rc = db.checkBE()
    xCheckRc rc.error == (0,0)

  # Commit and save to backend
  block:

@@ -180,10 +180,10 @@
    let rc = db.schedStow(chunkedMpt=chunkedMpt)
    xCheckRc rc.error == 0

  #block:
  #  let rc = db.checkBE(relax=relax)
  #  xCheckRc rc.error == (0,0):
  #    noisy.say "***", "saveToBackend (8)", " debugID=", debugID
  block:
    let rc = db.checkBE()
    xCheckRc rc.error == (0,0):
      noisy.say "***", "saveToBackend (8)", " debugID=", debugID

  # Update layers to original level
  tx = db.txBegin().value.to(AristoDbRef).txBegin().value
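The test hunks above assert on `Result` errors via the repo's `xCheckRc` helper. As a rough, free-standing analogue using only `std/unittest` and the `results` package (the `parity` proc and its error tuple are invented for the sketch):

```nim
import std/unittest
import results

proc parity(n: int): Result[void, (int, string)] =
  ## Dummy check returning an error tuple, loosely like `(VertexID,AristoError)`.
  if n mod 2 == 0: ok()
  else: err((n, "odd"))

suite "result checks":
  test "no error expected":
    let rc = parity(4)
    check rc.isOk             # rough analogue of `xCheckRc rc.error == (0,0)`

  test "error carries its context":
    let rc = parity(3)
    check rc.isErr
    check rc.error == (3, "odd")
```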