Mirror of https://github.com/status-im/nimbus-eth1.git, synced 2025-01-23 18:49:57 +00:00
Aristo code cosmetics and tests update (#2434)
* Update some docu

* Resolve obsolete compile time option
  why: Not optional anymore

* Update checks
  why: The notion of what constitutes a valid `Aristo` db has changed due to
  (even more) lazy calculating Merkle hash keys.

* Disable redundant unit test for production
This commit is contained in: parent 740882d8ce, commit 2c87fd1636
@@ -1,14 +1,18 @@
* Check whether `HashKey` can be reduced to a simple 32 byte array (see
  *desc_identifiers.nim*)

* Remove the `RlpData` accounts payload type. It is not needed as a separate
  data type. An application must know the layout. So it can be subsumed
  under `RawData` (which could be renamed `PlainData`.)

* Currently, the data save/store logic only works when there is a VertexID(1)
  root. In tests without a `VertexID(1)` a dummy vertex is set up.

* Re-visit `delTree()`. Suggestion is deleting small trees on the memory layer,
  otherwise only deleting the root vertex (so it becomes inaccessible) and
  remembering the follow-up vertices which can travel through the tx-layers
  to be picked up by the backend store.

* Consider changing fetch/merge/delete prototypes for account and storage. At
  the moment they all use `openArray[]` for strictly 32 byte arrays (which is
  only implicitly enforced at run time -- i.e. it would fail otherwise; see
  the sketch below.)

* Mental note: For *proof-mode* with pre-allocated locked vertices and Merkle
  keys, verification of a partial tree must be done by computing sub-tree keys
  at the relative roots and comparing them with the pre-allocated Merkle keys.

* Remove legacy state format import from `deblobifyTo()` after a while (last
  updated 28/06/24).
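To illustrate the `openArray[]` item above, a minimal sketch (the procs `fetchAccountLoose`/`fetchAccountStrict` are made-up names, not the Aristo API): a fixed-size `array[32, byte]` parameter moves the length requirement from a run-time check into the signature.

```nim
# Sketch only: `fetchAccountLoose`/`fetchAccountStrict` are made-up names,
# not part of the Aristo API.

proc fetchAccountLoose(path: openArray[byte]): bool =
  ## Length is only enforced at run time, as described in the note above.
  if path.len != 32:
    return false          # "it would fail otherwise"
  true

proc fetchAccountStrict(path: array[32, byte]): bool =
  ## The 32 byte requirement is part of the signature; a wrong size is
  ## rejected by the compiler instead of at run time.
  true

when isMainModule:
  var key: array[32, byte]                        # all zero is fine here
  doAssert fetchAccountLoose(key)                 # array converts to openArray
  doAssert fetchAccountStrict(key)
  doAssert not fetchAccountLoose(@[byte 1, 2, 3]) # too short, caught at run time
  # fetchAccountStrict(@[byte 1, 2, 3])           # would not even compile
```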
@@ -19,7 +19,7 @@ import
  stew/interval_set,
  results,
  ./aristo_walk/persistent,
  "."/[aristo_desc, aristo_get, aristo_init, aristo_utils],
  "."/[aristo_desc, aristo_get, aristo_init],
  ./aristo_check/[check_be, check_top]

# ------------------------------------------------------------------------------
@@ -51,9 +51,6 @@ proc checkTop*(

proc checkBE*(
    db: AristoDbRef;                  # Database, top layer
    relax = true;                     # Not re-compiling hashes if `true`
    cache = true;                     # Also verify against top layer cache
    fifos = false;                    # Also verify cascaded filter fifos
      ): Result[void,(VertexID,AristoError)] =
  ## Verify database backend structure. If the argument `relax` is set `false`,
  ## all necessary Merkle hashes are compiled and verified. If the argument
@@ -73,11 +70,11 @@ proc checkBE*(
  ##
  case db.backend.kind:
  of BackendMemory:
    return MemBackendRef.checkBE(db, cache=cache, relax=relax)
    return MemBackendRef.checkBE db
  of BackendRocksDB, BackendRdbHosting:
    return RdbBackendRef.checkBE(db, cache=cache, relax=relax)
    return RdbBackendRef.checkBE db
  of BackendVoid:
    return VoidBackendRef.checkBE(db, cache=cache, relax=relax)
    return VoidBackendRef.checkBE db


proc check*(
@@ -88,7 +85,7 @@ proc check*(
      ): Result[void,(VertexID,AristoError)] =
  ## Shortcut for running `checkTop()` followed by `checkBE()`
  ? db.checkTop(proofMode = proofMode)
  ? db.checkBE(relax = relax, cache = cache)
  ? db.checkBE()
  ok()

# ------------------------------------------------------------------------------
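For orientation, a hedged usage sketch of the slimmed-down checking API above: `verifyDb` is a made-up wrapper and the import paths are assumptions; only `checkTop`, `checkBE` and the types are taken from the hunks shown here.

```nim
# Usage sketch only: `verifyDb` and the import paths are assumptions.
import
  results,
  ./aristo_desc, ./aristo_check      # assumed module paths

proc verifyDb(db: AristoDbRef): Result[void,(VertexID,AristoError)] =
  ## After this change `checkTop`/`checkBE` take no `relax`, `cache` or
  ## `fifos` arguments any more; the backend check always runs in its
  ## default mode.
  ? db.checkTop(proofMode = false)
  ? db.checkBE()
  ok()
```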
@@ -17,7 +17,7 @@ import
  stew/interval_set,
  ../../aristo,
  ../aristo_walk/persistent,
  ".."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise]
  ".."/[aristo_desc, aristo_get, aristo_layers]

# ------------------------------------------------------------------------------
# Private helper
@@ -69,9 +69,6 @@ proc toNodeBE(
proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
    _: type T;
    db: AristoDbRef;                  # Database, top layer
    relax: bool;                      # Not compiling hashes if `true`
    cache: bool;                      # Also verify against top layer cache
    fifos = true;                     # Also verify cascaded filter fifos
      ): Result[void,(VertexID,AristoError)] =
  ## Make sure that each vertex has a Merkle hash and vice versa. Also check
  ## the vertex ID generator state.
@@ -82,9 +79,6 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
      topVidBe = vid
    if not vtx.isValid:
      return err((vid,CheckBeVtxInvalid))
    let rc = db.getKeyBE vid
    if rc.isErr or not rc.value.isValid:
      return err((vid,CheckBeKeyMissing))
    case vtx.vType:
    of Leaf:
      discard
@@ -104,16 +98,8 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
  for (vid,key) in T.walkKeyBe db:
    if topVidBe < vid:
      topVidBe = vid
    if not key.isValid:
      return err((vid,CheckBeKeyInvalid))
    let vtx = db.getVtxBE(vid).valueOr:
      return err((vid,CheckBeVtxMissing))
    let node = vtx.toNodeBE(db).valueOr:        # backend links only
      return err((vid,CheckBeKeyCantCompile))
    if not relax:
      let expected = node.digestTo(HashKey)
      if expected != key:
        return err((vid,CheckBeKeyMismatch))

  # Compare calculated `vTop` against database state
  if topVidBe.isValid:
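The loop above is a plain recompute-and-compare check: rebuild the node, hash it, compare with the stored key. A self-contained toy analogue, using `std/hashes` fingerprints instead of Merkle keys (names are illustrative only, not the Aristo code):

```nim
# Toy analogue of the recompute-and-compare check above.
import std/[hashes, tables]

proc verifyFingerprints(data: Table[int, string];
                        stored: Table[int, Hash]): seq[int] =
  ## Return the ids whose stored fingerprint no longer matches the data,
  ## mirroring the `digestTo(HashKey)` comparison above.
  for id, value in data:
    let expected = hash(value)          # recompute from the source of truth
    if stored.getOrDefault(id) != expected:
      result.add id                     # analogue of `CheckBeKeyMismatch`

when isMainModule:
  let stored = {1: hash("abc"), 2: hash("stale")}.toTable
  let data = {1: "abc", 2: "def"}.toTable
  doAssert verifyFingerprints(data, stored) == @[2]
```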
@@ -133,30 +119,18 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
      return err((vid,CheckBeGarbledVTop))

  # Check layer cache against backend
  if cache:
  block:
    var topVidCache = VertexID(0)
    let checkKeysOk = true

    # Check structural table
    for (vid,vtx) in db.layersWalkVtx:
      if vtx.isValid and topVidCache < vid:
        topVidCache = vid
      let key = block:
        let rc = db.layersGetKey(vid)
        if rc.isOk:
          rc.value
        elif checkKeysOk:
          # A `kMap[]` entry must exist.
          return err((vid,CheckBeCacheKeyMissing))
        else:
          VOID_HASH_KEY
      let key = db.layersGetKey(vid).valueOr: VOID_HASH_KEY
      if not vtx.isValid:
        # Some vertex is to be deleted, the key must be empty
        if checkKeysOk and key.isValid:
        if key.isValid:
          return err((vid,CheckBeCacheKeyNonEmpty))
        # There must be a representation on the backend DB unless in a TX
        if db.getVtxBE(vid).isErr and db.stack.len == 0:
          return err((vid,CheckBeCacheVidUnsynced))

    # Check key table
    var list: seq[VertexID]
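The one-liner replacing the multi-line `block:` above uses the `valueOr` template from `nim-results`; a stand-alone sketch of that idiom (the `lookup` helper is made up for the example):

```nim
import results

proc lookup(entries: seq[(int, string)]; id: int): Result[string, string] =
  ## Made-up helper returning a `Result`, standing in for `layersGetKey`.
  for (k, v) in entries:
    if k == id:
      return ok(v)
  err("not found")

when isMainModule:
  let entries = @[(1, "one"), (2, "two")]

  # Verbose form, as in the replaced code: test `isOk`, unwrap or fall back.
  let a = block:
    let rc = lookup(entries, 3)
    if rc.isOk: rc.value
    else: "<void>"

  # `valueOr` form, as in the new code: the body runs only on error and
  # supplies the fallback value.
  let b = lookup(entries, 3).valueOr: "<void>"

  doAssert a == b and b == "<void>"
```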
@@ -167,15 +141,6 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
      let vtx = db.getVtx vid
      if db.layersGetVtx(vid).isErr and not vtx.isValid:
        return err((vid,CheckBeCacheKeyDangling))
      if not key.isValid or relax:
        continue
      if not vtx.isValid:
        return err((vid,CheckBeCacheVtxDangling))
      let node = vtx.toNode(db).valueOr:        # compile cache first
        return err((vid,CheckBeCacheKeyCantCompile))
      let expected = node.digestTo(HashKey)
      if expected != key:
        return err((vid,CheckBeCacheKeyMismatch))

    # Check vTop
    if topVidCache.isValid and topVidCache != db.vTop:
@@ -185,7 +150,6 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
    if db.layersGetVtxOrVoid(vid).isValid or
       db.layersGetKeyOrVoid(vid).isValid:
      return err((db.vTop,CheckBeCacheGarbledVTop))

  ok()

# ------------------------------------------------------------------------------
@@ -41,18 +41,8 @@ const
    ## functions with fixed assignments of the type of a state root (e.g. for
    ## a receipt or a transaction root.)

  LOOSE_STORAGE_TRIE_COUPLING* = true
    ## With the `LOOSE_STORAGE_TRIE_COUPLING` flag enabled, a sub-trie is
    ## considered empty if the root vertex ID is zero or at least
    ## `LEAST_FREE_VID` and there is no vertex available. Whether a non-zero
    ## vertex ID should be considered as such affects calculating the Merkle
    ## hash node key for an account leaf of payload type `AccountData`.
    ##
    ## Setting this flag `true` might be helpful for running an API supporting
    ## both a legacy and the `Aristo` database backend.
    ##

static:
  doAssert 1 < LEAST_FREE_VID # must stay away from `VertexID(1)`
  # must stay away from `VertexID(1)` and `VertexID(2)`
  doAssert 2 < LEAST_FREE_VID

# End
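The surviving `static:` block is the compile-time counterpart of such invariants; a minimal stand-alone illustration (`LEAST_FREE_VID_DEMO` is a made-up constant for this sketch, not the real `LEAST_FREE_VID`):

```nim
# Minimal stand-alone illustration; the constant is made up.
const
  LEAST_FREE_VID_DEMO = 100

static:
  # Evaluated by the compiler: a violation aborts compilation instead of
  # surfacing at run time.
  doAssert 2 < LEAST_FREE_VID_DEMO   # stay away from `VertexID(1)` and `VertexID(2)`
```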
@@ -37,21 +37,9 @@ type
    CheckAnyVTopUnset

    CheckBeCacheGarbledVTop
    CheckBeCacheIsDirty
    CheckBeCacheKeyCantCompile
    CheckBeCacheKeyDangling
    CheckBeCacheKeyMismatch
    CheckBeCacheKeyMissing
    CheckBeCacheKeyNonEmpty
    CheckBeCacheVidUnsynced
    CheckBeCacheVtxDangling
    CheckBeFifoSrcTrgMismatch
    CheckBeFifoTrgNotStateRoot
    CheckBeGarbledVTop
    CheckBeKeyCantCompile
    CheckBeKeyInvalid
    CheckBeKeyMismatch
    CheckBeKeyMissing
    CheckBeVtxBranchLinksMissing
    CheckBeVtxExtPfxMissing
    CheckBeVtxInvalid
@@ -62,7 +50,6 @@ type
    CheckStkVtxKeyMismatch
    CheckStkVtxKeyMissing

    CheckRlxVidVtxMismatch
    CheckRlxVtxIncomplete
    CheckRlxVtxKeyMissing
    CheckRlxVtxKeyMismatch
@@ -63,14 +63,6 @@ proc toNode*(
    if vid.isValid:
      let key = db.getKey vid
      if not key.isValid:
        block looseCoupling:
          when LOOSE_STORAGE_TRIE_COUPLING:
            # Stale storage trie?
            if LEAST_FREE_VID <= vid.distinctBase and
               not db.getVtx(vid).isValid:
              node.lData.stoID = VertexID(0)
              break looseCoupling
          # Otherwise this is a stale storage trie.
          return err(@[vid])
      node.key[0] = key
  return ok node
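The removed branch relied on Nim's labeled `block`/`break` to skip the error return; a stand-alone sketch of that control-flow pattern (values and names are illustrative, not the Aristo logic):

```nim
proc classify(vid: int; vtxKnown: bool): string =
  ## Illustrative only; the threshold and strings are made up.
  block looseCoupling:
    if 100 <= vid and not vtxKnown:
      # Treat the sub-trie as empty and skip the error return below.
      break looseCoupling
    return "error: stale storage trie"
  "ok: treated as empty"

when isMainModule:
  doAssert classify(123, false) == "ok: treated as empty"
  doAssert classify(1, false) == "error: stale storage trie"
```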
@@ -3,6 +3,9 @@ Core database replacement wrapper object
This wrapper replaces the *TrieDatabaseRef* and its derivatives by the new
object *CoreDbRef*.

# **out of date**


Relations to current *TrieDatabaseRef* implementation
-----------------------------------------------------
Here are some incomplete translations for objects and constructors.
@@ -151,7 +151,6 @@ proc storagesRunner(

proc aristoMain*(noisy = defined(debug)) =
  noisy.miscRunner()
  noisy.accountsRunner()
  noisy.storagesRunner()

when isMainModule:
@@ -164,13 +163,6 @@ when isMainModule:
  # Verify Problem with the database for production test
  noisy.aristoMain()

  # This one uses dumps from the external `nimbus-eth1-blob` repo
  when true and false:
    import ./test_sync_snap/snap_other_xx
    noisy.showElapsed("@snap_other_xx"):
      for n,sam in snapOtherList:
        noisy.accountsRunner(sam, resetDb=true)

  when true: # and false:
    let persistent = false or true
    noisy.showElapsed("@snap_test_list"):
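The `when true and false:` / `when true: # and false:` lines above are the test driver's compile-time toggles; a minimal sketch of the pattern:

```nim
# Flipping the boolean expression in `when` includes or excludes a whole
# section at compile time.
when isMainModule:
  when true and false:        # disabled: the branch is not even compiled
    echo "extra test section (skipped)"

  when true: # and false:     # enabled; re-appending `and false` disables it
    echo "regular test section"
```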
@@ -238,20 +238,15 @@ proc isDbEq(a, b: LayerDeltaRef; db: AristoDbRef; noisy = true): bool =

proc checkBeOk(
    dx: DbTriplet;
    relax = false;
    forceCache = false;
    fifos = true;
    noisy = true;
      ): bool =
  ## ..
  #for n in 0 ..< dx.len:
  #  let cache = if forceCache: true else: dx[n].dirty.len == 0
  #  block:
  #    let rc = dx[n].checkBE(relax=relax, cache=cache, fifos=fifos)
  #    xCheckRc rc.error == (0,0):
  #      noisy.say "***", "db checkBE failed",
  #        " n=", n, "/", dx.len-1,
  #        " cache=", cache
  for n in 0 ..< dx.len:
    let rc = dx[n].checkBE()
    xCheckRc rc.error == (0,0):
      noisy.say "***", "db checkBE failed",
        " n=", n, "/", dx.len-1
  true

# ------------------------------------------------------------------------------
@@ -317,7 +312,7 @@ proc testDistributedAccess*(

  # Check/verify backends
  block:
    let ok = dx.checkBeOk(noisy=noisy,fifos=true)
    let ok = dx.checkBeOk(noisy=noisy)
    xCheck ok:
      noisy.say "*** testDistributedAccess (4)", "n=", n, "db3".dump db3

@@ -375,7 +370,7 @@ proc testDistributedAccess*(
      ""
  # Check/verify backends
  block:
    let ok = dy.checkBeOk(noisy=noisy,fifos=true)
    let ok = dy.checkBeOk(noisy=noisy)
    xCheck ok

  when false: # or true:
@@ -146,9 +146,9 @@ proc saveToBackend(
  # Verify context: nesting level must be 2 (i.e. two transactions)
  xCheck tx.level == 2

  #block:
  #  let rc = db.checkTop()
  #  xCheckRc rc.error == (0,0)
  block:
    let rc = db.checkTop()
    xCheckRc rc.error == (0,0)

  # Commit and hashify the current layer
  block:
@@ -163,9 +163,9 @@ proc saveToBackend(
  # Verify context: nesting level must be 1 (i.e. one transaction)
  xCheck tx.level == 1

  #block:
  #  let rc = db.checkBE(relax=true)
  #  xCheckRc rc.error == (0,0)
  block:
    let rc = db.checkBE()
    xCheckRc rc.error == (0,0)

  # Commit and save to backend
  block:
@@ -180,10 +180,10 @@ proc saveToBackend(
    let rc = db.schedStow(chunkedMpt=chunkedMpt)
    xCheckRc rc.error == 0

  #block:
  #  let rc = db.checkBE(relax=relax)
  #  xCheckRc rc.error == (0,0):
  #    noisy.say "***", "saveToBackend (8)", " debugID=", debugID
  block:
    let rc = db.checkBE()
    xCheckRc rc.error == (0,0):
      noisy.say "***", "saveToBackend (8)", " debugID=", debugID

  # Update layers to original level
  tx = db.txBegin().value.to(AristoDbRef).txBegin().value