Some cleanups (#2428)

* Remove `dirty` set from structural objects

why:
  Not used anymore, the tree is dirty by default.

* Rename `aristo_hashify` -> `aristo_compute`

* Remove cruft, update comments, cosmetics, etc.

* Simplify `SavedState` object

why:
  The key chaining has become obsolete after the extra lazy hashing. There
  is some space available for a state hash to be maintained in future.

details:
  Accept the legacy `SavedState` object serialisation format for a
  while (it will be overwritten by the new format.)
Jordan Hrycaj 2024-06-28 18:43:04 +00:00 committed by GitHub
parent 14c3772545
commit 8dd038144b
25 changed files with 55 additions and 204 deletions

View File

@@ -358,12 +358,10 @@ assumed, i.e. the list with the single vertex ID *1*.

 ### 4.7 Serialisation of a last saved state record

         0 +--+--+--+--+--+ .. --+--+ .. --+
-          | |                               -- 32 bytes source state hash
+          | |                               -- 32 bytes state hash
        32 +--+--+--+--+--+ .. --+--+ .. --+
-          | |                               -- 32 bytes target state hash
-       64 +--+--+--+--+--+ .. --+--+ .. --+
           | |                               -- state number/block number
-       72 +--+--+--+--+--+--+--+--+
+       40 +--+--+--+--+--+--+--+--+
           | |                               -- marker(8), 0x7f
           +--+
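For orientation, a minimal self-contained Nim sketch of the new 41-byte layout shown above. The names `StateRecord` and `encodeState` are stand-ins for this example only; the real API is the `SavedState`/`blobifyTo()` pair in the next file.

type
  StateRecord = object
    key: array[32, byte]          # 32 bytes state hash (offset 0)
    serial: uint64                # state number/block number (offset 32)

proc encodeState(sr: StateRecord): seq[byte] =
  ## Pack as: 32 byte hash, 8 byte big-endian serial, 0x7f marker.
  result = newSeqOfCap[byte](41)
  result.add sr.key
  for i in countdown(7, 0):       # big-endian, no external helpers
    result.add byte((sr.serial shr (8 * i)) and 0xff)
  result.add 0x7f'u8

when isMainModule:
  doAssert encodeState(StateRecord(serial: 258)).len == 41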

View File

@@ -152,21 +152,7 @@ proc blobify*(tuv: VertexID): Blob =

 proc blobifyTo*(lSst: SavedState; data: var Blob): Result[void,AristoError] =
   ## Serialise a last saved state record
-  case lSst.src.len:
-  of 0:
-    data.setLen(32)
-  of 32:
-    data.setLen(0)
-    data.add lSst.src.data
-  else:
-    return err(BlobifyStateSrcLenGarbled)
-  case lSst.trg.len:
-  of 0:
-    data.setLen(64)
-  of 32:
-    data.add lSst.trg.data
-  else:
-    return err(BlobifyStateTrgLenGarbled)
+  data.add lSst.key.data
   data.add lSst.serial.toBytesBE
   data.add @[0x7fu8]
   ok()
@@ -352,17 +338,20 @@ proc deblobifyTo*(
       ): Result[void,AristoError] =
   ## De-serialise the last saved state data record previously encoded with
   ## `blobify()`.
-  if data.len != 73:
+  # Keep that legacy setting for a while
+  if data.len == 73:
+    if data[^1] != 0x7f:
+      return err(DeblobWrongType)
+    lSst.key = EMPTY_ROOT_HASH
+    lSst.serial = uint64.fromBytesBE data.toOpenArray(64, 71)
+    return ok()
+  # -----
+  if data.len != 41:
     return err(DeblobWrongSize)
   if data[^1] != 0x7f:
     return err(DeblobWrongType)
-  func loadHashKey(data: openArray[byte]): Result[HashKey,AristoError] =
-    var w = HashKey.fromBytes(data).valueOr:
-      return err(DeblobHashKeyExpected)
-    ok move(w)
-  lSst.src = ? data.toOpenArray(0, 31).loadHashKey()
-  lSst.trg = ? data.toOpenArray(32, 63).loadHashKey()
-  lSst.serial = uint64.fromBytesBE data.toOpenArray(64, 71)
+  (addr lSst.key.data[0]).copyMem(unsafeAddr data[0], 32)
+  lSst.serial = uint64.fromBytesBE data.toOpenArray(32, 39)
   ok()

 proc deblobify*(
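The dual-format acceptance above can be illustrated with a standalone sketch. `StateRecord`, `beLoad64` and the error enum are assumptions of this example; the real code returns `Result[void,AristoError]`.

type
  StateRecord = object
    key: array[32, byte]
    serial: uint64

  DecodeError = enum
    deOk, deWrongSize, deWrongType

proc beLoad64(data: openArray[byte]): uint64 =
  ## Big-endian read of 8 bytes.
  for b in data: result = (result shl 8) or uint64(b)

proc decodeState(data: openArray[byte]; sr: var StateRecord): DecodeError =
  if data.len == 73:            # legacy: src(32) trg(32) serial(8) marker(1)
    if data[^1] != 0x7f: return deWrongType
    sr.key = default(array[32, byte])   # legacy hashes are dropped
    sr.serial = beLoad64(data.toOpenArray(64, 71))
    return deOk
  if data.len != 41:            # new: key(32) serial(8) marker(1)
    return deWrongSize
  if data[^1] != 0x7f:
    return deWrongType
  for i in 0 .. 31: sr.key[i] = data[i]
  sr.serial = beLoad64(data.toOpenArray(32, 39))
  deOk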

View File

@@ -14,7 +14,7 @@

 {.push raises: [].}

 import
-  std/[algorithm, sequtils, sets, tables],
+  std/[algorithm, sequtils, tables],
   eth/common,
   stew/interval_set,
   results,
@@ -31,9 +31,9 @@ proc checkTop*(
     proofMode = false;                 # Has proof nodes
       ): Result[void,(VertexID,AristoError)] =
   ## Verify that the cache structure is correct as it would be after `merge()`
-  ## and `hashify()` operations. Unless `proofMode` is set `true` it would not
-  ## fully check against the backend, which is typically not applicable after
-  ## `delete()` operations.
+  ## operations. Unless `proofMode` is set `true` it would not fully check
+  ## against the backend, which is typically not applicable after `delete()`
+  ## operations.
   ##
   ## The following is verified:
   ##

View File

@@ -135,14 +135,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](

   # Check layer cache against backend
   if cache:
     var topVidCache = VertexID(0)

-    let checkKeysOk = block:
-      if db.dirty.len == 0:
-        true
-      elif relax:
-        false
-      else:
-        return err((VertexID(0),CheckBeCacheIsDirty))
+    let checkKeysOk = true

     # Check structural table
     for (vid,vtx) in db.layersWalkVtx:

View File

@@ -42,7 +42,7 @@ proc checkTopStrict*(

       if key != node.digestTo(HashKey):
         return err((vid,CheckStkVtxKeyMismatch))

-    elif db.dirty.len == 0 or db.layersGetKey(vid).isErr:
+    elif db.layersGetKey(vid).isErr:
       # So `vtx` exists but not `key`, i.e. the cache is supposedly dirty
       # and the vertex has a zero entry.
       return err((vid,CheckStkVtxKeyMissing))

View File

@@ -11,13 +11,10 @@

 {.push raises: [].}

 import
-  chronicles,
   eth/common,
   results,
   "."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise]

-logScope:
-  topics = "aristo-hashify"

 proc computeKey*(
     db: AristoDbRef;                   # Database, top layer

View File

@@ -401,7 +401,6 @@ proc ppFilter(
   if fl.isNil:
     result &= " n/a"
     return
-  result &= pfx & "src=" & fl.src.ppKey(db)
   result &= pfx & "vTop=" & fl.vTop.ppVid
   result &= pfx & "sTab" & pfx1 & "{"
   for n,vid in fl.sTab.sortedKeys:
@@ -521,11 +520,6 @@ proc ppLayer(
         tLen = layer.final.fRpp.len
         info = "fRpp(" & $tLen & ")"
       result &= info.doPrefix(0 < tLen) & layer.final.fRpp.ppFRpp(db,indent+2)
-    if 0 < nOKs:
-      let
-        info = if layer.final.dirty.len == 0: "clean"
-               else: "dirty" & layer.final.dirty.ppVids
-      result &= info.doPrefix(false)

 # ------------------------------------------------------------------------------
 # Public functions

View File

@@ -429,7 +429,7 @@ proc deleteStorageData*(
       return err(DelPathNotFound)
     return err(error[1])

-  # Mark account path for update for `hashify()`
+  # Mark account path Merkle keys for update
   db.updateAccountForHasher accHike

   db.deleteImpl(stoHike).isOkOr:
@@ -464,7 +464,7 @@ proc deleteStorageTree*(
   if not stoID.isValid:
     return err(DelStoRootMissing)

-  # Mark account path for update for `hashify()`
+  # Mark account path Merkle keys for update
   db.updateAccountForHasher accHike

   ? db.delSubTreeImpl stoID

View File

@@ -69,8 +69,7 @@ proc deltaPersistent*(
   defer: updateSiblings.rollback()

   let lSst = SavedState(
-    src: db.balancer.src,
-    trg: db.balancer.kMap.getOrVoid(VertexID 1),
+    key: EMPTY_ROOT_HASH,              # placeholder for more
     serial: nxtFid)

   # Store structural single trie entries

View File

@@ -30,43 +30,22 @@ proc deltaMerge*(
   ## stacked and the database access is `upper -> lower -> backend` whereas
-  ## the `src/trg` matching logic goes the other way round.
-  ##
-  ## The resuting filter has no `FilterID` set.
-  ##
-  ## Comparing before and after merge
-  ## ::
-  ##   arguments                       | merged result
-  ##   --------------------------------+------------------------------------
-  ##   (src2==trg1) --> upper --> trg2 |
-  ##                                   | (src1==trg0) --> newFilter --> trg2
-  ##   (src1==trg0) --> lower --> trg1 |
-  ##                                   |
-  ##   beStateRoot --> trg0            |
   ##
   # Degenerate case: `upper` is void
   if lower.isNil:
     if upper.isNil:
       # Even more degenerate case when both filters are void
       return ok LayerDeltaRef(nil)
-    if upper.src != beStateRoot:
-      return err((VertexID(1),FilStateRootMismatch))
     return ok(upper)

   # Degenerate case: `upper` is non-trivial and `lower` is void
   if upper.isNil:
-    if lower.src != beStateRoot:
-      return err((VertexID(0), FilStateRootMismatch))
     return ok(lower)

-  # Verify stackability
-  let lowerTrg = lower.kMap.getOrVoid VertexID(1)
-  if upper.src != lowerTrg:
-    return err((VertexID(0), FilTrgSrcMismatch))
-  if lower.src != beStateRoot:
-    return err((VertexID(0), FilStateRootMismatch))

   # There is no need to deep copy table vertices as they will not be modified.
   let newFilter = LayerDeltaRef(
-    src:  lower.src,
     sTab: lower.sTab,
     kMap: lower.kMap,
     vTop: upper.vTop)
@@ -95,11 +74,6 @@ proc deltaMerge*(
     else:
       return err((vid,rc.error))

-  # # Check consistency
-  # if (newFilter.src == newFilter.kMap.getOrVoid(VertexID 1)) !=
-  #    (newFilter.sTab.len == 0 and newFilter.kMap.len == 0):
-  #   return err((VertexID(0),FilSrcTrgInconsistent))

   ok newFilter

 # ------------------------------------------------------------------------------
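As a rough model of what remains of `deltaMerge()` once the `src/trg` checks are gone, a standalone sketch with simplified tables. `Delta`, `deltaMergeSketch` and the string payloads are assumptions; the real code also reconciles per-vertex conflicts and returns a `Result`.

import std/tables

type
  VertexID = uint64
  Delta = ref object
    sTab: Table[VertexID, string]    # stand-in for the vertex table
    kMap: Table[VertexID, string]    # stand-in for the Merkle key table
    vTop: VertexID

proc deltaMergeSketch(upper, lower: Delta): Delta =
  ## Stack `upper` on top of `lower`; entries of `upper` win.
  if lower.isNil: return upper       # degenerate cases, as in the diff
  if upper.isNil: return lower
  result = Delta(sTab: lower.sTab, kMap: lower.kMap, vTop: upper.vTop)
  for vid, vtx in upper.sTab: result.sTab[vid] = vtx
  for vid, key in upper.kMap: result.kMap[vid] = key

when isMainModule:
  let
    lower = Delta(sTab: {1'u64: "a"}.toTable, vTop: 1)
    upper = Delta(sTab: {1'u64: "b", 2'u64: "c"}.toTable, vTop: 2)
  doAssert deltaMergeSketch(upper, lower).sTab[1] == "b"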

View File

@@ -29,7 +29,7 @@ proc revFilter*(
   ## backend (excluding optionally installed read-only filter.)
   ##
   # Register MPT state roots for reverting back
-  let rev = LayerDeltaRef(src: filter.kMap.getOrVoid(VertexID 1))
+  let rev = LayerDeltaRef()

   # Get vid generator state on backend
   block:

View File

@@ -23,8 +23,6 @@ type
     BlobifyLeafPathOverflow
     BlobifyNilFilter
     BlobifyNilVertex
-    BlobifyStateSrcLenGarbled
-    BlobifyStateTrgLenGarbled

   # Cache checker `checkCache()`
@@ -157,12 +155,6 @@ type
     GetVtxNotFound

-    # Update `Merkle` hashes `hashify()`
-    HashifyVtxUnresolved
-    HashifyRootVtxUnresolved
-    HashifyProofHashMismatch

     # Path function `hikeUp()`
     HikeBranchMissingEdge
     HikeBranchTailEmpty

View File

@@ -82,9 +82,8 @@ type
   SavedState* = object
     ## Last saved state
-    src*: HashKey                      ## Previous state hash
-    trg*: HashKey                      ## Last state hash
-    serial*: uint64                    ## Generic identifier froom application
+    key*: Hash256                      ## Some state hash (if any)
+    serial*: uint64                    ## Generic identifier from application

   LayerDeltaRef* = ref object
     ## Delta layers are stacked implying a tables hierarchy. Table entries on
@@ -109,7 +108,6 @@ type
     ## tables. So a corresponding zero value or missing entry produces an
     ## inconsistent state that must be resolved.
     ##
-    src*: HashKey                      ## Only needed when used as a filter
     sTab*: Table[VertexID,VertexRef]   ## Structural vertex table
     kMap*: Table[VertexID,HashKey]     ## Merkle hash key mapping
     vTop*: VertexID                    ## Last used vertex ID
@@ -124,7 +122,6 @@ type
     ##
     pPrf*: HashSet[VertexID]           ## Locked vertices (proof nodes)
     fRpp*: Table[HashKey,VertexID]     ## Key lookup for `pPrf[]` (proof nodes)
-    dirty*: HashSet[VertexID]          ## Start nodes to re-hashiy from

   LayerRef* = ref LayerObj
   LayerObj* = object
@@ -276,8 +273,7 @@ func dup*(final: LayerFinalRef): LayerFinalRef =
   ## Duplicate final layer.
   LayerFinalRef(
     pPrf:  final.pPrf,
-    fRpp:  final.fRpp,
-    dirty: final.dirty)
+    fRpp:  final.fRpp)

 func dup*(wp: VidVtxPair): VidVtxPair =
   ## Safe copy of `wp` argument

View File

@@ -17,7 +17,7 @@ import
   std/typetraits,
   eth/common,
   results,
-  "."/[aristo_desc, aristo_get, aristo_hashify, aristo_hike]
+  "."/[aristo_compute, aristo_desc, aristo_get, aristo_hike]

 # ------------------------------------------------------------------------------
 # Private functions

View File

@@ -29,9 +29,6 @@ func dup(sTab: Table[VertexID,VertexRef]): Table[VertexID,VertexRef] =
 # Public getters: lazy value lookup for read only versions
 # ------------------------------------------------------------------------------

-func dirty*(db: AristoDbRef): lent HashSet[VertexID] =
-  db.top.final.dirty

 func pPrf*(db: AristoDbRef): lent HashSet[VertexID] =
   db.top.final.pPrf
@@ -85,13 +82,10 @@ func layersGetKey*(db: AristoDbRef; vid: VertexID): Opt[HashKey] =
   ## hash key if it is stored on the cache that way.
   ##
   db.top.delta.kMap.withValue(vid, item):
-    # This is ok regardless of the `dirty` flag. If this vertex has become
-    # dirty, there is an empty `kMap[]` entry on this layer.
     return Opt.some(item[])

   for w in db.rstack:
     w.delta.kMap.withValue(vid, item):
-      # Same reasoning as above regarding the `dirty` flag.
       return ok(item[])

   Opt.none(HashKey)
@@ -126,7 +120,6 @@ func layersPutVtx*(
 ) =
   ## Store a (potentially empty) vertex on the top layer
   db.top.delta.sTab[vid] = vtx
-  # db.top.final.dirty.incl root

 func layersResVtx*(
     db: AristoDbRef;
@@ -146,7 +139,6 @@ func layersPutKey*(
 ) =
   ## Store a (potentially void) hash key on the top layer
   db.top.delta.kMap[vid] = key
-  # db.top.final.dirty.incl root # Modified top cache layers => hashify

 func layersResKey*(db: AristoDbRef; root: VertexID; vid: VertexID) =
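The lookup pattern above (writable top layer first, then the read-only stack) can be sketched generically. `Layer`, `layersGetKeySketch` and the string `HashKey` are assumptions of this example, with `Option` standing in for the module's `Opt`/`Result` types.

import std/[options, tables]

type
  VertexID = uint64
  HashKey = string                   # stand-in; "" models a void key
  Layer = ref object
    kMap: Table[VertexID, HashKey]

proc layersGetKeySketch(
    top: Layer;
    rstack: openArray[Layer];
    vid: VertexID;
      ): Option[HashKey] =
  ## Newest layer wins: a void ("") entry left by a reset shadows any
  ## older key further down, which is what makes lazy re-hashing work.
  if top.kMap.hasKey vid:
    return some(top.kMap[vid])
  for w in rstack:
    if w.kMap.hasKey vid:
      return some(w.kMap[vid])
  none(HashKey)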

View File

@@ -126,7 +126,7 @@ proc mergeStorageData*(
     rc = db.mergePayloadImpl(useID, stoPath, pyl)

   if rc.isOk:
-    # Mark account path for update for `hashify()`
+    # Mark account path Merkle keys for update
     db.updateAccountForHasher accHike

     if stoID.isValid:
@@ -144,7 +144,7 @@ proc mergeStorageData*(
     assert stoID.isValid # debugging only
     return ok()

-  # Error: mark account path for update for `hashify()`
+  # Error: mark account path Merkle keys for update
   db.updateAccountForHasher accHike
   err(rc.error)

View File

@@ -16,8 +16,7 @@

 import
   eth/common,
   results,
-  "."/[aristo_constants, aristo_desc, aristo_hashify, aristo_init,
-    aristo_merge]
+  "."/[aristo_compute, aristo_constants, aristo_desc, aristo_init, aristo_merge]
# ------------------------------------------------------------------------------
# Public functions, signature generator

View File

@@ -16,7 +16,7 @@

 import
   std/tables,
   results,
-  ".."/[aristo_desc, aristo_layers, aristo_hashify]
+  ".."/[aristo_desc, aristo_layers]

 func txFrameIsTop*(tx: AristoTxRef): bool

View File

@@ -17,7 +17,7 @@ import
   std/tables,
   results,
   ../aristo_delta/delta_merge,
-  ".."/[aristo_desc, aristo_get, aristo_delta, aristo_layers, aristo_hashify]
+  ".."/[aristo_desc, aristo_get, aristo_delta, aristo_layers]
@@ -70,9 +70,6 @@ proc topMerge(db: AristoDbRef; src: HashKey): Result[void,AristoError] =
     else:
       return err(rc.error)

-  # Update layer for merge call
-  db.top.delta.src = src

   # This one will return the `db.top.delta` if `db.balancer.isNil`
   db.balancer = db.deltaMerge(db.top.delta, db.balancer, ubeRoot).valueOr:
     return err(error[1])

View File

@@ -23,58 +23,6 @@ import
 # Public functions, converters
 # ------------------------------------------------------------------------------

-proc toAccount*(
-    payload: PayloadRef;
-    db: AristoDbRef;
-      ): Result[Account,AristoError] =
-  ## Converts the argument `payload` to an `Account` type. If the implied
-  ## account das a storage slots system associated, the database `db` must
-  ## contain the Merkle hash key of the root vertex.
-  if payload.pType == AccountData:
-    var acc = Account(
-      nonce:       payload.account.nonce,
-      balance:     payload.account.balance,
-      codeHash:    payload.account.codeHash,
-      storageRoot: EMPTY_ROOT_HASH)
-    if payload.stoID.isValid:
-      acc.storageRoot = (? db.getKeyRc payload.stoID).to(Hash256)
-    return ok(acc)
-  err UtilsPayloadTypeUnsupported
-
-proc toAccount*(
-    vtx: VertexRef;
-    db: AristoDbRef;
-      ): Result[Account,AristoError] =
-  ## Variant of `toAccount()` for a `Leaf` vertex.
-  if vtx.isValid and vtx.vType == Leaf:
-    return vtx.lData.toAccount db
-  err UtilsAccVtxUnsupported
-
-proc toAccount*(
-    node: NodeRef;
-      ): Result[Account,AristoError] =
-  ## Variant of `toAccount()` for a `Leaf` node which must be complete (i.e.
-  ## a potential Merkle hash key must have been initialised.)
-  if node.isValid and node.vType == Leaf:
-    if node.lData.pType == AccountData:
-      var acc = Account(
-        nonce:       node.lData.account.nonce,
-        balance:     node.lData.account.balance,
-        codeHash:    node.lData.account.codeHash,
-        storageRoot: EMPTY_ROOT_HASH)
-      if node.lData.stoID.isValid:
-        if not node.key[0].isValid:
-          return err(UtilsAccStorageKeyMissing)
-        acc.storageRoot = node.key[0].to(Hash256)
-      return ok(acc)
-    else:
-      return err(UtilsPayloadTypeUnsupported)
-  err UtilsAccNodeUnsupported
-
-# ---------------------

 proc toNode*(
     vtx: VertexRef;                    # Vertex to convert
     db: AristoDbRef;                   # Database, top layer
@@ -176,18 +124,13 @@ proc updateAccountForHasher*(
     db: AristoDbRef;                   # Database
     hike: Hike;                        # Return value from `retrieveStorageID()`
       ) =
-  ## For a successful run of `retrieveStoAccHike()`, the argument `hike` is
-  ## used to mark/reset the keys along the `accPath` for being re-calculated
-  ## by `hashify()`.
+  ## The argument `hike` is used to mark/reset the keys along the implied
+  ## vertex path for being re-calculated.
   ##
-  # Clear Merkle keys so that `hasify()` can calculate the re-hash forest/tree
   for w in hike.legs.mapIt(it.wp.vid):
     db.layersResKey(hike.root, w)

-  # Signal to `hashify()` where to start rebuilding Merkel hashes
-  # db.top.final.dirty.incl hike.root
-  # db.top.final.dirty.incl hike.legs[^1].wp.vid

 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
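A hedged sketch of the key-reset step: `resetPathKeys` and the flat `kMap` are illustrative only, whereas the real `updateAccountForHasher()` walks `hike.legs` and calls `db.layersResKey()` per vertex.

import std/tables

type
  VertexID = uint64
  HashKey = string                   # "" models a void key

proc resetPathKeys(
    kMap: var Table[VertexID, HashKey];
    path: openArray[VertexID];
      ) =
  ## Void the cached Merkle keys along an account path so that a later
  ## `computeKey()` run recalculates them bottom-up.
  for vid in path:
    kMap[vid] = ""                   # void entry shadows older layers

when isMainModule:
  var kMap = {7'u64: "deadbeef"}.toTable
  resetPathKeys(kMap, [1'u64, 7, 9])
  doAssert kMap[7] == ""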

View File

@@ -243,12 +243,11 @@ func toAristo*(mBe: CoreDbAccBackendRef): AristoDbRef =

 proc toAristoSavedStateBlockNumber*(
     mBe: CoreDbMptBackendRef;
-      ): tuple[stateRoot: Hash256, blockNumber: BlockNumber] =
+      ): BlockNumber =
   if not mBe.isNil and mBe.parent.isAristo:
     let rc = mBe.parent.AristoCoreDbRef.adbBase.getSavedState()
     if rc.isOk:
-      return (rc.value.src.to(Hash256), rc.value.serial.BlockNumber)
-  (EMPTY_ROOT_HASH, 0.BlockNumber)
+      return rc.value.serial.BlockNumber
# ------------------------------------------------------------------------------
# Public aristo iterators

View File

@@ -334,20 +334,24 @@ proc getSavedStateBlockNumber*(
   ##
   ## This function verifies the state consistency of the database and throws
   ## an assert exception if that fails. So the function will only apply to a
-  ## finalised (aka hashified) database state. For an an opportunistic use,
-  ## the `relax` argument can be set `true` so this function also returns
-  ## zero if the state consistency check fails.
+  ## saved database state. For an opportunistic use, the `relax` argument
+  ## can be set `true` so this function also returns the block number if the
+  ## state consistency check fails.
   ##
   const info = "getSavedStateBlockNumber(): "
-  var header: BlockHeader
-  let st = db.ctx.getColumn(CtGeneric).backend.toAristoSavedStateBlockNumber()
-  if db.getBlockHeader(st.blockNumber, header):
-    let state = db.ctx.getAccounts.state.valueOr:
-      raiseAssert info & $$error
-    if state == header.stateRoot:
-      return st.blockNumber
-    if not relax:
-      raiseAssert info & ": state mismatch at " & "#" & $st.blockNumber
+  # FIXME: The following construct will be replaced by a proper
+  #        `CoreDb` method.
+  let bn = db.ctx.getColumn(CtGeneric).backend.toAristoSavedStateBlockNumber()
+  if relax:
+    return bn
+  else:
+    var header: BlockHeader
+    if db.getBlockHeader(bn, header):
+      let state = db.ctx.getAccounts.state(updateOk=true).valueOr:
+        raiseAssert info & $$error
+      if state != header.stateRoot:
+        raiseAssert info & ": state mismatch at " & "#" & $result
+      return bn

 proc getBlockHeader*(
     db: CoreDbRef;
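The relax/verify split reduces to the following sketch; all names and the direct hash comparison are assumptions here, while the real code fetches the header and recomputes the account state via `CoreDb`.

type
  BlockNumber = uint64
  Hash256 = array[32, byte]

proc savedBlockNumberSketch(
    savedBn: BlockNumber;            # from the saved state record
    headerStateRoot: Hash256;        # from the block header at `savedBn`
    recomputedState: Hash256;        # freshly computed account state
    relax: bool;
      ): BlockNumber =
  ## With `relax` the consistency check is skipped; otherwise a mismatch
  ## is fatal, mirroring the `raiseAssert` above.
  if relax:
    return savedBn
  doAssert recomputedState == headerStateRoot,
    "state mismatch at #" & $savedBn
  savedBn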

View File

@@ -12,7 +12,6 @@
 ##

 import
   std/sets,
-  eth/common,
   results,
   unittest2,
@@ -23,7 +22,6 @@ import
     aristo_debug,
     aristo_desc,
     aristo_get,
-    aristo_layers,
     aristo_persistent,
     aristo_tx],
   ../replay/xcheck,
@@ -170,8 +168,7 @@ proc isDbEq(a, b: LayerDeltaRef; db: AristoDbRef; noisy = true): bool =
   if b.isNil:
     return false
   if unsafeAddr(a[]) != unsafeAddr(b[]):
-    if a.src != b.src or
-       a.kMap.getOrVoid(testRootVid) != b.kMap.getOrVoid(testRootVid) or
+    if a.kMap.getOrVoid(testRootVid) != b.kMap.getOrVoid(testRootVid) or
        a.vTop != b.vTop:
       return false

View File

@@ -155,9 +155,6 @@ proc saveToBackend(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0

   block:
     let rc = db.txTop()
     xCheckRc rc.error == 0
@@ -175,9 +172,6 @@ proc saveToBackend(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0

   block:
     let rc = db.txTop()
     xCheckErr rc.value.level < 0 # force error
@@ -213,9 +207,6 @@ proc saveToBackendWithOops(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0

   block:
     let rc = db.txTop()
     xCheckRc rc.error == 0
@@ -229,9 +220,6 @@ proc saveToBackendWithOops(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0

   block:
     let rc = db.txTop()
     xCheckErr rc.value.level < 0 # force error

View File

@@ -87,7 +87,7 @@ let
   mainTest0m* = mainSample
     .cloneWith(
       name = "-am-some",
-      numBlocks = 5) # 1_000)
+      numBlocks = 1_000)

   mainTest1m* = mainSample
     .cloneWith(