Mirror of https://github.com/status-im/nimbus-eth1.git (synced 2025-01-12 21:34:33 +00:00)
Some cleanups (#2428)
* Remove `dirty` set from structural objects

  why: Not used anymore, the tree is dirty by default.

* Rename `aristo_hashify` -> `aristo_compute`

* Remove cruft, update comments, cosmetics, etc.

* Simplify `SavedState` object

  why: The key chaining has become obsolete after extra lazy hashing.
  There is some available space for a state hash to be maintained in
  future.

  details: Accept the legacy `SavedState` object serialisation format
  for a while (it will be overwritten by the new format).
This commit is contained in:
parent
14c3772545
commit
8dd038144b
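For orientation, the simplified `SavedState` object after this commit carries just one optional state hash and a serial number. The shape below is copied from the `aristo_desc` hunk further down (`Hash256` comes from `eth/common`):

  SavedState* = object
    ## Last saved state
    key*: Hash256                      ## Some state hash (if any)
    serial*: uint64                    ## Generic identifier from application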
@@ -358,12 +358,10 @@ assumed, i.e. the list with the single vertex ID *1*.

 ### 4.7 Serialisation of a last saved state record

         0 +--+--+--+--+--+ .. --+--+ .. --+
-          |                               | -- 32 bytes source state hash
+          |                               | -- 32 bytes state hash
        32 +--+--+--+--+--+ .. --+--+ .. --+
-          |                               | -- 32 bytes target state hash
-       64 +--+--+--+--+--+ .. --+--+ .. --+
           |                               | -- state number/block number
-       72 +--+--+--+--+--+--+--+--+
+       40 +--+--+--+--+--+--+--+--+
           |                      | -- marker(8), 0x7f
           +--+
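As a worked example of this 41-byte layout (32-byte state hash, 8-byte big-endian state/block number, 1-byte marker), here is a minimal, self-contained Nim sketch. `StateRecord` and `encode` are illustrative stand-ins, not the Aristo API; the real code path is `SavedState`/`blobifyTo`, shown in the next hunk:

  type
    StateRecord = object
      key: array[32, byte]    # bytes 0..31: state hash
      serial: uint64          # bytes 32..39: state/block number

  proc encode(r: StateRecord): seq[byte] =
    result.add r.key                          # 32 bytes state hash
    for i in countdown(7, 0):                 # 8 bytes serial, big endian
      result.add byte((r.serial shr (8 * i)) and 0xff)
    result.add 0x7f'u8                        # 1 byte marker

  when isMainModule:
    let blob = StateRecord(serial: 1_234_567'u64).encode
    doAssert blob.len == 41 and blob[^1] == 0x7f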
@@ -152,21 +152,7 @@ proc blobify*(tuv: VertexID): Blob =

 proc blobifyTo*(lSst: SavedState; data: var Blob): Result[void,AristoError] =
   ## Serialise a last saved state record
-  case lSst.src.len:
-  of 0:
-    data.setLen(32)
-  of 32:
-    data.setLen(0)
-    data.add lSst.src.data
-  else:
-    return err(BlobifyStateSrcLenGarbled)
-  case lSst.trg.len:
-  of 0:
-    data.setLen(64)
-  of 32:
-    data.add lSst.trg.data
-  else:
-    return err(BlobifyStateTrgLenGarbled)
+  data.add lSst.key.data
   data.add lSst.serial.toBytesBE
   data.add @[0x7fu8]
   ok()
@@ -352,17 +338,20 @@ proc deblobifyTo*(
       ): Result[void,AristoError] =
   ## De-serialise the last saved state data record previously encoded with
   ## `blobify()`.
-  if data.len != 73:
+  # Keep that legacy setting for a while
+  if data.len == 73:
+    if data[^1] != 0x7f:
+      return err(DeblobWrongType)
+    lSst.key = EMPTY_ROOT_HASH
+    lSst.serial = uint64.fromBytesBE data.toOpenArray(64, 71)
+    return ok()
+
+  # -----
+
+  if data.len != 41:
     return err(DeblobWrongSize)
   if data[^1] != 0x7f:
     return err(DeblobWrongType)
-  func loadHashKey(data: openArray[byte]): Result[HashKey,AristoError] =
-    var w = HashKey.fromBytes(data).valueOr:
-      return err(DeblobHashKeyExpected)
-    ok move(w)
-  lSst.src = ? data.toOpenArray(0, 31).loadHashKey()
-  lSst.trg = ? data.toOpenArray(32, 63).loadHashKey()
-  lSst.serial = uint64.fromBytesBE data.toOpenArray(64, 71)
+  (addr lSst.key.data[0]).copyMem(unsafeAddr data[0], 32)
+  lSst.serial = uint64.fromBytesBE data.toOpenArray(32, 39)
   ok()

 proc deblobify*(
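The decoder above accepts both record sizes: the 73-byte legacy record keeps its serial at bytes 64..71, the new 41-byte record at bytes 32..39. A self-contained sketch of that dual-format logic, with hypothetical helpers `readSerial` and `decode` (not Aristo API):

  proc readSerial(data: openArray[byte]): uint64 =
    for b in data:                      # big-endian fold over the 8 bytes
      result = (result shl 8) or uint64(b)

  proc decode(data: openArray[byte]): tuple[ok: bool, serial: uint64] =
    if data.len == 73:                  # legacy record
      if data[^1] != 0x7f: return (false, 0'u64)
      return (true, readSerial(data.toOpenArray(64, 71)))
    if data.len != 41 or data[^1] != 0x7f:
      return (false, 0'u64)             # not a valid new-format record
    (true, readSerial(data.toOpenArray(32, 39)))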
@@ -14,7 +14,7 @@
 {.push raises: [].}

 import
-  std/[algorithm, sequtils, sets, tables],
+  std/[algorithm, sequtils, tables],
   eth/common,
   stew/interval_set,
   results,
@@ -31,9 +31,9 @@ proc checkTop*(
     proofMode = false;                 # Has proof nodes
       ): Result[void,(VertexID,AristoError)] =
   ## Verify that the cache structure is correct as it would be after `merge()`
-  ## and `hashify()` operations. Unless `proofMode` is set `true` it would not
-  ## fully check against the backend, which is typically not applicable after
-  ## `delete()` operations.
+  ## operations. Unless `proofMode` is set `true` it would not fully check
+  ## against the backend, which is typically not applicable after `delete()`
+  ## operations.
   ##
   ## The following is verified:
   ##
@@ -135,14 +135,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
   # Check layer cache against backend
   if cache:
     var topVidCache = VertexID(0)

-    let checkKeysOk = block:
-      if db.dirty.len == 0:
-        true
-      elif relax:
-        false
-      else:
-        return err((VertexID(0),CheckBeCacheIsDirty))
+    let checkKeysOk = true

     # Check structural table
     for (vid,vtx) in db.layersWalkVtx:
@@ -42,7 +42,7 @@ proc checkTopStrict*(
       if key != node.digestTo(HashKey):
         return err((vid,CheckStkVtxKeyMismatch))

-      elif db.dirty.len == 0 or db.layersGetKey(vid).isErr:
+      elif db.layersGetKey(vid).isErr:
         # So `vtx` exists but not `key`, so cache is supposed dirty and the
         # vertex has a zero entry.
         return err((vid,CheckStkVtxKeyMissing))
@@ -11,13 +11,10 @@
 {.push raises: [].}

 import
-  chronicles,
   eth/common,
   results,
   "."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise]

-logScope:
-  topics = "aristo-hashify"
-
 proc computeKey*(
     db: AristoDbRef;                   # Database, top layer
@@ -401,7 +401,6 @@ proc ppFilter(
   if fl.isNil:
     result &= " n/a"
     return
-  result &= pfx & "src=" & fl.src.ppKey(db)
   result &= pfx & "vTop=" & fl.vTop.ppVid
   result &= pfx & "sTab" & pfx1 & "{"
   for n,vid in fl.sTab.sortedKeys:
@@ -521,11 +520,6 @@ proc ppLayer(
         tLen = layer.final.fRpp.len
         info = "fRpp(" & $tLen & ")"
       result &= info.doPrefix(0 < tLen) & layer.final.fRpp.ppFRpp(db,indent+2)
-    if 0 < nOKs:
-      let
-        info = if layer.final.dirty.len == 0: "clean"
-               else: "dirty" & layer.final.dirty.ppVids
-      result &= info.doPrefix(false)

 # ------------------------------------------------------------------------------
 # Public functions
@@ -429,7 +429,7 @@ proc deleteStorageData*(
       return err(DelPathNotFound)
     return err(error[1])

-  # Mark account path for update for `hashify()`
+  # Mark account path Merkle keys for update
   db.updateAccountForHasher accHike

   db.deleteImpl(stoHike).isOkOr:
@@ -464,7 +464,7 @@ proc deleteStorageTree*(
   if not stoID.isValid:
     return err(DelStoRootMissing)

-  # Mark account path for update for `hashify()`
+  # Mark account path Merkle keys for update
   db.updateAccountForHasher accHike

   ? db.delSubTreeImpl stoID
@@ -69,8 +69,7 @@ proc deltaPersistent*(
   defer: updateSiblings.rollback()

   let lSst = SavedState(
-    src: db.balancer.src,
-    trg: db.balancer.kMap.getOrVoid(VertexID 1),
+    key: EMPTY_ROOT_HASH, # placeholder for more
     serial: nxtFid)

   # Store structural single trie entries
@@ -30,43 +30,22 @@ proc deltaMerge*(
   ## stacked and the database access is `upper -> lower -> backend` whereas
   ## the `src/trg` matching logic goes the other way round.
   ##
-  ## The resuting filter has no `FilterID` set.
-  ##
-  ## Comparing before and after merge
-  ## ::
-  ##   arguments                       | merged result
-  ##   --------------------------------+------------------------------------
-  ##   (src2==trg1) --> upper --> trg2 |
-  ##                                   | (src1==trg0) --> newFilter --> trg2
-  ##   (src1==trg0) --> lower --> trg1 |
-  ##                                   |
-  ##   beStateRoot --> trg0            |
-  ##
   # Degenerate case: `upper` is void
   if lower.isNil:
     if upper.isNil:
       # Even more degenerate case when both filters are void
       return ok LayerDeltaRef(nil)
-    if upper.src != beStateRoot:
-      return err((VertexID(1),FilStateRootMismatch))
     return ok(upper)

   # Degenerate case: `upper` is non-trivial and `lower` is void
   if upper.isNil:
-    if lower.src != beStateRoot:
-      return err((VertexID(0), FilStateRootMismatch))
     return ok(lower)

   # Verify stackability
   let lowerTrg = lower.kMap.getOrVoid VertexID(1)
-  if upper.src != lowerTrg:
-    return err((VertexID(0), FilTrgSrcMismatch))
-  if lower.src != beStateRoot:
-    return err((VertexID(0), FilStateRootMismatch))

   # There is no need to deep copy table vertices as they will not be modified.
   let newFilter = LayerDeltaRef(
-    src: lower.src,
     sTab: lower.sTab,
     kMap: lower.kMap,
     vTop: upper.vTop)
@@ -95,11 +74,6 @@ proc deltaMerge*(
     else:
       return err((vid,rc.error))

-  # # Check consistency
-  # if (newFilter.src == newFilter.kMap.getOrVoid(VertexID 1)) !=
-  #    (newFilter.sTab.len == 0 and newFilter.kMap.len == 0):
-  #   return err((VertexID(0),FilSrcTrgInconsistent))
-
   ok newFilter

 # ------------------------------------------------------------------------------
@@ -29,7 +29,7 @@ proc revFilter*(
   ## backend (excluding optionally installed read-only filter.)
   ##
   # Register MPT state roots for reverting back
-  let rev = LayerDeltaRef(src: filter.kMap.getOrVoid(VertexID 1))
+  let rev = LayerDeltaRef()

   # Get vid generator state on backend
   block:
@@ -23,8 +23,6 @@ type
     BlobifyLeafPathOverflow
     BlobifyNilFilter
     BlobifyNilVertex
-    BlobifyStateSrcLenGarbled
-    BlobifyStateTrgLenGarbled


     # Cache checker `checkCache()`
@@ -157,12 +155,6 @@ type
     GetVtxNotFound


-    # Update `Merkle` hashes `hashify()`
-    HashifyVtxUnresolved
-    HashifyRootVtxUnresolved
-    HashifyProofHashMismatch
-
-
     # Path function `hikeUp()`
     HikeBranchMissingEdge
     HikeBranchTailEmpty
@@ -82,9 +82,8 @@ type

   SavedState* = object
     ## Last saved state
-    src*: HashKey                      ## Previous state hash
-    trg*: HashKey                      ## Last state hash
-    serial*: uint64                    ## Generic identifier froom application
+    key*: Hash256                      ## Some state hash (if any)
+    serial*: uint64                    ## Generic identifier from application

   LayerDeltaRef* = ref object
     ## Delta layers are stacked implying a tables hierarchy. Table entries on
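The new shape lines up with the serialisation above: 32 bytes of `key` plus 8 bytes of big-endian `serial` plus the 1-byte `0x7f` marker give exactly the 41-byte record that `deblobifyTo` checks for.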
@@ -109,7 +108,6 @@ type
     ## tables. So a corresponding zero value or missing entry produces an
     ## inconsistent state that must be resolved.
     ##
-    src*: HashKey                      ## Only needed when used as a filter
     sTab*: Table[VertexID,VertexRef]   ## Structural vertex table
     kMap*: Table[VertexID,HashKey]     ## Merkle hash key mapping
     vTop*: VertexID                    ## Last used vertex ID
@@ -124,7 +122,6 @@ type
     ##
     pPrf*: HashSet[VertexID]           ## Locked vertices (proof nodes)
     fRpp*: Table[HashKey,VertexID]     ## Key lookup for `pPrf[]` (proof nodes)
-    dirty*: HashSet[VertexID]          ## Start nodes to re-hashiy from

   LayerRef* = ref LayerObj
   LayerObj* = object
@@ -276,8 +273,7 @@ func dup*(final: LayerFinalRef): LayerFinalRef =
   ## Duplicate final layer.
   LayerFinalRef(
     pPrf:  final.pPrf,
-    fRpp:  final.fRpp,
-    dirty: final.dirty)
+    fRpp:  final.fRpp)

 func dup*(wp: VidVtxPair): VidVtxPair =
   ## Safe copy of `wp` argument
@@ -17,7 +17,7 @@ import
   std/typetraits,
   eth/common,
   results,
-  "."/[aristo_desc, aristo_get, aristo_hashify, aristo_hike]
+  "."/[aristo_compute, aristo_desc, aristo_get, aristo_hike]

 # ------------------------------------------------------------------------------
 # Private functions
@@ -29,9 +29,6 @@ func dup(sTab: Table[VertexID,VertexRef]): Table[VertexID,VertexRef] =
 # Public getters: lazy value lookup for read only versions
 # ------------------------------------------------------------------------------

-func dirty*(db: AristoDbRef): lent HashSet[VertexID] =
-  db.top.final.dirty
-
 func pPrf*(db: AristoDbRef): lent HashSet[VertexID] =
   db.top.final.pPrf

@@ -85,13 +82,10 @@ func layersGetKey*(db: AristoDbRef; vid: VertexID): Opt[HashKey] =
   ## hash key if it is stored on the cache that way.
   ##
   db.top.delta.kMap.withValue(vid, item):
-    # This is ok regardless of the `dirty` flag. If this vertex has become
-    # dirty, there is an empty `kMap[]` entry on this layer.
     return Opt.some(item[])

   for w in db.rstack:
     w.delta.kMap.withValue(vid, item):
-      # Same reasoning as above regarding the `dirty` flag.
       return ok(item[])

   Opt.none(HashKey)
@@ -126,7 +120,6 @@ func layersPutVtx*(
       ) =
   ## Store a (potentally empty) vertex on the top layer
   db.top.delta.sTab[vid] = vtx
-  # db.top.final.dirty.incl root

 func layersResVtx*(
     db: AristoDbRef;
@@ -146,7 +139,6 @@ func layersPutKey*(
       ) =
   ## Store a (potentally void) hash key on the top layer
   db.top.delta.kMap[vid] = key
-  # db.top.final.dirty.incl root # Modified top cache layers => hashify


 func layersResKey*(db: AristoDbRef; root: VertexID; vid: VertexID) =
@@ -126,7 +126,7 @@ proc mergeStorageData*(
     rc = db.mergePayloadImpl(useID, stoPath, pyl)

   if rc.isOk:
-    # Mark account path for update for `hashify()`
+    # Mark account path Merkle keys for update
     db.updateAccountForHasher accHike

     if stoID.isValid:
@@ -144,7 +144,7 @@ proc mergeStorageData*(
     assert stoID.isValid # debugging only
     return ok()

-  # Error: mark account path for update for `hashify()`
+  # Error: mark account path Merkle keys for update
   db.updateAccountForHasher accHike
   err(rc.error)

@@ -16,8 +16,7 @@
 import
   eth/common,
   results,
-  "."/[aristo_constants, aristo_desc, aristo_hashify, aristo_init,
-       aristo_merge]
+  "."/[aristo_compute, aristo_constants, aristo_desc, aristo_init, aristo_merge]

 # ------------------------------------------------------------------------------
 # Public functions, signature generator
@@ -16,7 +16,7 @@
 import
   std/tables,
   results,
-  ".."/[aristo_desc, aristo_layers, aristo_hashify]
+  ".."/[aristo_desc, aristo_layers]

 func txFrameIsTop*(tx: AristoTxRef): bool
@@ -17,7 +17,7 @@ import
   std/tables,
   results,
   ../aristo_delta/delta_merge,
-  ".."/[aristo_desc, aristo_get, aristo_delta, aristo_layers, aristo_hashify]
+  ".."/[aristo_desc, aristo_get, aristo_delta, aristo_layers]

 # ------------------------------------------------------------------------------
 # Private functions
@@ -70,9 +70,6 @@ proc topMerge(db: AristoDbRef; src: HashKey): Result[void,AristoError] =
     else:
       return err(rc.error)

-  # Update layer for merge call
-  db.top.delta.src = src
-
   # This one will return the `db.top.delta` if `db.balancer.isNil`
   db.balancer = db.deltaMerge(db.top.delta, db.balancer, ubeRoot).valueOr:
     return err(error[1])
@@ -23,58 +23,6 @@ import
 # Public functions, converters
 # ------------------------------------------------------------------------------

-proc toAccount*(
-    payload: PayloadRef;
-    db: AristoDbRef;
-      ): Result[Account,AristoError] =
-  ## Converts the argument `payload` to an `Account` type. If the implied
-  ## account das a storage slots system associated, the database `db` must
-  ## contain the Merkle hash key of the root vertex.
-  if payload.pType == AccountData:
-    var acc = Account(
-      nonce:       payload.account.nonce,
-      balance:     payload.account.balance,
-      codeHash:    payload.account.codeHash,
-      storageRoot: EMPTY_ROOT_HASH)
-    if payload.stoID.isValid:
-      acc.storageRoot = (? db.getKeyRc payload.stoID).to(Hash256)
-    return ok(acc)
-
-  err UtilsPayloadTypeUnsupported
-
-proc toAccount*(
-    vtx: VertexRef;
-    db: AristoDbRef;
-      ): Result[Account,AristoError] =
-  ## Variant of `toAccount()` for a `Leaf` vertex.
-  if vtx.isValid and vtx.vType == Leaf:
-    return vtx.lData.toAccount db
-  err UtilsAccVtxUnsupported
-
-proc toAccount*(
-    node: NodeRef;
-      ): Result[Account,AristoError] =
-  ## Variant of `toAccount()` for a `Leaf` node which must be complete (i.e.
-  ## a potential Merkle hash key must have been initialised.)
-  if node.isValid and node.vType == Leaf:
-    if node.lData.pType == AccountData:
-      var acc = Account(
-        nonce:       node.lData.account.nonce,
-        balance:     node.lData.account.balance,
-        codeHash:    node.lData.account.codeHash,
-        storageRoot: EMPTY_ROOT_HASH)
-      if node.lData.stoID.isValid:
-        if not node.key[0].isValid:
-          return err(UtilsAccStorageKeyMissing)
-        acc.storageRoot = node.key[0].to(Hash256)
-      return ok(acc)
-    else:
-      return err(UtilsPayloadTypeUnsupported)
-
-  err UtilsAccNodeUnsupported
-
-# ---------------------
-
 proc toNode*(
     vtx: VertexRef;                   # Vertex to convert
     db: AristoDbRef;                  # Database, top layer
@@ -176,18 +124,13 @@ proc updateAccountForHasher*(
     db: AristoDbRef;                   # Database
     hike: Hike;                        # Return value from `retrieveStorageID()`
       ) =
-  ## For a successful run of `retrieveStoAccHike()`, the argument `hike` is
-  ## used to mark/reset the keys along the `accPath` for being re-calculated
-  ## by `hashify()`.
+  ## The argument `hike` is used to mark/reset the keys along the implied
+  ## vertex path for being re-calculated.
   ##
   # Clear Merkle keys so that `hasify()` can calculate the re-hash forest/tree
   for w in hike.legs.mapIt(it.wp.vid):
     db.layersResKey(hike.root, w)

-  # Signal to `hashify()` where to start rebuilding Merkel hashes
-  # db.top.final.dirty.incl hike.root
-  # db.top.final.dirty.incl hike.legs[^1].wp.vid
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
@@ -243,12 +243,11 @@ func toAristo*(mBe: CoreDbAccBackendRef): AristoDbRef =

 proc toAristoSavedStateBlockNumber*(
     mBe: CoreDbMptBackendRef;
-      ): tuple[stateRoot: Hash256, blockNumber: BlockNumber] =
+      ): BlockNumber =
   if not mBe.isNil and mBe.parent.isAristo:
     let rc = mBe.parent.AristoCoreDbRef.adbBase.getSavedState()
     if rc.isOk:
-      return (rc.value.src.to(Hash256), rc.value.serial.BlockNumber)
-  (EMPTY_ROOT_HASH, 0.BlockNumber)
+      return rc.value.serial.BlockNumber

 # ------------------------------------------------------------------------------
 # Public aristo iterators
@@ -334,20 +334,24 @@ proc getSavedStateBlockNumber*(
   ##
   ## This function verifies the state consistency of the database and throws
   ## an assert exception if that fails. So the function will only apply to a
-  ## finalised (aka hashified) database state. For an an opportunistic use,
-  ## the `relax` argument can be set `true` so this function also returns
-  ## zero if the state consistency check fails.
+  ## saved database state. For an an opportunistic use, the `relax` argument
+  ## can be set `true` so this function also returns the block number if the
+  ## state consistency check fails.
   ##
   const info = "getSavedStateBlockNumber(): "
-  var header: BlockHeader
-  let st = db.ctx.getColumn(CtGeneric).backend.toAristoSavedStateBlockNumber()
-  if db.getBlockHeader(st.blockNumber, header):
-    let state = db.ctx.getAccounts.state.valueOr:
-      raiseAssert info & $$error
-    if state == header.stateRoot:
-      return st.blockNumber
-    if not relax:
-      raiseAssert info & ": state mismatch at " & "#" & $st.blockNumber
+  # FIXME: This construct following will be replaced by a proper
+  #        `CoreDb` method.
+  let bn = db.ctx.getColumn(CtGeneric).backend.toAristoSavedStateBlockNumber()
+  if relax:
+    return bn
+  else:
+    var header: BlockHeader
+    if db.getBlockHeader(bn, header):
+      let state = db.ctx.getAccounts.state(updateOk=true).valueOr:
+        raiseAssert info & $$error
+      if state != header.stateRoot:
+        raiseAssert info & ": state mismatch at " & "#" & $result
+      return bn

 proc getBlockHeader*(
     db: CoreDbRef;
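A hypothetical caller, given some `CoreDbRef` instance `db`, illustrating the `relax` switch described in the doc comment above:

  let bn = db.getSavedStateBlockNumber(relax = true)  # skip the state-root consistency assert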
@@ -12,7 +12,6 @@
 ##

 import
-  std/sets,
   eth/common,
   results,
   unittest2,
@@ -23,7 +22,6 @@ import
   aristo_debug,
   aristo_desc,
   aristo_get,
-  aristo_layers,
   aristo_persistent,
   aristo_tx],
 ../replay/xcheck,
@@ -170,8 +168,7 @@ proc isDbEq(a, b: LayerDeltaRef; db: AristoDbRef; noisy = true): bool =
   if b.isNil:
     return false
   if unsafeAddr(a[]) != unsafeAddr(b[]):
-    if a.src != b.src or
-       a.kMap.getOrVoid(testRootVid) != b.kMap.getOrVoid(testRootVid) or
+    if a.kMap.getOrVoid(testRootVid) != b.kMap.getOrVoid(testRootVid) or
        a.vTop != b.vTop:
       return false

@@ -155,9 +155,6 @@ proc saveToBackend(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0
-
   block:
     let rc = db.txTop()
     xCheckRc rc.error == 0
@@ -175,9 +172,6 @@ proc saveToBackend(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0
-
   block:
     let rc = db.txTop()
     xCheckErr rc.value.level < 0 # force error
@@ -213,9 +207,6 @@ proc saveToBackendWithOops(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0
-
   block:
     let rc = db.txTop()
     xCheckRc rc.error == 0
@@ -229,9 +220,6 @@ proc saveToBackendWithOops(
     let rc = tx.commit()
     xCheckRc rc.error == 0

-  # Make sure MPT hashes are OK
-  xCheck db.dirty.len == 0
-
   block:
     let rc = db.txTop()
     xCheckErr rc.value.level < 0 # force error
@@ -87,7 +87,7 @@ let
   mainTest0m* = mainSample
     .cloneWith(
       name = "-am-some",
-      numBlocks = 5) # 1_000)
+      numBlocks = 1_000)

   mainTest1m* = mainSample
     .cloneWith(