Mirror of https://github.com/status-im/nimbus-eth1.git
Core db and aristo maintenance update (#2014)
* Aristo: Update error return code

  why: The `Aristo` function `delete()` might fail because there is no such
    data item on the db. This must return a single error code, as is done
    with `fetch()`.

* Ledger: Better error handling

  why: The `expect()` clauses have been replaced by raising asserts that
    indicate the error from the database backend. Also, `delete()` failures
    are legitimate if the item to delete does not exist.

* Aristo: Delete function must always leave a label on DB for `hashify()`

  why: The `hashify()` function uses the labels left by `merge()` and
    `delete()` to compile (and optimise) a scheduler for subsequent hashing.
    Originally, the labels were not used for deleted entries, and `delete()`
    still had an edge case where the deletion label was not properly handled.

* Aristo: Update `hashify()` scheduler, remove buggy optimisation

  why: It was left over from the version without virtual state roots, which
    did not know about account payload leaf vertices referring to storage
    roots.

* Aristo: Label storage trie account in `delete()` similar to `merge()`

  details: The `delete()` function applied to a non-static state root
    (assumed to be a storage root) will check the payload of an account leaf
    and mark its Merkle keys to be re-checked when running `hashify()`.

* Aristo: Clean up and re-org recycled vertex IDs in `hashify()`

  why: Re-organising the recycled vertex IDs list intends to reduce its
    size. The list is organised as a LIFO (or stack.) By reorganising it so
    that the smallest vertex ID numbers are on top, the list stays smaller,
    as observed on some examples (less than 30%.)

* CoreDb: Accept storage trie deletion requests in non-initialised state

  why: Due to lazy initialisation, the root vertex ID might not yet exist,
    so the `Aristo` database handlers would reject such a call with an
    error. This condition needs to be handled by the API (which realises the
    lazy feature.)

* Cosmetics & code massage, prettify logging

* Fix missing import
This commit is contained in:
  parent fa7f5de162
  commit 2c35390bdf
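Before the diff, a minimal, self-contained sketch of the single-error-code
convention the first bullet describes. It uses the `results` package that the
modules below already import; the proc and enum here are hypothetical
stand-ins for illustration, not the repo's API:

  import results

  type
    SketchError = enum     # hypothetical stand-in for the repo's `AristoError`
      DelPathNotFound      # single "no such item" code, mirroring `fetch()`
      DelOtherFailure

  # A `delete()`-style function reports a missing item with one well-known
  # error code, so callers can treat "not found" as legitimate, not fatal.
  proc deleteSketch(present: bool): Result[void, (uint64, SketchError)] =
    if not present:
      return err((0'u64, DelPathNotFound))
    ok()

  let rc = deleteSketch(false)
  if rc.isErr and rc.error[1] == DelPathNotFound:
    echo "nothing to delete -- a legitimate outcome"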
@@ -647,6 +647,15 @@ proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
       result[^1] = ']'
     result &= ")"
 
+proc pp*[T](rc: Result[T,(VertexID,AristoError)]): string =
+  if rc.isOk:
+    result = "ok("
+    when T isnot void:
+      result &= ".."
+    result &= ")"
+  else:
+    result = "err((" & rc.error[0].pp & "," & $rc.error[1] & "))"
+
 proc pp*(nd: NodeRef): string =
   nd.pp(AristoDbRef(nil).orDefault)
 
@@ -16,7 +16,7 @@
 {.push raises: [].}
 
 import
-  std/[sets, tables],
+  std/[sets, tables, typetraits],
   chronicles,
   eth/[common, trie/nibbles],
   results,
@@ -44,11 +44,8 @@ func toVae(vid: VertexID): SaveToVaeVidFn =
   proc(err: AristoError): (VertexID,AristoError) =
     return (vid,err)
 
-func toVae(err: (Hike,AristoError)): (VertexID,AristoError) =
-  if 0 < err[0].legs.len:
-    (err[0].legs[^1].wp.vid, err[1])
-  else:
-    (VertexID(0), err[1])
+func toVae(err: (VertexID,AristoError,Hike)): (VertexID,AristoError) =
+  (err[0], err[1])
 
 proc branchStillNeeded(vtx: VertexRef): Result[int,void] =
   ## Returns the nibble if there is only one reference left.
@@ -226,7 +223,7 @@ proc collapseLeaf(
   db.layersPutVtx(par.vid, par.vtx)
   db.layersPutVtx(lf.vid, lf.vtx)
   # Make sure that there is a cache enty in case the leaf was pulled from
-  # the backend.!
+  # the backend.
   let
     lfPath = hike.legsTo(hike.legs.len - 2, NibblesSeq) & lf.vtx.lPfx
     tag = lfPath.pathToTag.valueOr:
@@ -249,7 +246,7 @@ proc collapseLeaf(
   db.layersPutVtx(gpr.vid, gpr.vtx)
   db.layersPutVtx(lf.vid, lf.vtx)
   # Make sure that there is a cache enty in case the leaf was pulled from
-  # the backend.!
+  # the backend.
   let
     lfPath = hike.legsTo(hike.legs.len - 3, NibblesSeq) & lf.vtx.lPfx
     tag = lfPath.pathToTag.valueOr:
@@ -335,9 +332,14 @@ proc deleteImpl(
     db: AristoDbRef;                   # Database, top layer
     hike: Hike;                        # Fully expanded path
     lty: LeafTie;                      # `Patricia Trie` path root-to-leaf
+    accPath: PathID;                   # Needed for accounts payload
       ): Result[void,(VertexID,AristoError)] =
   ## Implementation of *delete* functionality.
 
+  if LEAST_FREE_VID <= lty.root.distinctBase:
+    db.registerAccount(lty.root, accPath).isOkOr:
+      return err((lty.root,error))
+
   # Remove leaf entry on the top
   let lf = hike.legs[^1].wp
   if lf.vtx.vType != Leaf:
@@ -401,13 +403,9 @@ proc deleteImpl(
     of Leaf:
       ? db.collapseLeaf(hike, nibble.byte, nxt.vtx)
 
-  # Delete leaf entry
-  if leafVidBe.isValid:
-    # To be recorded on change history
-    db.top.final.lTab[lty] = VertexID(0)
-  else:
-    # No need to keep it any longer in cache
-    db.top.final.lTab.del lty
+  # Make sure that there is a cache entry so the hasher can label this path
+  # at a later state.
+  db.top.final.lTab[lty] = VertexID(0)
 
   # Delete dependent leaf node storage tree if there is any
   let data = lf.vtx.lData
@@ -439,9 +437,17 @@ proc delete*(
 proc delete*(
     db: AristoDbRef;                   # Database, top layer
     hike: Hike;                        # Fully expanded chain of vertices
+    accPath: PathID;                   # Needed for accounts payload
       ): Result[void,(VertexID,AristoError)] =
   ## Delete argument `hike` chain of vertices from the database.
   ##
+  ## For a `hike.root` with `VertexID` greater than `LEAST_FREE_VID`, the
+  ## sub-tree generated by `payload.root` is considered a storage trie linked
+  ## to an account leaf referred to by a valid `accPath` (i.e. different from
+  ## `VOID_PATH_ID`.) In that case, an account must exists. If there is payload
+  ## of type `AccountData`, its `storageID` field must be unset or equal to the
+  ## `hike.root` vertex ID.
+  ##
   ## Note:
   ## If the leaf node has an account payload referring to a storage sub-trie,
   ## this one will be deleted as well.
@@ -450,24 +456,31 @@ proc delete*(
   let lty = LeafTie(
     root: hike.root,
     path: ? hike.to(NibblesSeq).pathToTag().mapErr toVae)
-  db.deleteImpl(hike, lty)
+  db.deleteImpl(hike, lty, accPath)
 
 proc delete*(
     db: AristoDbRef;                   # Database, top layer
     lty: LeafTie;                      # `Patricia Trie` path root-to-leaf
+    accPath: PathID;                   # Needed for accounts payload
       ): Result[void,(VertexID,AristoError)] =
   ## Variant of `delete()`
   ##
-  db.deleteImpl(? lty.hikeUp(db).mapErr toVae, lty)
+  db.deleteImpl(? lty.hikeUp(db).mapErr toVae, lty, accPath)
 
 proc delete*(
     db: AristoDbRef;
     root: VertexID;
     path: openArray[byte];
-      ): Result[void,(VertexID,AristoError)] =
+    accPath: PathID;                   # Needed for accounts payload
+      ): Result[void,(VertexID,AristoError)] =
   ## Variant of `delete()`
   ##
-  db.delete(? path.initNibbleRange.hikeUp(root, db).mapErr toVae)
+  let rc = path.initNibbleRange.hikeUp(root, db)
+  if rc.isOk:
+    return db.delete(rc.value, accPath)
+  if rc.error[1] in HikeAcceptableStopsNotFound:
+    return err((rc.error[0], DelPathNotFound))
+  err((rc.error[0],rc.error[1]))
 
 # ------------------------------------------------------------------------------
 # End
@@ -95,10 +95,6 @@ type
     MergeNonBranchProofModeLock
     MergeRootBranchLinkBusy
     MergeRootMissing
-    MergeAccPathMissing
-    MergeAccUnaccessible
-    MergeAccPathWithoutLeaf
-    MergeAccWrongStorageRoot
     MergeAssemblyFailed # Ooops, internal error
 
     MergeHashKeyInvalid
@@ -112,8 +108,14 @@ type
     MergeRootKeyDiffersForVid
     MergeNodeVtxDuplicates
 
+    # Utils
+    UtilsAccPathMissing
+    UtilsAccPathWithoutLeaf
+    UtilsAccUnaccessible
+    UtilsAccWrongStorageRoot
+    UtilsStoRootMissing
 
     # Update `Merkle` hashes `hashify()`
     HashifyEmptyHike
     HashifyExistingHashMismatch
     HashifyNodeUnresolved
     HashifyRootHashMismatch
@@ -195,6 +197,7 @@ type
     DelVidStaleVtx
     DelSubTreeTooBig
     DelSubTreeVoidRoot
+    DelPathNotFound
 
     # Functions from `aristo_filter.nim`
     FilBackendMissing
@@ -18,31 +18,17 @@ import
   results,
   "."/[aristo_desc, aristo_hike]
 
-const
-  AcceptableHikeStops = {
-    HikeBranchTailEmpty,
-    HikeBranchMissingEdge,
-    HikeExtTailEmpty,
-    HikeExtTailMismatch,
-    HikeLeafUnexpected,
-    HikeNoLegs}
-
 # ------------------------------------------------------------------------------
 # Private functions
 # ------------------------------------------------------------------------------
 
 proc fetchPayloadImpl(
-    rc: Result[Hike,(Hike,AristoError)];
+    rc: Result[Hike,(VertexID,AristoError,Hike)];
       ): Result[PayloadRef,(VertexID,AristoError)] =
   if rc.isErr:
-    let vid =
-      if rc.error[0].legs.len == 0: VertexID(0)
-      else: rc.error[0].legs[^1].wp.vid
-    if rc.error[1] in AcceptableHikeStops:
-      return err((vid, FetchPathNotFound))
-    return err((vid, rc.error[1]))
-  if rc.value.legs.len == 0:
-    return err((VertexID(0), FetchPathNotFound))
+    if rc.error[1] in HikeAcceptableStopsNotFound:
+      return err((rc.error[0], FetchPathNotFound))
+    return err((rc.error[0], rc.error[1]))
   ok rc.value.legs[^1].wp.vtx.lData
 
 proc fetchPayloadImpl(
@@ -61,7 +61,7 @@ import
   results,
   stew/byteutils,
   "."/[aristo_desc, aristo_get, aristo_hike, aristo_layers, aristo_serialise,
-    aristo_utils]
+    aristo_utils, aristo_vid]
 
 type
   FollowUpVid = object
@@ -222,12 +222,6 @@ proc updateSchedule(
         break findlegInx
       vid = leaf.vid
 
-    if not db.layersGetKeyOrVoid(vid).isValid:
-      db.layersPutLabel(vid, HashLabel(root: root, key: node.digestTo(HashKey)))
-      # Clean up unnecessay leaf node from previous session
-      wff.base.del vid
-      wff.setNextLink(wff.pool, wff.base.getOrVoid vid)
-
   # If possible, compute a node from the current vertex with all links
   # resolved on the cache layer. If this is not possible, stop here and
   # return the list of vertex IDs that could not be resolved (see option
@@ -324,9 +318,8 @@ proc hashify*(
         # FIXME: Is there a case for adding unresolved child-to-root links
         #        to the `wff` schedule?
         continue
-      if rc.isErr:
-        return err((lfVid,rc.error[1]))
-      return err((hike.root,HashifyEmptyHike))
+      doAssert rc.isErr # see implementation of `hikeUp()`
+      return err((lfVid,rc.error[1]))
 
     # Compile width-first forest search schedule
     wff.updateSchedule(db, hike)
@@ -419,8 +412,10 @@ proc hashify*(
       db.layersPutLabel(vid, HashLabel(root: vid, key: node.digestTo(HashKey)))
       wff.completed.incl vid
 
-  db.top.final.dirty = false
-  db.top.final.lTab.clear
+  db.top.final.dirty = false             # Mark top layer clean
+  db.top.final.lTab.clear                # Done with leafs
+  db.top.final.vGen = db.vGen.vidReorg() # Squeze list of recycled vertex IDs
 
   ok wff.completed
 
 # ------------------------------------------------------------------------------
@@ -27,6 +27,18 @@ type
     legs*: seq[Leg]                    ## Chain of vertices and IDs
     tail*: NibblesSeq                  ## Portion of non completed path
 
+const
+  HikeAcceptableStopsNotFound* = {
+    HikeBranchTailEmpty,
+    HikeBranchMissingEdge,
+    HikeExtTailEmpty,
+    HikeExtTailMismatch,
+    HikeLeafUnexpected,
+    HikeNoLegs}
+      ## When trying to find a leaf vertex the Patricia tree, there are several
+      ## conditions where the search stops which do not constitute a problem
+      ## with the trie (aka sysetm error.)
+
 # ------------------------------------------------------------------------------
 # Private functions
 # ------------------------------------------------------------------------------
@@ -47,9 +59,9 @@ func getNibblesImpl(hike: Hike; start = 0; maxLen = high(int)): NibblesSeq =
 # Public functions
 # ------------------------------------------------------------------------------
 
-func to*(rc: Result[Hike,(Hike,AristoError)]; T: type Hike): T =
+func to*(rc: Result[Hike,(VertexID,AristoError,Hike)]; T: type Hike): T =
   ## Extract `Hike` from either ok ot error part of argument `rc`.
-  if rc.isOk: rc.value else: rc.error[0]
+  if rc.isOk: rc.value else: rc.error[2]
 
 func to*(hike: Hike; T: type NibblesSeq): T =
   ## Convert back
@@ -69,7 +81,7 @@ proc hikeUp*(
     path: NibblesSeq;                  # Partial path
     root: VertexID;                    # Start vertex
     db: AristoDbRef;                   # Database
-      ): Result[Hike,(Hike,AristoError)] =
+      ): Result[Hike,(VertexID,AristoError,Hike)] =
   ## For the argument `path`, find and return the logest possible path in the
   ## argument database `db`.
   var hike = Hike(
@@ -77,9 +89,9 @@ proc hikeUp*(
     tail: path)
 
   if not root.isValid:
-    return err((hike,HikeRootMissing))
+    return err((VertexID(0),HikeRootMissing,hike))
   if path.len == 0:
-    return err((hike,HikeEmptyPath))
+    return err((VertexID(0),HikeEmptyPath,hike))
 
   var vid = root
   while vid.isValid:
@@ -88,9 +100,9 @@ proc hikeUp*(
     # Fetch next vertex
     leg.wp.vtx = db.getVtxRc(vid).valueOr:
       if error != GetVtxNotFound:
-        return err((hike,error))
+        return err((vid,error,hike))
       if hike.legs.len == 0:
-        return err((hike,HikeNoLegs))
+        return err((vid,HikeNoLegs,hike))
       break
 
     case leg.wp.vtx.vType:
@@ -101,19 +113,19 @@ proc hikeUp*(
         hike.tail = EmptyNibbleSeq
         break
 
-      return err((hike,HikeLeafUnexpected))
+      return err((vid,HikeLeafUnexpected,hike))
 
     of Branch:
       if hike.tail.len == 0:
         hike.legs.add leg
-        return err((hike,HikeBranchTailEmpty))
+        return err((vid,HikeBranchTailEmpty,hike))
 
       let
         nibble = hike.tail[0].int8
         nextVid = leg.wp.vtx.bVid[nibble]
 
       if not nextVid.isValid:
-        return err((hike,HikeBranchMissingEdge))
+        return err((vid,HikeBranchMissingEdge,hike))
 
       leg.nibble = nibble
       hike.legs.add leg
@@ -124,10 +136,10 @@ proc hikeUp*(
       if hike.tail.len == 0:
         hike.legs.add leg
         hike.tail = EmptyNibbleSeq
-        return err((hike,HikeExtTailEmpty))      # Well, somehow odd
+        return err((vid,HikeExtTailEmpty,hike))  # Well, somehow odd
 
       if leg.wp.vtx.ePfx.len != hike.tail.sharedPrefixLen(leg.wp.vtx.ePfx):
-        return err((hike,HikeExtTailMismatch))   # Need to branch from here
+        return err((vid,HikeExtTailMismatch,hike)) # Need to branch from here
 
       hike.legs.add leg
       hike.tail = hike.tail.slice(leg.wp.vtx.ePfx.len)
@@ -135,7 +147,10 @@ proc hikeUp*(
 
   ok hike
 
-proc hikeUp*(lty: LeafTie; db: AristoDbRef): Result[Hike,(Hike,AristoError)] =
+proc hikeUp*(
+    lty: LeafTie;
+    db: AristoDbRef;
+      ): Result[Hike,(VertexID,AristoError,Hike)] =
   ## Variant of `hike()`
   lty.path.to(NibblesSeq).hikeUp(lty.root, db)
 
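As a quick model of what the widened error type above buys a caller: the
vertex ID where the walk stopped now travels with the error code, and the
partial hike stays available. A self-contained sketch with simplified
stand-in types (not the repo's `VertexID`/`Hike` definitions):

  import results

  type
    VidModel = uint64              # stands in for `VertexID`
    HikeModel = object             # stands in for `Hike`
      legs: seq[VidModel]

  proc hikeUpModel(stopAt: VidModel): Result[HikeModel, (VidModel, string, HikeModel)] =
    # Pretend the walk stopped early at `stopAt`, leaving a partial hike.
    err((stopAt, "HikeBranchMissingEdge", HikeModel(legs: @[1'u64, stopAt])))

  let rc = hikeUpModel(7)
  if rc.isErr:
    let (vid, code, partial) = rc.error
    assert vid == 7 and code == "HikeBranchMissingEdge" and partial.legs.len == 2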
@@ -32,7 +32,7 @@ import
   stew/keyed_queue,
   ../../sync/protocol/snap/snap_types,
   "."/[aristo_desc, aristo_get, aristo_hike, aristo_layers, aristo_path,
-    aristo_serialise, aristo_vid]
+    aristo_serialise, aristo_utils, aristo_vid]
 
 logScope:
   topics = "aristo-merge"
@@ -468,47 +468,6 @@ proc updatePayload(
   else:
     err(MergeLeafPathCachedAlready)
 
-
-proc registerStorageRootAccount(
-    db: AristoDbRef;                   # Database, top layer
-    stoRoot: VertexID;                 # Storage root ID
-    accPath: PathID;                   # Needed for accounts payload
-      ): Result[void,AristoError] =
-  ## Verify that the `stoRoot` argument is properly referred to by the
-  ## account data (if any) implied to by the `accPath` argument.
-  ##
-  # Verify storage root and account path
-  if not stoRoot.isValid:
-    return err(MergeRootMissing)
-  if not accPath.isValid:
-    return err(MergeAccPathMissing)
-
-  # Check whether the account is marked for re-hash, already
-  let lty = LeafTie(root: VertexID(1), path: accPath)
-  if db.lTab.hasKey lty:
-    return ok()
-
-  # Get account leaf with account data
-  let hike = lty.hikeUp(db).valueOr:
-    return err(MergeAccUnaccessible)
-  let wp = hike.legs[^1].wp
-  if wp.vtx.vType != Leaf:
-    return err(MergeAccPathWithoutLeaf)
-  if wp.vtx.lData.pType != AccountData:
-    return ok() # nothing to do
-
-  # Need to flag for re-hash
-  let stoID = wp.vtx.lData.account.storageID
-  if stoID.isValid and stoID != stoRoot:
-    return err(MergeAccWrongStorageRoot)
-
-  # Clear Merkle keys and store leaf record
-  for w in hike.legs.mapIt(it.wp.vid):
-    db.nullifyKey w
-  db.top.final.lTab[lty] = wp.vid
-
-  ok()
-
 # ------------------------------------------------------------------------------
 # Private functions: add Merkle proof node
 # ------------------------------------------------------------------------------
@@ -646,11 +605,11 @@ proc merge*(
   ## is stored with the leaf vertex in the database unless the leaf vertex
   ## exists already.
   ##
-  ## For `payload.root` vertex IDs with number at least `LEAST_FREE_VID`, the
-  ## sub-tree generated by `payload.root` is considered a storage root linked
+  ## For a `payload.root` with `VertexID` greater than `LEAST_FREE_VID`, the
+  ## sub-tree generated by `payload.root` is considered a storage trie linked
   ## to an account leaf referred to by a valid `accPath` (i.e. different from
   ## `VOID_PATH_ID`.) In that case, an account must exists. If there is payload
-  ## of type `accountData`, its `storageID` must be unset or equal to the
+  ## of type `AccountData`, its `storageID` field must be unset or equal to the
   ## `payload.root` vertex ID.
   ##
   # Check whether the leaf is on the database and payloads match
@@ -662,7 +621,7 @@ proc merge*(
     return err(MergeLeafPathCachedAlready)
 
   if LEAST_FREE_VID <= leafTie.root.distinctBase:
-    ? db.registerStorageRootAccount(leafTie.root, accPath)
+    ? db.registerAccount(leafTie.root, accPath)
   elif not leafTie.root.isValid:
     return err(MergeRootMissing)
 
@@ -14,9 +14,10 @@
 {.push raises: [].}
 
 import
+  std/[sequtils, tables],
   eth/common,
   results,
-  "."/[aristo_desc, aristo_get, aristo_layers]
+  "."/[aristo_desc, aristo_get, aristo_hike, aristo_layers]
 
 # ------------------------------------------------------------------------------
 # Public functions, converters
@@ -177,6 +178,52 @@ proc subVids*(vtx: VertexRef): seq[VertexID] =
   of Extension:
     result.add vtx.eVid
 
+# ---------------------
+
+proc registerAccount*(
+    db: AristoDbRef;                   # Database, top layer
+    stoRoot: VertexID;                 # Storage root ID
+    accPath: PathID;                   # Needed for accounts payload
+      ): Result[void,AristoError] =
+  ## Verify that the `stoRoot` argument is properly referred to by the
+  ## account data (if any) implied to by the `accPath` argument.
+  ##
+  # Verify storage root and account path
+  if not stoRoot.isValid:
+    return err(UtilsStoRootMissing)
+  if not accPath.isValid:
+    return err(UtilsAccPathMissing)
+
+  # Check whether the account is marked for re-hash, already
+  let lty = LeafTie(root: VertexID(1), path: accPath)
+  if db.lTab.hasKey lty:
+    return ok()
+
+  # Get account leaf with account data
+  let rc = lty.hikeUp(db)
+  let hike = block:
+    if rc.isErr:
+      return err(UtilsAccUnaccessible)
+    rc.value
+
+  let wp = hike.legs[^1].wp
+  if wp.vtx.vType != Leaf:
+    return err(UtilsAccPathWithoutLeaf)
+  if wp.vtx.lData.pType != AccountData:
+    return ok() # nothing to do
+
+  # Need to flag for re-hash
+  let stoID = wp.vtx.lData.account.storageID
+  if stoID.isValid and stoID != stoRoot:
+    return err(UtilsAccWrongStorageRoot)
+
+  # Clear Merkle keys and store leaf record
+  for w in hike.legs.mapIt(it.wp.vid):
+    db.layersResLabel w
+  db.top.final.lTab[lty] = wp.vid
+
+  ok()
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
@@ -72,26 +72,26 @@ proc vidDispose*(db: AristoDbRef; vid: VertexID) =
 proc vidReorg*(vGen: seq[VertexID]): seq[VertexID] =
   ## Return a compacted version of the argument vertex ID generator state
-  ## `vGen`. The function removes redundant items from the recycle queue.
+  ## `vGen`. The function removes redundant items from the recycle queue and
+  ## orders it in a way so that smaller `VertexID` numbers are re-used first.
   ##
   if 1 < vGen.len:
-    let lst = vGen.mapIt(uint64(it)).sorted.mapIt(VertexID(it))
-    for n in (lst.len-1).countDown(1):
-      if lst[n-1].uint64 + 1 != lst[n].uint64:
-        # All elements larger than `lst[n-1]` are in increasing order. For
-        # the last continuously increasing sequence, only the smallest item
-        # is needed and the rest can be removed
+    let lst = vGen.mapIt(uint64(it)).sorted(Descending).mapIt(VertexID(it))
+    for n in 0 .. lst.len-2:
+      if lst[n].uint64 != lst[n+1].uint64 + 1:
+        # All elements of the sequence `lst[0]`..`lst[n]` are in decreasing
+        # order with distance 1. Only the smallest item is needed and the
+        # rest can be removed (as long as distance is 1.)
         #
         # Example:
-        #   ..3, 5, 6, 7 => ..3, 5
-        #                        ^
-        #                        |
-        #                        n
+        #   7, 6, 5, 3.. => 5, 3.. => @[3..] & @[5]
+        #         ^
+        #         |
+        #         n
         #
-        if n < lst.len-1:
-          return lst[0..n]
-        return vGen
-      # All entries are continuously increasing
-      return @[lst[0]]
+        return lst[n+1 .. lst.len-1] & @[lst[n]]
+    # Entries decrease continuously
+    return @[lst[^1]]
 
   vGen
 
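The new loop reads as: sort the recycle list in descending order, then
collapse the leading run of consecutive IDs down to its smallest member. A
self-contained sketch of just that idea (plain `uint64` stands in for
`VertexID`; an illustration, not the repo's `vidReorg()`):

  import std/algorithm

  proc reorgSketch(vGen: seq[uint64]): seq[uint64] =
    if vGen.len < 2:
      return vGen
    let lst = vGen.sorted(Descending)
    for n in 0 .. lst.len - 2:
      if lst[n] != lst[n+1] + 1:
        # `lst[0..n]` decrease in steps of one: keep only the smallest of them
        return lst[n+1 .. ^1] & @[lst[n]]
    return @[lst[^1]]            # the whole list is one consecutive run

  # Mirrors the updated test expectations near the end of this commit:
  assert reorgSketch(@[8'u64, 7, 3, 4, 5, 9]) == @[5'u64, 4, 3, 7]
  assert reorgSketch(@[8'u64, 7, 6, 3, 4, 5, 9]) == @[3'u64]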
@@ -25,7 +25,7 @@ type
     ctx*: string                     ## Context where the exception or error occured
     case isAristo*: bool
     of true:
-      root*: VertexID
+      vid*: VertexID
       aErr*: AristoError
     else:
       kErr*: KvtError
@@ -51,8 +51,8 @@ func errorPrint*(e: CoreDbErrorRef): string =
   result = if e.isAristo: "Aristo" else: "Kvt"
   result &= ", ctx=" & $e.ctx & ", "
   if e.isAristo:
-    if e.root.isValid:
-      result &= "root=" & e.root.toStr & ", "
+    if e.vid.isValid:
+      result &= "vid=" & e.vid.toStr & ", "
     result &= "error=" & $e.aErr
   else:
     result &= "error=" & $e.kErr
@@ -436,8 +436,16 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
     let
       db = cMpt.base.parent
       mpt = cMpt.mpt
-      rc = mpt.delete(cMpt.root, k)
 
+    if not cMpt.root.isValid and cMpt.accPath.isValid:
+      # This is insane but legit. A storage trie was announced for an account
+      # but no data have been added, yet.
+      return ok()
+
+    let rc = mpt.delete(cMpt.root, k, cMpt.accPath)
     if rc.isErr:
+      if rc.error[1] == DelPathNotFound:
+        return err(rc.error.toError(db, info, MptNotFound))
       return err(rc.error.toError(db, info))
     ok()
@@ -560,8 +568,10 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
       db = cAcc.base.parent
      mpt = cAcc.mpt
       key = address.keccakHash.data
-      rc = mpt.delete(cAcc.root, key)
+      rc = mpt.delete(cAcc.root, key, VOID_PATH_ID)
     if rc.isErr:
+      if rc.error[1] == DelPathNotFound:
+        return err(rc.error.toError(db, info, AccNotFound))
       return err(rc.error.toError(db, info))
     ok()
@@ -623,7 +633,7 @@ func toError*(
   db.bless(error, AristoCoreDbError(
     ctx:      info,
     isAristo: true,
-    root:     e[0],
+    vid:      e[0],
     aErr:     e[1]))
 
 func toVoidRc*[T](
@@ -795,7 +805,7 @@ proc getTrie*(
   base.gc() # update pending changes
 
   if kind == StorageTrie and not path.isValid:
-    return err(aristo.MergeAccPathMissing.toError(db, info, AccAddrMissing))
+    return err(aristo.UtilsAccPathMissing.toError(db, info, AccAddrMissing))
 
   if not root.isValid:
     var trie = AristoCoreDbTrie(
@@ -71,7 +71,11 @@ proc db*(t: SomeLedger): CoreDbRef =
   t.distinctBase.parent
 
 proc rootHash*(t: SomeLedger): Hash256 =
-  t.distinctBase.getTrie().rootHash().expect "SomeLedger/rootHash()"
+  const info = "SomeLedger/rootHash(): "
+  let rc = t.distinctBase.getTrie().rootHash()
+  if rc.isErr:
+    raiseAssert info & $$rc.error
+  rc.value
 
 proc getTrie*(t: SomeLedger): CoreDbTrieRef =
   t.distinctBase.getTrie()
@@ -97,14 +101,23 @@ proc init*(
 
 proc fetch*(al: AccountLedger; eAddr: EthAddress): Result[CoreDbAccount,void] =
   ## Using `fetch()` for trie data retrieval
-  al.distinctBase.fetch(eAddr).mapErr(proc(ign: CoreDbErrorRef) = discard)
+  let rc = al.distinctBase.fetch(eAddr)
+  if rc.isErr:
+    return err()
+  ok rc.value
 
 proc merge*(al: AccountLedger; account: CoreDbAccount) =
   ## Using `merge()` for trie data storage
-  al.distinctBase.merge(account).expect "AccountLedger/merge()"
+  const info = "AccountLedger/merge(): "
+  al.distinctBase.merge(account).isOkOr:
+    raiseAssert info & $$error
 
 proc delete*(al: AccountLedger, eAddr: EthAddress) =
-  al.distinctBase.delete(eAddr).expect "AccountLedger/delete()"
+  const info = "AccountLedger/delete()"
+  al.distinctBase.delete(eAddr).isOkOr:
+    if error.error == MptNotFound:
+      return
+    raiseAssert info & $$error
 
 proc persistent*(al: AccountLedger) =
   let rc = al.distinctBase.persistent()
@@ -151,13 +164,22 @@ proc init*(
   mpt.toPhk.T
 
 proc fetch*(sl: StorageLedger, slot: UInt256): Result[Blob,void] =
-  sl.distinctBase.fetch(slot.toBytesBE).mapErr proc(ign: CoreDbErrorRef)=discard
+  let rc = sl.distinctBase.fetch(slot.toBytesBE)
+  if rc.isErr:
+    return err()
+  ok rc.value
 
 proc merge*(sl: StorageLedger, slot: UInt256, value: openArray[byte]) =
-  sl.distinctBase.merge(slot.toBytesBE, value).expect "StorageLedger/merge()"
+  const info = "StorageLedger/merge(): "
+  sl.distinctBase.merge(slot.toBytesBE, value).isOkOr:
+    raiseAssert info & $$error
 
 proc delete*(sl: StorageLedger, slot: UInt256) =
-  sl.distinctBase.delete(slot.toBytesBE).expect "StorageLedger/delete()"
+  const info = "StorageLedger/delete(): "
+  sl.distinctBase.delete(slot.toBytesBE).isOkOr:
+    if error.error == MptNotFound:
+      return
+    raiseAssert info & $$error
 
 iterator storage*(
     al: AccountLedger;
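For readers unfamiliar with the `results` package used throughout the ledger
wrappers above: `isOkOr:` runs its block only when the result holds an error,
with `error` bound inside the block. That is what lets the wrappers turn
backend failures into raised asserts while letting `MptNotFound` pass
silently. A minimal self-contained illustration (names hypothetical):

  import results

  proc half(n: int): Result[int, string] =
    if n mod 2 == 0: ok(n div 2)
    else: err("odd input")

  proc mustHalve(n: int): int =
    let rc = half(n)
    rc.isOkOr:                   # block runs only when `rc` is an error
      raiseAssert "mustHalve(): " & error
    rc.value

  assert mustHalve(8) == 4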
@@ -276,8 +276,8 @@ proc testVidRecycleLists*(noisy = true; seed = 42): bool =
       expectedVids += (vid < first).ord
       db.vidDispose vid
 
-    xCheck db.vGen.len == expectedVids
-    noisy.say "***", "vids=", db.vGen.len, " discarded=", count-expectedVids
+    xCheck db.vGen.len == expectedVids:
+      noisy.say "***", "vids=", db.vGen.len, " discarded=", count-expectedVids
 
   # Serialise/deserialise
   block:
@@ -315,7 +315,7 @@ proc testVidRecycleLists*(noisy = true; seed = 42): bool =
   # Recycling and re-org tests
   func toVQ(a: seq[int]): seq[VertexID] = a.mapIt(VertexID(LEAST_FREE_VID+it))
 
-  xCheck @[8, 7, 3, 4, 5, 9] .toVQ.vidReorg == @[3, 4, 5, 7] .toVQ
+  xCheck @[8, 7, 3, 4, 5, 9] .toVQ.vidReorg == @[5, 4, 3, 7] .toVQ
   xCheck @[8, 7, 6, 3, 4, 5, 9] .toVQ.vidReorg == @[3] .toVQ
   xCheck @[5, 4, 3, 7] .toVQ.vidReorg == @[5, 4, 3, 7] .toVQ
   xCheck @[5] .toVQ.vidReorg == @[5] .toVQ
@@ -389,7 +389,7 @@ proc testTxMergeAndDeleteOneByOne*(
 
   # Delete leaf
   block:
-    let rc = db.delete leaf
+    let rc = db.delete(leaf, VOID_PATH_ID)
     xCheckRc rc.error == (0,0)
 
     # Update list of remaininf leafs