On demand mpt revisited (#2426)
* rebased from `github/on-demand-mpt`

  ackn: wip: on-demand MPT construction

  Given that the actual data is stored in the `Vertex` structure, it's useful to think of the MPT as a cache for computing roots rather than as a functional requirement on its own. This PR follows that line of thinking by computing the MPT incrementally, only when it is actually needed, i.e. when a state (or similar) root is requested. This significantly reduces memory usage and improves performance:

  * no need for dirty-MPT-node book-keeping
  * no need to build a complex forest of upcoming hashing work
  * only hashes that are functionally needed are ever computed - intermediate nodes whose MPT root is never observed are never computed or processed

* Unit test hot fixes
* Unit test hot fixes cont. (somehow lost that part)

---------

Co-authored-by: Jacek Sieka <jacek@status.im>

parent 44deff9b28
commit 14c3772545
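
To make the calling convention concrete before the diff itself, here is a minimal sketch (added for this write-up, not part of the commit) of how a state root is obtained after this change: rather than running `hashify()` over the whole dirty forest and then reading the cached key, a caller passes `updateOk = true` to the root fetcher, which computes only the hashes the requested root actually depends on. The helper name `currentStateRoot` and the import paths are assumptions for illustration; the fetcher signature matches the one added in the diff below.

```nim
# Illustrative only: module paths and the helper name are assumptions.
import
  eth/common,
  ./nimbus/db/aristo/[aristo_desc, aristo_fetch]

proc currentStateRoot(db: AristoDbRef): Hash256 =
  ## Old flow (pre-PR, roughly): run db.hashify() first, then read the
  ## cached key of VertexID(1).
  ## New flow: ask for the account root directly; `updateOk = true` lets
  ## the fetcher compute missing keys on demand.
  db.fetchAccountState(updateOk = true).valueOr:
    raiseAssert "state root unavailable: " & $error
```
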
@@ -250,7 +250,7 @@ proc vmExecCommit(pst: TxPackerStateRef)
vmState.receipts.setLen(nItems)
xp.chain.receipts = vmState.receipts
xp.chain.txRoot = pst.tr.state.valueOr:
xp.chain.txRoot = pst.tr.state(updateOk=true).valueOr:
raiseAssert "vmExecCommit(): state() failed " & $$error
xp.chain.stateRoot = vmState.stateDB.rootHash

@@ -18,7 +18,7 @@ import
results,
./aristo_desc/desc_backend,
./aristo_init/memory_db,
"."/[aristo_delete, aristo_desc, aristo_fetch, aristo_hashify,
"."/[aristo_delete, aristo_desc, aristo_fetch,
aristo_init, aristo_merge, aristo_path, aristo_profile, aristo_tx]

export

@@ -116,9 +116,11 @@ type

AristoApiFetchAccountStateFn* =
proc(db: AristoDbRef;
updateOk: bool;
): Result[Hash256,AristoError]
{.noRaise.}
## Fetch the Merkle hash of the account root.
## Fetch the Merkle hash of the account root. Force update if the
## argument `updateOK` is set `true`.

AristoApiFetchGenericDataFn* =
proc(db: AristoDbRef;

@@ -132,9 +134,11 @@
AristoApiFetchGenericStateFn* =
proc(db: AristoDbRef;
root: VertexID;
updateOk: bool;
): Result[Hash256,AristoError]
{.noRaise.}
## Fetch the Merkle hash of the argument `root`.
## Fetch the Merkle hash of the argument `root`. Force update if the
## argument `updateOK` is set `true`.

AristoApiFetchStorageDataFn* =
proc(db: AristoDbRef;

@@ -148,9 +152,11 @@
AristoApiFetchStorageStateFn* =
proc(db: AristoDbRef;
accPath: openArray[byte];
updateOk: bool;
): Result[Hash256,AristoError]
{.noRaise.}
## Fetch the Merkle hash of the storage root related to `accPath`.
## Fetch the Merkle hash of the storage root related to `accPath`. Force
## update if the argument `updateOK` is set `true`.

AristoApiFindTxFn* =
proc(db: AristoDbRef;

@@ -421,8 +427,6 @@ type
finish*: AristoApiFinishFn
forget*: AristoApiForgetFn
forkTx*: AristoApiForkTxFn
hashify*: AristoApiHashifyFn

hasPathAccount*: AristoApiHasPathAccountFn
hasPathGeneric*: AristoApiHasPathGenericFn
hasPathStorage*: AristoApiHasPathStorageFn

@@ -468,7 +472,6 @@ type
AristoApiProfFinishFn = "finish"
AristoApiProfForgetFn = "forget"
AristoApiProfForkTxFn = "forkTx"
AristoApiProfHashifyFn = "hashify"

AristoApiProfHasPathAccountFn = "hasPathAccount"
AristoApiProfHasPathGenericFn = "hasPathGeneric"

@@ -532,7 +535,6 @@ when AutoValidateApiHooks:
doAssert not api.finish.isNil
doAssert not api.forget.isNil
doAssert not api.forkTx.isNil
doAssert not api.hashify.isNil

doAssert not api.hasPathAccount.isNil
doAssert not api.hasPathGeneric.isNil

@@ -600,7 +602,6 @@ func init*(api: var AristoApiObj) =
api.finish = finish
api.forget = forget
api.forkTx = forkTx
api.hashify = hashify

api.hasPathAccount = hasPathAccount
api.hasPathGeneric = hasPathGeneric

@@ -650,7 +651,6 @@ func dup*(api: AristoApiRef): AristoApiRef =
finish: api.finish,
forget: api.forget,
forkTx: api.forkTx,
hashify: api.hashify,

hasPathAccount: api.hasPathAccount,
hasPathGeneric: api.hasPathGeneric,

@@ -742,9 +742,9 @@ func init*(
result = api.fetchAccountRecord(a, b)

profApi.fetchAccountState =
proc(a: AristoDbRef): auto =
proc(a: AristoDbRef; b: bool): auto =
AristoApiProfFetchAccountStateFn.profileRunner:
result = api.fetchAccountState(a)
result = api.fetchAccountState(a, b)

profApi.fetchGenericData =
proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto =

@@ -752,9 +752,9 @@ func init*(
result = api.fetchGenericData(a, b, c)

profApi.fetchGenericState =
proc(a: AristoDbRef; b: VertexID;): auto =
proc(a: AristoDbRef; b: VertexID; c: bool): auto =
AristoApiProfFetchGenericStateFn.profileRunner:
result = api.fetchGenericState(a, b)
result = api.fetchGenericState(a, b, c)

profApi.fetchStorageData =
proc(a: AristoDbRef; b, c: openArray[byte]): auto =

@@ -762,9 +762,9 @@ func init*(
result = api.fetchStorageData(a, b, c)

profApi.fetchStorageState =
proc(a: AristoDbRef; b: openArray[byte]): auto =
proc(a: AristoDbRef; b: openArray[byte]; c: bool): auto =
AristoApiProfFetchStorageStateFn.profileRunner:
result = api.fetchStorageState(a, b)
result = api.fetchStorageState(a, b, c)

profApi.findTx =
proc(a: AristoDbRef; b: VertexID; c: HashKey): auto =

@@ -786,11 +786,6 @@ func init*(
AristoApiProfForkTxFn.profileRunner:
result = api.forkTx(a, b)

profApi.hashify =
proc(a: AristoDbRef): auto =
AristoApiProfHashifyFn.profileRunner:
result = api.hashify(a)

profApi.hasPathAccount =
proc(a: AristoDbRef; b: openArray[byte]): auto =
AristoApiProfHasPathAccountFn.profileRunner:

@@ -95,10 +95,10 @@ proc deltaMerge*(
else:
return err((vid,rc.error))

# Check consistency
if (newFilter.src == newFilter.kMap.getOrVoid(VertexID 1)) !=
(newFilter.sTab.len == 0 and newFilter.kMap.len == 0):
return err((VertexID(0),FilSrcTrgInconsistent))
# # Check consistency
# if (newFilter.src == newFilter.kMap.getOrVoid(VertexID 1)) !=
# (newFilter.sTab.len == 0 and newFilter.kMap.len == 0):
# return err((VertexID(0),FilSrcTrgInconsistent))

ok newFilter

@@ -17,7 +17,7 @@ import
std/typetraits,
eth/common,
results,
"."/[aristo_desc, aristo_get, aristo_hike]
"."/[aristo_desc, aristo_get, aristo_hashify, aristo_hike]

# ------------------------------------------------------------------------------
# Private functions

@@ -55,11 +55,19 @@ proc retrievePayload(
proc retrieveMerkleHash(
db: AristoDbRef;
root: VertexID;
updateOk: bool;
): Result[Hash256,AristoError] =
let key = db.getKeyRc(root).valueOr:
if error == GetKeyNotFound:
return ok(EMPTY_ROOT_HASH) # empty sub-tree
return err(error)
let key = block:
if updateOk:
db.computeKey(root).valueOr:
if error == GetVtxNotFound:
return ok(EMPTY_ROOT_HASH)
return err(error)
else:
db.getKeyRc(root).valueOr:
if error == GetKeyNotFound:
return ok(EMPTY_ROOT_HASH) # empty sub-tree
return err(error)
ok key.to(Hash256)

@@ -148,9 +156,10 @@ proc fetchAccountRecord*(

proc fetchAccountState*(
db: AristoDbRef;
updateOk: bool;
): Result[Hash256,AristoError] =
## Fetch the Merkle hash of the account root.
db.retrieveMerkleHash VertexID(1)
db.retrieveMerkleHash(VertexID(1), updateOk)

proc hasPathAccount*(
db: AristoDbRef;

@@ -178,9 +187,10 @@ proc fetchGenericData*(
proc fetchGenericState*(
db: AristoDbRef;
root: VertexID;
updateOk: bool;
): Result[Hash256,AristoError] =
## Fetch the Merkle hash of the argument `root`.
db.retrieveMerkleHash root
db.retrieveMerkleHash(root, updateOk)

proc hasPathGeneric*(
db: AristoDbRef;

@@ -209,13 +219,14 @@ proc fetchStorageData*(
proc fetchStorageState*(
db: AristoDbRef;
accPath: openArray[byte];
updateOk: bool;
): Result[Hash256,AristoError] =
## Fetch the Merkle hash of the storage root related to `accPath`.
let stoID = db.fetchStorageID(accPath).valueOr:
if error == FetchPathNotFound:
return ok(EMPTY_ROOT_HASH) # no sub-tree
return err(error)
db.retrieveMerkleHash stoID
db.retrieveMerkleHash(stoID, updateOk)

proc hasPathStorage*(
db: AristoDbRef;

@@ -8,319 +8,106 @@
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Patricia Trie Merkleisation
## ========================================
##
## For the current state of the `Patricia Trie`, keys (equivalent to hashes)
## are associated with the vertex IDs. Existing key associations are taken
## as-is/unchecked unless the ID is marked a proof node. In the latter case,
## the key is assumed to be correct after re-calculation.
##
## The labelling algorithm works roughly as follows:
##
## * Given a set of start or root vertices, build the forest (of trees)
## downwards towards leafs vertices so that none of these vertices has a
## Merkle hash label.
##
## * Starting at the leaf vertices in width-first fashion, calculate the
## Merkle hashes and label the leaf vertices. Recursively work up labelling
## vertices up until the root nodes are reached.
##
## Note that there are some tweaks for `proof` node vertices which lead to
## incomplete trees in a way that the algoritm handles existing Merkle hash
## labels for missing vertices.
##
{.push raises: [].}

import
std/[algorithm, sequtils, sets, tables],
chronicles,
eth/common,
results,
"."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise, aristo_utils]

type
WidthFirstForest = object
## Collected width first search trees
root: HashSet[VertexID] ## Top level, root targets
pool: Table[VertexID,VertexID] ## Upper links pool
base: Table[VertexID,VertexID] ## Width-first leaf level links
leaf: seq[VertexID] ## Stand-alone leaf to process
rev: Table[VertexID,HashSet[VertexID]] ## Reverse look up table
"."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise]

logScope:
topics = "aristo-hashify"

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

func getOrVoid(tab: Table[VertexID,VertexID]; vid: VertexID): VertexID =
tab.getOrDefault(vid, VertexID(0))

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

func hasValue(
wffTable: Table[VertexID,VertexID];
vid: VertexID;
wff: var WidthFirstForest;
): bool =
## Helper for efficient `value` access:
## ::
## wffTable.hasValue(wff, vid)
##
## instead of
## ::
## vid in wffTable.values.toSeq
##
wff.rev.withValue(vid, v):
for w in v[]:
if w in wffTable:
return true


proc pedigree(
proc computeKey*(
db: AristoDbRef; # Database, top layer
wff: var WidthFirstForest;
ancestors: HashSet[VertexID]; # Vertex IDs to start connecting from
proofs: HashSet[VertexID]; # Additional proof nodes to start from
): Result[void, (VertexID,AristoError)] =
## For each vertex ID from the argument set `ancestors` find all un-labelled
## grand child vertices and build a forest (of trees) starting from the
## grand child vertices.
##
var
leafs: HashSet[VertexID]
vid: VertexID; # Vertex to convert
): Result[HashKey, AristoError] =
# This is a variation on getKeyRc which computes the key instead of returning
# an error
# TODO it should not always write the key to the persistent storage

proc register(wff: var WidthFirstForest; fromVid, toVid: VertexID) =
if toVid in wff.base:
# * there is `toVid->*` in `base[]`
# * so ``toVid->*` moved to `pool[]`
wff.pool[toVid] = wff.base.getOrVoid toVid
wff.base.del toVid
if wff.base.hasValue(fromVid, wff):
# * there is `*->fromVid` in `base[]`
# * so store `fromVid->toVid` in `pool[]`
wff.pool[fromVid] = toVid
else:
# store `fromVid->toVid` in `base[]`
wff.base[fromVid] = toVid

# Register reverse pair for quick table value lookup
wff.rev.withValue(toVid, val):
val[].incl fromVid
do:
wff.rev[toVid] = [fromVid].toHashSet

# Remove unnecessarey sup-trie roots (e.g. for a storage root)
wff.root.excl fromVid

# Initialise greedy search which will keep a set of current leafs in the
# `leafs{}` set and follow up links in the `pool[]` table, leading all the
# way up to the `root{}` set.
#
# Process root nodes if they are unlabelled
var rootWasDeleted = VertexID(0)
for root in ancestors:
let vtx = db.getVtx root
if vtx.isNil:
if VertexID(LEAST_FREE_VID) <= root:
# There must be a another root, as well (e.g. `$1` for a storage
# root). Only the last one of some will be reported with error code.
rootWasDeleted = root
elif not db.getKey(root).isValid:
# Need to process `root` node
let children = vtx.subVids
if children.len == 0:
# This is an isolated leaf node
wff.leaf.add root
proc getKey(db: AristoDbRef; vid: VertexID): HashKey =
block body:
let key = db.layersGetKey(vid).valueOr:
break body
if key.isValid:
return key
else:
wff.root.incl root
for child in vtx.subVids:
if not db.getKey(child).isValid:
leafs.incl child
wff.register(child, root)
if rootWasDeleted.isValid and
wff.root.len == 0 and
wff.leaf.len == 0:
return err((rootWasDeleted,HashifyRootVtxUnresolved))
return VOID_HASH_KEY
let rc = db.getKeyBE vid
if rc.isOk:
return rc.value
VOID_HASH_KEY

# Initialisation for `proof` nodes which are sort of similar to `root` nodes.
for proof in proofs:
let vtx = db.getVtx proof
if vtx.isNil or not db.getKey(proof).isValid:
return err((proof,HashifyVtxUnresolved))
let children = vtx.subVids
if 0 < children.len:
# To be treated as a root node
wff.root.incl proof
for child in vtx.subVids:
if not db.getKey(child).isValid:
leafs.incl child
wff.register(child, proof)
let key = getKey(db, vid)
if key.isValid():
# debugEcho "ok ", vid, " ", key
return ok key

# Recursively step down and collect unlabelled vertices
while 0 < leafs.len:
var redo: typeof(leafs)
#let vtx = db.getVtx(vid)
#doAssert vtx.isValid()
let vtx = ? db.getVtxRc vid

for parent in leafs:
assert parent.isValid
assert not db.getKey(parent).isValid
# TODO this is the same code as when serializing NodeRef, without the NodeRef
var rlp = initRlpWriter()

let vtx = db.getVtx parent
if not vtx.isNil:
let children = vtx.subVids.filterIt(not db.getKey(it).isValid)
if 0 < children.len:
for child in children:
redo.incl child
wff.register(child, parent)
continue
case vtx.vType:
of Leaf:
rlp.startList(2)
rlp.append(vtx.lPfx.toHexPrefix(isLeaf = true))
# Need to resolve storage root for account leaf
case vtx.lData.pType
of AccountData:
let vid = vtx.lData.stoID
let key = if vid.isValid:
?db.computeKey(vid)
# if not key.isValid:
# block looseCoupling:
# when LOOSE_STORAGE_TRIE_COUPLING:
# # Stale storage trie?
# if LEAST_FREE_VID <= vid.distinctBase and
# not db.getVtx(vid).isValid:
# node.lData.account.storageID = VertexID(0)
# break looseCoupling
# # Otherwise this is a stale storage trie.
# return err(@[vid])
else:
VOID_HASH_KEY

if parent notin wff.base:
# The buck stops here:
# move `(parent,granny)` from `pool[]` to `base[]`
let granny = wff.pool.getOrVoid parent
assert granny.isValid
wff.register(parent, granny)
wff.pool.del parent
rlp.append(encode Account(
nonce: vtx.lData.account.nonce,
balance: vtx.lData.account.balance,
storageRoot: key.to(Hash256),
codeHash: vtx.lData.account.codeHash)
)
of RawData:
rlp.append(vtx.lData.rawBlob)

redo.swap leafs
of Branch:
rlp.startList(17)
for n in 0..15:
let vid = vtx.bVid[n]
if vid.isValid:
rlp.append(?db.computeKey(vid))
else:
rlp.append(VOID_HASH_KEY)
rlp.append EmptyBlob

ok()
of Extension:
rlp.startList(2)
rlp.append(vtx.ePfx.toHexPrefix(isleaf = false))
rlp.append(?db.computeKey(vtx.eVid))

# ------------------------------------------------------------------------------
# Private functions, tree traversal
# ------------------------------------------------------------------------------

proc createSched(
wff: var WidthFirstForest; # Search tree to create
db: AristoDbRef; # Database, top layer
): Result[void,(VertexID,AristoError)] =
## Create width-first search schedule (aka forest)
##
? db.pedigree(wff, db.dirty, db.pPrf)

if 0 < wff.leaf.len:
for vid in wff.leaf:
let node = db.getVtx(vid).toNode(db, beKeyOk=false).valueOr:
# Make sure that all those nodes are reachable
for needed in error:
if needed notin wff.base and
needed notin wff.pool:
return err((needed,HashifyVtxUnresolved))
continue
db.layersPutKey(VertexID(1), vid, node.digestTo(HashKey))
wff.leaf.reset() # No longer needed

ok()
let h = rlp.finish().digestTo(HashKey)
# TODO This shouldn't necessarily go into the database if we're just computing
# a key ephemerally - it should however be cached for some tiem since
# deep hash computations are expensive
# debugEcho "putkey ", vtx.vType, " ", vid, " ", h, " ", toHex(rlp.finish)
db.layersPutKey(VertexID(1), vid, h)
ok h


proc processSched(
wff: var WidthFirstForest; # Search tree to process
db: AristoDbRef; # Database, top layer
): Result[void,(VertexID,AristoError)] =
## Traverse width-first schedule and update vertex hash labels.
##
while 0 < wff.base.len:
var
accept = false
redo: typeof(wff.base)

for (vid,toVid) in wff.base.pairs:
let vtx = db.getVtx vid
assert vtx.isValid

# Try to convert the vertex to a node. This is possible only if all
# link references have Merkle hash keys, already.
let node = vtx.toNode(db, stopEarly=false).valueOr:
# Do this vertex later, again
if wff.pool.hasValue(vid, wff):
wff.pool[vid] = toVid
accept = true # `redo[]` will be fifferent from `base[]`
else:
redo[vid] = toVid
continue
# End `valueOr` terminates error clause

# Could resolve => update Merkle hash
db.layersPutKey(VertexID(1), vid, node.digestTo HashKey)

# Set follow up link for next round
let toToVid = wff.pool.getOrVoid toVid
if toToVid.isValid:
if toToVid in redo:
# Got predecessor `(toVid,toToVid)` of `(toToVid,xxx)`,
# so move `(toToVid,xxx)` from `redo[]` to `pool[]`
wff.pool[toToVid] = redo.getOrVoid toToVid
redo.del toToVid
# Move `(toVid,toToVid)` from `pool[]` to `redo[]`
wff.pool.del toVid
redo[toVid] = toToVid

accept = true # `redo[]` will be fifferent from `base[]`
# End `for (vid,toVid)..`

# Make sure that `base[]` is different from `redo[]`
if not accept:
let vid = wff.base.keys.toSeq[0]
return err((vid,HashifyVtxUnresolved))
# Restart `wff.base[]`
wff.base.swap redo

ok()


proc finaliseRoots(
wff: var WidthFirstForest; # Search tree to process
db: AristoDbRef; # Database, top layer
): Result[void,(VertexID,AristoError)] =
## Process root vertices after all other vertices are done.
##
# Make sure that the pool has been exhausted
if 0 < wff.pool.len:
let vid = wff.pool.keys.toSeq.sorted[0]
return err((vid,HashifyVtxUnresolved))

# Update or verify root nodes
for vid in wff.root:
# Calculate hash key
let
node = db.getVtx(vid).toNode(db).valueOr:
return err((vid,HashifyRootVtxUnresolved))
key = node.digestTo(HashKey)
if vid notin db.pPrf:
db.layersPutKey(VertexID(1), vid, key)
elif key != db.getKey vid:
return err((vid,HashifyProofHashMismatch))

ok()

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc hashify*(
db: AristoDbRef; # Database, top layer
): Result[void,(VertexID,AristoError)] =
## Add keys to the `Patricia Trie` so that it becomes a `Merkle Patricia
## Tree`.
##
if 0 < db.dirty.len:
# Set up widh-first traversal schedule
var wff: WidthFirstForest
? wff.createSched db

# Traverse tree spanned by `wff` and label remaining vertices.
? wff.processSched db

# Do/complete state root vertices
? wff.finaliseRoots db

db.top.final.dirty.clear # Mark top layer clean

ok()

# ------------------------------------------------------------------------------
# End

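Editorial aside on the hunk above (not part of the commit): `computeKey` replaces the width-first `hashify()` pass with a plain recursion that derives a vertex key from its children only when a root is actually requested, memoising the result in the top layer via `layersPutKey`; writes reset the cached keys along the touched path (see `layersResKey` further down), so the next query recomputes just that path. A toy, self-contained model of the read side, using stand-in types and hashing rather than the Aristo ones:

```nim
# Toy sketch only - types, ids and hashing are stand-ins, not Aristo's.
import std/[tables, hashes]

type
  ToyVertex = object
    payload: string            # stand-in for the vertex data
    children: seq[int]         # ids of child vertices

  ToyDb = object
    vtx: Table[int, ToyVertex] # structural data, always present
    key: Table[int, Hash]      # hash cache, filled lazily

proc computeToyKey(db: var ToyDb; vid: int): Hash =
  ## Compute (and memoise) the hash of `vid`, touching only its sub-trie.
  db.key.withValue(vid, cached):
    return cached[]                   # served from the cache
  let v = db.vtx[vid]
  var h = hash(v.payload)
  for child in v.children:
    h = h !& db.computeToyKey(child)  # recurse only where needed
  result = !$h
  db.key[vid] = result                # remember for later queries

when isMainModule:
  var db = ToyDb()
  db.vtx[2] = ToyVertex(payload: "leaf")
  db.vtx[1] = ToyVertex(payload: "root", children: @[2])
  echo db.computeToyKey(1)  # hashes vertex 2 then 1; a repeat call is cached
```
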
@@ -126,7 +126,7 @@ func layersPutVtx*(
) =
## Store a (potentally empty) vertex on the top layer
db.top.delta.sTab[vid] = vtx
db.top.final.dirty.incl root
# db.top.final.dirty.incl root

func layersResVtx*(
db: AristoDbRef;

@@ -146,7 +146,7 @@ func layersPutKey*(
) =
## Store a (potentally void) hash key on the top layer
db.top.delta.kMap[vid] = key
db.top.final.dirty.incl root # Modified top cache layers => hashify
# db.top.final.dirty.incl root # Modified top cache layers => hashify

func layersResKey*(db: AristoDbRef; root: VertexID; vid: VertexID) =

@@ -31,7 +31,7 @@ proc aristoError(error: AristoError): NodeRef =
## Allows returning de
NodeRef(vType: Leaf, error: error)

proc serialise(
proc serialise*(
pyl: PayloadRef;
getKey: ResolveVidFn;
): Result[Blob,(VertexID,AristoError)] =

@@ -125,15 +125,15 @@ proc read*(rlp: var Rlp; T: type NodeRef): T {.gcsafe, raises: [RlpError].} =

aristoError(Rlp2Or17ListEntries)

func append*(w: var RlpWriter; key: HashKey) =
if 1 < key.len and key.len < 32:
w.appendRawBytes key.data
else:
w.append key.data

proc append*(writer: var RlpWriter; node: NodeRef) =
## Mixin for RLP writer. Note that a `Dummy` node is encoded as an empty
## list.
func addHashKey(w: var RlpWriter; key: HashKey) =
if 1 < key.len and key.len < 32:
w.appendRawBytes key.data
else:
w.append key.data

if node.error != AristoError(0):
writer.startList(0)

@@ -142,13 +142,13 @@ proc append*(writer: var RlpWriter; node: NodeRef) =
of Branch:
writer.startList(17)
for n in 0..15:
writer.addHashKey node.key[n]
writer.append node.key[n]
writer.append EmptyBlob

of Extension:
writer.startList(2)
writer.append node.ePfx.toHexPrefix(isleaf = false)
writer.addHashKey node.key[0]
writer.append node.key[0]

of Leaf:
proc getKey0(vid: VertexID): Result[HashKey,AristoError] {.noRaise.} =

@@ -16,7 +16,7 @@
import
eth/common,
results,
"."/[aristo_constants, aristo_desc, aristo_get, aristo_hashify, aristo_init,
"."/[aristo_constants, aristo_desc, aristo_hashify, aristo_init,
aristo_merge]

# ------------------------------------------------------------------------------

@@ -54,13 +54,8 @@ proc merkleSignCommit*(
return ok VOID_HASH_KEY
if sdb.error != AristoError(0):
return err((sdb.errKey, sdb.error))
sdb.db.hashify().isOkOr:
let w = (EmptyBlob, error[1])
return err(w)
let hash = sdb.db.getKeyRc(sdb.root).valueOr:
let w = (EmptyBlob, error)
return err(w)
ok hash

ok sdb.db.computeKey(sdb.root).expect("ok")

# ------------------------------------------------------------------------------
# End

@@ -122,8 +122,6 @@ proc txFrameCommit*(
## previous transaction is returned if there was any.
##
let db = ? tx.getDbDescFromTopTx()
db.hashify().isOkOr:
return err(error[1])

# Pop layer from stack and merge database top layer onto it
let merged = block:

@@ -162,11 +160,6 @@ proc txFrameCollapse*(
##
let db = ? tx.getDbDescFromTopTx()

if commit:
# For commit, hashify the current layer if requested and install it
db.hashify().isOkOr:
return err(error[1])

db.top.txUid = 0
db.stack.setLen(0)
db.txRef = AristoTxRef(nil)

@@ -46,15 +46,17 @@ proc getBeStateRoot(
# This layer is unusable, need both: vertex and key
return err(TxPrettyPointlessLayer)

elif not db.top.delta.sTab.getOrVoid(VertexID 1).isValid:
# Root key and vertex have been deleted
return ok(srcRoot)
ok(srcRoot)

elif chunkedMpt and srcRoot == db.top.delta.kMap.getOrVoid VertexID(1):
# FIXME: this one needs to be double checked with `snap` sunc preload
return ok(srcRoot)

err(TxStateRootMismatch)
#elif not db.top.delta.sTab.getOrVoid(VertexID 1).isValid:
# # Root key and vertex have been deleted
# return ok(srcRoot)
#
#elif chunkedMpt and srcRoot == db.top.delta.kMap.getOrVoid VertexID(1):
# # FIXME: this one needs to be double checked with `snap` sunc preload
# return ok(srcRoot)
#
#err(TxStateRootMismatch)

proc topMerge(db: AristoDbRef; src: HashKey): Result[void,AristoError] =

|
|||
if persistent and not db.deltaPersistentOk():
|
||||
return err(TxBackendNotWritable)
|
||||
|
||||
# Update Merkle hashes (unless disabled)
|
||||
db.hashify().isOkOr:
|
||||
return err(error[1])
|
||||
|
||||
# Verify database consistency and get `src` field for update
|
||||
let rc = db.getBeStateRoot chunkedMpt
|
||||
if rc.isErr and rc.error != TxPrettyPointlessLayer:
|
||||
|
|
|
@@ -185,8 +185,8 @@ proc updateAccountForHasher*(
db.layersResKey(hike.root, w)

# Signal to `hashify()` where to start rebuilding Merkel hashes
db.top.final.dirty.incl hike.root
db.top.final.dirty.incl hike.legs[^1].wp.vid
# db.top.final.dirty.incl hike.root
# db.top.final.dirty.incl hike.legs[^1].wp.vid

# ------------------------------------------------------------------------------
# End

@@ -14,7 +14,7 @@ import
std/tables,
eth/common,
../../aristo as use_ari,
../../aristo/aristo_walk,
../../aristo/[aristo_walk, aristo_serialise],
../../kvt as use_kvt,
../../kvt/[kvt_init/memory_only, kvt_walk],
".."/[base, base/base_desc],

@@ -142,18 +142,8 @@ proc mptMethods(): CoreDbMptFns =

proc mptState(cMpt: AristoCoreDbMptRef, updateOk: bool): CoreDbRc[Hash256] =
const info = "mptState()"

let rc = api.fetchGenericState(mpt, cMpt.mptRoot)
if rc.isOk:
return ok(rc.value)
elif not updateOk and rc.error != GetKeyUpdateNeeded:
return err(rc.error.toError(base, info))

# FIXME: `hashify()` should probably throw an assert on failure
? api.hashify(mpt).toVoidRc(base, info, HashNotAvailable)

let state = api.fetchGenericState(mpt, cMpt.mptRoot).valueOr:
raiseAssert info & ": " & $error
let state = api.fetchGenericState(mpt, cMpt.mptRoot, updateOk).valueOr:
return err(error.toError(base, info))
ok(state)

## Generic columns database handlers

@@ -256,18 +246,8 @@ proc accMethods(): CoreDbAccFns =
updateOk: bool;
): CoreDbRc[Hash256] =
const info = "accStateFn()"

let rc = api.fetchAccountState(mpt)
if rc.isOk:
return ok(rc.value)
elif not updateOk and rc.error != GetKeyUpdateNeeded:
return err(rc.error.toError(base, info))

# FIXME: `hashify()` should probably throw an assert on failure
? api.hashify(mpt).toVoidRc(base, info, HashNotAvailable)

let state = api.fetchAccountState(mpt).valueOr:
raiseAssert info & ": " & $error
let state = api.fetchAccountState(mpt, updateOk).valueOr:
return err(error.toError(base, info))
ok(state)

@@ -330,17 +310,7 @@ proc accMethods(): CoreDbAccFns =
updateOk: bool;
): CoreDbRc[Hash256] =
const info = "slotStateFn()"

let rc = api.fetchStorageState(mpt, accPath)
if rc.isOk:
return ok(rc.value)
elif not updateOk and rc.error != GetKeyUpdateNeeded:
return err(rc.error.toError(base, info))

# FIXME: `hashify()` should probably throw an assert on failure
? api.hashify(mpt).toVoidRc(base, info, HashNotAvailable)

let state = api.fetchStorageState(mpt, accPath).valueOr:
let state = api.fetchStorageState(mpt, accPath, updateOk).valueOr:
return err(error.toError(base, info))
ok(state)

@@ -183,7 +183,7 @@ proc blobify(
if rc.isOk:
rc.value
else:
? api.hashify(mpt)
# TODO ? api.hashify(mpt)
? api.serialise(mpt, pyl)
ok(blob)

@@ -174,7 +174,7 @@ proc state*(ac: AccountsLedgerRef): KeccakHash =
doAssert(ac.savePoint.parentSavepoint.isNil)
# make sure all cache already committed
doAssert(ac.isDirty == false)
ac.ledger.state.valueOr:
ac.ledger.state(updateOk=true).valueOr:
raiseAssert info & $$error

proc isTopLevelClean*(ac: AccountsLedgerRef): bool =

@@ -247,14 +247,14 @@ proc checkBeOk(
noisy = true;
): bool =
## ..
for n in 0 ..< dx.len:
let cache = if forceCache: true else: dx[n].dirty.len == 0
block:
let rc = dx[n].checkBE(relax=relax, cache=cache, fifos=fifos)
xCheckRc rc.error == (0,0):
noisy.say "***", "db checkBE failed",
" n=", n, "/", dx.len-1,
" cache=", cache
#for n in 0 ..< dx.len:
# let cache = if forceCache: true else: dx[n].dirty.len == 0
# block:
# let rc = dx[n].checkBE(relax=relax, cache=cache, fifos=fifos)
# xCheckRc rc.error == (0,0):
# noisy.say "***", "db checkBE failed",
# " n=", n, "/", dx.len-1,
# " cache=", cache
true

# ------------------------------------------------------------------------------

@@ -13,7 +13,7 @@ import
eth/common,
stew/endians2,
../../nimbus/db/aristo/[
aristo_debug, aristo_desc, aristo_hashify, aristo_hike, aristo_merge],
aristo_debug, aristo_desc, aristo_hike, aristo_merge],
../../nimbus/db/kvstore_rocksdb,
../../nimbus/sync/protocol/snap/snap_types,
../replay/[pp, undump_accounts, undump_storages],

@@ -201,15 +201,6 @@ func mapRootVid*(
# Public functions
# ------------------------------------------------------------------------------

proc hashify*(
db: AristoDbRef;
noisy: bool;
): Result[void,(VertexID,AristoError)] =
when declared(aristo_hashify.noisy):
aristo_hashify.exec(noisy, aristo_hashify.hashify(db))
else:
aristo_hashify.hashify(db)

proc mergeGenericData*(
db: AristoDbRef; # Database, top layer
leaf: LeafTiePayload; # Leaf item to add to the database

@@ -146,9 +146,9 @@ proc saveToBackend(
# Verify context: nesting level must be 2 (i.e. two transactions)
xCheck tx.level == 2

block:
let rc = db.checkTop()
xCheckRc rc.error == (0,0)
#block:
# let rc = db.checkTop()
# xCheckRc rc.error == (0,0)

# Commit and hashify the current layer
block:

@@ -166,9 +166,9 @@ proc saveToBackend(
# Verify context: nesting level must be 1 (i.e. one transaction)
xCheck tx.level == 1

block:
let rc = db.checkBE(relax=true)
xCheckRc rc.error == (0,0)
#block:
# let rc = db.checkBE(relax=true)
# xCheckRc rc.error == (0,0)

# Commit and save to backend
block:

@@ -186,10 +186,10 @@ proc saveToBackend(
let rc = db.schedStow(chunkedMpt=chunkedMpt)
xCheckRc rc.error == 0

block:
let rc = db.checkBE(relax=relax)
xCheckRc rc.error == (0,0):
noisy.say "***", "saveToBackend (8)", " debugID=", debugID
#block:
# let rc = db.checkBE(relax=relax)
# xCheckRc rc.error == (0,0):
# noisy.say "***", "saveToBackend (8)", " debugID=", debugID

# Update layers to original level
tx = db.txBegin().value.to(AristoDbRef).txBegin().value

@@ -238,7 +238,11 @@ proc saveToBackendWithOops(

block:
let rc = db.schedStow(chunkedMpt=chunkedMpt)
xCheckRc rc.error == 0
xCheckRc rc.error == 0:
noisy.say "***", "saveToBackendWithOops(8)",
" debugID=", debugID,
"\n db\n ", db.pp(backendOk=true),
""

# Update layers to original level
tx = db.txBegin().value.to(AristoDbRef).txBegin().value