nimbus-eth1/nimbus/db/aristo/aristo_merge.nim

# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Patricia Trie builder, raw node insertion
## ======================================================
##
## This module merges `PathID` values as hexary lookup paths into the
## `Patricia Trie`. When vertices (aka nodes without Merkle hashes) are
## changed, the associated (but separately stored) Merkle hashes are deleted
## unless locked; instead of deleting a locked hash, an error is returned.
##
## Also, nodes (vertices plus Merkle hashes) can be added, which is needed
## for boundary proofing after a `snap/1` download. The vertices are split
## from the nodes and stored as-is on the table holding `Patricia Trie`
## entries. The hashes are stored in a separate table and the vertices are
## labelled `locked`.
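##
## A minimal usage sketch for the public API below (assuming a database
## descriptor `db` obtained from one of the `aristo_init` constructors,
## which are not shown here, and pre-computed `accPath`/`accRec` arguments):
##
## ::
##   let rc = db.mergeAccountRecord(accPath, accRec)
##   if rc.isOk:
##     # `true` if the leaf was added or replaced, `false` if the same
##     # value was already on the database
##     echo rc.value
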
{.push raises: [].}
import
  std/typetraits,
  eth/common,
  results,
  "."/[aristo_desc, aristo_fetch, aristo_get, aristo_layers, aristo_vid]

proc layersPutLeaf(
    db: AristoDbRef, rvid: RootedVertexID, path: NibblesBuf, payload: LeafPayload
): VertexRef =
  ## Create a leaf vertex with the given `path` and `payload` at `rvid` on
  ## the top layer and return it.
  let vtx = VertexRef(vType: Leaf, pfx: path, lData: payload)
  db.layersPutVtx(rvid, vtx)
  vtx

proc mergePayloadImpl(
    db: AristoDbRef,                   # Database, top layer
    root: VertexID,                    # MPT state root
    path: openArray[byte],             # Leaf item to add to the database
    leaf: Opt[VertexRef],              # Cached leaf vertex, if any
    payload: LeafPayload,              # Payload value
): Result[(VertexRef, VertexRef, VertexRef), AristoError] =
  ## Merge the argument `(root,path)` key-value-pair into the top level vertex
  ## table of the database `db`. The `path` argument is used to address the
  ## leaf vertex with the payload. It is stored or updated on the database
  ## accordingly.
  ##
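  ## On success, the returned triple holds the merged leaf vertex plus, when
  ## an existing leaf had to be split into a branch, the displaced original
  ## leaf (with its `pfx` not yet sliced off) and its copy re-inserted one
  ## level deeper; the latter two entries are `nil` otherwise.
  ##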
  var
    path = NibblesBuf.fromBytes(path)
    cur = root
    (vtx, _) = db.getVtxRc((root, cur)).valueOr:
      if error != GetVtxNotFound:
        return err(error)

      # We're at the root vertex and there is no data - this must be a fresh
      # VertexID!
      return ok (db.layersPutLeaf((root, cur), path, payload), nil, nil)

    steps: ArrayBuf[NibblesBuf.high + 1, VertexID]

  template resetKeys() =
    # Reset cached hashes of touched vertices
    for i in 1..steps.len:
      db.layersResKey((root, steps[^i]))

  while path.len > 0:
    # Clear existing merkle keys along the traversal path
    steps.add cur

    let n = path.sharedPrefixLen(vtx.pfx)
    case vtx.vType
    of Leaf:
      let res =
        if n == vtx.pfx.len:
          # Same path - replace the current vertex with a new payload
          if vtx.lData == payload:
            return err(MergeNoAction)

          let leafVtx = if root == VertexID(1):
            var payload = payload.dup()
            # TODO can we avoid this hack? it feels like the caller should
            #      already have set an appropriate stoID - this "fixup" feels
            #      risky, especially from a caching point of view
            payload.stoID = vtx.lData.stoID
            db.layersPutLeaf((root, cur), path, payload)
          else:
            db.layersPutLeaf((root, cur), path, payload)
          (leafVtx, nil, nil)
        else:
          # Turn leaf into a branch (or extension) then insert the two leaves
          # into the branch
          let branch = VertexRef(vType: Branch, pfx: path.slice(0, n))
          let other = block: # Copy of existing leaf node, now one level deeper
            let local = db.vidFetch()
            branch.bVid[vtx.pfx[n]] = local
            db.layersPutLeaf((root, local), vtx.pfx.slice(n + 1), vtx.lData)

          let leafVtx = block: # Newly inserted leaf node
            let local = db.vidFetch()
            branch.bVid[path[n]] = local
            db.layersPutLeaf((root, local), path.slice(n + 1), payload)

          # Put the branch at the vid where the leaf was
          db.layersPutVtx((root, cur), branch)

          # We need to return vtx here because its pfx member hasn't yet been
          # sliced off and is therefore shared with the hike
          (leafVtx, vtx, other)

      resetKeys()
      return ok(res)
    of Branch:
      if vtx.pfx.len == n:
        # The existing branch is a prefix of the new entry
        let
          nibble = path[vtx.pfx.len]
          next = vtx.bVid[nibble]

        if next.isValid:
          cur = next
          path = path.slice(n + 1)
          vtx =
            if leaf.isSome and leaf[].isValid and leaf[].pfx == path:
              leaf[]
            else:
              (?db.getVtxRc((root, next)))[0]
        else:
          # There's no vertex at the branch point - insert the payload as a new
          # leaf and update the existing branch
          let
            local = db.vidFetch()
            leafVtx = db.layersPutLeaf((root, local), path.slice(n + 1), payload)
            brDup = vtx.dup()

          brDup.bVid[nibble] = local
          db.layersPutVtx((root, cur), brDup)

          resetKeys()
          return ok((leafVtx, nil, nil))
      else:
        # Partial path match - we need to split the existing branch at
        # the point of divergence, inserting a new branch
        let branch = VertexRef(vType: Branch, pfx: path.slice(0, n))
        block: # Copy the existing vertex and add it to the new branch
          let local = db.vidFetch()
          branch.bVid[vtx.pfx[n]] = local
          db.layersPutVtx(
            (root, local),
            VertexRef(vType: Branch, pfx: vtx.pfx.slice(n + 1), bVid: vtx.bVid),
          )

        let leafVtx = block: # add the new entry
          let local = db.vidFetch()
          branch.bVid[path[n]] = local
          db.layersPutLeaf((root, local), path.slice(n + 1), payload)

        db.layersPutVtx((root, cur), branch)

        resetKeys()
        return ok((leafVtx, nil, nil))

  err(MergeHikeFailed)
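
# Illustration (nibble values made up for this comment): merging path
# `0x12ab..` where the trie currently holds a leaf with prefix `0x12cd..`
# yields a shared prefix of two nibbles, so the leaf is replaced by a branch
# with `pfx = 0x12`. The old leaf is re-inserted under branch nibble `c` with
# prefix `d..`, the new one under nibble `a` with prefix `b..`, each at a
# fresh `vidFetch()` vertex ID, and the triple `(new, old, moved)` is
# returned.
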
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc mergeAccountRecord*(
    db: AristoDbRef;                   # Database, top layer
    accPath: Hash256;                  # Even nibbled byte path
    accRec: AristoAccount;             # Account data
      ): Result[bool,AristoError] =
  ## Merge the key-value-pair argument `(accPath,accRec)` as an account
  ## ledger value, i.e. the sub-tree starting at `VertexID(1)`.
  ##
  ## On success, the function returns `true` if the `accRec` argument was
  ## not on the database already or differed from the stored value, and
  ## `false` otherwise.
  ##
  let
    pyl = LeafPayload(pType: AccountData, account: accRec)
    updated = db.mergePayloadImpl(
        VertexID(1), accPath.data, db.cachedAccLeaf(accPath), pyl).valueOr:
      if error == MergeNoAction:
        return ok false
      return err(error)

  # Update leaf cache both of the merged value and potentially the displaced
  # leaf resulting from splitting a leaf into a branch with two leaves
  db.layersPutAccLeaf(accPath, updated[0])
  if updated[1].isValid:
    let otherPath = Hash256(data: getBytes(
      NibblesBuf.fromBytes(accPath.data).replaceSuffix(updated[1].pfx)))
    db.layersPutAccLeaf(otherPath, updated[2])

  ok true

proc mergeGenericData*(
    db: AristoDbRef;                   # Database, top layer
    root: VertexID;                    # MPT state root
    path: openArray[byte];             # Leaf item to add to the database
    data: openArray[byte];             # Raw data payload value
      ): Result[bool,AristoError] =
  ## Variant of `mergeXXX()` for generic sub-trees, i.e. for arguments
  ## `root` greater than `VertexID(1)` and smaller than `LEAST_FREE_VID`.
  ##
  ## On success, the function returns `true` if the `data` argument was merged
  ## into the database or updated, and `false` if it was on the database
  ## already.
  ##
  # Verify that `root` is neither an accounts tree nor a storage tree.
  if not root.isValid:
    return err(MergeRootVidMissing)
  elif root == VertexID(1):
    return err(MergeAccRootNotAccepted)
  elif LEAST_FREE_VID <= root.distinctBase:
    return err(MergeStoRootNotAccepted)
  let
    pyl = LeafPayload(pType: RawData, rawBlob: @data)

  discard db.mergePayloadImpl(root, path, Opt.none(VertexRef), pyl).valueOr:
    if error == MergeNoAction:
      return ok false
    return err error

  ok true
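
# A minimal call sketch (illustrative only; `key` and `value` are placeholder
# names, and `VertexID(2)` stands for any generic sub-tree root between
# `VertexID(1)` and `LEAST_FREE_VID`):
#
#   let merged = db.mergeGenericData(VertexID(2), key, value).valueOr:
#     return err(error)
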
proc mergeStorageData*(
    db: AristoDbRef;                   # Database, top layer
    accPath: Hash256;                  # Needed for accounts payload
    stoPath: Hash256;                  # Storage data path (aka key)
    stoData: UInt256;                  # Storage data payload value
      ): Result[void,AristoError] =
  ## Store the `stoData` data argument on the storage area addressed by
  ## `(accPath,stoPath)` where `accPath` is the account key (into the MPT)
  ## and `stoPath` is the slot path of the corresponding storage area.
  ##
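  ## If the account has no storage area yet, a fresh storage root vertex ID
  ## is allocated on the fly (cf. `useID` below) and the account leaf is
  ## updated to refer to it, so callers never create storage roots
  ## explicitly.
  ##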
  var accHike: Hike
  db.fetchAccountHike(accPath, accHike).isOkOr:
    return err(MergeStoAccMissing)

  let
    stoID = accHike.legs[^1].wp.vtx.lData.stoID

    # Provide new storage ID when needed
    useID =
      if stoID.isValid: stoID                    # Use as is
      elif stoID.vid.isValid: (true, stoID.vid)  # Re-use previous vid
      else: (true, db.vidFetch())                # Create new vid

    mixPath = mixUp(accPath, stoPath)

    # Call merge
    pyl = LeafPayload(pType: StoData, stoData: stoData)
    updated = db.mergePayloadImpl(
        useID.vid, stoPath.data, db.cachedStoLeaf(mixPath), pyl).valueOr:
      if error == MergeNoAction:
        assert stoID.isValid # debugging only
        return ok()
      return err(error)

  # Mark account path Merkle keys for update
  db.layersResKeys(accHike)

  # Update leaf cache both of the merged value and potentially the displaced
  # leaf resulting from splitting a leaf into a branch with two leaves
  db.layersPutStoLeaf(mixPath, updated[0])
  if updated[1].isValid:
    let otherPath = Hash256(data: getBytes(
      NibblesBuf.fromBytes(stoPath.data).replaceSuffix(updated[1].pfx)))
    db.layersPutStoLeaf(mixUp(accPath, otherPath), updated[2])

  if not stoID.isValid:
    # Make sure that there is an account that refers to that storage trie
    let leaf = accHike.legs[^1].wp.vtx.dup # Dup on modify
    leaf.lData.stoID = useID
    db.layersPutAccLeaf(accPath, leaf)
    db.layersPutVtx((VertexID(1), accHike.legs[^1].wp.vid), leaf)

  ok()

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------