# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Patricia Trie builder, raw node insertion
## ======================================================
##
## This module merges `PathID` values as hexary lookup paths into the
## `Patricia Trie`. When changing vertices (aka nodes without Merkle hashes),
## associated (but separated) Merkle hashes will be deleted unless locked.
## Instead of deleting locked hashes, error handling is applied.
##
## Also, nodes (vertices plus Merkle hashes) can be added, which is needed for
## boundary proofing after `snap/1` download. The vertices are split from the
## nodes and stored as-is on the table holding `Patricia Trie` entries. The
## hashes are stored in a separate table and the vertices are labelled
## `locked`.

{.push raises: [].}
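
# A minimal usage sketch (assumptions: an initialised `AristoDbRef` named
# `db`; the byte values are hypothetical). Merging a raw key-value leaf into
# the main trie rooted at `VertexID(1)` via the `merge()` variant defined
# further below:
#
#   let rc = db.merge(VertexID(1), @[byte 1, 2, 3], @[byte 4, 5, 6], VOID_PATH_ID)
#   if rc.isOk:
#     doAssert rc.value       # `true` unless the leaf existed already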

import
  std/[algorithm, sequtils, strutils, sets, tables, typetraits],
  eth/[common, trie/nibbles],
  results,
  stew/keyed_queue,
  ../../sync/protocol/snap/snap_types,
  "."/[aristo_desc, aristo_get, aristo_hike, aristo_layers,
       aristo_path, aristo_serialise, aristo_utils, aristo_vid]

type
  LeafTiePayload* = object
    ## Generalised key-value pair for a sub-trie. The main trie is the
    ## sub-trie with `root=VertexID(1)`.
    leafTie*: LeafTie                  ## Full `Patricia Trie` path root-to-leaf
    payload*: PayloadRef               ## Leaf data payload

# ------------------------------------------------------------------------------
# Private getters & setters
# ------------------------------------------------------------------------------

proc xPfx(vtx: VertexRef): NibblesSeq =
  case vtx.vType:
  of Leaf:
    return vtx.lPfx
  of Extension:
    return vtx.ePfx
  of Branch:
    doAssert vtx.vType != Branch # Ooops

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc to(
    rc: Result[Hike,AristoError];
    T: type Result[bool,AristoError];
      ): T =
  ## Return code converter
  if rc.isOk:
    ok true
  elif rc.error in {MergeLeafPathCachedAlready,
                    MergeLeafPathOnBackendAlready}:
    ok false
  else:
    err(rc.error)
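
# Sketch of the conversion contract (hypothetical round trip): "already
# merged" outcomes are folded into `ok false` so that the boolean `merge()`
# variants can report whether the database actually changed:
#
#   let rc = Result[Hike,AristoError].err(MergeLeafPathCachedAlready)
#   doAssert rc.to(Result[bool,AristoError]) == Result[bool,AristoError].ok(false)
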
proc differ(
    db: AristoDbRef;                   # Database, top layer
    p1, p2: PayloadRef;                # Payload values
      ): bool =
  ## Check whether payloads differ on the database.
  ## If `p1` is `RLP` serialised and `p2` is a raw blob, compare serialisations.
  ## If `p1` is of account type and `p2` is serialised, translate `p2`
  ## to an account type and compare.
  ##
  if p1 == p2:
    return false

  # Adjust and check for divergent types.
  if p1.pType != p2.pType:
    if p1.pType == AccountData:
      try:
        let
          blob = (if p2.pType == RlpData: p2.rlpBlob else: p2.rawBlob)
          acc = rlp.decode(blob, Account)
        if acc.nonce == p1.account.nonce and
           acc.balance == p1.account.balance and
           acc.codeHash == p1.account.codeHash and
           acc.storageRoot.isValid == p1.account.storageID.isValid:
          if not p1.account.storageID.isValid or
             acc.storageRoot.to(HashKey) == db.getKey p1.account.storageID:
            return false
      except RlpError:
        discard

    elif p1.pType == RlpData:
      if p2.pType == RawData and p1.rlpBlob == p2.rawBlob:
        return false

  true
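
# Example (sketch, `blob` hypothetical): an `RlpData` payload and a `RawData`
# payload count as equal iff the raw blob matches the RLP serialisation byte
# for byte:
#
#   let
#     p1 = PayloadRef(pType: RlpData, rlpBlob: blob)
#     p2 = PayloadRef(pType: RawData, rawBlob: blob)
#   doAssert not db.differ(p1, p2)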

# -----------

proc clearMerkleKeys(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Implied vertex IDs to clear hashes for
    vid: VertexID;                     # Additional vertex IDs to clear
      ) =
  for w in hike.legs.mapIt(it.wp.vid) & @[vid]:
    db.layersResKey(hike.root, w)

proc setVtxAndKey(
    db: AristoDbRef;                   # Database, top layer
    root: VertexID;
    vid: VertexID;                     # Vertex IDs to add/clear
    vtx: VertexRef;                    # Vertex to add
      ) =
  db.layersPutVtx(root, vid, vtx)
  db.layersResKey(root, vid)

# -----------

proc insertBranch(
|
2023-07-04 18:24:03 +00:00
|
|
|
db: AristoDbRef; # Database, top layer
|
2023-06-09 11:17:37 +00:00
|
|
|
hike: Hike; # Current state
|
|
|
|
linkID: VertexID; # Vertex ID to insert
|
|
|
|
linkVtx: VertexRef; # Vertex to insert
|
2023-05-30 11:47:47 +00:00
|
|
|
payload: PayloadRef; # Leaf data payload
|
2023-09-15 15:23:53 +00:00
|
|
|
): Result[Hike,AristoError] =
|
2023-05-30 11:47:47 +00:00
|
|
|
##
|
|
|
|
## Insert `Extension->Branch` vertex chain or just a `Branch` vertex
|
|
|
|
##
|
2023-05-30 21:21:15 +00:00
|
|
|
## ... --(linkID)--> <linkVtx>
|
|
|
|
##
|
|
|
|
## <-- immutable --> <---- mutable ----> ..
|
2023-05-30 11:47:47 +00:00
|
|
|
##
|
|
|
|
## will become either
|
|
|
|
##
|
|
|
|
## --(linkID)-->
|
|
|
|
## <extVtx> --(local1)-->
|
2023-05-30 21:21:15 +00:00
|
|
|
## <forkVtx>[linkInx] --(local2)--> <linkVtx*>
|
2023-05-30 11:47:47 +00:00
|
|
|
## [leafInx] --(local3)--> <leafVtx>
|
|
|
|
##
|
|
|
|
## or in case that there is no common prefix
|
|
|
|
##
|
|
|
|
## --(linkID)-->
|
2023-05-30 21:21:15 +00:00
|
|
|
## <forkVtx>[linkInx] --(local2)--> <linkVtx*>
|
2023-05-30 11:47:47 +00:00
|
|
|
## [leafInx] --(local3)--> <leafVtx>
|
|
|
|
##
|
2023-05-30 21:21:15 +00:00
|
|
|
## *) vertex was slightly modified or removed if obsolete `Extension`
|
|
|
|
##
|
2023-05-30 11:47:47 +00:00
|
|
|
let n = linkVtx.xPfx.sharedPrefixLen hike.tail
|
|
|
|
|
|
|
|
# Verify minimum requirements
|
|
|
|
if hike.tail.len == n:
|
|
|
|
# Should have been tackeld by `hikeUp()`, already
|
2023-09-15 15:23:53 +00:00
|
|
|
return err(MergeLeafGarbledHike)
|
2023-05-30 11:47:47 +00:00
|
|
|
if linkVtx.xPfx.len == n:
|
2023-12-04 20:39:26 +00:00
|
|
|
return err(MergeBranchLinkVtxPfxTooShort)
|
2023-05-30 11:47:47 +00:00
|
|
|
|
|
|
|
# Provide and install `forkVtx`
|
|
|
|
let
|
|
|
|
forkVtx = VertexRef(vType: Branch)
|
|
|
|
linkInx = linkVtx.xPfx[n]
|
|
|
|
leafInx = hike.tail[n]
|
|
|
|
var
|
|
|
|
leafLeg = Leg(nibble: -1)
|
|
|
|
|
|
|
|
# Install `forkVtx`
|
|
|
|
block:
|
2023-06-12 18:16:03 +00:00
|
|
|
# Clear Merkle hashes (aka hash keys) unless proof mode.
|
2023-12-19 12:39:23 +00:00
|
|
|
if db.pPrf.len == 0:
|
2023-05-30 21:21:15 +00:00
|
|
|
db.clearMerkleKeys(hike, linkID)
|
2023-12-19 12:39:23 +00:00
|
|
|
elif linkID in db.pPrf:
|
2023-09-15 15:23:53 +00:00
|
|
|
return err(MergeNonBranchProofModeLock)
|
2023-05-30 11:47:47 +00:00
|
|
|
|
|
|
|
if linkVtx.vType == Leaf:
|
2024-05-03 17:38:17 +00:00
|
|
|
# Double check path prefix
|
|
|
|
if 64 < hike.legsTo(NibblesSeq).len + linkVtx.lPfx.len:
|
2023-12-04 20:39:26 +00:00
|
|
|
return err(MergeBranchLinkLeafGarbled)
|
2023-05-30 21:21:15 +00:00
|
|
|
|
2024-05-03 17:38:17 +00:00
|
|
|
let
|
|
|
|
local = db.vidFetch(pristine = true)
|
|
|
|
linkDup = linkVtx.dup
|
|
|
|
db.setVtxAndKey(hike.root, local, linkDup)
|
|
|
|
linkDup.lPfx = linkDup.lPfx.slice(1+n)
|
2023-05-30 21:21:15 +00:00
|
|
|
forkVtx.bVid[linkInx] = local
|
|
|
|
|
|
|
|
elif linkVtx.ePfx.len == n + 1:
|
|
|
|
# This extension `linkVtx` becomes obsolete
|
|
|
|
forkVtx.bVid[linkInx] = linkVtx.eVid
|
|
|
|
|
|
|
|
else:
|
2024-05-03 17:38:17 +00:00
|
|
|
let
|
|
|
|
local = db.vidFetch
|
|
|
|
linkDup = linkVtx.dup
|
|
|
|
db.setVtxAndKey(hike.root, local, linkDup)
|
|
|
|
linkDup.ePfx = linkDup.ePfx.slice(1+n)
|
2023-05-30 21:21:15 +00:00
|
|
|
forkVtx.bVid[linkInx] = local
|
2023-05-30 11:47:47 +00:00
|
|
|
|
|
|
|
block:
|
2023-06-30 22:22:33 +00:00
|
|
|
let local = db.vidFetch(pristine = true)
|
2023-05-30 11:47:47 +00:00
|
|
|
forkVtx.bVid[leafInx] = local
|
|
|
|
leafLeg.wp.vid = local
|
|
|
|
leafLeg.wp.vtx = VertexRef(
|
|
|
|
vType: Leaf,
|
|
|
|
lPfx: hike.tail.slice(1+n),
|
|
|
|
lData: payload)
|
2024-02-22 08:24:58 +00:00
|
|
|
db.setVtxAndKey(hike.root, local, leafLeg.wp.vtx)
|
2023-05-30 11:47:47 +00:00
|
|
|
|
|
|
|
# Update branch leg, ready to append more legs
|
2023-09-15 15:23:53 +00:00
|
|
|
var okHike = Hike(root: hike.root, legs: hike.legs)
|
2023-05-30 11:47:47 +00:00
|
|
|
|
|
|
|
# Update in-beween glue linking `branch --[..]--> forkVtx`
|
|
|
|
if 0 < n:
|
|
|
|
let extVtx = VertexRef(
|
|
|
|
vType: Extension,
|
|
|
|
ePfx: hike.tail.slice(0,n),
|
|
|
|
eVid: db.vidFetch)
|
|
|
|
|
2024-02-22 08:24:58 +00:00
|
|
|
db.setVtxAndKey(hike.root, linkID, extVtx)
|
2023-05-30 11:47:47 +00:00
|
|
|
|
2023-09-15 15:23:53 +00:00
|
|
|
okHike.legs.add Leg(
|
2023-05-30 11:47:47 +00:00
|
|
|
nibble: -1,
|
|
|
|
wp: VidVtxPair(
|
|
|
|
vid: linkID,
|
|
|
|
vtx: extVtx))
|
|
|
|
|
2024-02-22 08:24:58 +00:00
|
|
|
db.setVtxAndKey(hike.root, extVtx.eVid, forkVtx)
|
2023-09-15 15:23:53 +00:00
|
|
|
okHike.legs.add Leg(
|
2023-05-30 11:47:47 +00:00
|
|
|
nibble: leafInx.int8,
|
|
|
|
wp: VidVtxPair(
|
|
|
|
vid: extVtx.eVid,
|
|
|
|
vtx: forkVtx))
|
2023-06-20 13:26:25 +00:00
|
|
|
|
2023-05-30 11:47:47 +00:00
|
|
|
else:
|
2024-02-22 08:24:58 +00:00
|
|
|
db.setVtxAndKey(hike.root, linkID, forkVtx)
|
2023-09-15 15:23:53 +00:00
|
|
|
okHike.legs.add Leg(
|
2023-05-30 11:47:47 +00:00
|
|
|
nibble: leafInx.int8,
|
|
|
|
wp: VidVtxPair(
|
|
|
|
vid: linkID,
|
|
|
|
vtx: forkVtx))
|
|
|
|
|
2023-09-15 15:23:53 +00:00
|
|
|
okHike.legs.add leafLeg
|
|
|
|
ok okHike

proc concatBranchAndLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Path top has a `Branch` vertex
    brVid: VertexID;                   # Branch vertex ID from `Hike` top
    brVtx: VertexRef;                  # Branch vertex, linked to from `Hike`
    payload: PayloadRef;               # Leaf data payload
      ): Result[Hike,AristoError] =
  ## Append argument branch vertex passed as argument `(brID,brVtx)` and then
  ## a `Leaf` vertex derived from the argument `payload`.
  ##
  if hike.tail.len == 0:
    return err(MergeBranchGarbledTail)

  let nibble = hike.tail[0].int8
  if brVtx.bVid[nibble].isValid:
    return err(MergeRootBranchLinkBusy)

  # Clear Merkle hashes (aka hash keys) unless proof mode.
  if db.pPrf.len == 0:
    db.clearMerkleKeys(hike, brVid)
  elif brVid in db.pPrf:
    return err(MergeBranchProofModeLock) # Ooops

  # Append branch vertex
  var okHike = Hike(root: hike.root, legs: hike.legs)
  okHike.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)

  # Append leaf vertex
  let
    brDup = brVtx.dup
    vid = db.vidFetch(pristine = true)
    vtx = VertexRef(
      vType: Leaf,
      lPfx:  hike.tail.slice(1),
      lData: payload)
  brDup.bVid[nibble] = vid
  db.setVtxAndKey(hike.root, brVid, brDup)
  db.setVtxAndKey(hike.root, vid, vtx)
  okHike.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)

  ok okHike

# ------------------------------------------------------------------------------
# Private functions: add Patricia Trie leaf vertex
# ------------------------------------------------------------------------------

proc topIsBranchAddLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Path top has a `Branch` vertex
    payload: PayloadRef;               # Leaf data payload
      ): Result[Hike,AristoError] =
  ## Append a `Leaf` vertex derived from the argument `payload` after the top
  ## leg of the `hike` argument which is assumed to refer to a `Branch`
  ## vertex. If successful, the function returns the updated `hike` trail.
  if hike.tail.len == 0:
    return err(MergeBranchGarbledTail)

  let nibble = hike.legs[^1].nibble
  if nibble < 0:
    return err(MergeBranchGarbledNibble)

  let
    parent = hike.legs[^1].wp.vid
    branch = hike.legs[^1].wp.vtx
    linkID = branch.bVid[nibble]
    linkVtx = db.getVtx linkID

  if not linkVtx.isValid:
    #
    #  .. <branch>[nibble] --(linkID)--> nil
    #
    #  <-------- immutable ------------> <---- mutable ----> ..
    #
    if db.pPrf.len == 0:
      # Not much else that can be done here
      raiseAssert "Dangling edge:" &
        " pfx=" & $hike.legsTo(hike.legs.len-1,NibblesSeq) &
        " branch=" & $parent &
        " nibble=" & $nibble &
        " edge=" & $linkID &
        " tail=" & $hike.tail

    # Reuse placeholder entry in table
    let vtx = VertexRef(
      vType: Leaf,
      lPfx:  hike.tail,
      lData: payload)
    db.setVtxAndKey(hike.root, linkID, vtx)
    var okHike = Hike(root: hike.root, legs: hike.legs)
    okHike.legs.add Leg(wp: VidVtxPair(vid: linkID, vtx: vtx), nibble: -1)
    if parent notin db.pPrf:
      db.layersResKey(hike.root, parent)
    return ok(okHike)

  if linkVtx.vType == Branch:
    # Slot link to a branch vertex should be handled by `hikeUp()`
    #
    #  .. <branch>[nibble] --(linkID)--> <linkVtx>[]
    #
    #  <-------- immutable ------------> <---- mutable ----> ..
    #
    return db.concatBranchAndLeaf(hike, linkID, linkVtx, payload)

  db.insertBranch(hike, linkID, linkVtx, payload)

proc topIsExtAddLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Path top has an `Extension` vertex
    payload: PayloadRef;               # Leaf data payload
      ): Result[Hike,AristoError] =
  ## Append a `Leaf` vertex derived from the argument `payload` after the top
  ## leg of the `hike` argument which is assumed to refer to an `Extension`
  ## vertex. If successful, the function returns the updated `hike` trail.
  let
    extVtx = hike.legs[^1].wp.vtx
    extVid = hike.legs[^1].wp.vid
    brVid = extVtx.eVid
    brVtx = db.getVtx brVid

  var okHike = Hike(root: hike.root, legs: hike.legs)

  if not brVtx.isValid:
    # Blind vertex, promote to leaf vertex.
    #
    #  --(extVid)--> <extVtx> --(brVid)--> nil
    #
    #  <-------- immutable -------------->
    #
    let vtx = VertexRef(
      vType: Leaf,
      lPfx:  extVtx.ePfx & hike.tail,
      lData: payload)
    db.setVtxAndKey(hike.root, extVid, vtx)
    okHike.legs[^1].wp.vtx = vtx

  elif brVtx.vType != Branch:
    return err(MergeBranchRootExpected)

  else:
    let
      nibble = hike.tail[0].int8
      linkID = brVtx.bVid[nibble]
    #
    # Required
    #
    #  --(extVid)--> <extVtx> --(brVid)--> <brVtx>[nibble] --(linkID)--> nil
    #
    #  <-------- immutable --------------> <-------- mutable ----------> ..
    #
    if linkID.isValid:
      return err(MergeRootBranchLinkBusy)

    # Clear Merkle hashes (aka hash keys) unless proof mode
    if db.pPrf.len == 0:
      db.clearMerkleKeys(hike, brVid)
    elif brVid in db.pPrf:
      return err(MergeBranchProofModeLock)

    let
      brDup = brVtx.dup
      vid = db.vidFetch(pristine = true)
      vtx = VertexRef(
        vType: Leaf,
        lPfx:  hike.tail.slice(1),
        lData: payload)
    brDup.bVid[nibble] = vid
    db.setVtxAndKey(hike.root, brVid, brDup)
    db.setVtxAndKey(hike.root, vid, vtx)
    okHike.legs.add Leg(wp: VidVtxPair(vtx: brDup, vid: brVid), nibble: nibble)
    okHike.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)

  ok okHike

proc topIsEmptyAddLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # No path legs
    rootVtx: VertexRef;                # Root vertex
    payload: PayloadRef;               # Leaf data payload
      ): Result[Hike,AristoError] =
  ## Append a `Leaf` vertex derived from the argument `payload` after the
  ## argument vertex `rootVtx` and append both to the empty argument `hike`.
  if rootVtx.vType == Branch:
    let nibble = hike.tail[0].int8
    if rootVtx.bVid[nibble].isValid:
      return err(MergeRootBranchLinkBusy)

    # Clear Merkle hashes (aka hash keys) unless proof mode
    if db.pPrf.len == 0:
      db.clearMerkleKeys(hike, hike.root)
    elif hike.root in db.pPrf:
      return err(MergeBranchProofModeLock)

    let
      rootDup = rootVtx.dup
      leafVid = db.vidFetch(pristine = true)
      leafVtx = VertexRef(
        vType: Leaf,
        lPfx:  hike.tail.slice(1),
        lData: payload)
    rootDup.bVid[nibble] = leafVid
    db.setVtxAndKey(hike.root, hike.root, rootDup)
    db.setVtxAndKey(hike.root, leafVid, leafVtx)
    return ok Hike(
      root: hike.root,
      legs: @[Leg(wp: VidVtxPair(vtx: rootDup, vid: hike.root), nibble: nibble),
              Leg(wp: VidVtxPair(vtx: leafVtx, vid: leafVid), nibble: -1)])

  db.insertBranch(hike, hike.root, rootVtx, payload)

proc updatePayload(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # No path legs
    leafTie: LeafTie;                  # Leaf item to add to the database
    payload: PayloadRef;               # Payload value to add
      ): Result[Hike,AristoError] =
  ## Update leaf vertex if payloads differ
  let leafLeg = hike.legs[^1]

  # Update payloads if they differ
  if db.differ(leafLeg.wp.vtx.lData, payload):
    let vid = leafLeg.wp.vid
    if vid in db.pPrf:
      return err(MergeLeafProofModeLock)

    # Verify that the account leaf can be replaced
    if leafTie.root == VertexID(1):
      if leafLeg.wp.vtx.lData.pType != payload.pType:
        return err(MergeLeafCantChangePayloadType)
      if payload.pType == AccountData and
         payload.account.storageID != leafLeg.wp.vtx.lData.account.storageID:
        return err(MergeLeafCantChangeStorageID)

    # Update vertex and hike
    let vtx = VertexRef(
      vType: Leaf,
      lPfx:  leafLeg.wp.vtx.lPfx,
      lData: payload)
    var hike = hike
    hike.legs[^1].wp.vtx = vtx

    # Modify top level cache
    db.setVtxAndKey(hike.root, vid, vtx)
    db.clearMerkleKeys(hike, vid)
    ok hike

  elif db.layersGetVtx(leafLeg.wp.vid).isErr:
    err(MergeLeafPathOnBackendAlready)

  else:
    err(MergeLeafPathCachedAlready)

# ------------------------------------------------------------------------------
# Private functions: add Merkle proof node
# ------------------------------------------------------------------------------

proc mergeNodeImpl(
    db: AristoDbRef;                   # Database, top layer
    hashKey: HashKey;                  # Merkle hash of node (or so)
    node: NodeRef;                     # Node derived from RLP representation
    rootVid: VertexID;                 # Current sub-trie
      ): Result[void,AristoError] =
  ## The function merges the argument hash key `hashKey` as expanded from the
  ## node RLP representation into the `Aristo Trie` database. The vertex is
  ## split off from the node and stored separately. So are the Merkle hashes.
  ## The vertex is labelled `locked`.
  ##
  ## The `node` argument is *not* checked, whether the vertex IDs have been
  ## allocated, already. If the node comes straight from the `decode()` RLP
  ## decoder as expected, these vertex IDs will be all zero.
  ##
  ## This function expects that the parent for the argument `node` has already
  ## been installed.
  ##
  ## Caveat:
  ##   Proof of concept, not in production yet.
  ##
  # Check for error after RLP decoding
  doAssert node.error == AristoError(0)

  # Verify arguments
  if not rootVid.isValid:
    return err(MergeRootKeyInvalid)
  if not hashKey.isValid:
    return err(MergeHashKeyInvalid)

  # Make sure that the `vid<->key` reverse mapping is updated.
  let vid = db.layerGetProofVidOrVoid hashKey
  if not vid.isValid:
    return err(MergeRevVidMustHaveBeenCached)

  # Use the vertex ID `vid` to be populated by the argument root node
  let key = db.layersGetKeyOrVoid vid
  if key.isValid and key != hashKey:
    return err(MergeHashKeyDiffersFromCached)

  # Set up vertex.
  let (vtx, newVtxFromNode) = block:
    let vty = db.getVtx vid
    if vty.isValid:
      (vty, false)
    else:
      (node.to(VertexRef), true)

  # The `vertexID <-> hashKey` mappings need to be set up now (if any)
  case node.vType:
  of Leaf:
    # Check whether there is need to convert the payload to `Account` payload
    if rootVid == VertexID(1) and newVtxFromNode:
      try:
        let
          # `aristo_serialise.read()` always decodes raw data payload
          acc = rlp.decode(node.lData.rawBlob, Account)
          pyl = PayloadRef(
            pType: AccountData,
            account: AristoAccount(
              nonce:    acc.nonce,
              balance:  acc.balance,
              codeHash: acc.codeHash))
        if acc.storageRoot.isValid:
          var sid = db.layerGetProofVidOrVoid acc.storageRoot.to(HashKey)
          if not sid.isValid:
            sid = db.vidFetch
            db.layersPutProof(sid, acc.storageRoot.to(HashKey))
          pyl.account.storageID = sid
        vtx.lData = pyl
      except RlpError:
        return err(MergeNodeAccountPayloadError)
  of Extension:
    if node.key[0].isValid:
      let eKey = node.key[0]
      if newVtxFromNode:
        vtx.eVid = db.layerGetProofVidOrVoid eKey
        if not vtx.eVid.isValid:
          # Brand new reverse lookup link for this vertex
          vtx.eVid = db.vidFetch
      elif not vtx.eVid.isValid:
        return err(MergeNodeVidMissing)
      else:
        let yEke = db.getKey vtx.eVid
        if yEke.isValid and eKey != yEke:
          return err(MergeNodeVtxDiffersFromExisting)
      db.layersPutProof(vtx.eVid, eKey)
  of Branch:
    for n in 0..15:
      if node.key[n].isValid:
        let bKey = node.key[n]
        if newVtxFromNode:
          vtx.bVid[n] = db.layerGetProofVidOrVoid bKey
          if not vtx.bVid[n].isValid:
            # Brand new reverse lookup link for this vertex
            vtx.bVid[n] = db.vidFetch
        elif not vtx.bVid[n].isValid:
          return err(MergeNodeVidMissing)
        else:
          let yEkb = db.getKey vtx.bVid[n]
          if yEkb.isValid and yEkb != bKey:
            return err(MergeNodeVtxDiffersFromExisting)
        db.layersPutProof(vtx.bVid[n], bKey)

  # Store and lock vertex
  db.layersPutProof(vid, key, vtx)
  ok()

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc mergePayload*(
    db: AristoDbRef;                   # Database, top layer
    leafTie: LeafTie;                  # Leaf item to add to the database
    payload: PayloadRef;               # Payload value
    accPath: PathID;                   # Needed for accounts payload
      ): Result[Hike,AristoError] =
  ## Merge the argument `leafTie` key-value-pair into the top level vertex
  ## table of the database `db`. The field `path` of the `leafTie` argument is
  ## used to address the leaf vertex with the payload. It is stored or updated
  ## on the database accordingly.
  ##
  ## If the `leafTie` argument refers to an account entry (i.e. the
  ## `leafTie.root` equals `VertexID(1)`) and the leaf entry already has an
  ## `AccountData` payload, its `storageID` field must be the same as the one
  ## on the database. The `accPath` argument will be ignored.
  ##
  ## Otherwise, if the `root` argument belongs to a well known sub trie (i.e.
  ## it does not exceed `LEAST_FREE_VID`) the `accPath` argument is ignored
  ## and the entry will just be merged.
  ##
  ## Otherwise, a valid `accPath` (i.e. different from `VOID_PATH_ID`) is
  ## required relating to an account leaf entry (starting at `VertexID(1)`).
  ## If the payload of that leaf entry is not of type `AccountData` it is
  ## ignored.
  ##
  ## Otherwise, if the sub-trie where the `leafTie` is to be merged into does
  ## not exist yet, the `storageID` field of the `accPath` leaf must have been
  ## reset to `storageID(0)` and will be updated accordingly on the database.
  ##
  ## Otherwise its `storageID` field must be equal to the `leafTie.root` vertex
  ## ID. So vertices can be marked for Merkle hash update.
  ##
  let wp = block:
    if leafTie.root.distinctBase < LEAST_FREE_VID:
      if not leafTie.root.isValid:
        return err(MergeRootMissing)
      VidVtxPair()
    else:
      let rc = db.registerAccount(leafTie.root, accPath)
      if rc.isErr:
        return err(rc.error)
      else:
        rc.value

  let hike = leafTie.hikeUp(db).to(Hike)
  var okHike: Hike
  if 0 < hike.legs.len:
    case hike.legs[^1].wp.vtx.vType:
    of Branch:
      okHike = ? db.topIsBranchAddLeaf(hike, payload)
    of Leaf:
      if 0 < hike.tail.len:          # `Leaf` vertex problem?
        return err(MergeLeafGarbledHike)
      okHike = ? db.updatePayload(hike, leafTie, payload)
    of Extension:
      okHike = ? db.topIsExtAddLeaf(hike, payload)

  else:
    # Empty hike
    let rootVtx = db.getVtx hike.root
    if rootVtx.isValid:
      okHike = ? db.topIsEmptyAddLeaf(hike, rootVtx, payload)

    else:
      # Bootstrap for existing root ID
      let wp = VidVtxPair(
        vid: hike.root,
        vtx: VertexRef(
          vType: Leaf,
          lPfx:  leafTie.path.to(NibblesSeq),
          lData: payload))
      db.setVtxAndKey(hike.root, wp.vid, wp.vtx)
      okHike = Hike(root: wp.vid, legs: @[Leg(wp: wp, nibble: -1)])

  # Double check the result until the code is more reliable
  block:
    let rc = okHike.to(NibblesSeq).pathToTag
    if rc.isErr or rc.value != leafTie.path:
      return err(MergeAssemblyFailed) # Ooops

  # Make sure that there is an account that refers to that storage trie
  if wp.vid.isValid and not wp.vtx.lData.account.storageID.isValid:
    let leaf = wp.vtx.dup # Dup on modify
    leaf.lData.account.storageID = leafTie.root
    db.layersPutVtx(VertexID(1), wp.vid, leaf)
    db.layersResKey(VertexID(1), wp.vid)

  ok okHike
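
# Example (sketch, `someTag` hypothetical): merging a payload addressed by a
# full `LeafTie`; for `root=VertexID(1)` the `accPath` argument is ignored:
#
#   let
#     lty = LeafTie(root: VertexID(1), path: someTag)
#     pyl = PayloadRef(pType: RawData, rawBlob: @[byte 7])
#     hike = db.mergePayload(lty, pyl, VOID_PATH_ID).valueOr:
#       return err(error)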

proc mergePayload*(
    db: AristoDbRef;                   # Database, top layer
    root: VertexID;                    # MPT state root
    path: openArray[byte];             # Even nibbled byte path
    payload: PayloadRef;               # Payload value
    accPath = VOID_PATH_ID;            # Needed for accounts payload
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`
  ## object.
  let lty = LeafTie(root: root, path: ? path.pathToTag)
  db.mergePayload(lty, payload, accPath).to(typeof result)

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    root: VertexID;                    # MPT state root
    path: openArray[byte];             # Leaf item to add to the database
    data: openArray[byte];             # Raw data payload value
    accPath: PathID;                   # Needed for accounts payload
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`.
  ## The argument `data` is stored as-is as a `RawData` payload value.
  let pyl = PayloadRef(pType: RawData, rawBlob: @data)
  db.mergePayload(root, path, pyl, accPath)

proc mergeAccount*(
    db: AristoDbRef;                   # Database, top layer
    path: openArray[byte];             # Leaf item to add to the database
    data: openArray[byte];             # Raw data payload value
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for `(VertexID(1),path)` arguments instead of a
  ## `LeafTie`. The argument `data` is stored as-is as a `RawData` payload
  ## value.
  let pyl = PayloadRef(pType: RawData, rawBlob: @data)
  db.mergePayload(VertexID(1), path, pyl, VOID_PATH_ID)

proc mergeLeaf*(
    db: AristoDbRef;                   # Database, top layer
    leaf: LeafTiePayload;              # Leaf item to add to the database
    accPath = VOID_PATH_ID;            # Needed for accounts payload
      ): Result[bool,AristoError] =
  ## Variant of `merge()`. This function will not indicate if the leaf
  ## was cached, already.
  db.mergePayload(leaf.leafTie, leaf.payload, accPath).to(typeof result)
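
# Example (sketch, `someTag` hypothetical): the same merge driven by a
# pre-assembled `LeafTiePayload` record:
#
#   let leaf = LeafTiePayload(
#     leafTie: LeafTie(root: VertexID(1), path: someTag),
#     payload: PayloadRef(pType: RawData, rawBlob: @[byte 7]))
#   discard db.mergeLeaf(leaf)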

# ---------------------

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    proof: openArray[SnapProof];       # RLP encoded node records
    rootVid = VertexID(0);             # Current sub-trie
      ): Result[int, AristoError]
      {.gcsafe, raises: [RlpError].} =
  ## The function merges the argument `proof` list of RLP encoded node records
  ## into the `Aristo Trie` database. This function is intended to be used with
  ## the proof nodes as returned by `snap/1` messages.
  ##
  ## If there is no root vertex ID passed, the function tries to find out what
  ## the root hashes are and allocates new vertices with static IDs `$2`, `$3`,
  ## etc.
  ##
  ## Caveat:
  ##   Proof of concept, not in production yet.
  ##
  proc update(
      seen: var Table[HashKey,NodeRef];
      todo: var KeyedQueueNV[NodeRef];
      key: HashKey;
        ) {.gcsafe, raises: [RlpError].} =
    ## Check for embedded nodes, i.e. fully encoded node instead of a hash.
    ## They need to be treated as full nodes, here.
    if key.isValid and key.len < 32:
      let lid = @(key.data).digestTo(HashKey)
      if not seen.hasKey lid:
        let node = @(key.data).decode(NodeRef)
        discard todo.append node
        seen[lid] = node

  let rootKey = block:
    if rootVid.isValid:
      let vidKey = db.getKey rootVid
      if not vidKey.isValid:
        return err(MergeRootKeyInvalid)
      # Make sure that the reverse lookup for the root vertex key is available.
      if not db.layerGetProofVidOrVoid(vidKey).isValid:
        return err(MergeProofInitMissing)
      vidKey
    else:
      VOID_HASH_KEY

  # Expand and collect hash keys and nodes and parent indicator
  var
    nodeTab: Table[HashKey,NodeRef]
    rootKeys: HashSet[HashKey]         # Potential root node hashes
  for w in proof:
    let
      key = w.Blob.digestTo(HashKey)
      node = rlp.decode(w.Blob,NodeRef)
    if node.error != AristoError(0):
      return err(node.error)
    nodeTab[key] = node
    rootKeys.incl key

    # Check for embedded nodes, i.e. fully encoded node instead of a hash.
    # They will be added as full nodes to the `nodeTab[]`.
    var embNodes: KeyedQueueNV[NodeRef]
    discard embNodes.append node
    while true:
      let node = embNodes.shift.valueOr: break
      case node.vType:
      of Leaf:
        discard
      of Branch:
        for n in 0 .. 15:
          nodeTab.update(embNodes, node.key[n])
      of Extension:
        nodeTab.update(embNodes, node.key[0])

  # Create a table with back links
  var
    backLink: Table[HashKey,HashKey]
    blindNodes: HashSet[HashKey]
  for (key,node) in nodeTab.pairs:
    case node.vType:
    of Leaf:
      blindNodes.incl key
    of Extension:
      if nodeTab.hasKey node.key[0]:
        backLink[node.key[0]] = key
        rootKeys.excl node.key[0]      # predecessor => not root
      else:
        blindNodes.incl key
    of Branch:
      var isBlind = true
      for n in 0 .. 15:
        if nodeTab.hasKey node.key[n]:
          isBlind = false
          backLink[node.key[n]] = key
          rootKeys.excl node.key[n]    # predecessor => not root
      if isBlind:
        blindNodes.incl key

  # If it exists, the root key must be in the set `rootKeys` of potential
  # roots in order to work.
  var roots: Table[HashKey,VertexID]
  if rootVid.isValid:
    if rootKey notin rootKeys:
      return err(MergeRootKeyNotInProof)
    roots[rootKey] = rootVid
  elif rootKeys.len == 0:
    return err(MergeRootKeysMissing)
  else:
    # Add static root keys different from VertexID(1)
    var count = 2
    for key in rootKeys.items:
      while true:
        # Check for already allocated nodes
        let vid1 = db.layerGetProofVidOrVoid key
        if vid1.isValid:
          roots[key] = vid1
          break
        # Use the next free static vertex ID
        let vid2 = VertexID(count)
        count.inc
        if not db.getKey(vid2).isValid:
          db.layersPutProof(vid2, key)
          roots[key] = vid2
          break
        if LEAST_FREE_VID <= count:
          return err(MergeRootKeysOverflow)

  # Run over blind nodes and build chains from a blind/bottom level node up
  # to the root node. Select only chains that end up at the pre-defined root
  # node.
  var
    accounts: seq[seq[HashKey]]        # This one separated, to be processed last
    chains: seq[seq[HashKey]]
  for w in blindNodes:
    # Build a chain of nodes up to the root node
    var
      chain: seq[HashKey]
      nodeKey = w
    while nodeKey.isValid and nodeTab.hasKey nodeKey:
      chain.add nodeKey
      nodeKey = backLink.getOrVoid nodeKey
    if 0 < chain.len and chain[^1] in roots:
      if roots.getOrVoid(chain[0]) == VertexID(1):
        accounts.add chain
      else:
        chains.add chain

  # Process chains in reverse order, starting with the root node. This
  # allows the algorithm to find existing nodes on the backend.
  var
    seen: HashSet[HashKey]
    merged = 0
  # Process the root ID which is common to all chains
  for chain in chains & accounts:
    let chainRootVid = roots.getOrVoid chain[^1]
    for key in chain.reversed:
      if key notin seen:
        seen.incl key
        let node = nodeTab.getOrVoid key
        db.mergeNodeImpl(key, node, chainRootVid).isOkOr:
          return err(error)
        merged.inc

  ok merged
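
# Example (sketch, hypothetical `proofNodes` and `stateRoot` from a `snap/1`
# reply): the root key must be set up first via the `merge()` variant below,
# then the proof nodes can be merged in bulk:
#
#   let vid = db.merge(stateRoot).valueOr:         # assign root vertex ID
#     return err(error)
#   let nMerged = db.merge(proofNodes, vid).valueOr:
#     return err(error)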

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    rootHash: Hash256;                 # Merkle hash for root
    rootVid = VertexID(0);             # Optionally, force root vertex ID
      ): Result[VertexID,AristoError] =
  ## Set up a `rootKey` associated with a vertex ID for use with proof nodes.
  ##
  ## If argument `rootVid` is unset then a new dynamic root vertex (i.e.
  ## the ID will be at least `LEAST_FREE_VID`) will be installed.
  ##
  ## Otherwise, if the argument `rootVid` is set then a sub-trie with root
  ## `rootVid` is checked for. An error is returned if it is set up already
  ## with a different `rootHash`.
  ##
  ## Upon successful return, the vertex ID assigned to the root key is returned.
  ##
  ## Caveat:
  ##   Proof of concept, not in production yet.
  ##
  let rootKey = rootHash.to(HashKey)

  if rootVid.isValid:
    let key = db.getKey rootVid
    if key.isValid:
      if rootKey.isValid and key != rootKey:
        # Cannot use installed root key differing from hash argument
        return err(MergeRootKeyDiffersForVid)
      # Confirm root ID and key for proof nodes processing
      db.layersPutProof(rootVid, key)  # note that `rootKey` might be void
      return ok rootVid

    if not rootHash.isValid:
      return err(MergeRootArgsIncomplete)
    if db.getVtx(rootVid).isValid:
      # Cannot verify root key for existing root vertex
      return err(MergeRootKeyMissing)

    # Confirm root ID and hash key for proof nodes processing
    db.layersPutProof(rootVid, rootKey)
    return ok rootVid

  if not rootHash.isValid:
    return err(MergeRootArgsIncomplete)

  # Now there is no root vertex ID, only the hash argument.
  # So create and assign a new root key.
  let vid = db.vidFetch
  db.layersPutProof(vid, rootKey)
  return ok vid


proc merge*(
    db: AristoDbRef;                   # Database, top layer
    rootVid: VertexID;                 # Root ID
      ): Result[VertexID,AristoError] =
  ## Variant of `merge()` for missing `rootHash`
  db.merge(EMPTY_ROOT_HASH, rootVid)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------