2023-05-30 11:47:47 +00:00
|
|
|
# nimbus-eth1
|
2023-11-08 16:52:25 +00:00
|
|
|
# Copyright (c) 2023 Status Research & Development GmbH
|
2023-05-30 11:47:47 +00:00
|
|
|
# Licensed under either of
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
# at your option. This file may not be copied, modified, or distributed
|
|
|
|
# except according to those terms.
|
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
## Aristo DB -- Patricia Trie builder, raw node insertion
|
|
|
|
## ======================================================
|
|
|
|
##
|
2023-10-27 21:36:51 +00:00
|
|
|
## This module merges `PathID` values as hexary lookup paths into the
|
2023-05-30 21:21:15 +00:00
|
|
|
## `Patricia Trie`. When changing vertices (aka nodes without Merkle hashes),
|
|
|
|
## associated (but separated) Merkle hashes will be deleted unless locked.
|
|
|
|
## Instead of deleting locked hashes error handling is applied.
|
|
|
|
##
|
|
|
|
## Also, nodes (vertices plus merkle hashes) can be added which is needed for
|
|
|
|
## boundary proofing after `snap/1` download. The vertices are split from the
|
|
|
|
## nodes and stored as-is on the table holding `Patricia Trie` entries. The
|
|
|
|
## hashes are stored in a separate table and the vertices are labelled
|
|
|
|
## `locked`.
|
|
|
|
|
2023-05-30 11:47:47 +00:00
|
|
|
{.push raises: [].}
|
|
|
|
|
|
|
|
import
|
2023-06-20 13:26:25 +00:00
|
|
|
std/[algorithm, sequtils, strutils, sets, tables],
|
2023-05-30 11:47:47 +00:00
|
|
|
chronicles,
|
|
|
|
eth/[common, trie/nibbles],
|
2023-09-12 18:45:12 +00:00
|
|
|
results,
|
2023-11-08 12:18:32 +00:00
|
|
|
stew/keyed_queue,
|
2023-09-18 20:20:28 +00:00
|
|
|
../../sync/protocol/snap/snap_types,
|
2023-12-19 12:39:23 +00:00
|
|
|
"."/[aristo_desc, aristo_get, aristo_hike, aristo_layers, aristo_path,
|
|
|
|
aristo_serialise, aristo_vid]
|
2023-05-30 11:47:47 +00:00
|
|
|
|
|
|
|
logScope:
|
2023-05-30 21:21:15 +00:00
|
|
|
topics = "aristo-merge"
|
|
|
|
|
|
|
|
type
  LeafTiePayload* = object
    ## Generalised key-value pair for a sub-trie. The main trie is the
    ## sub-trie with `root=VertexID(1)`.
    leafTie*: LeafTie                  ## Full `Patricia Trie` path root-to-leaf
    payload*: PayloadRef               ## Leaf data payload
|
2023-05-30 11:47:47 +00:00
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Private getters & setters
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
proc xPfx(vtx: VertexRef): NibblesSeq =
  ## Return the partial-path prefix of a `Leaf` or `Extension` vertex.
  ## Calling this with a `Branch` vertex is a programming error and
  ## trips the assertion below (a `Branch` has no path prefix.)
  if vtx.vType == Leaf:
    return vtx.lPfx
  if vtx.vType == Extension:
    return vtx.ePfx
  doAssert vtx.vType != Branch # Ooops
|
|
|
|
|
2023-06-02 19:21:46 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Private helpers
|
|
|
|
# ------------------------------------------------------------------------------
|
2023-05-30 21:21:15 +00:00
|
|
|
|
2023-09-15 15:23:53 +00:00
|
|
|
proc to(
    rc: Result[Hike,AristoError];
    T: type Result[bool,AristoError];
      ): T =
  ## Return code converter: success maps to `true`, a benign
  ## duplicate-leaf condition maps to `false`, everything else is
  ## forwarded as an error.
  if rc.isErr:
    if rc.error notin {MergeLeafPathCachedAlready,
                        MergeLeafPathOnBackendAlready}:
      return err(rc.error)
    return ok false
  ok true
|
|
|
|
|
|
|
|
# -----------
|
|
|
|
|
2023-12-12 17:47:41 +00:00
|
|
|
proc nullifyKey(
    db: AristoDbRef;                   # Database, top layer
    vid: VertexID;                     # Vertex ID to clear
      ) =
  ## Invalidate the Merkle hash label of vertex `vid` on the top layer so
  ## that the hash will be recomputed on the next hashify pass.
  # Register for void hash (to be recompiled)
  db.layersResLabel vid
|
2023-12-12 17:47:41 +00:00
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
proc clearMerkleKeys(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Implied vertex IDs to clear hashes for
    vid: VertexID;                     # Additional vertex ID to clear
      ) =
  ## Invalidate the Merkle hash keys of every vertex along the `hike`
  ## trail plus the extra vertex `vid`, registering all of them for
  ## recompilation.
  # Iterate directly instead of materialising `mapIt(..) & @[vid]`, which
  # allocated two throw-away sequences per call.
  for leg in hike.legs:
    db.nullifyKey leg.wp.vid
  db.nullifyKey vid
|
|
|
|
|
|
|
|
proc setVtxAndKey(
    db: AristoDbRef;                   # Database, top layer
    vid: VertexID;                     # Vertex IDs to add/clear
    vtx: VertexRef;                    # Vertex to add
      ) =
  ## Store vertex `vtx` under `vid` on the top layer and invalidate the
  ## associated Merkle hash label (the hash no longer matches the new
  ## vertex content and must be recompiled.)
  db.layersPutVtx(vid, vtx)
  db.layersResLabel vid
|
2023-05-30 21:21:15 +00:00
|
|
|
|
2023-05-30 11:47:47 +00:00
|
|
|
# -----------
|
|
|
|
|
|
|
|
proc insertBranch(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Current state
    linkID: VertexID;                  # Vertex ID to insert
    linkVtx: VertexRef;                # Vertex to insert
    payload: PayloadRef;               # Leaf data payload
     ): Result[Hike,AristoError] =
  ##
  ## Insert `Extension->Branch` vertex chain or just a `Branch` vertex
  ##
  ##   ... --(linkID)--> <linkVtx>
  ##
  ##   <-- immutable --> <---- mutable ----> ..
  ##
  ## will become either
  ##
  ##   --(linkID)-->
  ##        <extVtx>             --(local1)-->
  ##          <forkVtx>[linkInx] --(local2)--> <linkVtx*>
  ##                   [leafInx] --(local3)--> <leafVtx>
  ##
  ## or in case that there is no common prefix
  ##
  ##   --(linkID)-->
  ##          <forkVtx>[linkInx] --(local2)--> <linkVtx*>
  ##                   [leafInx] --(local3)--> <leafVtx>
  ##
  ## *) vertex was slightly modified or removed if obsolete `Extension`
  ##
  # Length of the common prefix between the new path tail and the vertex
  # to be relocated below the new fork.
  let n = linkVtx.xPfx.sharedPrefixLen hike.tail

  # Verify minimum requirements
  if hike.tail.len == n:
    # Should have been tackled by `hikeUp()`, already
    return err(MergeLeafGarbledHike)
  if linkVtx.xPfx.len == n:
    return err(MergeBranchLinkVtxPfxTooShort)

  # Provide and install `forkVtx`
  let
    forkVtx = VertexRef(vType: Branch)
    linkInx = linkVtx.xPfx[n]          # fork slot for the relocated vertex
    leafInx = hike.tail[n]             # fork slot for the new leaf
  var
    leafLeg = Leg(nibble: -1)

  # Install `forkVtx`
  block:
    # Clear Merkle hashes (aka hash keys) unless proof mode.
    if db.pPrf.len == 0:
      db.clearMerkleKeys(hike, linkID)
    elif linkID in db.pPrf:
      return err(MergeNonBranchProofModeLock)

    if linkVtx.vType == Leaf:
      # Update vertex path lookup
      let
        path = hike.legsTo(NibblesSeq) & linkVtx.lPfx
        rc = path.pathToTag()
      if rc.isErr:
        debug "Branch link leaf path garbled", linkID, path
        return err(MergeBranchLinkLeafGarbled)

      let
        local = db.vidFetch(pristine = true)
        lty = LeafTie(root: hike.root, path: rc.value)

      db.top.final.lTab[lty] = local     # update leaf path lookup cache
      db.setVtxAndKey(local, linkVtx)
      # Strip the common prefix plus the fork nibble from the relocated leaf
      linkVtx.lPfx = linkVtx.lPfx.slice(1+n)
      forkVtx.bVid[linkInx] = local

    elif linkVtx.ePfx.len == n + 1:
      # This extension `linkVtx` becomes obsolete
      forkVtx.bVid[linkInx] = linkVtx.eVid

    else:
      let local = db.vidFetch
      db.setVtxAndKey(local, linkVtx)
      # Shorten the extension prefix past the fork nibble
      linkVtx.ePfx = linkVtx.ePfx.slice(1+n)
      forkVtx.bVid[linkInx] = local

  block:
    # Create and hook up the new leaf below the fork
    let local = db.vidFetch(pristine = true)
    forkVtx.bVid[leafInx] = local
    leafLeg.wp.vid = local
    leafLeg.wp.vtx = VertexRef(
      vType: Leaf,
      lPfx: hike.tail.slice(1+n),
      lData: payload)
    db.setVtxAndKey(local, leafLeg.wp.vtx)

  # Update branch leg, ready to append more legs
  var okHike = Hike(root: hike.root, legs: hike.legs)

  # Update in-between glue linking `branch --[..]--> forkVtx`
  if 0 < n:
    # Non-empty common prefix: an `Extension` vertex is needed in front
    # of the fork, reusing the original `linkID` slot.
    let extVtx = VertexRef(
      vType: Extension,
      ePfx: hike.tail.slice(0,n),
      eVid: db.vidFetch)

    db.setVtxAndKey(linkID, extVtx)

    okHike.legs.add Leg(
      nibble: -1,
      wp: VidVtxPair(
        vid: linkID,
        vtx: extVtx))

    db.setVtxAndKey(extVtx.eVid, forkVtx)
    okHike.legs.add Leg(
      nibble: leafInx.int8,
      wp: VidVtxPair(
        vid: extVtx.eVid,
        vtx: forkVtx))

  else:
    # No common prefix: the fork replaces the original `linkID` slot.
    db.setVtxAndKey(linkID, forkVtx)
    okHike.legs.add Leg(
      nibble: leafInx.int8,
      wp: VidVtxPair(
        vid: linkID,
        vtx: forkVtx))

  okHike.legs.add leafLeg
  ok okHike
|
2023-05-30 11:47:47 +00:00
|
|
|
|
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
proc concatBranchAndLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Path top has a `Branch` vertex
    brVid: VertexID;                   # Branch vertex ID from `Hike` top
    brVtx: VertexRef;                  # Branch vertex, linked to from `Hike`
    payload: PayloadRef;               # Leaf data payload
      ): Result[Hike,AristoError] =
  ## Append argument branch vertex passed as argument `(brID,brVtx)` and then
  ## a `Leaf` vertex derived from the argument `payload`.
  ##
  if hike.tail.len == 0:
    return err(MergeBranchGarbledTail)

  # The first tail nibble selects the branch slot for the new leaf
  let nibble = hike.tail[0].int8
  if brVtx.bVid[nibble].isValid:
    return err(MergeRootBranchLinkBusy)

  # Clear Merkle hashes (aka hash keys) unless proof mode.
  if db.pPrf.len == 0:
    db.clearMerkleKeys(hike, brVid)
  elif brVid in db.pPrf:
    return err(MergeBranchProofModeLock) # Ooops

  # Append branch vertex
  var okHike = Hike(root: hike.root, legs: hike.legs)
  okHike.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)

  # Append leaf vertex
  let
    vid = db.vidFetch(pristine = true)
    vtx = VertexRef(
      vType: Leaf,
      lPfx: hike.tail.slice(1),
      lData: payload)
  brVtx.bVid[nibble] = vid
  # Re-store the branch (its slot changed) and store the new leaf
  db.setVtxAndKey(brVid, brVtx)
  db.setVtxAndKey(vid, vtx)
  okHike.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)

  ok okHike
|
2023-05-30 11:47:47 +00:00
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
2023-06-12 13:48:47 +00:00
|
|
|
# Private functions: add Particia Trie leaf vertex
|
2023-05-30 11:47:47 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
proc topIsBranchAddLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Path top has a `Branch` vertex
    payload: PayloadRef;               # Leaf data payload
      ): Result[Hike,AristoError] =
  ## Append a `Leaf` vertex derived from the argument `payload` after the top
  ## leg of the `hike` argument which is assumed to refer to a `Branch`
  ## vertex. If successful, the function returns the updated `hike` trail.
  if hike.tail.len == 0:
    return err(MergeBranchGarbledTail)

  let nibble = hike.legs[^1].nibble
  if nibble < 0:
    return err(MergeBranchGarbledNibble)

  let
    branch = hike.legs[^1].wp.vtx
    linkID = branch.bVid[nibble]
    linkVtx = db.getVtx linkID

  if not linkVtx.isValid:
    #
    #  .. <branch>[nibble] --(linkID)--> nil
    #
    #  <-------- immutable ------------> <---- mutable ----> ..
    #
    if db.pPrf.len == 0:
      # Not much else that can be done here
      debug "Dangling leaf link, reused", branch=hike.legs[^1].wp.vid,
        nibble, linkID, leafPfx=hike.tail

    # Reuse placeholder entry in table
    let vtx = VertexRef(
      vType: Leaf,
      lPfx: hike.tail,
      lData: payload)
    db.setVtxAndKey(linkID, vtx)
    var okHike = Hike(root: hike.root, legs: hike.legs)
    okHike.legs.add Leg(wp: VidVtxPair(vid: linkID, vtx: vtx), nibble: -1)
    return ok(okHike)

  if linkVtx.vType == Branch:
    # Slot link to a branch vertex should be handled by `hikeUp()`
    #
    #  .. <branch>[nibble] --(linkID)--> <linkVtx>[]
    #
    #  <-------- immutable ------------> <---- mutable ----> ..
    #
    return db.concatBranchAndLeaf(hike, linkID, linkVtx, payload)

  # `linkVtx` is a `Leaf` or `Extension`: split it with a new fork branch
  db.insertBranch(hike, linkID, linkVtx, payload)
|
|
|
|
|
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
proc topIsExtAddLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Path top has an `Extension` vertex
    payload: PayloadRef;               # Leaf data payload
      ): Result[Hike,AristoError] =
  ## Append a `Leaf` vertex derived from the argument `payload` after the top
  ## leg of the `hike` argument which is assumed to refer to an `Extension`
  ## vertex. If successful, the function returns the
  ## updated `hike` trail.
  let
    extVtx = hike.legs[^1].wp.vtx
    extVid = hike.legs[^1].wp.vid
    brVid = extVtx.eVid
    brVtx = db.getVtx brVid

  var okHike = Hike(root: hike.root, legs: hike.legs)

  if not brVtx.isValid:
    # Blind vertex, promote to leaf vertex.
    #
    #  --(extVid)--> <extVtx> --(brVid)--> nil
    #
    #  <-------- immutable -------------->
    #

    # The extension and the new leaf collapse into a single leaf vertex
    # stored under the extension's vertex ID.
    let vtx = VertexRef(
      vType: Leaf,
      lPfx: extVtx.ePfx & hike.tail,
      lData: payload)
    db.setVtxAndKey(extVid, vtx)
    okHike.legs[^1].wp.vtx = vtx

  elif brVtx.vType != Branch:
    return err(MergeBranchRootExpected)

  else:
    let
      nibble = hike.tail[0].int8
      linkID = brVtx.bVid[nibble]
    #
    # Required
    #
    #  --(extVid)--> <extVtx> --(brVid)--> <brVtx>[nibble] --(linkID)--> nil
    #
    #  <-------- immutable --------------> <-------- mutable ----------> ..
    #
    if linkID.isValid:
      return err(MergeRootBranchLinkBusy)

    # Clear Merkle hashes (aka hash keys) unless proof mode
    if db.pPrf.len == 0:
      db.clearMerkleKeys(hike, brVid)
    elif brVid in db.pPrf:
      return err(MergeBranchProofModeLock)

    let
      vid = db.vidFetch(pristine = true)
      vtx = VertexRef(
        vType: Leaf,
        lPfx: hike.tail.slice(1),
        lData: payload)
    brVtx.bVid[nibble] = vid
    db.setVtxAndKey(brVid, brVtx)
    db.setVtxAndKey(vid, vtx)
    okHike.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)
    okHike.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)

  ok okHike
|
2023-05-30 11:47:47 +00:00
|
|
|
|
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
proc topIsEmptyAddLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # No path legs
    rootVtx: VertexRef;                # Root vertex
    payload: PayloadRef;               # Leaf data payload
      ): Result[Hike,AristoError] =
  ## Append a `Leaf` vertex derived from the argument `payload` after the
  ## argument vertex `rootVtx`, extending the empty argument `hike`.
  if rootVtx.vType == Branch:
    # The first tail nibble selects the root branch slot for the new leaf
    let nibble = hike.tail[0].int8
    if rootVtx.bVid[nibble].isValid:
      return err(MergeRootBranchLinkBusy)

    # Clear Merkle hashes (aka hash keys) unless proof mode
    if db.pPrf.len == 0:
      db.clearMerkleKeys(hike, hike.root)
    elif hike.root in db.pPrf:
      return err(MergeBranchProofModeLock)

    let
      leafVid = db.vidFetch(pristine = true)
      leafVtx = VertexRef(
        vType: Leaf,
        lPfx: hike.tail.slice(1),
        lData: payload)
    rootVtx.bVid[nibble] = leafVid
    db.setVtxAndKey(hike.root, rootVtx)
    db.setVtxAndKey(leafVid, leafVtx)
    return ok Hike(
      root: hike.root,
      legs: @[Leg(wp: VidVtxPair(vtx: rootVtx, vid: hike.root), nibble: nibble),
              Leg(wp: VidVtxPair(vtx: leafVtx, vid: leafVid), nibble: -1)])

  # Root is a `Leaf` or `Extension`: split it with a new fork branch
  db.insertBranch(hike, hike.root, rootVtx, payload)
|
|
|
|
|
2023-06-22 11:13:24 +00:00
|
|
|
|
|
|
|
proc updatePayload(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Fully resolved path to the leaf
    leafTie: LeafTie;                  # Leaf item to add to the database
    payload: PayloadRef;               # Payload value
      ): Result[Hike,AristoError] =
  ## Update leaf vertex if payloads differ. If they do not differ, an
  ## already-cached / already-on-backend error is returned instead.
  let leafLeg = hike.legs[^1]

  # Update payloads if they differ
  if leafLeg.wp.vtx.lData != payload:

    # Update vertex and hike
    let
      vid = leafLeg.wp.vid
      vtx = VertexRef(
        vType: Leaf,
        lPfx: leafLeg.wp.vtx.lPfx,
        lData: payload)
    var hike = hike
    hike.legs[^1].wp.vtx = vtx

    # Modify top level cache
    db.setVtxAndKey(vid, vtx)
    db.top.final.lTab[leafTie] = vid
    db.clearMerkleKeys(hike, vid)
    ok hike

  elif db.layersGetVtx(leafLeg.wp.vid).isErr:
    # Same payload but the vertex is not on the unfiltered layers, i.e.
    # it lives on the backend only.
    err(MergeLeafPathOnBackendAlready)

  else:
    err(MergeLeafPathCachedAlready)
|
2023-06-22 11:13:24 +00:00
|
|
|
|
2023-06-12 13:48:47 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Private functions: add Merkle proof node
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
proc mergeNodeImpl(
    db: AristoDbRef;                   # Database, top layer
    hashKey: HashKey;                  # Merkle hash of node (or so)
    node: NodeRef;                     # Node derived from RLP representation
    rootVid: VertexID;                 # Current sub-trie
      ): Result[void,AristoError]  =
  ## The function merges the argument hash key `lid` as expanded from the
  ## node RLP representation into the `Aristo Trie` database. The vertex is
  ## split off from the node and stored separately. So are the Merkle hashes.
  ## The vertex is labelled `locked`.
  ##
  ## The `node` argument is *not* checked, whether the vertex IDs have been
  ## allocated, already. If the node comes straight from the `decode()` RLP
  ## decoder as expected, these vertex IDs will be all zero.
  ##
  ## This function expects that the parent for the argument node has already
  ## been installed, i.e. the top layer cache mapping
  ##
  ##     pAmk: {HashKey} -> {{VertexID}}
  ##
  ## has a result for the argument `node`. Also, the inverse top layer cache
  ## mapping
  ##
  ##     sTab: {VertexID} -> {VertexRef}
  ##
  ## has no result for all images of the argument `node` under `pAmk`:
  ##
  # Check for error after RLP decoding
  doAssert node.error == AristoError(0)
  if not rootVid.isValid:
    return err(MergeRootKeyInvalid)

  # Verify `hashKey`
  if not hashKey.isValid:
    return err(MergeHashKeyInvalid)

  # Make sure that the `vid<->hashLbl` reverse mapping has been cached,
  # already. This is provided for if the `nodes` are processed in the right
  # order `root->.. ->leaf`.
  let
    hashLbl = HashLabel(root: rootVid, key: hashKey)
    vids = db.layersGetLebalOrVoid(hashLbl).toSeq
    isRoot = rootVid in vids
  if vids.len == 0:
    return err(MergeRevVidMustHaveBeenCached)
  if isRoot and 1 < vids.len:
    # There can only be one root.
    return err(MergeHashKeyRevLookUpGarbled)

  # Use the first vertex ID from the `vids` list as representative for all
  # others
  let lbl = db.layersGetLabelOrVoid vids[0]
  if lbl == hashLbl:
    if db.layersGetVtx(vids[0]).isOk:
      for n in 1 ..< vids.len:
        if db.layersGetVtx(vids[n]).isErr:
          return err(MergeHashKeyRevLookUpGarbled)
      # This is typically considered OK
      return err(MergeHashKeyCachedAlready)
    # Otherwise proceed
  elif lbl.isValid:
    # Different key assigned => error
    return err(MergeHashKeyDiffersFromCached)

  # While the vertex referred to by `vids[0]` does not exist in the top layer
  # cache it may well be in some lower layers or the backend. This typically
  # happens for the root node.
  var (vtx, hasVtx) = block:
    let vty = db.getVtx vids[0]
    if vty.isValid:
      (vty, true)
    else:
      (node.to(VertexRef), false)

  # Verify that all `vids` entries are similar
  for n in 1 ..< vids.len:
    let w = vids[n]
    if lbl != db.layersGetLabelOrVoid(w) or db.layersGetVtx(w).isOk:
      return err(MergeHashKeyRevLookUpGarbled)
    if not hasVtx:
      # Prefer existing node which has all links available, already.
      let u = db.getVtx w
      if u.isValid:
        (vtx, hasVtx) = (u, true)

  # The `vertexID <-> hashLabel` mappings need to be set up now (if any)
  case node.vType:
  of Leaf:
    discard
  of Extension:
    if node.key[0].isValid:
      let eLbl = HashLabel(root: rootVid, key: node.key[0])
      if not hasVtx:
        # Brand new reverse lookup link for this vertex
        vtx.eVid = db.vidFetch
        db.layersPutLabel(vtx.eVid, eLbl)
      elif not vtx.eVid.isValid:
        return err(MergeNodeVtxDiffersFromExisting)
      db.layersPutLabel(vtx.eVid, eLbl)
  of Branch:
    for n in 0..15:
      if node.key[n].isValid:
        let bLbl = HashLabel(root: rootVid, key: node.key[n])
        if not hasVtx:
          # Brand new reverse lookup link for this vertex
          vtx.bVid[n] = db.vidFetch
          db.layersPutLabel(vtx.bVid[n], bLbl)
        elif not vtx.bVid[n].isValid:
          return err(MergeNodeVtxDiffersFromExisting)
        db.layersPutLabel(vtx.bVid[n], bLbl)

  # Lock all images of this node in proof mode and (re-)store the vertex
  # where needed.
  for w in vids:
    db.top.final.pPrf.incl w
    if not hasVtx or db.getKey(w) != hashKey:
      db.layersPutVtx(w, vtx.dup)

  ok()
|
2023-06-12 13:48:47 +00:00
|
|
|
|
2023-05-30 11:47:47 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Public functions
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
proc merge*(
    db: AristoDbRef;                   # Database, top layer
    leafTie: LeafTie;                  # Leaf item to add to the database
    payload: PayloadRef;               # Payload value
      ): Result[Hike,AristoError] =
  ## Merge the argument `leafTie` key-value-pair into the top level vertex
  ## table of the database `db`. The field `path` of the `leafTie` argument is
  ## used to index the leaf vertex on the `Patricia Trie`. The field `payload`
  ## is stored with the leaf vertex in the database unless the leaf vertex
  ## exists already.
  ##
  # Check whether the leaf is on the database and payloads match
  block:
    let vid = db.lTab.getOrVoid leafTie
    if vid.isValid:
      let vtx = db.getVtx vid
      if vtx.isValid and vtx.lData == payload:
        return err(MergeLeafPathCachedAlready)

  # Resolve the path as far as possible; the remainder ends up in `hike.tail`
  let hike = leafTie.hikeUp(db).to(Hike)
  var okHike: Hike
  if 0 < hike.legs.len:
    # Dispatch on the vertex type where the walk got stuck
    case hike.legs[^1].wp.vtx.vType:
    of Branch:
      okHike = ? db.topIsBranchAddLeaf(hike, payload)
    of Leaf:
      if 0 < hike.tail.len:          # `Leaf` vertex problem?
        return err(MergeLeafGarbledHike)
      okHike = ? db.updatePayload(hike, leafTie, payload)
    of Extension:
      okHike = ? db.topIsExtAddLeaf(hike, payload)

  else:
    # Empty hike
    let rootVtx = db.getVtx hike.root
    if rootVtx.isValid:
      okHike = ? db.topIsEmptyAddLeaf(hike,rootVtx, payload)

    else:
      # Bootstrap for existing root ID
      let wp = VidVtxPair(
        vid: hike.root,
        vtx: VertexRef(
          vType: Leaf,
          lPfx: leafTie.path.to(NibblesSeq),
          lData: payload))
      db.setVtxAndKey(wp.vid, wp.vtx)
      okHike = Hike(root: wp.vid, legs: @[Leg(wp: wp, nibble: -1)])

  # Double check the result until the code is more reliable
  block:
    let rc = okHike.to(NibblesSeq).pathToTag
    if rc.isErr or rc.value != leafTie.path:
      return err(MergeAssemblyFailed) # Ooops

  # Update leaf access cache
  db.top.final.lTab[leafTie] = okHike.legs[^1].wp.vid

  ok okHike
|
|
|
|
|
|
|
|
|
|
|
|
proc merge*(
    db: AristoDbRef;                   # Database, top layer
    root: VertexID;                    # MPT state root
    path: openArray[byte];             # Even nibbled byte path
    payload: PayloadRef;               # Payload value
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`
  ## object. Returns `true` if a new entry was merged, `false` for a benign
  ## duplicate.
  let pathTag = ? path.pathToTag
  db.merge(LeafTie(root: root, path: pathTag), payload).to(typeof result)
|
|
|
|
|
|
|
|
proc merge*(
    db: AristoDbRef;                   # Database, top layer
    root: VertexID;                    # MPT state root
    path: openArray[byte];             # Leaf item to add to the database
    data: openArray[byte];             # Raw data payload value
      ): Result[bool,AristoError] =
  ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`.
  ## The argument `data` is stored as-is as a `RawData` payload value.
  db.merge(root, path, PayloadRef(pType: RawData, rawBlob: @data))
|
2023-08-11 17:23:57 +00:00
|
|
|
|
2023-08-07 17:45:23 +00:00
|
|
|
proc merge*(
    db: AristoDbRef;                   # Database, top layer
    leaf: LeafTiePayload;              # Leaf item to add to the database
      ): Result[bool,AristoError] =
  ## Variant of `merge()` that unpacks a combined `LeafTiePayload` record.
  ## This function will not indicate if the leaf was cached, already.
  let
    tie = leaf.leafTie
    pyl = leaf.payload
  db.merge(tie, pyl).to(typeof result)
|
2023-08-07 17:45:23 +00:00
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
proc merge*(
    db: AristoDbRef;                   # Database, top layer
    leafs: openArray[LeafTiePayload];  # Leaf items to add to the database
      ): tuple[merged: int, dups: int, error: AristoError] =
  ## Variant of `merge()` for leaf lists.
  ##
  ## On success, `merged` is the number of leafs newly added and `dups` the
  ## number of leafs that were present already; `error` is `AristoError(0)`.
  ## On the first hard error, processing stops and the counters reflect only
  ## the work completed so far.
  var (merged, dups) = (0, 0)
  for w in leafs:
    let rc = db.merge(w.leafTie, w.payload)
    if rc.isOk:
      merged.inc
    elif rc.error in {MergeLeafPathCachedAlready,
                       MergeLeafPathOnBackendAlready}:
      dups.inc
    else:
      # Bug fix: the error path previously returned the loop index as the
      # `merged` count, over-counting by the number of duplicates seen so
      # far. Return the true counters, consistent with the success path.
      return (merged, dups, rc.error)

  (merged, dups, AristoError(0))
|
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
# ---------------------
|
2023-05-30 11:47:47 +00:00
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
proc merge*(
    db: AristoDbRef;                   # Database, top layer
    proof: openArray[SnapProof];       # RLP encoded node records
    rootVid: VertexID;                 # Current sub-trie
      ): tuple[merged: int, dups: int, error: AristoError]
      {.gcsafe, raises: [RlpError].} =
  ## The function merges the argument `proof` list of RLP encoded node records
  ## into the `Aristo Trie` database. This function is intended to be used with
  ## the proof nodes as returned by `snap/1` messages.
  ##
  proc update(
      seen: var Table[HashKey,NodeRef];
      todo: var KeyedQueueNV[NodeRef];
      key: HashKey;
        ) {.gcsafe, raises: [RlpError].} =
    ## Check for embedded nodes, i.e. fully encoded node instead of a hash.
    ## An embedded node is queued on `todo` for further expansion and recorded
    ## on `seen` so it is decoded only once.
    if key.isValid and key.len < 32:
      let lid = @key.digestTo(HashKey)
      if not seen.hasKey lid:
        let node = @key.decode(NodeRef)
        discard todo.append node
        seen[lid] = node

  if not rootVid.isValid:
    return (0,0,MergeRootVidInvalid)
  let rootKey = db.getKey rootVid
  if not rootKey.isValid:
    return (0,0,MergeRootKeyInvalid)

  # Expand and collect hash keys and nodes
  var nodeTab: Table[HashKey,NodeRef]
  for w in proof:
    let
      key = w.Blob.digestTo(HashKey)
      node = rlp.decode(w.Blob,NodeRef)
    if node.error != AristoError(0):
      return (0,0,node.error)
    nodeTab[key] = node

    # Check for embedded nodes, i.e. fully encoded node instead of a hash.
    # These are expanded transitively and also registered on `nodeTab[]`.
    var embNodes: KeyedQueueNV[NodeRef]
    discard embNodes.append node
    while true:
      let node = embNodes.shift.valueOr: break
      case node.vType:
      of Leaf:
        discard
      of Branch:
        for n in 0 .. 15:
          nodeTab.update(embNodes, node.key[n])
      of Extension:
        nodeTab.update(embNodes, node.key[0])

  # Create a table with back links
  var
    backLink: Table[HashKey,HashKey]
    blindNodes: HashSet[HashKey]
  for (key,node) in nodeTab.pairs:
    case node.vType:
    of Leaf:
      blindNodes.incl key
    of Extension:
      if nodeTab.hasKey node.key[0]:
        backLink[node.key[0]] = key
      else:
        blindNodes.incl key
    of Branch:
      var isBlind = true
      for n in 0 .. 15:
        if nodeTab.hasKey node.key[n]:
          isBlind = false
          backLink[node.key[n]] = key
      if isBlind:
        blindNodes.incl key

  # Run over blind nodes and build chains from a blind/bottom level node up
  # to the root node. Select only chains that end up at the pre-defined root
  # node.
  var chains: seq[seq[HashKey]]
  for w in blindNodes:
    # Build a chain of nodes up to the root node
    var
      chain: seq[HashKey]
      nodeKey = w
    while nodeKey.isValid and nodeTab.hasKey nodeKey:
      chain.add nodeKey
      nodeKey = backLink.getOrVoid nodeKey
    if 0 < chain.len and chain[^1] == rootKey:
      chains.add chain

  # Make sure that the reverse lookup for the root vertex label is available.
  block:
    let
      lbl = HashLabel(root: rootVid, key: rootKey)
      vids = db.layersGetLebalOrVoid lbl
    if not vids.isValid:
      db.layersPutLabel(rootVid, lbl)

  # Process over chains in reverse mode starting with the root node. This
  # allows the algorithm to find existing nodes on the backend.
  var
    seen: HashSet[HashKey]
    (merged, dups) = (0, 0)
  # Process the root ID which is common to all chains
  for chain in chains:
    for key in chain.reversed:
      if key notin seen:
        seen.incl key
        let rc = db.mergeNodeImpl(key, nodeTab.getOrVoid key, rootVid)
        if rc.isOK:
          merged.inc
        elif rc.error == MergeHashKeyCachedAlready:
          dups.inc
        else:
          return (merged, dups, rc.error)

  (merged, dups, AristoError(0))
|
|
|
|
|
2023-06-09 11:17:37 +00:00
|
|
|
proc merge*(
    db: AristoDbRef;                   # Database, top layer
    rootKey: Hash256;                  # Merkle hash for root
    rootVid = VertexID(0)              # Optionally, force root vertex ID
      ): Result[VertexID,AristoError] =
  ## Set up a `rootKey` associated with a vertex ID.
  ##
  ## If argument `rootVid` is unset (defaults to `VertexID(0)`) then the main
  ## trie is tested for `VertexID(1)`. If assigned with a different Merkle key
  ## already, a new vertex ID is created and the argument root key is assigned
  ## to this vertex ID.
  ##
  ## If the argument `rootVid` is set (to a value different from `VertexID(0)`),
  ## then a sub-trie with root `rootVid` is checked for. If it exists with a
  ## different root key assigned, then an error is returned. Otherwise a new
  ## vertex ID is created and the argument root key is assigned.
  ##
  ## Upon successful return, the vertex ID assigned to the root key is returned.
  ##
  if not rootKey.isValid:
    return err(MergeRootKeyInvalid)

  let rootLink = rootKey.to(HashKey)

  if rootVid.isValid and rootVid != VertexID(1):
    let key = db.getKey rootVid
    if key.to(Hash256) == rootKey:
      return ok rootVid

    # Vertex ID is free of any key assignment, so claim it
    if not key.isValid:
      db.layersPutLabel(rootVid, HashLabel(root: rootVid, key: rootLink))
      return ok rootVid
  else:
    let key = db.getKey VertexID(1)
    if key.to(Hash256) == rootKey:
      return ok VertexID(1)

    # Otherwise assign unless valid
    if not key.isValid:
      db.layersPutLabel(VertexID(1), HashLabel(root: VertexID(1), key: rootLink))
      return ok VertexID(1)

    # Create and assign a new root key
    if not rootVid.isValid:
      let vid = db.vidFetch
      db.layersPutLabel(vid, HashLabel(root: vid, key: rootLink))
      return ok vid

  # A valid `rootVid` was given but it already carries a different key
  err(MergeRootKeyDiffersForVid)
|
|
|
|
|
2023-05-30 11:47:47 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# End
|
|
|
|
# ------------------------------------------------------------------------------
|