Detach from snap/sync declarations & definitions (#1601)

why:
  Tests and some basic components were originally borrowed from the
  snap/sync implementation. These have fully been re-implemented.
This commit is contained in:
Jordan Hrycaj 2023-06-12 19:16:03 +01:00 committed by GitHub
parent 0308dfac4f
commit d7f40516a7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 330 additions and 195 deletions

View File

@ -12,7 +12,6 @@
import
eth/[common, trie/nibbles],
../../sync/snap/range_desc,
./aristo_desc/aristo_types_identifiers
const
@ -22,12 +21,12 @@ const
EmptyNibbleSeq* = EmptyBlob.initNibbleRange
## Useful shortcut (borrowed from `sync/snap/constants.nim`)
VOID_NODE_KEY* = EMPTY_ROOT_HASH.to(NodeKey)
## Equivalent of `nil` for Merkle hash key
VOID_CODE_KEY* = EMPTY_CODE_HASH.to(NodeKey)
VOID_CODE_KEY* = EMPTY_CODE_HASH.to(HashKey)
## Equivalent of `nil` for `Account` object code hash
VOID_HASH_LABEL* = HashLabel(root: VertexID(0), key: VOID_NODE_KEY)
VOID_HASH_KEY* = EMPTY_ROOT_HASH.to(HashKey)
## Equivalent of `nil` for Merkle hash key
VOID_HASH_LABEL* = HashLabel(root: VertexID(0), key: VOID_HASH_KEY)
# End

View File

@ -14,7 +14,8 @@ import
std/[algorithm, sequtils, sets, strutils, tables],
eth/[common, trie/nibbles],
stew/byteutils,
"."/[aristo_constants, aristo_desc, aristo_hike, aristo_vid]
"."/[aristo_constants, aristo_desc, aristo_hike, aristo_vid],
./aristo_desc/aristo_types_private
# ------------------------------------------------------------------------------
# Private functions
@ -83,10 +84,10 @@ proc vidCode(lbl: HashLabel, db: AristoDb): uint64 =
if vid.isValid:
return vid.uint64
proc ppKey(key: NodeKey): string =
if key == NodeKey.default:
proc ppKey(key: HashKey): string =
if key == HashKey.default:
return "£ø"
if key == VOID_NODE_KEY:
if key == VOID_HASH_KEY:
return "£r"
if key == VOID_CODE_KEY:
return "£c"
@ -96,9 +97,9 @@ proc ppKey(key: NodeKey): string =
.squeeze(hex=true,ignLen=true)
proc ppLabel(lbl: HashLabel; db: AristoDb): string =
if lbl.key == NodeKey.default:
if lbl.key == HashKey.default:
return "£ø"
if lbl.key == VOID_NODE_KEY:
if lbl.key == VOID_HASH_KEY:
return "£r"
if lbl.key == VOID_CODE_KEY:
return "£c"
@ -118,11 +119,11 @@ proc ppLabel(lbl: HashLabel; db: AristoDb): string =
.mapIt(it.toHex(2)).join.tolowerAscii
.squeeze(hex=true,ignLen=true)
proc ppRootKey(a: NodeKey): string =
proc ppRootKey(a: HashKey): string =
if a.isValid:
return a.ppKey
proc ppCodeKey(a: NodeKey): string =
proc ppCodeKey(a: HashKey): string =
if a != VOID_CODE_KEY:
return a.ppKey
@ -133,7 +134,7 @@ proc ppLeafTie(lty: LeafTie, db: AristoDb): string =
return "@" & vid.ppVid
"@" & ($lty.root.uint64.toHex).stripZeros & ":" &
lty.path.to(NodeKey).ByteArray32
lty.path.to(HashKey).ByteArray32
.mapIt(it.toHex(2)).join.squeeze(hex=true,ignLen=true)
proc ppPathPfx(pfx: NibblesSeq): string =
@ -154,8 +155,8 @@ proc ppPayload(p: PayloadRef, db: AristoDb): string =
result = "("
result &= $p.account.nonce & ","
result &= $p.account.balance & ","
result &= p.account.storageRoot.to(NodeKey).ppRootKey() & ","
result &= p.account.codeHash.to(NodeKey).ppCodeKey() & ")"
result &= p.account.storageRoot.to(HashKey).ppRootKey() & ","
result &= p.account.codeHash.to(HashKey).ppCodeKey() & ")"
proc ppVtx(nd: VertexRef, db: AristoDb, vid: VertexID): string =
if not nd.isValid:
@ -261,9 +262,9 @@ proc lblToVtxID*(db: var AristoDb, lbl: HashLabel): VertexID =
db.xMap[lbl] = result
proc hashToVtxID*(db: var AristoDb, root: VertexID; hash: Hash256): VertexID =
db.lblToVtxID HashLabel(root: root, key: hash.to(NodeKey))
db.lblToVtxID HashLabel(root: root, key: hash.to(HashKey))
proc pp*(key: NodeKey): string =
proc pp*(key: HashKey): string =
key.ppKey
proc pp*(lbl: HashLabel, db = AristoDb()): string =

View File

@ -78,7 +78,7 @@ proc deleteImpl(
inx.dec
while 0 <= inx:
# Unlink child node
# Unlink child vertex
let br = hike.legs[inx].wp
if br.vtx.vType != Branch:
return err((br.vid,DelBranchExpexted))

View File

@ -16,16 +16,14 @@
##
## Some semantic explanations;
##
## * NodeKey, NodeRef etc. refer to the standard/legacy `Merkel Patricia Tree`
## * HashKey, NodeRef etc. refer to the standard/legacy `Merkle Patricia Tree`
## * VertexID, VertexRef, etc. refer to the `Aristo Trie`
##
{.push raises: [].}
import
std/[sets, tables],
eth/[common, trie/nibbles],
stew/results,
../../sync/snap/range_desc,
eth/common,
./aristo_constants,
./aristo_desc/[
aristo_error, aristo_types_backend,
@ -36,9 +34,6 @@ export
aristo_constants, aristo_error, aristo_types_identifiers,
aristo_types_structural
export # This one should go away one time
ByteArray32, NodeKey, NodeTag, digestTo, hash, to, `==`, `$`
type
AristoLayerRef* = ref object
## Hexary trie database layer structures. Any layer holds the full
@ -60,10 +55,6 @@ type
# Debugging data below, might go away in future
xMap*: Table[HashLabel,VertexID] ## For pretty printing, extends `pAmk`
static:
# Not that there is no doubt about this ...
doAssert NodeKey.default.ByteArray32.initNibbleRange.len == 64
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
@ -77,6 +68,7 @@ proc getOrVoid*[W](tab: Table[W,HashLabel]; w: W): HashLabel =
proc getOrVoid*[W](tab: Table[W,VertexID]; w: W): VertexID =
tab.getOrDefault(w, VertexID(0))
# --------
proc isValid*(vtx: VertexRef): bool =
vtx != VertexRef(nil)
@ -84,8 +76,8 @@ proc isValid*(vtx: VertexRef): bool =
proc isValid*(nd: NodeRef): bool =
nd != NodeRef(nil)
proc isValid*(key: NodeKey): bool =
key != VOID_NODE_KEY
proc isValid*(key: HashKey): bool =
key != VOID_HASH_KEY
proc isValid*(lbl: HashLabel): bool =
lbl != VOID_HASH_LABEL

View File

@ -82,9 +82,9 @@ type
MergeNonBranchProofModeLock
MergeRootBranchLinkBusy
MergeNodeKeyEmpty
MergeNodeKeyCachedAlready
MergeNodeKeyDiffersFromCached
MergeHashKeyEmpty
MergeHashKeyCachedAlready
MergeHashKeyDiffersFromCached
MergeRootKeyEmpty
MergeRootKeyDiffersForVid

View File

@ -16,7 +16,6 @@
import
stew/results,
../../../sync/snap/range_desc,
"."/[aristo_error, aristo_types_identifiers, aristo_types_structural]
type
@ -26,7 +25,7 @@ type
## `Aristo DB` data record.
GetKeyFn* =
proc(vid: VertexID): Result[NodeKey,AristoError] {.gcsafe, raises: [].}
proc(vid: VertexID): Result[HashKey,AristoError] {.gcsafe, raises: [].}
## Generic backend database retrieval function for a single
## `Aristo DB` hash lookup value.
@ -53,7 +52,7 @@ type
## Generic backend database bulk storage function.
PutKeyFn* =
proc(hdl: PutHdlRef; vkps: openArray[(VertexID,NodeKey)])
proc(hdl: PutHdlRef; vkps: openArray[(VertexID,HashKey)])
{.gcsafe, raises: [].}
## Generic backend database bulk storage function.

View File

@ -15,9 +15,10 @@
{.push raises: [].}
import
std/strutils,
eth/common,
../../../sync/snap/range_desc
std/[strutils, hashes],
eth/[common, trie/nibbles],
stint,
./aristo_types_private
type
VertexID* = distinct uint64
@ -25,7 +26,20 @@ type
## prefix tree (aka `Patricia Trie`) component. When augmented by hash
## keys, the vertex component will be called a node. On the persistent
## backend of the database, there is no other reference to the node than
## the very same `VertexID`
## the very same `VertexID`.
HashID* = distinct UInt256
## Variant of a `Hash256` object that can be used in a order relation
## (i.e. it can be sorted.) Among temporary conversions for sorting, the
## `HashID` type is consistently used for addressing leaf vertices (see
## below `LeafTie`.)
HashKey* = distinct ByteArray32
## Dedicated `Hash256` object variant that is used for labelling the
## vertices of the `Patricia Trie` in order to make it a
## `Merkle Patricia Tree`.
# ----------
LeafTie* = object
## Unique access key for a leaf vertex. It identifies a root vertex
@ -37,7 +51,7 @@ type
## Note that `LeafTie` objects have no representation in the `Aristo Trie`.
## They are used temporarily and in caches or backlog tables.
root*: VertexID ## Root ID for the sub-trie
path*: NodeTag ## Path into the `Patricia Trie`
path*: HashID ## Path into the `Patricia Trie`
HashLabel* = object
## Merkle hash key uniquely associated with a vertex ID. As hashes in a
@ -47,8 +61,12 @@ type
##
## Note that `LeafTie` objects have no representation in the `Aristo Trie`.
## They are used temporarily and in caches or backlog tables.
root*: VertexID ## Root ID for the sub-trie
key*: NodeKey ## Path into the `Patricia Trie`
root*: VertexID ## Root ID for the sub-trie.
key*: HashKey ## Merkle hash tacked to a vertex.
static:
# Not that there is no doubt about this ...
doAssert HashKey.default.ByteArray32.initNibbleRange.len == 64
# ------------------------------------------------------------------------------
# Public helpers: `VertexID` scalar data model
@ -62,6 +80,93 @@ proc `$`*(a: VertexID): string = $a.uint64
proc `==`*(a: VertexID; b: static[uint]): bool =
a == VertexID(b)
# ------------------------------------------------------------------------------
# Public helpers: `HashID` scalar data model
# ------------------------------------------------------------------------------
proc u256*(lp: HashID): UInt256 = lp.UInt256
proc low*(T: type HashID): T = low(UInt256).T
proc high*(T: type HashID): T = high(UInt256).T
proc `+`*(a: HashID; b: UInt256): HashID = (a.u256+b).HashID
proc `-`*(a: HashID; b: UInt256): HashID = (a.u256-b).HashID
proc `-`*(a, b: HashID): UInt256 = (a.u256 - b.u256)
proc `==`*(a, b: HashID): bool = a.u256 == b.u256
proc `<=`*(a, b: HashID): bool = a.u256 <= b.u256
proc `<`*(a, b: HashID): bool = a.u256 < b.u256
proc cmp*(x, y: HashID): int = cmp(x.UInt256, y.UInt256)
# ------------------------------------------------------------------------------
# Public helpers: Conversions between `HashID`, `HashKey`, `Hash256`
# ------------------------------------------------------------------------------
proc to*(hid: HashID; T: type Hash256): T =
result.data = hid.UInt256.toBytesBE
proc to*(hid: HashID; T: type HashKey): T =
hid.UInt256.toBytesBE.T
proc to*(key: HashKey; T: type HashID): T =
UInt256.fromBytesBE(key.ByteArray32).T
proc to*(key: HashKey; T: type Hash256): T =
T(data: ByteArray32(key))
proc to*(hash: Hash256; T: type HashKey): T =
hash.data.T
proc to*(key: Hash256; T: type HashID): T =
key.data.HashKey.to(T)
# ------------------------------------------------------------------------------
# Public helpers: Miscellaneous mappings
# ------------------------------------------------------------------------------
proc to*(key: HashKey; T: type Blob): T =
## Representation of a `HashKey` as `Blob` (preserving full information)
key.ByteArray32.toSeq
proc to*(key: HashKey; T: type NibblesSeq): T =
## Representation of a `HashKey` as `NibbleSeq` (preserving full information)
key.ByteArray32.initNibbleRange()
proc to*(hid: HashID; T: type NibblesSeq): T =
## Representation of a `HashID` as `NibbleSeq` (preserving full information)
ByteArray32(hid.to(HashKey)).initNibbleRange()
proc to*(n: SomeUnsignedInt|UInt256; T: type HashID): T =
## Representation of a scalar as `HashID` (preserving full information)
n.u256.T
proc digestTo*(data: Blob; T: type HashKey): T =
## Keccak hash of a `Blob`, represented as a `HashKey`
keccakHash(data).data.T
# ------------------------------------------------------------------------------
# Public helpers: `Tables` and `Rlp` support
# ------------------------------------------------------------------------------
proc hash*(a: HashID): Hash =
## Table/KeyedQueue mixin
a.to(HashKey).ByteArray32.hash
proc hash*(a: HashKey): Hash =
## Table/KeyedQueue mixin
a.ByteArray32.hash
proc `==`*(a, b: HashKey): bool =
## Table/KeyedQueue mixin
a.ByteArray32 == b.ByteArray32
proc read*[T: HashID|HashKey](rlp: var Rlp, W: type T): T
{.gcsafe, raises: [RlpError].} =
rlp.read(Hash256).to(T)
proc append*(writer: var RlpWriter, val: HashID|HashKey) =
writer.append(val.to(Hash256))
# ------------------------------------------------------------------------------
# Public helpers: `LeafTie` scalar data model
# ------------------------------------------------------------------------------
@ -79,6 +184,29 @@ proc `$`*(a: LeafTie): string =
let w = $a.root.uint64.toHex & ":" & $a.path.Uint256.toHex
w.strip(leading=true, trailing=false, chars={'0'}).toLowerAscii
# ------------------------------------------------------------------------------
# Miscellaneous helpers
# ------------------------------------------------------------------------------
proc `$`*(hid: HashID): string =
if hid == high(HashID):
"2^256-1"
elif hid == 0.u256.HashID:
"0"
elif hid == 2.u256.pow(255).HashID:
"2^255" # 800...
elif hid == 2.u256.pow(254).HashID:
"2^254" # 400..
elif hid == 2.u256.pow(253).HashID:
"2^253" # 200...
elif hid == 2.u256.pow(251).HashID:
"2^252" # 100...
else:
hid.UInt256.toHex
proc `$`*(key: HashKey): string =
$key.to(HashID)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,15 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
type
ByteArray32* = array[32,byte]
## Used for 32 byte hash components repurposed as Merkle hash labels.
# End

View File

@ -16,7 +16,6 @@
import
eth/[common, trie/nibbles],
../../../sync/snap/range_desc,
"."/[aristo_error, aristo_types_identifiers]
type
@ -55,7 +54,7 @@ type
## Combined record for a *traditional* `Merkle Patricia Tree` node merged
## with a structural `VertexRef` type object.
error*: AristoError ## Can be used for error signalling
key*: array[16,NodeKey] ## Merkle hash/es for Branch & Extension
key*: array[16,HashKey] ## Merkle hash/es for Branch & Extension
# ------------------------------------------------------------------------------
# Public helpers: `NodeRef` and `PayloadRef`

View File

@ -41,7 +41,7 @@ proc getVtxBackend*(
proc getKeyBackend*(
db: AristoDb;
vid: VertexID;
): Result[NodeKey,AristoError] =
): Result[HashKey,AristoError] =
## Get the merkle hash/key from the backend
# key must not have been locally deleted (but not saved, yet)
if vid notin db.top.dKey:
@ -66,7 +66,7 @@ proc getVtxCascaded*(
proc getKeyCascaded*(
db: AristoDb;
vid: VertexID;
): Result[NodeKey,AristoError] =
): Result[HashKey,AristoError] =
## Get the Merkle hash/key from the top layer or the `backend` layer if
## available.
let lbl = db.top.kMap.getOrVoid vid
@ -103,10 +103,10 @@ proc getVtx*(db: AristoDb; lty: LeafTie): VertexRef =
if rc.isOk:
return rc.value.vtx
proc getKey*(db: AristoDb; vid: VertexID): NodeKey =
## Variant of `getKeyCascaded()` returning `VOID_NODE_KEY` on error (while
proc getKey*(db: AristoDb; vid: VertexID): HashKey =
## Variant of `getKeyCascaded()` returning `VOID_HASH_KEY` on error (while
## ignoring the detailed error type information.)
db.getKeyCascaded(vid).get(otherwise = VOID_NODE_KEY)
db.getKeyCascaded(vid).get(otherwise = VOID_HASH_KEY)
# ------------------------------------------------------------------------------
# End

View File

@ -35,8 +35,8 @@
## vertex as long as possible.
## + Stash the rest of the partial chain to be completed later
##
## * While there is a partial chain left, use the ends towards the leaf nodes
## and calculate the remaining keys (which results in a width-first
## * While there is a partial chain left, use the ends towards the leaf
## vertices and calculate the remaining keys (which results in a width-first
## traversal, again.)
{.push raises: [].}
@ -70,7 +70,7 @@ proc toNode(vtx: VertexRef; db: AristoDb): Result[NodeRef,void] =
continue
return err()
else:
node.key[n] = VOID_NODE_KEY
node.key[n] = VOID_HASH_KEY
return ok node
of Extension:
if vtx.eVid.isValid:
@ -98,7 +98,7 @@ proc leafToRootHasher(
# Check against existing key, or store new key
let
key = rc.value.encode.digestTo(NodeKey)
key = rc.value.encode.digestTo(HashKey)
vfy = db.getKey wp.vid
if not vfy.isValid:
db.vidAttach(HashLabel(root: hike.root, key: key), wp.vid)
@ -188,22 +188,22 @@ proc hashify*(
# Also `db.getVtx(fromVid)` => not nil as it was fetched earlier, already
let rc = db.getVtx(fromVid).toNode(db)
if rc.isErr:
# Cannot complete with this node, so do it later
# Cannot complete with this vertex, so do it later
redo[fromVid] = rootAndVid
else:
# Register Hashes
let
nodeKey = rc.value.encode.digestTo(NodeKey)
hashKey = rc.value.encode.digestTo(HashKey)
toVid = rootAndVid[1]
# Update Merkle hash (aka `nodeKey`)
# Update Merkle hash (aka `HashKey`)
let fromLbl = db.top.kMap.getOrVoid fromVid
if fromLbl.isValid:
db.vidAttach(HashLabel(root: rootAndVid[0], key: nodeKey), fromVid)
elif nodeKey != fromLbl.key:
db.vidAttach(HashLabel(root: rootAndVid[0], key: hashKey), fromVid)
elif hashKey != fromLbl.key:
let error = HashifyExistingHashMismatch
debug "hashify failed", vid=fromVid, key=nodeKey,
debug "hashify failed", vid=fromVid, key=hashKey,
expected=fromLbl.key.pp, error
return err((fromVid,error))
@ -245,7 +245,7 @@ proc hashifyCheck*(
let lbl = db.top.kMap.getOrVoid vid
if not lbl.isValid:
return err((vid,HashifyCheckVtxHashMissing))
if lbl.key != rc.value.encode.digestTo(NodeKey):
if lbl.key != rc.value.encode.digestTo(HashKey):
return err((vid,HashifyCheckVtxHashMismatch))
let revVid = db.top.pAmk.getOrVoid lbl
@ -267,7 +267,7 @@ proc hashifyCheck*(
let lbl = db.top.kMap.getOrVoid vid
if not lbl.isValid:
return err((vid,HashifyCheckVtxHashMissing))
if lbl.key != rc.value.encode.digestTo(NodeKey):
if lbl.key != rc.value.encode.digestTo(HashKey):
return err((vid,HashifyCheckVtxHashMismatch))
let revVid = db.top.pAmk.getOrVoid lbl
@ -282,7 +282,7 @@ proc hashifyCheck*(
if vtx.isValid:
let rc = vtx.toNode(db)
if rc.isOk:
if lbl.key != rc.value.encode.digestTo(NodeKey):
if lbl.key != rc.value.encode.digestTo(HashKey):
return err((vid,HashifyCheckVtxHashMismatch))
let revVid = db.top.pAmk.getOrVoid lbl

View File

@ -12,7 +12,7 @@
import
eth/[common, trie/nibbles],
"."/[aristo_desc, aristo_get, aristo_path]
"."/[aristo_desc, aristo_get]
type
Leg* = object
@ -126,7 +126,7 @@ proc hikeUp*(
proc hikeUp*(lty: LeafTie; db: AristoDb): Hike =
## Variant of `hike()`
lty.path.pathAsNibbles.hikeUp(lty.root, db)
lty.path.to(NibblesSeq).hikeUp(lty.root, db)
# ------------------------------------------------------------------------------
# End

View File

@ -8,21 +8,32 @@
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Backend or cascaded constructors for Aristo DB
## ==============================================
## Constructors for Aristo DB
## ==========================
##
## For a backend-less constructor use `AristoDbRef.new()`
## For a backend-less constructor use `AristoDb(top: AristoLayerRef())`.
{.push raises: [].}
import
./aristo_init/[aristo_memory],
./aristo_desc
./aristo_desc,
./aristo_desc/aristo_types_private
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc init*(key: var HashKey; data: openArray[byte]): bool =
## Import argument `data` into `key` which must have length either `32`, or
## `0`. The latter case is equivalent to an all zero byte array of size `32`.
if data.len == 32:
(addr key.ByteArray32[0]).copyMem(unsafeAddr data[0], data.len)
return true
if data.len == 0:
key = VOID_HASH_KEY
return true
proc init*(T: type AristoDb): T =
## Constructor with memory backend.
T(top: AristoLayerRef(),

View File

@ -22,7 +22,7 @@ import
type
MemBackendRef = ref object
sTab: Table[VertexID,VertexRef] ## Structural vertex table making up a trie
kMap: Table[VertexID,NodeKey] ## Merkle hash key mapping
kMap: Table[VertexID,HashKey] ## Merkle hash key mapping
vGen: seq[VertexID]
txGen: uint ## Transaction ID generator (for debugging)
txId: uint ## Active transaction ID (for debugging)
@ -47,9 +47,9 @@ proc getVtxFn(db: MemBackendRef): GetVtxFn =
proc getKeyFn(db: MemBackendRef): GetKeyFn =
result =
proc(vid: VertexID): Result[NodeKey,AristoError] =
let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY:
proc(vid: VertexID): Result[HashKey,AristoError] =
let key = db.kMap.getOrDefault(vid, VOID_HASH_KEY)
if key != VOID_HASH_KEY:
return ok key
err(MemBeKeyNotFound)
@ -79,7 +79,7 @@ proc putVtxFn(db: MemBackendRef): PutVtxFn =
proc putKeyFn(db: MemBackendRef): PutKeyFn =
result =
proc(hdl: PutHdlRef; vkps: openArray[(VertexID,NodeKey)]) =
proc(hdl: PutHdlRef; vkps: openArray[(VertexID,HashKey)]) =
when VerifyIxId:
doAssert db.txId == hdl.MemPutHdlRef.txId
for (vid,key) in vkps:

View File

@ -11,7 +11,7 @@
## Aristo DB -- Patricia Trie builder, raw node insertion
## ======================================================
##
## This module merges `NodeTag` values as hexary lookup paths into the
## This module merges `HashID` values as hexary lookup paths into the
## `Patricia Trie`. When changing vertices (aka nodes without Merkle hashes),
## associated (but separated) Merkle hashes will be deleted unless locked.
## Instead of deleting locked hashes error handling is applied.
@ -133,7 +133,7 @@ proc insertBranch(
# Install `forkVtx`
block:
# Clear Merkle hashes (aka node keys) unless proof mode.
# Clear Merkle hashes (aka hash keys) unless proof mode.
if db.top.pPrf.len == 0:
db.clearMerkleKeys(hike, linkID)
elif linkID in db.top.pPrf:
@ -228,17 +228,17 @@ proc concatBranchAndLeaf(
if brVtx.bVid[nibble].isValid:
return Hike(error: MergeRootBranchLinkBusy)
# Clear Merkle hashes (aka node keys) unless proof mode.
# Clear Merkle hashes (aka hash keys) unless proof mode.
if db.top.pPrf.len == 0:
db.clearMerkleKeys(hike, brVid)
elif brVid in db.top.pPrf:
return Hike(error: MergeBranchProofModeLock) # Ooops
# Append branch node
# Append branch vertex
result = Hike(root: hike.root, legs: hike.legs)
result.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)
# Append leaf node
# Append leaf vertex
let
vid = db.vidFetch
vtx = VertexRef(
@ -324,7 +324,7 @@ proc topIsExtAddLeaf(
result = Hike(root: hike.root, legs: hike.legs)
if not brVtx.isValid:
# Blind vertex, promote to leaf node.
# Blind vertex, promote to leaf vertex.
#
# --(extVid)--> <extVtx> --(brVid)--> nil
#
@ -354,7 +354,7 @@ proc topIsExtAddLeaf(
if linkID.isValid:
return Hike(error: MergeRootBranchLinkBusy)
# Clear Merkle hashes (aka node keys) unless proof mode
# Clear Merkle hashes (aka hash keys) unless proof mode
if db.top.pPrf.len == 0:
db.clearMerkleKeys(hike, brVid)
elif brVid in db.top.pPrf:
@ -386,7 +386,7 @@ proc topIsEmptyAddLeaf(
if rootVtx.bVid[nibble].isValid:
return Hike(error: MergeRootBranchLinkBusy)
# Clear Merkle hashes (aka node keys) unless proof mode
# Clear Merkle hashes (aka hash keys) unless proof mode
if db.top.pPrf.len == 0:
db.clearMerkleKeys(hike, hike.root)
elif hike.root in db.top.pPrf:
@ -413,14 +413,14 @@ proc topIsEmptyAddLeaf(
proc mergeNodeImpl(
db: AristoDb; # Database, top layer
nodeKey: NodeKey; # Merkel hash of node
hashKey: HashKey; # Merkel hash of node
node: NodeRef; # Node derived from RLP representation
rootVid: VertexID; # Current sub-trie
): Result[VertexID,AristoError] =
## The function merges a node key `nodeKey` expanded from its RLP
## representation into the `Aristo Trie` database. The vertex is split off
## from the node and stored separately. So are the Merkle hashes. The
## vertex is labelled `locked`.
## The function merges the argument hash key `hashKey` as expanded from the
## node RLP representation into the `Aristo Trie` database. The vertex is
## split off from the node and stored separately. So are the Merkle hashes.
## The vertex is labelled `locked`.
##
## The `node` argument is *not* checked, whether the vertex IDs have been
## allocated, already. If the node comes straight from the `decode()` RLP
@ -431,26 +431,26 @@ proc mergeNodeImpl(
if not rootVid.isValid:
return err(MergeRootKeyEmpty)
# Verify `nodeKey`
if not nodeKey.isValid:
return err(MergeNodeKeyEmpty)
# Verify `hashKey`
if not hashKey.isValid:
return err(MergeHashKeyEmpty)
# Check whether the node exists, already. If not then create a new vertex ID
var
nodeLbl = HashLabel(root: rootVid, key: nodeKey)
vid = db.top.pAmk.getOrVoid nodeLbl
hashLbl = HashLabel(root: rootVid, key: hashKey)
vid = db.top.pAmk.getOrVoid hashLbl
if not vid.isValid:
vid = db.vidAttach nodeLbl
vid = db.vidAttach hashLbl
else:
let lbl = db.top.kMap.getOrVoid vid
if lbl == nodeLbl:
if lbl == hashLbl:
if db.top.sTab.hasKey vid:
# This is typically considered OK
return err(MergeNodeKeyCachedAlready)
return err(MergeHashKeyCachedAlready)
# Otherwise proceed
elif lbl.isValid:
# Different key assigned => error
return err(MergeNodeKeyDiffersFromCached)
return err(MergeHashKeyDiffersFromCached)
let vtx = node.to(VertexRef) # the vertex IDs need to be set up now (if any)
@ -523,7 +523,7 @@ proc merge*(
vid: hike.root,
vtx: VertexRef(
vType: Leaf,
lPfx: leaf.leafTie.path.pathAsNibbles,
lPfx: leaf.leafTie.path.to(NibblesSeq),
lData: leaf.payload))
db.top.sTab[wp.vid] = wp.vtx
result = Hike(root: wp.vid, legs: @[Leg(wp: wp, nibble: -1)])
@ -565,12 +565,12 @@ proc merge*(
var (merged, dups) = (0, 0)
for n,w in proof:
let
key = w.Blob.digestTo(NodeKey)
key = w.Blob.digestTo(HashKey)
node = w.Blob.decode(NodeRef)
rc = db.mergeNodeImpl(key, node, rootVid)
if rc.isOK:
merged.inc
elif rc.error == MergeNodeKeyCachedAlready:
elif rc.error == MergeHashKeyCachedAlready:
dups.inc
else:
return (n, dups, rc.error)
@ -579,7 +579,7 @@ proc merge*(
proc merge*(
db: AristoDb; # Database, top layer
rootKey: NodeKey; # Merkle hash for root
rootKey: HashKey; # Merkle hash for root
rootVid = VertexID(0) # Optionally, force root vertex ID
): Result[VertexID,AristoError] =
## Set up a `rootKey` associated with a vertex ID.

View File

@ -127,7 +127,7 @@ proc zeroAdjust(
db: AristoDb; # Database layer
doLeast: static[bool]; # Direction: *least* or *most*
): Hike =
## Adjust empty argument path to the first node entry to the right. This
## Adjust empty argument path to the first vertex entry to the right. This
## applies if the argument `hike` is before the first entry in the database.
## The result is a hike which is aligned with the first entry.
proc accept(p: Hike; pfx: NibblesSeq): bool =
@ -171,7 +171,7 @@ proc zeroAdjust(
of Extension:
let ePfx = root.ePfx
# Must be followed by a branch node
# Must be followed by a branch vertex
if hike.tail.len < 2 or not hike.accept(ePfx):
break fail
let vtx = db.getVtx root.eVid
@ -229,9 +229,9 @@ proc finalise(
if 0 < hike.tail.len: # nothing to compare against, otherwise
let top = hike.legs[^1]
# Note that only a `Branch` nodes has a non-zero nibble
# Note that only `Branch` vertices have a non-zero nibble
if 0 <= top.nibble and top.nibble == top.wp.vtx.branchBorderNibble:
# Check the following up node
# Check the following up vertex
let vtx = db.getVtx top.wp.vtx.bVid[top.nibble]
if not vtx.isValid:
return Hike(error: NearbyDanglingLink)
@ -252,7 +252,7 @@ proc finalise(
# * finalise left: n00000.. for 0 < n
if hike.legs[0].wp.vtx.vType == Branch or
(1 < hike.legs.len and hike.legs[1].wp.vtx.vType == Branch):
return Hike(error: NearbyFailed) # no more nodes
return Hike(error: NearbyFailed) # no more vertices
Hike(error: NearbyUnexpectedVtx) # error
@ -314,7 +314,7 @@ proc nearbyNext(
uHikeLen = uHike.legs.len # in case of backtracking
uHikeTail = uHike.tail # in case of backtracking
# Look ahead checking next node
# Look ahead checking next vertex
if start:
let vid = top.wp.vtx.bVid[top.nibble]
if not vid.isValid:
@ -334,7 +334,7 @@ proc nearbyNext(
of Branch:
let nibble = uHike.tail[0].int8
if start and accept nibble:
# Step down and complete with a branch link on the child node
# Step down and complete with a branch link on the child vertex
step = Leg(wp: VidVtxPair(vid: vid, vtx: vtx), nibble: nibble)
uHike.legs.add step
@ -354,7 +354,7 @@ proc nearbyNext(
uHike.legs.setLen(uHikeLen)
uHike.tail = uHikeTail
else:
# Pop current `Branch` node on top and append nibble to `tail`
# Pop current `Branch` vertex on top and append nibble to `tail`
uHike.tail = @[top.nibble.byte].initNibbleRange.slice(1) & uHike.tail
uHike.legs.setLen(uHike.legs.len - 1)
# End while
@ -368,7 +368,7 @@ proc nearbyNext(
db: AristoDb; # Database layer
hikeLenMax: static[int]; # Beware of loops (if any)
moveRight:static[bool]; # Direction of next vertex
): Result[NodeTag,AristoError] =
): Result[HashID,AristoError] =
## Variant of `nearbyNext()`, convenience wrapper
let hike = lty.hikeUp(db).nearbyNext(db, hikeLenMax, moveRight)
if hike.error != AristoError(0):
@ -377,7 +377,7 @@ proc nearbyNext(
if 0 < hike.legs.len and hike.legs[^1].wp.vtx.vType == Leaf:
let rc = hike.legsTo(NibblesSeq).pathToKey
if rc.isOk:
return ok rc.value.to(NodeTag)
return ok rc.value.to(HashID)
return err(rc.error)
err(NearbyLeafExpected)
@ -390,22 +390,22 @@ proc nearbyRight*(
hike: Hike; # Partially expanded chain of vertices
db: AristoDb; # Database layer
): Hike =
## Extends the maximally extended argument nodes `hike` to the right (i.e.
## Extends the maximally extended argument vertices `hike` to the right (i.e.
## with non-decreasing path value). This function does not backtrack if
## there are dangling links in between. It will return an error in that case.
##
## If there is no more leaf node to the right of the argument `hike`, the
## If there is no more leaf vertices to the right of the argument `hike`, the
## particular error code `NearbyBeyondRange` is returned.
##
## This code is intended to be used for verifying a left-bound proof to
## verify that there is no leaf node *right* of a boundary path value.
## verify that there is no leaf vertex *right* of a boundary path value.
hike.nearbyNext(db, 64, moveRight=true)
proc nearbyRight*(
lty: LeafTie; # Some `Patricia Trie` path
db: AristoDb; # Database layer
): Result[LeafTie,AristoError] =
## Variant of `nearbyRight()` working with a `NodeTag` argument instead
## Variant of `nearbyRight()` working with a `HashID` argument instead
## of a `Hike`.
let rc = lty.nearbyNext(db, 64, moveRight=true)
if rc.isErr:
@ -419,15 +419,14 @@ proc nearbyLeft*(
## Similar to `nearbyRight()`.
##
## This code is intended to be used for verifying a right-bound proof to
## verify that there is no leaf node *left* to a boundary path value.
## verify that there is no leaf vertex *left* to a boundary path value.
hike.nearbyNext(db, 64, moveRight=false)
proc nearbyLeft*(
lty: LeafTie; # Some `Patricia Trie` path
db: AristoDb; # Database layer
): Result[LeafTie,AristoError] =
## Similar to `nearbyRight()` for `NodeTag` argument instead
## of a `Hike`.
## Similar to `nearbyRight()` for `HashID` argument instead of a `Hike`.
let rc = lty.nearbyNext(db, 64, moveRight=false)
if rc.isErr:
return err(rc.error)
@ -441,10 +440,10 @@ proc nearbyRightMissing*(
hike: Hike; # Partially expanded chain of vertices
db: AristoDb; # Database layer
): Result[bool,AristoError] =
## Returns `true` if the maximally extended argument nodes `hike` is the
## rightmost on the hexary trie database. It verifies that there is no more
## Returns `true` if the maximally extended argument vertex `hike` is the
## rightmost on the hexary trie database. It verifies that there is no more
## leaf entry to the right of the argument `hike`. This function is an
## an alternative to
## alternative to
## ::
## let rc = path.nearbyRight(db)
## if rc.isOk:
@ -454,7 +453,7 @@ proc nearbyRightMissing*(
## # problem with database => error
## ...
## else:
## # no more nodes => true
## # no more vertices => true
## ...
## and is intended mainly for debugging.
if hike.legs.len == 0:

View File

@ -14,7 +14,8 @@ import
std/sequtils,
eth/[common, trie/nibbles],
stew/results,
./aristo_desc
./aristo_desc,
./aristo_desc/aristo_types_private
# Info snippet (just a reminder to keep somewhere)
#
@ -34,35 +35,28 @@ import
# Public functions
# ------------------------------------------------------------------------------
proc pathAsNibbles*(key: NodeKey): NibblesSeq =
key.ByteArray32.initNibbleRange()
proc pathAsNibbles*(tag: NodeTag): NibblesSeq =
tag.to(NodeKey).pathAsNibbles()
proc pathAsBlob*(keyOrTag: NodeKey|NodeTag): Blob =
proc pathAsBlob*(keyOrTag: HashKey|HashID): Blob =
keyOrTag.pathAsNibbles.hexPrefixEncode(isLeaf=true)
proc pathToKey*(partPath: NibblesSeq): Result[NodeKey,AristoError] =
proc pathToKey*(partPath: NibblesSeq): Result[HashKey,AristoError] =
var key: ByteArray32
if partPath.len == 64:
# Trailing dummy nibbles (aka no nibbles) force a nibble seq reorg
let path = (partPath & EmptyNibbleSeq).getBytes()
(addr key[0]).copyMem(unsafeAddr path[0], 32)
return ok(key.NodeKey)
return ok(key.HashKey)
err(PathExpected64Nibbles)
proc pathToKey*(partPath: Blob): Result[NodeKey,AristoError] =
proc pathToKey*(partPath: Blob): Result[HashKey,AristoError] =
let (isLeaf,pathSegment) = partPath.hexPrefixDecode
if isleaf:
return pathSegment.pathToKey()
err(PathExpectedLeaf)
proc pathToTag*(partPath: NibblesSeq|Blob): Result[NodeTag,AristoError] =
proc pathToTag*(partPath: NibblesSeq|Blob): Result[HashID,AristoError] =
let rc = partPath.pathToKey()
if rc.isOk:
return ok(rc.value.to(NodeTag))
return ok(rc.value.to(HashID))
err(rc.error)
# --------------------
@ -86,11 +80,11 @@ proc pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
let nope = seq[byte].default.initNibbleRange
result = pfx.slice(0,64) & nope # nope forces re-alignment
proc pathPfxPadKey*(pfx: NibblesSeq; dblNibble: static[byte]): NodeKey =
proc pathPfxPadKey*(pfx: NibblesSeq; dblNibble: static[byte]): HashKey =
## Variant of `pathPfxPad()`.
##
## Extend (or cut) the argument nibbles sequence `pfx` for generating a
## `NodeKey`.
## `HashKey`.
let bytes = pfx.pathPfxPad(dblNibble).getBytes
(addr result.ByteArray32[0]).copyMem(unsafeAddr bytes[0], bytes.len)

View File

@ -14,7 +14,7 @@ import
std/[bitops, sequtils],
eth/[common, trie/nibbles],
stew/results,
"."/[aristo_constants, aristo_desc]
"."/[aristo_constants, aristo_desc, aristo_init]
# ------------------------------------------------------------------------------
# Private functions
@ -24,16 +24,6 @@ proc aristoError(error: AristoError): NodeRef =
## Allows returning de
NodeRef(vType: Leaf, error: error)
proc aInit(key: var NodeKey; data: openArray[byte]): bool =
## Import argument `data` into `key` which must have length either `32`, or
## `0`. The latter case is equivalent to an all zero byte array of size `32`.
if data.len == 32:
(addr key.ByteArray32[0]).copyMem(unsafeAddr data[0], data.len)
return true
elif data.len == 0:
key = VOID_NODE_KEY
return true
# ------------------------------------------------------------------------------
# Public RLP transcoder mixins
# ------------------------------------------------------------------------------
@ -51,7 +41,7 @@ proc read*(
var
blobs = newSeq[Blob](2) # temporary, cache
links: array[16,NodeKey] # reconstruct branch node
links: array[16,HashKey] # reconstruct branch node
top = 0 # count entries and positions
# Collect lists of either 2 or 17 blob entries.
@ -62,7 +52,7 @@ proc read*(
return aristoError(RlpBlobExpected)
blobs[top] = rlp.read(Blob)
of 2 .. 15:
if not links[top].aInit(rlp.read(Blob)):
if not links[top].init(rlp.read(Blob)):
return aristoError(RlpBranchLinkExpected)
of 16:
if not w.isBlob:
@ -90,12 +80,12 @@ proc read*(
var node = NodeRef(
vType: Extension,
ePfx: pathSegment)
if not node.key[0].aInit(blobs[1]):
if not node.key[0].init(blobs[1]):
return aristoError(RlpExtPathEncoding)
return node
of 17:
for n in [0,1]:
if not links[n].aInit(blobs[n]):
if not links[n].init(blobs[n]):
return aristoError(RlpBranchLinkExpected)
return NodeRef(
vType: Branch,
@ -109,7 +99,7 @@ proc read*(
proc append*(writer: var RlpWriter; node: NodeRef) =
## Mixin for RLP writer. Note that a `Dummy` node is encoded as an empty
## list.
proc addNodeKey(writer: var RlpWriter; key: NodeKey) =
proc addHashKey(writer: var RlpWriter; key: HashKey) =
if not key.isValid:
writer.append EmptyBlob
else:
@ -122,12 +112,12 @@ proc append*(writer: var RlpWriter; node: NodeRef) =
of Branch:
writer.startList(17)
for n in 0..15:
writer.addNodeKey node.key[n]
writer.addHashKey node.key[n]
writer.append EmptyBlob
of Extension:
writer.startList(2)
writer.append node.ePfx.hexPrefixEncode(isleaf = false)
writer.addNodeKey node.key[0]
writer.addHashKey node.key[0]
of Leaf:
writer.startList(2)
writer.append node.lPfx.hexPrefixEncode(isleaf = true)
@ -143,12 +133,12 @@ proc blobify*(node: VertexRef; data: var Blob): AristoError =
## boundaries.
## ::
## Branch:
## uint64, ... -- list of up to 16 child nodes lookup keys
## uint64, ... -- list of up to 16 child vertices lookup keys
## uint16 -- index bitmap
## 0x00 -- marker(2) + unused(2)
##
## Extension:
## uint64 -- child node lookup key
## uint64 -- child vertex lookup key
## Blob -- hex encoded partial path (at least one byte)
## 0x80 -- marker(2) + unused(2)
##
@ -158,7 +148,7 @@ proc blobify*(node: VertexRef; data: var Blob): AristoError =
## 0xc0 -- marker(2) + partialPathLen(6)
##
## For a branch record, the bytes of the `access` array indicate the position
## of the Patricia Trie node reference. So the `vertexID` with index `n` has
## of the Patricia Trie vertex reference. So the `vertexID` with index `n` has
## ::
## 8 * n * ((access shr (n * 4)) and 15)
##
@ -229,7 +219,7 @@ proc deblobify*(record: Blob; vtx: var VertexRef): AristoError =
return DbrTooShort
case record[^1] shr 6:
of 0: # `Branch` node
of 0: # `Branch` vertex
if record.len < 19: # at least two edges
return DbrBranchTooShort
if (record.len mod 8) != 3:
@ -254,7 +244,7 @@ proc deblobify*(record: Blob; vtx: var VertexRef): AristoError =
vType: Branch,
bVid: vtxList)
of 2: # `Extension` node
of 2: # `Extension` vertex
let
sLen = record[^1].int and 0x3f # length of path segment
rlen = record.len - 1 # `vertexID` + path segm
@ -270,7 +260,7 @@ proc deblobify*(record: Blob; vtx: var VertexRef): AristoError =
eVid: (uint64.fromBytesBE record[0 ..< 8]).VertexID,
ePfx: pathSegment)
of 3: # `Leaf` node
of 3: # `Leaf` vertex
let
sLen = record[^1].int and 0x3f # length of path segment
rlen = record.len - 1 # payload + path segment

View File

@ -101,7 +101,7 @@ proc vidAttach*(db: AristoDb; lbl: HashLabel): VertexID {.discardable.} =
result = db.vidFetch
db.vidAttach(lbl, result)
proc vidRoot*(db: AristoDb; key: NodeKey): VertexID {.discardable.} =
proc vidRoot*(db: AristoDb; key: HashKey): VertexID {.discardable.} =
## Variant of `vidAttach()` for generating a sub-trie root
result = db.vidFetch
db.vidAttach(HashLabel(root: result, key: key), result)

View File

@ -100,12 +100,12 @@ proc cachedVID(db: AristoDb; lbl: HashLabel): VertexID =
result = db.vidAttach lbl
# ------------------------------------------------------------------------------
# Public functions for `VertexID` => `NodeKey` mapping
# Public functions for `VertexID` => `HashKey` mapping
# ------------------------------------------------------------------------------
proc pal*(db: AristoDb; rootID: VertexID; vid: VertexID): NodeKey =
## Retrieve the cached `Merkel` hash (aka `NodeKey` object) associated with
## the argument `VertexID` type argument `vid`. Return a zero `NodeKey` if
proc pal*(db: AristoDb; rootID: VertexID; vid: VertexID): HashKey =
## Retrieve the cached `Merkel` hash (aka `HashKey` object) associated with
## the argument `VertexID` type argument `vid`. Return a zero `HashKey` if
## there is none.
##
## If the vertex ID `vid` is not found in the cache, then the structural
@ -122,7 +122,7 @@ proc pal*(db: AristoDb; rootID: VertexID; vid: VertexID): NodeKey =
if db.convertPartiallyOk(vtx,node):
var w = initRlpWriter()
w.append node
result = w.finish.keccakHash.data.NodeKey
result = w.finish.keccakHash.data.HashKey
db.top.kMap[vid] = HashLabel(root: rootID, key: result)
# ------------------------------------------------------------------------------
@ -135,7 +135,7 @@ proc updated*(nd: NodeRef; root: VertexID; db: AristoDb): NodeRef =
## For a `Leaf` node, the payload data `PayloadRef` type reference is *not*
## duplicated and returned as-is.
##
## This function will not complain if all `Merkel` hashes (aka `NodeKey`
## This function will not complain if all `Merkel` hashes (aka `HashKey`
## objects) are zero for either `Extension` or `Leaf` nodes.
if nd.isValid:
case nd.vType:
@ -161,7 +161,7 @@ proc updated*(nd: NodeRef; root: VertexID; db: AristoDb): NodeRef =
proc asNode*(vtx: VertexRef; db: AristoDb): NodeRef =
## Return a `NodeRef` object by augmenting missing `Merkel` hashes (aka
## `NodeKey` objects) from the cache or from calculated cached vertex
## `HashKey` objects) from the cache or from calculated cached vertex
## entries, if available.
##
## If not all `Merkel` hashes are available in a single lookup, then the

View File

@ -18,7 +18,6 @@ import
unittest2,
../../nimbus/db/aristo/[
aristo_desc, aristo_delete, aristo_hashify, aristo_nearby, aristo_merge],
../../nimbus/sync/snap/range_desc,
./test_helpers
type
@ -93,8 +92,8 @@ proc fwdWalkVerify(
error = rc.error
check rc.error == AristoError(0)
break
if rc.value.path < high(NodeTag):
lty.path = NodeTag(rc.value.path.u256 + 1)
if rc.value.path < high(HashID):
lty.path = HashID(rc.value.path.u256 + 1)
n.inc
if error != AristoError(0):

View File

@ -20,9 +20,12 @@ import
../test_sync_snap/test_types,
../replay/[pp, undump_accounts, undump_storages]
from ../../nimbus/sync/snap/range_desc
import NodeKey
type
ProofTrieData* = object
root*: NodeKey
root*: HashKey
id*: int
proof*: seq[SnapProof]
kvpLst*: seq[LeafTiePayload]
@ -34,6 +37,9 @@ type
proc toPfx(indent: int): string =
"\n" & " ".repeat(indent)
proc to(a: NodeKey; T: type HashKey): T =
a.T
# ------------------------------------------------------------------------------
# Public pretty printing
# ------------------------------------------------------------------------------
@ -118,9 +124,9 @@ proc to*(sample: AccountsSample; T: type seq[UndumpStorages]): T =
result.add w
proc to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
var (rootKey, rootVid) = (VOID_NODE_KEY, VertexID(0))
var (rootKey, rootVid) = (VOID_HASH_KEY, VertexID(0))
for w in ua:
let thisRoot = w.root.to(NodeKey)
let thisRoot = w.root.to(HashKey)
if rootKey != thisRoot:
(rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
if 0 < w.data.accounts.len:
@ -128,14 +134,16 @@ proc to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
root: rootKey,
proof: w.data.proof,
kvpLst: w.data.accounts.mapIt(LeafTiePayload(
leafTie: LeafTie(root: rootVid, path: it.accKey.to(NodeTag)),
leafTie: LeafTie(
root: rootVid,
path: it.accKey.to(HashKey).to(HashID)),
payload: PayloadRef(pType: BlobData, blob: it.accBlob))))
proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
var (rootKey, rootVid) = (VOID_NODE_KEY, VertexID(0))
var (rootKey, rootVid) = (VOID_HASH_KEY, VertexID(0))
for n,s in us:
for w in s.data.storages:
let thisRoot = w.account.storageRoot.to(NodeKey)
let thisRoot = w.account.storageRoot.to(HashKey)
if rootKey != thisRoot:
(rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
if 0 < w.data.len:
@ -143,7 +151,9 @@ proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
root: thisRoot,
id: n + 1,
kvpLst: w.data.mapIt(LeafTiePayload(
leafTie: LeafTie(root: rootVid, path: it.slotHash.to(NodeTag)),
leafTie: LeafTie(
root: rootVid,
path: it.slotHash.to(HashKey).to(HashID)),
payload: PayloadRef(pType: BlobData, blob: it.slotData))))
if 0 < result.len:
result[^1].proof = s.data.proof

View File

@ -194,7 +194,7 @@ proc test_mergeProofAndKvpList*(
oopsTab = oops.toTable
var
db: AristoDb
rootKey = NodeKey.default
rootKey = HashKey.default
count = 0
for n,w in list:
if resetDb or w.root != rootKey or w.proof.len == 0:
@ -222,7 +222,7 @@ proc test_mergeProofAndKvpList*(
check rc.error == AristoError(0)
return
proved = db.merge(w.proof, rc.value)
check proved.error in {AristoError(0),MergeNodeKeyCachedAlready}
check proved.error in {AristoError(0),MergeHashKeyCachedAlready}
check w.proof.len == proved.merged + proved.dups
check db.top.lTab.len == lTabLen
check db.top.sTab.len == proved.merged + sTabLen

View File

@ -18,7 +18,6 @@ import
unittest2,
../../nimbus/db/aristo/[
aristo_desc, aristo_debug, aristo_merge, aristo_nearby],
../../nimbus/sync/snap/range_desc,
./test_helpers
# ------------------------------------------------------------------------------
@ -28,14 +27,14 @@ import
proc fwdWalkLeafsCompleteDB(
db: AristoDb;
root: VertexID;
tags: openArray[NodeTag];
tags: openArray[HashID];
noisy: bool;
): tuple[visited: int, error: AristoError] =
let
tLen = tags.len
var
error = AristoError(0)
lty = LeafTie(root: root, path: NodeTag(tags[0].u256 div 2))
lty = LeafTie(root: root, path: HashID(tags[0].u256 div 2))
n = 0
while true:
let rc = lty.nearbyRight(db)
@ -63,8 +62,8 @@ proc fwdWalkLeafsCompleteDB(
error = AristoError(1)
check rc.value.path == tags[n]
break
if rc.value.path < high(NodeTag):
lty.path = NodeTag(rc.value.path.u256 + 1)
if rc.value.path < high(HashID):
lty.path = HashID(rc.value.path.u256 + 1)
n.inc
(n,error)
@ -73,7 +72,7 @@ proc fwdWalkLeafsCompleteDB(
proc revWalkLeafsCompleteDB(
db: AristoDb;
root: VertexID;
tags: openArray[NodeTag];
tags: openArray[HashID];
noisy: bool;
): tuple[visited: int, error: AristoError] =
let
@ -81,7 +80,7 @@ proc revWalkLeafsCompleteDB(
var
error = AristoError(0)
delta = ((high(UInt256) - tags[^1].u256) div 2)
lty = LeafTie(root: root, path: NodeTag(tags[^1].u256 + delta))
lty = LeafTie(root: root, path: HashID(tags[^1].u256 + delta))
n = tLen-1
while true: # and false:
let rc = lty.nearbyLeft(db)
@ -107,8 +106,8 @@ proc revWalkLeafsCompleteDB(
error = AristoError(1)
check rc.value.path == tags[n]
break
if low(NodeTag) < rc.value.path:
lty.path = NodeTag(rc.value.path.u256 - 1)
if low(HashID) < rc.value.path:
lty.path = HashID(rc.value.path.u256 - 1)
n.dec
(tLen-1 - n, error)
@ -124,8 +123,8 @@ proc test_nearbyKvpList*(
): bool =
var
db: AristoDb
rootKey = NodeKey.default
tagSet: HashSet[NodeTag]
rootKey = HashKey.default
tagSet: HashSet[HashID]
count = 0
for n,w in list:
if resetDb or w.root != rootKey: