Detach from snap/sync declarations & definitions (#1601)

why: Tests and some basic components were originally borrowed from the
     snap/sync implementation. These have been fully re-implemented.

parent 0308dfac4f
commit d7f40516a7
@@ -12,7 +12,6 @@

 import
   eth/[common, trie/nibbles],
-  ../../sync/snap/range_desc,
   ./aristo_desc/aristo_types_identifiers

 const

@@ -22,12 +21,12 @@ const
   EmptyNibbleSeq* = EmptyBlob.initNibbleRange
     ## Useful shortcut (borrowed from `sync/snap/constants.nim`)

-  VOID_NODE_KEY* = EMPTY_ROOT_HASH.to(NodeKey)
-    ## Equivalent of `nil` for Merkle hash ket
-
-  VOID_CODE_KEY* = EMPTY_CODE_HASH.to(NodeKey)
+  VOID_CODE_KEY* = EMPTY_CODE_HASH.to(HashKey)
     ## Equivalent of `nil` for `Account` object code hash

-  VOID_HASH_LABEL* = HashLabel(root: VertexID(0), key: VOID_NODE_KEY)
+  VOID_HASH_KEY* = EMPTY_ROOT_HASH.to(HashKey)
+    ## Equivalent of `nil` for Merkle hash key
+
+  VOID_HASH_LABEL* = HashLabel(root: VertexID(0), key: VOID_HASH_KEY)

 # End
@@ -14,7 +14,8 @@ import
   std/[algorithm, sequtils, sets, strutils, tables],
   eth/[common, trie/nibbles],
   stew/byteutils,
-  "."/[aristo_constants, aristo_desc, aristo_hike, aristo_vid]
+  "."/[aristo_constants, aristo_desc, aristo_hike, aristo_vid],
+  ./aristo_desc/aristo_types_private

 # ------------------------------------------------------------------------------
 # Private functions

@@ -83,10 +84,10 @@ proc vidCode(lbl: HashLabel, db: AristoDb): uint64 =
     if vid.isValid:
       return vid.uint64

-proc ppKey(key: NodeKey): string =
-  if key == NodeKey.default:
+proc ppKey(key: HashKey): string =
+  if key == HashKey.default:
     return "£ø"
-  if key == VOID_NODE_KEY:
+  if key == VOID_HASH_KEY:
     return "£r"
   if key == VOID_CODE_KEY:
     return "£c"

@@ -96,9 +97,9 @@ proc ppKey(key: NodeKey): string =
     .squeeze(hex=true,ignLen=true)

 proc ppLabel(lbl: HashLabel; db: AristoDb): string =
-  if lbl.key == NodeKey.default:
+  if lbl.key == HashKey.default:
     return "£ø"
-  if lbl.key == VOID_NODE_KEY:
+  if lbl.key == VOID_HASH_KEY:
     return "£r"
   if lbl.key == VOID_CODE_KEY:
     return "£c"

@@ -118,11 +119,11 @@ proc ppLabel(lbl: HashLabel; db: AristoDb): string =
     .mapIt(it.toHex(2)).join.tolowerAscii
     .squeeze(hex=true,ignLen=true)

-proc ppRootKey(a: NodeKey): string =
+proc ppRootKey(a: HashKey): string =
   if a.isValid:
     return a.ppKey

-proc ppCodeKey(a: NodeKey): string =
+proc ppCodeKey(a: HashKey): string =
   if a != VOID_CODE_KEY:
     return a.ppKey

@@ -133,7 +134,7 @@ proc ppLeafTie(lty: LeafTie, db: AristoDb): string =
       return "@" & vid.ppVid

   "@" & ($lty.root.uint64.toHex).stripZeros & ":" &
-    lty.path.to(NodeKey).ByteArray32
+    lty.path.to(HashKey).ByteArray32
       .mapIt(it.toHex(2)).join.squeeze(hex=true,ignLen=true)

 proc ppPathPfx(pfx: NibblesSeq): string =

@@ -154,8 +155,8 @@ proc ppPayload(p: PayloadRef, db: AristoDb): string =
     result = "("
     result &= $p.account.nonce & ","
     result &= $p.account.balance & ","
-    result &= p.account.storageRoot.to(NodeKey).ppRootKey() & ","
-    result &= p.account.codeHash.to(NodeKey).ppCodeKey() & ")"
+    result &= p.account.storageRoot.to(HashKey).ppRootKey() & ","
+    result &= p.account.codeHash.to(HashKey).ppCodeKey() & ")"

 proc ppVtx(nd: VertexRef, db: AristoDb, vid: VertexID): string =
   if not nd.isValid:

@@ -261,9 +262,9 @@ proc lblToVtxID*(db: var AristoDb, lbl: HashLabel): VertexID =
     db.xMap[lbl] = result

 proc hashToVtxID*(db: var AristoDb, root: VertexID; hash: Hash256): VertexID =
-  db.lblToVtxID HashLabel(root: root, key: hash.to(NodeKey))
+  db.lblToVtxID HashLabel(root: root, key: hash.to(HashKey))

-proc pp*(key: NodeKey): string =
+proc pp*(key: HashKey): string =
   key.ppKey

 proc pp*(lbl: HashLabel, db = AristoDb()): string =
@@ -78,7 +78,7 @@ proc deleteImpl(
   inx.dec

   while 0 <= inx:
-    # Unlink child node
+    # Unlink child vertex
     let br = hike.legs[inx].wp
     if br.vtx.vType != Branch:
       return err((br.vid,DelBranchExpexted))
@@ -16,16 +16,14 @@
 ##
 ## Some semantic explanations;
 ##
-## * NodeKey, NodeRef etc. refer to the standard/legacy `Merkel Patricia Tree`
+## * HashKey, NodeRef etc. refer to the standard/legacy `Merkle Patricia Tree`
 ## * VertexID, VertexRef, etc. refer to the `Aristo Trie`
 ##
 {.push raises: [].}

 import
   std/[sets, tables],
-  eth/[common, trie/nibbles],
-  stew/results,
-  ../../sync/snap/range_desc,
+  eth/common,
   ./aristo_constants,
   ./aristo_desc/[
     aristo_error, aristo_types_backend,

@@ -36,9 +34,6 @@ export
   aristo_constants, aristo_error, aristo_types_identifiers,
   aristo_types_structural

-export # This one should go away one time
-  ByteArray32, NodeKey, NodeTag, digestTo, hash, to, `==`, `$`
-
 type
   AristoLayerRef* = ref object
     ## Hexary trie database layer structures. Any layer holds the full

@@ -60,10 +55,6 @@ type
     # Debugging data below, might go away in future
     xMap*: Table[HashLabel,VertexID]   ## For pretty printing, extends `pAmk`

-static:
-  # Not that there is no doubt about this ...
-  doAssert NodeKey.default.ByteArray32.initNibbleRange.len == 64
-
 # ------------------------------------------------------------------------------
 # Public helpers
 # ------------------------------------------------------------------------------

@@ -77,6 +68,7 @@ proc getOrVoid*[W](tab: Table[W,HashLabel]; w: W): HashLabel =
 proc getOrVoid*[W](tab: Table[W,VertexID]; w: W): VertexID =
   tab.getOrDefault(w, VertexID(0))

+# --------

 proc isValid*(vtx: VertexRef): bool =
   vtx != VertexRef(nil)

@@ -84,8 +76,8 @@ proc isValid*(vtx: VertexRef): bool =
 proc isValid*(nd: NodeRef): bool =
   nd != NodeRef(nil)

-proc isValid*(key: NodeKey): bool =
-  key != VOID_NODE_KEY
+proc isValid*(key: HashKey): bool =
+  key != VOID_HASH_KEY

 proc isValid*(lbl: HashLabel): bool =
   lbl != VOID_HASH_LABEL
@@ -82,9 +82,9 @@ type
     MergeNonBranchProofModeLock
     MergeRootBranchLinkBusy

-    MergeNodeKeyEmpty
-    MergeNodeKeyCachedAlready
-    MergeNodeKeyDiffersFromCached
+    MergeHashKeyEmpty
+    MergeHashKeyCachedAlready
+    MergeHashKeyDiffersFromCached
     MergeRootKeyEmpty

    MergeRootKeyDiffersForVid
@@ -16,7 +16,6 @@

 import
   stew/results,
-  ../../../sync/snap/range_desc,
   "."/[aristo_error, aristo_types_identifiers, aristo_types_structural]

 type

@@ -26,7 +25,7 @@ type
     ## `Aristo DB` data record.

   GetKeyFn* =
-    proc(vid: VertexID): Result[NodeKey,AristoError] {.gcsafe, raises: [].}
+    proc(vid: VertexID): Result[HashKey,AristoError] {.gcsafe, raises: [].}
       ## Generic backend database retrieval function for a single
       ## `Aristo DB` hash lookup value.

@@ -53,7 +52,7 @@ type
       ## Generic backend database bulk storage function.

   PutKeyFn* =
-    proc(hdl: PutHdlRef; vkps: openArray[(VertexID,NodeKey)])
+    proc(hdl: PutHdlRef; vkps: openArray[(VertexID,HashKey)])
       {.gcsafe, raises: [].}
       ## Generic backend database bulk storage function.
@@ -15,9 +15,10 @@
 {.push raises: [].}

 import
-  std/strutils,
-  eth/common,
-  ../../../sync/snap/range_desc
+  std/[strutils, hashes],
+  eth/[common, trie/nibbles],
+  stint,
+  ./aristo_types_private

 type
   VertexID* = distinct uint64

@@ -25,7 +26,20 @@ type
     ## prefix tree (aka `Patricia Trie`) component. When augmented by hash
     ## keys, the vertex component will be called a node. On the persistent
     ## backend of the database, there is no other reference to the node than
-    ## the very same `VertexID`
+    ## the very same `VertexID`.

+  HashID* = distinct UInt256
+    ## Variant of a `Hash256` object that can be used in an order relation
+    ## (i.e. it can be sorted.) Among temporary conversions for sorting, the
+    ## `HashID` type is consistently used for addressing leaf vertices (see
+    ## below `LeafTie`.)
+
+  HashKey* = distinct ByteArray32
+    ## Dedicated `Hash256` object variant that is used for labelling the
+    ## vertices of the `Patricia Trie` in order to make it a
+    ## `Merkle Patricia Tree`.
+
+  # ----------
+
   LeafTie* = object
     ## Unique access key for a leaf vertex. It identifies a root vertex

@@ -37,7 +51,7 @@ type
     ## Note that `LeafTie` objects have no representation in the `Aristo Trie`.
     ## They are used temporarily and in caches or backlog tables.
     root*: VertexID                  ## Root ID for the sub-trie
-    path*: NodeTag                   ## Path into the `Patricia Trie`
+    path*: HashID                    ## Path into the `Patricia Trie`

   HashLabel* = object
     ## Merkle hash key uniquely associated with a vertex ID. As hashes in a

@@ -47,8 +61,12 @@ type
     ##
     ## Note that `LeafTie` objects have no representation in the `Aristo Trie`.
     ## They are used temporarily and in caches or backlog tables.
-    root*: VertexID                  ## Root ID for the sub-trie
-    key*: NodeKey                    ## Path into the `Patricia Trie`
+    root*: VertexID                  ## Root ID for the sub-trie.
+    key*: HashKey                    ## Merkle hash tacked to a vertex.

+static:
+  # Not that there is no doubt about this ...
+  doAssert HashKey.default.ByteArray32.initNibbleRange.len == 64
+
 # ------------------------------------------------------------------------------
 # Public helpers: `VertexID` scalar data model

@@ -62,6 +80,93 @@ proc `$`*(a: VertexID): string = $a.uint64
 proc `==`*(a: VertexID; b: static[uint]): bool =
   a == VertexID(b)

+# ------------------------------------------------------------------------------
+# Public helpers: `HashID` scalar data model
+# ------------------------------------------------------------------------------
+
+proc u256*(lp: HashID): UInt256 = lp.UInt256
+proc low*(T: type HashID): T = low(UInt256).T
+proc high*(T: type HashID): T = high(UInt256).T
+
+proc `+`*(a: HashID; b: UInt256): HashID = (a.u256+b).HashID
+proc `-`*(a: HashID; b: UInt256): HashID = (a.u256-b).HashID
+proc `-`*(a, b: HashID): UInt256 = (a.u256 - b.u256)
+
+proc `==`*(a, b: HashID): bool = a.u256 == b.u256
+proc `<=`*(a, b: HashID): bool = a.u256 <= b.u256
+proc `<`*(a, b: HashID): bool = a.u256 < b.u256
+
+proc cmp*(x, y: HashID): int = cmp(x.UInt256, y.UInt256)
+
+# ------------------------------------------------------------------------------
+# Public helpers: Conversions between `HashID`, `HashKey`, `Hash256`
+# ------------------------------------------------------------------------------
+
+proc to*(hid: HashID; T: type Hash256): T =
+  result.data = hid.UInt256.toBytesBE
+
+proc to*(hid: HashID; T: type HashKey): T =
+  hid.UInt256.toBytesBE.T
+
+proc to*(key: HashKey; T: type HashID): T =
+  UInt256.fromBytesBE(key.ByteArray32).T
+
+proc to*(key: HashKey; T: type Hash256): T =
+  T(data: ByteArray32(key))
+
+proc to*(hash: Hash256; T: type HashKey): T =
+  hash.data.T
+
+proc to*(key: Hash256; T: type HashID): T =
+  key.data.HashKey.to(T)
+
+# ------------------------------------------------------------------------------
+# Public helpers: Miscellaneous mappings
+# ------------------------------------------------------------------------------
+
+proc to*(key: HashKey; T: type Blob): T =
+  ## Representation of a `HashKey` as `Blob` (preserving full information)
+  key.ByteArray32.toSeq
+
+proc to*(key: HashKey; T: type NibblesSeq): T =
+  ## Representation of a `HashKey` as `NibblesSeq` (preserving full information)
+  key.ByteArray32.initNibbleRange()
+
+proc to*(hid: HashID; T: type NibblesSeq): T =
+  ## Representation of a `HashID` as `NibblesSeq` (preserving full information)
+  ByteArray32(hid.to(HashKey)).initNibbleRange()
+
+proc to*(n: SomeUnsignedInt|UInt256; T: type HashID): T =
+  ## Representation of a scalar as `HashID` (preserving full information)
+  n.u256.T
+
+proc digestTo*(data: Blob; T: type HashKey): T =
+  ## Keccak hash of a `Blob`, represented as a `HashKey`
+  keccakHash(data).data.T
+
+# ------------------------------------------------------------------------------
+# Public helpers: `Tables` and `Rlp` support
+# ------------------------------------------------------------------------------
+
+proc hash*(a: HashID): Hash =
+  ## Table/KeyedQueue mixin
+  a.to(HashKey).ByteArray32.hash
+
+proc hash*(a: HashKey): Hash =
+  ## Table/KeyedQueue mixin
+  a.ByteArray32.hash
+
+proc `==`*(a, b: HashKey): bool =
+  ## Table/KeyedQueue mixin
+  a.ByteArray32 == b.ByteArray32
+
+proc read*[T: HashID|HashKey](rlp: var Rlp, W: type T): T
+    {.gcsafe, raises: [RlpError].} =
+  rlp.read(Hash256).to(T)
+
+proc append*(writer: var RlpWriter, val: HashID|HashKey) =
+  writer.append(val.to(Hash256))
+
 # ------------------------------------------------------------------------------
 # Public helpers: `LeafTie` scalar data model
 # ------------------------------------------------------------------------------
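
Note: the `HashID` scalar model and conversion helpers above are what replace
the snap/sync `NodeTag` machinery. A minimal usage sketch (hypothetical
values; assumes the helpers above are in scope):

  let
    a = 1.u256.to(HashID)                  # scalar lifted into `HashID`
    b = a + 1.u256                         # arithmetic stays in `HashID`
  doAssert a < b                           # order relation, e.g. for sorting
  doAssert a.to(HashKey).to(HashID) == a   # lossless HashID <-> HashKey trip
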
@@ -79,6 +184,29 @@ proc `$`*(a: LeafTie): string =
   let w = $a.root.uint64.toHex & ":" & $a.path.Uint256.toHex
   w.strip(leading=true, trailing=false, chars={'0'}).toLowerAscii

+# ------------------------------------------------------------------------------
+# Miscellaneous helpers
+# ------------------------------------------------------------------------------
+
+proc `$`*(hid: HashID): string =
+  if hid == high(HashID):
+    "2^256-1"
+  elif hid == 0.u256.HashID:
+    "0"
+  elif hid == 2.u256.pow(255).HashID:
+    "2^255" # 800...
+  elif hid == 2.u256.pow(254).HashID:
+    "2^254" # 400..
+  elif hid == 2.u256.pow(253).HashID:
+    "2^253" # 200...
+  elif hid == 2.u256.pow(251).HashID:
+    "2^252" # 100...
+  else:
+    hid.UInt256.toHex
+
+proc `$`*(key: HashKey): string =
+  $key.to(HashID)
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
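
Note: the `$` overloads above special-case a few boundary values and fall back
to hex otherwise. Expected renderings per the branches above (a sketch, not
part of the commit):

  doAssert $high(HashID) == "2^256-1"
  doAssert $(0.u256.HashID) == "0"
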
@@ -0,0 +1,15 @@
+# nimbus-eth1
+# Copyright (c) 2021 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#    http://www.apache.org/licenses/LICENSE-2.0)
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#    http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified, or distributed
+# except according to those terms.
+
+type
+  ByteArray32* = array[32,byte]
+    ## Used for 32 byte hash components repurposed as Merkle hash labels.
+
+# End
@@ -16,7 +16,6 @@

 import
   eth/[common, trie/nibbles],
-  ../../../sync/snap/range_desc,
   "."/[aristo_error, aristo_types_identifiers]

 type

@@ -55,7 +54,7 @@ type
     ## Combined record for a *traditional* `Merkle Patricia Tree` node merged
     ## with a structural `VertexRef` type object.
     error*: AristoError              ## Can be used for error signalling
-    key*: array[16,NodeKey]          ## Merkle hash/es for Branch & Extension
+    key*: array[16,HashKey]          ## Merkle hash/es for Branch & Extension

 # ------------------------------------------------------------------------------
 # Public helpers: `NodeRef` and `PayloadRef`
@@ -41,7 +41,7 @@ proc getVtxBackend*(
 proc getKeyBackend*(
     db: AristoDb;
     vid: VertexID;
-      ): Result[NodeKey,AristoError] =
+      ): Result[HashKey,AristoError] =
   ## Get the merkle hash/key from the backend
   # key must not have been locally deleted (but not saved, yet)
   if vid notin db.top.dKey:

@@ -66,7 +66,7 @@ proc getVtxCascaded*(
 proc getKeyCascaded*(
     db: AristoDb;
     vid: VertexID;
-      ): Result[NodeKey,AristoError] =
+      ): Result[HashKey,AristoError] =
   ## Get the Merkle hash/key from the top layer or the `backend` layer if
   ## available.
   let lbl = db.top.kMap.getOrVoid vid

@@ -103,10 +103,10 @@ proc getVtx*(db: AristoDb; lty: LeafTie): VertexRef =
   if rc.isOk:
     return rc.value.vtx

-proc getKey*(db: AristoDb; vid: VertexID): NodeKey =
-  ## Variant of `getKeyCascaded()` returning `VOID_NODE_KEY` on error (while
+proc getKey*(db: AristoDb; vid: VertexID): HashKey =
+  ## Variant of `getKeyCascaded()` returning `VOID_HASH_KEY` on error (while
   ## ignoring the detailed error type information.)
-  db.getKeyCascaded(vid).get(otherwise = VOID_NODE_KEY)
+  db.getKeyCascaded(vid).get(otherwise = VOID_HASH_KEY)

 # ------------------------------------------------------------------------------
 # End
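
Note: with `VOID_HASH_KEY` as the `getKey()` fallback, callers can test
validity instead of unwrapping a `Result`. A sketch (assumes an `AristoDb`
instance `db` and some vertex ID `vid`):

  let key = db.getKey vid        # yields VOID_HASH_KEY on any error
  if key.isValid:
    discard                      # the vertex carries a Merkle hash key
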
@@ -35,8 +35,8 @@
 ##   vertex as long as possible.
 ##   + Stash the rest of the partial chain to be completed later
 ##
-## * While there is a partial chain left, use the ends towards the leaf nodes
-##   and calculate the remaining keys (which results in a width-first
+## * While there is a partial chain left, use the ends towards the leaf
+##   vertices and calculate the remaining keys (which results in a width-first
 ##   traversal, again.)

 {.push raises: [].}

@@ -70,7 +70,7 @@ proc toNode(vtx: VertexRef; db: AristoDb): Result[NodeRef,void] =
           continue
         return err()
       else:
-        node.key[n] = VOID_NODE_KEY
+        node.key[n] = VOID_HASH_KEY
     return ok node
   of Extension:
     if vtx.eVid.isValid:

@@ -98,7 +98,7 @@ proc leafToRootHasher(

     # Check against existing key, or store new key
     let
-      key = rc.value.encode.digestTo(NodeKey)
+      key = rc.value.encode.digestTo(HashKey)
       vfy = db.getKey wp.vid
     if not vfy.isValid:
       db.vidAttach(HashLabel(root: hike.root, key: key), wp.vid)

@@ -188,22 +188,22 @@ proc hashify*(
       # Also `db.getVtx(fromVid)` => not nil as it was fetched earlier, already
       let rc = db.getVtx(fromVid).toNode(db)
       if rc.isErr:
-        # Cannot complete with this node, so do it later
+        # Cannot complete with this vertex, so do it later
         redo[fromVid] = rootAndVid

       else:
         # Register Hashes
         let
-          nodeKey = rc.value.encode.digestTo(NodeKey)
+          hashKey = rc.value.encode.digestTo(HashKey)
           toVid = rootAndVid[1]

-        # Update Merkle hash (aka `nodeKey`)
+        # Update Merkle hash (aka `HashKey`)
         let fromLbl = db.top.kMap.getOrVoid fromVid
         if fromLbl.isValid:
-          db.vidAttach(HashLabel(root: rootAndVid[0], key: nodeKey), fromVid)
-        elif nodeKey != fromLbl.key:
+          db.vidAttach(HashLabel(root: rootAndVid[0], key: hashKey), fromVid)
+        elif hashKey != fromLbl.key:
           let error = HashifyExistingHashMismatch
-          debug "hashify failed", vid=fromVid, key=nodeKey,
+          debug "hashify failed", vid=fromVid, key=hashKey,
             expected=fromLbl.key.pp, error
           return err((fromVid,error))

@@ -245,7 +245,7 @@ proc hashifyCheck*(
       let lbl = db.top.kMap.getOrVoid vid
       if not lbl.isValid:
         return err((vid,HashifyCheckVtxHashMissing))
-      if lbl.key != rc.value.encode.digestTo(NodeKey):
+      if lbl.key != rc.value.encode.digestTo(HashKey):
         return err((vid,HashifyCheckVtxHashMismatch))

       let revVid = db.top.pAmk.getOrVoid lbl

@@ -267,7 +267,7 @@ proc hashifyCheck*(
       let lbl = db.top.kMap.getOrVoid vid
       if not lbl.isValid:
         return err((vid,HashifyCheckVtxHashMissing))
-      if lbl.key != rc.value.encode.digestTo(NodeKey):
+      if lbl.key != rc.value.encode.digestTo(HashKey):
         return err((vid,HashifyCheckVtxHashMismatch))

       let revVid = db.top.pAmk.getOrVoid lbl

@@ -282,7 +282,7 @@ proc hashifyCheck*(
       if vtx.isValid:
         let rc = vtx.toNode(db)
         if rc.isOk:
-          if lbl.key != rc.value.encode.digestTo(NodeKey):
+          if lbl.key != rc.value.encode.digestTo(HashKey):
             return err((vid,HashifyCheckVtxHashMismatch))

       let revVid = db.top.pAmk.getOrVoid lbl
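
Note: all the checks above recompute a vertex's Merkle key the same way: the
reconstructed node is RLP-encoded, then Keccak-digested into a `HashKey`. The
step in isolation (assumes `node` is a `NodeRef` obtained via `toNode()`):

  let key = node.encode.digestTo(HashKey)   # Keccak hash of the RLP encoding
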
@@ -12,7 +12,7 @@

 import
   eth/[common, trie/nibbles],
-  "."/[aristo_desc, aristo_get, aristo_path]
+  "."/[aristo_desc, aristo_get]

 type
   Leg* = object

@@ -126,7 +126,7 @@ proc hikeUp*(

 proc hikeUp*(lty: LeafTie; db: AristoDb): Hike =
   ## Variant of `hike()`
-  lty.path.pathAsNibbles.hikeUp(lty.root, db)
+  lty.path.to(NibblesSeq).hikeUp(lty.root, db)

 # ------------------------------------------------------------------------------
 # End
@@ -8,21 +8,32 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.

-## Backend or cascaded constructors for Aristo DB
-## ==============================================
+## Constructors for Aristo DB
+## ==========================
 ##
-## For a backend-less constructor use `AristoDbRef.new()`
+## For a backend-less constructor use `AristoDb(top: AristoLayerRef())`.

 {.push raises: [].}

 import
   ./aristo_init/[aristo_memory],
-  ./aristo_desc
+  ./aristo_desc,
+  ./aristo_desc/aristo_types_private

 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------

+proc init*(key: var HashKey; data: openArray[byte]): bool =
+  ## Import argument `data` into `key` which must have length either `32`, or
+  ## `0`. The latter case is equivalent to an all zero byte array of size `32`.
+  if data.len == 32:
+    (addr key.ByteArray32[0]).copyMem(unsafeAddr data[0], data.len)
+    return true
+  if data.len == 0:
+    key = VOID_HASH_KEY
+    return true
+
 proc init*(T: type AristoDb): T =
   ## Constructor with memory backend.
   T(top: AristoLayerRef(),
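
Note: the relocated `init()` above accepts only 32-byte or empty input. A
sketch of the resulting contract (assumes the proc above is in scope):

  var key: HashKey
  doAssert key.init newSeq[byte](32)     # 32 bytes: imported verbatim
  doAssert key.init newSeq[byte]()       # empty: mapped to VOID_HASH_KEY
  doAssert not key.init @[byte 1, 2, 3]  # any other length is rejected
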
@@ -22,7 +22,7 @@ import
 type
   MemBackendRef = ref object
     sTab: Table[VertexID,VertexRef]  ## Structural vertex table making up a trie
-    kMap: Table[VertexID,NodeKey]    ## Merkle hash key mapping
+    kMap: Table[VertexID,HashKey]    ## Merkle hash key mapping
     vGen: seq[VertexID]
     txGen: uint                      ## Transaction ID generator (for debugging)
     txId: uint                       ## Active transaction ID (for debugging)

@@ -47,9 +47,9 @@ proc getVtxFn(db: MemBackendRef): GetVtxFn =

 proc getKeyFn(db: MemBackendRef): GetKeyFn =
   result =
-    proc(vid: VertexID): Result[NodeKey,AristoError] =
-      let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
-      if key != EMPTY_ROOT_KEY:
+    proc(vid: VertexID): Result[HashKey,AristoError] =
+      let key = db.kMap.getOrDefault(vid, VOID_HASH_KEY)
+      if key != VOID_HASH_KEY:
         return ok key
       err(MemBeKeyNotFound)

@@ -79,7 +79,7 @@ proc putVtxFn(db: MemBackendRef): PutVtxFn =

 proc putKeyFn(db: MemBackendRef): PutKeyFn =
   result =
-    proc(hdl: PutHdlRef; vkps: openArray[(VertexID,NodeKey)]) =
+    proc(hdl: PutHdlRef; vkps: openArray[(VertexID,HashKey)]) =
       when VerifyIxId:
         doAssert db.txId == hdl.MemPutHdlRef.txId
       for (vid,key) in vkps:
@@ -11,7 +11,7 @@
 ## Aristo DB -- Patricia Trie builder, raw node insertion
 ## ======================================================
 ##
-## This module merges `NodeTag` values as hexary lookup paths into the
+## This module merges `HashID` values as hexary lookup paths into the
 ## `Patricia Trie`. When changing vertices (aka nodes without Merkle hashes),
 ## associated (but separated) Merkle hashes will be deleted unless locked.
 ## Instead of deleting locked hashes error handling is applied.

@@ -133,7 +133,7 @@ proc insertBranch(

   # Install `forkVtx`
   block:
-    # Clear Merkle hashes (aka node keys) unless proof mode.
+    # Clear Merkle hashes (aka hash keys) unless proof mode.
     if db.top.pPrf.len == 0:
       db.clearMerkleKeys(hike, linkID)
     elif linkID in db.top.pPrf:

@@ -228,17 +228,17 @@ proc concatBranchAndLeaf(
   if brVtx.bVid[nibble].isValid:
     return Hike(error: MergeRootBranchLinkBusy)

-  # Clear Merkle hashes (aka node keys) unless proof mode.
+  # Clear Merkle hashes (aka hash keys) unless proof mode.
   if db.top.pPrf.len == 0:
     db.clearMerkleKeys(hike, brVid)
   elif brVid in db.top.pPrf:
     return Hike(error: MergeBranchProofModeLock) # Ooops

-  # Append branch node
+  # Append branch vertex
   result = Hike(root: hike.root, legs: hike.legs)
   result.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)

-  # Append leaf node
+  # Append leaf vertex
   let
     vid = db.vidFetch
     vtx = VertexRef(

@@ -324,7 +324,7 @@ proc topIsExtAddLeaf(
   result = Hike(root: hike.root, legs: hike.legs)

   if not brVtx.isValid:
-    # Blind vertex, promote to leaf node.
+    # Blind vertex, promote to leaf vertex.
     #
     #  --(extVid)--> <extVtx> --(brVid)--> nil
     #

@@ -354,7 +354,7 @@ proc topIsExtAddLeaf(
     if linkID.isValid:
       return Hike(error: MergeRootBranchLinkBusy)

-    # Clear Merkle hashes (aka node keys) unless proof mode
+    # Clear Merkle hashes (aka hash keys) unless proof mode
     if db.top.pPrf.len == 0:
       db.clearMerkleKeys(hike, brVid)
     elif brVid in db.top.pPrf:

@@ -386,7 +386,7 @@ proc topIsEmptyAddLeaf(
     if rootVtx.bVid[nibble].isValid:
       return Hike(error: MergeRootBranchLinkBusy)

-    # Clear Merkle hashes (aka node keys) unless proof mode
+    # Clear Merkle hashes (aka hash keys) unless proof mode
    if db.top.pPrf.len == 0:
      db.clearMerkleKeys(hike, hike.root)
    elif hike.root in db.top.pPrf:

@@ -413,14 +413,14 @@ proc topIsEmptyAddLeaf(

 proc mergeNodeImpl(
     db: AristoDb;                      # Database, top layer
-    nodeKey: NodeKey;                  # Merkel hash of node
+    hashKey: HashKey;                  # Merkle hash of node
     node: NodeRef;                     # Node derived from RLP representation
     rootVid: VertexID;                 # Current sub-trie
       ): Result[VertexID,AristoError] =
-  ## The function merges a node key `nodeKey` expanded from its RLP
-  ## representation into the `Aristo Trie` database. The vertex is split off
-  ## from the node and stored separately. So are the Merkle hashes. The
-  ## vertex is labelled `locked`.
+  ## The function merges the argument hash key `hashKey` as expanded from the
+  ## node RLP representation into the `Aristo Trie` database. The vertex is
+  ## split off from the node and stored separately. So are the Merkle hashes.
+  ## The vertex is labelled `locked`.
   ##
   ## The `node` argument is *not* checked, whether the vertex IDs have been
   ## allocated, already. If the node comes straight from the `decode()` RLP

@@ -431,26 +431,26 @@ proc mergeNodeImpl(
   if not rootVid.isValid:
     return err(MergeRootKeyEmpty)

-  # Verify `nodeKey`
-  if not nodeKey.isValid:
-    return err(MergeNodeKeyEmpty)
+  # Verify `hashKey`
+  if not hashKey.isValid:
+    return err(MergeHashKeyEmpty)

   # Check whether the node exists, already. If not then create a new vertex ID
   var
-    nodeLbl = HashLabel(root: rootVid, key: nodeKey)
-    vid = db.top.pAmk.getOrVoid nodeLbl
+    hashLbl = HashLabel(root: rootVid, key: hashKey)
+    vid = db.top.pAmk.getOrVoid hashLbl
   if not vid.isValid:
-    vid = db.vidAttach nodeLbl
+    vid = db.vidAttach hashLbl
   else:
     let lbl = db.top.kMap.getOrVoid vid
-    if lbl == nodeLbl:
+    if lbl == hashLbl:
       if db.top.sTab.hasKey vid:
         # This is typically considered OK
-        return err(MergeNodeKeyCachedAlready)
+        return err(MergeHashKeyCachedAlready)
       # Otherwise proceed
     elif lbl.isValid:
       # Different key assigned => error
-      return err(MergeNodeKeyDiffersFromCached)
+      return err(MergeHashKeyDiffersFromCached)

   let vtx = node.to(VertexRef) # the vertex IDs need to be set up now (if any)

@@ -523,7 +523,7 @@ proc merge*(
       vid: hike.root,
       vtx: VertexRef(
         vType: Leaf,
-        lPfx: leaf.leafTie.path.pathAsNibbles,
+        lPfx: leaf.leafTie.path.to(NibblesSeq),
         lData: leaf.payload))
     db.top.sTab[wp.vid] = wp.vtx
     result = Hike(root: wp.vid, legs: @[Leg(wp: wp, nibble: -1)])

@@ -565,12 +565,12 @@ proc merge*(
   var (merged, dups) = (0, 0)
   for n,w in proof:
     let
-      key = w.Blob.digestTo(NodeKey)
+      key = w.Blob.digestTo(HashKey)
       node = w.Blob.decode(NodeRef)
       rc = db.mergeNodeImpl(key, node, rootVid)
     if rc.isOK:
       merged.inc
-    elif rc.error == MergeNodeKeyCachedAlready:
+    elif rc.error == MergeHashKeyCachedAlready:
       dups.inc
     else:
       return (n, dups, rc.error)

@@ -579,7 +579,7 @@ proc merge*(

 proc merge*(
     db: AristoDb;                      # Database, top layer
-    rootKey: NodeKey;                  # Merkle hash for root
+    rootKey: HashKey;                  # Merkle hash for root
     rootVid = VertexID(0)              # Optionally, force root vertex ID
       ): Result[VertexID,AristoError] =
   ## Set up a `rootKey` associated with a vertex ID.
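
Note: proof records enter the trie exactly as in the loop above: the raw RLP
blob is digested into its `HashKey` first, then decoded into a structural
node. The pattern in isolation (assumes `w` is one RLP-encoded proof record
and `rootVid` the current sub-trie root):

  let
    key  = w.Blob.digestTo(HashKey)   # Merkle key of the proof node
    node = w.Blob.decode(NodeRef)     # structural decoding
  # inside the module: db.mergeNodeImpl(key, node, rootVid)
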
@@ -127,7 +127,7 @@ proc zeroAdjust(
     db: AristoDb;                      # Database layer
     doLeast: static[bool];             # Direction: *least* or *most*
       ): Hike =
-  ## Adjust empty argument path to the first node entry to the right. Ths
+  ## Adjust empty argument path to the first vertex entry to the right. This
   ## applies if the argument `hike` is before the first entry in the database.
   ## The result is a hike which is aligned with the first entry.
   proc accept(p: Hike; pfx: NibblesSeq): bool =

@@ -171,7 +171,7 @@ proc zeroAdjust(

       of Extension:
         let ePfx = root.ePfx
-        # Must be followed by a branch node
+        # Must be followed by a branch vertex
         if hike.tail.len < 2 or not hike.accept(ePfx):
           break fail
         let vtx = db.getVtx root.eVid

@@ -229,9 +229,9 @@ proc finalise(
   if 0 < hike.tail.len: # nothing to compare against, otherwise
     let top = hike.legs[^1]

-    # Note that only a `Branch` nodes has a non-zero nibble
+    # Note that only a `Branch` vertex has a non-zero nibble
     if 0 <= top.nibble and top.nibble == top.wp.vtx.branchBorderNibble:
-      # Check the following up node
+      # Check the following up vertex
       let vtx = db.getVtx top.wp.vtx.bVid[top.nibble]
       if not vtx.isValid:
         return Hike(error: NearbyDanglingLink)

@@ -252,7 +252,7 @@ proc finalise(
   # * finalise left: n00000.. for 0 < n
   if hike.legs[0].wp.vtx.vType == Branch or
      (1 < hike.legs.len and hike.legs[1].wp.vtx.vType == Branch):
-    return Hike(error: NearbyFailed) # no more nodes
+    return Hike(error: NearbyFailed) # no more vertices

   Hike(error: NearbyUnexpectedVtx) # error

@@ -314,7 +314,7 @@ proc nearbyNext(
       uHikeLen = uHike.legs.len # in case of backtracking
       uHikeTail = uHike.tail    # in case of backtracking

-    # Look ahead checking next node
+    # Look ahead checking next vertex
     if start:
       let vid = top.wp.vtx.bVid[top.nibble]
       if not vid.isValid:

@@ -334,7 +334,7 @@ proc nearbyNext(
       of Branch:
         let nibble = uHike.tail[0].int8
         if start and accept nibble:
-          # Step down and complete with a branch link on the child node
+          # Step down and complete with a branch link on the child vertex
           step = Leg(wp: VidVtxPair(vid: vid, vtx: vtx), nibble: nibble)
           uHike.legs.add step

@@ -354,7 +354,7 @@ proc nearbyNext(
         uHike.legs.setLen(uHikeLen)
         uHike.tail = uHikeTail
       else:
-        # Pop current `Branch` node on top and append nibble to `tail`
+        # Pop current `Branch` vertex on top and append nibble to `tail`
         uHike.tail = @[top.nibble.byte].initNibbleRange.slice(1) & uHike.tail
         uHike.legs.setLen(uHike.legs.len - 1)
     # End while

@@ -368,7 +368,7 @@ proc nearbyNext(
     db: AristoDb;             # Database layer
     hikeLenMax: static[int];  # Beware of loops (if any)
     moveRight:static[bool];   # Direction of next vertex
-      ): Result[NodeTag,AristoError] =
+      ): Result[HashID,AristoError] =
   ## Variant of `nearbyNext()`, convenience wrapper
   let hike = lty.hikeUp(db).nearbyNext(db, hikeLenMax, moveRight)
   if hike.error != AristoError(0):

@@ -377,7 +377,7 @@ proc nearbyNext(
   if 0 < hike.legs.len and hike.legs[^1].wp.vtx.vType == Leaf:
     let rc = hike.legsTo(NibblesSeq).pathToKey
     if rc.isOk:
-      return ok rc.value.to(NodeTag)
+      return ok rc.value.to(HashID)
     return err(rc.error)

   err(NearbyLeafExpected)

@@ -390,22 +390,22 @@ proc nearbyRight*(
     hike: Hike;               # Partially expanded chain of vertices
     db: AristoDb;             # Database layer
       ): Hike =
-  ## Extends the maximally extended argument nodes `hike` to the right (i.e.
+  ## Extends the maximally extended argument vertices `hike` to the right (i.e.
   ## with non-decreasing path value). This function does not backtrack if
   ## there are dangling links in between. It will return an error in that case.
   ##
-  ## If there is no more leaf node to the right of the argument `hike`, the
+  ## If there are no more leaf vertices to the right of the argument `hike`, the
   ## particular error code `NearbyBeyondRange` is returned.
   ##
   ## This code is intended to be used for verifying a left-bound proof to
-  ## verify that there is no leaf node *right* of a boundary path value.
+  ## verify that there is no leaf vertex *right* of a boundary path value.
   hike.nearbyNext(db, 64, moveRight=true)

 proc nearbyRight*(
     lty: LeafTie;             # Some `Patricia Trie` path
     db: AristoDb;             # Database layer
       ): Result[LeafTie,AristoError] =
-  ## Variant of `nearbyRight()` working with a `NodeTag` argument instead
+  ## Variant of `nearbyRight()` working with a `HashID` argument instead
   ## of a `Hike`.
   let rc = lty.nearbyNext(db, 64, moveRight=true)
   if rc.isErr:

@@ -419,15 +419,14 @@ proc nearbyLeft*(
   ## Similar to `nearbyRight()`.
   ##
   ## This code is intended to be used for verifying a right-bound proof to
-  ## verify that there is no leaf node *left* to a boundary path value.
+  ## verify that there is no leaf vertex *left* to a boundary path value.
   hike.nearbyNext(db, 64, moveRight=false)

 proc nearbyLeft*(
     lty: LeafTie;             # Some `Patricia Trie` path
     db: AristoDb;             # Database layer
       ): Result[LeafTie,AristoError] =
-  ## Similar to `nearbyRight()` for `NodeTag` argument instead
-  ## of a `Hike`.
+  ## Similar to `nearbyRight()` for `HashID` argument instead of a `Hike`.
   let rc = lty.nearbyNext(db, 64, moveRight=false)
   if rc.isErr:
     return err(rc.error)

@@ -441,10 +440,10 @@ proc nearbyRightMissing*(
     hike: Hike;               # Partially expanded chain of vertices
     db: AristoDb;             # Database layer
       ): Result[bool,AristoError] =
-  ## Returns `true` if the maximally extended argument nodes `hike` is the
+  ## Returns `true` if the maximally extended argument vertex `hike` is the
   ## rightmost on the hexary trie database. It verifies that there is no more
   ## leaf entry to the right of the argument `hike`. This function is an
-  ## an alternative to
+  ## alternative to
   ## ::
   ##   let rc = path.nearbyRight(db)
   ##   if rc.isOk:

@@ -454,7 +453,7 @@ proc nearbyRightMissing*(
   ##     # problem with database => error
   ##     ...
   ##   else:
-  ##     # no nore nodes => true
+  ##     # no more vertices => true
   ##     ...
   ## and is intended mainly for debugging.
   if hike.legs.len == 0:
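
Note: a typical boundary-proof check with the renamed API looks as follows (a
sketch; assumes `lty` is a `LeafTie` at the right edge of the proven range):

  let rc = lty.nearbyRight(db)            # Result[LeafTie,AristoError]
  if rc.isErr and rc.error == NearbyBeyondRange:
    discard                               # no leaf right of the boundary
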
@ -14,7 +14,8 @@ import
|
||||||
std/sequtils,
|
std/sequtils,
|
||||||
eth/[common, trie/nibbles],
|
eth/[common, trie/nibbles],
|
||||||
stew/results,
|
stew/results,
|
||||||
./aristo_desc
|
./aristo_desc,
|
||||||
|
./aristo_desc/aristo_types_private
|
||||||
|
|
||||||
# Info snippet (just a reminder to keep somewhere)
|
# Info snippet (just a reminder to keep somewhere)
|
||||||
#
|
#
|
||||||
|
@ -34,35 +35,28 @@ import
|
||||||
# Public functions
|
# Public functions
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc pathAsNibbles*(key: NodeKey): NibblesSeq =
|
proc pathAsBlob*(keyOrTag: HashKey|HashID): Blob =
|
||||||
key.ByteArray32.initNibbleRange()
|
|
||||||
|
|
||||||
proc pathAsNibbles*(tag: NodeTag): NibblesSeq =
|
|
||||||
tag.to(NodeKey).pathAsNibbles()
|
|
||||||
|
|
||||||
proc pathAsBlob*(keyOrTag: NodeKey|NodeTag): Blob =
|
|
||||||
keyOrTag.pathAsNibbles.hexPrefixEncode(isLeaf=true)
|
keyOrTag.pathAsNibbles.hexPrefixEncode(isLeaf=true)
|
||||||
|
|
||||||
|
proc pathToKey*(partPath: NibblesSeq): Result[HashKey,AristoError] =
|
||||||
proc pathToKey*(partPath: NibblesSeq): Result[NodeKey,AristoError] =
|
|
||||||
var key: ByteArray32
|
var key: ByteArray32
|
||||||
if partPath.len == 64:
|
if partPath.len == 64:
|
||||||
# Trailing dummy nibbles (aka no nibbles) force a nibble seq reorg
|
# Trailing dummy nibbles (aka no nibbles) force a nibble seq reorg
|
||||||
let path = (partPath & EmptyNibbleSeq).getBytes()
|
let path = (partPath & EmptyNibbleSeq).getBytes()
|
||||||
(addr key[0]).copyMem(unsafeAddr path[0], 32)
|
(addr key[0]).copyMem(unsafeAddr path[0], 32)
|
||||||
return ok(key.NodeKey)
|
return ok(key.HashKey)
|
||||||
err(PathExpected64Nibbles)
|
err(PathExpected64Nibbles)
|
||||||
|
|
||||||
proc pathToKey*(partPath: Blob): Result[NodeKey,AristoError] =
|
proc pathToKey*(partPath: Blob): Result[HashKey,AristoError] =
|
||||||
let (isLeaf,pathSegment) = partPath.hexPrefixDecode
|
let (isLeaf,pathSegment) = partPath.hexPrefixDecode
|
||||||
if isleaf:
|
if isleaf:
|
||||||
return pathSegment.pathToKey()
|
return pathSegment.pathToKey()
|
||||||
err(PathExpectedLeaf)
|
err(PathExpectedLeaf)
|
||||||
|
|
||||||
proc pathToTag*(partPath: NibblesSeq|Blob): Result[NodeTag,AristoError] =
|
proc pathToTag*(partPath: NibblesSeq|Blob): Result[HashID,AristoError] =
|
||||||
let rc = partPath.pathToKey()
|
let rc = partPath.pathToKey()
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
return ok(rc.value.to(NodeTag))
|
return ok(rc.value.to(HashID))
|
||||||
err(rc.error)
|
err(rc.error)
|
||||||
|
|
||||||
# --------------------
|
# --------------------
|
||||||
@@ -86,11 +80,11 @@ proc pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
     let nope = seq[byte].default.initNibbleRange
     result = pfx.slice(0,64) & nope # nope forces re-alignment

-proc pathPfxPadKey*(pfx: NibblesSeq; dblNibble: static[byte]): NodeKey =
+proc pathPfxPadKey*(pfx: NibblesSeq; dblNibble: static[byte]): HashKey =
   ## Variant of `pathPfxPad()`.
   ##
   ## Extend (or cut) the argument nibbles sequence `pfx` for generating a
-  ## `NodeKey`.
+  ## `HashKey`.
   let bytes = pfx.pathPfxPad(dblNibble).getBytes
   (addr result.ByteArray32[0]).copyMem(unsafeAddr bytes[0], bytes.len)

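Usage sketch for the padding variant above; that `dblNibble` supplies the
fill value for the missing tail is an assumption taken from its name:

  let pfx = @[0x12'u8, 0x34'u8].initNibbleRange   # the four nibbles 1,2,3,4
  let key = pfx.pathPfxPadKey(0x00)               # padded out to a full HashKey
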
@@ -14,7 +14,7 @@ import
   std/[bitops, sequtils],
   eth/[common, trie/nibbles],
   stew/results,
-  "."/[aristo_constants, aristo_desc]
+  "."/[aristo_constants, aristo_desc, aristo_init]

 # ------------------------------------------------------------------------------
 # Private functions
@@ -24,16 +24,6 @@ proc aristoError(error: AristoError): NodeRef =
   ## Allows returning de
   NodeRef(vType: Leaf, error: error)

-proc aInit(key: var NodeKey; data: openArray[byte]): bool =
-  ## Import argument `data` into `key` which must have length either `32`, or
-  ## `0`. The latter case is equivalent to an all zero byte array of size `32`.
-  if data.len == 32:
-    (addr key.ByteArray32[0]).copyMem(unsafeAddr data[0], data.len)
-    return true
-  elif data.len == 0:
-    key = VOID_NODE_KEY
-    return true
-
 # ------------------------------------------------------------------------------
 # Public RLP transcoder mixins
 # ------------------------------------------------------------------------------

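The deleted helper is superseded by an `init()` proc, presumably exported
through the new `aristo_desc/aristo_types_private` import. A hedged
reconstruction of the expected semantics, adapted from the removed code:

  # Assumed shape of the replacement (not shown in this diff): same logic,
  # renamed and ported from `NodeKey` to `HashKey`.
  proc init(key: var HashKey; data: openArray[byte]): bool =
    if data.len == 32:
      (addr key.ByteArray32[0]).copyMem(unsafeAddr data[0], data.len)
      return true
    elif data.len == 0:
      key = VOID_HASH_KEY   # empty input counts as the void key
      return true
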
@@ -51,7 +41,7 @@ proc read*(

   var
     blobs = newSeq[Blob](2)         # temporary, cache
-    links: array[16,NodeKey]        # reconstruct branch node
+    links: array[16,HashKey]        # reconstruct branch node
     top = 0                         # count entries and positions

   # Collect lists of either 2 or 17 blob entries.
@@ -62,7 +52,7 @@ proc read*(
         return aristoError(RlpBlobExpected)
       blobs[top] = rlp.read(Blob)
     of 2 .. 15:
-      if not links[top].aInit(rlp.read(Blob)):
+      if not links[top].init(rlp.read(Blob)):
         return aristoError(RlpBranchLinkExpected)
     of 16:
       if not w.isBlob:
@@ -90,12 +80,12 @@ proc read*(
       var node = NodeRef(
         vType: Extension,
         ePfx: pathSegment)
-      if not node.key[0].aInit(blobs[1]):
+      if not node.key[0].init(blobs[1]):
         return aristoError(RlpExtPathEncoding)
       return node
   of 17:
     for n in [0,1]:
-      if not links[n].aInit(blobs[n]):
+      if not links[n].init(blobs[n]):
         return aristoError(RlpBranchLinkExpected)
     return NodeRef(
       vType: Branch,

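For reference, the list arities the reader above dispatches on follow the
standard hexary Merkle-Patricia encoding; a self-contained sketch of the
rule (plain Nim, names invented here):

  proc nodeKind(listLen: int; isLeafPfx: bool): string =
    case listLen
    of 17: "Branch"                  # 16 child links plus value slot
    of 2: (if isLeafPfx: "Leaf" else: "Extension")
    else: "invalid"
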
@@ -109,7 +99,7 @@ proc read*(
 proc append*(writer: var RlpWriter; node: NodeRef) =
   ## Mixin for RLP writer. Note that a `Dummy` node is encoded as an empty
   ## list.
-  proc addNodeKey(writer: var RlpWriter; key: NodeKey) =
+  proc addHashKey(writer: var RlpWriter; key: HashKey) =
     if not key.isValid:
       writer.append EmptyBlob
     else:
@@ -122,12 +112,12 @@ proc append*(writer: var RlpWriter; node: NodeRef) =
   of Branch:
     writer.startList(17)
     for n in 0..15:
-      writer.addNodeKey node.key[n]
+      writer.addHashKey node.key[n]
     writer.append EmptyBlob
   of Extension:
     writer.startList(2)
     writer.append node.ePfx.hexPrefixEncode(isleaf = false)
-    writer.addNodeKey node.key[0]
+    writer.addHashKey node.key[0]
   of Leaf:
     writer.startList(2)
     writer.append node.lPfx.hexPrefixEncode(isleaf = true)

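The writer mixin above is what turns a node into the byte string that gets
hashed; a minimal sketch of that pattern (`merkleHash` is a made-up name,
the body matches the `pal()` code further down in this diff):

  proc merkleHash(node: NodeRef): HashKey =
    var w = initRlpWriter()
    w.append node                      # via the mixin above
    w.finish.keccakHash.data.HashKey   # 32 byte Keccak digest as lookup key
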
@@ -143,12 +133,12 @@ proc blobify*(node: VertexRef; data: var Blob): AristoError =
   ## boundaries.
   ## ::
   ##   Branch:
-  ##     uint64, ... -- list of up to 16 child nodes lookup keys
+  ##     uint64, ... -- list of up to 16 child vertices lookup keys
   ##     uint16      -- index bitmap
   ##     0x00        -- marker(2) + unused(2)
   ##
   ##   Extension:
-  ##     uint64      -- child node lookup key
+  ##     uint64      -- child vertex lookup key
   ##     Blob        -- hex encoded partial path (at least one byte)
   ##     0x80        -- marker(2) + unused(2)
   ##
@@ -158,7 +148,7 @@ proc blobify*(node: VertexRef; data: var Blob): AristoError =
   ##     0xc0        -- marker(2) + partialPathLen(6)
   ##
   ## For a branch record, the bytes of the `access` array indicate the position
-  ## of the Patricia Trie node reference. So the `vertexID` with index `n` has
+  ## of the Patricia Trie vertex reference. So the `vertexID` with index `n` has
   ## ::
   ##   8 * n * ((access shr (n * 4)) and 15)
   ##

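The marker byte described above packs the vertex kind into its top two bits
and a six bit length field into the rest. A self-contained sketch of
decoding it (mirroring the `deblobify()` dispatch below; names invented):

  type VtxKind = enum Branch, Extension, Leaf, Invalid

  proc classify(marker: byte): (VtxKind, int) =
    let kind =
      case marker shr 6
      of 0: Branch        # 0x00 .. 0x3f
      of 2: Extension     # 0x80 .. 0xbf
      of 3: Leaf          # 0xc0 .. 0xff
      else: Invalid
    (kind, marker.int and 0x3f)

  doAssert classify(0x00'u8) == (Branch, 0)
  doAssert classify(0xc7'u8) == (Leaf, 7)   # leaf with partial path length 7
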
@@ -229,7 +219,7 @@ proc deblobify*(record: Blob; vtx: var VertexRef): AristoError =
     return DbrTooShort

   case record[^1] shr 6:
-  of 0: # `Branch` node
+  of 0: # `Branch` vertex
     if record.len < 19:              # at least two edges
       return DbrBranchTooShort
     if (record.len mod 8) != 3:
@@ -254,7 +244,7 @@ proc deblobify*(record: Blob; vtx: var VertexRef): AristoError =
       vType: Branch,
       bVid: vtxList)

-  of 2: # `Extension` node
+  of 2: # `Extension` vertex
     let
       sLen = record[^1].int and 0x3f # length of path segment
       rlen = record.len - 1          # `vertexID` + path segm
@@ -270,7 +260,7 @@ proc deblobify*(record: Blob; vtx: var VertexRef): AristoError =
       eVid: (uint64.fromBytesBE record[0 ..< 8]).VertexID,
       ePfx: pathSegment)

-  of 3: # `Leaf` node
+  of 3: # `Leaf` vertex
     let
       sLen = record[^1].int and 0x3f # length of path segment
       rlen = record.len - 1          # payload + path segment

@@ -101,7 +101,7 @@ proc vidAttach*(db: AristoDb; lbl: HashLabel): VertexID {.discardable.} =
   result = db.vidFetch
   db.vidAttach(lbl, result)

-proc vidRoot*(db: AristoDb; key: NodeKey): VertexID {.discardable.} =
+proc vidRoot*(db: AristoDb; key: HashKey): VertexID {.discardable.} =
   ## Variant of `vidAttach()` for generating a sub-trie root
   result = db.vidFetch
   db.vidAttach(HashLabel(root: result, key: key), result)

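Hedged usage sketch for `vidRoot()`; the empty descriptor and the
`EMPTY_ROOT_HASH` seed are illustrative stand-ins:

  var db: AristoDb                                        # assumed set up elsewhere
  let rootVid = db.vidRoot(EMPTY_ROOT_HASH.to(HashKey))
  # `rootVid` is now labelled as its own sub-trie root.
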
@@ -100,12 +100,12 @@ proc cachedVID(db: AristoDb; lbl: HashLabel): VertexID =
     result = db.vidAttach lbl

 # ------------------------------------------------------------------------------
-# Public functions for `VertexID` => `NodeKey` mapping
+# Public functions for `VertexID` => `HashKey` mapping
 # ------------------------------------------------------------------------------

-proc pal*(db: AristoDb; rootID: VertexID; vid: VertexID): NodeKey =
-  ## Retrieve the cached `Merkel` hash (aka `NodeKey` object) associated with
-  ## the argument `VertexID` type argument `vid`. Return a zero `NodeKey` if
+proc pal*(db: AristoDb; rootID: VertexID; vid: VertexID): HashKey =
+  ## Retrieve the cached Merkle hash (aka `HashKey` object) associated with
+  ## the `VertexID` type argument `vid`. Return a zero `HashKey` if
   ## there is none.
   ##
   ## If the vertex ID `vid` is not found in the cache, then the structural
@@ -122,7 +122,7 @@ proc pal*(db: AristoDb; rootID: VertexID; vid: VertexID): NodeKey =
   if db.convertPartiallyOk(vtx,node):
     var w = initRlpWriter()
     w.append node
-    result = w.finish.keccakHash.data.NodeKey
+    result = w.finish.keccakHash.data.HashKey
     db.top.kMap[vid] = HashLabel(root: rootID, key: result)

 # ------------------------------------------------------------------------------

@@ -135,7 +135,7 @@ proc updated*(nd: NodeRef; root: VertexID; db: AristoDb): NodeRef =
   ## For a `Leaf` node, the payload data `PayloadRef` type reference is *not*
   ## duplicated and returned as-is.
   ##
-  ## This function will not complain if all `Merkel` hashes (aka `NodeKey`
+  ## This function will not complain if all Merkle hashes (aka `HashKey`
   ## objects) are zero for either `Extension` or `Leaf` nodes.
   if nd.isValid:
     case nd.vType:
@@ -161,7 +161,7 @@ proc updated*(nd: NodeRef; root: VertexID; db: AristoDb): NodeRef =

 proc asNode*(vtx: VertexRef; db: AristoDb): NodeRef =
-  ## Return a `NodeRef` object by augmenting missing `Merkel` hashes (aka
-  ## `NodeKey` objects) from the cache or from calculated cached vertex
+  ## Return a `NodeRef` object by augmenting missing Merkle hashes (aka
+  ## `HashKey` objects) from the cache or from calculated cached vertex
   ## entries, if available.
   ##
   ## If not all Merkle hashes are available in a single lookup, then the

@@ -18,7 +18,6 @@ import
   unittest2,
   ../../nimbus/db/aristo/[
     aristo_desc, aristo_delete, aristo_hashify, aristo_nearby, aristo_merge],
-  ../../nimbus/sync/snap/range_desc,
   ./test_helpers

 type
@@ -93,8 +92,8 @@ proc fwdWalkVerify(
         error = rc.error
         check rc.error == AristoError(0)
       break
-    if rc.value.path < high(NodeTag):
-      lty.path = NodeTag(rc.value.path.u256 + 1)
+    if rc.value.path < high(HashID):
+      lty.path = HashID(rc.value.path.u256 + 1)
     n.inc

   if error != AristoError(0):

@@ -20,9 +20,12 @@ import
   ../test_sync_snap/test_types,
   ../replay/[pp, undump_accounts, undump_storages]

+from ../../nimbus/sync/snap/range_desc
+  import NodeKey
+
 type
   ProofTrieData* = object
-    root*: NodeKey
+    root*: HashKey
     id*: int
     proof*: seq[SnapProof]
     kvpLst*: seq[LeafTiePayload]
@@ -34,6 +37,9 @@ type
 proc toPfx(indent: int): string =
   "\n" & " ".repeat(indent)

+proc to(a: NodeKey; T: type HashKey): T =
+  a.T
+
 # ------------------------------------------------------------------------------
 # Public pretty printing
 # ------------------------------------------------------------------------------
@@ -118,9 +124,9 @@ proc to*(sample: AccountsSample; T: type seq[UndumpStorages]): T =
       result.add w

 proc to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
-  var (rootKey, rootVid) = (VOID_NODE_KEY, VertexID(0))
+  var (rootKey, rootVid) = (VOID_HASH_KEY, VertexID(0))
   for w in ua:
-    let thisRoot = w.root.to(NodeKey)
+    let thisRoot = w.root.to(HashKey)
     if rootKey != thisRoot:
       (rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
     if 0 < w.data.accounts.len:
@@ -128,14 +134,16 @@ proc to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
         root: rootKey,
         proof: w.data.proof,
         kvpLst: w.data.accounts.mapIt(LeafTiePayload(
-          leafTie: LeafTie(root: rootVid, path: it.accKey.to(NodeTag)),
+          leafTie: LeafTie(
+            root: rootVid,
+            path: it.accKey.to(HashKey).to(HashID)),
           payload: PayloadRef(pType: BlobData, blob: it.accBlob))))

 proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
-  var (rootKey, rootVid) = (VOID_NODE_KEY, VertexID(0))
+  var (rootKey, rootVid) = (VOID_HASH_KEY, VertexID(0))
   for n,s in us:
     for w in s.data.storages:
-      let thisRoot = w.account.storageRoot.to(NodeKey)
+      let thisRoot = w.account.storageRoot.to(HashKey)
       if rootKey != thisRoot:
         (rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
       if 0 < w.data.len:
@@ -143,7 +151,9 @@ proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
           root: thisRoot,
           id: n + 1,
           kvpLst: w.data.mapIt(LeafTiePayload(
-            leafTie: LeafTie(root: rootVid, path: it.slotHash.to(NodeTag)),
+            leafTie: LeafTie(
+              root: rootVid,
+              path: it.slotHash.to(HashKey).to(HashID)),
             payload: PayloadRef(pType: BlobData, blob: it.slotData))))
     if 0 < result.len:
       result[^1].proof = s.data.proof

@@ -194,7 +194,7 @@ proc test_mergeProofAndKvpList*(
     oopsTab = oops.toTable
   var
     db: AristoDb
-    rootKey = NodeKey.default
+    rootKey = HashKey.default
     count = 0
   for n,w in list:
     if resetDb or w.root != rootKey or w.proof.len == 0:
@@ -222,7 +222,7 @@ proc test_mergeProofAndKvpList*(
         check rc.error == AristoError(0)
         return
       proved = db.merge(w.proof, rc.value)
-    check proved.error in {AristoError(0),MergeNodeKeyCachedAlready}
+    check proved.error in {AristoError(0),MergeHashKeyCachedAlready}
     check w.proof.len == proved.merged + proved.dups
     check db.top.lTab.len == lTabLen
     check db.top.sTab.len == proved.merged + sTabLen

@@ -18,7 +18,6 @@ import
   unittest2,
   ../../nimbus/db/aristo/[
     aristo_desc, aristo_debug, aristo_merge, aristo_nearby],
-  ../../nimbus/sync/snap/range_desc,
   ./test_helpers

 # ------------------------------------------------------------------------------
@@ -28,14 +27,14 @@ import
 proc fwdWalkLeafsCompleteDB(
     db: AristoDb;
     root: VertexID;
-    tags: openArray[NodeTag];
+    tags: openArray[HashID];
     noisy: bool;
      ): tuple[visited: int, error: AristoError] =
   let
     tLen = tags.len
   var
     error = AristoError(0)
-    lty = LeafTie(root: root, path: NodeTag(tags[0].u256 div 2))
+    lty = LeafTie(root: root, path: HashID(tags[0].u256 div 2))
     n = 0
   while true:
     let rc = lty.nearbyRight(db)
@@ -63,8 +62,8 @@ proc fwdWalkLeafsCompleteDB(
         error = AristoError(1)
         check rc.value.path == tags[n]
       break
-    if rc.value.path < high(NodeTag):
-      lty.path = NodeTag(rc.value.path.u256 + 1)
+    if rc.value.path < high(HashID):
+      lty.path = HashID(rc.value.path.u256 + 1)
     n.inc

   (n,error)

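Hedged usage sketch for the helper above; `db`, `root` and `tags` are
assumed to be prepared as in the calling test cases:

  let (visited, err) = db.fwdWalkLeafsCompleteDB(root, tags, noisy=false)
  doAssert err == AristoError(0)   # walk ran to high(HashID) without error
  doAssert visited == tags.len     # every prepared leaf was seen exactly once
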
@@ -73,7 +72,7 @@ proc fwdWalkLeafsCompleteDB(
 proc revWalkLeafsCompleteDB(
     db: AristoDb;
     root: VertexID;
-    tags: openArray[NodeTag];
+    tags: openArray[HashID];
     noisy: bool;
      ): tuple[visited: int, error: AristoError] =
   let
@@ -81,7 +80,7 @@ proc revWalkLeafsCompleteDB(
   var
     error = AristoError(0)
     delta = ((high(UInt256) - tags[^1].u256) div 2)
-    lty = LeafTie(root: root, path: NodeTag(tags[^1].u256 + delta))
+    lty = LeafTie(root: root, path: HashID(tags[^1].u256 + delta))
     n = tLen-1
   while true: # and false:
     let rc = lty.nearbyLeft(db)
@@ -107,8 +106,8 @@ proc revWalkLeafsCompleteDB(
         error = AristoError(1)
         check rc.value.path == tags[n]
       break
-    if low(NodeTag) < rc.value.path:
-      lty.path = NodeTag(rc.value.path.u256 - 1)
+    if low(HashID) < rc.value.path:
+      lty.path = HashID(rc.value.path.u256 - 1)
     n.dec

   (tLen-1 - n, error)

@@ -124,8 +123,8 @@ proc test_nearbyKvpList*(
      ): bool =
   var
     db: AristoDb
-    rootKey = NodeKey.default
-    tagSet: HashSet[NodeTag]
+    rootKey = HashKey.default
+    tagSet: HashSet[HashID]
     count = 0
   for n,w in list:
     if resetDb or w.root != rootKey: