Aristo db: allow path keys shorter than 64 nibbles (#1864)

* Aristo: Single `FetchPathNotFound` error in `fetchXxx()` and `hasPath()`

why:
  A missing-path hike used to return many detailed reasons for the failure,
  which was cumbersome to handle at the call site.

also:
  Renamed `contains()` => `hasPath()`. This removes the non-boolean
  `contains()` overload which would otherwise be picked up by the `in`
  operator.
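
example:
  An illustrative sketch (not part of the patch) of the intended call-site
  pattern, assuming an open `AristoDbRef` handle `db` and the `aristo_fetch`
  signatures introduced below:

    proc lookup(db: AristoDbRef; root: VertexID; path: openArray[byte]) =
      let rc = db.fetchPayload(root, path)
      if rc.isOk:
        discard rc.value                      # leaf payload found
      elif rc.error[1] == FetchPathNotFound:
        discard                               # the single "not there" case
      else:
        echo "database error: ", rc.error     # anything else is a real error

      # `hasPath()` folds the same distinction into a Result[bool,..]
      let hasIt = db.hasPath(root, path)
      if hasIt.isOk and hasIt.value:
        discard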

* Kvt: Renamed `contains()` => `hasKey()`

why:
  Same as for `Aristo` above: a non-boolean `contains()` overload would
  otherwise be picked up by the `in` operator.
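
example:
  An illustrative sketch (not part of the patch); `kvt` and `key` are
  placeholders for an open `KvtDbRef` and some record key:

    let rc = kvt.hasKey(key)        # Result[bool,KvtError], no `in` operator
    if rc.isOk and rc.value:
      echo "record present"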

* Aristo: Generalising `HashID` into the variable length `PathID`

why:
  There are cases where the `Aristo` database is used with keys shorter
  than 64 nibbles, e.g. when handling transaction indexes keyed by
  sequence IDs.

caveat:
  This patch only works reliably for full length `PathID` values. Tests
  for shorter `PathID` values are currently missing.
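
example:
  An illustrative sketch (not part of the patch) of a shortened path key as
  used for sequence-ID style indexing; the concrete values are made up:

    let full  = 17u64.to(PathID)                      # 64 nibbles, as before
    let short = PathID(pfx: 17.u256 shl 240, length: 4).normal
    # `short` addresses a leaf by its first 4 nibbles only (here 0x0011)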
Jordan Hrycaj 2023-10-27 22:36:51 +01:00 committed by GitHub
parent 3198ad1bbd
commit 3fe0a49a5e
15 changed files with 260 additions and 196 deletions

View File

@@ -30,6 +30,11 @@ import
 export
   getKeyRc
+import
+  aristo/aristo_path
+export
+  pathAsBlob
 import aristo/aristo_desc/[desc_identifiers, desc_structural]
 export
   AristoAccount,

View File

@@ -81,7 +81,6 @@ proc checkBE*(
     return RdbBackendRef.checkBE(db, cache=cache, relax=relax)
   of BackendVoid:
     return VoidBackendRef.checkBE(db, cache=cache, relax=relax)
-  ok()
 proc check*(

View File

@@ -24,6 +24,15 @@ import
 # Private functions
 # ------------------------------------------------------------------------------

+proc toHex(w: VertexID): string =
+  w.uint64.toHex.toLowerAscii
+
+proc toHex(w: HashKey): string =
+  w.ByteArray32.toHex.toLowerAscii
+
+proc toHexLsb(w: int8): string =
+  $"0123456789abcdef"[w and 15]
+
 proc sortedKeys(lTab: Table[LeafTie,VertexID]): seq[LeafTie] =
   lTab.keys.toSeq.sorted(cmp = proc(a,b: LeafTie): int = cmp(a,b))

@@ -80,7 +89,7 @@ proc ppVid(vid: VertexID; pfx = true): string =
   if pfx:
     result = "$"
   if vid.isValid:
-    result &= vid.uint64.toHex.stripZeros.toLowerAscii
+    result &= vid.toHex.stripZeros.toLowerAscii
   else:
     result &= "ø"

@@ -110,7 +119,7 @@ proc ppQid(qid: QueueID): string =
     else:
       break here
     return
-  result &= qid.uint64.toHex.stripZeros.toLowerAscii
+  result &= qid.toHex.stripZeros.toLowerAscii

 proc ppVidList(vGen: openArray[VertexID]): string =
   "[" & vGen.mapIt(it.ppVid).join(",") & "]"

@@ -132,9 +141,7 @@ proc ppKey(key: HashKey): string =
   if key == VOID_HASH_KEY:
     return "£r"
-  "%" & key.ByteArray32
-    .mapIt(it.toHex(2)).join.tolowerAscii
-    .squeeze(hex=true,ignLen=true)
+  "%" & key.toHex.squeeze(hex=true,ignLen=true)

 proc ppLabel(lbl: HashLabel; db: AristoDbRef): string =
   if lbl.key == HashKey.default:

@@ -143,7 +150,7 @@ proc ppLabel(lbl: HashLabel; db: AristoDbRef): string =
     return "£r"
   let rid = if not lbl.root.isValid: "ø:"
-            else: ($lbl.root.uint64.toHex).stripZeros & ":"
+            else: ($lbl.root.toHex).stripZeros & ":"
   if not db.top.isNil:
     let vid = db.top.pAmk.getOrVoid lbl
     if vid.isValid:

@@ -153,9 +160,7 @@ proc ppLabel(lbl: HashLabel; db: AristoDbRef): string =
     if vid.isValid:
       return "£" & rid & vid.ppVid(pfx=false)
-  "%" & rid & lbl.key.ByteArray32
-    .mapIt(it.toHex(2)).join.tolowerAscii
-    .squeeze(hex=true,ignLen=true)
+  "%" & rid & lbl.key.toHex.squeeze(hex=true,ignLen=true)

 proc ppRootKey(a: HashKey): string =
   if a.isValid:

@@ -169,17 +174,14 @@ proc ppLeafTie(lty: LeafTie, db: AristoDbRef): string =
     let vid = db.top.lTab.getOrVoid lty
     if vid.isValid:
       return "@" & vid.ppVid
-  "@" & ($lty.root.uint64.toHex).stripZeros & ":" &
-    lty.path.to(HashKey).ByteArray32
-      .mapIt(it.toHex(2)).join.squeeze(hex=true,ignLen=true)
+  "@" & $lty

 proc ppPathPfx(pfx: NibblesSeq): string =
   let s = $pfx
   if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. ^1] & ":" & $s.len

 proc ppNibble(n: int8): string =
-  if n < 0: "ø" elif n < 10: $n else: n.toHex(1).toLowerAscii
+  if n < 0: "ø" elif n < 10: $n else: n.toHexLsb

 proc ppPayload(p: PayloadRef, db: AristoDbRef): string =
   if p.isNil:

View File

@@ -62,6 +62,7 @@ type
     # Path function `hikeUp()`
     HikeRootMissing
+    HikeEmptyPath
     HikeLeafTooEarly
     HikeBranchTailEmpty
     HikeBranchBlindEdge

@@ -70,6 +71,7 @@ type
     # Path/nibble/key conversions in `aisto_path.nim`
     PathExpected64Nibbles
+    PathAtMost64Nibbles
     PathExpectedLeaf

     # Merge leaf `merge()`

@@ -164,7 +166,6 @@ type
     NearbyLeafExpected
     NearbyNestingTooDeep
     NearbyPathTailUnexpected
-    NearbyPathTailInxOverflow
     NearbyUnexpectedVtx
     NearbyVidInvalid

@@ -204,7 +205,7 @@ type
     FilTrgTopSrcMismatch
     FilSiblingsCommitUnfinshed

-    # Get functions form `aristo_get.nim`
+    # Get functions from `aristo_get.nim`
     GetLeafNotFound
     GetVtxNotFound
     GetKeyNotFound

@@ -213,6 +214,9 @@ type
     GetIdgNotFound
     GetFqsNotFound

+    # Fetch functions from `aristo_fetch.nim`
+    FetchPathNotFound
+
     # RocksDB backend
     RdbBeCantCreateDataDir
     RdbBeCantCreateBackupDir

View File

@@ -37,17 +37,24 @@ type
     ## backend of the database, there is no other reference to the node than
     ## the very same `VertexID`.

-  HashID* = distinct UInt256
-    ## Variant of a `Hash256` object that can be used in a order relation
-    ## (i.e. it can be sorted.) Among temporary conversions for sorting, the
-    ## `HashID` type is consistently used for addressing leaf vertices (see
-    ## below `LeafTie`.)
-
   HashKey* = distinct ByteArray32
     ## Dedicated `Hash256` object variant that is used for labelling the
     ## vertices of the `Patricia Trie` in order to make it a
     ## `Merkle Patricia Tree`.

+  PathID* = object
+    ## Path into the `Patricia Trie`. This is a chain of maximal 64 nibbles
+    ## (which is 32 bytes.) In most cases, the length is 64. So the path is
+    ## encoded as a numeric value which is often easier to handle than a
+    ## chain of nibbles.
+    ##
+    ## The path ID should be kept normalised, i.e.
+    ## * 0 <= `length` <= 64
+    ## * the unused trailing nibbles in `pfx` ar set to `0`
+    ##
+    pfx*: UInt256
+    length*: uint8
+
   # ----------

   LeafTie* = object

@@ -60,7 +67,7 @@ type
     ## Note that `LeafTie` objects have no representation in the `Aristo Trie`.
     ## They are used temporarily and in caches or backlog tables.
     root*: VertexID                  ## Root ID for the sub-trie
-    path*: HashID                    ## Path into the `Patricia Trie`
+    path*: PathID                    ## Path into the `Patricia Trie`

   HashLabel* = object
     ## Merkle hash key uniquely associated with a vertex ID. As hashes in a
@@ -127,162 +134,202 @@ func `-`*(a: FilterID; b: uint64): FilterID = (a.uint64-b).FilterID
 func `-`*(a, b: FilterID): uint64 = (a.uint64 - b.uint64)

 # ------------------------------------------------------------------------------
-# Public helpers: `HashID` scalar data model
+# Public helpers: `PathID` ordered scalar data model
 # ------------------------------------------------------------------------------

-func u256*(lp: HashID): UInt256 = lp.UInt256
-func low*(T: type HashID): T = low(UInt256).T
-func high*(T: type HashID): T = high(UInt256).T
-
-func `+`*(a: HashID; b: UInt256): HashID = (a.u256+b).HashID
-func `-`*(a: HashID; b: UInt256): HashID = (a.u256-b).HashID
-func `-`*(a, b: HashID): UInt256 = (a.u256 - b.u256)
-
-func `==`*(a, b: HashID): bool = a.u256 == b.u256
-func `<=`*(a, b: HashID): bool = a.u256 <= b.u256
-func `<`*(a, b: HashID): bool = a.u256 < b.u256
-
-func cmp*(x, y: HashID): int = cmp(x.UInt256, y.UInt256)
+func high*(_: type PathID): PathID =
+  ## Highest possible `PathID` object for given root vertex.
+  PathID(pfx: high(UInt256), length: 64)
+
+func low*(_: type PathID): PathID =
+  ## Lowest possible `PathID` object for given root vertex.
+  PathID()
+
+func next*(pid: PathID): PathID =
+  ## Return a `PathID` object with incremented path field. This function might
+  ## return also a modified `length` field.
+  ##
+  ## The function returns the argument `pid` if it is already at its
+  ## maximum value `high(PathID)`.
+  if pid.pfx == 0 and pid.length < 64:
+    PathID(length: pid.length + 1)
+  elif pid.pfx < high(UInt256):
+    PathID(pfx: pid.pfx + 1, length: 64)
+  else:
+    pid
+
+func prev*(pid: PathID): PathID =
+  ## Return a `PathID` object with decremented path field. This function might
+  ## return also a modified `length` field.
+  ##
+  ## The function returns the argument `pid` if it is already at its
+  ## minimum value `low(PathID)`.
+  if 0 < pid.pfx:
+    PathID(pfx: pid.pfx - 1, length: 64)
+  elif 0 < pid.length:
+    PathID(length: pid.length - 1)
+  else:
+    pid
+
+func `<`*(a, b: PathID): bool =
+  ## This function assumes that the arguments `a` and `b` are normalised
+  ## (see `normal()`.)
+  a.pfx < b.pfx or (a.pfx == b.pfx and a.length < b.length)
+
+func `<=`*(a, b: PathID): bool =
+  not (b < a)
+
+func `==`*(a, b: PathID): bool =
+  ## This function assumes that the arguments `a` and `b` are normalised
+  ## (see `normal()`.)
+  a.pfx == b.pfx and a.length == b.length
 # ------------------------------------------------------------------------------
-# Public helpers: `LeafTie`
+# Public helpers: `LeafTie` ordered scalar data model
 # ------------------------------------------------------------------------------

 func high*(_: type LeafTie; root = VertexID(1)): LeafTie =
   ## Highest possible `LeafTie` object for given root vertex.
-  LeafTie(root: root, path: high(HashID))
+  LeafTie(root: root, path: high(PathID))

 func low*(_: type LeafTie; root = VertexID(1)): LeafTie =
   ## Lowest possible `LeafTie` object for given root vertex.
-  LeafTie(root: root, path: low(HashID))
+  LeafTie(root: root, path: low(PathID))

-func `+`*(lty: LeafTie, n: int): LeafTie =
-  ## Return a `LeafTie` object with incremented path field. This function
-  ## will not check for a path field overflow. Neither it will verify that
-  ## the argument `n` is non-negative.
-  LeafTie(root: lty.root, path: HashID(lty.path.u256 + n.u256))
+func next*(lty: LeafTie): LeafTie =
+  ## Return a `LeafTie` object with the `next()` path field.
+  LeafTie(root: lty.root, path: lty.path.next)

-func `-`*(lty: LeafTie, n: int): LeafTie =
-  ## Return a `LeafTie` object with decremented path field. This function
-  ## will not check for a path field underflow. Neither it will verify that
-  ## the argument `n` is non-negative.
-  LeafTie(root: lty.root, path: HashID(lty.path.u256 - n.u256))
+func prev*(lty: LeafTie): LeafTie =
+  ## Return a `LeafTie` object with the `prev()` path field.
+  LeafTie(root: lty.root, path: lty.path.prev)
+
+func `<`*(a, b: LeafTie): bool =
+  ## This function assumes that the arguments `a` and `b` are normalised
+  ## (see `normal()`.)
+  a.root < b.root or (a.root == b.root and a.path < b.path)
+
+func `==`*(a, b: LeafTie): bool =
+  ## This function assumes that the arguments `a` and `b` are normalised
+  ## (see `normal()`.)
+  a.root == b.root and a.path == b.path
+
+func cmp*(a, b: LeafTie): int =
+  ## This function assumes that the arguments `a` and `b` are normalised
+  ## (see `normal()`.)
+  if a < b: -1 elif a == b: 0 else: 1

 # ------------------------------------------------------------------------------
-# Public helpers: Conversions between `HashID`, `HashKey`, `Hash256`
+# Public helpers: Reversible conversions between `PathID`, `HashKey`, etc.
 # ------------------------------------------------------------------------------

-func to*(hid: HashID; T: type Hash256): T =
-  result.data = hid.UInt256.toBytesBE
-
-func to*(hid: HashID; T: type HashKey): T =
-  hid.UInt256.toBytesBE.T
-
-func to*(key: HashKey; T: type HashID): T =
-  UInt256.fromBytesBE(key.ByteArray32).T
+proc to*(key: HashKey; T: type UInt256): T =
+  T.fromBytesBE key.ByteArray32

 func to*(key: HashKey; T: type Hash256): T =
   T(data: ByteArray32(key))

+func to*(key: HashKey; T: type PathID): T =
+  ## Not necessarily reversible for shorter lengths `PathID` values
+  T(pfx: UInt256.fromBytesBE key.ByteArray32, length: 64)
+
 func to*(hash: Hash256; T: type HashKey): T =
   hash.data.T

-func to*(key: Hash256; T: type HashID): T =
-  key.data.HashKey.to(T)
-
-# ------------------------------------------------------------------------------
-# Public helpers: Miscellaneous mappings
-# ------------------------------------------------------------------------------
-
 func to*(key: HashKey; T: type Blob): T =
   ## Representation of a `HashKey` as `Blob` (preserving full information)
   key.ByteArray32.toSeq

-func to*(hid: HashID; T: type Blob): T =
-  ## Representation of a `HashID` as `Blob` (preserving full information)
-  hid.UInt256.toBytesBE.toSeq
-
 func to*(key: HashKey; T: type NibblesSeq): T =
   ## Representation of a `HashKey` as `NibbleSeq` (preserving full information)
   key.ByteArray32.initNibbleRange()

-func to*(hid: HashID; T: type NibblesSeq): T =
+func to*(pid: PathID; T: type NibblesSeq): T =
   ## Representation of a `HashKey` as `NibbleSeq` (preserving full information)
-  ByteArray32(hid.to(HashKey)).initNibbleRange()
+  let nibbles = pid.pfx.UInt256.toBytesBE.toSeq.initNibbleRange()
+  if pid.length < 64:
+    nibbles.slice(0, pid.length.int)
+  else:
+    nibbles

-func to*(n: SomeUnsignedInt|UInt256; T: type HashID): T =
-  ## Representation of a scalar as `HashID` (preserving full information)
-  n.u256.T
+func to*(n: SomeUnsignedInt|UInt256; T: type PathID): T =
+  ## Representation of a scalar as `PathID` (preserving full information)
+  T(pfx: n.u256, length: 64)

+# ------------------------------------------------------------------------------
+# Public helpers: Miscellaneous mappings
+# ------------------------------------------------------------------------------
+
 func digestTo*(data: openArray[byte]; T: type HashKey): T =
   ## Keccak hash of a `Blob` like argument, represented as a `HashKey`
   keccakHash(data).data.T

+func normal*(a: PathID): PathID =
+  ## Normalise path ID representation
+  result = a
+  if 64 < a.length:
+    result.length = 64
+  elif a.length < 64:
+    result.pfx = a.pfx and not (1.u256 shl (4 * (64 - a.length))) - 1.u256
 # ------------------------------------------------------------------------------
 # Public helpers: `Tables` and `Rlp` support
 # ------------------------------------------------------------------------------

-func hash*(a: HashID): Hash =
+func hash*(a: PathID): Hash =
   ## Table/KeyedQueue mixin
-  a.to(HashKey).ByteArray32.hash
+  var h: Hash = 0
+  h = h !& a.pfx.toBytesBE.hash
+  h = h !& a.length.hash
+  !$h

-func hash*(a: HashKey): Hash =
-  ## Table/KeyedQueue mixin
-  a.ByteArray32.hash
+func hash*(a: HashKey): Hash {.borrow.}

-func `==`*(a, b: HashKey): bool =
-  ## Table/KeyedQueue mixin
-  a.ByteArray32 == b.ByteArray32
+func `==`*(a, b: HashKey): bool {.borrow.}

-func read*[T: HashID|HashKey](
-    rlp: var Rlp;
-    W: type T;
-      ): T
-      {.gcsafe, raises: [RlpError].} =
+func read*(rlp: var Rlp; T: type HashKey;): T {.gcsafe, raises: [RlpError].} =
   rlp.read(Hash256).to(T)

-func append*(writer: var RlpWriter, val: HashID|HashKey) =
+func append*(writer: var RlpWriter, val: HashKey) =
   writer.append(val.to(Hash256))

-# ------------------------------------------------------------------------------
-# Public helpers: `LeafTie` scalar data model
-# ------------------------------------------------------------------------------
-
-func `<`*(a, b: LeafTie): bool =
-  a.root < b.root or (a.root == b.root and a.path < b.path)
-
-func `==`*(a, b: LeafTie): bool =
-  a.root == b.root and a.path == b.path
-
-func cmp*(a, b: LeafTie): int =
-  if a < b: -1 elif a == b: 0 else: 1
-
-func `$`*(a: LeafTie): string =
-  let w = $a.root.uint64.toHex & ":" & $a.path.Uint256.toHex
-  w.strip(leading=true, trailing=false, chars={'0'}).toLowerAscii
-
 # ------------------------------------------------------------------------------
 # Miscellaneous helpers
 # ------------------------------------------------------------------------------

-func `$`*(hid: HashID): string =
-  if hid == high(HashID):
+func `$`*(key: HashKey): string =
+  let w = UInt256.fromBytesBE key.ByteArray32
+  if w == high(UInt256):
     "2^256-1"
-  elif hid == 0.u256.HashID:
+  elif w == 0.u256:
     "0"
-  elif hid == 2.u256.pow(255).HashID:
+  elif w == 2.u256.pow 255:
     "2^255" # 800...
-  elif hid == 2.u256.pow(254).HashID:
+  elif w == 2.u256.pow 254:
     "2^254" # 400..
-  elif hid == 2.u256.pow(253).HashID:
+  elif w == 2.u256.pow 253:
     "2^253" # 200...
-  elif hid == 2.u256.pow(251).HashID:
+  elif w == 2.u256.pow 251:
     "2^252" # 100...
   else:
-    hid.UInt256.toHex
+    w.toHex

-func `$`*(key: HashKey): string =
-  $key.to(HashID)
+func `$`*(a: PathID): string =
+  if a.pfx != 0:
+    result = ($a.pfx.toHex).strip(
+      leading=true, trailing=false, chars={'0'}).toLowerAscii
+  elif a.length != 0:
+    result = "0"
+  if a.length < 64:
+    result &= "(" & $a.length & ")"
+
+func `$`*(a: LeafTie): string =
+  if a.root != 0:
+    result = ($a.root.uint64.toHex).strip(
+      leading=true, trailing=false, chars={'0'}).toLowerAscii
+  else:
+    result = "0"
+  result &= ":" & $a.path

 # ------------------------------------------------------------------------------
 # End
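
The ordered scalar model above can be sanity-checked with a few assertions.
This is an illustrative sketch only (not part of the commit) and assumes the
module compiles as shown:

    let a = PathID(pfx: 0.u256, length: 3)        # short, normalised path
    doAssert a < a.next                           # next() grows the length first
    doAssert a.next.prev == a
    doAssert PathID(pfx: 5.u256, length: 70).normal.length == 64
    doAssert high(PathID).next == high(PathID)    # saturates at the top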

View File

@@ -18,6 +18,13 @@ import
   results,
   "."/[aristo_desc, aristo_hike]

+const
+  AcceptableHikeStops = {
+    HikeBranchTailEmpty,
+    HikeBranchBlindEdge,
+    HikeExtTailEmpty,
+    HikeExtTailMismatch}
+
 # ------------------------------------------------------------------------------
 # Private functions
 # ------------------------------------------------------------------------------

@@ -29,6 +36,8 @@ proc fetchPayloadImpl(
     let vid =
       if rc.error[0].legs.len == 0: VertexID(0)
       else: rc.error[0].legs[^1].wp.vid
+    if rc.error[1] in AcceptableHikeStops:
+      return err((vid, FetchPathNotFound))
     return err((vid, rc.error[1]))
   ok rc.value.legs[^1].wp.vtx.lData

@@ -63,7 +72,7 @@ proc fetchPayload*(
     return err((VertexID(0),LeafKeyInvalid))
   db.fetchPayloadImpl(root, path)

-proc contains*(
+proc hasPath*(
     db: AristoDbRef;                  # Database
     root: VertexID;
     path: openArray[byte];            # Key of database record

@@ -75,7 +84,9 @@ proc contains*(
   let rc = db.fetchPayloadImpl(root, path)
   if rc.isOk:
     return ok(true)
-  return ok(false)
+  if rc.error[1] == FetchPathNotFound:
+    return ok(false)
+  err(rc.error)

 # ------------------------------------------------------------------------------
 # End

View File

@@ -75,6 +75,8 @@ proc hikeUp*(
   if not root.isValid:
     return err((hike,HikeRootMissing))
+  if path.len == 0:
+    return err((hike,HikeEmptyPath))
   var vid = root
   while vid.isValid:

View File

@@ -11,7 +11,7 @@
 ## Aristo DB -- Patricia Trie builder, raw node insertion
 ## ======================================================
 ##
-## This module merges `HashID` values as hexary lookup paths into the
+## This module merges `PathID` values as hexary lookup paths into the
 ## `Patricia Trie`. When changing vertices (aka nodes without Merkle hashes),
 ## associated (but separated) Merkle hashes will be deleted unless locked.
 ## Instead of deleting locked hashes error handling is applied.

@@ -627,8 +627,8 @@ proc merge*(
   # Double check the result until the code is more reliable
   block:
-    let rc = okHike.to(NibblesSeq).pathToKey
-    if rc.isErr or rc.value != leafTie.path.to(HashKey):
+    let rc = okHike.to(NibblesSeq).pathToTag
+    if rc.isErr or rc.value != leafTie.path:
       return err(MergeAssemblyFailed) # Ooops

   # Update leaf acccess cache

@@ -640,12 +640,12 @@ proc merge*(
 proc merge*(
     db: AristoDbRef;                   # Database, top layer
     root: VertexID;                    # MPT state root
-    path: openArray[byte];             # Leaf item to add to the database
+    path: openArray[byte];             # Even nibbled byte path
     payload: PayloadRef;               # Payload value
       ): Result[bool,AristoError] =
   ## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`
   ## object.
-  let lty = LeafTie(root: root, path: ? path.pathToTag)
+  let lty = LeafTie(root: root, path: ? path.initNibbleRange.pathToTag)
   db.merge(lty, payload).to(typeof result)

 proc merge*(

@@ -688,7 +688,7 @@ proc merge*(
 proc merge*(
     db: AristoDbRef;                   # Database, top layer
-    path: HashID;                      # Path into database
+    path: PathID;                      # Path into database
     rlpData: openArray[byte];          # RLP encoded payload data
       ): Result[bool,AristoError] =
   ## Variant of `merge()` for storing a single item with implicte state root

@@ -697,7 +697,7 @@ proc merge*(
   db.merge(
     LeafTie(
       root: VertexID(1),
-      path: path),
+      path: path.normal),
     PayloadRef(
       pType: RlpData,
       rlpBlob: @rlpData)).to(typeof result)
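
For illustration, the `(PathID, rlpData)` variant of `merge()` above can store
an entry under a short path key. This is a sketch only (not part of the
commit); `db` and `blob` are placeholders, and per the commit caveat short
`PathID` values are not yet covered by tests:

    let key = PathID(pfx: 0x1234.u256 shl 240, length: 4)  # 4-nibble path
    let rc  = db.merge(key, blob)     # normalised internally via `path.normal`
    if rc.isErr:
      echo "merge failed: ", rc.error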

View File

@@ -72,7 +72,7 @@ proc branchNibbleMax*(vtx: VertexRef; maxInx: int8): int8 =
 # ------------------

-proc toTLeafTiePayload(hike: Hike): (LeafTie,PayloadRef) =
+proc toLeafTiePayload(hike: Hike): (LeafTie,PayloadRef) =
   ## Shortcut for iterators. This function will gloriously crash unless the
   ## `hike` argument is complete.
   (LeafTie(root: hike.root, path: hike.to(NibblesSeq).pathToTag.value),

@@ -167,10 +167,15 @@ proc zeroAdjust(
       case root.vType:
       of Branch:
         # Find first non-dangling link and assign it
-        if hike.tail.len == 0:
-          break fail
-        let n = root.branchBorderNibble hike.tail[0].int8
+        let nibbleID = block:
+          when doLeast:
+            if hike.tail.len == 0: 0i8
+            else: hike.tail[0].int8
+          else:
+            if hike.tail.len == 0:
+              break fail
+            hike.tail[0].int8
+        let n = root.branchBorderNibble nibbleID
         if n < 0:
           # Before or after the database range
           return err((hike.root,NearbyBeyondRange))

@@ -179,26 +184,16 @@ proc zeroAdjust(
       of Extension:
         let ePfx = root.ePfx
         # Must be followed by a branch vertex
-        if hike.tail.len < 2 or not hike.accept(ePfx):
+        if not hike.accept ePfx:
           break fail
         let vtx = db.getVtx root.eVid
         if not vtx.isValid:
           break fail
-        let ePfxLen = ePfx.len
-        if hike.tail.len <= ePfxLen:
-          return err((root.eVid,NearbyPathTailInxOverflow))
-        let tailPfx = hike.tail.slice(0,ePfxLen)
-        when doLeast:
-          if ePfx < tailPfx:
-            return err((root.eVid,NearbyBeyondRange))
-        else:
-          if tailPfx < ePfx:
-            return err((root.eVid,NearbyBeyondRange))
         pfx = ePfx

       of Leaf:
         pfx = root.lPfx
-        if not hike.accept(pfx):
+        if not hike.accept pfx:
           # Before or after the database range
           return err((hike.root,NearbyBeyondRange))
@@ -368,13 +363,12 @@ proc nearbyNext(
   # Handle some pathological cases
   hike.finalise(db, moveRight)

 proc nearbyNextLeafTie(
     lty: LeafTie;                     # Some `Patricia Trie` path
     db: AristoDbRef;                  # Database layer
     hikeLenMax: static[int];          # Beware of loops (if any)
     moveRight:static[bool];           # Direction of next vertex
-      ): Result[HashID,(VertexID,AristoError)] =
+      ): Result[PathID,(VertexID,AristoError)] =
   ## Variant of `nearbyNext()`, convenience wrapper
   let hike = ? lty.hikeUp(db).to(Hike).nearbyNext(db, hikeLenMax, moveRight)

@@ -383,7 +377,7 @@ proc nearbyNextLeafTie(
     return err((hike.legs[^1].wp.vid,NearbyLeafExpected))
   let rc = hike.legsTo(NibblesSeq).pathToKey
   if rc.isOk:
-    return ok rc.value.to(HashID)
+    return ok rc.value.to(PathID)
   return err((VertexID(0),rc.error))
   err((VertexID(0),NearbyLeafExpected))

@@ -411,7 +405,7 @@ proc right*(
     lty: LeafTie;                     # Some `Patricia Trie` path
     db: AristoDbRef;                  # Database layer
       ): Result[LeafTie,(VertexID,AristoError)] =
-  ## Variant of `nearbyRight()` working with a `HashID` argument instead
+  ## Variant of `nearbyRight()` working with a `LeafTie` argument instead
   ## of a `Hike`.
   ok LeafTie(
     root: lty.root,

@@ -428,14 +422,14 @@ iterator right*(
     rc = hike.right db
   while rc.isOK:
     hike = rc.value
-    let (key, pyl) = hike.toTLeafTiePayload
+    let (key, pyl) = hike.toLeafTiePayload
     yield (key, pyl)
-    if high(HashID) <= key.path:
+    if high(PathID) <= key.path:
       break

     # Increment `key` by one and update `hike`. In many cases, the current
     # `hike` can be modified and re-used which saves some database lookups.
-    block:
+    block reuseHike:
       let tail = hike.legs[^1].wp.vtx.lPfx
       if 0 < tail.len:
         let topNibble = tail[tail.len - 1]

@@ -443,16 +437,16 @@ iterator right*(
           let newNibble = @[topNibble+1].initNibbleRange.slice(1)
           hike.tail = tail.slice(0, tail.len - 1) & newNibble
           hike.legs.setLen(hike.legs.len - 1)
-          break
+          break reuseHike
       if 1 < tail.len:
         let nxtNibble = tail[tail.len - 2]
         if nxtNibble < 15:
           let dblNibble = @[((nxtNibble+1) shl 4) + 0].initNibbleRange
           hike.tail = tail.slice(0, tail.len - 2) & dblNibble
           hike.legs.setLen(hike.legs.len - 1)
-          break
+          break reuseHike

       # Fall back to default method
-      hike = (key + 1).hikeUp(db).to(Hike)
+      hike = key.next.hikeUp(db).to(Hike)
     rc = hike.right db
     # End while

@@ -473,7 +467,7 @@ proc left*(
     lty: LeafTie;                     # Some `Patricia Trie` path
     db: AristoDbRef;                  # Database layer
       ): Result[LeafTie,(VertexID,AristoError)] =
-  ## Similar to `nearbyRight()` for `HashID` argument instead of a `Hike`.
+  ## Similar to `nearbyRight()` for `LeafTie` argument instead of a `Hike`.
   ok LeafTie(
     root: lty.root,
     path: ? lty.nearbyNextLeafTie(db, 64, moveRight=false))

@@ -491,14 +485,14 @@ iterator left*(
     rc = hike.left db
   while rc.isOK:
     hike = rc.value
-    let (key, pyl) = hike.toTLeafTiePayload
+    let (key, pyl) = hike.toLeafTiePayload
     yield (key, pyl)
-    if key.path <= low(HashID):
+    if key.path <= low(PathID):
       break

     # Decrement `key` by one and update `hike`. In many cases, the current
     # `hike` can be modified and re-used which saves some database lookups.
-    block:
+    block reuseHike:
       let tail = hike.legs[^1].wp.vtx.lPfx
       if 0 < tail.len:
         let topNibble = tail[tail.len - 1]

@@ -506,16 +500,16 @@ iterator left*(
           let newNibble = @[topNibble - 1].initNibbleRange.slice(1)
           hike.tail = tail.slice(0, tail.len - 1) & newNibble
           hike.legs.setLen(hike.legs.len - 1)
-          break
+          break reuseHike
       if 1 < tail.len:
         let nxtNibble = tail[tail.len - 2]
         if 0 < nxtNibble:
           let dblNibble = @[((nxtNibble-1) shl 4) + 15].initNibbleRange
           hike.tail = tail.slice(0, tail.len - 2) & dblNibble
           hike.legs.setLen(hike.legs.len - 1)
-          break
+          break reuseHike

       # Fall back to default method
-      hike = (key - 1).hikeUp(db).to(Hike)
+      hike = key.prev.hikeUp(db).to(Hike)
     rc = hike.left db
     # End while

View File

@@ -30,14 +30,16 @@ import
 #
 # where the `ignored` part is typically expected a zero nibble.

+func pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq
+
 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------

-proc pathAsBlob*(keyOrTag: HashKey|HashID): Blob =
-  keyOrTag.pathAsNibbles.hexPrefixEncode(isLeaf=true)
+func pathAsBlob*(keyOrTag: HashKey|PathID): Blob =
+  keyOrTag.to(NibblesSeq).hexPrefixEncode(isLeaf=true)

-proc pathToKey*(partPath: NibblesSeq): Result[HashKey,AristoError] =
+func pathToKey*(partPath: NibblesSeq): Result[HashKey,AristoError] =
   var key: ByteArray32
   if partPath.len == 64:
     # Trailing dummy nibbles (aka no nibbles) force a nibble seq reorg

@@ -46,7 +48,7 @@ proc pathToKey*(partPath: NibblesSeq): Result[HashKey,AristoError] =
     return ok(key.HashKey)
   err(PathExpected64Nibbles)

-proc pathToKey*(
+func pathToKey*(
     partPath: openArray[byte];
       ): Result[HashKey,AristoError] =
   let (isLeaf,pathSegment) = partPath.hexPrefixDecode

@@ -54,14 +56,17 @@ proc pathToKey*(
     return pathSegment.pathToKey()
   err(PathExpectedLeaf)

-proc pathToTag*(
-    partPath: NibblesSeq|openArray[byte];
-      ): Result[HashID,AristoError] =
-  ok (? partPath.pathToKey).to(HashID)
+func pathToTag*(partPath: NibblesSeq): Result[PathID,AristoError] =
+  ## Nickname `tag` for `PathID`
+  if partPath.len <= 64:
+    return ok PathID(
+      pfx: UInt256.fromBytesBE partPath.pathPfxPad(0).getBytes(),
+      length: partPath.len.uint8)
+  err(PathAtMost64Nibbles)

 # --------------------

-proc pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
+func pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
   ## Extend (or cut) the argument nibbles sequence `pfx` for generating a
   ## `NibblesSeq` with exactly 64 nibbles, the equivalent of a path key.
   ##

@@ -80,7 +85,7 @@ proc pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
   let nope = seq[byte].default.initNibbleRange
   result = pfx.slice(0,64) & nope # nope forces re-alignment

-proc pathPfxPadKey*(pfx: NibblesSeq; dblNibble: static[byte]): HashKey =
+func pathPfxPadKey*(pfx: NibblesSeq; dblNibble: static[byte]): HashKey =
   ## Variant of `pathPfxPad()`.
   ##
   ## Extend (or cut) the argument nibbles sequence `pfx` for generating a

View File

@@ -90,7 +90,7 @@ proc get*(
     return ok(data)
   db.getBE key

-proc contains*(
+proc hasKey*(
     db: KvtDbRef;                     # Database
     key: openArray[byte];             # Key of database record
       ): Result[bool,KvtError] =

View File

@@ -25,7 +25,8 @@ template xCheck*(expr: untyped): untyped =
 template xCheck*(expr: untyped; ifFalse: untyped): untyped =
   ## Note: this check will invoke `expr` twice
   if not (expr):
-    ifFalse
+    block:
+      ifFalse
     check expr
     return

View File

@@ -74,7 +74,7 @@ proc fList(be: BackendRef): seq[(QueueID,FilterRef)] =
 func ppFil(w: FilterRef; db = AristoDbRef(nil)): string =
   proc qq(key: HashKey; db: AristoDbRef): string =
     if db.isNil:
-      let n = key.to(HashID).UInt256
+      let n = key.to(UInt256)
       if n == 0: "£ø" else: "£" & $n
     else:
       HashLabel(root: VertexID(1), key: key).pp(db)

@@ -377,7 +377,7 @@ proc checkFilterTrancoderOk(
 func to(fid: FilterID; T: type HashKey): T =
-  fid.uint64.to(HashID).to(T)
+  fid.uint64.u256.toBytesBE.T

 proc qid2fidFn(be: BackendRef): QuFilMap =
   result = proc(qid: QueueID): FilterID =

@@ -491,12 +491,12 @@ proc validateFifo(
   ##     ..   |    ..   |  ..
   ##
   var
-    lastTrg = serial.u256.to(HashID)
+    lastTrg = serial.u256
     inx = 0
     lastFid = FilterID(serial+1)

   if hashesOk:
-    lastTrg = be.getKeyFn(VertexID(1)).get(otherwise = VOID_HASH_KEY).to(HashID)
+    lastTrg = be.getKeyFn(VertexID(1)).get(otherwise=VOID_HASH_KEY).to(UInt256)

   for chn,fifo in be.fifos:
     for (qid,filter) in fifo:

@@ -504,8 +504,8 @@ proc validateFifo(
       # Check filter objects
       xCheck chn == (qid.uint64 shr 62).int
       xCheck filter != FilterRef(nil)
-      xCheck filter.src.to(HashID) == lastTrg
-      lastTrg = filter.trg.to(HashID)
+      xCheck filter.src.to(UInt256) == lastTrg
+      lastTrg = filter.trg.to(UInt256)

       # Check random access
       xCheck qid == be.filters[inx]

View File

@@ -162,7 +162,7 @@ proc to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
       kvpLst: w.data.accounts.mapIt(LeafTiePayload(
         leafTie: LeafTie(
           root: rootVid,
-          path: it.accKey.to(HashKey).to(HashID)),
+          path: it.accKey.to(HashKey).to(PathID)),
         payload: PayloadRef(pType: RawData, rawBlob: it.accBlob))))

 proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =

@@ -179,7 +179,7 @@ proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
       kvpLst: w.data.mapIt(LeafTiePayload(
         leafTie: LeafTie(
           root: rootVid,
-          path: it.slotHash.to(HashKey).to(HashID)),
+          path: it.slotHash.to(HashKey).to(PathID)),
         payload: PayloadRef(pType: RawData, rawBlob: it.slotData))))
   if 0 < result.len:
     result[^1].proof = s.data.proof

View File

@@ -230,7 +230,7 @@ proc fwdWalkVerify(
     n = 0
   for (key,_) in db.right low(LeafTie,root):
     xCheck key in leftOver:
-      noisy.say "*** fwdWalkVerify", " id=", n + (nLeafs + 1) * debugID
+      noisy.say "*** fwdWalkVerify", "id=", n + (nLeafs + 1) * debugID
     leftOver.excl key
     last = key
     n.inc

@@ -239,13 +239,10 @@ proc fwdWalkVerify(
   if last.root == VertexID(0):
     last = low(LeafTie,root)
   elif last != high(LeafTie,root):
-    last = last + 1
+    last = last.next
   let rc = last.right db
-  if rc.isOk:
-    xCheck rc == WalkStopErr
-  else:
-    xCheck rc.error[1] == NearbyBeyondRange
+  xCheck rc.isErr
+  xCheck rc.error[1] == NearbyBeyondRange

   xCheck n == nLeafs
   true

@@ -274,13 +271,10 @@ proc revWalkVerify(
   if last.root == VertexID(0):
     last = high(LeafTie,root)
   elif last != low(LeafTie,root):
-    last = last - 1
+    last = last.prev
   let rc = last.left db
-  if rc.isOk:
-    xCheck rc == WalkStopErr
-  else:
-    xCheck rc.error[1] == NearbyBeyondRange
+  xCheck rc.isErr
+  xCheck rc.error[1] == NearbyBeyondRange

   xCheck n == nLeafs
   true
@@ -475,8 +469,8 @@ proc testTxSpanMultiInstances*(
     dx: seq[AristoDbRef]
   var genID = genBase

-  proc newHashID(): HashID =
-    result = HashID(genID.u256)
+  proc newPathID(): PathID =
+    result = PathID(pfx: genID.u256, length: 64)
     genID.inc

   proc newPayload(): Blob =
     result = @[genID].encode

@@ -501,7 +495,7 @@ proc testTxSpanMultiInstances*(
   # Add some data and first transaction
   block:
-    let rc = db.merge(newHashID(), newPayload())
+    let rc = db.merge(newPathID(), newPayload())
     xCheckRc rc.error == 0
   block:
     let rc = db.checkTop(relax=true)

@@ -515,7 +509,7 @@ proc testTxSpanMultiInstances*(
     xCheckRc rc.error == 0
     dx.add rc.value
   block:
-    let rc = dx[^1].merge(newHashID(), newPayload())
+    let rc = dx[^1].merge(newPathID(), newPayload())
     xCheckRc rc.error == 0
   block:
     let rc = db.checkTop(relax=true)

@@ -546,10 +540,10 @@ proc testTxSpanMultiInstances*(
   # Add more data ..
   block:
-    let rc = db.merge(newHashID(), newPayload())
+    let rc = db.merge(newPathID(), newPayload())
     xCheckRc rc.error == 0
   for n in 0 ..< dx.len:
-    let rc = dx[n].merge(newHashID(), newPayload())
+    let rc = dx[n].merge(newPathID(), newPayload())
     xCheckRc rc.error == 0

   #show(3)