Aristo db: allow shorter than 64 nibbles path keys (#1864)

* Aristo: Single `FetchPathNotFound` error in `fetchXxx()` and `hasPath()`

why:
  A missing path hike returns too many detailed reasons why it failed,
  which becomes cumbersome to handle.

also:
  Renamed `contains()` => `hasPath()` which disables the `in` operator on
  non-boolean `contains()` functions

* Kvt: Renamed `contains()` => `hasKey()`

why:
  The rename disables the `in` operator on non-boolean `contains()` functions

* Aristo: Generalising `HashID` to the variable-length `PathID`

why:
  There are cases when the `Aristo` database is to be used with keys
  shorter than 64 nibbles, e.g. when handling transaction indexes with
  sequence IDs.

caveat:
  This patch only works reliably for full-length `PathID` values. Tests
  for shorter `PathID` values are currently missing.
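
illustration:
  A minimal standalone sketch (not part of this patch, assuming the `stint`
  package) of the new variable-length, normalised path representation:

    import stint

    type PathID = object
      pfx: UInt256     # leading nibbles, left aligned in the 256 bit word
      length: uint8    # number of significant nibbles, 0 .. 64

    # a 3 nibble path 0xabc -- the unused trailing 61 nibbles stay zero
    let short = PathID(pfx: 0xabc.u256 shl (4 * 61), length: 3)

    # a full 64 nibble path behaves like the former `HashID`
    let full = PathID(pfx: high(UInt256), length: 64)

    assert short.length == 3 and full.length == 64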

View File

@ -30,6 +30,11 @@ import
export
getKeyRc
import
aristo/aristo_path
export
pathAsBlob
import aristo/aristo_desc/[desc_identifiers, desc_structural]
export
AristoAccount,

View File

@ -81,7 +81,6 @@ proc checkBE*(
return RdbBackendRef.checkBE(db, cache=cache, relax=relax)
of BackendVoid:
return VoidBackendRef.checkBE(db, cache=cache, relax=relax)
ok()
proc check*(

View File

@ -24,6 +24,15 @@ import
# Private functions
# ------------------------------------------------------------------------------
proc toHex(w: VertexID): string =
w.uint64.toHex.toLowerAscii
proc toHex(w: HashKey): string =
w.ByteArray32.toHex.toLowerAscii
proc toHexLsb(w: int8): string =
$"0123456789abcdef"[w and 15]
proc sortedKeys(lTab: Table[LeafTie,VertexID]): seq[LeafTie] =
lTab.keys.toSeq.sorted(cmp = proc(a,b: LeafTie): int = cmp(a,b))
@ -80,7 +89,7 @@ proc ppVid(vid: VertexID; pfx = true): string =
if pfx:
result = "$"
if vid.isValid:
result &= vid.uint64.toHex.stripZeros.toLowerAscii
result &= vid.toHex.stripZeros.toLowerAscii
else:
result &= "ø"
@ -110,7 +119,7 @@ proc ppQid(qid: QueueID): string =
else:
break here
return
result &= qid.uint64.toHex.stripZeros.toLowerAscii
result &= qid.toHex.stripZeros.toLowerAscii
proc ppVidList(vGen: openArray[VertexID]): string =
"[" & vGen.mapIt(it.ppVid).join(",") & "]"
@ -132,9 +141,7 @@ proc ppKey(key: HashKey): string =
if key == VOID_HASH_KEY:
return "£r"
"%" & key.ByteArray32
.mapIt(it.toHex(2)).join.tolowerAscii
.squeeze(hex=true,ignLen=true)
"%" & key.toHex.squeeze(hex=true,ignLen=true)
proc ppLabel(lbl: HashLabel; db: AristoDbRef): string =
if lbl.key == HashKey.default:
@ -143,7 +150,7 @@ proc ppLabel(lbl: HashLabel; db: AristoDbRef): string =
return "£r"
let rid = if not lbl.root.isValid: "ø:"
else: ($lbl.root.uint64.toHex).stripZeros & ":"
else: ($lbl.root.toHex).stripZeros & ":"
if not db.top.isNil:
let vid = db.top.pAmk.getOrVoid lbl
if vid.isValid:
@ -153,9 +160,7 @@ proc ppLabel(lbl: HashLabel; db: AristoDbRef): string =
if vid.isValid:
return "£" & rid & vid.ppVid(pfx=false)
"%" & rid & lbl.key.ByteArray32
.mapIt(it.toHex(2)).join.tolowerAscii
.squeeze(hex=true,ignLen=true)
"%" & rid & lbl.key.toHex.squeeze(hex=true,ignLen=true)
proc ppRootKey(a: HashKey): string =
if a.isValid:
@ -169,17 +174,14 @@ proc ppLeafTie(lty: LeafTie, db: AristoDbRef): string =
let vid = db.top.lTab.getOrVoid lty
if vid.isValid:
return "@" & vid.ppVid
"@" & ($lty.root.uint64.toHex).stripZeros & ":" &
lty.path.to(HashKey).ByteArray32
.mapIt(it.toHex(2)).join.squeeze(hex=true,ignLen=true)
"@" & $lty
proc ppPathPfx(pfx: NibblesSeq): string =
let s = $pfx
if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. ^1] & ":" & $s.len
proc ppNibble(n: int8): string =
if n < 0: "ø" elif n < 10: $n else: n.toHex(1).toLowerAscii
if n < 0: "ø" elif n < 10: $n else: n.toHexLsb
proc ppPayload(p: PayloadRef, db: AristoDbRef): string =
if p.isNil:

View File

@ -62,6 +62,7 @@ type
# Path function `hikeUp()`
HikeRootMissing
HikeEmptyPath
HikeLeafTooEarly
HikeBranchTailEmpty
HikeBranchBlindEdge
@ -70,6 +71,7 @@ type
# Path/nibble/key conversions in `aisto_path.nim`
PathExpected64Nibbles
PathAtMost64Nibbles
PathExpectedLeaf
# Merge leaf `merge()`
@ -164,7 +166,6 @@ type
NearbyLeafExpected
NearbyNestingTooDeep
NearbyPathTailUnexpected
NearbyPathTailInxOverflow
NearbyUnexpectedVtx
NearbyVidInvalid
@ -204,7 +205,7 @@ type
FilTrgTopSrcMismatch
FilSiblingsCommitUnfinshed
# Get functions form `aristo_get.nim`
# Get functions from `aristo_get.nim`
GetLeafNotFound
GetVtxNotFound
GetKeyNotFound
@ -213,6 +214,9 @@ type
GetIdgNotFound
GetFqsNotFound
# Fetch functions from `aristo_fetch.nim`
FetchPathNotFound
# RocksDB backend
RdbBeCantCreateDataDir
RdbBeCantCreateBackupDir

View File

@ -37,17 +37,24 @@ type
## backend of the database, there is no other reference to the node than
## the very same `VertexID`.
HashID* = distinct UInt256
## Variant of a `Hash256` object that can be used in an order relation
## (i.e. it can be sorted.) Among temporary conversions for sorting, the
## `HashID` type is consistently used for addressing leaf vertices (see
## below `LeafTie`.)
HashKey* = distinct ByteArray32
## Dedicated `Hash256` object variant that is used for labelling the
## vertices of the `Patricia Trie` in order to make it a
## `Merkle Patricia Tree`.
PathID* = object
## Path into the `Patricia Trie`. This is a chain of at most 64 nibbles
## (i.e. 32 bytes.) In most cases the length is 64, so the path is
## encoded as a numeric value which is often easier to handle than a
## chain of nibbles.
##
## The path ID should be kept normalised, i.e.
## * 0 <= `length` <= 64
## * the unused trailing nibbles in `pfx` are set to `0`
##
pfx*: UInt256
length*: uint8
# ----------
LeafTie* = object
@ -60,7 +67,7 @@ type
## Note that `LeafTie` objects have no representation in the `Aristo Trie`.
## They are used temporarily and in caches or backlog tables.
root*: VertexID ## Root ID for the sub-trie
path*: HashID ## Path into the `Patricia Trie`
path*: PathID ## Path into the `Patricia Trie`
HashLabel* = object
## Merkle hash key uniquely associated with a vertex ID. As hashes in a
@ -127,162 +134,202 @@ func `-`*(a: FilterID; b: uint64): FilterID = (a.uint64-b).FilterID
func `-`*(a, b: FilterID): uint64 = (a.uint64 - b.uint64)
# ------------------------------------------------------------------------------
# Public helpers: `HashID` scalar data model
# Public helpers: `PathID` ordered scalar data model
# ------------------------------------------------------------------------------
func u256*(lp: HashID): UInt256 = lp.UInt256
func low*(T: type HashID): T = low(UInt256).T
func high*(T: type HashID): T = high(UInt256).T
func high*(_: type PathID): PathID =
## Highest possible `PathID` object.
PathID(pfx: high(UInt256), length: 64)
func `+`*(a: HashID; b: UInt256): HashID = (a.u256+b).HashID
func `-`*(a: HashID; b: UInt256): HashID = (a.u256-b).HashID
func `-`*(a, b: HashID): UInt256 = (a.u256 - b.u256)
func low*(_: type PathID): PathID =
## Lowest possible `PathID` object.
PathID()
func `==`*(a, b: HashID): bool = a.u256 == b.u256
func `<=`*(a, b: HashID): bool = a.u256 <= b.u256
func `<`*(a, b: HashID): bool = a.u256 < b.u256
func next*(pid: PathID): PathID =
## Return a `PathID` object with an incremented path field. This function
## might also return a modified `length` field.
##
## The function returns the argument `pid` if it is already at its
## maximum value `high(PathID)`.
if pid.pfx == 0 and pid.length < 64:
PathID(length: pid.length + 1)
elif pid.pfx < high(UInt256):
PathID(pfx: pid.pfx + 1, length: 64)
else:
pid
func cmp*(x, y: HashID): int = cmp(x.UInt256, y.UInt256)
func prev*(pid: PathID): PathID =
## Return a `PathID` object with a decremented path field. This function
## might also return a modified `length` field.
##
## The function returns the argument `pid` if it is already at its
## minimum value `low(PathID)`.
if 0 < pid.pfx:
PathID(pfx: pid.pfx - 1, length: 64)
elif 0 < pid.length:
PathID(length: pid.length - 1)
else:
pid
func `<`*(a, b: PathID): bool =
## This function assumes that the arguments `a` and `b` are normalised
## (see `normal()`.)
a.pfx < b.pfx or (a.pfx == b.pfx and a.length < b.length)
func `<=`*(a, b: PathID): bool =
not (b < a)
func `==`*(a, b: PathID): bool =
## This function assumes that the arguments `a` and `b` are normalised
## (see `normal()`.)
a.pfx == b.pfx and a.length == b.length
# ------------------------------------------------------------------------------
# Public helpers: `LeafTie`
# Public helpers: `LeafTie` ordered scalar data model
# ------------------------------------------------------------------------------
func high*(_: type LeafTie; root = VertexID(1)): LeafTie =
## Highest possible `LeafTie` object for given root vertex.
LeafTie(root: root, path: high(HashID))
LeafTie(root: root, path: high(PathID))
func low*(_: type LeafTie; root = VertexID(1)): LeafTie =
## Lowest possible `LeafTie` object for given root vertex.
LeafTie(root: root, path: low(HashID))
LeafTie(root: root, path: low(PathID))
func `+`*(lty: LeafTie, n: int): LeafTie =
## Return a `LeafTie` object with incremented path field. This function
## will not check for a path field overflow. Neither it will verify that
## the argument `n` is non-negative.
LeafTie(root: lty.root, path: HashID(lty.path.u256 + n.u256))
func next*(lty: LeafTie): LeafTie =
## Return a `LeafTie` object with the `next()` path field.
LeafTie(root: lty.root, path: lty.path.next)
func `-`*(lty: LeafTie, n: int): LeafTie =
## Return a `LeafTie` object with decremented path field. This function
## will not check for a path field underflow. Neither it will verify that
## the argument `n` is non-negative.
LeafTie(root: lty.root, path: HashID(lty.path.u256 - n.u256))
func prev*(lty: LeafTie): LeafTie =
## Return a `LeafTie` object with the `prev()` path field.
LeafTie(root: lty.root, path: lty.path.prev)
func `<`*(a, b: LeafTie): bool =
## This function assumes that the arguments `a` and `b` are normalised
## (see `normal()`.)
a.root < b.root or (a.root == b.root and a.path < b.path)
func `==`*(a, b: LeafTie): bool =
## This function assumes that the arguments `a` and `b` are normalised
## (see `normal()`.)
a.root == b.root and a.path == b.path
func cmp*(a, b: LeafTie): int =
## This function assumes that the arguments `a` and `b` are normalised
## (see `normal()`.)
if a < b: -1 elif a == b: 0 else: 1
# ------------------------------------------------------------------------------
# Public helpers: Conversions between `HashID`, `HashKey`, `Hash256`
# Public helpers: Reversible conversions between `PathID`, `HashKey`, etc.
# ------------------------------------------------------------------------------
func to*(hid: HashID; T: type Hash256): T =
result.data = hid.UInt256.toBytesBE
func to*(hid: HashID; T: type HashKey): T =
hid.UInt256.toBytesBE.T
func to*(key: HashKey; T: type HashID): T =
UInt256.fromBytesBE(key.ByteArray32).T
proc to*(key: HashKey; T: type UInt256): T =
T.fromBytesBE key.ByteArray32
func to*(key: HashKey; T: type Hash256): T =
T(data: ByteArray32(key))
func to*(key: HashKey; T: type PathID): T =
## Not necessarily reversible for shorter `PathID` values
T(pfx: UInt256.fromBytesBE key.ByteArray32, length: 64)
func to*(hash: Hash256; T: type HashKey): T =
hash.data.T
func to*(key: Hash256; T: type HashID): T =
key.data.HashKey.to(T)
# ------------------------------------------------------------------------------
# Public helpers: Miscellaneous mappings
# ------------------------------------------------------------------------------
func to*(key: HashKey; T: type Blob): T =
## Representation of a `HashKey` as `Blob` (preserving full information)
key.ByteArray32.toSeq
func to*(hid: HashID; T: type Blob): T =
## Representation of a `HashID` as `Blob` (preserving full information)
hid.UInt256.toBytesBE.toSeq
func to*(key: HashKey; T: type NibblesSeq): T =
## Representation of a `HashKey` as `NibbleSeq` (preserving full information)
key.ByteArray32.initNibbleRange()
func to*(hid: HashID; T: type NibblesSeq): T =
func to*(pid: PathID; T: type NibblesSeq): T =
## Representation of a `PathID` as `NibblesSeq` (preserving full information)
ByteArray32(hid.to(HashKey)).initNibbleRange()
let nibbles = pid.pfx.UInt256.toBytesBE.toSeq.initNibbleRange()
if pid.length < 64:
nibbles.slice(0, pid.length.int)
else:
nibbles
func to*(n: SomeUnsignedInt|UInt256; T: type HashID): T =
## Representation of a scalar as `HashID` (preserving full information)
n.u256.T
func to*(n: SomeUnsignedInt|UInt256; T: type PathID): T =
## Representation of a scalar as `PathID` (preserving full information)
T(pfx: n.u256, length: 64)
# ------------------------------------------------------------------------------
# Public helpers: Miscellaneous mappings
# ------------------------------------------------------------------------------
func digestTo*(data: openArray[byte]; T: type HashKey): T =
## Keccak hash of a `Blob` like argument, represented as a `HashKey`
keccakHash(data).data.T
func normal*(a: PathID): PathID =
## Normalise path ID representation
result = a
if 64 < a.length:
result.length = 64
elif a.length < 64:
result.pfx = a.pfx and not (1.u256 shl (4 * (64 - a.length))) - 1.u256
# ------------------------------------------------------------------------------
# Public helpers: `Tables` and `Rlp` support
# ------------------------------------------------------------------------------
func hash*(a: HashID): Hash =
func hash*(a: PathID): Hash =
## Table/KeyedQueue mixin
a.to(HashKey).ByteArray32.hash
var h: Hash = 0
h = h !& a.pfx.toBytesBE.hash
h = h !& a.length.hash
!$h
func hash*(a: HashKey): Hash =
## Table/KeyedQueue mixin
a.ByteArray32.hash
func hash*(a: HashKey): Hash {.borrow.}
func `==`*(a, b: HashKey): bool =
## Table/KeyedQueue mixin
a.ByteArray32 == b.ByteArray32
func `==`*(a, b: HashKey): bool {.borrow.}
func read*[T: HashID|HashKey](
rlp: var Rlp;
W: type T;
): T
{.gcsafe, raises: [RlpError].} =
func read*(rlp: var Rlp; T: type HashKey;): T {.gcsafe, raises: [RlpError].} =
rlp.read(Hash256).to(T)
func append*(writer: var RlpWriter, val: HashID|HashKey) =
func append*(writer: var RlpWriter, val: HashKey) =
writer.append(val.to(Hash256))
# ------------------------------------------------------------------------------
# Public helpers: `LeafTie` scalar data model
# ------------------------------------------------------------------------------
func `<`*(a, b: LeafTie): bool =
a.root < b.root or (a.root == b.root and a.path < b.path)
func `==`*(a, b: LeafTie): bool =
a.root == b.root and a.path == b.path
func cmp*(a, b: LeafTie): int =
if a < b: -1 elif a == b: 0 else: 1
func `$`*(a: LeafTie): string =
let w = $a.root.uint64.toHex & ":" & $a.path.Uint256.toHex
w.strip(leading=true, trailing=false, chars={'0'}).toLowerAscii
# ------------------------------------------------------------------------------
# Miscellaneous helpers
# ------------------------------------------------------------------------------
func `$`*(hid: HashID): string =
if hid == high(HashID):
func `$`*(key: HashKey): string =
let w = UInt256.fromBytesBE key.ByteArray32
if w == high(UInt256):
"2^256-1"
elif hid == 0.u256.HashID:
elif w == 0.u256:
"0"
elif hid == 2.u256.pow(255).HashID:
elif w == 2.u256.pow 255:
"2^255" # 800...
elif hid == 2.u256.pow(254).HashID:
elif w == 2.u256.pow 254:
"2^254" # 400..
elif hid == 2.u256.pow(253).HashID:
elif w == 2.u256.pow 253:
"2^253" # 200...
elif hid == 2.u256.pow(251).HashID:
elif w == 2.u256.pow 251:
"2^252" # 100...
else:
hid.UInt256.toHex
w.toHex
func `$`*(key: HashKey): string =
$key.to(HashID)
func `$`*(a: PathID): string =
if a.pfx != 0:
result = ($a.pfx.toHex).strip(
leading=true, trailing=false, chars={'0'}).toLowerAscii
elif a.length != 0:
result = "0"
if a.length < 64:
result &= "(" & $a.length & ")"
func `$`*(a: LeafTie): string =
if a.root != 0:
result = ($a.root.uint64.toHex).strip(
leading=true, trailing=false, chars={'0'}).toLowerAscii
else:
result = "0"
result &= ":" & $a.path
# ------------------------------------------------------------------------------
# End
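
For reference (not part of the diff), a standalone sketch of the normalisation
rule documented above, i.e. clamp `length` to 64 and clear the unused trailing
nibbles of `pfx`, assuming the `stint` package:

  import stint

  type PathID = object
    pfx: UInt256
    length: uint8

  func normal(a: PathID): PathID =
    ## Clamp `length` and clear the 4*(64-length) unused low bits of `pfx`.
    ## (Sketch only, not the patch's implementation.)
    result = a
    if 64 < a.length:
      result.length = 64
    elif a.length == 0:
      result.pfx = 0.u256
    elif a.length < 64:
      let mask = not ((1.u256 shl (4 * (64 - a.length.int))) - 1.u256)
      result.pfx = a.pfx and mask

  # a 2 nibble path with stray low bits set is cleaned up by `normal()`
  assert normal(PathID(pfx: high(UInt256), length: 2)).pfx ==
    0xff.u256 shl (4 * 62)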

View File

@ -18,6 +18,13 @@ import
results,
"."/[aristo_desc, aristo_hike]
const
AcceptableHikeStops = {
HikeBranchTailEmpty,
HikeBranchBlindEdge,
HikeExtTailEmpty,
HikeExtTailMismatch}
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
@ -29,6 +36,8 @@ proc fetchPayloadImpl(
let vid =
if rc.error[0].legs.len == 0: VertexID(0)
else: rc.error[0].legs[^1].wp.vid
if rc.error[1] in AcceptableHikeStops:
return err((vid, FetchPathNotFound))
return err((vid, rc.error[1]))
ok rc.value.legs[^1].wp.vtx.lData
@ -63,7 +72,7 @@ proc fetchPayload*(
return err((VertexID(0),LeafKeyInvalid))
db.fetchPayloadImpl(root, path)
proc contains*(
proc hasPath*(
db: AristoDbRef; # Database
root: VertexID;
path: openArray[byte]; # Key of database record
@ -75,7 +84,9 @@ proc contains*(
let rc = db.fetchPayloadImpl(root, path)
if rc.isOk:
return ok(true)
return ok(false)
if rc.error[1] == FetchPathNotFound:
return ok(false)
err(rc.error)
# ------------------------------------------------------------------------------
# End
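
The `hasPath()` pattern above, i.e. fold only the benign "not found" case into
`false` and propagate every other error, can be sketched generically with the
`results` package (the types and procs below are made up for the sketch):

  import results

  type DemoError = enum
    FetchPathNotFound, SomeOtherError

  # demo fetcher, not part of the patch
  proc fetchDemo(found: bool): Result[int, DemoError] =
    if found: ok(42) else: err(FetchPathNotFound)

  proc hasDemo(found: bool): Result[bool, DemoError] =
    let rc = fetchDemo(found)
    if rc.isOk:
      return ok(true)
    if rc.error == FetchPathNotFound:
      return ok(false)
    err(rc.error)

  assert hasDemo(true).value and not hasDemo(false).value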

View File

@ -75,6 +75,8 @@ proc hikeUp*(
if not root.isValid:
return err((hike,HikeRootMissing))
if path.len == 0:
return err((hike,HikeEmptyPath))
var vid = root
while vid.isValid:

View File

@ -11,7 +11,7 @@
## Aristo DB -- Patricia Trie builder, raw node insertion
## ======================================================
##
## This module merges `HashID` values as hexary lookup paths into the
## This module merges `PathID` values as hexary lookup paths into the
## `Patricia Trie`. When changing vertices (aka nodes without Merkle hashes),
## associated (but separated) Merkle hashes will be deleted unless locked.
## Instead of deleting locked hashes error handling is applied.
@ -627,8 +627,8 @@ proc merge*(
# Double check the result until the code is more reliable
block:
let rc = okHike.to(NibblesSeq).pathToKey
if rc.isErr or rc.value != leafTie.path.to(HashKey):
let rc = okHike.to(NibblesSeq).pathToTag
if rc.isErr or rc.value != leafTie.path:
return err(MergeAssemblyFailed) # Ooops
# Update leaf access cache
@ -640,12 +640,12 @@ proc merge*(
proc merge*(
db: AristoDbRef; # Database, top layer
root: VertexID; # MPT state root
path: openArray[byte]; # Leaf item to add to the database
path: openArray[byte]; # Even nibbled byte path
payload: PayloadRef; # Payload value
): Result[bool,AristoError] =
## Variant of `merge()` for `(root,path)` arguments instead of a `LeafTie`
## object.
let lty = LeafTie(root: root, path: ? path.pathToTag)
let lty = LeafTie(root: root, path: ? path.initNibbleRange.pathToTag)
db.merge(lty, payload).to(typeof result)
proc merge*(
@ -688,7 +688,7 @@ proc merge*(
proc merge*(
db: AristoDbRef; # Database, top layer
path: HashID; # Path into database
path: PathID; # Path into database
rlpData: openArray[byte]; # RLP encoded payload data
): Result[bool,AristoError] =
## Variant of `merge()` for storing a single item with implicit state root
@ -697,7 +697,7 @@ proc merge*(
db.merge(
LeafTie(
root: VertexID(1),
path: path),
path: path.normal),
PayloadRef(
pType: RlpData,
rlpBlob: @rlpData)).to(typeof result)

View File

@ -72,7 +72,7 @@ proc branchNibbleMax*(vtx: VertexRef; maxInx: int8): int8 =
# ------------------
proc toTLeafTiePayload(hike: Hike): (LeafTie,PayloadRef) =
proc toLeafTiePayload(hike: Hike): (LeafTie,PayloadRef) =
## Shortcut for iterators. This function will gloriously crash unless the
## `hike` argument is complete.
(LeafTie(root: hike.root, path: hike.to(NibblesSeq).pathToTag.value),
@ -167,10 +167,15 @@ proc zeroAdjust(
case root.vType:
of Branch:
# Find first non-dangling link and assign it
if hike.tail.len == 0:
break fail
let n = root.branchBorderNibble hike.tail[0].int8
let nibbleID = block:
when doLeast:
if hike.tail.len == 0: 0i8
else: hike.tail[0].int8
else:
if hike.tail.len == 0:
break fail
hike.tail[0].int8
let n = root.branchBorderNibble nibbleID
if n < 0:
# Before or after the database range
return err((hike.root,NearbyBeyondRange))
@ -179,26 +184,16 @@ proc zeroAdjust(
of Extension:
let ePfx = root.ePfx
# Must be followed by a branch vertex
if hike.tail.len < 2 or not hike.accept(ePfx):
if not hike.accept ePfx:
break fail
let vtx = db.getVtx root.eVid
if not vtx.isValid:
break fail
let ePfxLen = ePfx.len
if hike.tail.len <= ePfxLen:
return err((root.eVid,NearbyPathTailInxOverflow))
let tailPfx = hike.tail.slice(0,ePfxLen)
when doLeast:
if ePfx < tailPfx:
return err((root.eVid,NearbyBeyondRange))
else:
if tailPfx < ePfx:
return err((root.eVid,NearbyBeyondRange))
pfx = ePfx
of Leaf:
pfx = root.lPfx
if not hike.accept(pfx):
if not hike.accept pfx:
# Before or after the database range
return err((hike.root,NearbyBeyondRange))
@ -368,13 +363,12 @@ proc nearbyNext(
# Handle some pathological cases
hike.finalise(db, moveRight)
proc nearbyNextLeafTie(
lty: LeafTie; # Some `Patricia Trie` path
db: AristoDbRef; # Database layer
hikeLenMax: static[int]; # Beware of loops (if any)
moveRight:static[bool]; # Direction of next vertex
): Result[HashID,(VertexID,AristoError)] =
): Result[PathID,(VertexID,AristoError)] =
## Variant of `nearbyNext()`, convenience wrapper
let hike = ? lty.hikeUp(db).to(Hike).nearbyNext(db, hikeLenMax, moveRight)
@ -383,7 +377,7 @@ proc nearbyNextLeafTie(
return err((hike.legs[^1].wp.vid,NearbyLeafExpected))
let rc = hike.legsTo(NibblesSeq).pathToKey
if rc.isOk:
return ok rc.value.to(HashID)
return ok rc.value.to(PathID)
return err((VertexID(0),rc.error))
err((VertexID(0),NearbyLeafExpected))
@ -411,7 +405,7 @@ proc right*(
lty: LeafTie; # Some `Patricia Trie` path
db: AristoDbRef; # Database layer
): Result[LeafTie,(VertexID,AristoError)] =
## Variant of `nearbyRight()` working with a `HashID` argument instead
## Variant of `nearbyRight()` working with a `LeafTie` argument instead
## of a `Hike`.
ok LeafTie(
root: lty.root,
@ -428,14 +422,14 @@ iterator right*(
rc = hike.right db
while rc.isOK:
hike = rc.value
let (key, pyl) = hike.toTLeafTiePayload
let (key, pyl) = hike.toLeafTiePayload
yield (key, pyl)
if high(HashID) <= key.path:
if high(PathID) <= key.path:
break
# Increment `key` by one and update `hike`. In many cases, the current
# `hike` can be modified and re-used which saves some database lookups.
block:
block reuseHike:
let tail = hike.legs[^1].wp.vtx.lPfx
if 0 < tail.len:
let topNibble = tail[tail.len - 1]
@ -443,16 +437,16 @@ iterator right*(
let newNibble = @[topNibble+1].initNibbleRange.slice(1)
hike.tail = tail.slice(0, tail.len - 1) & newNibble
hike.legs.setLen(hike.legs.len - 1)
break
break reuseHike
if 1 < tail.len:
let nxtNibble = tail[tail.len - 2]
if nxtNibble < 15:
let dblNibble = @[((nxtNibble+1) shl 4) + 0].initNibbleRange
hike.tail = tail.slice(0, tail.len - 2) & dblNibble
hike.legs.setLen(hike.legs.len - 1)
break
break reuseHike
# Fall back to default method
hike = (key + 1).hikeUp(db).to(Hike)
hike = key.next.hikeUp(db).to(Hike)
rc = hike.right db
# End while
@ -473,7 +467,7 @@ proc left*(
lty: LeafTie; # Some `Patricia Trie` path
db: AristoDbRef; # Database layer
): Result[LeafTie,(VertexID,AristoError)] =
## Similar to `nearbyRight()` for `HashID` argument instead of a `Hike`.
## Similar to `nearbyRight()` for `LeafTie` argument instead of a `Hike`.
ok LeafTie(
root: lty.root,
path: ? lty.nearbyNextLeafTie(db, 64, moveRight=false))
@ -491,14 +485,14 @@ iterator left*(
rc = hike.left db
while rc.isOK:
hike = rc.value
let (key, pyl) = hike.toTLeafTiePayload
let (key, pyl) = hike.toLeafTiePayload
yield (key, pyl)
if key.path <= low(HashID):
if key.path <= low(PathID):
break
# Decrement `key` by one and update `hike`. In many cases, the current
# `hike` can be modified and re-used which saves some database lookups.
block:
block reuseHike:
let tail = hike.legs[^1].wp.vtx.lPfx
if 0 < tail.len:
let topNibble = tail[tail.len - 1]
@ -506,16 +500,16 @@ iterator left*(
let newNibble = @[topNibble - 1].initNibbleRange.slice(1)
hike.tail = tail.slice(0, tail.len - 1) & newNibble
hike.legs.setLen(hike.legs.len - 1)
break
break reuseHike
if 1 < tail.len:
let nxtNibble = tail[tail.len - 2]
if 0 < nxtNibble:
let dblNibble = @[((nxtNibble-1) shl 4) + 15].initNibbleRange
hike.tail = tail.slice(0, tail.len - 2) & dblNibble
hike.legs.setLen(hike.legs.len - 1)
break
break reuseHike
# Fall back to default method
hike = (key - 1).hikeUp(db).to(Hike)
hike = key.prev.hikeUp(db).to(Hike)
rc = hike.left db
# End while

View File

@ -30,14 +30,16 @@ import
#
# where the `ignored` part is typically expected to be a zero nibble.
func pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc pathAsBlob*(keyOrTag: HashKey|HashID): Blob =
keyOrTag.pathAsNibbles.hexPrefixEncode(isLeaf=true)
func pathAsBlob*(keyOrTag: HashKey|PathID): Blob =
keyOrTag.to(NibblesSeq).hexPrefixEncode(isLeaf=true)
proc pathToKey*(partPath: NibblesSeq): Result[HashKey,AristoError] =
func pathToKey*(partPath: NibblesSeq): Result[HashKey,AristoError] =
var key: ByteArray32
if partPath.len == 64:
# Trailing dummy nibbles (aka no nibbles) force a nibble seq reorg
@ -46,7 +48,7 @@ proc pathToKey*(partPath: NibblesSeq): Result[HashKey,AristoError] =
return ok(key.HashKey)
err(PathExpected64Nibbles)
proc pathToKey*(
func pathToKey*(
partPath: openArray[byte];
): Result[HashKey,AristoError] =
let (isLeaf,pathSegment) = partPath.hexPrefixDecode
@ -54,14 +56,17 @@ proc pathToKey*(
return pathSegment.pathToKey()
err(PathExpectedLeaf)
proc pathToTag*(
partPath: NibblesSeq|openArray[byte];
): Result[HashID,AristoError] =
ok (? partPath.pathToKey).to(HashID)
func pathToTag*(partPath: NibblesSeq): Result[PathID,AristoError] =
## Nickname `tag` for `PathID`
if partPath.len <= 64:
return ok PathID(
pfx: UInt256.fromBytesBE partPath.pathPfxPad(0).getBytes(),
length: partPath.len.uint8)
err(PathAtMost64Nibbles)
# --------------------
proc pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
func pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
## Extend (or cut) the argument nibbles sequence `pfx` for generating a
## `NibblesSeq` with exactly 64 nibbles, the equivalent of a path key.
##
@ -80,7 +85,7 @@ proc pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
let nope = seq[byte].default.initNibbleRange
result = pfx.slice(0,64) & nope # nope forces re-alignment
proc pathPfxPadKey*(pfx: NibblesSeq; dblNibble: static[byte]): HashKey =
func pathPfxPadKey*(pfx: NibblesSeq; dblNibble: static[byte]): HashKey =
## Variant of `pathPfxPad()`.
##
## Extend (or cut) the argument nibbles sequence `pfx` for generating a
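
The `pathToTag()` conversion above can be illustrated without the `NibblesSeq`
machinery. Below is a hypothetical standalone helper (assuming `stint`) that
maps an even-nibbled byte path of at most 32 bytes to the left-aligned
`(pfx, length)` pair of a `PathID`:

  import stint

  # demo helper, not part of the patch
  func toPathDemo(path: openArray[byte]): tuple[pfx: UInt256, length: uint8] =
    ## Left align the path bytes in a 32 byte buffer, zero padding the rest.
    doAssert path.len <= 32
    var padded: array[32, byte]
    for i, b in path:
      padded[i] = b
    (pfx: UInt256.fromBytesBE(padded), length: uint8(2 * path.len))

  let p = toPathDemo([0xab'u8, 0xcd])
  assert p.length == 4
  assert p.pfx == 0xabcd.u256 shl (4 * 60)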

View File

@ -90,7 +90,7 @@ proc get*(
return ok(data)
db.getBE key
proc contains*(
proc hasKey*(
db: KvtDbRef; # Database
key: openArray[byte]; # Key of database record
): Result[bool,KvtError] =

View File

@ -25,7 +25,8 @@ template xCheck*(expr: untyped): untyped =
template xCheck*(expr: untyped; ifFalse: untyped): untyped =
## Note: this check will invoke `expr` twice
if not (expr):
ifFalse
block:
ifFalse
check expr
return

View File

@ -74,7 +74,7 @@ proc fList(be: BackendRef): seq[(QueueID,FilterRef)] =
func ppFil(w: FilterRef; db = AristoDbRef(nil)): string =
proc qq(key: HashKey; db: AristoDbRef): string =
if db.isNil:
let n = key.to(HashID).UInt256
let n = key.to(UInt256)
if n == 0: "£ø" else: "£" & $n
else:
HashLabel(root: VertexID(1), key: key).pp(db)
@ -377,7 +377,7 @@ proc checkFilterTrancoderOk(
# -------------------------
func to(fid: FilterID; T: type HashKey): T =
fid.uint64.to(HashID).to(T)
fid.uint64.u256.toBytesBE.T
proc qid2fidFn(be: BackendRef): QuFilMap =
result = proc(qid: QueueID): FilterID =
@ -491,12 +491,12 @@ proc validateFifo(
## .. | .. | ..
##
var
lastTrg = serial.u256.to(HashID)
lastTrg = serial.u256
inx = 0
lastFid = FilterID(serial+1)
if hashesOk:
lastTrg = be.getKeyFn(VertexID(1)).get(otherwise = VOID_HASH_KEY).to(HashID)
lastTrg = be.getKeyFn(VertexID(1)).get(otherwise=VOID_HASH_KEY).to(UInt256)
for chn,fifo in be.fifos:
for (qid,filter) in fifo:
@ -504,8 +504,8 @@ proc validateFifo(
# Check filter objects
xCheck chn == (qid.uint64 shr 62).int
xCheck filter != FilterRef(nil)
xCheck filter.src.to(HashID) == lastTrg
lastTrg = filter.trg.to(HashID)
xCheck filter.src.to(UInt256) == lastTrg
lastTrg = filter.trg.to(UInt256)
# Check random access
xCheck qid == be.filters[inx]

View File

@ -162,7 +162,7 @@ proc to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
kvpLst: w.data.accounts.mapIt(LeafTiePayload(
leafTie: LeafTie(
root: rootVid,
path: it.accKey.to(HashKey).to(HashID)),
path: it.accKey.to(HashKey).to(PathID)),
payload: PayloadRef(pType: RawData, rawBlob: it.accBlob))))
proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
@ -179,7 +179,7 @@ proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
kvpLst: w.data.mapIt(LeafTiePayload(
leafTie: LeafTie(
root: rootVid,
path: it.slotHash.to(HashKey).to(HashID)),
path: it.slotHash.to(HashKey).to(PathID)),
payload: PayloadRef(pType: RawData, rawBlob: it.slotData))))
if 0 < result.len:
result[^1].proof = s.data.proof

View File

@ -230,7 +230,7 @@ proc fwdWalkVerify(
n = 0
for (key,_) in db.right low(LeafTie,root):
xCheck key in leftOver:
noisy.say "*** fwdWalkVerify", " id=", n + (nLeafs + 1) * debugID
noisy.say "*** fwdWalkVerify", "id=", n + (nLeafs + 1) * debugID
leftOver.excl key
last = key
n.inc
@ -239,13 +239,10 @@ proc fwdWalkVerify(
if last.root == VertexID(0):
last = low(LeafTie,root)
elif last != high(LeafTie,root):
last = last + 1
last = last.next
let rc = last.right db
if rc.isOk:
xCheck rc == WalkStopErr
else:
xCheck rc.error[1] == NearbyBeyondRange
xCheck rc.isErr
xCheck rc.error[1] == NearbyBeyondRange
xCheck n == nLeafs
true
@ -274,13 +271,10 @@ proc revWalkVerify(
if last.root == VertexID(0):
last = high(LeafTie,root)
elif last != low(LeafTie,root):
last = last - 1
last = last.prev
let rc = last.left db
if rc.isOk:
xCheck rc == WalkStopErr
else:
xCheck rc.error[1] == NearbyBeyondRange
xCheck rc.isErr
xCheck rc.error[1] == NearbyBeyondRange
xCheck n == nLeafs
true
@ -475,8 +469,8 @@ proc testTxSpanMultiInstances*(
dx: seq[AristoDbRef]
var genID = genBase
proc newHashID(): HashID =
result = HashID(genID.u256)
proc newPathID(): PathID =
result = PathID(pfx: genID.u256, length: 64)
genID.inc
proc newPayload(): Blob =
result = @[genID].encode
@ -501,7 +495,7 @@ proc testTxSpanMultiInstances*(
# Add some data and first transaction
block:
let rc = db.merge(newHashID(), newPayload())
let rc = db.merge(newPathID(), newPayload())
xCheckRc rc.error == 0
block:
let rc = db.checkTop(relax=true)
@ -515,7 +509,7 @@ proc testTxSpanMultiInstances*(
xCheckRc rc.error == 0
dx.add rc.value
block:
let rc = dx[^1].merge(newHashID(), newPayload())
let rc = dx[^1].merge(newPathID(), newPayload())
xCheckRc rc.error == 0
block:
let rc = db.checkTop(relax=true)
@ -546,10 +540,10 @@ proc testTxSpanMultiInstances*(
# Add more data ..
block:
let rc = db.merge(newHashID(), newPayload())
let rc = db.merge(newPathID(), newPayload())
xCheckRc rc.error == 0
for n in 0 ..< dx.len:
let rc = dx[n].merge(newHashID(), newPayload())
let rc = dx[n].merge(newPathID(), newPayload())
xCheckRc rc.error == 0
#show(3)