Aristo db update delete functionality (#1621)

* Fix missing branch checks in transcoder

why:
  Symmetry problem. `Blobify()` allowed for encoding degenerate branch
  vertices while `Deblobify()` rejected decoding wrongly encoded data.

* Update memory backend so that it rejects storing bogus vertices.

why:
  Error behaviour made similar to the rocks DB backend.

* Make sure that leaf vertex IDs are not repurposed

why:
  This makes it easier to record leaf node changes

* Update error return code for next()/right() traversal

why:
  Returning offending vertex ID (besides error code) helps debugging

* Update Merkle hasher for deleted nodes

why:
  Not implemented, yet

also:
  Provide cache & backend consistency check functions. This was
  partly re-implemented from `hashifyCheck()`

* Simplify some unit tests

* Fix delete function

why:
  Was conceptually wrong
This commit is contained in:
Jordan Hrycaj 2023-06-30 23:22:33 +01:00 committed by GitHub
parent aa6d47864f
commit dd1c8ed6f2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 1489 additions and 526 deletions

View File

@ -0,0 +1,93 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Aristo DB -- Consistency checks
## ===============================
##
{.push raises: [].}
import
std/[algorithm, sequtils, sets, tables],
eth/common,
stew/[interval_set, results],
./aristo_init/[aristo_memory, aristo_rocksdb],
"."/[aristo_desc, aristo_get, aristo_init, aristo_vid],
./aristo_hashify/hashify_helper,
./aristo_check/[check_be, check_cache]
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc checkCache*(
    db: AristoDb;                      # Database, top layer
    relax = false;                     # Check existing hashes only
      ): Result[void,(VertexID,AristoError)] =
  ## Verify that the cache structure is correct as it would be after `merge()`
  ## and `hashify()` operations. Unless `relax` is set `true` it would not
  ## fully check against the backend, which is typically not applicable after
  ## `delete()` operations.
  ##
  ## The following is verified:
  ##
  ## * Each `sTab[]` entry has a valid vertex which can be compiled as a node.
  ##   If `relax` is set `false`, the Merkle hashes are recompiled and must
  ##   match.
  ##
  ## * The hash table `kMap[]` and its inverse lookup table `pAmk[]` must
  ##   correspond.
  ##
  block modeSpecific:
    # Pick the verifier variant matching the `relax` flag and bail out on
    # the first error it reports.
    let rc =
      if relax: db.checkCacheRelaxed()
      else: db.checkCacheStrict()
    if rc.isErr:
      return rc
  # Checks shared by both modes (e.g. `kMap[]`/`pAmk[]` correspondence.)
  db.checkCacheCommon()
proc checkBE*(
    db: AristoDb;                      # Database, top layer
    relax = true;                      # Not re-compiling hashes if `true`
    cache = true;                      # Also verify cache
      ): Result[void,(VertexID,AristoError)] =
  ## Verify database backend structure. If the argument `relax` is set
  ## `false`, all necessary Merkle hashes are compiled and verified. If the
  ## argument `cache` is set `true`, the cache is also checked so that a
  ## `save()` operation will leave the backend consistent.
  ##
  ## The following is verified:
  ##
  ## * Each vertex ID on the structural table can be represented as a Merkle
  ##   patricia Tree node. If `relax` is set `false`, the Merkle hashes are
  ##   all recompiled and must match.
  ##
  ## * The set of free vertex IDs as potentially supplied by the ID generator
  ##   state is disjunct to the set of already used vertex IDs on the
  ##   database. Moreover, the union of both sets is equivalent to the set of
  ##   positive `uint64` numbers.
  ##
  if db.backend.isNil:
    # Nothing to verify without a backend attached.
    return ok()
  # Dispatch on the concrete backend type so the generic walker in
  # `check_be` sees the proper iterators.
  let be = db.to(TypedBackendRef)
  case be.kind:
  of BackendMemory:
    be.MemBackendRef.checkBE(db, cache=cache, relax=relax)
  of BackendRocksDB:
    be.RdbBackendRef.checkBE(db, cache=cache, relax=relax)
  of BackendNone:
    ok()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,175 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/[algorithm, sequtils, sets, tables],
eth/common,
stew/interval_set,
../aristo_hashify/hashify_helper,
../aristo_init/[aristo_memory, aristo_rocksdb],
".."/[aristo_desc, aristo_get, aristo_vid]
const
Vid2 = @[VertexID(2)].toHashSet
# ------------------------------------------------------------------------------
# Private helper
# ------------------------------------------------------------------------------
proc invTo(s: IntervalSetRef[VertexID,uint64]; T: type HashSet[VertexID]): T =
  ## Convert the complement of the argument list `s` to a set of vertex IDs
  ## as it would appear with a vertex generator state list.
  # A set covering the full `uint64` range yields the empty result.
  if high(uint64) <= s.total:
    return
  for iv in s.increasing:
    if iv.maxPt == high(VertexID):
      # Open-ended tail interval: only its start point is recorded, mirroring
      # the generator-state convention of a single "everything above" entry.
      result.incl iv.minPt
    else:
      for id in iv.minPt .. iv.maxPt:
        result.incl id
proc toNodeBe(
    vtx: VertexRef;                    # Vertex to convert
    db: AristoDb;                      # Database, top layer
      ): Result[NodeRef,VertexID] =
  ## Similar to `toNode()` but fetching from the backend only. On failure,
  ## the vertex ID of the link whose Merkle key could not be resolved on the
  ## backend is returned as error value.
  case vtx.vType:
  of Leaf:
    # A leaf carries no links, so it converts unconditionally.
    return ok NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
  of Branch:
    # Fix: dropped the unused local `var missing: seq[VertexID]` from the
    # original -- the proc returns on the first unresolvable link anyway.
    let node = NodeRef(vType: Branch, bVid: vtx.bVid)
    for n in 0 .. 15:
      let vid = vtx.bVid[n]
      if vid.isValid:
        let rc = db.getKeyBackend vid
        if rc.isOk and rc.value.isValid:
          node.key[n] = rc.value
        else:
          # Link key unavailable on the backend => cannot compile node
          return err(vid)
      else:
        node.key[n] = VOID_HASH_KEY
    return ok node
  of Extension:
    let
      vid = vtx.eVid
      rc = db.getKeyBackend vid
    if rc.isOk and rc.value.isValid:
      let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid)
      node.key[0] = rc.value
      return ok node
    return err(vid)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc checkBE*[T](
    be: T;                             # backend descriptor
    db: AristoDb;                      # Database, top layer
    relax: bool;                       # Not compiling hashes if `true`
    cache: bool;                       # Also verify cache
      ): Result[void,(VertexID,AristoError)] =
  ## Make sure that each vertex has a Merkle hash and vice versa. Also check
  ## the vertex ID generator state.
  # `vids` starts as the full ID range [1,high]; every ID seen on the backend
  # is carved out below, so what remains models the free/generator IDs.
  let vids = IntervalSetRef[VertexID,uint64].init()
  discard vids.merge Interval[VertexID,uint64].new(VertexID(1),high(VertexID))

  # Pass 1: every backend vertex must be valid and have a backend key.
  for (_,vid,vtx) in be.walkVtx:
    if not vtx.isValid:
      return err((vid,CheckBeVtxInvalid))
    let rc = db.getKeyBackend vid
    if rc.isErr or not rc.value.isValid:
      return err((vid,CheckBeKeyMissing))

  # Pass 2: every backend key must refer to a vertex that compiles to a node
  # using backend data only; optionally re-verify the Merkle hash.
  for (_,vid,key) in be.walkKey:
    if not key.isvalid:
      return err((vid,CheckBeKeyInvalid))
    let rc = db.getVtxBackend vid
    if rc.isErr or not rc.value.isValid:
      return err((vid,CheckBeVtxMissing))
    let rx = rc.value.toNodeBe db      # backend only
    if rx.isErr:
      return err((vid,CheckBeKeyCantCompile))
    if not relax:
      # Recompiled hash must match the stored key.
      let expected = rx.value.toHashKey
      if expected != key:
        return err((vid,CheckBeKeyMismatch))
    # Mark this ID as used, i.e. not available from the generator.
    discard vids.reduce Interval[VertexID,uint64].new(vid,vid)

  # Compare calculated state against database state
  block:
    # Extract vertex ID generator state
    var vGen: HashSet[VertexID]
    for (_,_,w) in be.walkIdg:
      vGen = vGen + w.toHashSet
    let
      vGenExpected = vids.invTo(HashSet[VertexID])
      delta = vGenExpected -+- vGen    # symmetric difference
    if 0 < delta.len:
      # Exclude fringe case when there is a single root vertex only
      if vGenExpected != Vid2 or 0 < vGen.len:
        # Report the largest offending ID (sorted ascending, take last.)
        return err((delta.toSeq.sorted[^1],CheckBeGarbledVGen))

  # Check cache against backend
  if cache:
    # Check structural table
    for (vid,vtx) in db.top.sTab.pairs:
      # A `kMap[]` entry must exist.
      if not db.top.kMap.hasKey vid:
        return err((vid,CheckBeCacheKeyMissing))
      if vtx.isValid:
        # Register existing vid against backend generator state
        discard vids.reduce Interval[VertexID,uint64].new(vid,vid)
      else:
        # Some vertex is to be deleted, the key must be empty
        let lbl = db.top.kMap.getOrVoid vid
        if lbl.isValid:
          return err((vid,CheckBeCacheKeyNonEmpty))
        # There must be a representation on the backend DB
        if db.getVtxBackend(vid).isErr:
          return err((vid,CheckBeCacheVidUnsynced))
        # Register deleted vid against backend generator state
        discard vids.merge Interval[VertexID,uint64].new(vid,vid)

    # Check key table
    for (vid,lbl) in db.top.kMap.pairs:
      let vtx = db.getVtx vid
      if not db.top.sTab.hasKey(vid) and not vtx.isValid:
        return err((vid,CheckBeCacheKeyDangling))
      if lbl.isValid and not relax:
        if not vtx.isValid:
          return err((vid,CheckBeCacheVtxDangling))
        let rc = vtx.toNode db         # compile cache first
        if rc.isErr:
          return err((vid,CheckBeCacheKeyCantCompile))
        let expected = rc.value.toHashKey
        if expected != lbl.key:
          return err((vid,CheckBeCacheKeyMismatch))

    # Check vGen. NOTE(review): `AristoDB` below is style-insensitively the
    # same identifier as `AristoDb` used elsewhere -- consider unifying the
    # spelling.
    var tmp = AristoDB(top: AristoLayerRef(vGen: db.top.vGen))
    tmp.vidReorg()
    let
      vGen = tmp.top.vGen.toHashSet
      vGenExpected = vids.invTo(HashSet[VertexID])
      delta = vGenExpected -+- vGen    # symmetric difference
    if 0 < delta.len:
      # Exclude fringe case when there is a single root vertex only
      if vGenExpected != Vid2 or 0 < vGen.len:
        return err((delta.toSeq.sorted[^1],CheckBeCacheGarbledVGen))
  ok()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,126 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/[sequtils, sets, tables],
eth/common,
stew/results,
../aristo_hashify/hashify_helper,
".."/[aristo_desc, aristo_get]
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc checkCacheStrict*(
    db: AristoDb;                      # Database, top layer
      ): Result[void,(VertexID,AristoError)] =
  ## Strict cache verifier: every `sTab[]` entry must compile to a node,
  ## carry a valid `kMap[]` label whose key matches the recompiled Merkle
  ## hash, and be mirrored by a matching reverse entry on `pAmk[]`.
  for (vid,vtx) in db.top.sTab.pairs:
    let node = block:
      let rc = vtx.toNode db
      if rc.isErr:
        return err((vid,CheckStkVtxIncomplete))
      rc.value
    let lbl = db.top.kMap.getOrVoid vid
    if not lbl.isValid:
      return err((vid,CheckStkVtxKeyMissing))
    if lbl.key != node.toHashKey:
      return err((vid,CheckStkVtxKeyMismatch))
    let backLink = db.top.pAmk.getOrVoid lbl
    if not backLink.isValid:
      return err((vid,CheckStkRevKeyMissing))
    if backLink != vid:
      return err((vid,CheckStkRevKeyMismatch))
  if 0 < db.top.pAmk.len and db.top.pAmk.len < db.top.sTab.len:
    # Cannot have less changes than cached entries
    return err((VertexID(0),CheckStkVtxCountMismatch))
  ok()
proc checkCacheRelaxed*(
    db: AristoDb;                      # Database, top layer
      ): Result[void,(VertexID,AristoError)] =
  ## Relaxed cache verifier. If locked vertices (`pPrf[]`) exist, only those
  ## are verified; otherwise all `kMap[]` entries with a valid label are
  ## checked as far as their vertex can be compiled.
  if 0 < db.top.pPrf.len:
    for vid in db.top.pPrf:
      let vtx = db.top.sTab.getOrVoid vid
      if vtx.isValid:
        let rc = vtx.toNode db
        if rc.isErr:
          return err((vid,CheckRlxVtxIncomplete))
        let lbl = db.top.kMap.getOrVoid vid
        if not lbl.isValid:
          return err((vid,CheckRlxVtxKeyMissing))
        if lbl.key != rc.value.toHashKey:
          return err((vid,CheckRlxVtxKeyMismatch))
        let revVid = db.top.pAmk.getOrVoid lbl
        if not revVid.isValid:
          return err((vid,CheckRlxRevKeyMissing))
        if revVid != vid:
          return err((vid,CheckRlxRevKeyMismatch))
      else:
        # Is be a deleted entry: there must be a backend image and the cache
        # key must be present but empty.
        let rc = db.getVtxBackend vid
        if rc.isErr:
          return err((vid,CheckRlxVidVtxBeMissing))
        if not db.top.kMap.hasKey vid:
          return err((vid,CheckRlxVtxEmptyKeyMissing))
        if db.top.kMap.getOrVoid(vid).isValid:
          return err((vid,CheckRlxVtxEmptyKeyExpected))
  else:
    for (vid,lbl) in db.top.kMap.pairs:
      if lbl.isValid:                  # Otherwise to be deleted
        let vtx = db.getVtx vid
        if vtx.isValid:
          let rc = vtx.toNode db
          if rc.isOk:
            if lbl.key != rc.value.toHashKey:
              return err((vid,CheckRlxVtxKeyMismatch))
            let revVid = db.top.pAmk.getOrVoid lbl
            if not revVid.isValid:
              return err((vid,CheckRlxRevKeyMissing))
            # Bug fix: the original returned `CheckRlxRevKeyMissing` for a
            # mismatching reverse link here, followed by an unreachable
            # duplicate of the same test. Collapsed into one check with the
            # proper `CheckRlxRevKeyMismatch` code.
            if revVid != vid:
              return err((vid,CheckRlxRevKeyMismatch))
  ok()
proc checkCacheCommon*(
    db: AristoDb;                      # Database, top layer
      ): Result[void,(VertexID,AristoError)] =
  ## Consistency checks shared by the strict and relaxed cache verifiers.
  # Some `kMap[]` entries may be void, indicating backend deletion; only the
  # valid ones must be mirrored one-to-one by `pAmk[]`.
  var nValidKeys = 0
  for lbl in db.top.kMap.values:
    if lbl.isValid:
      nValidKeys.inc
  if db.top.pAmk.len != nValidKeys:
    # Pin down the offending entry: a reverse link without a `kMap[]` entry,
    # or a vertex ID registered twice on `pAmk[]`.
    var seen: HashSet[VertexID]
    for (key,vid) in db.top.pAmk.pairs:
      if not db.top.kMap.hasKey(vid):
        return err((vid,CheckAnyRevVtxMissing))
      if vid in seen:
        return err((vid,CheckAnyRevVtxDup))
      seen.incl vid
    return err((VertexID(0),CheckAnyRevCountMismatch)) # should not apply(!)
  # Locked vertices must always have a key entry.
  for vid in db.top.pPrf:
    if not db.top.kMap.hasKey(vid):
      return err((vid,CheckAnyVtxLockWithoutKey))
  ok()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -78,6 +78,9 @@ proc ppVid(vid: VertexID; pfx = true): string =
else:
result &= "ø"
proc ppVidList(vGen: openArray[VertexID]): string =
"[" & vGen.mapIt(it.ppVid).join(",") & "]"
proc vidCode(lbl: HashLabel, db: AristoDb): uint64 =
if lbl.isValid:
if not db.top.isNil:
@ -165,7 +168,7 @@ proc ppPayload(p: PayloadRef, db: AristoDb): string =
proc ppVtx(nd: VertexRef, db: AristoDb, vid: VertexID): string =
if not nd.isValid:
result = "n/a"
result = "ø"
else:
if db.top.isNil or not vid.isValid or vid in db.top.pPrf:
result = ["L(", "X(", "B("][nd.vType.ord]
@ -186,6 +189,29 @@ proc ppVtx(nd: VertexRef, db: AristoDb, vid: VertexID): string =
result &= ","
result &= ")"
proc ppSTab(
sTab: Table[VertexID,VertexRef];
db = AristoDb();
indent = 4;
): string =
"{" & sTab.sortedKeys
.mapIt((it, sTab.getOrVoid it))
.mapIt("(" & it[0].ppVid & "," & it[1].ppVtx(db,it[0]) & ")")
.join("," & indent.toPfx(1)) & "}"
proc ppLTab(
lTab: Table[LeafTie,VertexID];
indent = 4;
): string =
var db = AristoDb()
"{" & lTab.sortedKeys
.mapIt((it, lTab.getOrVoid it))
.mapIt("(" & it[0].ppLeafTie(db) & "," & it[1].ppVid & ")")
.join("," & indent.toPfx(1)) & "}"
proc ppPPrf(pPrf: HashSet[VertexID]): string =
"{" & pPrf.sortedKeys.mapIt(it.ppVid).join(",") & "}"
proc ppXMap*(
db: AristoDb;
kMap: Table[VertexID,HashLabel];
@ -215,7 +241,7 @@ proc ppXMap*(
s &= "(" & s
s &= ",*" & $count
else:
s &= "£r(!)"
s &= "£ø"
if s[0] == '(':
s &= ")"
s & ","
@ -233,7 +259,11 @@ proc ppXMap*(
r.inc
if r != vid:
if i+1 != n:
result &= ".. " & revKeys[n-1].ppRevlabel
if i+1 == n-1:
result &= pfx
else:
result &= ".. "
result &= revKeys[n-1].ppRevlabel
result &= pfx & vid.ppRevlabel
(i, r) = (n, vid)
if i < revKeys.len - 1:
@ -261,13 +291,18 @@ proc ppXMap*(
result &= pfx
result &= cache[i][0].ppNtry
for n in 1 ..< cache.len:
let w = cache[n]
r[0].inc
r[1].inc
let
m = cache[n-1]
w = cache[n]
r = (r[0]+1, r[1]+1, r[2])
if r != w or w[2]:
if i+1 != n:
result &= ".. " & cache[n-1][0].ppNtry
result &= pfx & cache[n][0].ppNtry
if i+1 == n-1:
result &= pfx
else:
result &= ".. "
result &= m[0].ppNtry
result &= pfx & w[0].ppNtry
(i, r) = (n, w)
if i < cache.len - 1:
if i+1 != cache.len - 1:
@ -296,6 +331,63 @@ proc ppBe[T](be: T; db: AristoDb; indent: int): string =
$(1+it[0]) & "(" & it[1].ppVid & "," & it[2].ppKey & ")"
).join(pfx2) & "}"
proc ppCache(
db: AristoDb;
vGenOk: bool;
sTabOk: bool;
lTabOk: bool;
kMapOk: bool;
pPrfOk: bool;
indent = 4;
): string =
let
pfx1 = indent.toPfx
pfx2 = indent.toPfx(1)
tagOk = 1 < sTabOk.ord + lTabOk.ord + kMapOk.ord + pPrfOk.ord + vGenOk.ord
var
pfy = ""
proc doPrefix(s: string; dataOk: bool): string =
var rc: string
if tagOk:
rc = pfy & s & (if dataOk: pfx2 else: " ")
pfy = pfx1
else:
rc = pfy
pfy = pfx2
rc
if not db.top.isNil:
if vGenOk:
let
tLen = db.top.vGen.len
info = "vGen(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & db.top.vGen.ppVidList
if sTabOk:
let
tLen = db.top.sTab.len
info = "sTab(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & db.top.sTab.ppSTab(db,indent+1)
if lTabOk:
let
tlen = db.top.lTab.len
info = "lTab(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & db.top.lTab.ppLTab(indent+1)
if kMapOk:
let
tLen = db.top.kMap.len
ulen = db.top.pAmk.len
lInf = if tLen == uLen: $tLen else: $tLen & "," & $ulen
info = "kMap(" & lInf & ")"
result &= info.doPrefix(0 < tLen + uLen)
result &= db.ppXMap(db.top.kMap,db.top.pAmk,indent+1)
if pPrfOk:
let
tLen = db.top.pPrf.len
info = "pPrf(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & db.top.pPrf.ppPPrf
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
@ -326,7 +418,7 @@ proc pp*(vid: VertexID): string =
vid.ppVid
proc pp*(vGen: openArray[VertexID]): string =
"[" & vGen.mapIt(it.ppVid).join(",") & "]"
vGen.ppVidList
proc pp*(p: PayloadRef, db = AristoDb()): string =
p.ppPayload(db)
@ -372,22 +464,13 @@ proc pp*(nd: NodeRef): string =
nd.pp(db)
proc pp*(sTab: Table[VertexID,VertexRef]; db = AristoDb(); indent = 4): string =
"{" & sTab.sortedKeys
.mapIt((it, sTab.getOrVoid it))
.filterIt(it[1].isValid)
.mapIt("(" & it[0].ppVid & "," & it[1].ppVtx(db,it[0]) & ")")
.join("," & indent.toPfx(1)) & "}"
sTab.ppSTab
proc pp*(lTab: Table[LeafTie,VertexID]; indent = 4): string =
var db = AristoDb()
"{" & lTab.sortedKeys
.mapIt((it, lTab.getOrVoid it))
.filterIt(it[1].isValid)
.mapIt("(" & it[0].ppLeafTie(db) & "," & it[1].ppVid & ")")
.join("," & indent.toPfx(1)) & "}"
lTab.ppLTab
proc pp*(pPrf: HashSet[VertexID]): string =
"{" & pPrf.sortedKeys.mapIt(it.ppVid).join(",") & "}"
pPrf.ppPPrf
proc pp*(leg: Leg; db = AristoDb()): string =
result = "(" & leg.wp.vid.ppVid & ","
@ -454,49 +537,31 @@ proc pp*(
proc pp*(
db: AristoDb;
vGenOk = true;
sTabOk = true;
lTabOk = true;
kMapOk = true;
pPrfOk = true;
indent = 4;
): string =
let
pfx1 = indent.toPfx
pfx2 = indent.toPfx(1)
tagOk = 1 < sTabOk.ord + lTabOk.ord + kMapOk.ord + pPrfOk.ord + vGenOk.ord
var
pfy = ""
proc doPrefix(s: string): string =
var rc: string
if tagOk:
rc = pfy & s & pfx2
pfy = pfx1
else:
rc = pfy
pfy = pfx2
rc
if not db.top.isNil:
if vGenOk:
let info = "vGen(" & $db.top.vGen.len & ")"
result &= info.doPrefix & db.top.vGen.pp
if sTabOk:
let info = "sTab(" & $db.top.sTab.len & ")"
result &= info.doPrefix & db.top.sTab.pp(db,indent+1)
if lTabOk:
let info = "lTab(" & $db.top.lTab.len & ")"
result &= info.doPrefix & db.top.lTab.pp(indent+1)
if kMapOk:
let info = "kMap(" & $db.top.kMap.len & "," & $db.top.pAmk.len & ")"
result &= info.doPrefix & db.ppXMap(db.top.kMap,db.top.pAmk,indent+1)
if pPrfOk:
let info = "pPrf(" & $db.top.pPrf.len & ")"
result &= info.doPrefix & db.top.pPrf.pp
db.ppCache(
vGenOk=true, sTabOk=true, lTabOk=true, kMapOk=true, pPrfOk=true)
proc pp*(
be: AristoTypedBackendRef;
db: AristoDb;
xTabOk: bool;
indent = 4;
): string =
db.ppCache(
vGenOk=true, sTabOk=xTabOk, lTabOk=xTabOk, kMapOk=true, pPrfOk=true)
proc pp*(
db: AristoDb;
xTabOk: bool;
kMapOk: bool;
other = false;
indent = 4;
): string =
db.ppCache(
vGenOk=other, sTabOk=xTabOk, lTabOk=xTabOk, kMapOk=kMapOk, pPrfOk=other)
proc pp*(
be: TypedBackendRef;
db: AristoDb;
indent = 4;
): string =

View File

@ -26,13 +26,21 @@ logScope:
topics = "aristo-delete"
# ------------------------------------------------------------------------------
# Private functions
# Private helpers
# ------------------------------------------------------------------------------
proc branchStillNeeded(vtx: VertexRef): bool =
proc branchStillNeeded(vtx: VertexRef): Result[int,void] =
## Returns the nibble if there is only one reference left.
var nibble = -1
for n in 0 .. 15:
if vtx.bVid[n].isValid:
return true
if 0 <= nibble:
return ok(-1)
nibble = n
if 0 <= nibble:
return ok(nibble)
# Oops, degenerated branch node
err()
proc clearKey(
db: AristoDb; # Database, top layer
@ -52,15 +60,208 @@ proc doneWith(
vid: VertexID; # Vertex IDs to clear
) =
# Remove entry
db.vidDispose vid # Will be propagated to backend
db.top.sTab.del vid
if db.getVtxBackend(vid).isOk:
db.top.sTab[vid] = VertexRef(nil) # Will be propagated to backend
else:
db.top.sTab.del vid
db.vidDispose vid
db.clearKey vid
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc collapseBranch(
db: AristoDb; # Database, top layer
hike: Hike; # Fully expanded path
nibble: byte; # Applicable link for `Branch` vertex
): Result[void,(VertexID,AristoError)] =
## Convert/merge vertices:
## ::
## current | becomes | condition
## | |
## ^3 ^2 | ^3 ^2 |
## -------------------+---------------------+------------------
## Branch <br> Branch | Branch <ext> Branch | 2 < legs.len (1)
## Ext <br> Branch | <ext> Branch | 2 < legs.len (2)
## <br> Branch | <ext> Branch | legs.len == 2 (3)
##
## Depending on whether the parent `par` is an extension, merge `br` into
## `par`. Otherwise replace `br` by an extension.
##
let br = hike.legs[^2].wp
var xt = VidVtxPair( # Rewrite `br`
vid: br.vid,
vtx: VertexRef(
vType: Extension,
ePfx: @[nibble].initNibbleRange.slice(1),
eVid: br.vtx.bVid[nibble]))
if 2 < hike.legs.len: # (1) or (2)
let par = hike.legs[^3].wp
case par.vtx.vType:
of Branch: # (1)
# Replace `br` (use `xt` as-is)
discard
of Extension: # (2)
# Merge `br` into ^3 (update `xt`)
db.doneWith xt.vid
xt.vid = par.vid
xt.vtx.ePfx = par.vtx.ePfx & xt.vtx.ePfx
of Leaf:
return err((par.vid,DelLeafUnexpected))
else: # (3)
# Replace `br` (use `xt` as-is)
discard
db.top.sTab[xt.vid] = xt.vtx
ok()
proc collapseExt(
db: AristoDb; # Database, top layer
hike: Hike; # Fully expanded path
nibble: byte; # Link for `Branch` vertex `^2`
vtx: VertexRef; # Follow up extension vertex (nibble)
): Result[void,(VertexID,AristoError)] =
## Convert/merge vertices:
## ::
## ^3 ^2 `vtx` | ^3 ^2 |
## --------------------+-----------------------+------------------
## Branch <br> Ext | Branch <ext> | 2 < legs.len (1)
## Ext <br> Ext | <ext> | 2 < legs.len (2)
## <br> Ext | <ext> | legs.len == 2 (3)
##
## Merge `vtx` into `br` and unlink `vtx`.
##
let br = hike.legs[^2].wp
var xt = VidVtxPair( # Merge `vtx` into `br`
vid: br.vid,
vtx: VertexRef(
vType: Extension,
ePfx: @[nibble].initNibbleRange.slice(1) & vtx.ePfx,
eVid: vtx.eVid))
db.doneWith br.vtx.bVid[nibble] # `vtx` is obsolete now
if 2 < hike.legs.len: # (1) or (2)
let par = hike.legs[^3].wp
case par.vtx.vType:
of Branch: # (1)
# Replace `br` by `^2 & vtx` (use `xt` as-is)
discard
of Extension: # (2)
# Replace ^3 by `^3 & ^2 & vtx` (update `xt`)
db.doneWith xt.vid
xt.vid = par.vid
xt.vtx.ePfx = par.vtx.ePfx & xt.vtx.ePfx
of Leaf:
return err((par.vid,DelLeafUnexpected))
else: # (3)
# Replace ^2 by `^2 & vtx` (use `xt` as-is)
discard
db.top.sTab[xt.vid] = xt.vtx
ok()
proc collapseLeaf(
db: AristoDb; # Database, top layer
hike: Hike; # Fully expanded path
nibble: byte; # Link for `Branch` vertex `^2`
vtx: VertexRef; # Follow up leaf vertex (from nibble)
): Result[void,(VertexID,AristoError)] =
## Convert/merge vertices:
## ::
## current | becomes | condition
## | |
## ^4 ^3 ^2 `vtx` | ^4 ^3 ^2 |
## -------------------------+----------------------------+------------------
## .. Branch <br> Leaf | .. Branch <Leaf> | 2 < legs.len (1)
## Branch Ext <br> Leaf | Branch <Leaf> | 3 < legs.len (2)
## Ext <br> Leaf | <Leaf> | legs.len == 3 (3)
## <br> Leaf | <Leaf> | legs.len == 2 (4)
##
## Merge `<br>` and `Leaf` replacing one and removing the other.
##
let br = hike.legs[^2].wp
var lf = VidVtxPair( # Merge `br` into `vtx`
vid: br.vtx.bVid[nibble],
vtx: VertexRef(
vType: Leaf,
lPfx: @[nibble].initNibbleRange.slice(1) & vtx.lPfx,
lData: vtx.lData))
db.doneWith br.vid # `br` is obsolete now
db.clearKey lf.vid # `vtx` was modified
if 2 < hike.legs.len: # (1), (2), or (3)
# Merge `br` into the leaf `vtx` and unlink `br`.
let par = hike.legs[^3].wp
case par.vtx.vType:
of Branch: # (1)
# Replace `vtx` by `^2 & vtx` (use `lf` as-is)
par.vtx.bVid[hike.legs[^3].nibble] = lf.vid
db.top.sTab[par.vid] = par.vtx
db.top.sTab[lf.vid] = lf.vtx
return ok()
of Extension: # (2) or (3)
# Merge `^3` into `lf` but keep the leaf vertex ID unchanged. This
# avoids some `lTab[]` registry update.
lf.vtx.lPfx = par.vtx.ePfx & lf.vtx.lPfx
if 3 < hike.legs.len: # (2)
# Grandparent exists
let gpr = hike.legs[^4].wp
if gpr.vtx.vType != Branch:
return err((gpr.vid,DelBranchExpexted))
db.doneWith par.vid # `par` is obsolete now
gpr.vtx.bVid[hike.legs[^4].nibble] = lf.vid
db.top.sTab[gpr.vid] = gpr.vtx
db.top.sTab[lf.vid] = lf.vtx
return ok()
# No grandparent, so ^3 is root vertex # (3)
db.top.sTab[par.vid] = lf.vtx
# Continue below
of Leaf:
return err((par.vid,DelLeafUnexpected))
else: # (4)
# Replace ^2 by `^2 & vtx` (use `lf` as-is)
db.top.sTab[br.vid] = lf.vtx
# Continue below
# Common part for setting up `lf` as root vertex # Rest of (3) or (4)
let rc = lf.vtx.lPfx.pathToTag
if rc.isErr:
return err((br.vid,rc.error))
#
# No need to update the cache unless `lf` is present there. The leaf path
# as well as the value associated with the leaf path has not been changed.
let lfTie = LeafTie(root: hike.root, path: rc.value)
if db.top.lTab.hasKey lfTie:
db.top.lTab[lfTie] = lf.vid
# Clean up stale leaf vertex which has moved to root position
db.doneWith lf.vid
ok()
# -------------------------
proc deleteImpl(
db: AristoDb; # Database, top layer
hike: Hike; # Fully expanded path
lty: LeafTie; # `Patricia Trie` path root-to-leaf
db: AristoDb; # Database, top layer
): Result[void,(VertexID,AristoError)] =
## Implementation of *delete* functionality.
if hike.error != AristoError(0):
@ -68,53 +269,58 @@ proc deleteImpl(
return err((hike.legs[^1].wp.vid,hike.error))
return err((VertexID(0),hike.error))
# doAssert 0 < hike.legs.len and hike.tail.len == 0 # as assured by `hikeUp()`
# Remove leaf entry on the top
let lf = hike.legs[^1].wp
if lf.vtx.vType != Leaf:
return err((lf.vid,DelLeafExpexted))
if lf.vid in db.top.pPrf:
return err((lf.vid, DelLeafLocked))
db.doneWith lf.vid
var lf: VidVtxPair
block:
var inx = hike.legs.len - 1
if 1 < hike.legs.len:
# Remove leaf entry on the top
lf = hike.legs[inx].wp
if lf.vtx.vType != Leaf:
return err((lf.vid,DelLeafExpexted))
if lf.vid in db.top.pPrf:
return err((lf.vid, DelLeafLocked))
db.doneWith(lf.vid)
inx.dec
# Get current `Branch` vertex `br`
let br = hike.legs[^2].wp
if br.vtx.vType != Branch:
return err((br.vid,DelBranchExpexted))
while 0 <= inx:
# Unlink child vertex
let br = hike.legs[inx].wp
if br.vtx.vType != Branch:
return err((br.vid,DelBranchExpexted))
if br.vid in db.top.pPrf:
return err((br.vid, DelBranchLocked))
br.vtx.bVid[hike.legs[inx].nibble] = VertexID(0)
db.top.sTab[br.vid] = br.vtx
# Unlink child vertex from structural table
br.vtx.bVid[hike.legs[^2].nibble] = VertexID(0)
db.top.sTab[br.vid] = br.vtx
if br.vtx.branchStillNeeded:
# Clear all keys up to the toot key
db.clearKey(br.vid)
while 0 < inx:
inx.dec
db.clearKey(hike.legs[inx].wp.vid)
break
# Clear all keys up to the root key
for n in 0 .. hike.legs.len - 2:
let vid = hike.legs[n].wp.vid
if vid in db.top.pPrf:
return err((vid, DelBranchLocked))
db.clearKey vid
# Remove this `Branch` entry
db.doneWith(br.vid)
inx.dec
let nibble = block:
let rc = br.vtx.branchStillNeeded()
if rc.isErr:
return err((br.vid,DelBranchWithoutRefs))
rc.value
if inx < 0:
break
# Convert to `Extension` or `Leaf` vertex
if 0 <= nibble:
# Get child vertex (there must be one after a `Branch` node)
let nxt = block:
let vid = br.vtx.bVid[nibble]
VidVtxPair(vid: vid, vtx: db.getVtx vid)
if not nxt.vtx.isValid:
return err((nxt.vid, DelVidStaleVtx))
# There might be an optional `Extension` to remove
let ext = hike.legs[inx].wp
if ext.vtx.vType == Extension:
if br.vid in db.top.pPrf:
return err((ext.vid, DelExtLocked))
db.doneWith(ext.vid)
inx.dec
# Collapse `Branch` vertex `br` depending on `nxt` vertex type
let rc = block:
case nxt.vtx.vType:
of Branch:
db.collapseBranch(hike, nibble.byte)
of Extension:
db.collapseExt(hike, nibble.byte, nxt.vtx)
of Leaf:
db.collapseLeaf(hike, nibble.byte, nxt.vtx)
if rc.isErr:
return err(rc.error)
# Delete leaf entry
let rc = db.getVtxBackend lf.vid
@ -132,24 +338,24 @@ proc deleteImpl(
# ------------------------------------------------------------------------------
proc delete*(
hike: Hike; # Fully expanded chain of vertices
db: AristoDb; # Database, top layer
hike: Hike; # Fully expanded chain of vertices
): Result[void,(VertexID,AristoError)] =
## Delete argument `hike` chain of vertices from the database
# Need path in order to remove it from `lTab[]`
let lky = block:
let lty = block:
let rc = hike.to(NibblesSeq).pathToTag()
if rc.isErr:
return err((VertexID(0),DelPathTagError))
LeafTie(root: hike.root, path: rc.value)
hike.deleteImpl(lky, db)
db.deleteImpl(hike, lty)
proc delete*(
lty: LeafTie; # `Patricia Trie` path root-to-leaf
db: AristoDb; # Database, top layer
lty: LeafTie; # `Patricia Trie` path root-to-leaf
): Result[void,(VertexID,AristoError)] =
## Variant of `delete()`
lty.hikeUp(db).deleteImpl(lty, db)
db.deleteImpl(lty.hikeUp(db), lty)
# ------------------------------------------------------------------------------
# End

View File

@ -24,8 +24,10 @@ type
RlpOtherException
# Data record transcoders, `deblobify()` and `blobify()`
BlobifyVtxExPathOverflow
BlobifyVtxLeafPathOverflow
BlobifyBranchMissingRefs
BlobifyExtMissingRefs
BlobifyExtPathOverflow
BlobifyLeafPathOverflow
DeblobNilArgument
DeblobUnknown
@ -87,18 +89,49 @@ type
HashifyLeafToRootAllFailed
HashifyRootHashMismatch
HashifyRootVidMismatch
HashifyVidCircularDependence
HashifyVtxMissing
HashifyCheckRevCountMismatch
HashifyCheckRevHashMismatch
HashifyCheckRevHashMissing
HashifyCheckRevVtxDup
HashifyCheckRevVtxMissing
HashifyCheckVidVtxMismatch
HashifyCheckVtxCountMismatch
HashifyCheckVtxHashMismatch
HashifyCheckVtxHashMissing
HashifyCheckVtxIncomplete
HashifyCheckVtxLockWithoutKey
# Cache checker `checkCache()`
CheckStkVtxIncomplete
CheckStkVtxKeyMissing
CheckStkVtxKeyMismatch
CheckStkRevKeyMissing
CheckStkRevKeyMismatch
CheckStkVtxCountMismatch
CheckRlxVidVtxMismatch
CheckRlxVtxIncomplete
CheckRlxVtxKeyMissing
CheckRlxVtxKeyMismatch
CheckRlxRevKeyMissing
CheckRlxRevKeyMismatch
CheckRlxVidVtxBeMissing
CheckRlxVtxEmptyKeyMissing
CheckRlxVtxEmptyKeyExpected
CheckAnyRevVtxMissing
CheckAnyRevVtxDup
CheckAnyRevCountMismatch
CheckAnyVtxLockWithoutKey
# Backend structural check `checkBE()`
CheckBeVtxInvalid
CheckBeKeyInvalid
CheckBeVtxMissing
CheckBeKeyMissing
CheckBeKeyCantCompile
CheckBeKeyMismatch
CheckBeGarbledVGen
CheckBeCacheKeyMissing
CheckBeCacheKeyNonEmpty
CheckBeCacheVidUnsynced
CheckBeCacheKeyDangling
CheckBeCacheVtxDangling
CheckBeCacheKeyCantCompile
CheckBeCacheKeyMismatch
CheckBeCacheGarbledVGen
# Neighbour vertex, tree traversal `nearbyRight()` and `nearbyLeft()`
NearbyBeyondRange
@ -113,17 +146,22 @@ type
NearbyPathTailUnexpected
NearbyPathTailInxOverflow
NearbyUnexpectedVtx
NearbyVidInvalid
# Deletion of vertices, `delete()`
DelPathTagError
DelLeafExpexted
DelLeafLocked
DelLeafUnexpected
DelBranchExpexted
DelBranchLocked
DelBranchWithoutRefs
DelExtLocked
DelVidStaleVtx
# Save permanently, `save()`
SaveBackendMissing
SaveLeafVidRepurposed
# Get functions form `aristo_get.nim`
GetLeafNotFound

View File

@ -75,6 +75,7 @@ static:
# ------------------------------------------------------------------------------
func `<`*(a, b: VertexID): bool {.borrow.}
func `<=`*(a, b: VertexID): bool {.borrow.}
func `==`*(a, b: VertexID): bool {.borrow.}
func cmp*(a, b: VertexID): int {.borrow.}
func `$`*(a: VertexID): string = $a.uint64
@ -82,6 +83,11 @@ func `$`*(a: VertexID): string = $a.uint64
func `==`*(a: VertexID; b: static[uint]): bool =
a == VertexID(b)
# Scalar model extension for `IntervalSetRef[VertexID,uint64]`
proc `+`*(a: VertexID; b: uint64): VertexID = (a.uint64+b).VertexID
proc `-`*(a: VertexID; b: uint64): VertexID = (a.uint64-b).VertexID
proc `-`*(a, b: VertexID): uint64 = (a.uint64 - b.uint64)
# ------------------------------------------------------------------------------
# Public helpers: `HashID` scalar data model
# ------------------------------------------------------------------------------

View File

@ -143,21 +143,24 @@ proc dup*(pld: PayloadRef): PayloadRef =
proc dup*(vtx: VertexRef): VertexRef =
## Duplicate vertex.
# Not using `deepCopy()` here (some `gc` needs `--deepcopy:on`.)
case vtx.vType:
of Leaf:
VertexRef(
vType: Leaf,
lPfx: vtx.lPfx,
lData: vtx.ldata.dup)
of Extension:
VertexRef(
vType: Extension,
ePfx: vtx.ePfx,
eVid: vtx.eVid)
of Branch:
VertexRef(
vType: Branch,
bVid: vtx.bVid)
if vtx.isNil:
VertexRef(nil)
else:
case vtx.vType:
of Leaf:
VertexRef(
vType: Leaf,
lPfx: vtx.lPfx,
lData: vtx.ldata.dup)
of Extension:
VertexRef(
vType: Extension,
ePfx: vtx.ePfx,
eVid: vtx.eVid)
of Branch:
VertexRef(
vType: Branch,
bVid: vtx.bVid)
proc to*(node: NodeRef; T: type VertexRef): T =
## Extract a copy of the `VertexRef` part from a `NodeRef`.

View File

@ -42,12 +42,12 @@
{.push raises: [].}
import
std/[sequtils, sets, strutils, tables],
std/[algorithm, sequtils, sets, strutils, tables],
chronicles,
eth/common,
stew/results,
"."/[aristo_constants, aristo_desc, aristo_get, aristo_hike,
aristo_transcode, aristo_vid]
stew/[interval_set, results],
"."/[aristo_desc, aristo_get, aristo_hike, aristo_vid],
./aristo_hashify/hashify_helper
type
BackVidValRef = ref object
@ -78,31 +78,6 @@ func isValid(brv: BackVidValRef): bool =
# Private functions
# ------------------------------------------------------------------------------
proc toNode(vtx: VertexRef; db: AristoDb): Result[NodeRef,void] =
case vtx.vType:
of Leaf:
return ok NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
of Branch:
let node = NodeRef(vType: Branch, bVid: vtx.bVid)
for n in 0 .. 15:
if vtx.bVid[n].isValid:
let key = db.getKey vtx.bVid[n]
if key.isValid:
node.key[n] = key
continue
return err()
else:
node.key[n] = VOID_HASH_KEY
return ok node
of Extension:
if vtx.eVid.isValid:
let key = db.getKey vtx.eVid
if key.isValid:
let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vtx.eVid)
node.key[0] = key
return ok node
proc updateHashKey(
db: AristoDb; # Database, top layer
root: VertexID; # Root ID
@ -174,13 +149,66 @@ proc leafToRootHasher(
# Check against existing key, or store new key
let
key = rc.value.encode.digestTo(HashKey)
key = rc.value.toHashKey
rx = db.updateHashKey(hike.root, wp.vid, key, bg)
if rx.isErr:
return err((wp.vid,rx.error))
ok -1 # all could be hashed
# ------------------
proc deletedLeafHasher(
    db: AristoDb;                      # Database, top layer
    hike: Hike;                        # Hike for labelling leaf..root
      ): Result[void,(VertexID,AristoError)] =
  ## Label the vertices of a `hike` whose leaf entry was deleted. Starting
  ## from the root end, each vertex is converted to a node and its Merkle
  ## hash key is either verified against an existing key or newly attached.
  ## Vertices whose child keys are not resolved yet are re-queued until all
  ## children are solved; a child that is already solved but still reported
  ## missing indicates a circular dependence and aborts the traversal.
  var
    todo = hike.legs.reversed.mapIt(it.wp)
    solved: HashSet[VertexID]
  # Edge case for empty `hike`: fall back to the root vertex only
  if todo.len == 0:
    let vtx = db.getVtx hike.root
    if not vtx.isValid:
      return err((hike.root,HashifyVtxMissing))
    todo = @[VidVtxPair(vid: hike.root, vtx: vtx)]
  while 0 < todo.len:
    var
      delayed: seq[VidVtxPair]
      didHere: HashSet[VertexID] # avoid duplicates
    for wp in todo:
      let rc = wp.vtx.toNode(db, stopEarly=false)
      if rc.isOk:
        # All child links resolved: verify or attach the Merkle key
        let
          expected = rc.value.toHashKey
          key = db.getKey wp.vid
        if key.isValid:
          if key != expected:
            return err((wp.vid,HashifyExistingHashMismatch))
        else:
          db.vidAttach(HashLabel(root: hike.root, key: expected), wp.vid)
        solved.incl wp.vid
      else:
        # Resolve follow up vertices first
        for vid in rc.error:
          let vtx = db.getVtx vid
          if not vtx.isValid:
            return err((vid,HashifyVtxMissing))
          if vid in solved:
            # A solved child still reported missing => dependency loop
            return err((vid,HashifyVidCircularDependence))
          if vid notin didHere:
            didHere.incl vid
            delayed.add VidVtxPair(vid: vid, vtx: vtx)
        # Followed by this vertex which relies on the ones registered above.
        if wp.vid notin didHere:
          didHere.incl wp.vid
          delayed.add wp
    todo = delayed
  ok()
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
@ -212,40 +240,48 @@ proc hashify*(
for (lky,vid) in db.top.lTab.pairs:
let hike = lky.hikeUp(db)
if hike.error != AristoError(0):
return err((vid,hike.error))
roots.incl hike.root
# Hash as much of the `hike` as possible
let n = block:
let rc = db.leafToRootHasher hike
# There might be deleted entries on the leaf table. If this is the case,
# the Merkle hashes for the vertices in the `hike` can all be compiled.
if not vid.isValid:
let rc = db.deletedLeafHasher hike
if rc.isErr:
return err(rc.error)
rc.value
if 0 < n:
# Backtrack and register remaining nodes. Note that in case *n == 0*, the
# root vertex has not been fully resolved yet.
#
# hike.legs: (leg[0], leg[1], .., leg[n-1], leg[n], ..)
# | | | |
# | <---- | <---- | <---- |
# | | |
# | backLink[] | downMost |
#
downMost[hike.legs[n].wp.vid] = BackVidValRef(
root: hike.root,
onBe: hike.legs[n].backend,
toVid: hike.legs[n-1].wp.vid)
for u in (n-1).countDown(1):
backLink[hike.legs[u].wp.vid] = BackVidValRef(
elif hike.error != AristoError(0):
return err((vid,hike.error))
else:
# Hash as much of the `hike` as possible
let n = block:
let rc = db.leafToRootHasher hike
if rc.isErr:
return err(rc.error)
rc.value
roots.incl hike.root
if 0 < n:
# Backtrack and register remaining nodes. Note that in case *n == 0*,
# the root vertex has not been fully resolved yet.
#
# hike.legs: (leg[0], leg[1], .., leg[n-1], leg[n], ..)
# | | | |
# | <---- | <---- | <---- |
# | | |
# | backLink[] | downMost |
#
downMost[hike.legs[n].wp.vid] = BackVidValRef(
root: hike.root,
onBe: hike.legs[u].backend,
toVid: hike.legs[u-1].wp.vid)
elif n < 0:
completed.incl hike.root
onBe: hike.legs[n].backend,
toVid: hike.legs[n-1].wp.vid)
for u in (n-1).countDown(1):
backLink[hike.legs[u].wp.vid] = BackVidValRef(
root: hike.root,
onBe: hike.legs[u].backend,
toVid: hike.legs[u-1].wp.vid)
elif n < 0:
completed.incl hike.root
# At least one full path leaf..root should have succeeded with labelling
# for each root.
@ -263,7 +299,7 @@ proc hashify*(
# references have Merkle hashes.
#
# Also `db.getVtx(vid)` => not nil as it was fetched earlier, already
let rc = db.getVtx(vid).toNode(db)
let rc = db.getVtx(vid).toNode db
if rc.isErr:
# Cannot complete with this vertex, so do it later
redo[vid] = val
@ -271,7 +307,7 @@ proc hashify*(
else:
# Update Merkle hash
let
key = rc.value.encode.digestTo(HashKey)
key = rc.value.toHashKey
rx = db.updateHashKey(val.root, vid, key, val.onBe)
if rx.isErr:
return err((vid,rx.error))
@ -295,95 +331,6 @@ proc hashify*(
ok completed
# ------------------------------------------------------------------------------
# Public debugging functions
# ------------------------------------------------------------------------------
proc hashifyCheck*(
    db: AristoDb;                      # Database, top layer
    relax = false;                     # Check existing hashes only
      ): Result[void,(VertexID,AristoError)] =
  ## Verify that the Merkle hash keys are either completely missing or
  ## match all known vertices on the argument database layer `db`.
  if not relax:
    # Strict mode: every cached vertex must convert to a node, have a valid
    # key label, and that label must round-trip through the reverse map.
    for (vid,vtx) in db.top.sTab.pairs:
      let rc = vtx.toNode(db)
      if rc.isErr:
        return err((vid,HashifyCheckVtxIncomplete))
      let lbl = db.top.kMap.getOrVoid vid
      if not lbl.isValid:
        return err((vid,HashifyCheckVtxHashMissing))
      if lbl.key != rc.value.encode.digestTo(HashKey):
        return err((vid,HashifyCheckVtxHashMismatch))
      let revVid = db.top.pAmk.getOrVoid lbl
      if not revVid.isValid:
        return err((vid,HashifyCheckRevHashMissing))
      if revVid != vid:
        return err((vid,HashifyCheckRevHashMismatch))
  elif 0 < db.top.pPrf.len:
    # Relaxed mode with proof-locked vertices: only check the locked set,
    # but these must all be present and fully consistent.
    for vid in db.top.pPrf:
      let vtx = db.top.sTab.getOrVoid vid
      if not vtx.isValid:
        return err((vid,HashifyCheckVidVtxMismatch))
      let rc = vtx.toNode(db)
      if rc.isErr:
        return err((vid,HashifyCheckVtxIncomplete))
      let lbl = db.top.kMap.getOrVoid vid
      if not lbl.isValid:
        return err((vid,HashifyCheckVtxHashMissing))
      if lbl.key != rc.value.encode.digestTo(HashKey):
        return err((vid,HashifyCheckVtxHashMismatch))
      let revVid = db.top.pAmk.getOrVoid lbl
      if not revVid.isValid:
        return err((vid,HashifyCheckRevHashMissing))
      if revVid != vid:
        return err((vid,HashifyCheckRevHashMismatch))
  else:
    # Fully relaxed mode: only verify labels that exist; vertices that do
    # not (yet) convert to a node are silently skipped.
    for (vid,lbl) in db.top.kMap.pairs:
      if lbl.isValid: # Otherwise to be deleted
        let vtx = db.getVtx vid
        if vtx.isValid:
          let rc = vtx.toNode(db)
          if rc.isOk:
            if lbl.key != rc.value.encode.digestTo(HashKey):
              return err((vid,HashifyCheckVtxHashMismatch))
            let revVid = db.top.pAmk.getOrVoid lbl
            if not revVid.isValid:
              return err((vid,HashifyCheckRevHashMissing))
            if revVid != vid:
              return err((vid,HashifyCheckRevHashMismatch))

  # Some `kMap[]` entries may be void indicating backend deletion, so the
  # reverse map `pAmk[]` must match the count of *valid* `kMap[]` entries.
  let kMapCount = db.top.kMap.values.toSeq.filterIt(it.isValid).len
  if db.top.pAmk.len != kMapCount:
    # Find a specific offender before falling back to the generic error
    var knownKeys: HashSet[VertexID]
    for (key,vid) in db.top.pAmk.pairs:
      if not db.top.kMap.hasKey(vid):
        return err((vid,HashifyCheckRevVtxMissing))
      if vid in knownKeys:
        return err((vid,HashifyCheckRevVtxDup))
      knownKeys.incl vid
    return err((VertexID(0),HashifyCheckRevCountMismatch)) # should not apply(!)

  if 0 < db.top.pAmk.len and not relax and db.top.pAmk.len < db.top.sTab.len:
    # Cannot have less changes than cached entries
    return err((VertexID(0),HashifyCheckVtxCountMismatch))

  # Every proof-locked vertex must carry a Merkle key
  for vid in db.top.pPrf:
    if not db.top.kMap.hasKey(vid):
      return err((vid,HashifyCheckVtxLockWithoutKey))
  ok()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,69 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
#std/[tables],
eth/common,
stew/results,
".."/[aristo_constants, aristo_desc, aristo_get, aristo_transcode]
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc toNode*(
    vtx: VertexRef;                    # Vertex to convert
    db: AristoDb;                      # Database, top layer
    stopEarly = true;                  # Full list of missing links if `false`
      ): Result[NodeRef,seq[VertexID]] =
  ## Convert argument vertex to node.
  ##
  ## A `Leaf` vertex converts unconditionally. For `Branch` and `Extension`
  ## vertices the Merkle keys of all referenced child vertices must be
  ## resolvable via `db.getKey()`; otherwise the function fails with the
  ## list of vertex IDs whose keys are missing. With `stopEarly=true` the
  ## error list contains only the first missing ID found; with
  ## `stopEarly=false` all missing IDs of a `Branch` are collected.
  case vtx.vType:
  of Leaf:
    # No child references, conversion always succeeds
    return ok NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
  of Branch:
    let node = NodeRef(vType: Branch, bVid: vtx.bVid)
    var missing: seq[VertexID]
    for n in 0 .. 15:
      let vid = vtx.bVid[n]
      if vid.isValid:
        let key = db.getKey vid
        if key.isValid:
          node.key[n] = key
        else:
          missing.add vid
          if stopEarly:
            break
      else:
        # Unused branch slot gets the void placeholder key
        node.key[n] = VOID_HASH_KEY
    if 0 < missing.len:
      return err(missing)
    return ok node
  of Extension:
    # NOTE(review): `vtx.eVid` is not checked with `isValid` before the
    # `getKey` lookup (the pre-refactor variant did check) — presumably an
    # invalid ID yields an invalid key and the `err(@[vid])` branch; confirm.
    let
      vid = vtx.eVid
      key = db.getKey vid
    if key.isValid:
      let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid)
      node.key[0] = key
      return ok node
    return err(@[vid])
# This function cannot go into `aristo_desc` as it depends on `aristo_transcode`
# which depends on `aristo_desc`.
proc toHashKey*(node: NodeRef): HashKey =
  ## Derive the Merkle hash key for the argument `node` by serialising it
  ## with `encode` and digesting the resulting blob.
  result = digestTo(encode(node), HashKey)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -21,7 +21,7 @@ import
./aristo_desc/aristo_types_backend
export
AristoBackendType, AristoStorageType, AristoTypedBackendRef
AristoBackendType, AristoStorageType, TypedBackendRef
# ------------------------------------------------------------------------------
# Public database constructors, destructor
@ -91,7 +91,10 @@ proc finish*(db: var AristoDb; flush = false) =
# -----------------
proc to*[W: MemBackendRef|RdbBackendRef](db: AristoDb; T: type W): T =
proc to*[W: TypedBackendRef|MemBackendRef|RdbBackendRef](
db: AristoDb;
T: type W;
): T =
## Handy helper for low-level access to some backend functionality
db.backend.T

View File

@ -10,6 +10,7 @@
{.push raises: [].}
import
../aristo_desc,
../aristo_desc/aristo_types_backend
const
@ -22,13 +23,19 @@ type
BackendMemory
BackendRocksDB
AristoTypedBackendRef* = ref object of AristoBackendRef
TypedBackendRef* = ref object of AristoBackendRef
kind*: AristoBackendType ## Backend type identifier
when verifyIxId:
txGen: uint ## Transaction ID generator (for debugging)
txId: uint ## Active transaction ID (for debugging)
TypedPutHdlErrRef* = ref object of RootRef
pfx*: AristoStorageType ## Error sub-table
vid*: VertexID                     ## Vertex ID where the error occurred
code*: AristoError ## Error code (if any)
TypedPutHdlRef* = ref object of PutHdlRef
error*: TypedPutHdlErrRef ## Track error while collecting transaction
when verifyIxId:
txId: uint ## Transaction ID (for debugging)
@ -42,7 +49,7 @@ type
# Public helpers
# ------------------------------------------------------------------------------
proc beginSession*(hdl: TypedPutHdlRef; db: AristoTypedBackendRef) =
proc beginSession*(hdl: TypedPutHdlRef; db: TypedBackendRef) =
when verifyIxId:
doAssert db.txId == 0
if db.txGen == 0:
@ -51,11 +58,11 @@ proc beginSession*(hdl: TypedPutHdlRef; db: AristoTypedBackendRef) =
hdl.txId = db.txGen
db.txGen.inc
proc verifySession*(hdl: TypedPutHdlRef; db: AristoTypedBackendRef) =
proc verifySession*(hdl: TypedPutHdlRef; db: TypedBackendRef) =
when verifyIxId:
doAssert db.txId == hdl.txId
proc finishSession*(hdl: TypedPutHdlRef; db: AristoTypedBackendRef) =
proc finishSession*(hdl: TypedPutHdlRef; db: TypedBackendRef) =
when verifyIxId:
doAssert db.txId == hdl.txId
db.txId = 0

View File

@ -28,6 +28,7 @@
import
std/[algorithm, sequtils, tables],
chronicles,
eth/common,
stew/results,
../aristo_constants,
@ -37,7 +38,7 @@ import
./aristo_init_common
type
MemBackendRef* = ref object of AristoTypedBackendRef
MemBackendRef* = ref object of TypedBackendRef
## Inheriting table so access can be extended for debugging purposes
sTab: Table[VertexID,VertexRef] ## Structural vertex table making up a trie
kMap: Table[VertexID,HashKey] ## Merkle hash key mapping
@ -53,6 +54,10 @@ type
# Private helpers
# ------------------------------------------------------------------------------
template logTxt(info: static[string]): static[string] =
"MemoryDB " & info
proc newSession(db: MemBackendRef): MemPutHdlRef =
new result
result.TypedPutHdlRef.beginSession db
@ -102,28 +107,43 @@ proc putVtxFn(db: MemBackendRef): PutVtxFn =
result =
proc(hdl: PutHdlRef; vrps: openArray[(VertexID,VertexRef)]) =
let hdl = hdl.getSession db
for (vid,vtx) in vrps:
hdl.sTab[vid] = vtx.dup
if hdl.error.isNil:
for (vid,vtx) in vrps:
if not vtx.isNil:
let rc = vtx.blobify # verify data record
if rc.isErr:
hdl.error = TypedPutHdlErrRef(
pfx: VtxPfx,
vid: vid,
code: rc.error)
return
hdl.sTab[vid] = vtx.dup
proc putKeyFn(db: MemBackendRef): PutKeyFn =
result =
proc(hdl: PutHdlRef; vkps: openArray[(VertexID,HashKey)]) =
let hdl = hdl.getSession db
for (vid,key) in vkps:
hdl.kMap[vid] = key
if hdl.error.isNil:
for (vid,key) in vkps:
hdl.kMap[vid] = key
proc putIdgFn(db: MemBackendRef): PutIdgFn =
result =
proc(hdl: PutHdlRef; vs: openArray[VertexID]) =
let hdl = hdl.getSession db
hdl.vGen = vs.toSeq
hdl.vGenOk = true
if hdl.error.isNil:
hdl.vGen = vs.toSeq
hdl.vGenOk = true
proc putEndFn(db: MemBackendRef): PutEndFn =
result =
proc(hdl: PutHdlRef): AristoError =
let hdl = hdl.endSession db
if not hdl.error.isNil:
debug logTxt "putEndFn: failed",
pfx=hdl.error.pfx, vid=hdl.error.vid, error=hdl.error.code
return hdl.error.code
for (vid,vtx) in hdl.sTab.pairs:
if vtx.isValid:

View File

@ -42,17 +42,11 @@ logScope:
topics = "aristo-backend"
type
RdbBackendRef* = ref object of AristoTypedBackendRef
RdbBackendRef* = ref object of TypedBackendRef
rdb: RdbInst ## Allows low level access to database
RdbPutHdlErr = tuple
pfx: AristoStorageType ## Error sub-table
vid: VertexID ## Vertex ID where the error occured
code: AristoError ## Error code (if any)
RdbPutHdlRef = ref object of TypedPutHdlRef
cache: RdbTabs ## Tranaction cache
error: RdbPutHdlErr ## Track error while collecting transaction
const
extraTraceMessages = false or true
@ -151,12 +145,15 @@ proc putVtxFn(db: RdbBackendRef): PutVtxFn =
result =
proc(hdl: PutHdlRef; vrps: openArray[(VertexID,VertexRef)]) =
let hdl = hdl.getSession db
if hdl.error.code == AristoError(0):
if hdl.error.isNil:
for (vid,vtx) in vrps:
if vtx.isValid:
let rc = vtx.blobify
if rc.isErr:
hdl.error = (VtxPfx, vid, rc.error)
hdl.error = TypedPutHdlErrRef(
pfx: VtxPfx,
vid: vid,
code: rc.error)
return
hdl.cache[VtxPfx][vid] = rc.value
else:
@ -166,7 +163,7 @@ proc putKeyFn(db: RdbBackendRef): PutKeyFn =
result =
proc(hdl: PutHdlRef; vkps: openArray[(VertexID,HashKey)]) =
let hdl = hdl.getSession db
if hdl.error.code == AristoError(0):
if hdl.error.isNil:
for (vid,key) in vkps:
if key.isValid:
hdl.cache[KeyPfx][vid] = key.to(Blob)
@ -178,7 +175,7 @@ proc putIdgFn(db: RdbBackendRef): PutIdgFn =
result =
proc(hdl: PutHdlRef; vs: openArray[VertexID]) =
let hdl = hdl.getSession db
if hdl.error.code == AristoError(0):
if hdl.error.isNil:
if 0 < vs.len:
hdl.cache[IdgPfx][VertexID(0)] = vs.blobify
else:
@ -189,7 +186,7 @@ proc putEndFn(db: RdbBackendRef): PutEndFn =
result =
proc(hdl: PutHdlRef): AristoError =
let hdl = hdl.endSession db
if hdl.error.code != AristoError(0):
if not hdl.error.isNil:
debug logTxt "putEndFn: failed",
pfx=hdl.error.pfx, vid=hdl.error.vid, error=hdl.error.code
return hdl.error.code

View File

@ -15,7 +15,7 @@
import
std/[sequtils, tables],
stew/results,
"."/[aristo_desc, aristo_get]
"."/[aristo_desc, aristo_get, aristo_vid]
type
DeltaHistoryRef* = ref object
@ -75,7 +75,7 @@ proc pop*(db: var AristoDb; merge = true): AristoError =
proc save*(
db: var AristoDb; # Database to be updated
clear = true; # Clear current top level cache
): Result[DeltaHistoryRef,AristoError] =
): Result[DeltaHistoryRef,(VertexID,AristoError)] =
## Save top layer into persistent database. There is no check whether the
## current layer is fully consistent as a Merkle Patricia Tree. It is
## advised to run `hashify()` on the top layer before calling `save()`.
@ -88,9 +88,9 @@ proc save*(
##
let be = db.backend
if be.isNil:
return err(SaveBackendMissing)
return err((VertexID(0),SaveBackendMissing))
let hst = DeltaHistoryRef() # Change history
let hst = DeltaHistoryRef() # Change history
# Record changed `Leaf` nodes into the history table
for (lky,vid) in db.top.lTab.pairs:
@ -99,14 +99,17 @@ proc save*(
let rc = db.getVtxBackend vid
if rc.isErr:
if rc.error != GetVtxNotFound:
return err(rc.error) # Stop
hst.leafs[lky] = PayloadRef(nil) # So this is a new leaf vertex
return err((vid,rc.error)) # Stop
hst.leafs[lky] = PayloadRef(nil) # So this is a new leaf vertex
elif rc.value.vType == Leaf:
hst.leafs[lky] = rc.value.lData # Record previous payload
hst.leafs[lky] = rc.value.lData # Record previous payload
else:
hst.leafs[lky] = PayloadRef(nil) # Was re-puropsed as leaf vertex
return err((vid,SaveLeafVidRepurposed)) # Was re-purposed
else:
hst.leafs[lky] = PayloadRef(nil) # New leaf vertex
hst.leafs[lky] = PayloadRef(nil) # New leaf vertex
# Compact recycled nodes
db.vidReorg()
# Save structural and other table entries
let txFrame = be.putBegFn()
@ -115,7 +118,7 @@ proc save*(
be.putIdgFn(txFrame, db.top.vGen)
let w = be.putEndFn txFrame
if w != AristoError(0):
return err(w)
return err((VertexID(0),w))
# Delete stack and clear top
db.stack.setLen(0)

View File

@ -150,7 +150,7 @@ proc insertBranch(
return Hike(error: MergeBrLinkLeafGarbled)
let
local = db.vidFetch
local = db.vidFetch(pristine = true)
lty = LeafTie(root: hike.root, path: rc.value)
db.top.lTab[lty] = local # update leaf path lookup cache
db.top.sTab[local] = linkVtx
@ -168,7 +168,7 @@ proc insertBranch(
forkVtx.bVid[linkInx] = local
block:
let local = db.vidFetch
let local = db.vidFetch(pristine = true)
forkVtx.bVid[leafInx] = local
leafLeg.wp.vid = local
leafLeg.wp.vtx = VertexRef(
@ -242,7 +242,7 @@ proc concatBranchAndLeaf(
# Append leaf vertex
let
vid = db.vidFetch
vid = db.vidFetch(pristine = true)
vtx = VertexRef(
vType: Leaf,
lPfx: hike.tail.slice(1),
@ -364,7 +364,7 @@ proc topIsExtAddLeaf(
return Hike(error: MergeBranchProofModeLock)
let
vid = db.vidFetch
vid = db.vidFetch(pristine = true)
vtx = VertexRef(
vType: Leaf,
lPfx: hike.tail.slice(1),
@ -396,7 +396,7 @@ proc topIsEmptyAddLeaf(
return Hike(error: MergeBranchProofModeLock)
let
leafVid = db.vidFetch
leafVid = db.vidFetch(pristine = true)
leafVtx = VertexRef(
vType: Leaf,
lPfx: hike.tail.slice(1),

View File

@ -80,22 +80,23 @@ proc complete(
db: AristoDb; # Database layer
hikeLenMax: static[int]; # Beware of loops (if any)
doLeast: static[bool]; # Direction: *least* or *most*
): Hike =
): Result[Hike,(VertexID,AristoError)] =
## Extend `hike` using least or last vertex without recursion.
if not vid.isValid:
return err((VertexID(0),NearbyVidInvalid))
var
vid = vid
vtx = db.getVtx vid
uHike = Hike(root: hike.root, legs: hike.legs)
if not vtx.isValid:
return Hike(error: GetVtxNotFound)
return err((vid,GetVtxNotFound))
while uHike.legs.len < hikeLenMax:
var leg = Leg(wp: VidVtxPair(vid: vid, vtx: vtx), nibble: -1)
case vtx.vType:
of Leaf:
uHike.legs.add leg
return uHike # done
return ok(uHike) # done
of Extension:
vid = vtx.eVid
@ -104,7 +105,7 @@ proc complete(
if vtx.isValid:
uHike.legs.add leg
continue
return Hike(error: NearbyExtensionError) # Oops, no way
return err((vid,NearbyExtensionError)) # Oops, no way
of Branch:
when doLeast:
@ -117,16 +118,16 @@ proc complete(
if vtx.isValid:
uHike.legs.add leg
continue
return Hike(error: NearbyBranchError) # Oops, no way
return err((leg.wp.vid,NearbyBranchError)) # Oops, no way
Hike(error: NearbyNestingTooDeep)
err((VertexID(0),NearbyNestingTooDeep))
proc zeroAdjust(
hike: Hike; # Partially expanded chain of vertices
db: AristoDb; # Database layer
doLeast: static[bool]; # Direction: *least* or *most*
): Hike =
): Result[Hike,(VertexID,AristoError)] =
## Adjust empty argument path to the first vertex entry to the right. This
## applies if the argument `hike` is before the first entry in the database.
## The result is a hike which is aligned with the first entry.
@ -149,9 +150,7 @@ proc zeroAdjust(
pfx.pathPfxPad(255).hikeUp(root, db)
if 0 < hike.legs.len:
result = hike
result.error = AristoError(0)
return
return ok(hike)
let root = db.getVtx hike.root
if root.isValid:
@ -166,7 +165,7 @@ proc zeroAdjust(
let n = root.branchBorderNibble hike.tail[0].int8
if n < 0:
# Before or after the database range
return Hike(error: NearbyBeyondRange)
return err((hike.root,NearbyBeyondRange))
pfx = @[n.byte].initNibbleRange.slice(1)
of Extension:
@ -179,35 +178,34 @@ proc zeroAdjust(
break fail
let ePfxLen = ePfx.len
if hike.tail.len <= ePfxLen:
return Hike(error: NearbyPathTailInxOverflow)
return err((root.eVid,NearbyPathTailInxOverflow))
let tailPfx = hike.tail.slice(0,ePfxLen)
when doLeast:
if ePfx < tailPfx:
return Hike(error: NearbyBeyondRange)
return err((root.eVid,NearbyBeyondRange))
else:
if tailPfx < ePfx:
return Hike(error: NearbyBeyondRange)
return err((root.eVid,NearbyBeyondRange))
pfx = ePfx
of Leaf:
pfx = root.lPfx
if not hike.accept(pfx):
# Before or after the database range
return Hike(error: NearbyBeyondRange)
return err((hike.root,NearbyBeyondRange))
var newHike = pfx.toHike(hike.root, db)
if 0 < newHike.legs.len:
newHike.error = AristoError(0)
return newHike
return ok(newHike)
Hike(error: NearbyEmptyHike)
err((VertexID(0),NearbyEmptyHike))
proc finalise(
hike: Hike; # Partially expanded chain of vertices
db: AristoDb; # Database layer
moveRight: static[bool]; # Direction of next vertex
): Hike =
): Result[Hike,(VertexID,AristoError)] =
## Handle some pathological cases after main processing failed
proc beyond(p: Hike; pfx: NibblesSeq): bool =
when moveRight:
@ -223,7 +221,7 @@ proc finalise(
# Just for completeness (this case should have been handled, already)
if hike.legs.len == 0:
return Hike(error: NearbyEmptyHike)
return err((VertexID(0),NearbyEmptyHike))
# Check whether the path is beyond the database range
if 0 < hike.tail.len: # nothing to compare against, otherwise
@ -232,9 +230,11 @@ proc finalise(
# Note that only a `Branch` vertices has a non-zero nibble
if 0 <= top.nibble and top.nibble == top.wp.vtx.branchBorderNibble:
# Check the following up vertex
let vtx = db.getVtx top.wp.vtx.bVid[top.nibble]
let
vid = top.wp.vtx.bVid[top.nibble]
vtx = db.getVtx vid
if not vtx.isValid:
return Hike(error: NearbyDanglingLink)
return err((vid,NearbyDanglingLink))
var pfx: NibblesSeq
case vtx.vType:
@ -245,16 +245,16 @@ proc finalise(
of Branch:
pfx = @[vtx.branchBorderNibble.byte].initNibbleRange.slice(1)
if hike.beyond pfx:
return Hike(error: NearbyBeyondRange)
return err((vid,NearbyBeyondRange))
# Pathological cases
# * finalise right: nfffff.. for n < f or
# * finalise left: n00000.. for 0 < n
if hike.legs[0].wp.vtx.vType == Branch or
(1 < hike.legs.len and hike.legs[1].wp.vtx.vType == Branch):
return Hike(error: NearbyFailed) # no more vertices
return err((VertexID(0),NearbyFailed)) # no more vertices
Hike(error: NearbyUnexpectedVtx) # error
err((hike.legs[^1].wp.vid,NearbyUnexpectedVtx)) # error
proc nearbyNext(
@ -262,7 +262,7 @@ proc nearbyNext(
db: AristoDb; # Database layer
hikeLenMax: static[int]; # Beware of loops (if any)
moveRight: static[bool]; # Direction of next vertex
): Hike =
): Result[Hike,(VertexID,AristoError)] =
## Unified implementation of `nearbyRight()` and `nearbyLeft()`.
proc accept(nibble: int8): bool =
## Accept `nibble` unless on boundary dependent on `moveRight`
@ -284,9 +284,11 @@ proc nearbyNext(
w.branchNibbleMax(n - 1)
# Some easy cases
var hike = hike.zeroAdjust(db, doLeast=moveRight)
if hike.error != AristoError(0):
return hike
let hike = block:
var rc = hike.zeroAdjust(db, doLeast=moveRight)
if rc.isErr:
return err(rc.error)
rc.value
if hike.legs[^1].wp.vtx.vType == Extension:
let vid = hike.legs[^1].wp.vtx.eVid
@ -299,10 +301,10 @@ proc nearbyNext(
let top = uHike.legs[^1]
case top.wp.vtx.vType:
of Leaf:
return uHike
return ok(uHike)
of Branch:
if top.nibble < 0 or uHike.tail.len == 0:
return Hike(error: NearbyUnexpectedVtx)
return err((top.wp.vid,NearbyUnexpectedVtx))
of Extension:
uHike.tail = top.wp.vtx.ePfx & uHike.tail
uHike.legs.setLen(uHike.legs.len - 1)
@ -318,11 +320,11 @@ proc nearbyNext(
if start:
let vid = top.wp.vtx.bVid[top.nibble]
if not vid.isValid:
return Hike(error: NearbyDanglingLink) # error
return err((top.wp.vid,NearbyDanglingLink)) # error
let vtx = db.getVtx vid
if not vtx.isValid:
return Hike(error: GetVtxNotFound) # error
return err((vid,GetVtxNotFound)) # error
case vtx.vType
of Leaf:
@ -360,36 +362,40 @@ proc nearbyNext(
# End while
# Handle some pathological cases
return hike.finalise(db, moveRight)
hike.finalise(db, moveRight)
proc nearbyNext(
proc nearbyNextLeafTie(
lty: LeafTie; # Some `Patricia Trie` path
db: AristoDb; # Database layer
hikeLenMax: static[int]; # Beware of loops (if any)
moveRight:static[bool]; # Direction of next vertex
): Result[HashID,AristoError] =
## Variant of `nearbyNext()`, convenience wrapper
let hike = lty.hikeUp(db).nearbyNext(db, hikeLenMax, moveRight)
if hike.error != AristoError(0):
return err(hike.error)
): Result[HashID,(VertexID,AristoError)] =
## Variant of `nearbyNext()`, convenience wrapper
let hike = block:
let rc = lty.hikeUp(db).nearbyNext(db, hikeLenMax, moveRight)
if rc.isErr:
return err(rc.error)
rc.value
if 0 < hike.legs.len and hike.legs[^1].wp.vtx.vType == Leaf:
if 0 < hike.legs.len:
if hike.legs[^1].wp.vtx.vType != Leaf:
return err((hike.legs[^1].wp.vid,NearbyLeafExpected))
let rc = hike.legsTo(NibblesSeq).pathToKey
if rc.isOk:
return ok rc.value.to(HashID)
return err(rc.error)
return err((VertexID(0),rc.error))
err(NearbyLeafExpected)
err((VertexID(0),NearbyLeafExpected))
# ------------------------------------------------------------------------------
# Public functions, moving and right boundary proof
# ------------------------------------------------------------------------------
proc nearbyRight*(
proc right*(
hike: Hike; # Partially expanded chain of vertices
db: AristoDb; # Database layer
): Hike =
): Result[Hike,(VertexID,AristoError)] =
## Extends the maximally extended argument vertices `hike` to the right (i.e.
## with non-decreasing path value). This function does not backtrack if
## there are dangling links in between. It will return an error in that case.
@ -401,33 +407,33 @@ proc nearbyRight*(
## verify that there is no leaf vertex *right* of a boundary path value.
hike.nearbyNext(db, 64, moveRight=true)
proc nearbyRight*(
proc right*(
lty: LeafTie; # Some `Patricia Trie` path
db: AristoDb; # Database layer
): Result[LeafTie,AristoError] =
): Result[LeafTie,(VertexID,AristoError)] =
## Variant of `nearbyRight()` working with a `HashID` argument instead
## of a `Hike`.
let rc = lty.nearbyNext(db, 64, moveRight=true)
let rc = lty.nearbyNextLeafTie(db, 64, moveRight=true)
if rc.isErr:
return err(rc.error)
ok LeafTie(root: lty.root, path: rc.value)
proc nearbyLeft*(
proc left*(
hike: Hike; # Partially expanded chain of vertices
db: AristoDb; # Database layer
): Hike =
): Result[Hike,(VertexID,AristoError)] =
## Similar to `nearbyRight()`.
##
## This code is intended to be used for verifying a right-bound proof to
## verify that there is no leaf vertex *left* to a boundary path value.
hike.nearbyNext(db, 64, moveRight=false)
proc nearbyLeft*(
proc left*(
lty: LeafTie; # Some `Patricia Trie` path
db: AristoDb; # Database layer
): Result[LeafTie,AristoError] =
): Result[LeafTie,(VertexID,AristoError)] =
## Similar to `nearbyRight()` for `HashID` argument instead of a `Hike`.
let rc = lty.nearbyNext(db, 64, moveRight=false)
let rc = lty.nearbyNextLeafTie(db, 64, moveRight=false)
if rc.isErr:
return err(rc.error)
ok LeafTie(root: lty.root, path: rc.value)
@ -436,7 +442,7 @@ proc nearbyLeft*(
# Public debugging helpers
# ------------------------------------------------------------------------------
proc nearbyRightMissing*(
proc rightMissing*(
hike: Hike; # Partially expanded chain of vertices
db: AristoDb; # Database layer
): Result[bool,AristoError] =

View File

@ -127,10 +127,10 @@ proc append*(writer: var RlpWriter; node: NodeRef) =
# Public db record transcoders
# ------------------------------------------------------------------------------
proc blobify*(node: VertexRef; data: var Blob): AristoError =
## This function serialises the node argument to a database record. Contrary
## to RLP based serialisation, these records aim to align on fixed byte
## boundaries.
proc blobify*(vtx: VertexRef; data: var Blob): AristoError =
## This function serialises the vertex argument to a database record.
## Contrary to RLP based serialisation, these records aim to align on
## fixed byte boundaries.
## ::
## Branch:
## uint64, ... -- list of up to 16 child vertices lookup keys
@ -152,7 +152,7 @@ proc blobify*(node: VertexRef; data: var Blob): AristoError =
## ::
## 8 * n * ((access shr (n * 4)) and 15)
##
case node.vType:
case vtx.vType:
of Branch:
var
top = 0u64
@ -160,30 +160,34 @@ proc blobify*(node: VertexRef; data: var Blob): AristoError =
refs: Blob
keys: Blob
for n in 0..15:
if node.bVid[n].isValid:
if vtx.bVid[n].isValid:
access = access or (1u16 shl n)
refs &= node.bVid[n].uint64.toBytesBE.toSeq
refs &= vtx.bVid[n].uint64.toBytesBE.toSeq
if refs.len < 16:
return BlobifyBranchMissingRefs
data = refs & access.toBytesBE.toSeq & @[0u8]
of Extension:
let
pSegm = node.ePfx.hexPrefixEncode(isleaf = false)
pSegm = vtx.ePfx.hexPrefixEncode(isleaf = false)
psLen = pSegm.len.byte
if psLen == 0 or 33 < pslen:
return BlobifyVtxExPathOverflow
data = node.eVid.uint64.toBytesBE.toSeq & pSegm & @[0x80u8 or psLen]
return BlobifyExtPathOverflow
if not vtx.eVid.isValid:
return BlobifyExtMissingRefs
data = vtx.eVid.uint64.toBytesBE.toSeq & pSegm & @[0x80u8 or psLen]
of Leaf:
let
pSegm = node.lPfx.hexPrefixEncode(isleaf = true)
pSegm = vtx.lPfx.hexPrefixEncode(isleaf = true)
psLen = pSegm.len.byte
if psLen == 0 or 33 < psLen:
return BlobifyVtxLeafPathOverflow
data = node.lData.convertTo(Blob) & pSegm & @[0xC0u8 or psLen]
return BlobifyLeafPathOverflow
data = vtx.lData.convertTo(Blob) & pSegm & @[0xC0u8 or psLen]
proc blobify*(node: VertexRef): Result[Blob, AristoError] =
proc blobify*(vtx: VertexRef): Result[Blob, AristoError] =
## Variant of `blobify()`
var
data: Blob
info = node.blobify data
info = vtx.blobify data
if info != AristoError(0):
return err(info)
ok(data)

View File

@ -21,19 +21,23 @@ import
# Public functions
# ------------------------------------------------------------------------------
proc vidFetch*(db: AristoDb): VertexID =
## Create a new `VertexID`. Reusable *ID*s are kept in a list where the top
## entry *ID0* has the property that any other *ID* larger *ID0* is also not
proc vidFetch*(db: AristoDb; pristine = false): VertexID =
## Create a new `VertexID`. Reusable vertex *ID*s are kept in a list where
## the top entry *ID* has the property that any other *ID* larger is also not
## not used on the database.
##
## The function prefers to return recycled vertex *ID*s if there are any.
## When the argument `pristine` is set `true`, the function guarantees to
## return a non-recycled, brand new vertex *ID* which is the preferred mode
## when creating leaf vertices.
let top = db.top
case top.vGen.len:
of 0:
if top.vGen.len == 0:
# Note that `VertexID(1)` is the root of the main trie
top.vGen = @[VertexID(3)]
result = VertexID(2)
of 1:
elif top.vGen.len == 1 or pristine:
result = top.vGen[^1]
top.vGen = @[VertexID(result.uint64 + 1)]
top.vGen[^1] = result + 1
else:
result = top.vGen[^2]
top.vGen[^2] = top.vGen[^1]

View File

@ -222,7 +222,7 @@ proc accountsRunner(
check noisy.test_nearbyKvpList(accLst, resetDb)
test &"Delete accounts database, successively {accLst.len} entries":
check noisy.test_delete accLst
check noisy.test_delete(accLst, dbDir)
proc storagesRunner(
@ -261,7 +261,7 @@ proc storagesRunner(
check noisy.test_nearbyKvpList(stoLst, resetDb)
test &"Delete storage database, successively {stoLst.len} entries":
check noisy.test_delete stoLst
check noisy.test_delete(stoLst, dbDir)
# ------------------------------------------------------------------------------
# Main function(s)
@ -279,11 +279,11 @@ when isMainModule:
setErrorLevel()
when true: # and false:
when true and false:
noisy.miscRunner()
# Borrowed from `test_sync_snap.nim`
when true: # and false:
when true and false:
for n,sam in snapTestList:
noisy.transcodeRunner(sam)
for n,sam in snapTestStorageList:

View File

@ -59,7 +59,7 @@ proc mergeData(
noisy.say "***", "dataMerge(9)",
" nLeafs=", leafs.len,
"\n cache dump\n ", db.pp,
"\n backend dump\n ", db.backend.AristoTypedBackendRef.pp(db)
"\n backend dump\n ", db.to(TypedBackendRef).pp(db)
check rc.error == (VertexID(0),AristoError(0))
return
@ -174,13 +174,13 @@ proc test_backendConsistency*(
noisy.say "***", "beCon(2) <", n, "/", list.len-1, ">",
" groups=", count,
"\n cache dump\n ", ndb.pp,
"\n backend dump\n ", ndb.backend.AristoTypedBackendRef.pp(ndb),
"\n backend dump\n ", ndb.to(TypedBackendRef).pp(ndb),
"\n -------------",
"\n mdb cache\n ", mdb.pp,
"\n mdb backend\n ", mdb.to(MemBackendRef).pp(ndb),
"\n mdb backend\n ", mdb.to(TypedBackendRef).pp(ndb),
"\n -------------",
"\n rdb cache\n ", rdb.pp,
"\n rdb backend\n ", rdb.to(RdbBackendRef).pp(ndb),
"\n rdb backend\n ", rdb.to(TypedBackendRef).pp(ndb),
"\n -------------"
when true and false:
@ -200,7 +200,7 @@ proc test_backendConsistency*(
#noisy.say "***", "db-dump\n ", mdb.pp
let rc = mdb.save
if rc.isErr:
check rc.error == AristoError(0)
check rc.error == (0,0)
return
rc.value
@ -208,11 +208,11 @@ proc test_backendConsistency*(
let rdbHist = block:
let rc = rdb.save
if rc.isErr:
check rc.error == AristoError(0)
check rc.error == (0,0)
return
rc.value
if not ndb.top.verify(mdb.backend.MemBackendRef, noisy):
if not ndb.top.verify(mdb.to(MemBackendRef), noisy):
when true and false:
noisy.say "***", "beCon(4) <", n, "/", list.len-1, ">",
" groups=", count,
@ -223,7 +223,7 @@ proc test_backendConsistency*(
#"\n mdb pre-save backend\n ", mdbPreSaveBackend,
"\n -------------",
"\n mdb cache\n ", mdb.pp,
"\n mdb backend\n ", mdb.to(MemBackendRef).pp(ndb),
"\n mdb backend\n ", mdb.to(TypedBackendRef).pp(ndb),
"\n -------------"
return
@ -239,10 +239,10 @@ proc test_backendConsistency*(
"\n rdb pre-save backend\n ", rdbPreSaveBackend,
"\n -------------",
"\n rdb cache\n ", rdb.pp,
"\n rdb backend\n ", rdb.to(RdbBackendRef).pp(ndb),
"\n rdb backend\n ", rdb.to(TypedBackendRef).pp(ndb),
#"\n -------------",
#"\n mdb cache\n ", mdb.pp,
#"\n mdb backend\n ", mdb.to(MemBackendRef).pp(ndb),
#"\n mdb backend\n ", mdb.to(TypedBackendRef).pp(ndb),
"\n -------------"
return

View File

@ -12,13 +12,14 @@
## Aristo (aka Patricia) DB records merge test
import
std/[algorithm, bitops, sequtils],
std/[algorithm, bitops, sequtils, strutils, sets],
eth/common,
stew/results,
unittest2,
../../nimbus/db/aristo/[
aristo_desc, aristo_debug, aristo_delete, aristo_hashify, aristo_init,
aristo_nearby, aristo_merge],
aristo_check, aristo_desc, aristo_debug, aristo_delete, aristo_get,
aristo_hashify, aristo_hike, aristo_init, aristo_layer, aristo_nearby,
aristo_merge],
./test_helpers
type
@ -32,6 +33,9 @@ type
proc sortedKeys(lTab: Table[LeafTie,VertexID]): seq[LeafTie] =
lTab.keys.toSeq.sorted(cmp = proc(a,b: LeafTie): int = cmp(a,b))
proc pp(q: HashSet[LeafTie]): string =
"{" & q.toSeq.mapIt(it.pp).join(",") & "}"
# --------------
proc posixPrngRand(state: var uint32): byte =
@ -73,38 +77,153 @@ proc rand(td: var TesterDesc; top: int): int =
# -----------------------
proc randomisedLeafs(db: AristoDb; td: var TesterDesc): seq[LeafTie] =
result = db.top.lTab.sortedKeys
if 2 < result.len:
for n in 0 ..< result.len-1:
let r = n + td.rand(result.len - n)
result[n].swap result[r]
proc saveToBackend(
db: var AristoDb;
relax: bool;
noisy: bool;
debugID: int;
): bool =
let
trigger = false # or (debugID == 340)
prePreCache = db.pp
prePreBe = db.to(TypedBackendRef).pp(db)
if trigger:
noisy.say "***", "saveToBackend =========================== ", debugID
block:
let rc = db.checkCache(relax=true)
if rc.isErr:
noisy.say "***", "saveToBackend (1) hashifyCheck",
" debugID=", debugID,
" error=", rc.error,
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check rc.error == (0,0)
return
block:
let rc = db.hashify # (noisy = trigger)
if rc.isErr:
noisy.say "***", "saveToBackend (2) hashify",
" debugID=", debugID,
" error=", rc.error,
"\n pre-cache\n ", prePreCache,
"\n pre-be\n ", prePreBe,
"\n -------- hasify() -----",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check rc.error == (0,0)
return
let
preCache = db.pp
preBe = db.to(TypedBackendRef).pp(db)
block:
let rc = db.checkBE(relax=true)
if rc.isErr:
let noisy = true
noisy.say "***", "saveToBackend (3) checkBE",
" debugID=", debugID,
" error=", rc.error,
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check rc.error == (0,0)
return
block:
let rc = db.save()
if rc.isErr:
check rc.error == (0,0)
return
block:
let rc = db.checkBE(relax=relax)
if rc.isErr:
let noisy = true
noisy.say "***", "saveToBackend (4) checkBE",
" debugID=", debugID,
" error=", rc.error,
"\n prePre-cache\n ", prePreCache,
"\n prePre-be\n ", prePreBe,
"\n -------- hashify() -----",
"\n pre-cache\n ", preCache,
"\n pre-be\n ", preBe,
"\n -------- save() --------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check rc.error == (0,0)
return
when true and false:
if trigger:
noisy.say "***", "saveToBackend (9)",
" debugID=", debugID,
"\n prePre-cache\n ", prePreCache,
"\n prePre-be\n ", prePreBe,
"\n -------- hashify() -----",
"\n pre-cache\n ", preCache,
"\n pre-be\n ", preBe,
"\n -------- save() --------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
true
proc fwdWalkVerify(
db: AristoDb;
root: VertexID;
left: HashSet[LeafTie];
noisy: bool;
): tuple[visited: int, error: AristoError] =
debugID: int;
): tuple[visited: int, error: AristoError] =
let
lTabLen = db.top.lTab.len
nLeafs = left.len
var
error = AristoError(0)
lfLeft = left
lty = LeafTie(root: root)
n = 0
while n < lTabLen + 1:
let rc = lty.nearbyRight(db)
#noisy.say "=================== ", n
while n < nLeafs + 1:
let id = n + (nLeafs + 1) * debugID
noisy.say "NearbyBeyondRange =================== ", id
let rc = lty.right db
if rc.isErr:
if rc.error != NearbyBeyondRange:
noisy.say "***", "<", n, "/", lTabLen-1, "> fwd-walk error=", rc.error
error = rc.error
check rc.error == AristoError(0)
break
if rc.error[1] != NearbyBeyondRange or 0 < lfLeft.len:
noisy.say "***", "fwdWalkVerify (1) nearbyRight",
" n=", n, "/", nLeafs,
" lty=", lty.pp(db),
" error=", rc.error
check rc.error == (0,0)
return (n,rc.error[1])
return (0, AristoError(0))
if rc.value notin lfLeft:
noisy.say "***", "fwdWalkVerify (2) lfLeft",
" n=", n, "/", nLeafs,
" lty=", lty.pp(db)
check rc.error == (0,0)
return (n,rc.error[1])
if rc.value.path < high(HashID):
lty.path = HashID(rc.value.path.u256 + 1)
lfLeft.excl rc.value
n.inc
if error != AristoError(0):
return (n,error)
if n != lTabLen:
check n == lTabLen
return (-1, AristoError(1))
(0, AristoError(0))
noisy.say "***", "fwdWalkVerify (9) oops",
" n=", n, "/", nLeafs,
" lfLeft=", lfLeft.pp
check n <= nLeafs
(-1, AristoError(1))
# ------------------------------------------------------------------------------
# Public test function
@ -113,77 +232,149 @@ proc fwdWalkVerify(
proc test_delete*(
noisy: bool;
list: openArray[ProofTrieData];
): bool =
var td = TesterDesc.init 42
rdbPath: string; # Rocks DB storage directory
): bool =
var
td = TesterDesc.init 42
db: AristoDb
defer:
db.finish(flush=true)
for n,w in list:
# Start with new database
db.finish(flush=true)
db = block:
let rc = AristoDb.init(BackendRocksDB,rdbPath)
if rc.isErr:
check rc.error == 0
return
rc.value
# Merge leaf data into main trie (w/vertex ID 1)
let
db = AristoDb.init BackendNone # (top: AristoLayerRef())
lstLen = list.len
leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
leafs = w.kvpLst.mapRootVid VertexID(1)
added = db.merge leafs
preState = db.pp
if added.error != AristoError(0):
check added.error == AristoError(0)
if added.error != 0:
check added.error == 0
return
let rc = db.hashify
if rc.isErr:
check rc.error == (VertexID(0),AristoError(0))
return
# Now `db` represents a (fully labelled) `Merkle Patricia Tree`
# Provide a (reproducible) peudo-random copy of the leafs list
var leafTies = db.top.lTab.sortedKeys
if 2 < leafTies.len:
for n in 0 ..< leafTies.len-1:
let r = n + td.rand(leafTies.len - n)
leafTies[n].swap leafTies[r]
let leafTies = db.randomisedLeafs td
var leafsLeft = leafs.mapIt(it.leafTie).toHashSet
let uMax = leafTies.len - 1
# Complete as `Merkle Patricia Tree` and save to backend, clears cache
block:
let saveBeOk = db.saveToBackend(relax=true, noisy=false, 0)
if not saveBeOk:
check saveBeOk
return
# Trigger subsequent saving tasks in loop below
let (saveMod, saveRest, relax) = block:
if leafTies.len < 17: (7, 3, false)
elif leafTies.len < 31: (11, 7, false)
else: (leafTies.len div 5, 11, true)
# Loop over leaf ties
for u,leafTie in leafTies:
let rc = leafTie.delete db # ,noisy)
# Get leaf vertex ID so making sure that it is on the database
let
runID = n + list.len * u
doSaveBeOk = ((u mod saveMod) == saveRest) # or true
trigger = false # or runID in {60,80}
tailWalkVerify = 20 # + 999
leafVid = block:
let hike = leafTie.hikeUp(db)
if hike.error != 0: # Ooops
check hike.error == 0
return
hike.legs[^1].wp.vid
if doSaveBeOk:
when true and false:
noisy.say "***", "del(1)",
" n=", n, "/", list.len,
" u=", u, "/", leafTies.len,
" runID=", runID,
" relax=", relax,
" leafVid=", leafVid.pp
let saveBeOk = db.saveToBackend(relax=relax, noisy=noisy, runID)
if not saveBeOk:
noisy.say "***", "del(2)",
" n=", n, "/", list.len,
" u=", u, "/", leafTies.len,
" leafVid=", leafVid.pp
check saveBeOk
return
# Delete leaf
let
preCache = db.pp
rc = db.delete leafTie
if rc.isErr:
check rc.error == (VertexID(0),AristoError(0))
check rc.error == (0,0)
return
if leafTie in db.top.lTab:
check leafTie notin db.top.lTab
return
if uMax != db.top.lTab.len + u:
check uMax == db.top.lTab.len + u
# Update list of remaininf leafs
leafsLeft.excl leafTie
let leafVtx = db.getVtx leafVid
if leafVtx.isValid:
noisy.say "***", "del(3)",
" n=", n, "/", list.len,
" u=", u, "/", leafTies.len,
" runID=", runID,
" root=", leafTie.root.pp,
" leafVid=", leafVid.pp,
"\n --------",
"\n pre-cache\n ", preCache,
"\n --------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check leafVtx.isValid == false
return
# Walking the database is too slow for large tables. So the hope is that
# potential errors will not go away and rather pop up later, as well.
const tailCheck = 999
if uMax < u + tailCheck:
if u < uMax:
let vfy = db.fwdWalkVerify(leafTie.root, noisy)
if vfy.error != AristoError(0):
check vfy == (0, AristoError(0))
if leafsLeft.len <= tailWalkVerify:
if u < leafTies.len-1:
let
noisy = false
vfy = db.fwdWalkVerify(leafTie.root, leafsLeft, noisy, runID)
if vfy.error != AristoError(0): # or 7 <= u:
noisy.say "***", "del(5)",
" n=", n, "/", list.len,
" u=", u, "/", leafTies.len,
" runID=", runID,
" root=", leafTie.root.pp,
" leafVid=", leafVid.pp,
"\n leafVtx=", leafVtx.pp(db),
"\n --------",
"\n pre-cache\n ", preCache,
"\n -------- delete() -------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check vfy == (0,0)
return
elif 0 < db.top.sTab.len:
check db.top.sTab.len == 0
return
let rc = db.hashifyCheck(relax=true) # ,noisy=true)
if rc.isErr:
noisy.say "***", "<", n, "/", lstLen-1, ">",
" item=", u, "/", uMax,
"\n --------",
"\n pre-DB\n ", preState,
"\n --------",
"\n cache\n ", db.pp,
"\n --------"
check rc.error == (VertexID(0),AristoError(0))
return
when true and false:
if uMax < u + tailCheck or (u mod 777) == 3:
noisy.say "***", "step lTab=", db.top.lTab.len
if trigger:
noisy.say "***", "del(8)",
" n=", n, "/", list.len,
" u=", u, "/", leafTies.len,
" runID=", runID,
"\n pre-cache\n ", preCache,
"\n -------- delete() -------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
when true: # and false:
noisy.say "***", "del(9) n=", n, "/", list.len, " nLeafs=", leafs.len
when true and false:
noisy.say "***", "sample <", n, "/", list.len-1, ">",
" lstLen=", leafs.len
true
# ------------------------------------------------------------------------------

View File

@ -94,6 +94,12 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
# Public helpers
# ------------------------------------------------------------------------------
proc `==`*[T: AristoError|VertexID](a: T, b: int): bool =
a == T(b)
proc `==`*[S,T](a: (S,T), b: (int,int)): bool =
a == (S(b[0]), T(b[1]))
proc to*(sample: AccountsSample; T: type seq[UndumpAccounts]): T =
## Convert test data into usable in-memory format
let file = sample.file.findFilePath.value

View File

@ -18,8 +18,8 @@ import
unittest2,
../../nimbus/db/aristo/aristo_init/aristo_rocksdb,
../../nimbus/db/aristo/[
aristo_desc, aristo_debug, aristo_get, aristo_hashify, aristo_init,
aristo_hike, aristo_layer, aristo_merge],
aristo_check, aristo_desc, aristo_debug, aristo_get, aristo_hashify,
aristo_init, aristo_hike, aristo_layer, aristo_merge],
./test_helpers
type
@ -76,7 +76,7 @@ proc mergeStepwise(
stopOk = true
let hashesOk = block:
let rc = db.hashifyCheck(relax=true)
let rc = db.checkCache(relax=true)
if rc.isOk:
(VertexID(0),AristoError(0))
else:
@ -196,7 +196,7 @@ proc test_mergeKvpList*(
"\n --------"
block:
let rc = db.hashifyCheck()
let rc = db.checkCache()
if rc.isErr:
noisy.say "*** kvp(4)", "<", n, "/", lstLen-1, "> db dump",
"\n pre-DB\n ", preDb,
@ -211,7 +211,7 @@ proc test_mergeKvpList*(
let rdbHist = block:
let rc = db.save
if rc.isErr:
check rc.error == AristoError(0)
check rc.error == (0,0)
return
rc.value
@ -324,7 +324,7 @@ proc test_mergeProofAndKvpList*(
block:
let
preDb = db.pp(sTabOk=false, lTabOk=false)
preDb = db.pp(xTabOk=false)
rc = db.hashify() # noisy=true)
# Handle known errors
@ -354,7 +354,7 @@ proc test_mergeProofAndKvpList*(
let rdbHist = block:
let rc = db.save
if rc.isErr:
check rc.error == AristoError(0)
check rc.error == (0,0)
return
rc.value

View File

@ -33,40 +33,37 @@ proc fwdWalkLeafsCompleteDB(
let
tLen = tags.len
var
error = AristoError(0)
lty = LeafTie(root: root, path: HashID(tags[0].u256 div 2))
n = 0
while true:
let rc = lty.nearbyRight(db)
let rc = lty.right(db)
#noisy.say "=================== ", n
if rc.isErr:
if rc.error != NearbyBeyondRange:
if rc.error[1] != NearbyBeyondRange:
noisy.say "***", "[", n, "/", tLen-1, "] fwd-walk error=", rc.error
error = rc.error
check rc.error == AristoError(0)
elif n != tLen:
error = AristoError(1)
check rc.error == (0,0)
return (n,rc.error[1])
if n != tLen:
check n == tLen
return (n,AristoError(1))
break
if tLen <= n:
noisy.say "***", "[", n, "/", tLen-1, "] fwd-walk -- ",
" oops, too many leafs (index overflow)"
error = AristoError(1)
check n < tlen
break
return (n,AristoError(1))
if rc.value.path != tags[n]:
noisy.say "***", "[", n, "/", tLen-1, "] fwd-walk -- leafs differ,",
" got=", rc.value.pp(db),
" wanted=", LeafTie(root: root, path: tags[n]).pp(db) #,
# " db-dump\n ", db.pp
error = AristoError(1)
check rc.value.path == tags[n]
break
return (n,AristoError(1))
if rc.value.path < high(HashID):
lty.path = HashID(rc.value.path.u256 + 1)
n.inc
(n,error)
(n,AristoError(0))
proc revWalkLeafsCompleteDB(
@ -78,39 +75,36 @@ proc revWalkLeafsCompleteDB(
let
tLen = tags.len
var
error = AristoError(0)
delta = ((high(UInt256) - tags[^1].u256) div 2)
lty = LeafTie(root: root, path: HashID(tags[^1].u256 + delta))
n = tLen-1
while true: # and false:
let rc = lty.nearbyLeft(db)
let rc = lty.left(db)
if rc.isErr:
if rc.error != NearbyBeyondRange:
if rc.error[1] != NearbyBeyondRange:
noisy.say "***", "[", n, "/", tLen-1, "] rev-walk error=", rc.error
error = rc.error
check rc.error == AristoError(0)
elif n != -1:
error = AristoError(1)
check rc.error == (0,0)
return (n,rc.error[1])
if n != -1:
check n == -1
return (n,AristoError(1))
break
if n < 0:
noisy.say "***", "[", n, "/", tLen-1, "] rev-walk -- ",
" oops, too many leafs (index underflow)"
error = AristoError(1)
check 0 <= n
break
return (n,AristoError(1))
if rc.value.path != tags[n]:
noisy.say "***", "[", n, "/", tLen-1, "] rev-walk -- leafs differ,",
" got=", rc.value.pp(db),
" wanted=", tags[n]..pp(db) #, " db-dump\n ", db.pp
error = AristoError(1)
check rc.value.path == tags[n]
break
return (n,AristoError(1))
if low(HashID) < rc.value.path:
lty.path = HashID(rc.value.path.u256 - 1)
n.dec
(tLen-1 - n, error)
(tLen-1 - n, AristoError(0))
# ------------------------------------------------------------------------------
# Public test function