Aristo db update serialisation (#1700)

* Remove unused unit test sources

* Redefine and document serialised data records for Aristo backend

why:
  Unique record types are determined by the marker byte, i.e. the last
  byte of a serialisation record. This just needed some tweaking after
  adding new record types.
Jordan Hrycaj 2023-08-21 19:18:06 +01:00 committed by GitHub
parent 445fa75251
commit b9a4fd3137
9 changed files with 143 additions and 1172 deletions


@@ -214,11 +214,11 @@ and implemented as 64 bit values, stored *Big Endian* in the serialisation.
+--+--+
| | -- access(16) bitmap
+--+--+
|| | -- marker(2) + unused(6)
| | -- marker(8), 0x08
+--+
where
marker(2) is the double bit array 00
marker(8) is the eight bit array *0000-1000*
For a given index *n* between *0..15*, if the bit at position *n* of the bit
vector *access(16)* is reset to zero, then there is no *n*-th structural
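As an illustrative sketch (not part of the source), probing for the *n*-th
structural reference amounts to testing bit *n* of the *access(16)* word:

    # Hypothetical helper: true if the branch record holds an n-th
    # sub-vertex reference, i.e. bit n of access(16) is set.
    func hasRef(access: uint16; n: int): bool =
      (access and (1'u16 shl n)) != 0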
@@ -281,7 +281,7 @@ data, for RLP encoded or for unstructured data as defined below.
+-- .. --+--+
| | -- code hash, 0, 8 or 32 bytes
+--+ .. --+--+
| | -- bitmask(2)-word array
| | -- 4 x bitmask(2), word array
+--+
where each bitmask(2)-word array entry defines the length of
@@ -290,30 +290,32 @@ data, for RLP encoded or for unstructured data as defined below.
01 -- field length is 8 bytes
10 -- field length is 32 bytes
Apparently, entries 0 and 2 of the bitmask(2) word array cannot have the
value 10 as they refer to the nonce and the storage ID data fields. So, joining
the bitmask(2)-word array to a single byte, the maximum value of that byte is
0x99.
Apparently, entries 0 and 2 of the *4 x bitmask(2)* word array cannot have
the two bit value *10* as they refer to the nonce and the storage ID data
fields. So, joining the *4 x bitmask(2)* word array to a single byte, the
maximum value of that byte is 0x99.
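To illustrate the packing (a sketch consistent with the stated maximum, not
source code), entry 3 may occupy the top two bits and entry 0 the bottom two;
with entries 0 and 2 capped at *01*, the joined byte indeed never exceeds 0x99:

    # Hypothetical packing of the four 2-bit length codes into one byte.
    # Length codes: 0 (field missing), 1 (8 bytes), 2 (32 bytes).
    func payloadMask(e0, e1, e2, e3: uint8): uint8 =
      (e3 shl 6) or (e2 shl 4) or (e1 shl 2) or e0

    # payloadMask(1, 2, 1, 2) == 0x99, the documented maximum.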
### 4.5 Leaf record payload serialisation for RLP encoded data
0 +--+ .. --+
| | | -- data, at least one byte
+--+ .. --+
| | -- marker byte
| | -- marker(8), 0x6a
+--+
where the marker byte is 0xaa
where
marker(8) is the eight bit array *0110-1010*
### 4.6 Leaf record payload serialisation for unstructured data
0 +--+ .. --+
| | | -- data, at least one byte
+--+ .. --+
| | -- marker byte
| | -- marker(8), 0x6b
+--+
where the marker byte is 0xff
where
marker(8) is the eight bit array *0110-1011*
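On the encoding side, a serialiser just appends the discriminating marker to
the payload blob (a sketch mirroring the `blobify()` code further below; the
variable names are illustrative):

    let rlpRecord = rlpBlob & @[0x6a.byte]   # RLP encoded payload
    let rawRecord = rawBlob & @[0x6b.byte]   # unstructured payload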
### 4.7 Serialisation of the list of unused vertex IDs
@@ -322,11 +324,11 @@ the bitmask(2)-word array to a single byte, the maximum value of that byte is
+--+--+--+--+--+--+--+--+
| | -- last unused vertex IDs
+--+--+--+--+--+--+--+--+
|| | -- marker(2) + unused(6)
| | -- marker(8), 0x7c
+--+
where
marker(2) is the double bit array 01
marker(8) is the eight bit array *0111-1100*
The vertex IDs in this record must all be non-zero. The last entry in the list
indicates that all ID values greater or equal than this value are free and can
@@ -334,7 +336,7 @@ be used as vertex IDs. If this record is missing, the value *(1u64,0x01)* is
assumed, i.e. the list with the single vertex ID *1*.
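The implied allocation rule can be sketched as follows (the helper name is
illustrative, not part of the source):

    # With an empty list, vertex ID 1 is the first free one; otherwise
    # every ID from the last list entry upwards is free.
    func firstFreeVid(vGen: seq[VertexID]): VertexID =
      result = if vGen.len == 0: VertexID(1) else: vGen[^1]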
### 4.7 Backend filter record serialisation
### 4.8 Backend filter record serialisation
0 +--+--+--+--+--+ .. --+--+ .. --+
| | -- 32 bytes filter source hash
@@ -360,6 +362,9 @@ assumed, i.e. the list with the single vertex ID *1*.
|| | -- flg(3) + vtxLen(29), 2nd triplet
+--+--+--+--+
...
+--+
| | -- marker(8), 0x7d
+--+
where
+ minimum size of an empty filter is 72 bytes
@@ -380,6 +385,42 @@ assumed, i.e. the list with the single vertex ID *1*.
+ the vtxLen(29) is the number of bytes of the optional vertex record
which has a maximum size of 2^29-1, i.e. just short of 512 MiB
+ the marker(8) is the eight bit array *0111-1101*
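For orientation, the fixed header offsets implied by the layout above can be
sketched as follows (constant names are illustrative):

    const
      srcOfs       = 0  # 32 bytes filter source hash
      trgOfs       = 32 # 32 bytes filter target hash
      nVidsOfs     = 64 # uint32, number of vertex IDs
      nTripletsOfs = 68 # uint32, number of (id,key,vertex) triplets
      tripletOfs   = 72 # first triplet, if any; the marker byte comes last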
### 4.9 Serialisation of a list of filter IDs
0 +-- ..
... -- some filter ID
+--+--+--+--+--+--+--+--+
| | -- last filter IDs
+--+--+--+--+--+--+--+--+
| | -- marker(8), 0x7e
+--+
where
marker(8) is the eight bit array *0111-1110*
This list is used to control the filters on the database. By holding some IDs
in a dedicated list (e.g. the latest filters) one can quickly access particular
entries without searching through the set of filters.
### 4.10 Serialisation record identification
Any of the above records can be uniquely identified by its trailing marker,
i.e. the last byte of the serialised record.
| **Bit mask** | **Hex value**    | **Record type**      | **Chapter reference** |
|:------------:|:----------------:|:--------------------:|:---------------------:|
|  0000 1000   | 0x08             | Branch record        | 4.1                   |
|  10xx xxxx   | 0x80 + x(6)      | Extension record     | 4.2                   |
|  11xx xxxx   | 0xC0 + x(6)      | Leaf record          | 4.3                   |
|  0xxx 0yyy   | (x(3)<<4) + y(3) | account payload      | 4.4                   |
|  0110 1010   | 0x6a             | RLP encoded payload  | 4.5                   |
|  0110 1011   | 0x6b             | unstructured payload | 4.6                   |
|  0111 1100   | 0x7c             | list of vertex IDs   | 4.7                   |
|  0111 1101   | 0x7d             | Filter record        | 4.8                   |
|  0111 1110   | 0x7e             | list of filter IDs   | 4.9                   |
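The table translates directly into a dispatcher on the trailing byte. A sketch
(the proc name is illustrative; the real decoders live in the transcode
module):

    proc recordType(data: openArray[byte]): string =
      case data[^1]
      of 0x08: "branch record"             # 4.1
      of 0x6a: "RLP encoded payload"       # 4.5
      of 0x6b: "unstructured payload"      # 4.6
      of 0x7c: "list of vertex IDs"        # 4.7
      of 0x7d: "filter record"             # 4.8
      of 0x7e: "list of filter IDs"        # 4.9
      else:
        case data[^1] shr 6
        of 2: "extension record"           # 4.2, 10xx xxxx
        of 3: "leaf record"                # 4.3, 11xx xxxx
        else: "account payload or unknown" # 4.4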
5. *Patricia Trie* implementation notes
---------------------------------------


@@ -96,6 +96,9 @@ func isValid*(nd: NodeRef): bool =
func isValid*(pld: PayloadRef): bool =
pld != PayloadRef(nil)
func isValid*(filter: FilterRef): bool =
filter != FilterRef(nil)
func isValid*(key: HashKey): bool =
key != VOID_HASH_KEY
@@ -105,8 +108,8 @@ func isValid*(lbl: HashLabel): bool =
func isValid*(vid: VertexID): bool =
vid != VertexID(0)
func isValid*(filter: FilterRef): bool =
filter != FilterRef(nil)
func isValid*(fid: FilterID): bool =
fid != FilterID(0)
# ------------------------------------------------------------------------------
# Public functions, miscellaneous


@@ -186,9 +186,9 @@ type
GetLeafNotFound
GetVtxNotFound
GetKeyNotFound
GetFilNotFound
GetIdgNotFound
GetLogNotFound
GetEpoNotFound
GetFasNotFound
# RocksDB backend
RdbBeCantCreateDataDir


@@ -23,6 +23,9 @@ type
ByteArray32* = array[32,byte]
## Used for 32 byte hash components repurposed as Merkle hash labels.
FilterID* = distinct uint64
## Identifier used to tag filter logs stored on the backend.
VertexID* = distinct uint64
## Unique identifier for a vertex of the `Aristo Trie`. The vertex is the
## prefix tree (aka `Patricia Trie`) component. When augmented by hash
@@ -89,6 +92,13 @@ proc `+`*(a: VertexID; b: uint64): VertexID = (a.uint64+b).VertexID
proc `-`*(a: VertexID; b: uint64): VertexID = (a.uint64-b).VertexID
proc `-`*(a, b: VertexID): uint64 = (a.uint64 - b.uint64)
# ------------------------------------------------------------------------------
# Public helpers: `FilterID` scalar data model
# ------------------------------------------------------------------------------
func `==`*(a, b: FilterID): bool {.borrow.}
func `$`*(a: FilterID): string {.borrow.}
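# Illustrative use of the borrowed scalar ops (a sketch, not source code):
#   doAssert FilterID(7) == FilterID(7)
#   doAssert $FilterID(7) == "7"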
# ------------------------------------------------------------------------------
# Public helpers: `HashID` scalar data model
# ------------------------------------------------------------------------------


@@ -168,9 +168,9 @@ proc blobify*(pyl: PayloadRef): Blob =
return
case pyl.pType
of RawData:
result = pyl.rawBlob & @[0xff.byte]
result = pyl.rawBlob & @[0x6b.byte]
of RlpData:
result = pyl.rlpBlob & @[0xaa.byte]
result = pyl.rlpBlob & @[0x6a.byte]
of AccountData:
var mask: byte
@@ -203,17 +203,17 @@ proc blobify*(vtx: VertexRef; data: var Blob): AristoError =
## Branch:
## uint64, ... -- list of up to 16 child vertices lookup keys
## uint16 -- index bitmap
## 0x00 -- marker(2) + unused(2)
## 0x08 -- marker(8)
##
## Extension:
## uint64 -- child vertex lookup key
## Blob -- hex encoded partial path (at least one byte)
## 0x80 -- marker(2) + unused(2)
## 0x80 + xx -- marker(2) + pathSegmentLen(6)
##
## Leaf:
## Blob -- opaque leaf data payload (might be zero length)
## Blob -- hex encoded partial path (at least one byte)
## 0xc0 -- marker(2) + partialPathLen(6)
## 0xc0 + yy -- marker(2) + partialPathLen(6)
##
## For a branch record, the bytes of the `access` array indicate the position
## of the Patricia Trie vertex reference. So the `vertexID` with index `n` has
@@ -233,7 +233,7 @@ proc blobify*(vtx: VertexRef; data: var Blob): AristoError =
refs &= vtx.bVid[n].uint64.toBytesBE.toSeq
if refs.len < 16:
return BlobifyBranchMissingRefs
data = refs & access.toBytesBE.toSeq & @[0u8]
data = refs & access.toBytesBE.toSeq & @[0x08u8]
of Extension:
let
pSegm = vtx.ePfx.hexPrefixEncode(isleaf = false)
@@ -262,42 +262,39 @@ proc blobify*(vtx: VertexRef): Result[Blob, AristoError] =
ok(data)
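# Round-trip sketch (illustrative, not part of this module): serialise a
# vertex and decode it again via the `deblobify` variant defined below;
# the trailing marker byte selects the record type when decoding.
#   let rc = vtx.blobify()
#   if rc.isOk:
#     doAssert rc.value.deblobify(VertexRef).isOk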
proc blobify*(vGen: openArray[VertexID]; data: var Blob) =
## This function serialises the vertex ID generator state used in the
## `AristoDbRef` descriptor.
##
## This data record is supposed to be stored in a dedicated slot in the
## persistent tables.
## This function serialises a list of vertex IDs.
## ::
## Admin:
## uint64, ... -- list of IDs
## 0x40
## uint64, ... -- list of IDs
## 0x7c -- marker(8)
##
data.setLen(0)
for w in vGen:
data &= w.uint64.toBytesBE.toSeq
data.add 0x40u8
data.add 0x7Cu8
proc blobify*(vGen: openArray[VertexID]): Blob =
## Variant of `blobify()`
vGen.blobify result
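# Worked example (sketch): blobify(@[VertexID 1, VertexID 9]) encodes as
# two big-endian uint64 values (16 bytes) followed by the marker 0x7c,
# i.e. 17 bytes in total.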
proc blobify*(filter: FilterRef; data: var Blob): AristoError =
## This function serialises an Aristo DB filter object
## ::
## Filter encoding:
## Uint256 -- source key
## Uint256 -- target key
## uint32 -- number of vertex IDs (vertex ID generator state)
## uint32 -- number of (id,key,vertex) triplets
## Uint256 -- source key
## Uint256 -- target key
## uint32 -- number of vertex IDs (vertex ID generator state)
## uint32 -- number of (id,key,vertex) triplets
##
## uint64, ... -- list of vertex IDs (vertex ID generator state)
## uint64, ... -- list of vertex IDs (vertex ID generator state)
##
## uint32 -- flag(3) + vtxLen(29), first triplet
## uint64 -- vertex ID
## Uint256 -- optional key
## Blob -- optional vertex
## uint32 -- flag(3) + vtxLen(29), first triplet
## uint64 -- vertex ID
## Uint256 -- optional key
## Blob -- optional vertex
##
## ... -- more triplets
## 0x7d -- marker(8)
##
## ... -- more triplets
##
data.setLen(0)
data &= filter.src.ByteArray32.toSeq
@@ -370,6 +367,7 @@ proc blobify*(filter: FilterRef; data: var Blob): AristoError =
keyBlob
data[68 ..< 72] = n.uint32.toBytesBE.toSeq
data.add 0x7Du8
proc blobify*(filter: FilterRef): Result[Blob, AristoError] =
## ...
@@ -379,6 +377,22 @@ proc blobify*(filter: FilterRef): Result[Blob, AristoError] =
return err(error)
ok data
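# Size sketch (illustrative): an empty filter serialises to the 72 byte
# fixed header (src 32, trg 32, two uint32 counters) plus the trailing
# 0x7d marker, i.e. 73 bytes in total.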
proc blobify*(vFos: openArray[FilterID]; data: var Blob) =
## This function serialises a list of filter IDs.
## ::
## uint64, ... -- list of IDs
## 0x7e -- marker(8)
##
data.setLen(0)
for w in vFos:
data &= w.uint64.toBytesBE.toSeq
data.add 0x7Eu8
proc blobify*(vFos: openArray[FilterID]): Blob =
## Variant of `blobify()`
vFos.blobify result
# -------------
proc deblobify(data: Blob; pyl: var PayloadRef): AristoError =
@@ -387,10 +401,10 @@ proc deblobify(data: Blob; pyl: var PayloadRef): AristoError =
return
let mask = data[^1]
if mask == 0xff:
if mask == 0x6b: # unstructured payload
pyl = PayloadRef(pType: RawData, rawBlob: data[0 .. ^2])
return
if mask == 0xaa:
if mask == 0x6a: # RLP encoded payload
pyl = PayloadRef(pType: RlpData, rlpBlob: data[0 .. ^2])
return
var
@@ -455,6 +469,8 @@ proc deblobify*(record: Blob; vtx: var VertexRef): AristoError =
case record[^1] shr 6:
of 0: # `Branch` vertex
if record[^1] != 0x08u8:
return DeblobUnknown
if record.len < 19: # at least two edges
return DeblobBranchTooShort
if (record.len mod 8) != 3:
@@ -516,7 +532,6 @@ proc deblobify*(record: Blob; vtx: var VertexRef): AristoError =
else:
return DeblobUnknown
proc deblobify*(data: Blob; T: type VertexRef): Result[T,AristoError] =
## Variant of `deblobify()` for vertex deserialisation.
var vtx = T(nil) # will be auto-initialised
@@ -525,6 +540,7 @@ proc deblobify*(data: Blob; T: type VertexRef): Result[T,AristoError] =
return err(info)
ok vtx
proc deblobify*(data: Blob; vGen: var seq[VertexID]): AristoError =
## De-serialise the data record encoded with `blobify()` into the vertex ID
## generator argument `vGen`.
@@ -533,7 +549,7 @@ proc deblobify*(data: Blob; vGen: var seq[VertexID]): AristoError =
else:
if (data.len mod 8) != 1:
return DeblobSizeGarbled
if data[^1] shr 6 != 1:
if data[^1] != 0x7c:
return DeblobWrongType
for n in 0 ..< (data.len div 8):
let w = n * 8
@@ -552,6 +568,8 @@ proc deblobify*(data: Blob; filter: var FilterRef): AristoError =
## De-serialise an Aristo DB filter object
if data.len < 72: # minimum length 72 for an empty filter
return DeblobFilterTooShort
if data[^1] != 0x7d:
return DeblobWrongType
let f = FilterRef()
(addr f.src.ByteArray32[0]).copyMem(unsafeAddr data[0], 32)
@@ -604,7 +622,7 @@ proc deblobify*(data: Blob; filter: var FilterRef): AristoError =
elif (flag mod 3) == 1: # {0,1,2} x {1}
f.sTab[vid] = VertexRef(nil)
if data.len != offs:
if data.len != offs + 1:
return DeblobFilterSizeGarbled
filter = f
@@ -617,6 +635,28 @@ proc deblobify*(data: Blob; T: type FilterRef): Result[T,AristoError] =
return err(error)
ok filter
proc deblobify*(data: Blob; vFas: var seq[FilterID]): AristoError =
## De-serialise the data record encoded with `blobify()` into the filter
## ID list argument `vFas`.
if data.len == 0:
vFas = @[]
else:
if (data.len mod 8) != 1:
return DeblobSizeGarbled
if data[^1] != 0x7e:
return DeblobWrongType
for n in 0 ..< (data.len div 8):
let w = n * 8
vFas.add (uint64.fromBytesBE data[w ..< w + 8]).FilterID
proc deblobify*(data: Blob; T: type seq[FilterID]): Result[T,AristoError] =
## Variant of `deblobify()` for deserialising a list of filter IDs
var vFas: seq[FilterID]
let info = data.deblobify vFas
if info != AristoError(0):
return err(info)
ok vFas
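# Round-trip sketch (illustrative):
#   doAssert blobify(@[FilterID 3]).deblobify(seq[FilterID]).value ==
#     @[FilterID 3]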
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@@ -1,197 +0,0 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
## Parked here, currently used only for transcode tests
import
std/tables,
eth/common,
stew/results,
../../nimbus/db/aristo/[aristo_desc, aristo_transcode, aristo_vid]
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc convertPartially(
db: AristoDbRef;
vtx: VertexRef;
nd: var NodeRef;
): seq[VertexID] =
## Convert a vertex by looking up the cached hashes. This function does not
## recurse. It returns the vertex IDs that are missing in order to complete
## the conversion in a single step.
case vtx.vType:
of Leaf:
nd = NodeRef(
vType: Leaf,
lPfx: vtx.lPfx,
lData: vtx.lData)
if vtx.lData.pType != AccountData:
return
let vid = vtx.lData.account.storageID
if vid.isValid:
let lbl = db.top.kMap.getOrVoid vid
if lbl.isValid:
nd.key[0] = lbl.key
return
result.add vid
of Extension:
nd = NodeRef(
vType: Extension,
ePfx: vtx.ePfx,
eVid: vtx.eVid)
let lbl = db.top.kMap.getOrVoid vtx.eVid
if lbl.isValid:
nd.key[0] = lbl.key
return
result.add vtx.eVid
of Branch:
nd = NodeRef(
vType: Branch,
bVid: vtx.bVid)
for n in 0..15:
if vtx.bVid[n].isValid:
let lbl = db.top.kMap.getOrVoid vtx.bVid[n]
if lbl.isValid:
nd.key[n] = lbl.key
continue
result.add vtx.bVid[n]
proc convertPartiallyOk(
db: AristoDbRef;
vtx: VertexRef;
nd: var NodeRef;
): bool =
## Variant of `convertPartially()`, shortcut for `convertPartially().len==0`.
case vtx.vType:
of Leaf:
nd = NodeRef(
vType: Leaf,
lPfx: vtx.lPfx,
lData: vtx.lData)
if vtx.lData.pType != AccountData:
result = true
else:
let vid = vtx.lData.account.storageID
if vid.isValid:
let lbl = db.top.kMap.getOrVoid vid
if lbl.isValid:
nd.key[0] = lbl.key
result = true
of Extension:
nd = NodeRef(
vType: Extension,
ePfx: vtx.ePfx,
eVid: vtx.eVid)
let lbl = db.top.kMap.getOrVoid vtx.eVid
if lbl.isValid:
nd.key[0] = lbl.key
result = true
of Branch:
nd = NodeRef(
vType: Branch,
bVid: vtx.bVid)
result = true
for n in 0..15:
if vtx.bVid[n].isValid:
let lbl = db.top.kMap.getOrVoid vtx.bVid[n]
if lbl.isValid:
nd.key[n] = lbl.key
continue
return false
proc cachedVID(db: AristoDbRef; lbl: HashLabel): VertexID =
## Get vertex ID from reverse cache
result = db.top.pAmk.getOrVoid lbl
if not result.isValid:
result = db.vidAttach lbl
# ------------------------------------------------------------------------------
# Public functions for `VertexID` => `HashKey` mapping
# ------------------------------------------------------------------------------
proc pal*(db: AristoDbRef; rootID: VertexID; vid: VertexID): HashKey =
## Retrieve the cached `Merkle` hash (aka `HashKey` object) associated
## with the `VertexID` argument `vid`. Return a zero `HashKey` if there
## is none.
##
## If the vertex ID `vid` is not found in the cache, then the structural
## table is checked whether the cache can be updated.
if not db.top.isNil:
let lbl = db.top.kMap.getOrVoid vid
if lbl.isValid:
return lbl.key
let vtx = db.top.sTab.getOrVoid vid
if vtx.isValid:
var node: NodeRef
if db.convertPartiallyOk(vtx,node):
var w = initRlpWriter()
w.append node
result = w.finish.keccakHash.data.HashKey
db.top.kMap[vid] = HashLabel(root: rootID, key: result)
# ------------------------------------------------------------------------------
# Public functions extending/completing vertex records
# ------------------------------------------------------------------------------
proc updated*(nd: NodeRef; root: VertexID; db: AristoDbRef): NodeRef =
## Return a copy of the argument node `nd` with updated missing vertex IDs.
##
## For a `Leaf` node, the payload data `PayloadRef` type reference is *not*
## duplicated and returned as-is.
##
## This function will not complain if all `Merkle` hashes (aka `HashKey`
## objects) are zero for either `Extension` or `Leaf` nodes.
if nd.isValid:
case nd.vType:
of Leaf:
result = NodeRef(
vType: Leaf,
lPfx: nd.lPfx,
lData: nd.lData)
of Extension:
result = NodeRef(
vType: Extension,
ePfx: nd.ePfx)
if nd.key[0].isValid:
result.eVid = db.cachedVID HashLabel(root: root, key: nd.key[0])
result.key[0] = nd.key[0]
of Branch:
result = NodeRef(
vType: Branch,
key: nd.key)
for n in 0..15:
if nd.key[n].isValid:
result.bVid[n] = db.cachedVID HashLabel(root: root, key: nd.key[n])
proc asNode*(vtx: VertexRef; db: AristoDbRef): NodeRef =
## Return a `NodeRef` object by augmenting missing `Merkle` hashes (aka
## `HashKey` objects) from the cache or from calculated cached vertex
## entries, if available.
##
## If not all `Merkle` hashes are available in a single lookup, then the
## result object is a wrapper around an error code.
if not db.convertPartiallyOk(vtx, result):
return NodeRef(error: CacheMissingNodekeys)
proc asNode*(rc: Result[VertexRef,AristoError]; db: AristoDbRef): NodeRef =
## Variant of `asNode()`.
if rc.isErr:
return NodeRef(error: rc.error)
rc.value.asNode(db)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@@ -1,382 +0,0 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Aristo (aka Patricia) DB records delete test
import
std/[algorithm, bitops, sequtils, strutils, sets],
eth/common,
stew/results,
unittest2,
../../nimbus/db/aristo/[
aristo_check, aristo_desc, aristo_debug, aristo_delete, aristo_get,
aristo_hashify, aristo_hike, aristo_init, aristo_layer, aristo_nearby,
aristo_merge],
./test_helpers
type
TesterDesc = object
prng: uint32 ## random state
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc sortedKeys(lTab: Table[LeafTie,VertexID]): seq[LeafTie] =
lTab.keys.toSeq.sorted(cmp = proc(a,b: LeafTie): int = cmp(a,b))
proc pp(q: HashSet[LeafTie]): string =
"{" & q.toSeq.mapIt(it.pp).join(",") & "}"
# --------------
proc posixPrngRand(state: var uint32): byte =
## POSIX.1-2001 example of a rand() implementation, see manual page rand(3).
state = state * 1103515245 + 12345;
let val = (state shr 16) and 32767 # mod 2^15
(val shr 8).byte # Extract second byte
proc rand[W: SomeInteger|VertexID](ap: var TesterDesc; T: type W): T =
var a: array[sizeof T,byte]
for n in 0 ..< sizeof T:
a[n] = ap.prng.posixPrngRand().byte
when sizeof(T) == 1:
let w = uint8.fromBytesBE(a).T
when sizeof(T) == 2:
let w = uint16.fromBytesBE(a).T
when sizeof(T) == 4:
let w = uint32.fromBytesBE(a).T
else:
let w = uint64.fromBytesBE(a).T
when T is SomeUnsignedInt:
# That way, `fromBytesBE()` can be applied to `uint`
result = w
else:
# That way the result is independent of endianness
(addr result).copyMem(unsafeAddr w, sizeof w)
proc init(T: type TesterDesc; seed: int): TesterDesc =
result.prng = (seed and 0x7fffffff).uint32
proc rand(td: var TesterDesc; top: int): int =
if 0 < top:
let mask = (1 shl (8 * sizeof(int) - top.countLeadingZeroBits)) - 1
for _ in 0 ..< 100:
let w = mask and td.rand(typeof(result))
if w < top:
return w
raiseAssert "Not here (!)"
# -----------------------
proc randomisedLeafs(db: AristoDbRef; td: var TesterDesc): seq[LeafTie] =
result = db.top.lTab.sortedKeys
if 2 < result.len:
for n in 0 ..< result.len-1:
let r = n + td.rand(result.len - n)
result[n].swap result[r]
proc saveToBackend(
db: AristoDbRef;
relax: bool;
noisy: bool;
debugID: int;
): bool =
let
trigger = false # or (debugID == 340)
prePreCache = db.pp
prePreBe = db.to(TypedBackendRef).pp(db)
if trigger:
noisy.say "***", "saveToBackend =========================== ", debugID
block:
let rc = db.checkCache(relax=true)
if rc.isErr:
noisy.say "***", "saveToBackend (1) hashifyCheck",
" debugID=", debugID,
" error=", rc.error,
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check rc.error == (0,0)
return
block:
let rc = db.hashify # (noisy = trigger)
if rc.isErr:
noisy.say "***", "saveToBackend (2) hashify",
" debugID=", debugID,
" error=", rc.error,
"\n pre-cache\n ", prePreCache,
"\n pre-be\n ", prePreBe,
"\n -------- hasify() -----",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check rc.error == (0,0)
return
let
preCache = db.pp
preBe = db.to(TypedBackendRef).pp(db)
block:
let rc = db.checkBE(relax=true)
if rc.isErr:
let noisy = true
noisy.say "***", "saveToBackend (3) checkBE",
" debugID=", debugID,
" error=", rc.error,
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check rc.error == (0,0)
return
block:
let rc = db.save()
if rc.isErr:
check rc.error == (0,0)
return
block:
let rc = db.checkBE(relax=relax)
if rc.isErr:
let noisy = true
noisy.say "***", "saveToBackend (4) checkBE",
" debugID=", debugID,
" error=", rc.error,
"\n prePre-cache\n ", prePreCache,
"\n prePre-be\n ", prePreBe,
"\n -------- hashify() -----",
"\n pre-cache\n ", preCache,
"\n pre-be\n ", preBe,
"\n -------- save() --------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check rc.error == (0,0)
return
when true and false:
if trigger:
noisy.say "***", "saveToBackend (9)",
" debugID=", debugID,
"\n prePre-cache\n ", prePreCache,
"\n prePre-be\n ", prePreBe,
"\n -------- hashify() -----",
"\n pre-cache\n ", preCache,
"\n pre-be\n ", preBe,
"\n -------- save() --------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
true
proc fwdWalkVerify(
db: AristoDbRef;
root: VertexID;
left: HashSet[LeafTie];
noisy: bool;
debugID: int;
): tuple[visited: int, error: AristoError] =
let
nLeafs = left.len
var
lfLeft = left
lty = LeafTie(root: root)
n = 0
while n < nLeafs + 1:
let id = n + (nLeafs + 1) * debugID
noisy.say "NearbyBeyondRange =================== ", id
let rc = lty.right db
if rc.isErr:
if rc.error[1] != NearbyBeyondRange or 0 < lfLeft.len:
noisy.say "***", "fwdWalkVerify (1) nearbyRight",
" n=", n, "/", nLeafs,
" lty=", lty.pp(db),
" error=", rc.error
check rc.error == (0,0)
return (n,rc.error[1])
return (0, AristoError(0))
if rc.value notin lfLeft:
noisy.say "***", "fwdWalkVerify (2) lfLeft",
" n=", n, "/", nLeafs,
" lty=", lty.pp(db)
check rc.error == (0,0)
return (n,rc.error[1])
if rc.value.path < high(HashID):
lty.path = HashID(rc.value.path.u256 + 1)
lfLeft.excl rc.value
n.inc
noisy.say "***", "fwdWalkVerify (9) oops",
" n=", n, "/", nLeafs,
" lfLeft=", lfLeft.pp
check n <= nLeafs
(-1, AristoError(1))
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc testDelete*(
noisy: bool;
list: openArray[ProofTrieData];
rdbPath: string; # Rocks DB storage directory
): bool =
var
td = TesterDesc.init 42
db = AristoDbRef()
defer:
db.finish(flush=true)
for n,w in list:
# Start with new database
db.finish(flush=true)
db = block:
let rc = AristoDbRef.init(BackendRocksDB,rdbPath)
if rc.isErr:
check rc.error == 0
return
rc.value
# Merge leaf data into main trie (w/vertex ID 1)
let
leafs = w.kvpLst.mapRootVid VertexID(1)
added = db.merge leafs
if added.error != 0:
check added.error == 0
return
# Provide a (reproducible) pseudo-random copy of the leafs list
let leafTies = db.randomisedLeafs td
var leafsLeft = leafs.mapIt(it.leafTie).toHashSet
# Complete as `Merkle Patricia Tree` and save to backend, clears cache
block:
let saveBeOk = db.saveToBackend(relax=true, noisy=false, 0)
if not saveBeOk:
check saveBeOk
return
# Trigger subsequent saving tasks in loop below
let (saveMod, saveRest, relax) = block:
if leafTies.len < 17: (7, 3, false)
elif leafTies.len < 31: (11, 7, false)
else: (leafTies.len div 5, 11, true)
# Loop over leaf ties
for u,leafTie in leafTies:
# Get leaf vertex ID, making sure that it is in the database
let
runID = n + list.len * u
doSaveBeOk = ((u mod saveMod) == saveRest) # or true
trigger = false # or runID in {60,80}
tailWalkVerify = 20 # + 999
leafVid = block:
let hike = leafTie.hikeUp(db)
if hike.error != 0: # Ooops
check hike.error == 0
return
hike.legs[^1].wp.vid
if doSaveBeOk:
when true and false:
noisy.say "***", "del(1)",
" n=", n, "/", list.len,
" u=", u, "/", leafTies.len,
" runID=", runID,
" relax=", relax,
" leafVid=", leafVid.pp
let saveBeOk = db.saveToBackend(relax=relax, noisy=noisy, runID)
if not saveBeOk:
noisy.say "***", "del(2)",
" n=", n, "/", list.len,
" u=", u, "/", leafTies.len,
" leafVid=", leafVid.pp
check saveBeOk
return
# Delete leaf
let
preCache = db.pp
rc = db.delete leafTie
if rc.isErr:
check rc.error == (0,0)
return
# Update list of remaining leafs
leafsLeft.excl leafTie
let leafVtx = db.getVtx leafVid
if leafVtx.isValid:
noisy.say "***", "del(3)",
" n=", n, "/", list.len,
" u=", u, "/", leafTies.len,
" runID=", runID,
" root=", leafTie.root.pp,
" leafVid=", leafVid.pp,
"\n --------",
"\n pre-cache\n ", preCache,
"\n --------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check leafVtx.isValid == false
return
# Walking the database is too slow for large tables. So the hope is that
# potential errors will not go away but rather pop up later as well.
if leafsLeft.len <= tailWalkVerify:
if u < leafTies.len-1:
let
noisy = false
vfy = db.fwdWalkVerify(leafTie.root, leafsLeft, noisy, runID)
if vfy.error != AristoError(0): # or 7 <= u:
noisy.say "***", "del(5)",
" n=", n, "/", list.len,
" u=", u, "/", leafTies.len,
" runID=", runID,
" root=", leafTie.root.pp,
" leafVid=", leafVid.pp,
"\n leafVtx=", leafVtx.pp(db),
"\n --------",
"\n pre-cache\n ", preCache,
"\n -------- delete() -------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
check vfy == (0,0)
return
when true and false:
if trigger:
noisy.say "***", "del(8)",
" n=", n, "/", list.len,
" u=", u, "/", leafTies.len,
" runID=", runID,
"\n pre-cache\n ", preCache,
"\n -------- delete() -------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(TypedBackendRef).pp(db),
"\n --------"
when true and false:
noisy.say "***", "del(9) n=", n, "/", list.len, " nLeafs=", leafs.len
true
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@@ -1,373 +0,0 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Aristo (aka Patricia) DB records merge test
import
std/tables,
eth/common,
stew/[byteutils, results],
unittest2,
../../nimbus/db/aristo/aristo_init/aristo_rocksdb,
../../nimbus/db/aristo/[
aristo_check, aristo_desc, aristo_debug, aristo_get, aristo_hashify,
aristo_init, aristo_hike, aristo_layer, aristo_merge],
./test_helpers
type
KnownHasherFailure* = seq[(string,(int,AristoError))]
## (<sample-name> & "#" <instance>, (<vertex-id>,<error-symbol>))
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc pp(w: tuple[merged: int, dups: int, error: AristoError]): string =
result = "(merged: " & $w.merged & ", dups: " & $w.dups
if w.error != AristoError(0):
result &= ", error: " & $w.error
result &= ")"
proc mergeStepwise(
db: AristoDbRef;
leafs: openArray[LeafTiePayload];
noisy = false;
): tuple[merged: int, dups: int, error: AristoError] =
let
lTabLen = db.top.lTab.len
var
(merged, dups, error) = (0, 0, AristoError(0))
for n,leaf in leafs:
var
event = false
dumpOk = false or event
stopOk = false
when true: # and false:
noisy.say "***", "step <", n, "/", leafs.len-1, "> leaf=", leaf.pp(db)
let
preState = db.pp
hike = db.merge leaf
ekih = leaf.leafTie.hikeUp(db)
case hike.error:
of AristoError(0):
merged.inc
of MergeLeafPathCachedAlready:
dups.inc
else:
error = hike.error
dumpOk = true
stopOk = true
if ekih.error != AristoError(0) or
ekih.legs[^1].wp.vtx.lData.blob != leaf.payload.blob:
dumpOk = true
stopOk = true
let hashesOk = block:
let rc = db.checkCache(relax=true)
if rc.isOk:
(VertexID(0),AristoError(0))
else:
dumpOk = true
stopOk = true
if error == AristoError(0):
error = rc.error[1]
rc.error
if db.top.lTab.len < lTabLen + merged:
dumpOk = true
if dumpOk:
noisy.say "***", "<", n, "/", leafs.len-1, ">",
" merged=", merged,
" dups=", dups,
" leaf=", leaf.pp(db),
"\n --------",
"\n hike\n ", hike.pp(db),
"\n ekih\n ", ekih.pp(db),
"\n pre-DB\n ", preState,
"\n --------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(RdbBackendRef).pp(db),
"\n --------"
check hike.error in {AristoError(0), MergeLeafPathCachedAlready}
check ekih.error == AristoError(0)
check hashesOk == (VertexID(0),AristoError(0))
if ekih.legs.len == 0:
check 0 < ekih.legs.len
elif ekih.legs[^1].wp.vtx.vType != Leaf:
check ekih.legs[^1].wp.vtx.vType == Leaf
elif hike.error != MergeLeafPathCachedAlready:
check ekih.legs[^1].wp.vtx.lData.blob.toHex == leaf.payload.blob.toHex
if db.top.lTab.len < lTabLen + merged:
check lTabLen + merged <= db.top.lTab.len
error = GenericError
stopOk = true # makes no sense to go on
if stopOk:
noisy.say "***", "<", n, "/", leafs.len-1, "> stop"
break
(merged,dups,error)
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc test_mergeKvpList*(
noisy: bool;
list: openArray[ProofTrieData];
rdbPath: string; # Rocks DB storage directory
resetDb = false;
): bool =
var
db = AristoDbRef()
defer:
db.finish(flush=true)
for n,w in list:
if resetDb or db.top.isNil:
db.finish(flush=true)
db = block:
let rc = AristoDbRef.init(BackendRocksDB,rdbPath)
if rc.isErr:
check rc.error == AristoError(0)
return
rc.value
let
lstLen = list.len
lTabLen = db.top.lTab.len
leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
when true and false:
if true and 40 <= n:
noisy.say "*** kvp(1)", "<", n, "/", lstLen-1, ">",
" nLeafs=", leafs.len,
"\n cache\n ", db.pp,
"\n backend\n ", db.to(RdbBackendRef).pp(db),
"\n --------"
let
added = db.merge leafs
#added = db.mergeStepwise(leafs) #, noisy=40 <= n)
if added.error != AristoError(0):
check added.error == AristoError(0)
return
# There might be an extra leaf in the cache after inserting a Branch
# which forks a previous leaf node and a new one.
check lTabLen + added.merged <= db.top.lTab.len
check added.merged + added.dups == leafs.len
let
preDb = db.pp
block:
let rc = db.hashify # (noisy=(0 < n))
if rc.isErr: # or true:
noisy.say "*** kvp(2)", "<", n, "/", lstLen-1, ">",
" added=", added,
"\n pre-DB\n ", preDb,
"\n --------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(RdbBackendRef).pp(db),
"\n --------"
if rc.isErr:
check rc.error == (VertexID(0),AristoError(0)) # force message
return
when true and false:
noisy.say "*** kvp(3)", "<", n, "/", lstLen-1, ">",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(RdbBackendRef).pp(db),
"\n --------"
block:
let rc = db.checkCache()
if rc.isErr:
noisy.say "*** kvp(4)", "<", n, "/", lstLen-1, "> db dump",
"\n pre-DB\n ", preDb,
"\n --------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(RdbBackendRef).pp(db),
"\n --------"
if rc.isErr:
check rc == Result[void,(VertexID,AristoError)].ok()
return
block:
let rc = db.save
if rc.isErr:
check rc.error == (0,0)
return
when true and false:
noisy.say "*** kvp(5)", "<", n, "/", lstLen-1, ">",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(RdbBackendRef).pp(db),
"\n --------"
when true and false:
noisy.say "*** kvp(9)", "sample ", n, "/", lstLen-1,
" merged=", added.merged,
" dup=", added.dups
true
proc test_mergeProofAndKvpList*(
noisy: bool;
list: openArray[ProofTrieData];
rdbPath: string; # Rocks DB storage directory
resetDb = false;
idPfx = "";
oops: KnownHasherFailure = @[];
): bool =
let
oopsTab = oops.toTable
var
db = AristoDbRef()
rootKey = HashKey.default
count = 0
defer:
db.finish(flush=true)
for n,w in list:
if resetDb or w.root != rootKey or w.proof.len == 0:
db.finish(flush=true)
db = block:
let rc = AristoDbRef.init(BackendRocksDB,rdbPath)
if rc.isErr:
check rc.error == 0
return
rc.value
rootKey = w.root
count = 0
count.inc
let
testId = idPfx & "#" & $w.id & "." & $n
lstLen = list.len
sTabLen = db.top.sTab.len
lTabLen = db.top.lTab.len
leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
when true and false:
noisy.say "***", "proofs(1) <", n, "/", lstLen-1, ">",
" groups=", count, " nLeafs=", leafs.len,
"\n cache\n ", db.pp,
"\n backend\n ", db.to(RdbBackendRef).pp(db),
"\n --------"
var
proved: tuple[merged: int, dups: int, error: AristoError]
preDb: string
if 0 < w.proof.len:
let rc = db.merge(rootKey, VertexID(1))
if rc.isErr:
check rc.error == 0
return
preDb = db.pp
proved = db.merge(w.proof, rc.value) # , noisy)
check proved.error in {AristoError(0),MergeHashKeyCachedAlready}
check w.proof.len == proved.merged + proved.dups
check db.top.lTab.len == lTabLen
check db.top.sTab.len <= proved.merged + sTabLen
check proved.merged < db.top.pAmk.len
when true and false:
if 0 < w.proof.len:
noisy.say "***", "proofs(2) <", n, "/", lstLen-1, ">",
" groups=", count,
" nLeafs=", leafs.len,
" proved=", proved,
"\n pre-DB\n ", preDb,
"\n --------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(RdbBackendRef).pp(db),
"\n --------"
return
let
merged = db.merge leafs
#merged = db.mergeStepwise(leafs, noisy=false)
check db.top.lTab.len == lTabLen + merged.merged
check merged.merged + merged.dups == leafs.len
block:
if merged.error notin {AristoError(0), MergeLeafPathCachedAlready}:
noisy.say "***", "<", n, "/", lstLen-1, ">\n ", db.pp
check merged.error in {AristoError(0), MergeLeafPathCachedAlready}
return
when true and false:
noisy.say "***", "proofs(3) <", n, "/", lstLen-1, ">",
" groups=", count, " nLeafs=", leafs.len, " merged=", merged,
"\n cache\n ", db.pp,
"\n backend\n ", db.to(RdbBackendRef).pp(db),
"\n --------"
block:
let
preDb = db.pp(xTabOk=false)
rc = db.hashify() # noisy=true)
# Handle known errors
if oopsTab.hasKey testId:
if rc.isOK:
check rc.isErr
return
let oops = (VertexID(oopsTab[testId][0]), oopsTab[testId][1])
if oops != rc.error:
check oops == rc.error
return
# Otherwise, check for correctness
elif rc.isErr:
noisy.say "***", "proofs(4) <", n, "/", lstLen-1, ">",
" testId=", testId,
" groups=", count,
"\n pre-DB",
"\n ", preDb,
"\n --------",
"\n cache\n ", db.pp,
"\n backend\n ", db.to(RdbBackendRef).pp(db),
"\n --------"
check rc.error == (VertexID(0),AristoError(0))
return
block:
let rc = db.save
if rc.isErr:
check rc.error == (0,0)
return
when true and false:
noisy.say "***", "proofs(5) <", n, "/", lstLen-1, ">",
" groups=", count,
"\n cache\n ", db.pp,
"\n backend\n ", db.to(RdbBackendRef).pp(db),
"\n --------"
when true and false:
noisy.say "***", "proofs(6) <", n, "/", lstLen-1, ">",
" groups=", count, " proved=", proved.pp, " merged=", merged.pp
true
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@@ -1,171 +0,0 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Aristo (aka Patricia) DB records nearby/walk test
import
std/[algorithm, sequtils, sets],
eth/common,
stew/results,
unittest2,
../../nimbus/db/aristo/[
aristo_desc, aristo_debug, aristo_merge, aristo_nearby],
./test_helpers
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc fwdWalkLeafsCompleteDB(
db: AristoDbRef;
root: VertexID;
tags: openArray[HashID];
noisy: bool;
): tuple[visited: int, error: AristoError] =
let
tLen = tags.len
var
lty = LeafTie(root: root, path: HashID(tags[0].u256 div 2))
n = 0
while true:
let rc = lty.right(db)
#noisy.say "=================== ", n
if rc.isErr:
if rc.error[1] != NearbyBeyondRange:
noisy.say "***", "[", n, "/", tLen-1, "] fwd-walk error=", rc.error
check rc.error == (0,0)
return (n,rc.error[1])
if n != tLen:
check n == tLen
return (n,AristoError(1))
break
if tLen <= n:
noisy.say "***", "[", n, "/", tLen-1, "] fwd-walk -- ",
" oops, too many leafs (index overflow)"
check n < tLen
return (n,AristoError(1))
if rc.value.path != tags[n]:
noisy.say "***", "[", n, "/", tLen-1, "] fwd-walk -- leafs differ,",
" got=", rc.value.pp(db),
" wanted=", LeafTie(root: root, path: tags[n]).pp(db) #,
# " db-dump\n ", db.pp
check rc.value.path == tags[n]
return (n,AristoError(1))
if rc.value.path < high(HashID):
lty.path = HashID(rc.value.path.u256 + 1)
n.inc
(n,AristoError(0))
proc revWalkLeafsCompleteDB(
db: AristoDbRef;
root: VertexID;
tags: openArray[HashID];
noisy: bool;
): tuple[visited: int, error: AristoError] =
let
tLen = tags.len
var
delta = ((high(UInt256) - tags[^1].u256) div 2)
lty = LeafTie(root: root, path: HashID(tags[^1].u256 + delta))
n = tLen-1
while true: # and false:
let rc = lty.left(db)
if rc.isErr:
if rc.error[1] != NearbyBeyondRange:
noisy.say "***", "[", n, "/", tLen-1, "] rev-walk error=", rc.error
check rc.error == (0,0)
return (n,rc.error[1])
if n != -1:
check n == -1
return (n,AristoError(1))
break
if n < 0:
noisy.say "***", "[", n, "/", tLen-1, "] rev-walk -- ",
" oops, too many leafs (index underflow)"
check 0 <= n
return (n,AristoError(1))
if rc.value.path != tags[n]:
noisy.say "***", "[", n, "/", tLen-1, "] rev-walk -- leafs differ,",
" got=", rc.value.pp(db),
" wanted=", tags[n]..pp(db) #, " db-dump\n ", db.pp
check rc.value.path == tags[n]
return (n,AristoError(1))
if low(HashID) < rc.value.path:
lty.path = HashID(rc.value.path.u256 - 1)
n.dec
(tLen-1 - n, AristoError(0))
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc test_nearbyKvpList*(
noisy: bool;
list: openArray[ProofTrieData];
resetDb = false;
): bool =
let
db = AristoDbRef()
var
rootKey = HashKey.default
tagSet: HashSet[HashID]
count = 0
for n,w in list:
if resetDb or w.root != rootKey:
db.top = AristoLayerRef()
rootKey = w.root
tagSet.reset
count = 0
count.inc
let
lstLen = list.len
lTabLen = db.top.lTab.len
leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
added = db.merge leafs
if added.error != AristoError(0):
check added.error == AristoError(0)
return
check db.top.lTab.len == lTabLen + added.merged
check added.merged + added.dups == leafs.len
for kvp in leafs:
tagSet.incl kvp.leafTie.path
let
tags = tagSet.toSeq.sorted
rootVid = leafs[0].leafTie.root
fwdWalk = db.fwdWalkLeafsCompleteDB(rootVid, tags, noisy=true)
revWalk = db.revWalkLeafsCompleteDB(rootVid, tags, noisy=true)
check fwdWalk.error == AristoError(0)
check revWalk.error == AristoError(0)
check fwdWalk == revWalk
if {fwdWalk.error, revWalk.error} != {AristoError(0)}:
noisy.say "***", "<", n, "/", lstLen-1, ">",
" groups=", count, " db dump",
"\n post-state ", db.pp,
"\n"
return
#noisy.say "***", "sample ",n,"/",lstLen-1, " visited=", fwdWalk.visited
true
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------