Aristo db implement filter serialisation for storage (#1695)
* Remove concept of empty/blind filters

  why: Not needed. A non-existent filter is coded as a nil reference.

* Slightly generalised backend iterators

  why:
  * VertexID as key for the ID generator state makes no sense
  * there will be more tables addressed by non-VertexID keys

* Store serialised/blobified vertices on memory backend

  why: This is more in line with the RocksDB backend, so more appropriate
  for testing when comparing behaviour. For a speedy memory database, a
  backend-less variant should be used.

* Drop the `Aristo` prefix from names `AristoLayerRef`, etc.

* Suppress compiler warning

  why: duplicate imports

* Add filter serialisation transcoder

  why: Will be used as storage format
parent df9c73cf98 · commit 4c9141ffac

@@ -14,7 +14,6 @@ import
   ../../constants,
   ../../db/accounts_cache,
   ../../transaction,
   ../../utils/utils,
   ../../vm_state,
   ../../vm_types,
-  ../../utils/debug,

@@ -220,7 +220,7 @@ and implemented as 64 bit values, stored *Big Endian* in the serialisation.
 where
     marker(2) is the double bit array 00
 
-For a given index *n* between *0..15*, if the bit at position *n* of the it
+For a given index *n* between *0..15*, if the bit at position *n* of the bit
 vector *access(16)* is reset to zero, then there is no *n*-th structural
 *vertexID*. Otherwise one calculates

@@ -320,7 +320,7 @@ the bitmask(2)-word array to a single byte, the maximum value of that byte is
      0 +-- ..
          ...                      -- recycled vertexIDs
        +--+--+--+--+--+--+--+--+
-       |                       |  -- bottom of unused vertexIDs
+       |                       |  -- bottom of unused vertex IDs
        +--+--+--+--+--+--+--+--+
        ||                      |  -- marker(2) + unused(6)
        +--+

@@ -337,6 +337,52 @@ this value can be used as vertexID.
 The vertexIDs in the descriptor record must all be non-zero and the record
 itself should be allocated in the structural table associated with the zero key.
 
+### 4.7 Backend filter record serialisation
+
+        0 +--+--+--+--+--+ .. --+--+ .. --+
+          |                              | -- 32 bytes filter source hash
+       32 +--+--+--+--+--+ .. --+--+ .. --+
+          |                              | -- 32 bytes filter target hash
+       64 +--+--+--+--+--+ .. --+--+ .. --+
+          |          |                     -- number of unused vertex IDs
+       68 +--+--+--+--+
+          |          |                     -- number of structural triplets
+       72 +--+--+--+--+--+ .. --+
+          |                    |           -- first unused vertex ID
+       80 +--+--+--+--+--+ .. --+
+          ...                              -- more unused vertex IDs
+       N1 +--+--+--+--+
+          ||          |                    -- flg(3) + vtxLen(29), 1st triplet
+          +--+--+--+--+--+ .. --+
+          |                    |           -- vertex ID of first triplet
+          +--+--+--+--+--+ .. --+--+ .. --+
+          |                              | -- optional 32 bytes hash key
+          +--+--+--+--+--+ .. --+--+ .. --+
+          ...                              -- optional vertex record
+       N2 +--+--+--+--+
+          ||          |                    -- flg(3) + vtxLen(29), 2nd triplet
+          +--+--+--+--+
+          ...
+
+        where
+          + the minimum size of an empty filter is 72 bytes
+
+          + the flg(3) represents the tuple (key-mode,vertex-mode) encoding
+            the serialised storage states
+
+              0 -- encoded and present
+              1 -- not encoded, void => considered deleted
+              2 -- not encoded, to be ignored
+
+            so, when encoded as
+
+              flg(3) = key-mode * 3 + vertex-mode
+
+            the tuple (2,2) will never occur, hence flg(3) < 9
+
+          + the vtxLen(29) is the number of bytes of the optional vertex
+            record, which has maximum size 2^29-1, i.e. just short of 512 MiB
+
 
 5. *Patricia Trie* implementation notes
 ---------------------------------------

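Note: the flg(3)/vtxLen(29) word above is plain arithmetic and easy to check
independently. Below is a minimal stand-alone sketch (hypothetical helper
names, not part of the Aristo sources) of how the mode pair and the record
length pack into one 32 bit word:

    # Toy pack/unpack for the flg(3) + vtxLen(29) word described above.
    # `packFlg`/`unpackFlg` are made-up names, for illustration only.
    type Mode = enum
      Present = 0   # encoded and present
      Deleted = 1   # not encoded, void => considered deleted
      Ignored = 2   # not encoded, to be ignored

    func packFlg(keyMode, vtxMode: Mode; vtxLen: int): uint32 =
      doAssert vtxLen < 1 shl 29              # vtxLen(29): under 512 MiB
      let flg = keyMode.ord * 3 + vtxMode.ord # flg(3) < 9, (2,2) never occurs
      (flg.uint32 shl 29) or vtxLen.uint32

    func unpackFlg(w: uint32): tuple[keyMode, vtxMode: Mode, vtxLen: int] =
      let flg = (w shr 29).int
      (Mode(flg div 3), Mode(flg mod 3), (w and 0x1fffffff'u32).int)

    doAssert packFlg(Present, Deleted, 0) == 1'u32 shl 29
    let u = unpackFlg packFlg(Deleted, Present, 42)
    doAssert u.keyMode == Deleted and u.vtxMode == Present and u.vtxLen == 42
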
@@ -400,7 +446,7 @@ database, the above architecture mutates to
 When looking at the descriptor API, there are no changes when accessing data
 via *db1*, *db2*, or *db3*. In a different, more algebraic notation, the
 above transformation is written as
 
 
       | tx1, ø |   (8)
       | tx2, ø |   PBE
       | tx3, ø |

@@ -414,7 +460,7 @@ transformation is written as
 
 The system can be further converted without changing the API by committing
 and saving *tx2* on the middle line of matrix (9)
 
 
       | ø,   ø        |   (10)
       | ø,   tx2+~tx1 |   tx1+PBE
       | tx3, ~tx1     |

@@ -317,7 +317,7 @@ proc ppXMap*(
   else:
     result &= "}"
 
-proc ppFilter(fl: AristoFilterRef; db: AristoDbRef; indent: int): string =
+proc ppFilter(fl: FilterRef; db: AristoDbRef; indent: int): string =
   ## Walk over filter tables
   let
     pfx = indent.toPfx

@@ -329,10 +329,9 @@ proc ppFilter(fl: AristoFilterRef; db: AristoDbRef; indent: int): string =
     return
   result &= pfx & "trg(" & fl.trg.ppKey & ")"
   result &= pfx & "src(" & fl.src.ppKey & ")"
-  result &= pfx & "vGen" & pfx1 & "["
-  if fl.vGen.isSome:
-    result &= fl.vGen.unsafeGet.mapIt(it.ppVid).join(",")
-  result &= "]" & pfx & "sTab" & pfx1 & "{"
+  result &= pfx & "vGen" & pfx1 & "[" &
+      fl.vGen.mapIt(it.ppVid).join(",") & "]"
+  result &= pfx & "sTab" & pfx1 & "{"
   for n,vid in fl.sTab.sortedKeys:
     let vtx = fl.sTab.getOrVoid vid
     if 0 < n: result &= pfx2

@@ -366,7 +365,7 @@ proc ppBe[T](be: T; db: AristoDbRef; indent: int): string =
   db.roFilter.ppFilter(db, indent+1) & indent.toPfx & be.ppBeOnly(db,indent+1)
 
 proc ppLayer(
-    layer: AristoLayerRef;
+    layer: LayerRef;
     db: AristoDbRef;
     vGenOk: bool;
     sTabOk: bool;

@@ -454,7 +453,7 @@ proc pp*(p: PayloadRef, db = AristoDbRef()): string =
 proc pp*(nd: VertexRef, db = AristoDbRef()): string =
   nd.ppVtx(db, VertexID(0))
 
-proc pp*(nd: NodeRef; root: VertexID; db: AristoDBRef): string =
+proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
   if not nd.isValid:
     result = "n/a"
   elif nd.error != AristoError(0):

@@ -567,7 +566,7 @@ proc pp*(
   # ---------------------
 
 proc pp*(
-    layer: AristoLayerRef;
+    layer: LayerRef;
     db: AristoDbRef;
     indent = 4;
       ): string =

@@ -575,7 +574,7 @@ proc pp*(
     db, vGenOk=true, sTabOk=true, lTabOk=true, kMapOk=true, pPrfOk=true)
 
 proc pp*(
-    layer: AristoLayerRef;
+    layer: LayerRef;
     db: AristoDbRef;
     xTabOk: bool;
     indent = 4;

@@ -584,7 +583,7 @@ proc pp*(
     db, vGenOk=true, sTabOk=xTabOk, lTabOk=xTabOk, kMapOk=true, pPrfOk=true)
 
 proc pp*(
-    layer: AristoLayerRef;
+    layer: LayerRef;
     db: AristoDbRef;
     xTabOk: bool;
     kMapOk: bool;

@@ -618,7 +617,7 @@ proc pp*(
   db.top.pp(db, xTabOk=xTabOk, kMapOk=kMapOk, other=other, indent=indent)
 
 proc pp*(
-    filter: AristoFilterRef;
+    filter: FilterRef;
     db = AristoDbRef();
     indent = 4;
       ): string =

@@ -29,7 +29,7 @@ import
   aristo_error, aristo_types_identifiers, aristo_types_structural]
 
 from ./aristo_desc/aristo_types_backend
-  import AristoBackendRef
+  import BackendRef
 
 # Not auto-exporting backend
 export

@@ -44,7 +44,7 @@ type
     txUid*: uint                       ## Unique ID among transactions
     level*: int                        ## Stack index for this transaction
 
-  AristoDudesRef* = ref object
+  DudesRef* = ref object
     case rwOk*: bool
     of true:
       roDudes*: HashSet[AristoDbRef]   ## Read-only peers

@@ -54,14 +54,14 @@ type
   AristoDbRef* = ref AristoDbObj
   AristoDbObj* = object
     ## Three tier database object supporting distributed instances.
-    top*: AristoLayerRef               ## Database working layer, mutable
-    stack*: seq[AristoLayerRef]        ## Stashed immutable parent layers
-    roFilter*: AristoFilterRef         ## Apply read filter (locks writing)
-    backend*: AristoBackendRef         ## Backend database (may well be `nil`)
+    top*: LayerRef                     ## Database working layer, mutable
+    stack*: seq[LayerRef]              ## Stashed immutable parent layers
+    roFilter*: FilterRef               ## Apply read filter (locks writing)
+    backend*: BackendRef               ## Backend database (may well be `nil`)
 
     txRef*: AristoTxRef                ## Latest active transaction
     txUidGen*: uint                    ## Tx-relative unique number generator
-    dudes*: AristoDudesRef             ## Related DB descriptors
+    dudes*: DudesRef                   ## Related DB descriptors
 
     # Debugging data below, might go away in future
     xMap*: Table[HashLabel,VertexID]   ## For pretty printing, extends `pAmk`

@@ -105,6 +105,9 @@ func isValid*(lbl: HashLabel): bool =
 func isValid*(vid: VertexID): bool =
   vid != VertexID(0)
 
+func isValid*(filter: FilterRef): bool =
+  filter != FilterRef(nil)
+
 # ------------------------------------------------------------------------------
 # Public functions, miscellaneous
 # ------------------------------------------------------------------------------

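Note: this `isValid` helper is the whole replacement for the removed
empty/blind filter concept — a missing filter is simply a nil reference. A
self-contained toy illustration of the convention (string fields standing in
for `HashKey`):

    # Toy model of the nil-filter convention: a missing filter is a nil
    # reference, never a special "empty" filter object.
    type FilterToy = ref object
      src, trg: string          # stand-ins for the HashKey fields

    func isValid(f: FilterToy): bool =
      f != FilterToy(nil)

    var roFilter: FilterToy     # nil by default, i.e. no filter installed
    doAssert not roFilter.isValid
    roFilter = FilterToy(src: "src-root", trg: "trg-root")
    doAssert roFilter.isValid
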
@@ -28,6 +28,7 @@ type
   BlobifyExtMissingRefs
   BlobifyExtPathOverflow
   BlobifyLeafPathOverflow
+  BlobifyFilterRecordOverflow
 
   DeblobNilArgument
   DeblobUnknown

@@ -48,6 +49,11 @@ type
   DeblobBalanceLenUnsupported
   DeblobStorageLenUnsupported
   DeblobCodeLenUnsupported
+  DeblobFilterTooShort
+  DeblobFilterGenTooShort
+  DeblobFilterTrpTooShort
+  DeblobFilterTrpVtxSizeGarbled
+  DeblobFilterSizeGarbled
 
   # Converter `asNode()`, currently for unit tests only
   CacheMissingNodekeys

@@ -181,6 +187,8 @@ type
   GetVtxNotFound
   GetKeyNotFound
   GetIdgNotFound
+  GetLogNotFound
+  GetEpoNotFound
 
   # RocksDB backend
   RdbBeCantCreateDataDir

@@ -79,7 +79,7 @@ type
 
   # -------------
 
-  AristoBackendRef* = ref object of RootRef
+  BackendRef* = ref object of RootRef
     ## Backend interface.
     getVtxFn*: GetVtxFn   ## Read vertex record
     getKeyFn*: GetKeyFn   ## Read Merkle hash/key

@@ -79,7 +79,7 @@ func `<`*(a, b: VertexID): bool {.borrow.}
 func `<=`*(a, b: VertexID): bool {.borrow.}
 func `==`*(a, b: VertexID): bool {.borrow.}
 func cmp*(a, b: VertexID): int {.borrow.}
-func `$`*(a: VertexID): string = $a.uint64
+func `$`*(a: VertexID): string {.borrow.}
 
 func `==`*(a: VertexID; b: static[uint]): bool =
   a == VertexID(b)

@@ -15,7 +15,7 @@
 {.push raises: [].}
 
 import
-  std/[options, sets, tables],
+  std/[sets, tables],
   eth/[common, trie/nibbles],
   "."/[aristo_error, aristo_types_identifiers]

@@ -80,23 +80,15 @@ type
 
   # ----------------------
 
-  AristoDeltaRef* = ref object
-    ## Delta layer between backend and top/stack transaction layers.
-    src*: HashKey                      ## Applicable to this state root
-    sTab*: seq[(VertexID,VertexRef)]   ## Filter structural vertex table
-    kMap*: seq[(VertexID,HashKey)]     ## Filter Merkle hash key mapping
-    vGen*: Option[seq[VertexID]]       ## Filter unique vertex ID generator
-    trg*: HashKey                      ## Resulting state root (i.e. `kMap[1]`)
-
-  AristoFilterRef* = ref object
+  FilterRef* = ref object
     ## Delta layer with expanded sequences for quick access
     src*: HashKey                      ## Applicable to this state root
+    trg*: HashKey                      ## Resulting state root (i.e. `kMap[1]`)
     sTab*: Table[VertexID,VertexRef]   ## Filter structural vertex table
     kMap*: Table[VertexID,HashKey]     ## Filter Merkle hash key mapping
-    vGen*: Option[seq[VertexID]]       ## Filter unique vertex ID generator
-    trg*: HashKey                      ## Resulting state root (i.e. `kMap[1]`)
+    vGen*: seq[VertexID]               ## Filter unique vertex ID generator
 
-  AristoLayerRef* = ref object
+  LayerRef* = ref object
     ## Hexary trie database layer structures. Any layer holds the full
     ## change relative to the backend.
     sTab*: Table[VertexID,VertexRef]   ## Structural vertex table

@@ -238,9 +230,9 @@ proc dup*(node: NodeRef): NodeRef =
     bVid: node.bVid,
     key: node.key)
 
-proc dup*(layer: AristoLayerRef): AristoLayerRef =
+proc dup*(layer: LayerRef): LayerRef =
   ## Duplicate layer.
-  result = AristoLayerRef(
+  result = LayerRef(
     lTab: layer.lTab,
     kMap: layer.kMap,
     pAmk: layer.pAmk,

@@ -250,9 +242,9 @@ proc dup*(layer: AristoLayerRef): AristoLayerRef =
   for (k,v) in layer.sTab.pairs:
     result.sTab[k] = v.dup
 
-proc dup*(filter: AristoFilterRef): AristoFilterRef =
+proc dup*(filter: FilterRef): FilterRef =
   ## Duplicate filter.
-  result = AristoFilterRef(
+  result = FilterRef(
     src: filter.src,
     kMap: filter.kMap,
     vGen: filter.vGen,

@@ -13,7 +13,7 @@
 ##
 
 import
-  std/[options, sequtils, sets, tables],
+  std/[sequtils, sets, tables],
   results,
   ./aristo_desc/aristo_types_backend,
   "."/[aristo_desc, aristo_get, aristo_vid]

@@ -29,7 +29,7 @@ type
 
 proc getLayerStateRoots(
     db: AristoDbRef;
-    layer: AristoLayerRef;
+    layer: LayerRef;
     chunkedMpt: bool;
       ): Result[StateRootPair,AristoError] =
   ## Get the Merkle hash key for target state root to arrive at after this

@@ -69,10 +69,10 @@ proc getLayerStateRoots(
 
 proc merge(
     db: AristoDbRef;
-    upper: AristoFilterRef;            # Src filter, `nil` is ok
-    lower: AristoFilterRef;            # Trg filter, `nil` is ok
+    upper: FilterRef;                  # Src filter, `nil` is ok
+    lower: FilterRef;                  # Trg filter, `nil` is ok
     beStateRoot: HashKey;              # Merkle hash key
-      ): Result[AristoFilterRef,(VertexID,AristoError)] =
+      ): Result[FilterRef,(VertexID,AristoError)] =
   ## Merge argument `upper` into the `lower` filter instance.
   ##
   ## Comparing before and after merge

@@ -87,19 +87,16 @@ proc merge(
   ##       |
   ##
   # Degenerate case: `upper` is void
-  if lower.isNil or lower.vGen.isNone:
-    if upper.isNil or upper.vGen.isNone:
+  if lower.isNil:
+    if upper.isNil:
       # Even more degenerate case when both filters are void
-      return ok AristoFilterRef(
-        src: beStateRoot,
-        trg: beStateRoot,
-        vGen: none(seq[VertexID]))
+      return ok FilterRef(nil)
     if upper.src != beStateRoot:
       return err((VertexID(1),FilStateRootMismatch))
     return ok(upper)
 
   # Degenerate case: `upper` is non-trivial and `lower` is void
-  if upper.isNil or upper.vGen.isNone:
+  if upper.isNil:
     if lower.src != beStateRoot:
       return err((VertexID(0), FilStateRootMismatch))
     return ok(lower)

@@ -110,7 +107,7 @@ proc merge(
     return err((VertexID(0), FilStateRootMismatch))
 
   # There is no need to deep copy table vertices as they will not be modified.
-  let newFilter = AristoFilterRef(
+  let newFilter = FilterRef(
     src: lower.src,
     sTab: lower.sTab,
     kMap: lower.kMap,

@@ -148,19 +145,19 @@ proc merge(
 # Public helpers
 # ------------------------------------------------------------------------------
 
-func bulk*(filter: AristoFilterRef): int =
+func bulk*(filter: FilterRef): int =
   ## Some measurement for the size of the filter calculated as the length of
   ## the `sTab[]` table plus the length of the `kMap[]` table. This can be used
   ## to set a threshold when to flush the staging area to the backend DB to
   ## be used in `stow()`.
   ##
-  ## The `filter` argument may be `nil`, i.e. `AristoFilterRef(nil).bulk == 0`
+  ## The `filter` argument may be `nil`, i.e. `FilterRef(nil).bulk == 0`
   if filter.isNil: 0 else: filter.sTab.len + filter.kMap.len
 
-func bulk*(layer: AristolayerRef): int =
+func bulk*(layer: LayerRef): int =
   ## Variant of `bulk()` for layers rather than filters.
   ##
-  ## The `layer` argument may be `nil`, i.e. `AristoLayerRef(nil).bulk == 0`
+  ## The `layer` argument may be `nil`, i.e. `LayerRef(nil).bulk == 0`
   if layer.isNil: 0 else: layer.sTab.len + layer.kMap.len

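Note: the doc comment above suggests using `bulk()` as a flush threshold for
`stow()`. A toy sketch of such a policy (simplified table types and a made-up
threshold, not Aristo code):

    import std/tables

    type StagedFilter = ref object
      sTab: Table[uint64, string]   # stand-in for the structural table
      kMap: Table[uint64, string]   # stand-in for the key mapping

    func bulk(f: StagedFilter): int =
      ## Same measure as above: sTab entries plus kMap entries, nil counts 0.
      if f.isNil: 0 else: f.sTab.len + f.kMap.len

    const flushThreshold = 10_000   # made-up figure

    var staged = StagedFilter()
    staged.sTab[1u64] = "vtx"
    if flushThreshold < staged.bulk:
      echo "flush: stow(persistent = true)"  # would persist the staging area
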
@@ -169,9 +166,9 @@ func bulk*(layer: AristolayerRef): int =
 
 proc fwdFilter*(
     db: AristoDbRef;
-    layer: AristoLayerRef;
+    layer: LayerRef;
     chunkedMpt = false;
-      ): Result[AristoFilterRef,(VertexID,AristoError)] =
+      ): Result[FilterRef,(VertexID,AristoError)] =
   ## Assemble forward delta, i.e. changes to the backend equivalent to applying
   ## the current top layer.
   ##

@@ -191,22 +188,22 @@ proc fwdFilter*(
     if rc.isOK:
       (rc.value.be, rc.value.fg)
     elif rc.error == FilPrettyPointlessLayer:
-      return ok AristoFilterRef(vGen: none(seq[VertexID]))
+      return ok FilterRef(nil)
     else:
       return err((VertexID(1), rc.error))
 
-  ok AristoFilterRef(
+  ok FilterRef(
     src: srcRoot,
     sTab: layer.sTab,
     kMap: layer.kMap.pairs.toSeq.mapIt((it[0],it[1].key)).toTable,
-    vGen: some(layer.vGen.vidReorg), # Compact recycled IDs
+    vGen: layer.vGen.vidReorg,       # Compact recycled IDs
     trg: trgRoot)
 
 
 proc revFilter*(
     db: AristoDbRef;
-    filter: AristoFilterRef;
-      ): Result[AristoFilterRef,(VertexID,AristoError)] =
+    filter: FilterRef;
+      ): Result[FilterRef,(VertexID,AristoError)] =
   ## Assemble reverse filter for the `filter` argument, i.e. changes to the
   ## backend that reverse the effect of applying this read-only filter.
   ##

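Note: `fwdFilter`/`revFilter` realise the `tx` and `~tx` terms of the README
algebra — the forward delta records the new values, the reverse delta records
what they overwrite. A toy model over a plain table (not the Aristo types):

    import std/[options, tables]

    type Delta = Table[int, Option[string]]  # key -> new value; none = delete

    proc apply(state: var Table[int, string]; d: Delta) =
      for k, v in d.pairs:
        if v.isSome: state[k] = v.get
        else: state.del k

    proc reverse(state: Table[int, string]; d: Delta): Delta =
      ## Record what applying `d` would overwrite, so it can be undone.
      for k in d.keys:
        result[k] = if k in state: some(state[k]) else: none(string)

    var s = {1: "a"}.toTable
    let fwd: Delta = {1: some("b"), 2: some("c")}.toTable
    let rev = s.reverse(fwd)
    s.apply(fwd)                             # forward: {1: "b", 2: "c"}
    s.apply(rev)                             # reverse undoes forward
    doAssert s == {1: "a"}.toTable
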
@@ -214,7 +211,7 @@ proc revFilter*(
   ## backend (excluding optionally installed read-only filter.)
   ##
   # Register MPT state roots for reverting back
-  let rev = AristoFilterRef(
+  let rev = FilterRef(
     src: filter.trg,
     trg: filter.src)

@@ -223,7 +220,7 @@ proc revFilter*(
     let rc = db.getIdgUBE()
     if rc.isErr:
       return err((VertexID(0), rc.error))
-    rev.vGen = some rc.value
+    rev.vGen = rc.value
 
   # Calculate reverse changes for the `sTab[]` structural table
   for vid in filter.sTab.keys:

@@ -253,7 +250,7 @@ proc revFilter*(
 
 proc merge*(
     db: AristoDbRef;
-    filter: AristoFilterRef;
+    filter: FilterRef;
       ): Result[void,(VertexID,AristoError)] =
   ## Merge the argument `filter` into the read-only filter layer. Note that
   ## this function has no control of the filter source. Having merged the

@@ -297,9 +294,6 @@ proc resolveBE*(db: AristoDbRef): Result[void,(VertexID,AristoError)] =
   # Blind or missing filter
   if db.roFilter.isNil:
     return ok()
-  if db.roFilter.vGen.isNone:
-    db.roFilter = AristoFilterRef(nil)
-    return ok()
 
   let ubeRootKey = block:
     let rc = db.getKeyUBE VertexID(1)

@@ -311,7 +305,7 @@ proc resolveBE*(db: AristoDbRef): Result[void,(VertexID,AristoError)] =
       return err((VertexID(1),rc.error))
 
   # Filters rollback helper
-  var roFilters: seq[(AristoDbRef,AristoFilterRef)]
+  var roFilters: seq[(AristoDbRef,FilterRef)]
   proc rollback() =
     for (d,f) in roFilters:
       d.roFilter = f

@@ -342,7 +336,7 @@ proc resolveBE*(db: AristoDbRef): Result[void,(VertexID,AristoError)] =
     txFrame = be.putBegFn()
   be.putVtxFn(txFrame, db.roFilter.sTab.pairs.toSeq)
   be.putKeyFn(txFrame, db.roFilter.kMap.pairs.toSeq)
-  be.putIdgFn(txFrame, db.roFilter.vGen.unsafeGet)
+  be.putIdgFn(txFrame, db.roFilter.vGen)
   let w = be.putEndFn txFrame
   if w != AristoError(0):
     rollback()

@@ -357,7 +351,7 @@ proc ackqRwMode*(db: AristoDbRef): Result[void,AristoError] =
   # Steal dudes list, make the rw-parent a read-only dude
   let parent = db.dudes.rwDb
   db.dudes = parent.dudes
-  parent.dudes = AristoDudesRef(rwOk: false, rwDb: db)
+  parent.dudes = DudesRef(rwOk: false, rwDb: db)
 
   # Exclude self
   db.dudes.roDudes.excl db

@@ -390,9 +384,9 @@ proc dispose*(db: AristoDbRef): Result[void,AristoError] =
 
     # Unlink more so it would not do harm if used wrongly
     db.stack.setLen(0)
-    db.backend = AristoBackendRef(nil)
+    db.backend = BackendRef(nil)
     db.txRef = AristoTxRef(nil)
-    db.dudes = AristoDudesRef(nil)
+    db.dudes = DudesRef(nil)
     return ok()
 
   err(FilNotReadOnlyDude)

@@ -14,7 +14,7 @@
 {.push raises: [].}
 
 import
-  std/[options, tables],
+  std/tables,
   results,
   ./aristo_desc

@@ -62,8 +62,8 @@ proc getIdgBE*(
     db: AristoDbRef;
       ): Result[seq[VertexID],AristoError] =
   ## Get the ID generator state from the `backend` layer if available.
-  if not db.roFilter.isNil and db.roFilter.vGen.isSome:
-    return ok(db.roFilter.vGen.unsafeGet)
+  if not db.roFilter.isNil:
+    return ok(db.roFilter.vGen)
   db.getIdgUBE()
 
 proc getVtxBE*(

@@ -18,20 +18,30 @@ const
   ## Enforce session tracking
 
 type
-  AristoBackendType* = enum
+  BackendType* = enum
     BackendVoid                  ## For providing backend-less constructor
     BackendMemory
     BackendRocksDB
 
-  TypedBackendRef* = ref object of AristoBackendRef
-    kind*: AristoBackendType     ## Backend type identifier
+  StorageType* = enum
+    ## Storage types, key prefix
+    Oops = 0
+    IdgPfx = 1                   ## ID generator
+    VtxPfx = 2                   ## Vertex data
+    KeyPfx = 3                   ## Key/hash data
+
+  TypedBackendRef* = ref object of BackendRef
+    kind*: BackendType           ## Backend type identifier
     when verifyIxId:
       txGen: uint                ## Transaction ID generator (for debugging)
       txId: uint                 ## Active transaction ID (for debugging)
 
   TypedPutHdlErrRef* = ref object of RootRef
-    pfx*: AristoStorageType      ## Error sub-table
-    vid*: VertexID               ## Vertex ID where the error occurred
+    case pfx*: StorageType       ## Error sub-table
+    of VtxPfx, KeyPfx:
+      vid*: VertexID             ## Vertex ID where the error occurred
+    of IdgPfx, Oops:
+      discard
     code*: AristoError           ## Error code (if any)
 
   TypedPutHdlRef* = ref object of PutHdlRef

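Note: the `TypedPutHdlErrRef` rework above turns a plain record into an object
variant, so `vid` only exists for the vertex/key sub-tables. A minimal
stand-alone illustration of the idiom (toy names, not the Aristo types):

    # Minimal stand-alone illustration of the object variant idiom.
    type
      StorageToy = enum
        Oops, IdgPfx, VtxPfx, KeyPfx

      PutErrToy = object
        case pfx: StorageToy    # discriminator selects the active fields
        of VtxPfx, KeyPfx:
          vid: uint64           # only vertex/key errors carry a vertex ID
        of IdgPfx, Oops:
          discard
        code: int               # common to all branches

    let e = PutErrToy(pfx: KeyPfx, vid: 7, code: 1)
    doAssert e.vid == 7
    let i = PutErrToy(pfx: IdgPfx, code: 2)  # no `vid` in this branch
    doAssert i.code == 2
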
@@ -39,12 +49,6 @@ type
     when verifyIxId:
       txId: uint                 ## Transaction ID (for debugging)
 
-  AristoStorageType* = enum
-    ## Storage types, key prefix
-    IdgPfx = 1                   ## ID generator
-    VtxPfx = 2                   ## Vertex data
-    KeyPfx = 3                   ## Key/hash data
-
 # ------------------------------------------------------------------------------
 # Public helpers
 # ------------------------------------------------------------------------------

@@ -40,12 +40,12 @@ import
 type
   MemBackendRef* = ref object of TypedBackendRef
     ## Inheriting table so access can be extended for debugging purposes
-    sTab: Table[VertexID,VertexRef]  ## Structural vertex table making up a trie
+    sTab: Table[VertexID,Blob]       ## Structural vertex table making up a trie
     kMap: Table[VertexID,HashKey]    ## Merkle hash key mapping
     vGen: seq[VertexID]
 
   MemPutHdlRef = ref object of TypedPutHdlRef
-    sTab: Table[VertexID,VertexRef]
+    sTab: Table[VertexID,Blob]
     kMap: Table[VertexID,HashKey]
     vGen: seq[VertexID]
     vGenOk: bool

@@ -77,9 +77,13 @@ proc endSession(hdl: PutHdlRef; db: MemBackendRef): MemPutHdlRef =
 proc getVtxFn(db: MemBackendRef): GetVtxFn =
   result =
     proc(vid: VertexID): Result[VertexRef,AristoError] =
-      let vtx = db.sTab.getOrVoid vid
-      if vtx.isValid:
-        return ok vtx.dup
+      # Fetch serialised data record
+      let data = db.sTab.getOrDefault(vid, EmptyBlob)
+      if 0 < data.len:
+        let rc = data.deblobify VertexRef
+        if rc.isErr:
+          debug logTxt "getVtxFn() failed", vid, error=rc.error, info=rc.error
+        return rc
       err(GetVtxNotFound)
 
 proc getKeyFn(db: MemBackendRef): GetKeyFn =

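Note: with this change the memory backend keeps serialised records and decodes
them on read, mirroring the RocksDB backend. A toy version of that
store/decode round trip (a trivial string codec standing in for
blobify/deblobify):

    import std/tables

    # Trivial byte codec standing in for vtx.blobify / data.deblobify.
    proc blobifyToy(s: string): seq[byte] =
      for c in s: result.add byte(c)

    proc deblobifyToy(b: seq[byte]): string =
      for c in b: result.add char(c)

    var sTab: Table[uint64, seq[byte]]        # the backend now stores blobs
    sTab[1u64] = blobifyToy "vertex record"   # serialise on put

    let data = sTab.getOrDefault(1u64, @[])   # fetch serialised data record
    doAssert 0 < data.len
    doAssert deblobifyToy(data) == "vertex record"  # decode on get
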
@@ -109,15 +113,17 @@ proc putVtxFn(db: MemBackendRef): PutVtxFn =
       let hdl = hdl.getSession db
       if hdl.error.isNil:
         for (vid,vtx) in vrps:
-          if not vtx.isNil:
-            let rc = vtx.blobify # verify data record
+          if vtx.isValid:
+            let rc = vtx.blobify()
             if rc.isErr:
               hdl.error = TypedPutHdlErrRef(
                 pfx: VtxPfx,
                 vid: vid,
                 code: rc.error)
               return
-            hdl.sTab[vid] = vtx.dup
+            hdl.sTab[vid] = rc.value
+          else:
+            hdl.sTab[vid] = EmptyBlob
 
 proc putKeyFn(db: MemBackendRef): PutKeyFn =
   result =

@@ -141,13 +147,18 @@ proc putEndFn(db: MemBackendRef): PutEndFn =
     proc(hdl: PutHdlRef): AristoError =
       let hdl = hdl.endSession db
       if not hdl.error.isNil:
-        debug logTxt "putEndFn: failed",
-          pfx=hdl.error.pfx, vid=hdl.error.vid, error=hdl.error.code
+        case hdl.error.pfx:
+        of VtxPfx, KeyPfx:
+          debug logTxt "putEndFn: vtx/key failed",
+            pfx=hdl.error.pfx, vid=hdl.error.vid, error=hdl.error.code
+        else:
+          debug logTxt "putEndFn: failed",
+            pfx=hdl.error.pfx, error=hdl.error.code
         return hdl.error.code
 
-      for (vid,vtx) in hdl.sTab.pairs:
-        if vtx.isValid:
-          db.sTab[vid] = vtx
+      for (vid,data) in hdl.sTab.pairs:
+        if 0 < data.len:
+          db.sTab[vid] = data
         else:
           db.sTab.del vid

@@ -172,7 +183,7 @@ proc closeFn(db: MemBackendRef): CloseFn =
 # Public functions
 # ------------------------------------------------------------------------------
 
-proc memoryBackend*(): AristoBackendRef =
+proc memoryBackend*(): BackendRef =
   let db = MemBackendRef(kind: BackendMemory)
 
   db.getVtxFn = getVtxFn db

@@ -195,19 +206,23 @@ proc memoryBackend*(): BackendRef =
 
 iterator walkIdg*(
     be: MemBackendRef;
-      ): tuple[n: int, vid: VertexID, vGen: seq[VertexID]] =
+      ): tuple[n: int, id: uint64, vGen: seq[VertexID]] =
   ## Iteration over the ID generator sub-table (there is at most one instance).
   if 0 < be.vGen.len:
-    yield(0, VertexID(0), be.vGen)
+    yield(0, 0u64, be.vGen)
 
 iterator walkVtx*(
     be: MemBackendRef;
       ): tuple[n: int, vid: VertexID, vtx: VertexRef] =
   ## Iteration over the vertex sub-table.
   for n,vid in be.sTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
-    let vtx = be.sTab.getOrVoid vid
-    if vtx.isValid:
-      yield (n, vid, vtx)
+    let data = be.sTab.getOrDefault(vid, EmptyBlob)
+    if 0 < data.len:
+      let rc = data.deblobify VertexRef
+      if rc.isErr:
+        debug logTxt "walkVtxFn() skip", n, vid, error=rc.error
+      else:
+        yield (n, vid, rc.value)
 
 iterator walkKey*(
     be: MemBackendRef;

@@ -220,21 +235,25 @@ iterator walkKey*(
 
 iterator walk*(
     be: MemBackendRef;
-      ): tuple[n: int, pfx: AristoStorageType, vid: VertexID, data: Blob] =
+      ): tuple[n: int, pfx: StorageType, xid: uint64, data: Blob] =
   ## Walk over all key-value pairs of the database.
   ##
   ## Non-decodable entries are stepped over while the counter `n` of the
   ## yield record is still incremented.
-  for (n,vid,vGen) in be.walkIdg:
-    yield (n, IdgPfx, vid, vGen.blobify)
+  var n = 0
+  for (_,id,vGen) in be.walkIdg:
+    yield (n, IdgPfx, id, vGen.blobify)
+    n.inc
 
-  for (n,vid,vtx) in be.walkVtx:
-    let rc = vtx.blobify
-    if rc.isOk:
-      yield (n, VtxPfx, vid, rc.value)
+  for vid in be.sTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
+    let data = be.sTab.getOrDefault(vid, EmptyBlob)
+    if 0 < data.len:
+      yield (n, VtxPfx, vid.uint64, data)
+      n.inc
 
-  for (n,vid,key) in be.walkKey:
-    yield (n, KeyPfx, vid, key.to(Blob))
+  for (_,vid,key) in be.walkKey:
+    yield (n, KeyPfx, vid.uint64, key.to(Blob))
+    n.inc
 
 # ------------------------------------------------------------------------------
 # End

@@ -148,7 +148,7 @@ proc putVtxFn(db: RdbBackendRef): PutVtxFn =
       if hdl.error.isNil:
         for (vid,vtx) in vrps:
           if vtx.isValid:
-            let rc = vtx.blobify
+            let rc = vtx.blobify()
             if rc.isErr:
               hdl.error = TypedPutHdlErrRef(
                 pfx: VtxPfx,

@@ -187,8 +187,13 @@ proc putEndFn(db: RdbBackendRef): PutEndFn =
     proc(hdl: PutHdlRef): AristoError =
       let hdl = hdl.endSession db
       if not hdl.error.isNil:
-        debug logTxt "putEndFn: failed",
-          pfx=hdl.error.pfx, vid=hdl.error.vid, error=hdl.error.code
+        case hdl.error.pfx:
+        of VtxPfx, KeyPfx:
+          debug logTxt "putEndFn: vtx/key failed",
+            pfx=hdl.error.pfx, vid=hdl.error.vid, error=hdl.error.code
+        else:
+          debug logTxt "putEndFn: failed",
+            pfx=hdl.error.pfx, error=hdl.error.code
         return hdl.error.code
       let rc = db.rdb.put hdl.cache
       if rc.isErr:

@@ -208,7 +213,7 @@ proc closeFn(db: RdbBackendRef): CloseFn =
 # Public functions
 # ------------------------------------------------------------------------------
 
-proc rocksDbBackend*(path: string): Result[AristoBackendRef,AristoError] =
+proc rocksDbBackend*(path: string): Result[BackendRef,AristoError] =
   let
     db = RdbBackendRef(kind: BackendRocksDB)
     rc = db.rdb.init(path, maxOpenFiles)

@@ -238,7 +243,7 @@ proc rocksDbBackend*(path: string): Result[BackendRef,AristoError] =
 
 iterator walk*(
     be: RdbBackendRef;
-      ): tuple[n: int, pfx: AristoStorageType, vid: VertexID, data: Blob] =
+      ): tuple[n: int, pfx: StorageType, xid: uint64, data: Blob] =
   ## Walk over all key-value pairs of the database.
   ##
   ## Non-decodable entries are stepped over while the counter `n` of the

@@ -248,30 +253,30 @@ iterator walk*(
 
 iterator walkIdg*(
     be: RdbBackendRef;
-      ): tuple[n: int, vid: VertexID, vGen: seq[VertexID]] =
+      ): tuple[n: int, id: uint64, vGen: seq[VertexID]] =
   ## Variant of `walk()` iteration over the ID generator sub-table.
-  for (n, vid, data) in be.rdb.walk IdgPfx:
+  for (n, id, data) in be.rdb.walk IdgPfx:
     let rc = data.deblobify seq[VertexID]
     if rc.isOk:
-      yield (n, vid, rc.value)
+      yield (n, id, rc.value)
 
 iterator walkVtx*(
     be: RdbBackendRef;
       ): tuple[n: int, vid: VertexID, vtx: VertexRef] =
   ## Variant of `walk()` iteration over the vertex sub-table.
-  for (n, vid, data) in be.rdb.walk VtxPfx:
+  for (n, xid, data) in be.rdb.walk VtxPfx:
     let rc = data.deblobify VertexRef
     if rc.isOk:
-      yield (n, vid, rc.value)
+      yield (n, VertexID(xid), rc.value)
 
 iterator walkkey*(
     be: RdbBackendRef;
       ): tuple[n: int, vid: VertexID, key: HashKey] =
   ## Variant of `walk()` iteration over the Merkle hash sub-table.
-  for (n, vid, data) in be.rdb.walk KeyPfx:
+  for (n, xid, data) in be.rdb.walk KeyPfx:
     var hashKey: HashKey
     if hashKey.init data:
-      yield (n, vid, hashKey)
+      yield (n, VertexID(xid), hashKey)
 
 # ------------------------------------------------------------------------------
 # End

@@ -33,7 +33,7 @@ type
   RdbKey* = array[1 + sizeof VertexID, byte]
     ## Sub-table key, <pfx> + VertexID
 
-  RdbTabs* = array[AristoStorageType,Table[VertexID,Blob]]
+  RdbTabs* = array[StorageType,Table[VertexID,Blob]]
     ## Combined table for caching data to be stored/updated
 
 const

@@ -47,12 +47,12 @@ const
 # Public functions
 # ------------------------------------------------------------------------------
 
-proc toRdbKey*(vid: VertexID; pfx: AristoStorageType): RdbKey =
+proc toRdbKey*(vid: VertexID; pfx: StorageType): RdbKey =
   let vidKey = vid.uint64.toBytesBE
   result[0] = pfx.ord.byte
   copyMem(addr result[1], unsafeAddr vidKey, sizeof vidKey)
 
-template toOpenArray*(vid: VertexID; pfx: AristoStorageType): openArray[byte] =
+template toOpenArray*(vid: VertexID; pfx: StorageType): openArray[byte] =
   vid.toRdbKey(pfx).toOpenArray(0, sizeof VertexID)

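Note: the sub-table keys handled by `toRdbKey` are one prefix byte followed by
the 8 byte big-endian ID. A stand-alone sketch without the `stew` helpers
(toy names for illustration):

    type StorageToy = enum
      Oops = 0, IdgPfx = 1, VtxPfx = 2, KeyPfx = 3

    proc toRdbKeyToy(xid: uint64; pfx: StorageToy): array[9, byte] =
      ## <pfx> byte + 8 byte big-endian ID, as in `toRdbKey` above.
      result[0] = byte(pfx.ord)
      for i in 0 ..< 8:
        result[1 + i] = byte((xid shr (8 * (7 - i))) and 0xff)

    let key = toRdbKeyToy(0x0102, VtxPfx)
    doAssert key[0] == 2                     # VtxPfx prefix byte
    doAssert key[7] == 1 and key[8] == 2     # big-endian tail
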
@@ -159,7 +159,7 @@ proc put*(
   # Vertices with empty table values will be deleted
   var delKey: HashSet[RdbKey]
 
-  for pfx in low(AristoStorageType) .. high(AristoStorageType):
+  for pfx in low(StorageType) .. high(StorageType):
     when extraTraceMessages:
       trace logTxt "sub-table", pfx, nItems=tabs[pfx].len

@@ -31,13 +31,12 @@ func keyPfx(kData: cstring, kLen: csize_t): int =
   else:
     -1
 
-func keyVid(kData: cstring, kLen: csize_t): VertexID =
+func keyXid(kData: cstring, kLen: csize_t): uint64 =
   if not kData.isNil and kLen == 1 + sizeof(VertexID):
-    var data = kData.toOpenArrayByte(1,int(kLen)-1).toSeq
-    return uint64.fromBytesBE(data).VertexID
+    return uint64.fromBytesBE kData.toOpenArrayByte(1,int(kLen)-1).toSeq
 
-func to(vid: VertexID; T: type Blob): T =
-  vid.uint64.toBytesBE.toSeq
+func to(xid: uint64; T: type Blob): T =
+  xid.toBytesBE.toSeq
 
 func valBlob(vData: cstring, vLen: csize_t): Blob =
   if not vData.isNil and 0 < vLen:

@@ -49,7 +48,7 @@ func valBlob(vData: cstring, vLen: csize_t): Blob =
 
 iterator walk*(
     rdb: RdbInst;
-      ): tuple[n: int, pfx: AristoStorageType, vid: VertexID, data: Blob] =
+      ): tuple[n: int, pfx: StorageType, xid: uint64, data: Blob] =
   ## Walk over all key-value pairs of the database.
   ##
   ## Non-decodable entries are stepped over while the counter `n` of the

@@ -66,17 +65,17 @@ iterator walk*(
 
     let pfx = kData.keyPfx(kLen)
     if 0 <= pfx:
-      if high(AristoStorageType).ord < pfx:
+      if high(StorageType).ord < pfx:
         break
 
-      let vid = kData.keyVid(kLen)
-      if vid.isValid:
+      let xid = kData.keyXid(kLen)
+      if 0 < xid:
         var vLen: csize_t
         let vData = rit.rocksdb_iter_value(addr vLen)
 
         let val = vData.valBlob(vLen)
         if 0 < val.len:
-          yield (count, pfx.AristoStorageType, vid, val)
+          yield (count, pfx.StorageType, xid, val)
 
     # Update Iterator (might overwrite kData/vdata)
     rit.rocksdb_iter_next()

@@ -85,8 +84,8 @@ iterator walk*(
 
 iterator walk*(
     rdb: RdbInst;
-    pfx: AristoStorageType;
-      ): tuple[n: int, vid: VertexID, data: Blob] =
+    pfx: StorageType;
+      ): tuple[n: int, xid: uint64, data: Blob] =
   ## Walk over key-value pairs of the table referred to by the argument `pfx`.
   ##
   ## Non-decodable entries are stepped over while the counter `n` of the

@@ -107,7 +106,7 @@ iterator walk*(
       kData = rit.rocksdb_iter_key(addr kLen)
 
     case pfx:
-    of IdgPfx:
+    of Oops, IdgPfx:
       discard
     of VtxPfx, KeyPfx:
       # Skip over admin records until vertex sub-table reached

@@ -121,7 +120,7 @@ iterator walk*(
     # End while
 
     case pfx:
-    of IdgPfx, VtxPfx:
+    of Oops, IdgPfx, VtxPfx:
       discard
     of KeyPfx:
       # Reposition search head to key sub-table

@@ -130,7 +129,7 @@ iterator walk*(
       # Move search head to the first Merkle hash entry by seeking the same
       # vertex ID on the key table. This might skip over stale keys smaller
       # than the current one.
-      let key = @[KeyPfx.ord.byte] & kData.keyVid(kLen).to(Blob)
+      let key = @[KeyPfx.ord.byte] & kData.keyXid(kLen).to(Blob)
       rit.rocksdb_iter_seek(cast[cstring](unsafeAddr key[0]), csize_t(kLen))
 
       # It is not clear what happens when the `key` does not exist. The guess

@@ -154,8 +153,8 @@ iterator walk*(
         if pfx.ord < kPfx:
           break walkBody # done
 
-        let vid = kData.keyVid(kLen)
-        if vid.isValid or pfx == IdgPfx:
+        let xid = kData.keyXid(kLen)
+        if 0 < xid or pfx == IdgPfx:
 
           # Fetch value data
           var vLen: csize_t

@@ -163,7 +162,7 @@ iterator walk*(
 
           let val = vData.valBlob(vLen)
           if 0 < val.len:
-            yield (count, vid, val)
+            yield (count, xid, val)
 
           # Update Iterator
           rit.rocksdb_iter_next()

@@ -25,7 +25,7 @@ type
     ## Dummy descriptor type, will typically be used as `nil` reference
 
 export
-  AristoBackendType,
+  BackendType,
   VoidBackendRef,
   MemBackendRef,
   TypedBackendRef

@@ -35,14 +35,14 @@ export
 # ------------------------------------------------------------------------------
 
 proc newAristoDbRef*(
-    backend: static[AristoBackendType];
+    backend: static[BackendType];
       ): AristoDbRef =
   ## Simplified prototype for `BackendNone` and `BackendMemory` type backend.
   when backend == BackendVoid:
-    AristoDbRef(top: AristoLayerRef())
+    AristoDbRef(top: LayerRef())
 
   elif backend == BackendMemory:
-    AristoDbRef(top: AristoLayerRef(), backend: memoryBackend())
+    AristoDbRef(top: LayerRef(), backend: memoryBackend())
 
   elif backend == BackendRocksDB:
     {.error: "Aristo DB backend \"BackendRocksDB\" needs basePath argument".}

@@ -31,7 +31,7 @@ export
 # ------------------------------------------------------------------------------
 
 proc newAristoDbRef*(
-    backend: static[AristoBackendType];
+    backend: static[BackendType];
     basePath: string;
       ): Result[AristoDbRef, AristoError] =
   ## Generic constructor, `basePath` argument is ignored for `BackendNone` and

@@ -49,7 +49,7 @@ proc newAristoDbRef*(
         be.closeFn(flush = false)
         return err(rc.error)
       rc.value
-    ok AristoDbRef(top: AristoLayerRef(vGen: vGen), backend: be)
+    ok AristoDbRef(top: LayerRef(vGen: vGen), backend: be)
 
   elif backend == BackendVoid:
     {.error: "Use BackendNone.init() without path argument".}

@@ -11,7 +11,7 @@
 {.push raises: [].}
 
 import
-  std/[bitops, sequtils],
+  std/[bitops, sequtils, sets],
   eth/[common, rlp, trie/nibbles],
   stew/results,
   "."/[aristo_constants, aristo_desc]

@@ -262,8 +262,8 @@ proc blobify*(vtx: VertexRef): Result[Blob, AristoError] =
   ok(data)
 
 proc blobify*(vGen: openArray[VertexID]; data: var Blob) =
-  ## This function serialises the key generator used in the `AristoDb`
-  ## descriptor.
+  ## This function serialises the vertex ID generator state used in the
+  ## `AristoDbRef` descriptor.
   ##
   ## This data record is supposed to be stored in a dedicated slot of the
   ## persistent tables.

@@ -281,6 +281,104 @@ proc blobify*(vGen: openArray[VertexID]): Blob =
   ## Variant of `blobify()`
   vGen.blobify result
 
+proc blobify*(filter: FilterRef; data: var Blob): AristoError =
+  ## This function serialises an Aristo DB filter object
+  ## ::
+  ##   Filter encoding:
+  ##   Uint256       -- source key
+  ##   Uint256       -- target key
+  ##   uint32        -- number of vertex IDs (vertex ID generator state)
+  ##   uint32        -- number of (id,key,vertex) triplets
+  ##
+  ##   uint64, ...   -- list of vertex IDs (vertex ID generator state)
+  ##
+  ##   uint32        -- flag(3) + vtxLen(29), first triplet
+  ##   uint64        -- vertex ID
+  ##   Uint256       -- optional key
+  ##   Blob          -- optional vertex
+  ##
+  ##   ...           -- more triplets
+  ##
+  data.setLen(0)
+  data &= filter.src.ByteArray32.toSeq
+  data &= filter.trg.ByteArray32.toSeq
+
+  data &= filter.vGen.len.uint32.toBytesBE.toSeq
+  data &= newSeq[byte](4) # place holder
+
+  # Store vertex ID generator state
+  for w in filter.vGen:
+    data &= w.uint64.toBytesBE.toSeq
+
+  var
+    n = 0
+    leftOver = filter.kMap.keys.toSeq.toHashSet
+
+  # Loop over vertex table
+  for (vid,vtx) in filter.sTab.pairs:
+    n.inc
+    leftOver.excl vid
+
+    var
+      keyMode = 0u # present and usable
+      vtxMode = 0u # present and usable
+      keyBlob: Blob
+      vtxBlob: Blob
+
+    let key = filter.kMap.getOrVoid vid
+    if key.isValid:
+      keyBlob = key.ByteArray32.toSeq
+    elif filter.kMap.hasKey vid:
+      keyMode = 1u # void hash key => considered deleted
+    else:
+      keyMode = 2u # ignore that hash key
+
+    if vtx.isValid:
+      let error = vtx.blobify vtxBlob
+      if error != AristoError(0):
+        return error
+    else:
+      vtxMode = 1u # nil vertex => considered deleted
+
+    if (vtxBlob.len and not 0x1fffffff) != 0:
+      return BlobifyFilterRecordOverflow
+
+    let pfx = ((keyMode * 3 + vtxMode) shl 29) or vtxBlob.len.uint
+    data &=
+      pfx.uint32.toBytesBE.toSeq &
+      vid.uint64.toBytesBE.toSeq &
+      keyBlob &
+      vtxBlob
+
+  # Loop over remaining data from key table
+  for vid in leftOver:
+    n.inc
+    var
+      mode = 2u # key present and usable, ignore vtx
+      keyBlob: Blob
+
+    let key = filter.kMap.getOrVoid vid
+    if key.isValid:
+      keyBlob = key.ByteArray32.toSeq
+    else:
+      mode = 5u # 1 * 3 + 2: void key, ignore vtx
+
+    let pfx = (mode shl 29)
+    data &=
+      pfx.uint32.toBytesBE.toSeq &
+      vid.uint64.toBytesBE.toSeq &
+      keyBlob
+
+  data[68 ..< 72] = n.uint32.toBytesBE.toSeq
+
+proc blobify*(filter: FilterRef): Result[Blob, AristoError] =
+  ## Variant of `blobify()` for serialising an Aristo DB filter object
+  var data: Blob
+  let error = filter.blobify data
+  if error != AristoError(0):
+    return err(error)
+  ok data
+
 # -------------
 
 proc deblobify(data: Blob; pyl: var PayloadRef): AristoError =

@@ -449,6 +547,76 @@ proc deblobify*(data: Blob; T: type seq[VertexID]): Result[T,AristoError] =
       return err(info)
   ok vGen
 
+
+proc deblobify*(data: Blob; filter: var FilterRef): AristoError =
+  ## De-serialise an Aristo DB filter object
+  if data.len < 72: # minimum length 72 for an empty filter
+    return DeblobFilterTooShort
+
+  let f = FilterRef()
+  (addr f.src.ByteArray32[0]).copyMem(unsafeAddr data[0], 32)
+  (addr f.trg.ByteArray32[0]).copyMem(unsafeAddr data[32], 32)
+
+  let
+    nVids = uint32.fromBytesBE data[64 ..< 68]
+    nTriplets = uint32.fromBytesBE data[68 ..< 72]
+    nTrplStart = (72 + nVids * 8).int
+
+  if data.len < nTrplStart:
+    return DeblobFilterGenTooShort
+  for n in 0 ..< nVids:
+    let w = 72 + n * 8
+    f.vGen.add (uint64.fromBytesBE data[w ..< w + 8]).VertexID
+
+  var offs = nTrplStart
+  for n in 0 ..< nTriplets:
+    if data.len < offs + 12:
+      return DeblobFilterTrpTooShort
+
+    let
+      flag = data[offs] shr 5 # double triplets: {0,1,2} x {0,1,2}
+      vLen = ((uint32.fromBytesBE data[offs ..< offs + 4]) and 0x1fffffff).int
+    if (vLen == 0) != ((flag mod 3) > 0):
+      return DeblobFilterTrpVtxSizeGarbled # contradiction
+    offs = offs + 4
+
+    let vid = (uint64.fromBytesBE data[offs ..< offs + 8]).VertexID
+    offs = offs + 8
+
+    if data.len < offs + (flag < 3).ord * 32 + vLen:
+      return DeblobFilterTrpTooShort
+
+    if flag < 3:                              # {0} x {0,1,2}
+      var key: HashKey
+      (addr key.ByteArray32[0]).copyMem(unsafeAddr data[offs], 32)
+      f.kMap[vid] = key
+      offs = offs + 32
+    elif flag < 6:                            # {0,1} x {0,1,2}
+      f.kMap[vid] = VOID_HASH_KEY
+
+    if 0 < vLen:
+      var vtx: VertexRef
+      let error = data[offs ..< offs + vLen].deblobify vtx
+      if error != AristoError(0):
+        return error
+      f.sTab[vid] = vtx
+      offs = offs + vLen
+    elif (flag mod 3) == 1:                   # {0,1,2} x {1}
+      f.sTab[vid] = VertexRef(nil)
+
+  if data.len != offs:
+    return DeblobFilterSizeGarbled
+
+  filter = f
+
+proc deblobify*(data: Blob; T: type FilterRef): Result[T,AristoError] =
+  ## Variant of `deblobify()` for deserialising an Aristo DB filter object
+  var filter: T
+  let error = data.deblobify filter
+  if error != AristoError(0):
+    return err(error)
+  ok filter
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------

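Note: a hedged usage sketch of the new transcoder pair (module paths as used
by the test imports further below; the 72 byte figure is the fixed header from
section 4.7 of the README):

    import ../../nimbus/db/aristo/[aristo_desc, aristo_transcode]

    # Serialise a minimal filter and read it back (src/trg stay zero keys).
    let f = FilterRef(vGen: @[VertexID(1)])
    let rc = f.blobify()
    doAssert rc.isOk
    doAssert rc.value.len == 72 + 8     # 72 byte header + one vertex ID

    let back = rc.value.deblobify FilterRef
    doAssert back.isOk
    doAssert back.value.vGen == f.vGen
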
@@ -40,11 +40,11 @@ proc getTxUid(db: AristoDbRef): uint =
 
 proc linkClone(db: AristoDbRef; clone: AristoDbRef) =
   ## Link clone to parent
-  clone.dudes = AristoDudesRef(
+  clone.dudes = DudesRef(
     rwOk: false,
     rwDb: db)
   if db.dudes.isNil:
-    db.dudes = AristoDudesRef(
+    db.dudes = DudesRef(
       rwOk: true,
       roDudes: @[clone].toHashSet)
   else:

@@ -100,7 +100,7 @@ proc copyCat*(tx: AristoTxRef): Result[AristoDbRef,AristoError] =
   let db = tx.db
 
   # Provide new top layer
-  var topLayer: AristoLayerRef
+  var topLayer: LayerRef
   if db.txRef == tx:
     topLayer = db.top.dup
   elif tx.level < db.stack.len:

@@ -115,9 +115,9 @@ proc copyCat*(tx: AristoTxRef): Result[AristoDbRef,AristoError] =
   let stackLayer = block:
     let rc = db.getIdgBE()
     if rc.isOk:
-      AristoLayerRef(vGen: rc.value)
+      LayerRef(vGen: rc.value)
     elif rc.error == GetIdgNotFound:
-      AristoLayerRef()
+      LayerRef()
     else:
       return err(rc.error)

@@ -128,7 +128,7 @@ proc copyCat*(tx: AristoTxRef): Result[AristoDbRef,AristoError] =
     roFilter: db.roFilter, # no need to copy contents (done when updated)
     backend: db.backend,
     txUidGen: 1,
-    dudes: AristoDudesRef(
+    dudes: DudesRef(
       rwOk: false,
       rwDb: db))

@@ -348,23 +348,22 @@ proc stow*(
       return err(rc.error)
     rc.value
 
-  if fwd.vGen.isSome: # Otherwise this layer is pointless
-    block:
-      # Merge `top` layer into `roFilter`
-      let rc = db.merge fwd
-      if rc.isErr:
-        return err(rc.error)
-      db.top = AristoLayerRef(vGen: db.roFilter.vGen.unsafeGet)
+  if fwd.isValid:
+    # Merge `top` layer into `roFilter`
+    let rc = db.merge fwd
+    if rc.isErr:
+      return err(rc.error)
+    db.top = LayerRef(vGen: db.roFilter.vGen)
 
   if persistent:
     let rc = db.resolveBE()
     if rc.isErr:
       return err(rc.error)
-    db.roFilter = AristoFilterRef(nil)
+    db.roFilter = FilterRef(nil)
 
   # Delete or clear stack and clear top
   db.stack.setLen(0)
-  db.top = AristoLayerRef(vGen: db.top.vGen, txUid: db.top.txUid)
+  db.top = LayerRef(vGen: db.top.vGen, txUid: db.top.txUid)
 
   ok()

@@ -25,12 +25,12 @@ iterator walkVtxBeImpl*[T](
   var n = 0
 
   when be is VoidBackendRef:
-    let filter = if db.roFilter.isNil: AristoFilterRef() else: db.roFilter
+    let filter = if db.roFilter.isNil: FilterRef() else: db.roFilter
 
   else:
     mixin walkVtx
 
-    let filter = AristoFilterRef()
+    let filter = FilterRef()
     if not db.roFilter.isNil:
       filter.sTab = db.roFilter.sTab # copy table

@@ -60,12 +60,12 @@ iterator walkKeyBeImpl*[T](
   var n = 0
 
   when be is VoidBackendRef:
-    let filter = if db.roFilter.isNil: AristoFilterRef() else: db.roFilter
+    let filter = if db.roFilter.isNil: FilterRef() else: db.roFilter
 
   else:
     mixin walkKey
 
-    let filter = AristoFilterRef()
+    let filter = FilterRef()
     if not db.roFilter.isNil:
       filter.kMap = db.roFilter.kMap # copy table

@@ -90,19 +90,19 @@ iterator walkKeyBeImpl*[T](
 iterator walkIdgBeImpl*[T](
     be: T;                       # Backend descriptor
     db: AristoDbRef;             # Database with optional backend filter
-      ): tuple[n: int, vid: VertexID, vGen: seq[VertexID]] =
+      ): tuple[n: int, id: uint64, vGen: seq[VertexID]] =
   ## Generic pseudo iterator
   var nNext = 0
-  if not db.roFilter.isNil and db.roFilter.vGen.isSome:
-    yield(0, VertexID(0), db.roFilter.vGen.unsafeGet)
-    nNext = 1
+  if db.roFilter.isValid:
+    yield(0, 0u64, db.roFilter.vGen)
+    nNext = 1
 
   when be isnot VoidBackendRef:
     mixin walkIdg
 
-    for (n,vid,vGen) in be.walkIdg:
+    for (n,id,vGen) in be.walkIdg:
       if nNext <= n:
-        yield(n,vid,vGen)
+        yield(n,id,vGen)
 
 # ------------------------------------------------------------------------------
 # End

@@ -45,7 +45,7 @@ iterator walkKeyBe*[T: MemBackendRef|VoidBackendRef](
 iterator walkIdgBe*[T: MemBackendRef|VoidBackendRef](
     _: type T;
     db: AristoDbRef;
-      ): tuple[n: int, vid: VertexID, vGen: seq[VertexID]] =
+      ): tuple[n: int, id: uint64, vGen: seq[VertexID]] =
   ## Similar to `walkVtxBe()` but for vertex ID generator states.
   for (n,vid,vGen) in db.to(T).walkIdgBeImpl db:
     yield (n,vid,vGen)

@@ -50,7 +50,7 @@ iterator walkKeyBe*(
 iterator walkIdgBe*(
     T: type RdbBackendRef;
     db: AristoDbRef;
-      ): tuple[n: int, vid: VertexID, vGen: seq[VertexID]] =
+      ): tuple[n: int, id: uint64, vGen: seq[VertexID]] =
   ## Similar to `walkVtxBe()` but for vertex ID generator states.
   for (n,vid,vGen) in db.to(T).walkIdgBeImpl db:
     yield (n,vid,vGen)

@@ -65,7 +65,7 @@ proc mergeData(
     true
 
 proc verify(
-    ly: AristoLayerRef;                      # Database layer
+    ly: LayerRef;                            # Database layer
     be: MemBackendRef|RdbBackendRef;         # Backend
     noisy: bool;
       ): bool =

@@ -18,19 +18,16 @@ import
   unittest2,
   ../../nimbus/db/aristo/[
     aristo_check, aristo_debug, aristo_desc, aristo_filter, aristo_get,
-    aristo_merge],
+    aristo_merge, aristo_transcode],
   ../../nimbus/db/[aristo, aristo/aristo_init/persistent],
   ./test_helpers
 
 type
-  LeafTriplet = tuple
-    a, b, c: seq[LeafTiePayload]
-
-  LeafQuartet = tuple
-    a, b, c, d: seq[LeafTiePayload]
+  LeafQuartet =
+    array[0..3, seq[LeafTiePayload]]
 
-  DbTriplet = object
-    db1, db2, db3: AristoDbRef
+  DbTriplet =
+    array[0..2, AristoDbRef]
 
 # ------------------------------------------------------------------------------
 # Private debugging helpers

@@ -61,7 +58,7 @@ proc dump(dx: varargs[AristoDbRef]): string =
   "".dump dx
 
 proc dump(w: DbTriplet): string =
-  "db".dump(w.db1, w.db2, w.db3)
+  "db".dump(w[0], w[1], w[2])
 
 # ------------------------------------------------------------------------------
 # Private helpers

@@ -76,110 +73,63 @@ iterator quadripartite(td: openArray[ProofTrieData]): LeafQuartet =
 
     if lst.len < 8:
       if 2 < collect.len:
-        yield(collect[0], collect[1], collect[2], lst)
+        yield [collect[0], collect[1], collect[2], lst]
         collect.setLen(0)
       else:
         collect.add lst
     else:
       if collect.len == 0:
         let a = lst.len div 4
-        yield(lst[0 ..< a], lst[a ..< 2*a], lst[2*a ..< 3*a], lst[3*a .. ^1])
+        yield [lst[0 ..< a], lst[a ..< 2*a], lst[2*a ..< 3*a], lst[3*a .. ^1]]
       else:
         if collect.len == 1:
           let a = lst.len div 3
-          yield(collect[0], lst[0 ..< a], lst[a ..< 2*a], lst[a .. ^1])
+          yield [collect[0], lst[0 ..< a], lst[a ..< 2*a], lst[a .. ^1]]
         elif collect.len == 2:
           let a = lst.len div 2
-          yield(collect[0], collect[1], lst[0 ..< a], lst[a .. ^1])
+          yield [collect[0], collect[1], lst[0 ..< a], lst[a .. ^1]]
         else:
-          yield(collect[0], collect[1], collect[2], lst)
+          yield [collect[0], collect[1], collect[2], lst]
         collect.setLen(0)
 
 proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
-  let
-    db1 = block:
-      let rc = newAristoDbRef(BackendRocksDB,rdbPath)
-      if rc.isErr:
-        check rc.error == 0
-        return
-      rc.value
-
-    # Fill backend
-    m0 = db1.merge w.a
-    rc = db1.stow(persistent=true)
-
-  if rc.isErr:
-    check rc.error == (0,0)
-    return
-
-  let
-    db2 = db1.copyCat.value
-    db3 = db1.copyCat.value
-
-    # Clause (9) from `aristo/README.md` example
-    m1 = db1.merge w.b
-    m2 = db2.merge w.c
-    m3 = db3.merge w.d
-
-  if m1.error == 0 and
-     m2.error == 0 and
-     m3.error == 0:
-    return ok DbTriplet(db1: db1, db2: db2, db3: db3)
-
-  # Failed
-  db1.finish(flush=true)
-
-  check m1.error == 0
-  check m2.error == 0
-  check m3.error == 0
-
-  var error = m1.error
-  if error != 0: error = m2.error
-  if error != 0: error = m3.error
-  err(error)
-
-
-proc checkBeOk(
-    dx: DbTriplet;
-    relax = false;
-    forceCache = false;
-    noisy = true;
-      ): bool =
-  check not dx.db1.top.isNil
-  block:
-    let
-      cache = if forceCache: true else: not dx.db1.top.dirty
-      rc1 = dx.db1.checkBE(relax=relax, cache=cache)
-    if rc1.isErr:
-      noisy.say "***", "db1 check failed (do-cache=", cache, ")"
-      check rc1.error == (0,0)
-      return
-  block:
-    let
-      cache = if forceCache: true else: not dx.db2.top.dirty
-      rc2 = dx.db2.checkBE(relax=relax, cache=cache)
-    if rc2.isErr:
-      noisy.say "***", "db2 check failed (do-cache=", cache, ")"
-      check rc2.error == (0,0)
-      return
-  block:
-    let
-      cache = if forceCache: true else: not dx.db3.top.dirty
-      rc3 = dx.db3.checkBE(relax=relax, cache=cache)
-    if rc3.isErr:
-      noisy.say "***", "db3 check failed (do-cache=", cache, ")"
-      check rc3.error == (0,0)
-      return
-  true
+  let db = block:
+    let rc = newAristoDbRef(BackendRocksDB,rdbPath)
+    if rc.isErr:
+      check rc.error == 0
+      return
+    rc.value
+
+  # ---------
+  # Fill backend
+  block:
+    let report = db.merge w[0]
+    if report.error != AristoError(0):
+      db.finish(flush=true)
+      check report.error == 0
+      return err(report.error)
+    let rc = db.stow(persistent=true)
+    if rc.isErr:
+      check rc.error == (0,0)
+      return
+
+  let dx = [db, db.copyCat.value, db.copyCat.value]
+
+  # Clause (9) from `aristo/README.md` example
+  for n in 0 ..< dx.len:
+    let report = dx[n].merge w[n+1]
+    if report.error != AristoError(0):
+      db.finish(flush=true)
+      check (n, report.error) == (n,0)
+      return err(report.error)
+
+  return ok dx
 
 # ----------------------
 
 proc cleanUp(dx: DbTriplet) =
-  discard dx.db3.dispose
-  discard dx.db2.dispose
-  dx.db1.finish(flush=true)
+  dx[0].finish(flush=true)
 
-proc eq(a, b: AristoFilterRef; db: AristoDbRef; noisy = true): bool =
+proc isDbEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool =
   ## Verify that argument filter `a` has the same effect on the
   ## physical/unfiltered backend of `db` as argument filter `b`.
   if a.isNil:

@@ -219,7 +169,7 @@ proc eq(a, b: AristoFilterRef; db: AristoDbRef; noisy = true): bool =
     return false # general error
 
   if 0 < bTab.len:
-    noisy.say "*** eq:", "bTabLen=", bTab.len
+    noisy.say "***", "not dbEq:", "bTabLen=", bTab.len
     return false
 
   # Similar for `kMap[]`

@ -249,11 +199,116 @@ proc eq(a, b: AristoFilterRef; db: AristoDbRef; noisy = true): bool =
|
|||
return false # general error
|
||||
|
||||
if 0 < bMap.len:
|
||||
noisy.say "*** eq:", " bMapLen=", bMap.len
|
||||
noisy.say "***", "not dbEq:", " bMapLen=", bMap.len
|
||||
return false
|
||||
|
||||
true
|
||||
|
||||
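The equivalence notion tested here is effect equality, not structural equality: two filters are db-equal when the overrides either of them applies on top of the unfiltered backend resolve to the same result. A self-contained toy model of that relation, using hypothetical `Table`-based stand-ins rather than the Aristo types:

    import std/tables

    # Toy model: the "backend" and each "filter" map vertex IDs to
    # payloads; a filter's effect is the backend with the filter's
    # overrides applied on top.
    proc applyToy(db, filter: Table[int,string]): Table[int,string] =
      result = db
      for vid, val in filter.pairs:
        result[vid] = val

    proc toyDbEq(a, b, db: Table[int,string]): bool =
      applyToy(db, a) == applyToy(db, b)

    let backend = {1: "old", 2: "keep"}.toTable
    # Overriding vertex 1 vs. overriding vertex 1 and also re-writing
    # the value vertex 2 already has: the net effect is the same.
    doAssert toyDbEq(
      {1: "new"}.toTable,
      {1: "new", 2: "keep"}.toTable,
      backend)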
proc isEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool =
  ## Verify that the filters `a` and `b` are structurally equal, i.e.
  ## that they agree on source/target hashes, the vertex ID generator
  ## state, and the structural and key tables.
  if a.src != b.src:
    noisy.say "***", "not isEq:", " a.src=", a.src.pp, " b.src=", b.src.pp
    return
  if a.trg != b.trg:
    noisy.say "***", "not isEq:", " a.trg=", a.trg.pp, " b.trg=", b.trg.pp
    return
  if a.vGen != b.vGen:
    noisy.say "***", "not isEq:", " a.vGen=", a.vGen.pp, " b.vGen=", b.vGen.pp
    return
  if a.sTab.len != b.sTab.len:
    noisy.say "***", "not isEq:",
      " a.sTab.len=", a.sTab.len,
      " b.sTab.len=", b.sTab.len
    return
  if a.kMap.len != b.kMap.len:
    noisy.say "***", "not isEq:",
      " a.kMap.len=", a.kMap.len,
      " b.kMap.len=", b.kMap.len
    return
  for (vid,aVtx) in a.sTab.pairs:
    if b.sTab.hasKey vid:
      let bVtx = b.sTab.getOrVoid vid
      if aVtx != bVtx:
        noisy.say "***", "not isEq:",
          " vid=", vid.pp,
          " aVtx=", aVtx.pp(db),
          " bVtx=", bVtx.pp(db)
        return
    else:
      noisy.say "***", "not isEq:",
        " vid=", vid.pp,
        " aVtx=", aVtx.pp(db),
        " bVtx=n/a"
      return
  for (vid,aKey) in a.kMap.pairs:
    if b.kMap.hasKey vid:
      let bKey = b.kMap.getOrVoid vid
      if aKey != bKey:
        noisy.say "***", "not isEq:",
          " vid=", vid.pp,
          " aKey=", aKey.pp,
          " bKey=", bKey.pp
        return
    else:
      noisy.say "***", "not isEq:",
        " vid=", vid.pp,
        " aKey=", aKey.pp,
        " bKey=n/a"
      return

  true

# ----------------------

proc checkBeOk(
    dx: DbTriplet;
    relax = false;
    forceCache = false;
    noisy = true;
      ): bool =
  ## Verify that the backend of each descriptor in the triplet is
  ## consistent.
  for n in 0 ..< dx.len:
    let
      cache = if forceCache: true else: not dx[n].top.dirty
      rc = dx[n].checkBE(relax=relax, cache=cache)
    if rc.isErr:
      noisy.say "***", "db check failed", " n=", n, " cache=", cache
      check (n, rc.error[0], rc.error[1]) == (n, 0, 0)
      return
  true

proc checkFilterTrancoderOk(
    dx: DbTriplet;
    noisy = true;
      ): bool =
  ## Verify that each valid read-only filter of the triplet survives a
  ## serialise/de-serialise round trip unchanged.
  for n in 0 ..< dx.len:
    if dx[n].roFilter.isValid:
      let data = block:
        let rc = dx[n].roFilter.blobify()
        if rc.isErr:
          noisy.say "***", "db serialisation failed",
            " n=", n, " error=", rc.error
          check rc.error == 0
          return
        rc.value
      let dcdRoundTrip = block:
        let rc = data.deblobify FilterRef
        if rc.isErr:
          noisy.say "***", "db de-serialisation failed",
            " n=", n, " error=", rc.error
          check rc.error == 0
          return
        rc.value
      if not dx[n].roFilter.isEq(dcdRoundTrip, dx[n], noisy):
        #noisy.say "***", "checkFilterTrancoderOk failed",
        #  "\n   roFilter=", dx[n].roFilter.pp(dx[n]),
        #  "\n   dcdRoundTrip=", dcdRoundTrip.pp(dx[n])
        check (n,dx[n].roFilter) == (n,dcdRoundTrip)
        return

  true
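The proc above exercises the standard round-trip property for the new filter transcoder: de-serialising a serialised filter must reproduce the original. The same pattern in miniature, with toy encode/decode helpers standing in for the real `blobify`/`deblobify`:

    # Toy round-trip check: prefix a byte payload with its length,
    # then strip the prefix again; the decoded value must equal the
    # original (assumes payloads shorter than 256 bytes).
    proc toyBlobify(x: seq[byte]): seq[byte] =
      @[x.len.byte] & x

    proc toyDeblobify(data: seq[byte]): seq[byte] =
      data[1 .. ^1]

    let payload = @[byte 1, 2, 3]
    doAssert payload.toyBlobify.toyDeblobify == payload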

# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------

@@ -270,8 +325,8 @@ proc testDistributedAccess*(
  # Resulting clause (11) filters from `aristo/README.md` example
  # which will be used in the second part of the tests
  var
    c11Filter1 = FilterRef(nil)
    c11Filter3 = FilterRef(nil)

  # Work through clauses (8)..(11) from `aristo/README.md` example
  block:

@@ -283,7 +338,7 @@ proc testDistributedAccess*(
        if rc.isErr:
          return
        rc.value
      (db1, db2, db3) = (dx[0], dx[1], dx[2])
    defer:
      dx.cleanUp()

@@ -297,8 +352,8 @@ proc testDistributedAccess*(
      # noisy.say "*** testDistributedAccess (2) n=", n, dx.dump
      check rc.error == (0,0)
      return
    if db1.roFilter.isValid:
      check db1.roFilter == FilterRef(nil)
      return
    if db2.roFilter != db3.roFilter:
      check db2.roFilter == db3.roFilter

@@ -310,8 +365,8 @@ proc testDistributedAccess*(
      noisy.say "*** testDistributedAccess (3)", "n=", n, "db2".dump db2
      check rc.error == (0,0)
      return
    if db1.roFilter.isValid:
      check db1.roFilter == FilterRef(nil)
      return
    if db2.roFilter == db3.roFilter:
      check db2.roFilter != db3.roFilter

@@ -328,8 +383,8 @@ proc testDistributedAccess*(
    if rc.isErr:
      check rc.error == (0,0)
      return
    if db2.roFilter.isValid:
      check db2.roFilter == FilterRef(nil)
      return

    # Check/verify backends

@@ -339,6 +394,11 @@ proc testDistributedAccess*(
      noisy.say "*** testDistributedAccess (4)", "n=", n, "db3".dump db3
      check ok
      return
    block:
      let ok = dx.checkFilterTrancoderOk(noisy=noisy)
      if not ok:
        check ok
        return

    # Capture filters from clause (11)
    c11Filter1 = db1.roFilter

@@ -357,7 +417,7 @@ proc testDistributedAccess*(
      if rc.isErr:
        return
      rc.value
    (db1, db2, db3) = (dy[0], dy[1], dy[2])
    defer:
      dy.cleanUp()

@@ -372,8 +432,8 @@ proc testDistributedAccess*(
    if rc.isErr:
      check rc.error == (0,0)
      return
    if db2.roFilter.isValid:
      check db2.roFilter == FilterRef(nil)
      return
    if db1.roFilter != db3.roFilter:
      check db1.roFilter == db3.roFilter

@@ -388,22 +448,22 @@ proc testDistributedAccess*(

  # Clause (14) from `aristo/README.md` check
  block:
    let c11Fil1_eq_db1RoFilter = c11Filter1.isDbEq(db1.roFilter, db1, noisy)
    if not c11Fil1_eq_db1RoFilter:
      noisy.say "*** testDistributedAccess (7)", "n=", n,
        "\n   c11Filter1=", c11Filter1.pp(db1),
        "db1".dump(db1)
      check c11Fil1_eq_db1RoFilter
      return

  # Clause (15) from `aristo/README.md` check
  block:
    let c11Fil3_eq_db3RoFilter = c11Filter3.isDbEq(db3.roFilter, db3, noisy)
    if not c11Fil3_eq_db3RoFilter:
      noisy.say "*** testDistributedAccess (8)", "n=", n,
        "\n   c11Filter3=", c11Filter3.pp(db3),
        "db3".dump(db3)
      check c11Fil3_eq_db3RoFilter
      return

  # Check/verify backends

@@ -412,6 +472,11 @@ proc testDistributedAccess*(
    if not ok:
      check ok
      return
    block:
      let ok = dy.checkFilterTrancoderOk(noisy=noisy)
      if not ok:
        check ok
        return

  when false: # or true:
    noisy.say "*** testDistributedAccess (9)", "n=", n, dy.dump

@@ -106,6 +106,8 @@ proc `==`*(a: (VertexID,AristoError), b: (int,AristoError)): bool =
proc `==`*(a: (int,AristoError), b: (int,int)): bool =
  (a[0],a[1].int) == b

proc `==`*(a: (int,VertexID,AristoError), b: (int,int,int)): bool =
  (a[0], a[1].int, a[2].int) == b

proc to*(sample: AccountsSample; T: type seq[UndumpAccounts]): T =
  ## Convert test data into usable in-memory format
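These `==` overloads let `check` statements compare tuples of mixed concrete types against plain integer literals. A minimal, self-contained sketch of the pattern, modelling `AristoError` and `VertexID` as distinct integer types for illustration:

    type
      AristoError = distinct int
      VertexID = distinct uint64

    proc `==`(a: (int,AristoError), b: (int,int)): bool =
      (a[0], a[1].int) == b

    # A `check (n, report.error) == (n, 0)` style assertion now
    # compiles even though `report.error` is not an `int`.
    doAssert (2, AristoError(0)) == (2, 0)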