Aristo db: use filter between backend and tx cache (#1678)

* Provide deep copy for each transaction layer

why:
  Localises changes to the transaction layer they belong to. The
  selective deep copy had previously been overlooked.

* Generalise vertex ID generator state reorg function `vidReorg()`

why:
  Makes the vertex ID generator state easier to handle when saving layers.
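
  For illustration, the new pure-function form (sketch only, mirroring the
  new test cases; assumes `std/sequtils` for `mapIt`):

    let vGen = @[8, 7, 3, 4, 5, 9].mapIt(VertexID(it))
    doAssert vGen.vidReorg == @[3, 4, 5, 7].mapIt(VertexID(it))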

* Provide dummy back end descriptor `NoneBackendRef`

* Optional read-only filter between backend and transaction cache

why:
  Provides a staging area for accumulating changes destined for the
  backend DB. This will eventually become an access layer for emulating
  a backend with multiple/historic state roots.

* Re-factor `persistent()` with filter between backend/tx-cache => `stow()`

why:
  The filter provides an abstraction over the data physically stored on
  disk. So there can be several MPT instances using the same disk data,
  each with a different state root. Of course, for practical reasons the
  MPT instances should not differ too much :).
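
  For illustration, the intended call pattern is roughly as follows (sketch
  only; error handling elided, `db` is an `AristoDbRef`):

    discard db.stow(stageOnly=true)   # merge top layer into the filter, no disk I/O
    discard db.stow(stageOnly=false)  # later: flush the accumulated filter to disk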

TODO:
  Filter administration tools need to be provided.
Jordan Hrycaj 2023-08-10 21:01:28 +01:00 committed by GitHub
parent a7db7b9101
commit 09fabd04eb
24 changed files with 790 additions and 235 deletions

View File

@ -13,11 +13,6 @@
##
{.push raises: [].}
import aristo/aristo_desc/[
aristo_types_identifiers, aristo_types_structural]
export
aristo_types_identifiers, aristo_types_structural
import aristo/[
aristo_constants, aristo_delete, aristo_fetch, aristo_hashify, aristo_init,
aristo_merge, aristo_nearby, aristo_tx, aristo_utils]
@ -30,10 +25,13 @@ import
export
append, read
import
aristo/aristo_vid
import aristo/aristo_desc/[
aristo_types_identifiers, aristo_types_structural]
export
vidFetch
AristoAccount,
PayloadRef,
PayloadType,
`==`
import
aristo/aristo_desc

View File

@ -17,7 +17,7 @@ import
std/[algorithm, sequtils, sets, tables],
eth/common,
stew/[interval_set, results],
./aristo_init/[aristo_memory, aristo_rocksdb],
./aristo_walk/persistent,
"."/[aristo_desc, aristo_get, aristo_init, aristo_vid, aristo_utils],
./aristo_check/[check_be, check_cache]
@ -80,11 +80,11 @@ proc checkBE*(
let be = db.to(TypedBackendRef)
case be.kind:
of BackendMemory:
return be.MemBackendRef.checkBE(db, cache=cache, relax=relax)
return MemBackendRef.checkBE(db, cache=cache, relax=relax)
of BackendRocksDB:
return be.RdbBackendRef.checkBE(db, cache=cache, relax=relax)
return RdbBackendRef.checkBE(db, cache=cache, relax=relax)
of BackendNone:
discard
return NoneBackendRef.checkBE(db, cache=cache, relax=relax)
ok()
# ------------------------------------------------------------------------------

View File

@ -14,8 +14,9 @@ import
std/[algorithm, sequtils, sets, tables],
eth/common,
stew/interval_set,
../aristo_init/[aristo_memory, aristo_rocksdb],
".."/[aristo_desc, aristo_get, aristo_vid, aristo_transcode, aristo_utils]
../../aristo,
../aristo_walk/persistent,
".."/[aristo_desc, aristo_get, aristo_vid, aristo_transcode]
const
Vid2 = @[VertexID(2)].toHashSet
@ -79,8 +80,8 @@ proc toNodeBe(
# Public functions
# ------------------------------------------------------------------------------
proc checkBE*[T](
be: T; # backend descriptor
proc checkBE*[T: RdbBackendRef|MemBackendRef|NoneBackendRef](
_: type T;
db: AristoDbRef; # Database, top layer
relax: bool; # Not compiling hashes if `true`
cache: bool; # Also verify cache
@ -90,14 +91,14 @@ proc checkBE*[T](
let vids = IntervalSetRef[VertexID,uint64].init()
discard vids.merge Interval[VertexID,uint64].new(VertexID(1),high(VertexID))
for (_,vid,vtx) in be.walkVtx:
for (_,vid,vtx) in T.walkVtxBE db:
if not vtx.isValid:
return err((vid,CheckBeVtxInvalid))
let rc = db.getKeyBackend vid
if rc.isErr or not rc.value.isValid:
return err((vid,CheckBeKeyMissing))
for (_,vid,key) in be.walkKey:
for (_,vid,key) in T.walkKeyBE db:
if not key.isValid:
return err((vid,CheckBeKeyInvalid))
let rc = db.getVtxBackend vid
@ -116,7 +117,7 @@ proc checkBE*[T](
block:
# Extract vertex ID generator state
var vGen: HashSet[VertexID]
for (_,_,w) in be.walkIdg:
for (_,_,w) in T.walkIdgBE db:
vGen = vGen + w.toHashSet
let
vGenExpected = vids.invTo(HashSet[VertexID])
@ -128,7 +129,6 @@ proc checkBE*[T](
# Check cache against backend
if cache:
# Check structural table
for (vid,vtx) in db.top.sTab.pairs:
# A `kMap[]` entry must exist.
@ -164,10 +164,8 @@ proc checkBE*[T](
return err((vid,CheckBeCacheKeyMismatch))
# Check vGen
var tmp = AristoDbRef(top: AristoLayerRef(vGen: db.top.vGen))
tmp.vidReorg()
let
vGen = tmp.top.vGen.toHashSet
vGen = db.top.vGen.vidReorg.toHashSet
vGenExpected = vids.invTo(HashSet[VertexID])
delta = vGenExpected -+- vGen # symmetric difference
if 0 < delta.len:

View File

@ -17,6 +17,9 @@ import
"."/[aristo_constants, aristo_desc, aristo_hike, aristo_init],
./aristo_init/[aristo_memory, aristo_rocksdb]
export
TypedBackendRef, aristo_init.to
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
@ -27,6 +30,9 @@ proc sortedKeys(lTab: Table[LeafTie,VertexID]): seq[LeafTie] =
proc sortedKeys(kMap: Table[VertexID,HashLabel]): seq[VertexID] =
kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc sortedKeys(kMap: Table[VertexID,HashKey]): seq[VertexID] =
kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc sortedKeys(sTab: Table[VertexID,VertexRef]): seq[VertexID] =
sTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
@ -311,7 +317,32 @@ proc ppXMap*(
else:
result &= "}"
proc ppBe[T](be: T; db: AristoDbRef; indent: int): string =
proc ppFilter(fl: AristoFilterRef; db: AristoDbRef; indent: int): string =
## Walk over filter tables
let
pfx = indent.toPfx
pfx1 = indent.toPfx(1)
pfx2 = indent.toPfx(2)
result = "<filter>"
if db.roFilter.isNil:
result &= " n/a"
return
result &= pfx & "vGen" & pfx1 & "["
if fl.vGen.isSome:
result &= fl.vGen.unsafeGet.mapIt(it.ppVid).join(",")
result &= "]" & pfx & "sTab" & pfx1 & "{"
for n,vid in fl.sTab.sortedKeys:
let vtx = fl.sTab.getOrVoid vid
if 0 < n: result &= pfx2
result &= $(1+n) & "(" & vid.ppVid & "," & vtx.ppVtx(db,vid) & ")"
result &= "}" & pfx & "kMap" & pfx1 & "{"
for n,vid in fl.kMap.sortedKeys:
let key = fl.kMap.getOrVoid vid
if 0 < n: result &= pfx2
result &= $(1+n) & "(" & vid.ppVid & "," & key.ppKey & ")"
result &= "}"
proc ppBeOnly[T](be: T; db: AristoDbRef; indent: int): string =
## Walk over backend tables
let
pfx = indent.toPfx
@ -328,8 +359,12 @@ proc ppBe[T](be: T; db: AristoDbRef; indent: int): string =
$(1+it[0]) & "(" & it[1].ppVid & "," & it[2].ppKey & ")"
).join(pfx2) & "}"
proc ppBe[T](be: T; db: AristoDbRef; indent: int): string =
## backend + filter
db.roFilter.ppFilter(db, indent) & indent.toPfx & be.ppBeOnly(db,indent)
proc ppCache(
proc ppLayer(
layer: AristoLayerRef;
db: AristoDbRef;
vGenOk: bool;
sTabOk: bool;
@ -355,35 +390,35 @@ proc ppCache(
pfy = pfx2
rc
if not db.top.isNil:
if not layer.isNil:
if vGenOk:
let
tLen = db.top.vGen.len
tLen = layer.vGen.len
info = "vGen(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & db.top.vGen.ppVidList
result &= info.doPrefix(0 < tLen) & layer.vGen.ppVidList
if sTabOk:
let
tLen = db.top.sTab.len
tLen = layer.sTab.len
info = "sTab(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & db.top.sTab.ppSTab(db,indent+1)
result &= info.doPrefix(0 < tLen) & layer.sTab.ppSTab(db,indent+1)
if lTabOk:
let
tlen = db.top.lTab.len
tlen = layer.lTab.len
info = "lTab(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & db.top.lTab.ppLTab(indent+1)
result &= info.doPrefix(0 < tLen) & layer.lTab.ppLTab(indent+1)
if kMapOk:
let
tLen = db.top.kMap.len
ulen = db.top.pAmk.len
tLen = layer.kMap.len
ulen = layer.pAmk.len
lInf = if tLen == uLen: $tLen else: $tLen & "," & $ulen
info = "kMap(" & lInf & ")"
result &= info.doPrefix(0 < tLen + uLen)
result &= db.ppXMap(db.top.kMap,db.top.pAmk,indent+1)
result &= db.ppXMap(layer.kMap, layer.pAmk,indent+1)
if pPrfOk:
let
tLen = db.top.pPrf.len
tLen = layer.pPrf.len
info = "pPrf(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & db.top.pPrf.ppPPrf
result &= info.doPrefix(0 < tLen) & layer.pPrf.ppPPrf
# ------------------------------------------------------------------------------
# Public functions
@ -523,19 +558,46 @@ proc pp*(
# ---------------------
proc pp*(
layer: AristoLayerRef;
db: AristoDbRef;
indent = 4;
): string =
db.ppCache(
vGenOk=true, sTabOk=true, lTabOk=true, kMapOk=true, pPrfOk=true)
layer.ppLayer(
db, vGenOk=true, sTabOk=true, lTabOk=true, kMapOk=true, pPrfOk=true)
proc pp*(
layer: AristoLayerRef;
db: AristoDbRef;
xTabOk: bool;
indent = 4;
): string =
layer.ppLayer(
db, vGenOk=true, sTabOk=xTabOk, lTabOk=xTabOk, kMapOk=true, pPrfOk=true)
proc pp*(
layer: AristoLayerRef;
db: AristoDbRef;
xTabOk: bool;
kMapOk: bool;
other = false;
indent = 4;
): string =
layer.ppLayer(
db, vGenOk=other, sTabOk=xTabOk, lTabOk=xTabOk, kMapOk=kMapOk, pPrfOk=other)
proc pp*(
db: AristoDbRef;
indent = 4;
): string =
db.top.pp(db, indent=indent)
proc pp*(
db: AristoDbRef;
xTabOk: bool;
indent = 4;
): string =
db.ppCache(
vGenOk=true, sTabOk=xTabOk, lTabOk=xTabOk, kMapOk=true, pPrfOk=true)
db.top.pp(db, xTabOk=xTabOk, indent=indent)
proc pp*(
db: AristoDbRef;
@ -544,15 +606,15 @@ proc pp*(
other = false;
indent = 4;
): string =
db.ppCache(
vGenOk=other, sTabOk=xTabOk, lTabOk=xTabOk, kMapOk=kMapOk, pPrfOk=other)
db.top.pp(db, xTabOk=xTabOk, kMapOk=kMapOk, other=other, indent=indent)
proc pp*(
be: TypedBackendRef;
db: AristoDbRef;
indent = 4;
): string =
## May be called as `db.to(TypedBackendRef).pp(db)`
case (if be.isNil: BackendNone else: be.kind)
of BackendMemory:
be.MemBackendRef.ppBe(db, indent)
@ -561,7 +623,7 @@ proc pp*(
be.RdbBackendRef.ppBe(db, indent)
of BackendNone:
"n/a"
db.roFilter.ppFilter(db, indent) & indent.toPfx & "<BackendNone>"
# ------------------------------------------------------------------------------
# End

View File

@ -22,12 +22,14 @@
{.push raises: [].}
import
std/[sets, tables],
std/tables,
eth/common,
./aristo_constants,
./aristo_desc/[
aristo_error, aristo_types_backend,
aristo_types_identifiers, aristo_types_structural]
aristo_error, aristo_types_identifiers, aristo_types_structural]
from ./aristo_desc/aristo_types_backend
import AristoBackendRef
export
# Not auto-exporting backend
@ -35,11 +37,6 @@ export
aristo_types_structural
type
AristoChangeLogRef* = ref object
## Change log: database state before backend saving.
root*: HashKey ## Previous hash key for `VertexID(1)`
leafs*: Table[LeafTie,PayloadRef] ## Changed leafs after merge into backend
AristoTxRef* = ref object
## Transaction descriptor
db*: AristoDbRef ## Database descriptor
@ -47,24 +44,13 @@ type
txUid*: uint ## Unique ID among transactions
stackInx*: int ## Stack index for this transaction
AristoLayerRef* = ref object
## Hexary trie database layer structures. Any layer holds the full
## change relative to the backend.
sTab*: Table[VertexID,VertexRef] ## Structural vertex table
lTab*: Table[LeafTie,VertexID] ## Direct access, path to leaf vertex
kMap*: Table[VertexID,HashLabel] ## Merkle hash key mapping
pAmk*: Table[HashLabel,VertexID] ## Reverse `kMap` entries, hash key lookup
pPrf*: HashSet[VertexID] ## Locked vertices (proof nodes)
vGen*: seq[VertexID] ## Unique vertex ID generator
txUid*: uint ## Transaction identifier if positive
AristoDbRef* = ref AristoDbObj
AristoDbObj* = object
## Set of database layers, supporting transaction frames
top*: AristoLayerRef ## Database working layer, mutable
stack*: seq[AristoLayerRef] ## Stashed immutable parent layers
roFilter*: AristoFilterRef ## Apply read filter (locks writing)
backend*: AristoBackendRef ## Backend database (may well be `nil`)
history*: seq[AristoChangeLogRef] ## Backend saving history
txRef*: AristoTxRef ## Latest active transaction
txUidGen*: uint ## Tx-relative unique number generator
@ -82,6 +68,9 @@ func getOrVoid*[W](tab: Table[W,VertexRef]; w: W): VertexRef =
func getOrVoid*[W](tab: Table[W,HashLabel]; w: W): HashLabel =
tab.getOrDefault(w, VOID_HASH_LABEL)
func getOrVoid*[W](tab: Table[W,HashKey]; w: W): HashKey =
tab.getOrDefault(w, VOID_HASH_KEY)
func getOrVoid*[W](tab: Table[W,VertexID]; w: W): VertexID =
tab.getOrDefault(w, VertexID(0))
@ -106,7 +95,6 @@ func isValid*(vid: VertexID): bool =
vid != VertexID(0)
# ------------------------------------------------------------------------------
# Public functions, miscellaneous
# ------------------------------------------------------------------------------

View File

@ -166,17 +166,16 @@ type
DelExtLocked
DelVidStaleVtx
# Save permanently, `save()`
SaveBackendMissing
SaveStateRootMissing
SaveLeafVidRepurposed
# Functions from `aristo_filter.nim`
FilStateRootMissing
FilStateRootMismatch
FilPrettyPointlessLayer
# Get functions form `aristo_get.nim`
GetLeafNotFound
# All backend and get functions from `aristo_get.nim`
GetVtxNotFound
GetKeyNotFound
GetIdgNotFound
# RocksDB backend
RdbBeCantCreateDataDir
@ -199,6 +198,7 @@ type
TxExecBaseTxLocked
TxExecDirectiveLocked
TxStackUnderflow
TxBackendMissing
# Miscellaneous handy helpers
PayloadTypeUnsupported

View File

@ -15,7 +15,7 @@
{.push raises: [].}
import
stew/results,
results,
"."/[aristo_error, aristo_types_identifiers, aristo_types_structural]
type
@ -77,6 +77,8 @@ type
## `false` the outcome might differ depending on the type of backend
## (e.g. in-memory backends would flush on close.)
# -------------
AristoBackendRef* = ref object of RootRef
## Backend interface.
getVtxFn*: GetVtxFn ## Read vertex record

View File

@ -15,6 +15,7 @@
{.push raises: [].}
import
std/[options, sets, tables],
eth/[common, trie/nibbles],
"."/[aristo_error, aristo_types_identifiers]
@ -77,6 +78,35 @@ type
error*: AristoError ## Can be used for error signalling
key*: array[16,HashKey] ## Merkle hash/es for vertices
# ----------------------
AristoDeltaRef* = ref object
## Delta layer between backend and top/stack transaction layers.
src*: HashKey ## Applicable to this state root
sTab*: seq[(VertexID,VertexRef)] ## Filter structural vertex table
kMap*: seq[(VertexID,HashKey)] ## Filter Merkle hash key mapping
vGen*: Option[seq[VertexID]] ## Filter unique vertex ID generator
trg*: HashKey ## Resulting state root (i.e. `kMap[1]`)
AristoFilterRef* = ref object
## Delta layer with expanded sequences for quick access
src*: HashKey ## Applicable to this state root
sTab*: Table[VertexID,VertexRef] ## Filter structural vertex table
kMap*: Table[VertexID,HashKey] ## Filter Merkle hash key mapping
vGen*: Option[seq[VertexID]] ## Filter unique vertex ID generator
trg*: HashKey ## Resulting state root (i.e. `kMap[1]`)
AristoLayerRef* = ref object
## Hexary trie database layer structures. Any layer holds the full
## change relative to the backend.
sTab*: Table[VertexID,VertexRef] ## Structural vertex table
lTab*: Table[LeafTie,VertexID] ## Direct access, path to leaf vertex
kMap*: Table[VertexID,HashLabel] ## Merkle hash key mapping
pAmk*: Table[HashLabel,VertexID] ## Reverse `kMap` entries, hash key lookup
pPrf*: HashSet[VertexID] ## Locked vertices (proof nodes)
vGen*: seq[VertexID] ## Unique vertex ID generator
txUid*: uint ## Transaction identifier if positive
# ------------------------------------------------------------------------------
# Public helpers: `NodeRef` and `PayloadRef`
# ------------------------------------------------------------------------------
@ -207,6 +237,18 @@ proc dup*(node: NodeRef): NodeRef =
bVid: node.bVid,
key: node.key)
proc dup*(layer: AristoLayerRef): AristoLayerRef =
## Duplicate layer.
result = AristoLayerRef(
lTab: layer.lTab,
kMap: layer.kMap,
pAmk: layer.pAmk,
pPrf: layer.pPrf,
vGen: layer.vGen,
txUid: layer.txUid)
for (k,v) in layer.sTab.pairs:
result.sTab[k] = v.dup
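# Example usage (illustrative sketch; `db`, `vid` and `vtx` are placeholders).
# The deep copy is what `txBegin()` uses further down in this commit, so that
# edits to the working layer no longer leak into stashed parent layers.
when false:
  db.stack.add db.top.dup            # stash an independent copy of the top layer
  db.top.sTab[vid] = vtx             # leaves the stashed copy untouched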
proc to*(node: NodeRef; T: type VertexRef): T =
## Extract a copy of the `VertexRef` part from a `NodeRef`.
node.VertexRef.dup

View File

@ -0,0 +1,210 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Aristo DB -- Patricia Trie filter management
## =============================================
##
import
std/[options, sequtils, tables],
results,
"."/[aristo_desc, aristo_get, aristo_vid]
type
StateRootPair = object
be: HashKey
fg: HashKey
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc getBeStateRoot(
db: AristoDbRef;
): Result[HashKey,AristoError] =
let rc = db.getKeyBackend VertexID(1)
if rc.isOk:
return ok(rc.value)
if rc.error == GetKeyNotFound:
return ok(VOID_HASH_KEY)
err(rc.error)
proc getLayerStateRoots(
db: AristoDbRef;
layer: AristoLayerRef;
extendOK: bool;
): Result[StateRootPair,AristoError] =
## Get the Merkle hash key for target state root to arrive at after this
## reverse filter was applied.
var spr: StateRootPair
block:
let rc = db.getBeStateRoot()
if rc.isErr:
return err(rc.error)
spr.be = rc.value
block:
spr.fg = layer.kMap.getOrVoid(VertexID 1).key
if spr.fg.isValid:
return ok(spr)
if extendOK:
let vid = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: spr.be)
if vid == VertexID(1):
spr.fg = spr.be
return ok(spr)
if layer.sTab.len == 0 and
layer.kMap.len == 0 and
layer.pAmk.len == 0:
return err(FilPrettyPointlessLayer)
err(FilStateRootMismatch)
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
func bulk*(filter: AristoFilterRef): int =
## A rough size measure for the filter, calculated as the length of the
## `sTab[]` table plus the length of the `kMap[]` table. This can be used
## to set a threshold for when `stow()` should flush the staging area to
## the backend DB.
##
## The `filter` argument may be `nil`, i.e. `AristoFilterRef(nil).bulk == 0`
if filter.isNil: 0 else: filter.sTab.len + filter.kMap.len
func bulk*(layer: AristoLayerRef): int =
## Variant of `bulk()` for layers rather than filters.
##
## The `layer` argument may be `nil`, i.e. `AristoLayerRef(nil).bulk == 0`
if layer.isNil: 0 else: layer.sTab.len + layer.kMap.len
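# Example usage (illustrative sketch; `db` is a placeholder). The `bulk()`
# sizes feed the `stow(stageLimit=..)` variant added to `aristo_tx.nim` by
# this commit; the threshold below matches `MaxFilterBulk` from the tests.
when false:
  let pending = max(db.roFilter.bulk, db.top.bulk)  # staged + unsaved data size
  discard db.stow(stageLimit = 15_000)              # stageOnly = (15_000 <= pending)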
# ------------------------------------------------------------------------------
# Public functions, construct filters
# ------------------------------------------------------------------------------
proc fwdFilter*(
db: AristoDbRef;
layer: AristoLayerRef;
extendOK = false;
): Result[AristoFilterRef,(VertexID,AristoError)] =
## Assemble forward delta, i.e. changes to the backend equivalent to applying
## the current top layer.
##
## Typically, the argument `layer` would reflect a change of the MPT but there
## is the case of partial MPTs sent over the network when synchronising (see
## `snap` protocol.) In this case, the state root might not see a change on
## the argument `layer`, which would result in an error unless the argument
## `extendOK` is set `true`.
##
## This delta is taken against the current backend including optional
## read-only filter.
##
# Register the Merkle hash keys of the MPT where this reverse filter will be
# applicable: `be => fg`
let (srcRoot, trgRoot) = block:
let rc = db.getLayerStateRoots(layer, extendOk)
if rc.isOK:
(rc.value.be, rc.value.fg)
elif rc.error == FilPrettyPointlessLayer:
return ok AristoFilterRef(vGen: none(seq[VertexID]))
else:
return err((VertexID(1), rc.error))
ok AristoFilterRef(
src: srcRoot,
sTab: layer.sTab,
kMap: layer.kMap.pairs.toSeq.mapIt((it[0],it[1].key)).toTable,
vGen: some(layer.vGen.vidReorg), # Compact recycled IDs
trg: trgRoot)
# ------------------------------------------------------------------------------
# Public functions, apply/install filters
# ------------------------------------------------------------------------------
proc merge*(
db: AristoDbRef;
filter: AristoFilterRef;
): Result[void,(VertexID,AristoError)] =
## Merge argument `filter` to the filter layer.
##
## Comparing before and after merge
## ::
## current | merged
## ----------------------------------+--------------------------------
## trg2 --filter-- (src2==trg1) |
## | trg2 --newFilter-- (src1==trg0)
## trg1 --db.roFilter-- (src1==trg0) |
## |
## trg0 --db.backend | trg0 --db.backend
## |
let beRoot = block:
let rc = db.getBeStateRoot()
if rc.isErr:
return err((VertexID(1),FilStateRootMissing))
rc.value
if filter.vGen.isNone:
# Blind argument filter
if db.roFilter.isNil:
# Force read-only system
db.roFilter = AristoFilterRef(
src: beRoot,
trg: beRoot,
vGen: none(seq[VertexID]))
return ok()
# Simple case: no read-only filter yet
if db.roFilter.isNil or db.roFilter.vGen.isNone:
if filter.src != beRoot:
return err((VertexID(1),FilStateRootMismatch))
db.roFilter = filter
return ok()
# Verify merge stackability into existing read-only filter
if filter.src != db.roFilter.trg:
return err((VertexID(1),FilStateRootMismatch))
# Merge `filter` into `roFilter` as `newFilter`. There is no need to deep
# copy table vertices as they will not be modified.
let newFilter = AristoFilterRef(
src: db.roFilter.src,
sTab: db.roFilter.sTab,
kMap: db.roFilter.kMap,
vGen: filter.vGen,
trg: filter.trg)
for (vid,vtx) in filter.sTab.pairs:
if vtx.isValid or not newFilter.sTab.hasKey vid:
newFilter.sTab[vid] = vtx
elif newFilter.sTab.getOrVoid(vid).isValid:
let rc = db.getVtxUnfilteredBackend vid
if rc.isOk:
newFilter.sTab[vid] = vtx # VertexRef(nil)
elif rc.error == GetVtxNotFound:
newFilter.sTab.del vid
else:
return err((vid,rc.error))
for (vid,key) in filter.kMap.pairs:
if key.isValid or not newFilter.kMap.hasKey vid:
newFilter.kMap[vid] = key
elif newFilter.kMap.getOrVoid(vid).isValid:
let rc = db.getKeyUnfilteredBackend vid
if rc.isOk:
newFilter.kMap[vid] = key # VOID_HASH_KEY
elif rc.error == GetKeyNotFound:
newFilter.kMap.del vid
else:
return err((vid,rc.error))
db.roFilter = newFilter
ok()
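# Example usage (illustrative sketch; `db` is a placeholder). Stacking two
# deltas by hand; in practice `stow(stageOnly=true)` does this and also
# resets the top layer.
when false:
  let f1 = db.fwdFilter(db.top)            # delta of the top layer vs. the backend
  if f1.isOk: discard db.merge f1.value    # roFilter now spans trg0 -> trg1
  # ... more changes accumulate on db.top ...
  let f2 = db.fwdFilter(db.top)
  if f2.isOk: discard db.merge f2.value    # roFilter now spans trg0 -> trg2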
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -14,9 +14,9 @@
{.push raises: [].}
import
std/tables,
stew/results,
"."/aristo_desc
std/[options, tables],
results,
./aristo_desc
type
VidVtxPair* = object
@ -27,7 +27,16 @@ type
# Public functions
# ------------------------------------------------------------------------------
proc getVtxBackend*(
proc getIdgUnfilteredBackend*(
db: AristoDbRef;
): Result[seq[VertexID],AristoError] =
## Get the ID generator state from the `backend` layer if available.
let be = db.backend
if not be.isNil:
return be.getIdgFn()
err(GetIdgNotFound)
proc getVtxUnfilteredBackend*(
db: AristoDbRef;
vid: VertexID;
): Result[VertexRef,AristoError] =
@ -35,9 +44,9 @@ proc getVtxBackend*(
let be = db.backend
if not be.isNil:
return be.getVtxFn vid
err(GetVtxNotFound)
err GetVtxNotFound
proc getKeyBackend*(
proc getKeyUnfilteredBackend*(
db: AristoDbRef;
vid: VertexID;
): Result[HashKey,AristoError] =
@ -45,7 +54,41 @@ proc getKeyBackend*(
let be = db.backend
if not be.isNil:
return be.getKeyFn vid
err(GetKeyNotFound)
err GetKeyNotFound
# ------------------
proc getIdgBackend*(
db: AristoDbRef;
): Result[seq[VertexID],AristoError] =
## Get the ID generator state from the `backend` layer if available.
if not db.roFilter.isNil and db.roFilter.vGen.isSome:
return ok(db.roFilter.vGen.unsafeGet)
db.getIdgUnfilteredBackend()
proc getVtxBackend*(
db: AristoDbRef;
vid: VertexID;
): Result[VertexRef,AristoError] =
## Get the vertex from the `backend` layer if available.
if not db.roFilter.isNil and db.roFilter.sTab.hasKey vid:
let vtx = db.roFilter.sTab.getOrVoid vid
if vtx.isValid:
return ok(vtx)
return err(GetVtxNotFound)
db.getVtxUnfilteredBackend vid
proc getKeyBackend*(
db: AristoDbRef;
vid: VertexID;
): Result[HashKey,AristoError] =
## Get the merkle hash/key from the backend
if not db.roFilter.isNil and db.roFilter.kMap.hasKey vid:
let key = db.roFilter.kMap.getOrVoid vid
if key.isValid:
return ok(key)
return err(GetKeyNotFound)
db.getKeyUnfilteredBackend vid
# ------------------
@ -101,6 +144,10 @@ proc getKey*(db: AristoDbRef; vid: VertexID): HashKey =
return rc.value
VOID_HASH_KEY
proc getRootKey*(db: AristoDbRef; vid: VertexID): HashKey =
## Shortcut for `db.getKey VertexID(1)`
db.getKey VertexID(1)
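# Example usage (illustrative sketch; `db` and `vid` are placeholders). Reads
# consult the read-only filter first: a staged deletion surfaces as
# `GetVtxNotFound`, otherwise the lookup falls through to the unfiltered
# backend.
when false:
  let rc = db.getVtxBackend vid
  if rc.isOk:
    doAssert rc.value.isValid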
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -13,7 +13,7 @@
##
## See `./README.md` for implementation details
##
## This module provides a memory datanase only. For providing a persistent
## This module provides a memory database only. For providing a persistent
## constructor, import `aristo_init/persistent`; that way the persistent
## backend library (e.g. `rocksdb`) is not linked unnecessarily when a
## memory-only database is used.

View File

@ -49,7 +49,7 @@ type
kMap: Table[VertexID,HashKey]
vGen: seq[VertexID]
vGenOk: bool
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

View File

@ -19,8 +19,15 @@ import
../aristo_desc/aristo_types_backend,
"."/[aristo_init_common, aristo_memory]
type
NoneBackendRef* = ref object of TypedBackendRef
## Dummy descriptor type, typically used as a `nil` reference
export
AristoBackendType, TypedBackendRef
AristoBackendType,
MemBackendRef,
NoneBackendRef,
TypedBackendRef
# ------------------------------------------------------------------------------
# Public database constructors, destructor
@ -59,7 +66,7 @@ proc finish*(db: AristoDbRef; flush = false) =
# -----------------
proc to*[W: TypedBackendRef|MemBackendRef](
proc to*[W: TypedBackendRef|MemBackendRef|NoneBackendRef](
db: AristoDbRef;
T: type W;
): T =

View File

@ -22,8 +22,8 @@ import
results,
../aristo_desc,
"."/[aristo_init_common, aristo_rocksdb, memory_only]
export
RdbBackendRef,
memory_only
# ------------------------------------------------------------------------------

View File

@ -1,87 +0,0 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Aristo DB -- Patricia Trie layer management
## ===========================================
##
import
std/[sequtils, tables],
stew/results,
"."/[aristo_desc, aristo_get, aristo_vid]
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc save*(
db: AristoDbRef; # Database to be updated
): Result[void,(VertexID,AristoError)] =
## Save the top layer cache onto the persistent database. There is no check
## whether the current layer is fully consistent as a Merkle Patricia Tree.
## It is advised to run `hashify()` on the top layer before calling `save()`.
##
## After successful storage, all parent layers are cleared as well as the
## the top layer cache.
##
## Upon successful return, the previous state of the backend data is saved
## as a new entry in `history` field of the argument descriptor `db`.
##
let be = db.backend
if be.isNil:
return err((VertexID(0),SaveBackendMissing))
# Get Merkle hash for state root
let key = db.getKey VertexID(1)
if not key.isValid:
return err((VertexID(1),SaveStateRootMissing))
let hst = AristoChangeLogRef(root: key) # Change history, previous state
# Record changed `Leaf` nodes into the history table
for (lky,vid) in db.top.lTab.pairs:
if vid.isValid:
# Get previous payload for this vertex
let rc = db.getVtxBackend vid
if rc.isErr:
if rc.error != GetVtxNotFound:
return err((vid,rc.error)) # Stop
hst.leafs[lky] = PayloadRef(nil) # So this is a new leaf vertex
elif rc.value.vType == Leaf:
hst.leafs[lky] = rc.value.lData # Record previous payload
else:
return err((vid,SaveLeafVidRepurposed)) # Was re-puropsed
else:
hst.leafs[lky] = PayloadRef(nil) # New leaf vertex
# Compact recycled nodes
db.vidReorg()
# Save structural and other table entries
let txFrame = be.putBegFn()
be.putVtxFn(txFrame, db.top.sTab.pairs.toSeq)
be.putKeyFn(txFrame, db.top.kMap.pairs.toSeq.mapIt((it[0],it[1].key)))
be.putIdgFn(txFrame, db.top.vGen)
let w = be.putEndFn txFrame
if w != AristoError(0):
return err((VertexID(0),w))
# Delete stack and clear top
db.stack.setLen(0)
db.top = AristoLayerRef(vGen: db.top.vGen)
# Save history
db.history.add hst
ok()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -14,8 +14,9 @@
{.push raises: [].}
import
std/[options, sequtils, tables],
results,
"."/[aristo_desc, aristo_layer]
"."/[aristo_desc, aristo_filter]
type
AristoTxAction* = proc() {.gcsafe, raises: [CatchableError].}
@ -34,21 +35,15 @@ proc backup(db: AristoDbRef): AristoDbRef =
AristoDbRef(
top: db.top, # ref
stack: db.stack, # sequence of refs
history: db.history, # sequence of refs
txRef: db.txRef, # ref
txUidGen: db.txUidGen) # number
proc restore(db: AristoDbRef, backup: AristoDbRef) =
db.top = backup.top
db.stack = backup.stack
db.history = backup.history
db.txRef = backup.txRef
db.txUidGen = backup.txUidGen
proc cpy(layer: AristoLayerRef): AristoLayerRef =
new result
result[] = layer[]
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
@ -121,8 +116,8 @@ proc exec*(
## `tx.rollback()` will throw an `AssertDefect` error.
## * The `exec()` call must not be nested. Doing otherwise will throw an
## `AssertDefect` error.
## * Changes on the database referred to by `tx` cannot be saved on disk with
## the `persistent()` directive.
## * Changes on the database referred to by `tx` can be staged but not saved
## persistently with the `stow()` directive.
##
## After return, the state of the underlying database will not have changed.
## Any transactions left open by the `action()` call will have been discarded.
@ -171,7 +166,7 @@ proc txBegin*(db: AristoDbRef): AristoTxRef =
## ... continue using db ...
## tx.commit()
##
db.stack.add db.top.cpy # push (save and use top later)
db.stack.add db.top.dup # push (save and use top later)
db.top.txUid = db.getTxUid()
db.txRef = AristoTxRef(
@ -272,18 +267,71 @@ proc collapse*(
# Public functions: save database
# ------------------------------------------------------------------------------
proc persistent*(db: AristoDbRef): Result[void,AristoError] =
## ...
let noTxPending = db.txRef.isNil
if not noTxPending and TxUidLocked <= db.txRef.txUid:
proc stow*(
db: AristoDbRef;
stageOnly = true;
extendOK = false;
): Result[void,AristoError] =
## If there is no backend while `stageOnly` is unset (i.e. `false`), the
## function returns immediately with an error. The same happens if the
## backend is locked due to an `exec()` call while `stageOnly` is unset.
##
## Otherwise, the data changes from the top layer cache are merged into the
## backend staging area. After that, the top layer cache is cleared.
##
## If the argument `stageOnly` is unset (i.e. `false`), all the staged data
## are additionally flushed into the backend database and the staging area
## is cleared.
##
if not db.txRef.isNil and
TxUidLocked <= db.txRef.txUid and
not stageOnly:
return err(TxExecDirectiveLocked)
let rc = db.save()
if rc.isErr:
return err(rc.error[1])
let be = db.backend
if be.isNil and not stageOnly:
return err(TxBackendMissing)
let fwd = block:
let rc = db.fwdFilter(db.top, extendOK)
if rc.isErr:
return err(rc.error[1])
rc.value
if fwd.vGen.isSome: # Otherwise this layer is pointless
block:
let rc = db.merge fwd
if rc.isErr:
return err(rc.error[1])
rc.value
if not stageOnly:
# Save structural and other table entries
let txFrame = be.putBegFn()
be.putVtxFn(txFrame, db.roFilter.sTab.pairs.toSeq)
be.putKeyFn(txFrame, db.roFilter.kMap.pairs.toSeq)
be.putIdgFn(txFrame, db.roFilter.vGen.unsafeGet)
let w = be.putEndFn txFrame
if w != AristoError(0):
return err(w)
db.roFilter = AristoFilterRef(nil)
# Delete or clear stack and clear top
db.stack.setLen(0)
db.top = AristoLayerRef(vGen: db.top.vGen, txUid: db.top.txUid)
ok()
proc stow*(
db: AristoDbRef;
stageLimit: int;
extendOK = false;
): Result[void,AristoError] =
## Variant of `stow()` with the `stageOnly` argument replaced by
## `stageLimit <= max(db.roFilter.bulk, db.top.bulk)`.
let w = max(db.roFilter.bulk, db.top.bulk)
db.stow(stageOnly=(stageLimit <= w), extendOK=extendOK)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -70,14 +70,14 @@ proc vidDispose*(db: AristoDbRef; vid: VertexID) =
db.top.vGen[^1] = vid
db.top.vGen.add topID
proc vidReorg*(db: AristoDbRef) =
## Remove redundant items from the recycle queue. All recycled entries are
## typically kept in the queue until the backend database is committed.
if 1 < db.top.vGen.len:
let lst = db.top.vGen.mapIt(uint64(it)).sorted.mapIt(VertexID(it))
proc vidReorg*(vGen: seq[VertexID]): seq[VertexID] =
## Return a compacted version of the argument vertex ID generator state
## `vGen`. The function removes redundant items from the recycle queue.
if 1 < vGen.len:
let lst = vGen.mapIt(uint64(it)).sorted.mapIt(VertexID(it))
for n in (lst.len-1).countDown(1):
if lst[n-1].uint64 + 1 != lst[n].uint64:
# All elements larger than `lst[n-1` are in increasing order. For
# All elements larger than `lst[n-1]` are in increasing order. For
# the last continuously increasing sequence, only the smallest item
# is needed and the rest can be removed
#
@ -88,11 +88,12 @@ proc vidReorg*(db: AristoDbRef) =
# n
#
if n < lst.len-1:
db.top.vGen.shallowCopy lst
db.top.vGen.setLen(n+1)
return
return lst[0..n]
return vGen
# All entries are continuously increasing
db.top.vGen = @[lst[0]]
return @[lst[0]]
vGen
proc vidAttach*(db: AristoDbRef; lbl: HashLabel; vid: VertexID) =
## Attach (i.e. register) a Merkle hash key to a vertex ID.

View File

@ -0,0 +1,27 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Backend DB traversal for Aristo DB
## ==================================
##
## This module provides iterators for the memory-based backend or the
## backend-less database. Import `aristo_walk/persistent` for the persistent
## backend; that way the persistent backend library (e.g. `rocksdb`) is not
## linked unnecessarily when a memory-only database is used.
##
{.push raises: [].}
import
./aristo_walk/memory_only
export
memory_only
# End

View File

@ -0,0 +1,109 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
std/[algorithm, sequtils, tables],
".."/[aristo_desc, aristo_init]
# ------------------------------------------------------------------------------
# Public generic iterators
# ------------------------------------------------------------------------------
iterator walkVtxBeImpl*[T](
be: T; # Backend descriptor
db: AristoDbRef; # Database with optional backend filter
): tuple[n: int, vid: VertexID, vtx: VertexRef] =
## Generic iterator
var n = 0
when be is NoneBackendRef:
let filter = if db.roFilter.isNil: AristoFilterRef() else: db.roFilter
else:
mixin walkVtx
let filter = AristoFilterRef()
if not db.roFilter.isNil:
filter.sTab = db.roFilter.sTab # copy table
for (_,vid,vtx) in be.walkVtx:
if filter.sTab.hasKey vid:
let fVtx = filter.sTab.getOrVoid vid
if fVtx.isValid:
yield (n,vid,fVtx)
n.inc
filter.sTab.del vid
else:
yield (n,vid,vtx)
n.inc
for vid in filter.sTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
let vtx = filter.sTab.getOrVoid vid
if vtx.isValid:
yield (n,vid,vtx)
n.inc
iterator walkKeyBeImpl*[T](
be: T; # Backend descriptor
db: AristoDbRef; # Database with optional backend filter
): tuple[n: int, vid: VertexID, key: HashKey] =
## Generic iterator
var n = 0
when be is NoneBackendRef:
let filter = if db.roFilter.isNil: AristoFilterRef() else: db.roFilter
else:
mixin walkKey
let filter = AristoFilterRef()
if not db.roFilter.isNil:
filter.kMap = db.roFilter.kMap # copy table
for (_,vid,key) in be.walkKey:
if filter.kMap.hasKey vid:
let fKey = filter.kMap.getOrVoid vid
if fKey.isValid:
yield (n,vid,fKey)
n.inc
filter.kMap.del vid
else:
yield (n,vid,key)
n.inc
for vid in filter.kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
let key = filter.kMap.getOrVoid vid
if key.isValid:
yield (n,vid,key)
n.inc
iterator walkIdgBeImpl*[T](
be: T; # Backend descriptor
db: AristoDbRef; # Database with optional backend filter
): tuple[n: int, vid: VertexID, vGen: seq[VertexID]] =
## Generic pseudo iterator
var nNext = 0
if not db.roFilter.isNil and db.roFilter.vGen.isSome:
yield(0, VertexID(0), db.roFilter.vGen.unsafeGet)
nNext = 1
when be isnot NoneBackendRef:
mixin walkIdg
for (n,vid,vGen) in be.walkIdg:
if nNext <= n:
yield(n,vid,vGen)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,55 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Iterators for non-persistent backend of the Aristo DB
## =====================================================
##
import
../aristo_init/[aristo_memory, memory_only],
".."/[aristo_desc, aristo_init],
./aristo_walk_private
export
aristo_memory,
memory_only
# ------------------------------------------------------------------------------
# Public iterators (all in one)
# ------------------------------------------------------------------------------
iterator walkVtxBe*[T: MemBackendRef|NoneBackendRef](
_: type T;
db: AristoDbRef;
): tuple[n: int, vid: VertexID, vtx: VertexRef] =
## Iterate over filtered memory backend or backend-less vertices. This
## function depends on the particular backend type name which must match
## the backend descriptor.
for (n,vid,vtx) in db.to(T).walkVtxBeImpl db:
yield (n,vid,vtx)
iterator walkKeyBe*[T: MemBackendRef|NoneBackendRef](
_: type T;
db: AristoDbRef;
): tuple[n: int, vid: VertexID, key: HashKey] =
## Similar to `walkVtxBe()` but for keys.
for (n,vid,key) in db.to(T).walkKeyBeImpl db:
yield (n,vid,key)
iterator walkIdgBe*[T: MemBackendRef|NoneBackendRef](
_: type T;
db: AristoDbRef;
): tuple[n: int, vid: VertexID, vGen: seq[VertexID]] =
## Similar to `walkVtxBe()` but for vertex ID generator states.
for (n,vid,vGen) in db.to(T).walkIdgBeImpl db:
yield (n,vid,vGen)
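# Example usage (illustrative sketch; `db` is a placeholder). The type
# argument must match the database's actual backend.
when false:
  for (n, vid, vtx) in MemBackendRef.walkVtxBe db:
    doAssert vtx.isValid             # filtered view: backend data overlaid by roFilter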
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,60 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Iterators for persistent backend of the Aristo DB
## =================================================
##
## This module automatically pulls in the persistent backend library at the
## linking stage (e.g. `rocksdb`) which can be avoided for pure memory DB
## applications by importing `./aristo_walk/memory_only` (rather than
## `./aristo_walk/persistent`.)
##
import
../aristo_init/[aristo_rocksdb, persistent],
".."/[aristo_desc, aristo_init],
"."/[aristo_walk_private, memory_only]
export
aristo_rocksdb,
memory_only,
persistent
# ------------------------------------------------------------------------------
# Public iterators (all in one)
# ------------------------------------------------------------------------------
iterator walkVtxBe*(
T: type RdbBackendRef;
db: AristoDbRef;
): tuple[n: int, vid: VertexID, vtx: VertexRef] =
## Iterate over filtered RocksDB backend vertices. This function depends on
## the particular backend type name which must match the backend descriptor.
for (n,vid,vtx) in db.to(T).walkVtxBeImpl db:
yield (n,vid,vtx)
iterator walkKeyBe*(
T: type RdbBackendRef;
db: AristoDbRef;
): tuple[n: int, vid: VertexID, key: HashKey] =
## Similar to `walkVtxBe()` but for keys.
for (n,vid,key) in db.to(T).walkKeyBeImpl db:
yield (n,vid,key)
iterator walkIdgBe*(
T: type RdbBackendRef;
db: AristoDbRef;
): tuple[n: int, vid: VertexID, vGen: seq[VertexID]] =
## Similar to `walkVtxBe()` but for vertex ID generator states.
for (n,vid,vGen) in db.to(T).walkIdgBeImpl db:
yield (n,vid,vGen)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -16,11 +16,10 @@ import
stew/results,
unittest2,
../../nimbus/sync/protocol,
../../nimbus/db/aristo,
../../nimbus/db/aristo/[aristo_desc, aristo_debug],
../../nimbus/db/aristo/aristo_init/[
aristo_memory, aristo_rocksdb, persistent],
../../nimbus/db/aristo/[
aristo_desc, aristo_debug, aristo_hashify, aristo_init, aristo_layer,
aristo_merge],
./test_helpers
# ------------------------------------------------------------------------------
@ -198,15 +197,15 @@ proc test_backendConsistency*(
# Store onto backend database
block:
#noisy.say "***", "db-dump\n ", mdb.pp
let rc = mdb.save
let rc = mdb.stow(stageOnly=false, extendOK=true)
if rc.isErr:
check rc.error == (0,0)
check rc.error == 0
return
if doRdbOk:
let rc = rdb.save
let rc = rdb.stow(stageOnly=false, extendOK=true)
if rc.isErr:
check rc.error == (0,0)
check rc.error == 0
return
if not ndb.top.verify(mdb.to(MemBackendRef), noisy):

View File

@ -234,33 +234,16 @@ proc test_transcodeVidRecycleLists*(noisy = true; seed = 42) =
check db.top.vGen.len == 1
# Recycling and re-org tests
db.top.vGen = @[8, 7, 3, 4, 5, 9].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[3, 4, 5, 7].mapIt(VertexID(it))
func toVQ(a: seq[int]): seq[VertexID] = a.mapIt(VertexID(it))
db.top.vGen = @[8, 7, 6, 3, 4, 5, 9].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[3].mapIt(VertexID(it))
check @[8, 7, 3, 4, 5, 9] .toVQ.vidReorg == @[3, 4, 5, 7] .toVQ
check @[8, 7, 6, 3, 4, 5, 9] .toVQ.vidReorg == @[3] .toVQ
check @[5, 4, 3, 7] .toVQ.vidReorg == @[5, 4, 3, 7] .toVQ
check @[5] .toVQ.vidReorg == @[5] .toVQ
check @[3, 5] .toVQ.vidReorg == @[3, 5] .toVQ
check @[4, 5] .toVQ.vidReorg == @[4] .toVQ
db.top.vGen = @[5, 4, 3, 7].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[5, 4, 3, 7].mapIt(VertexID(it))
db.top.vGen = @[5].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[5].mapIt(VertexID(it))
db.top.vGen = @[3, 5].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[3, 5].mapIt(VertexID(it))
db.top.vGen = @[4, 5].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[4].mapIt(VertexID(it))
db.top.vGen.setLen(0)
db.vidReorg()
check db.top.vGen.len == 0
check newSeq[VertexID](0).vidReorg().len == 0
# ------------------------------------------------------------------------------
# End

View File

@ -30,6 +30,8 @@ type
## (<sample-name> & "#" <instance>, (<vertex-id>,<error-symbol>))
const
MaxFilterBulk = 15_000
WalkStopRc =
Result[LeafTie,(VertexID,AristoError)].err((VertexID(0),NearbyBeyondRange))
@ -98,6 +100,7 @@ proc innerCleanUp(db: AristoDbRef) =
proc saveToBackend(
tx: var AristoTxRef;
extendOK: bool;
relax: bool;
noisy: bool;
debugID: int;
@ -158,7 +161,7 @@ proc saveToBackend(
check rc.value.level < 0
return
block:
let rc = db.persistent()
let rc = db.stow(stageLimit=MaxFilterBulk, extendOK=extendOK)
if rc.isErr:
check rc.error == 0
return
@ -175,6 +178,7 @@ proc saveToBackend(
proc saveToBackendWithOops(
tx: var AristoTxRef;
extendOK: bool;
noisy: bool;
debugID: int;
oops: (int,AristoError);
@ -225,7 +229,7 @@ proc saveToBackendWithOops(
check rc.value.level < 0
return
block:
let rc = db.persistent()
let rc = db.stow(stageLimit=MaxFilterBulk, extendOK=extendOK)
if rc.isErr:
check rc.error == 0
return
@ -378,7 +382,8 @@ proc testTxMergeAndDelete*(
(leaf, lid) = lvp
if doSaveBeOk:
if not tx.saveToBackend(relax=relax, noisy=noisy, runID):
if not tx.saveToBackend(
extendOK=false, relax=relax, noisy=noisy, runID):
return
# Delete leaf
@ -490,7 +495,8 @@ proc testTxMergeProofAndKvpList*(
block:
let oops = oopsTab.getOrDefault(testId,(0,AristoError(0)))
if not tx.saveToBackendWithOops(noisy, runID, oops):
if not tx.saveToBackendWithOops(
extendOK=true, noisy=noisy, debugID=runID, oops):
return
when true and false: