mirror of https://github.com/status-im/nimbus-eth1.git
Aristo db update foreground caching (#1605)
* Fix vertex ID generator state handling for rocksdb backend

  why:
  * Key error in walk iterator
  * Needs to be loaded when opening the database

* Use non-zero sub-table prefixes for rocksdb

  why:
  Handy for debugging

* Fix error code for missing key on rocksdb backend

  why:
  Previously returned `VOID_HASH_KEY` rather than `GetKeyNotFound`

* Explicitly copy vertex data between internal table and function/result
  argument

  why:
  Function argument or return reference may still refer to the same data
  object.

* Updated error symbols

  why:
  Error symbol names for the hike module now start with the prefix `Hike`.

* Write back modified branch node into local top layer cache

  why:
  With the backend available, the source of the branch node references
  might not be the top layer cache. So any change must be explicitly
  recorded.
This commit is contained in:
parent 2de9c95770
commit 83dbe87159
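
Of the changes above, the explicit vertex copy is the easiest to motivate in isolation. Below is a minimal Nim sketch of the aliasing hazard, with illustrative names only (this is not code from the commit): because `VertexRef` is a ref object, handing out the table's stored reference lets a caller mutate the cached entry in place.

import std/tables

type
  VertexRef = ref object
    payload: seq[byte]

var sTab: Table[int, VertexRef]
sTab[1] = VertexRef(payload: @[1u8, 2, 3])

# Without a copy, caller and table share one object:
let alias = sTab[1]
alias.payload[0] = 99            # silently rewrites the cached entry

# Copying on the way in/out keeps the table isolated:
proc cpy(vtx: VertexRef): VertexRef =
  new result
  result[] = vtx[]               # object body (incl. the seq) is copied

let safe = cpy(sTab[1])
safe.payload[0] = 0              # the cached entry keeps its value

This is the rationale behind the `cpy` helper and the copies in the memory backend's get/put handlers further down.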
@@ -21,6 +21,9 @@ const
   EmptyNibbleSeq* = EmptyBlob.initNibbleRange
     ## Useful shortcut (borrowed from `sync/snap/constants.nim`)

+  EmptyVidSeq* = seq[VertexID].default
+    ## Useful shortcut
+
   VOID_CODE_KEY* = EMPTY_CODE_HASH.to(HashKey)
     ## Equivalent of `nil` for `Account` object code hash

@@ -251,17 +251,20 @@ proc ppXMap*(

 proc ppBe[T](be: T; db: AristoDb; indent: int): string =
   ## Walk over backend tables
-  let pfx = indent.toPfx
+  let
+    pfx = indent.toPfx
+    pfx1 = indent.toPfx(1)
+    pfx2 = indent.toPfx(2)
   result = "<" & $be.kind & ">"
-  result &= pfx & "vGen" & pfx & " [" & be.walkIdg.toSeq.mapIt(
-      it[2].pp
+  result &= pfx & "vGen" & pfx1 & "[" & be.walkIdg.toSeq.mapIt(
+      it[2].mapIt(it.ppVid).join(",")
     ).join(",") & "]"
-  result &= pfx & "sTab" & pfx & " {" & be.walkVtx.toSeq.mapIt(
+  result &= pfx & "sTab" & pfx1 & "{" & be.walkVtx.toSeq.mapIt(
       $(1+it[0]) & "(" & it[1].ppVid & "," & it[2].ppVtx(db,it[1]) & ")"
-    ).join("," & pfx & " ") & "}"
-  result &= pfx & "kMap" & pfx & " {" & be.walkKey.toSeq.mapIt(
+    ).join(pfx2) & "}"
+  result &= pfx & "kMap" & pfx1 & "{" & be.walkKey.toSeq.mapIt(
       $(1+it[0]) & "(" & it[1].ppVid & "," & it[2].ppKey & ")"
-    ).join("," & pfx & " ") & "}"
+    ).join(pfx2) & "}"

 # ------------------------------------------------------------------------------
 # Public functions
@@ -421,6 +424,7 @@ proc pp*(

 proc pp*(
     db: AristoDb;
+    vGenOk = true;
     sTabOk = true;
     lTabOk = true;
     kMapOk = true;
@@ -430,7 +434,7 @@ proc pp*(
   let
     pfx1 = indent.toPfx
     pfx2 = indent.toPfx(1)
-    tagOk = 1 < sTabOk.ord + lTabOk.ord + kMapOk.ord + pPrfOk.ord
+    tagOk = 1 < sTabOk.ord + lTabOk.ord + kMapOk.ord + pPrfOk.ord + vGenOk.ord
   var
     pfy = ""

@@ -445,6 +449,9 @@ proc pp*(
       rc

   if not db.top.isNil:
+    if vGenOk:
+      let info = "vGen(" & $db.top.vGen.len & ")"
+      result &= info.doPrefix & db.top.vGen.pp
     if sTabOk:
       let info = "sTab(" & $db.top.sTab.len & ")"
       result &= info.doPrefix & db.top.sTab.pp(db,indent+1)
@@ -34,24 +34,27 @@ proc branchStillNeeded(vtx: VertexRef): bool =
     if vtx.bVid[n].isValid:
       return true

-proc clearKey(db: AristoDb; vid: VertexID) =
-  let key = db.top.kMap.getOrVoid vid
-  if key.isValid:
+proc clearKey(
+    db: AristoDb;                      # Database, top layer
+    vid: VertexID;                     # Vertex IDs to clear
+      ) =
+  let lbl = db.top.kMap.getOrVoid vid
+  if lbl.isValid:
     db.top.kMap.del vid
-    db.top.pAmk.del key
+    db.top.pAmk.del lbl
   elif db.getKeyBackend(vid).isOK:
     # Register for deleting on backend
     db.top.kMap[vid] = VOID_HASH_LABEL
-    db.top.pAmk.del key
+    db.top.pAmk.del lbl

-proc doneWith(db: AristoDb; vid: VertexID) =
+proc doneWith(
+    db: AristoDb;                      # Database, top layer
+    vid: VertexID;                     # Vertex IDs to clear
+      ) =
   # Remove entry
   db.vidDispose vid                    # Will be propagated to backend
   db.top.sTab.del vid
-  let key = db.top.kMap.getOrVoid vid
-  if key.isValid:
-    db.top.kMap.del vid
-    db.top.pAmk.del key
+  db.clearKey vid


 proc deleteImpl(
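
The `db.top.kMap[vid] = VOID_HASH_LABEL` assignment above is a tombstone: the key still exists on the backend, so the top layer records a void label that turns into a backend deletion on the next save. A simplified stand-alone sketch of the idea (stand-in types, not the commit's code):

import std/tables

type
  HashLabel = object
    key: uint64                  # stand-in for the real Merkle hash key

const VOID_LABEL = HashLabel(key: 0)

func isValid(lbl: HashLabel): bool = lbl.key != 0

var kMap: Table[int, HashLabel]  # top layer: vertex ID -> hash label
kMap[7] = VOID_LABEL             # tombstone: vid 7 to be deleted on backend

# At save time, void entries translate into backend deletions:
for vid, lbl in kMap.pairs:
  if not lbl.isValid:
    echo "schedule backend delete for vid ", vid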
@@ -66,45 +69,53 @@ proc deleteImpl(
     return err((VertexID(0),hike.error))

   # doAssert 0 < hike.legs.len and hike.tail.len == 0 # as assured by `hikeUp()`
-  var inx = hike.legs.len - 1
-
-  # Remove leaf entry on the top
-  let lf = hike.legs[inx].wp
-  if lf.vtx.vType != Leaf:
-    return err((lf.vid,DelLeafExpexted))
-  if lf.vid in db.top.pPrf:
-    return err((lf.vid, DelLeafLocked))
-  db.doneWith lf.vid
-  inx.dec
+  var lf: VidVtxPair
+  block:
+    var inx = hike.legs.len - 1

-  while 0 <= inx:
-    # Unlink child vertex
-    let br = hike.legs[inx].wp
-    if br.vtx.vType != Branch:
-      return err((br.vid,DelBranchExpexted))
-    if br.vid in db.top.pPrf:
-      return err((br.vid, DelBranchLocked))
-    br.vtx.bVid[hike.legs[inx].nibble] = VertexID(0)
-
-    if br.vtx.branchStillNeeded:
-      db.clearKey br.vid
-      break
-
-    # Remove this `Branch` entry
-    db.doneWith br.vid
-    inx.dec
+    # Remove leaf entry on the top
+    lf = hike.legs[inx].wp
+    if lf.vtx.vType != Leaf:
+      return err((lf.vid,DelLeafExpexted))
+    if lf.vid in db.top.pPrf:
+      return err((lf.vid, DelLeafLocked))
+    db.doneWith(lf.vid)
+    inx.dec

-    if inx < 0:
-      break
+    while 0 <= inx:
+      # Unlink child vertex
+      let br = hike.legs[inx].wp
+      if br.vtx.vType != Branch:
+        return err((br.vid,DelBranchExpexted))
+      if br.vid in db.top.pPrf:
+        return err((br.vid, DelBranchLocked))
+      br.vtx.bVid[hike.legs[inx].nibble] = VertexID(0)
+      db.top.sTab[br.vid] = br.vtx

-    # There might be an optional `Extension` to remove
-    let ext = hike.legs[inx].wp
-    if ext.vtx.vType == Extension:
-      if br.vid in db.top.pPrf:
-        return err((ext.vid, DelExtLocked))
-      db.doneWith ext.vid
-      inx.dec
+      if br.vtx.branchStillNeeded:
+        # Clear all keys up to the root key
+        db.clearKey(br.vid)
+        while 0 < inx:
+          inx.dec
+          db.clearKey(hike.legs[inx].wp.vid)
+        break
+
+      # Remove this `Branch` entry
+      db.doneWith(br.vid)
+      inx.dec
+
+      if inx < 0:
+        break
+
+      # There might be an optional `Extension` to remove
+      let ext = hike.legs[inx].wp
+      if ext.vtx.vType == Extension:
+        if br.vid in db.top.pPrf:
+          return err((ext.vid, DelExtLocked))
+        db.doneWith(ext.vid)
+        inx.dec

   # Delete leaf entry
   let rc = db.getVtxBackend lf.vid
   if rc.isErr and rc.error == GetVtxNotFound:
@@ -41,14 +41,14 @@ type
     sTab*: Table[VertexID,VertexRef]   ## Structural vertex table
     lTab*: Table[LeafTie,VertexID]     ## Direct access, path to leaf vertex
     kMap*: Table[VertexID,HashLabel]   ## Merkle hash key mapping
-    pAmk*: Table[HashLabel,VertexID]   ## Reverse mapper for data import
+    pAmk*: Table[HashLabel,VertexID]   ## Reverse `kMap` entries, hash key lookup
     pPrf*: HashSet[VertexID]           ## Locked vertices (proof nodes)
     vGen*: seq[VertexID]               ## Unique vertex ID generator

   AristoDb* = object
     ## Set of database layers, supporting transaction frames
-    top*: AristoLayerRef               ## Database working layer
-    stack*: seq[AristoLayerRef]        ## Stashed parent layers
+    top*: AristoLayerRef               ## Database working layer, mutable
+    stack*: seq[AristoLayerRef]        ## Stashed immutable parent layers
     backend*: AristoBackendRef         ## Backend database (may well be `nil`)

     # Debugging data below, might go away in future
@@ -58,13 +58,13 @@ type
 # Public helpers
 # ------------------------------------------------------------------------------

-proc getOrVoid*[W](tab: Table[W,VertexRef]; w: W): VertexRef =
+func getOrVoid*[W](tab: Table[W,VertexRef]; w: W): VertexRef =
   tab.getOrDefault(w, VertexRef(nil))

-proc getOrVoid*[W](tab: Table[W,HashLabel]; w: W): HashLabel =
+func getOrVoid*[W](tab: Table[W,HashLabel]; w: W): HashLabel =
   tab.getOrDefault(w, VOID_HASH_LABEL)

-proc getOrVoid*[W](tab: Table[W,VertexID]; w: W): VertexID =
+func getOrVoid*[W](tab: Table[W,VertexID]; w: W): VertexID =
   tab.getOrDefault(w, VertexID(0))

 # --------
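
Turning these helpers from `proc` into `func` merely declares them side-effect free; the pattern itself returns a type-specific sentinel instead of raising `KeyError`. A small self-contained sketch of the same idiom (simplified `VertexID`, hypothetical table):

import std/tables

type VertexID = distinct uint64

func isValid(vid: VertexID): bool = 0 < vid.uint64

func getOrVoid(tab: Table[string, VertexID]; w: string): VertexID =
  tab.getOrDefault(w, VertexID(0))

var pAmk: Table[string, VertexID]
let vid = pAmk.getOrVoid "missing-key"
if not vid.isValid:
  echo "miss handled without exceptions"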
@@ -45,12 +45,12 @@ type
     CacheMissingNodekeys

   # Path function `hikeUp()`
-  PathRootMissing
-  PathLeafTooEarly
-  PathBranchTailEmpty
-  PathBranchBlindEdge
-  PathExtTailEmpty
-  PathExtTailMismatch
+  HikeRootMissing
+  HikeLeafTooEarly
+  HikeBranchTailEmpty
+  HikeBranchBlindEdge
+  HikeExtTailEmpty
+  HikeExtTailMismatch

   # Path/nibble/key conversions in `aristo_path.nim`
   PathExpected64Nibbles
@@ -69,6 +69,7 @@ type
   MergeLeafPathCachedAlready
   MergeNonBranchProofModeLock
   MergeRootBranchLinkBusy
+  MergeAssemblyFailed # Ooops, internal error

   MergeHashKeyInvalid
   MergeRootVidInvalid
@@ -42,13 +42,12 @@
 {.push raises: [].}

 import
-  std/[algorithm, sequtils, strutils],
-  std/[sets, tables],
+  std/[sequtils, sets, strutils, tables],
   chronicles,
   eth/common,
   stew/results,
-  "."/[aristo_constants, aristo_debug, aristo_desc, aristo_get,
-    aristo_hike, aristo_transcode, aristo_vid]
+  "."/[aristo_constants, aristo_desc, aristo_get, aristo_hike,
+    aristo_transcode, aristo_vid]

 type
   BackVidValRef = ref object
@@ -75,25 +74,6 @@ func getOrVoid(tab: BackVidTab; vid: VertexID): BackVidValRef =
 func isValid(brv: BackVidValRef): bool =
   brv != BackVidValRef(nil)

-# ------------------------------------------------------------------------------
-# Private helper, debugging
-# ------------------------------------------------------------------------------
-
-proc pp(w: BackVidValRef): string =
-  if w.isNil:
-    return "n/a"
-  result = "(" & w.root.pp & ","
-  if w.onBe:
-    result &= "*"
-  result &= "," & w.toVid.pp & ")"
-
-proc pp(t: BackVidTab): string =
-  proc pp(b: bool): string =
-    if b: "*" else: ""
-  "{" & t.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
-    .mapIt("(" & it.pp & "," & t.getOrVoid(it).pp & ")")
-    .join(",") & "}"
-
 # ------------------------------------------------------------------------------
 # Private functions
 # ------------------------------------------------------------------------------
@@ -233,7 +213,7 @@ proc hashify*(
   for (lky,vid) in db.top.lTab.pairs:
     let hike = lky.hikeUp(db)
     if hike.error != AristoError(0):
-      return err((hike.root,hike.error))
+      return err((vid,hike.error))

     roots.incl hike.root
@@ -367,20 +347,24 @@ proc hashifyCheck*(

   else:
     for (vid,lbl) in db.top.kMap.pairs:
-      let vtx = db.getVtx vid
-      if vtx.isValid:
-        let rc = vtx.toNode(db)
-        if rc.isOk:
-          if lbl.key != rc.value.encode.digestTo(HashKey):
-            return err((vid,HashifyCheckVtxHashMismatch))
+      if lbl.isValid:                    # Otherwise to be deleted
+        let vtx = db.getVtx vid
+        if vtx.isValid:
+          let rc = vtx.toNode(db)
+          if rc.isOk:
+            if lbl.key != rc.value.encode.digestTo(HashKey):
+              return err((vid,HashifyCheckVtxHashMismatch))

-      let revVid = db.top.pAmk.getOrVoid lbl
-      if not revVid.isValid:
-        return err((vid,HashifyCheckRevHashMissing))
-      if revVid != vid:
-        return err((vid,HashifyCheckRevHashMismatch))
+        let revVid = db.top.pAmk.getOrVoid lbl
+        if not revVid.isValid:
+          return err((vid,HashifyCheckRevHashMissing))
+        if revVid != vid:
+          return err((vid,HashifyCheckRevHashMismatch))

-  if db.top.pAmk.len != db.top.kMap.len:
+  # Some `kMap[]` entries may be void indicating backend deletion
+  let kMapCount = db.top.kMap.values.toSeq.filterIt(it.isValid).len
+
+  if db.top.pAmk.len != kMapCount:
     var knownKeys: HashSet[VertexID]
     for (key,vid) in db.top.pAmk.pairs:
       if not db.top.kMap.hasKey(vid):
@@ -390,7 +374,8 @@ proc hashifyCheck*(
       knownKeys.incl vid
     return err((VertexID(0),HashifyCheckRevCountMismatch)) # should not apply(!)

-  if 0 < db.top.pAmk.len and not relax and db.top.pAmk.len != db.top.sTab.len:
+  if 0 < db.top.pAmk.len and not relax and db.top.pAmk.len < db.top.sTab.len:
+    # Cannot have fewer changes than cached entries
     return err((VertexID(0),HashifyCheckVtxCountMismatch))

   for vid in db.top.pPrf:
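
The relaxed check above discounts tombstones: a void label in `kMap` marks a pending backend deletion, so only valid labels are expected to carry a reverse `pAmk` entry. In miniature (integers standing in for labels, not the commit's code):

import std/[sequtils, tables]

let kMap = {1: 10, 2: 0, 3: 30}.toTable   # 0 plays the void-label role
let kMapCount = kMap.values.toSeq.filterIt(it != 0).len
assert kMapCount == 2                      # entry 2 is a tombstone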
@@ -71,7 +71,7 @@ proc hikeUp*(
     tail: path)

   if not root.isValid:
-    result.error = PathRootMissing
+    result.error = HikeRootMissing

   else:
     var vid = root
@@ -96,13 +96,13 @@ proc hikeUp*(
           result.legs.add leg
           result.tail = EmptyNibbleSeq
         else:
-          result.error = PathLeafTooEarly # Ooops
+          result.error = HikeLeafTooEarly # Ooops
         break # Buck stops here

       of Branch:
         if result.tail.len == 0:
           result.legs.add leg
-          result.error = PathBranchTailEmpty # Ooops
+          result.error = HikeBranchTailEmpty # Ooops
           break

         let
@@ -110,7 +110,7 @@ proc hikeUp*(
           nextVid = leg.wp.vtx.bVid[nibble]

         if not nextVid.isValid:
-          result.error = PathBranchBlindEdge # Ooops
+          result.error = HikeBranchBlindEdge # Ooops
           break

         leg.nibble = nibble
@@ -122,11 +122,11 @@ proc hikeUp*(
         if result.tail.len == 0:
           result.legs.add leg
           result.tail = EmptyNibbleSeq
-          result.error = PathExtTailEmpty # Well, somehow odd
+          result.error = HikeExtTailEmpty # Well, somehow odd
           break

         if leg.wp.vtx.ePfx.len != result.tail.sharedPrefixLen(leg.wp.vtx.ePfx):
-          result.error = PathExtTailMismatch # Need to branch from here
+          result.error = HikeExtTailMismatch # Need to branch from here
           break

         result.legs.add leg
@@ -42,10 +42,17 @@ proc init*(
     ok T(top: AristoLayerRef(), backend: memoryBackend())

   elif backend == BackendRocksDB:
-    let rc = rocksDbBackend basePath
-    if rc.isErr:
-      return err(rc.error)
-    ok T(top: AristoLayerRef(), backend: rc.value)
+    let be = block:
+      let rc = rocksDbBackend basePath
+      if rc.isErr:
+        return err(rc.error)
+      rc.value
+    let vGen = block:
+      let rc = be.getIdgFn()
+      if rc.isErr:
+        return err(rc.error)
+      rc.value
+    ok T(top: AristoLayerRef(vGen: vGen), backend: be)

   else:
     {.error: "Unknown/unsupported Aristo DB backend".}
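
The `let be = block:` and `let vGen = block:` constructs above use Nim's expression-block idiom: the block's last expression becomes the bound value, while a `return err(...)` inside still exits the enclosing proc early. A generic sketch of the shape (hypothetical names, same `stew/results` package the module already imports):

import stew/results

proc loadState(fetch: proc(): Result[seq[int], string]): Result[seq[int], string] =
  let state = block:             # the block is an expression ...
    let rc = fetch()
    if rc.isErr:
      return err(rc.error)       # ... but `return` still exits loadState
    rc.value                     # last expression = the block's value
  ok state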
@@ -76,9 +83,11 @@ proc finish*(db: var AristoDb; flush = false) =
   ## always flush on close.)
   if not db.backend.isNil:
     db.backend.closeFn flush
+    db.backend = AristoBackendRef(nil)
   db.top = AristoLayerRef(nil)
+  db.stack.setLen(0)

 # -----------------

 proc to*[W: MemBackendRef|RdbBackendRef](db: AristoDb; T: type W): T =
   ## Handy helper for low-level access to some backend functionality
@@ -34,9 +34,9 @@ type

   AristoStorageType* = enum
     ## Storage types, key prefix
-    IdgPfx = 0                         ## ID generator
-    VtxPfx = 1                         ## Vertex data
-    KeyPfx = 2                         ## Key/hash data
+    IdgPfx = 1                         ## ID generator
+    VtxPfx = 2                         ## Vertex data
+    KeyPfx = 3                         ## Key/hash data

 # ------------------------------------------------------------------------------
 # Public helpers
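
With the zero prefix retired, an all-zero or truncated key can no longer be mistaken for the ID generator record, which is what makes non-zero prefixes "handy for debugging". A sketch of how a one-byte sub-table prefix might frame a backend key (illustrative only; the real encoding lives in the rocksdb backend modules):

type StoPfx = enum
  IdgPfx = 1                     # ID generator
  VtxPfx = 2                     # vertex data
  KeyPfx = 3                     # key/hash data

func beKey(pfx: StoPfx; vid: uint64): seq[byte] =
  ## prefix byte followed by the vertex ID, big endian
  result = @[byte(pfx.ord)]      # never zero, easy to spot in a hex dump
  for n in countdown(7, 0):
    result.add byte((vid shr (8 * n)) and 0xff)

assert beKey(VtxPfx, 1)[0] == 2
assert beKey(VtxPfx, 1).len == 9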
@@ -65,6 +65,12 @@ proc endSession(hdl: PutHdlRef; db: MemBackendRef): MemPutHdlRef =
   hdl.TypedPutHdlRef.finishSession db
   hdl.MemPutHdlRef

+proc cpy(vtx: VertexRef): VertexRef =
+  new result
+  result[] = vtx[]
+  if vtx.vType == Leaf:
+    result.lData[] = vtx.lData[]
+
 # ------------------------------------------------------------------------------
 # Private functions: interface
 # ------------------------------------------------------------------------------
@@ -72,16 +78,16 @@ proc endSession(hdl: PutHdlRef; db: MemBackendRef): MemPutHdlRef =
 proc getVtxFn(db: MemBackendRef): GetVtxFn =
   result =
     proc(vid: VertexID): Result[VertexRef,AristoError] =
-      let vtx = db.sTab.getOrDefault(vid, VertexRef(nil))
-      if vtx != VertexRef(nil):
-        return ok vtx
+      let vtx = db.sTab.getOrVoid vid
+      if vtx.isValid:
+        return ok cpy(vtx)
       err(GetVtxNotFound)

 proc getKeyFn(db: MemBackendRef): GetKeyFn =
   result =
     proc(vid: VertexID): Result[HashKey,AristoError] =
       let key = db.kMap.getOrDefault(vid, VOID_HASH_KEY)
-      if key != VOID_HASH_KEY:
+      if key.isValid:
         return ok key
       err(GetKeyNotFound)
@@ -103,7 +109,7 @@ proc putVtxFn(db: MemBackendRef): PutVtxFn =
     proc(hdl: PutHdlRef; vrps: openArray[(VertexID,VertexRef)]) =
       let hdl = hdl.getSession db
       for (vid,vtx) in vrps:
-        hdl.sTab[vid] = vtx
+        hdl.sTab[vid] = cpy(vtx)

 proc putKeyFn(db: MemBackendRef): PutKeyFn =
   result =
@@ -31,10 +31,12 @@ import
   eth/common,
   rocksdb,
   stew/results,
-  ".."/[aristo_desc, aristo_transcode],
+  ../aristo_constants,
+  ../aristo_desc,
   ../aristo_desc/aristo_types_backend,
-  ./aristo_rocksdb/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk],
-  ./aristo_init_common
+  ../aristo_transcode,
+  ./aristo_init_common,
+  ./aristo_rocksdb/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk]

 logScope:
   topics = "aristo-backend"
@@ -113,9 +115,10 @@ proc getKeyFn(db: RdbBackendRef): GetKeyFn =
         return err(rc.error[0])

       # Decode data record
-      var key: HashKey
-      if key.init rc.value:
-        return ok key
+      if 0 < rc.value.len:
+        var key: HashKey
+        if key.init rc.value:
+          return ok key

       err(GetKeyNotFound)
@@ -129,8 +132,12 @@ proc getIdgFn(db: RdbBackendRef): GetIdgFn =
         debug logTxt "getIdgFn: failed", error=rc.error[1]
         return err(rc.error[0])

+      if rc.value.len == 0:
+        let w = EmptyVidSeq
+        return ok w
+
       # Decode data record
-      return rc.value.deblobify seq[VertexID]
+      rc.value.deblobify seq[VertexID]

 # -------------
@@ -155,7 +155,7 @@ iterator walk*(
         break walkBody # done

       let vid = kData.keyVid(kLen)
-      if vid.isValid:
+      if vid.isValid or pfx == IdgPfx:

         # Fetch value data
         var vLen: csize_t
@@ -120,7 +120,7 @@ proc save*(
   # Delete stack and clear top
   db.stack.setLen(0)
   if clear:
-    db.top = AristoLayerRef(vGen: db.top.vgen)
+    db.top = AristoLayerRef(vGen: db.top.vGen)

   ok hst
@@ -75,14 +75,14 @@ proc clearMerkleKeys(
     vid: VertexID;                     # Additional vertex IDs to clear
       ) =
   for vid in hike.legs.mapIt(it.wp.vid) & @[vid]:
-    let key = db.top.kMap.getOrVoid vid
-    if key.isValid:
+    let lbl = db.top.kMap.getOrVoid vid
+    if lbl.isValid:
       db.top.kMap.del vid
-      db.top.pAmk.del key
+      db.top.pAmk.del lbl
     elif db.getKeyBackend(vid).isOK:
       # Register for deleting on backend
       db.top.kMap[vid] = VOID_HASH_LABEL
-      db.top.pAmk.del key
+      db.top.pAmk.del lbl

 # -----------
@@ -248,6 +248,7 @@ proc concatBranchAndLeaf(
     lPfx: hike.tail.slice(1),
     lData: payload)
   brVtx.bVid[nibble] = vid
+  db.top.sTab[brVid] = brVtx
   db.top.sTab[vid] = vtx
   result.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)
@@ -369,8 +370,9 @@ proc topIsExtAddLeaf(
       lPfx: hike.tail.slice(1),
       lData: payload)
     brVtx.bVid[nibble] = vid
+    db.top.sTab[brVid] = brVtx
     db.top.sTab[vid] = vtx
     result.legs[^1].nibble = nibble
     result.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)
     result.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)
@@ -400,6 +402,7 @@ proc topIsEmptyAddLeaf(
         lPfx: hike.tail.slice(1),
         lData: payload)
       rootVtx.bVid[nibble] = leafVid
+      db.top.sTab[hike.root] = rootVtx
       db.top.sTab[leafVid] = leafVtx
       return Hike(
         root: hike.root,
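
The one-line `db.top.sTab[...] = ...` additions above all address the hazard named in the commit message: with a backend attached, the branch or root vertex may have been fetched from the backend rather than from the top-layer cache, so mutating the ref alone does not record the change. A simplified sketch of the failure mode (hypothetical types, not the commit's code):

import std/tables

type
  VertexRef = ref object
    bVid: array[16, int]

var sTab: Table[int, VertexRef]          # mutable top layer

proc fetchFromBackend(vid: int): VertexRef =
  VertexRef()                            # fresh object per read, like a DB fetch

var brVtx = sTab.getOrDefault(7)
if brVtx.isNil:
  brVtx = fetchFromBackend(7)            # NOT aliased by the cache
brVtx.bVid[3] = 42                       # the edit lives only in this copy ...
sTab[7] = brVtx                          # ... until explicitly written back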
@@ -408,6 +411,25 @@ proc topIsEmptyAddLeaf(

   db.insertBranch(hike, hike.root, rootVtx, payload)

+
+proc updatePayload(
+    db: AristoDb;                      # Database, top layer
+    hike: Hike;                        # No path legs
+    leaf: LeafTiePayload;              # Leaf data and payload
+      ): Hike =
+  ## Update leaf vertex if payloads differ
+  result = hike
+  let vtx = result.legs[^1].wp.vtx
+
+  # Update payloads if they differ
+  if vtx.lData != leaf.payload:
+    let vid = result.legs[^1].wp.vid
+
+    vtx.lData = leaf.payload
+    db.top.sTab[vid] = vtx
+    db.top.lTab[leaf.leafTie] = vid
+    db.clearMerkleKeys(result, vid)
+
 # ------------------------------------------------------------------------------
 # Private functions: add Merkle proof node
 # ------------------------------------------------------------------------------
@@ -512,7 +534,17 @@ proc merge*(
   ## stored with the leaf vertex in the database unless the leaf vertex exists
   ## already.
   ##
-  if db.top.lTab.hasKey leaf.leafTie:
+
+  # Check whether the leaf is on the database and payloads match
+  var haveLeafOk = false
+  block:
+    let vid = db.top.lTab.getOrVoid leaf.leafTie
+    if vid.isValid:
+      let vtx = db.getVtx vid
+      if vtx.isValid and vtx.lData == leaf.payload:
+        haveLeafOk = true
+
+  if haveLeafOk:
     result.error = MergeLeafPathCachedAlready

   else:
@@ -524,7 +556,7 @@ proc merge*(
     of Leaf:
       if 0 < hike.tail.len:            # `Leaf` vertex problem?
         return Hike(error: MergeLeafGarbledHike)
-      result = hike
+      result = db.updatePayload(hike, leaf)
     of Extension:
       result = db.topIsExtAddLeaf(hike, leaf.payload)
@@ -545,6 +577,12 @@ proc merge*(
       db.top.sTab[wp.vid] = wp.vtx
       result = Hike(root: wp.vid, legs: @[Leg(wp: wp, nibble: -1)])

+  # Double check the result until the code is more reliable
+  block:
+    let rc = result.to(NibblesSeq).pathToKey
+    if rc.isErr or rc.value != leaf.leafTie.path.to(HashKey):
+      result.error = MergeAssemblyFailed # Ooops
+
   # Update leaf access cache
   if result.error == AristoError(0):
     db.top.lTab[leaf.leafTie] = result.legs[^1].wp.vid
@@ -290,7 +290,7 @@ proc deblobify*(data: Blob; vGen: var seq[VertexID]): AristoError =
   ## De-serialise the data record encoded with `blobify()` into the vertex ID
   ## generator argument `vGen`.
   if data.len == 0:
-    vGen = @[1.VertexID]
+    vGen = @[]
   else:
     if (data.len mod 8) != 1:
       return DeblobSizeGarbled
@@ -188,13 +188,17 @@ proc transcodeRunner(noisy =true; sample=accSample; stopAfter=high(int)) =
     noisy.test_transcodeAccounts(db.cdb[0].rocksStoreRef, stopAfter)


-proc accountsRunner(noisy=true; sample=accSample, resetDb=false) =
+proc accountsRunner(
+    noisy = true;
+    sample = accSample;
+    resetDb = false;
+    cmpBackends = true;
+      ) =
   let
     accLst = sample.to(seq[UndumpAccounts]).to(seq[ProofTrieData])
     fileInfo = sample.file.splitPath.tail.replace(".txt.gz","")
     listMode = if resetDb: "" else: ", merged data lists"
-    baseDir = getTmpDir() / sample.name & "-accounts"
-    dbDir = baseDir / "tmp"
+    baseDir = getTmpDir() / sample.name & "-accounts"

   defer:
     try: baseDir.removeDir except CatchableError: discard
@@ -202,18 +206,27 @@ proc accountsRunner(noisy=true; sample=accSample, resetDb=false) =
   suite &"Aristo: accounts data dump from {fileInfo}{listMode}":

     test &"Merge {accLst.len} account lists to database":
-      check noisy.test_mergeKvpList(accLst, resetDb)
+      # Local sub-directories needed as DB might be kept locked after close
+      let dbDir = baseDir / "tmp1"
+      check noisy.test_mergeKvpList(accLst, dbDir, resetDb)

     test &"Merge {accLst.len} proof & account lists to database":
+      let dbDir = baseDir / "tmp2"
       check noisy.test_mergeProofAndKvpList(accLst, resetDb)

-    test &"Store {accLst.len} account lists on database backends":
-      check noisy.test_backendConsistency(accLst, dbDir, resetDb)
+    test &"Compare {accLst.len} account lists on database backends":
+      if cmpBackends:
+        let dbDir = baseDir / "tmp3"
+        check noisy.test_backendConsistency(accLst, dbDir, resetDb)
+      else:
+        skip()

     test &"Traverse accounts database w/{accLst.len} account lists":
+      let dbDir = baseDir / "tmp4"
       check noisy.test_nearbyKvpList(accLst, resetDb)

     test &"Delete accounts database, successively {accLst.len} entries":
+      let dbDir = baseDir / "tmp5"
       check noisy.test_delete accLst

@@ -222,13 +235,13 @@ proc storagesRunner(
     sample = storSample;
     resetDb = false;
     oops: KnownHasherFailure = @[];
+    cmpBackends = true;
       ) =
   let
     stoLst = sample.to(seq[UndumpStorages]).to(seq[ProofTrieData])
     fileInfo = sample.file.splitPath.tail.replace(".txt.gz","")
     listMode = if resetDb: "" else: ", merged data lists"
-    baseDir = getTmpDir() / sample.name & "-storage"
-    dbDir = baseDir / "tmp"
+    baseDir = getTmpDir() / sample.name & "-storage"

   defer:
     try: baseDir.removeDir except CatchableError: discard
@@ -236,18 +249,28 @@ proc storagesRunner(
   suite &"Aristo: storages data dump from {fileInfo}{listMode}":

     test &"Merge {stoLst.len} storage slot lists to database":
-      check noisy.test_mergeKvpList(stoLst, resetDb)
+      # Local sub-directories needed as DB might be kept locked after close
+      let dbDir = baseDir / "tmp1"
+      check noisy.test_mergeKvpList(stoLst, dbDir, resetDb)

     test &"Merge {stoLst.len} proof & slots lists to database":
-      check noisy.test_mergeProofAndKvpList(stoLst, resetDb, fileInfo, oops)
+      let dbDir = baseDir / "tmp2"
+      check noisy.test_mergeProofAndKvpList(
+        stoLst, resetDb, fileInfo, oops)

-    test &"Store {stoLst.len} slot lists on database backends":
-      check noisy.test_backendConsistency(stoLst, dbDir, resetDb)
+    test &"Compare {stoLst.len} slot lists on database backends":
+      let dbDir = baseDir / "tmp3"
+      if cmpBackends:
+        check noisy.test_backendConsistency(stoLst, dbDir, resetDb)
+      else:
+        skip()

     test &"Traverse storage slots database w/{stoLst.len} account lists":
+      let dbDir = baseDir / "tmp4"
       check noisy.test_nearbyKvpList(stoLst, resetDb)

     test &"Delete storage database, successively {stoLst.len} entries":
+      let dbDir = baseDir / "tmp5"
       check noisy.test_delete stoLst

 # ------------------------------------------------------------------------------
@@ -266,7 +289,7 @@ when isMainModule:

   setErrorLevel()

-  when true and false:
+  when true: # and false:
     noisy.miscRunner()

   # Borrowed from `test_sync_snap.nim`
@@ -228,7 +228,7 @@ proc test_backendConsistency*(
       return

   if doRdbOk:
-    if not ndb.top.verify(rdb.backend.RdbBackendRef, noisy):
+    if not ndb.top.verify(rdb.to(RdbBackendRef), noisy):
       when true and false:
         noisy.say "***", "beCon(4) <", n, "/", list.len-1, ">",
           " groups=", count,
@@ -17,8 +17,8 @@ import
   stew/results,
   unittest2,
   ../../nimbus/db/aristo/[
-    aristo_desc, aristo_delete, aristo_hashify, aristo_init, aristo_nearby,
-    aristo_merge],
+    aristo_desc, aristo_debug, aristo_delete, aristo_hashify, aristo_init,
+    aristo_nearby, aristo_merge],
   ./test_helpers

 type
@@ -121,6 +121,7 @@ proc test_delete*(
       lstLen = list.len
       leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
       added = db.merge leafs
+      preState = db.pp

     if added.error != AristoError(0):
       check added.error == AristoError(0)
@@ -141,7 +142,7 @@ proc test_delete*(

     let uMax = leafTies.len - 1
     for u,leafTie in leafTies:
-      let rc = leafTie.delete(db)
+      let rc = leafTie.delete db # ,noisy)
       if rc.isErr:
         check rc.error == (VertexID(0),AristoError(0))
         return
@@ -164,8 +165,15 @@ proc test_delete*(
       elif 0 < db.top.sTab.len:
         check db.top.sTab.len == 0
         return
-      let rc = db.hashifyCheck(relax=true)
+      let rc = db.hashifyCheck(relax=true) # ,noisy=true)
       if rc.isErr:
+        noisy.say "***", "<", n, "/", lstLen-1, ">",
+          " item=", u, "/", uMax,
+          "\n   --------",
+          "\n   pre-DB\n    ", preState,
+          "\n   --------",
+          "\n   cache\n    ", db.pp,
+          "\n   --------"
         check rc.error == (VertexID(0),AristoError(0))
         return
@@ -76,6 +76,9 @@ proc pp*(w: openArray[ProofTrieData]; indent = 4): string =
   let pfx = indent.toPfx
   "[" & w.mapIt(it.pp(indent + 1)).join("," & pfx & " ") & "]"

+proc pp*(ltp: LeafTiePayload; db: AristoDB): string =
+  "(" & ltp.leafTie.pp(db) & "," & ltp.payload.pp(db) & ")"
+
 # ----------

 proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
@@ -14,11 +14,12 @@
 import
   std/tables,
   eth/common,
-  stew/results,
+  stew/[byteutils, results],
   unittest2,
+  ../../nimbus/db/aristo/aristo_init/aristo_rocksdb,
   ../../nimbus/db/aristo/[
     aristo_desc, aristo_debug, aristo_get, aristo_hashify, aristo_init,
-    aristo_hike, aristo_merge],
+    aristo_hike, aristo_layer, aristo_merge],
   ./test_helpers

 type
@@ -38,7 +39,7 @@ proc pp(w: tuple[merged: int, dups: int, error: AristoError]): string =
 proc mergeStepwise(
     db: AristoDb;
     leafs: openArray[LeafTiePayload];
-    noisy: bool;
+    noisy = false;
       ): tuple[merged: int, dups: int, error: AristoError] =
   let
     lTabLen = db.top.lTab.len
@@ -47,16 +48,18 @@ proc mergeStepwise(

   for n,leaf in leafs:
     var
-      event = false # or (2 < u) or true
+      event = false
       dumpOk = false or event
       stopOk = false

+    when true: # and false:
+      noisy.say "***", "step <", n, "/", leafs.len-1, "> leaf=", leaf.pp(db)
+
     let
+      preState = db.pp
       hike = db.merge leaf
       ekih = leaf.leafTie.hikeUp(db)

-    noisy.say "***", "step <", n, "/", leafs.len-1, "> "
-
     case hike.error:
     of AristoError(0):
       merged.inc
@@ -67,12 +70,13 @@ proc mergeStepwise(
       dumpOk = true
       stopOk = true

-    if ekih.error != AristoError(0):
+    if ekih.error != AristoError(0) or
+       ekih.legs[^1].wp.vtx.lData.blob != leaf.payload.blob:
       dumpOk = true
       stopOk = true

     let hashesOk = block:
-      let rc = db.hashifyCheck(relax = true)
+      let rc = db.hashifyCheck(relax=true)
       if rc.isOk:
         (VertexID(0),AristoError(0))
       else:
@@ -82,17 +86,22 @@ proc mergeStepwise(
         error = rc.error[1]
         rc.error

+    if db.top.lTab.len < lTabLen + merged:
+      dumpOk = true
+
     if dumpOk:
-      noisy.say "***", "<", n, "/", leafs.len-1, "> ", leaf.leafTie.pp,
-        "\n   pre-state ", preState,
-        "\n   --------",
-        "\n   merge => hike",
-        "\n    ", hike.pp(db),
-        "\n   --------",
-        "\n    ekih", ekih.pp(db),
-        "\n   --------",
-        "\n   post-state ", db.pp,
-        "\n"
+      noisy.say "***", "<", n, "/", leafs.len-1, ">",
+        " merged=", merged,
+        " dups=", dups,
+        " leaf=", leaf.pp(db),
+        "\n   --------",
+        "\n   hike\n    ", hike.pp(db),
+        "\n   ekih\n    ", ekih.pp(db),
+        "\n   pre-DB\n    ", preState,
+        "\n   --------",
+        "\n   cache\n    ", db.pp,
+        "\n   backend\n    ", db.to(RdbBackendRef).pp(db),
+        "\n   --------"

     check hike.error in {AristoError(0), MergeLeafPathCachedAlready}
     check ekih.error == AristoError(0)
@@ -103,12 +112,12 @@ proc mergeStepwise(
     elif ekih.legs[^1].wp.vtx.vType != Leaf:
       check ekih.legs[^1].wp.vtx.vType == Leaf
     elif hike.error != MergeLeafPathCachedAlready:
-      check ekih.legs[^1].wp.vtx.lData.blob == leaf.payload.blob
+      check ekih.legs[^1].wp.vtx.lData.blob.toHex == leaf.payload.blob.toHex

-    if db.top.lTab.len != lTabLen + merged:
+    if db.top.lTab.len < lTabLen + merged:
+      check lTabLen + merged <= db.top.lTab.len
       error = GenericError
-      check db.top.lTab.len == lTabLen + merged # quick leaf access table
-      stopOk = true # makes no sense to go on
+      stopOk = true # makes no sense to go on

     if stopOk:
       noisy.say "***", "<", n, "/", leafs.len-1, "> stop"
@@ -123,61 +132,96 @@ proc mergeStepwise(
 proc test_mergeKvpList*(
     noisy: bool;
     list: openArray[ProofTrieData];
+    rdbPath: string;                   # Rocks DB storage directory
     resetDb = false;
       ): bool =

-  var db = AristoDb.init BackendNone
+  var db: AristoDb
   for n,w in list:
-    if resetDb:
-      db.top = AristoLayerRef()
+    if resetDb or db.top.isNil:
+      db.finish(flush=true)
+      db = block:
+        let rc = AristoDb.init(BackendRocksDB,rdbPath)
+        if rc.isErr:
+          check rc.error == AristoError(0)
+          return
+        rc.value
     let
       lstLen = list.len
       lTabLen = db.top.lTab.len
       leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
-      #prePreDb = db.pp
-      added = db.merge leafs
-      #added = db.mergeStepwise(leafs, noisy=true)

-    check added.error == AristoError(0)
-    check db.top.lTab.len == lTabLen + added.merged
+    when true and false:
+      if true and 40 <= n:
+        noisy.say "*** kvp(1)", "<", n, "/", lstLen-1, ">",
+          " nLeafs=", leafs.len,
+          "\n   cache\n    ", db.pp,
+          "\n   backend\n    ", db.to(RdbBackendRef).pp(db),
+          "\n   --------"
+
+    let
+      added = db.merge leafs
+      #added = db.mergeStepwise(leafs) #, noisy=40 <= n)
+
+    if added.error != AristoError(0):
+      check added.error == AristoError(0)
+      return
+    # There might be an extra leaf in the cache after inserting a `Branch`
+    # which forks a previous leaf node and a new one.
+    check lTabLen + added.merged <= db.top.lTab.len
     check added.merged + added.dups == leafs.len

     let
-      #preDb = db.pp
-      preKMap = (db.top.kMap.len, db.pp(sTabOk=false, lTabOk=false))
-      prePAmk = (db.top.pAmk.len, db.top.pAmk.pp(db))
+      preDb = db.pp

     block:
-      let rc = db.hashify # (noisy=true)
+      let rc = db.hashify # (noisy=(0 < n))
       if rc.isErr: # or true:
-        noisy.say "***", "<", n, ">",
+        noisy.say "*** kvp(2)", "<", n, "/", lstLen-1, ">",
           " added=", added,
-          " db dump",
-          "\n   pre-kMap(", preKMap[0], ")\n    ", preKMap[1],
-          #"\n   pre-pre-DB", prePreDb, "\n   --------\n   pre-DB", preDb,
-          "\n   --------",
-          "\n   post-state ", db.pp,
-          "\n"
+          "\n   pre-DB\n    ", preDb,
+          "\n   --------",
+          "\n   cache\n    ", db.pp,
+          "\n   backend\n    ", db.to(RdbBackendRef).pp(db),
+          "\n   --------"
+      if rc.isErr:
         check rc.error == (VertexID(0),AristoError(0)) # force message
         return

+    when true and false:
+      noisy.say "*** kvp(3)", "<", n, "/", lstLen-1, ">",
+        "\n   cache\n    ", db.pp,
+        "\n   backend\n    ", db.to(RdbBackendRef).pp(db),
+        "\n   --------"
+
     block:
       let rc = db.hashifyCheck()
       if rc.isErr:
-        noisy.say "***", "<", n, "/", lstLen-1, "> db dump",
-          "\n   pre-kMap(", preKMap[0], ")\n    ", preKMap[1],
-          "\n   --------",
-          "\n   pre-pAmk(", prePAmk[0], ")\n    ", prePAmk[1],
-          "\n   --------",
-          "\n   post-state ", db.pp,
-          "\n"
+        noisy.say "*** kvp(4)", "<", n, "/", lstLen-1, "> db dump",
+          "\n   pre-DB\n    ", preDb,
+          "\n   --------",
+          "\n   cache\n    ", db.pp,
+          "\n   backend\n    ", db.to(RdbBackendRef).pp(db),
+          "\n   --------"
+      if rc.isErr:
         check rc == Result[void,(VertexID,AristoError)].ok()
         return

+    block:
+      let rdbHist = block:
+        let rc = db.save
+        if rc.isErr:
+          check rc.error == AristoError(0)
+          return
+        rc.value
+
+    when true and false:
+      noisy.say "*** kvp(5)", "<", n, "/", lstLen-1, ">",
+        "\n   cache\n    ", db.pp,
+        "\n   backend\n    ", db.to(RdbBackendRef).pp(db),
+        "\n   --------"
+
     when true and false:
-      noisy.say "***", "sample ", n, "/", lstLen-1,
+      noisy.say "*** kvp(9)", "sample ", n, "/", lstLen-1,
         " merged=", added.merged,
         " dup=", added.dups
+
   true