Update aristo journal functionality (#2155)

* Aristo: Code cosmetics, e.g. update some CamelCase names

* CoreDb+Aristo: Provide the oldest known state root implied by the journal

details:
  The Aristo journal allows recovering some earlier state roots, but not
  all of them.
* Aristo: Fix journal backward index operator, e.g. `[^1]`

* Aristo: Fix journal updater

why:
  The `fifosStore()` store function slightly misinterpreted the update
  instructions when translating them into database `put()` calls. The
  effect was that the journal kept growing due to stale entries that
  were never deleted.

* CoreDb+Aristo: Provide utils for purging stale data from the KVT

details:
  As noted in the earlier patch, not all state roots are available. This
  patch provides a mapping from a state root to a block number and allows
  removing all KVT data related to a particular block number.

* Aristo+Kvt: Implement a clean up schedule for expired data in KVT

why:
  For a single state ledger like `Aristo`, there is only a limited
  backlog of states. So KVT data (i.e. headers etc.) is cleaned up
  regularly.

* Fix copyright year
Jordan Hrycaj 2024-04-26 13:43:52 +00:00 committed by GitHub
parent 1512f95067
commit 0d4ef023ed
26 changed files with 506 additions and 83 deletions


@@ -36,6 +36,10 @@ type
   PersistBlockFlags = set[PersistBlockFlag]

+const
+  CleanUpEpoch = 30_000.u256
+    ## Regular checks for history clean up (applies to single state DB)
+
 # ------------------------------------------------------------------------------
 # Private
 # ------------------------------------------------------------------------------
@@ -53,12 +57,22 @@ proc getVmState(c: ChainRef, header: BlockHeader):
     return err()
   return ok(vmState)

+proc purgeExpiredBlocks(db: CoreDbRef) {.inline, raises: [RlpError].} =
+  ## Remove non-reachable blocks from KVT database
+  var blkNum = db.getOldestJournalBlockNumber()
+  if 0 < blkNum:
+    blkNum = blkNum - 1
+    while 0 < blkNum:
+      if not db.forgetHistory blkNum:
+        break
+      blkNum = blkNum - 1
+
 proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
                        bodies: openArray[BlockBody],
                        flags: PersistBlockFlags = {}): ValidationResult
     # wildcard exception, wrapped below in public section
     {.inline, raises: [CatchableError].} =

   let dbTx = c.db.beginTransaction()
   defer: dbTx.dispose()
@@ -71,10 +85,13 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
   let vmState = c.getVmState(headers[0]).valueOr:
     return ValidationResult.Error

-  trace "Persisting blocks",
-    fromBlock = headers[0].blockNumber,
-    toBlock = headers[^1].blockNumber
+  # Check point
+  let stateRootChpt = vmState.parent.stateRoot
+
+  # Needed for figuring out whether KVT cleanup is due (see at the end)
+  let (fromBlock, toBlock) = (headers[0].blockNumber, headers[^1].blockNumber)
+
+  trace "Persisting blocks", fromBlock, toBlock

   for i in 0 ..< headers.len:
     let (header, body) = (headers[i], bodies[i])
@@ -177,6 +194,13 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
   # The `c.db.persistent()` call is ignored by the legacy DB which
   # automatically saves persistently when reaching the zero level transaction
   c.db.persistent()

+  # For a single state ledger, there is only a limited backlog. So clean up
+  # regularly (the `CleanUpEpoch` should not be too small as each lookup pulls
+  # a journal entry from disk.)
+  if (fromBlock mod CleanUpEpoch) <= (toBlock - fromBlock):
+    c.db.purgeExpiredBlocks()
+
   ValidationResult.OK

 # ------------------------------------------------------------------------------
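Aside: a worked example of the clean-up trigger above. The test
`(fromBlock mod CleanUpEpoch) <= (toBlock - fromBlock)` fires for the first
batch whose starting block lies within one batch length past an epoch
boundary, i.e. roughly once per `CleanUpEpoch` blocks. A minimal standalone
sketch with hypothetical block numbers (not part of this diff):

  const CleanUpEpoch = 30_000u64

  func cleanUpDue(fromBlock, toBlock: uint64): bool =
    # same condition as in `persistBlocksImpl()` above
    (fromBlock mod CleanUpEpoch) <= (toBlock - fromBlock)

  doAssert cleanUpDue(60_000, 60_191)      # batch starts on a boundary
  doAssert cleanUpDue(60_100, 60_291)      # 100 <= 191: just past a boundary
  doAssert not cleanUpDue(60_200, 60_391)  # 200 > 191: boundary long passed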


@@ -148,6 +148,13 @@ type
       ## pair was found on the filter or the backend, this transaction is
       ## empty.

+  AristoApiGetFilUbeFn* =
+    proc(db: AristoDbRef;
+         qid: QueueID;
+        ): Result[FilterRef,AristoError]
+        {.noRaise.}
+      ## Get the filter from the unfiltered backend if available.
+
   AristoApiGetKeyRcFn* =
     proc(db: AristoDbRef;
          vid: VertexID;
@@ -352,6 +359,7 @@ type
     forget*: AristoApiForgetFn
     forkTop*: AristoApiForkTopFn
     forkWith*: AristoApiForkWithFn
+    getFilUbe*: AristoApiGetFilUbeFn
     getKeyRc*: AristoApiGetKeyRcFn
     hashify*: AristoApiHashifyFn
     hasPath*: AristoApiHasPathFn
@@ -384,6 +392,7 @@ type
     AristoApiProfForgetFn = "forget"
     AristoApiProfForkTopFn = "forkTop"
     AristoApiProfForkWithFn = "forkWith"
+    AristoApiProfGetFilUbeFn = "getFilUBE"
     AristoApiProfGetKeyRcFn = "getKeyRc"
     AristoApiProfHashifyFn = "hashify"
     AristoApiProfHasPathFn = "hasPath"
@@ -434,6 +443,7 @@ when AutoValidateApiHooks:
     doAssert not api.forget.isNil
     doAssert not api.forkTop.isNil
     doAssert not api.forkWith.isNil
+    doAssert not api.getFilUbe.isNil
     doAssert not api.getKeyRc.isNil
     doAssert not api.hashify.isNil
     doAssert not api.hasPath.isNil
@@ -486,6 +496,7 @@ func init*(api: var AristoApiObj) =
   api.forget = forget
   api.forkTop = forkTop
   api.forkWith = forkWith
+  api.getFilUbe = getFilUbe
   api.getKeyRc = getKeyRc
   api.hashify = hashify
   api.hasPath = hasPath
@@ -521,6 +532,7 @@ func dup*(api: AristoApiRef): AristoApiRef =
     forget: api.forget,
     forkTop: api.forkTop,
     forkWith: api.forkWith,
+    getFilUbe: api.getFilUbe,
     getKeyRc: api.getKeyRc,
     hashify: api.hashify,
     hasPath: api.hasPath,
@@ -609,6 +621,11 @@ func init*(
       AristoApiProfForkWithFn.profileRunner:
         result = api.forkWith(a, b, c, d)

+  profApi.getFilUbe =
+    proc(a: AristoDbRef; b: QueueID): auto =
+      AristoApiProfGetFilUbeFn.profileRunner:
+        result = api.getFilUbe(a, b)
+
   profApi.getKeyRc =
     proc(a: AristoDbRef; b: VertexID): auto =
       AristoApiProfGetKeyRcFn.profileRunner:


@@ -20,7 +20,7 @@ import
   results,
   ./aristo_walk/persistent,
   "."/[aristo_desc, aristo_get, aristo_init, aristo_utils],
-  ./aristo_check/[check_be, check_top]
+  ./aristo_check/[check_be, check_journal, check_top]

 # ------------------------------------------------------------------------------
 # Public functions
@@ -55,7 +55,7 @@ proc checkBE*(
     cache = true;                     # Also verify against top layer cache
     fifos = false;                    # Also verify cascaded filter fifos
       ): Result[void,(VertexID,AristoError)] =
-  ## Veryfy database backend structure. If the argument `relax` is set `false`,
+  ## Verify database backend structure. If the argument `relax` is set `false`,
   ## all necessary Merkle hashes are compiled and verified. If the argument
   ## `cache` is set `true`, the cache is also checked so that a safe operation
   ## (like `resolveBackendFilter()`) will leave the backend consistent.
@@ -79,6 +79,18 @@ proc checkBE*(
   of BackendVoid:
     return VoidBackendRef.checkBE(db, cache=cache, relax=relax)

+proc checkJournal*(
+    db: AristoDbRef;                  # Database, top layer
+      ): Result[void,(QueueID,AristoError)] =
+  ## Verify database backend journal.
+  case db.backend.kind:
+  of BackendMemory:
+    return MemBackendRef.checkJournal(db)
+  of BackendRocksDB:
+    return RdbBackendRef.checkJournal(db)
+  of BackendVoid:
+    return ok() # no journal
+
 proc check*(
     db: AristoDbRef;                  # Database, top layer
@@ -89,7 +101,11 @@ proc check*(
       ): Result[void,(VertexID,AristoError)] =
   ## Shortcut for running `checkTop()` followed by `checkBE()`
   ? db.checkTop(proofMode = proofMode)
-  ? db.checkBE(relax = relax, cache = cache)
+  ? db.checkBE(relax = relax, cache = cache, fifos = fifos)
+  if fifos:
+    let rc = db.checkJournal()
+    if rc.isErr:
+      return err((VertexID(0),rc.error[1]))
   ok()

 # ------------------------------------------------------------------------------
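Aside: with the `fifos` hook above, a full consistency check including the
journal can be run in one call, along these lines (sketch; assumes an open
`db: AristoDbRef`):

  let rc = db.check(relax = false, cache = true, fifos = true)
  doAssert rc.isOk   # journal failures surface as (VertexID(0), <error>)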


@@ -185,7 +185,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
   if fifos and
      not db.backend.isNil and
      not db.backend.journal.isNil:
-    var lastTrg = db.getKeyUBE(VertexID(1)).get(otherwise = VOID_HASH_KEY)
+    var lastTrg = db.getKeyUbe(VertexID(1)).get(otherwise = VOID_HASH_KEY)
                     .to(Hash256)
     for (qid,filter) in db.backend.T.walkFifoBe: # walk in fifo order
       if filter.src != lastTrg:


@@ -0,0 +1,203 @@
+# nimbus-eth1
+# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#    http://www.apache.org/licenses/LICENSE-2.0)
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#    http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified, or distributed
+# except according to those terms.
+
+{.push raises: [].}
+
+import
+  std/[algorithm, sequtils, sets, tables],
+  eth/common,
+  results,
+  ../aristo_filter/filter_scheduler,
+  ../aristo_walk/persistent,
+  ".."/[aristo_desc, aristo_blobify]
+
+const
+  ExtraDebugMessages = false
+
+type
+  JrnRec = tuple
+    src: Hash256
+    trg: Hash256
+    size: int
+
+when ExtraDebugMessages:
+  import
+    ../aristo_debug
+
+# ------------------------------------------------------------------------------
+# Private functions and helpers
+# ------------------------------------------------------------------------------
+
+template noValueError(info: static[string]; code: untyped) =
+  try:
+    code
+  except ValueError as e:
+    raiseAssert info & ", name=\"" & $e.name & "\", msg=\"" & e.msg & "\""
+
+when ExtraDebugMessages:
+
+  proc pp(t: var Table[QueueID,JrnRec]): string =
+    result = "{"
+    for qid in t.keys.toSeq.sorted:
+      t.withValue(qid,w):
+        result &= qid.pp & "#" & $w[].size & ","
+    if result[^1] == '{':
+      result &= "}"
+    else:
+      result[^1] = '}'
+
+  proc pp(t: seq[QueueID]): string =
+    result = "{"
+    var list = t
+    for n in 2 ..< list.len:
+      if list[n-1] == list[n] - 1 and
+         (list[n-2] == QueueID(0) or list[n-2] == list[n] - 2):
+        list[n-1] = QueueID(0)
+    for w in list:
+      if w != QueueID(0):
+        result &= w.pp & ","
+      elif result[^1] == ',':
+        result[^1] = '.'
+        result &= "."
+    if result[^1] == '{':
+      result &= "}"
+    else:
+      result[^1] = '}'
+
+  proc pp(t: HashSet[QueueID]): string =
+    result = "{"
+    var list = t.toSeq.sorted
+    for n in 2 ..< list.len:
+      if list[n-1] == list[n] - 1 and
+         (list[n-2] == QueueID(0) or list[n-2] == list[n] - 2):
+        list[n-1] = QueueID(0)
+    for w in list:
+      if w != QueueID(0):
+        result &= w.pp & ","
+      elif result[^1] == ',':
+        result[^1] = '.'
+        result &= "."
+    if result[^1] == '{':
+      result &= "}"
+    else:
+      result[^1] = '}'
+
+# ------------------------------------------------------------------------------
+# Public functions
+# ------------------------------------------------------------------------------
+
+proc checkJournal*[T: RdbBackendRef|MemBackendRef](
+    _: type T;
+    db: AristoDbRef;
+      ): Result[void,(QueueID,AristoError)] =
+  let jrn = db.backend.journal
+  if jrn.isNil: return ok()
+
+  var
+    nToQid: seq[QueueID]          # qids sorted by history/age
+    cached: HashSet[QueueID]      # `nToQid[]` as set
+    saved: Table[QueueID,JrnRec]
+    error: (QueueID,AristoError)
+
+  when ExtraDebugMessages:
+    var
+      sizeTally = 0
+      maxBlock = 0
+
+    proc moan(n = -1, s = "", listOk = true) =
+      var txt = ""
+      if 0 <= n:
+        txt &= " (" & $n & ")"
+      if error[1] != AristoError(0):
+        txt &= " oops"
+      txt &=
+        " jLen=" & $jrn.len &
+        " tally=" & $sizeTally &
+        " maxBlock=" & $maxBlock &
+        ""
+      if 0 < s.len:
+        txt &= " " & s
+      if error[1] != AristoError(0):
+        txt &=
+          " errQid=" & error[0].pp &
+          " error=" & $error[1] &
+          ""
+      if listOk:
+        txt &=
+          "\n cached=" & cached.pp &
+          "\n saved=" & saved.pp &
+          ""
+      debugEcho "*** checkJournal", txt
+  else:
+    template moan(n = -1, s = "", listOk = true) =
+      discard
+
+  # Collect cached handles
+  for n in 0 ..< jrn.len:
+    let qid = jrn[n]
+    # Must be no overlap
+    if qid in cached:
+      error = (qid,CheckJrnCachedQidOverlap)
+      moan(2)
+      return err(error)
+    cached.incl qid
+    nToQid.add qid
+
+  # Collect saved data
+  for (qid,fil) in db.backend.T.walkFilBe():
+    var jrnRec: JrnRec
+    jrnRec.src = fil.src
+    jrnRec.trg = fil.trg
+
+    when ExtraDebugMessages:
+      let rc = fil.blobify
+      if rc.isErr:
+        moan(5)
+        return err((qid,rc.error))
+      jrnRec.size = rc.value.len
+      if maxBlock < jrnRec.size:
+        maxBlock = jrnRec.size
+      sizeTally += jrnRec.size
+
+    saved[qid] = jrnRec
+
+  # Compare cached against saved data
+  let
+    savedQids = saved.keys.toSeq.toHashSet
+    unsavedQids = cached - savedQids
+    staleQids = savedQids - cached
+
+  if 0 < unsavedQids.len:
+    error = (unsavedQids.toSeq.sorted[0],CheckJrnSavedQidMissing)
+    moan(6)
+    return err(error)
+
+  if 0 < staleQids.len:
+    error = (staleQids.toSeq.sorted[0], CheckJrnSavedQidStale)
+    moan(7)
+    return err(error)
+
+  # Compare whether journal records link together
+  if 1 < nToQid.len:
+    noValueError("linked journal records"):
+      var prvRec = saved[nToQid[0]]
+      for n in 1 ..< nToQid.len:
+        let thisRec = saved[nToQid[n]]
+        if prvRec.trg != thisRec.src:
+          error = (nToQid[n],CheckJrnLinkingGap)
+          moan(8, "qidInx=" & $n)
+          return err(error)
+        prvRec = thisRec
+
+  moan(9, listOk=false)
+  ok()
+
+# ------------------------------------------------------------------------------
+# End
+# ------------------------------------------------------------------------------
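Aside: the last step above enforces the journal's linking invariant: walked
in fifo order, every filter must pick up exactly at the state its
predecessor produced, otherwise `CheckJrnLinkingGap` is returned. A
self-contained sketch with integer stand-ins for the `src`/`trg` hashes (not
part of this diff):

  let fifoOrder = @[      # each filter reverts one more step into the past
    (src: 4, trg: 3),
    (src: 3, trg: 2),
    (src: 2, trg: 1)]
  for n in 1 ..< fifoOrder.len:
    # a mismatch here is what `CheckJrnLinkingGap` reports
    doAssert fifoOrder[n-1].trg == fifoOrder[n].src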


@@ -41,10 +41,21 @@ const
     ## Useful shortcut

   DEFAULT_QID_QUEUES* = [
-    (128,   0), ## Consecutive list of 128 filter slots
-    ( 64,  63), ## Overflow list, 64 filters, skipping 63 filters in-between
-    ( 64, 127), ## ..
-    ( 64, 255)]
+    (128,   0), # Consecutive list of (at least) 128 filter slots
+    ( 16,   3), # Overflow list with (at least) 16 filter slots (with gap
+                # size 3), each slot covering 4 filters from previous list
+    (  1,   1), # ..
+    (  1,   1)]
+    ## The `DEFAULT_QID_QUEUES` schedule has the following properties:
+    ## * most recent consecutive slots: 128
+    ## * maximal slots used: 151
+    ## * covered backlog savings: between 216..231
+    ## This was calculated via the `capacity()` function from the
+    ## `filter_scheduler.nim` source. So, saving each block after executing
+    ## it, the previous 128 block chain states will be directly accessible.
+    ## For older block chain states (reaching back at least to 216), the
+    ## system can be positioned before the desired state and executed
+    ## forward block by block.

   SUB_TREE_DISPOSAL_MAX* = 200_000
     ## Some limit for disposing sub-trees in one go using `delete()`.


@@ -169,6 +169,12 @@ type
     CheckBeFifoSrcTrgMismatch
     CheckBeFifoTrgNotStateRoot

+    # Journal check `checkJournal()`
+    CheckJrnCachedQidOverlap
+    CheckJrnSavedQidMissing
+    CheckJrnSavedQidStale
+    CheckJrnLinkingGap
+
     # Neighbour vertex, tree traversal `nearbyRight()` and `nearbyLeft()`
     NearbyBeyondRange
     NearbyBranchError


@@ -324,7 +324,7 @@ func to*(node: NodeRef; T: type VertexRef): T =
   node.VertexRef.dup

 func to*(a: array[4,tuple[size, width: int]]; T: type QidLayoutRef): T =
-  ## Convert a size-width array to a `QidLayoutRef` layout. Over large
+  ## Convert a size-width array to a `QidLayoutRef` layout. Overly large
   ## array field values are adjusted to its maximal size.
   var q: array[4,QidSpec]
   for n in 0..3:
@@ -335,18 +335,20 @@ func to*(a: array[4,tuple[size, width: int]]; T: type QidLayoutRef): T =
   T(q: q)

 func to*(a: array[4,tuple[size, width, wrap: int]]; T: type QidLayoutRef): T =
-  ## Convert a size-width-wrap array to a `QidLayoutRef` layout. Over large
-  ## array field values are adjusted to its maximal size. Too small `wrap`
-  ## values are adjusted to its minimal size.
+  ## Convert a size-width-wrap array to a `QidLayoutRef` layout. Overly large
+  ## array field values are adjusted to its maximal size. Too small `wrap`
+  ## field values are adjusted to its minimal size.
   var q: array[4,QidSpec]
   for n in 0..2:
     q[n] = (min(a[n].size.uint, QidSpecSizeMax),
             min(a[n].width.uint, QidSpecWidthMax),
-            QueueID(max(a[n].size + a[n+1].width, a[n].width+1, a[n].wrap)))
+            QueueID(max(a[n].size + a[n+1].width, a[n].width+1,
+                        min(a[n].wrap, DefaultQidWrap.int))))
   q[0].width = 0
   q[3] = (min(a[3].size.uint, QidSpecSizeMax),
           min(a[3].width.uint, QidSpecWidthMax),
-          QueueID(max(a[3].size, a[3].width, a[3].wrap)))
+          QueueID(max(a[3].size, a[3].width,
+                      min(a[3].wrap, DefaultQidWrap.int))))
   T(q: q)

 # ------------------------------------------------------------------------------
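Aside: with the `wrap` clamping above, a layout may now pass `high int` as a
"wrap as late as possible" marker without overflowing the `QueueID` range,
e.g. (sketch, mirroring the second test layout added later in this patch):

  let lyo = [(2,0,high int), (1,1,high int), (1,1,high int), (1,1,high int)]
              .to(QidLayoutRef)   # wrap fields get clamped to DefaultQidWrap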


@@ -73,7 +73,7 @@ proc merge*(
   ## argument `filter`, all the `top` and `stack` layers should be cleared.
   ##
   let ubeRoot = block:
-    let rc = db.getKeyUBE VertexID(1)
+    let rc = db.getKeyUbe VertexID(1)
     if rc.isOk:
       rc.value.to(Hash256)
     elif rc.error == GetKeyNotFound:


@@ -75,6 +75,7 @@ proc fifosStore*(
   # Update journal filters and calculate database update
   var
     instr = FifoInstr(scd: upd.fifo)
+    dbClear: seq[QueueID]
     hold: seq[FilterRef]
     saved = false
@@ -100,10 +101,12 @@ proc fifosStore*(
     of HoldQid:
       # Push filter
+      dbClear.add act.qid
       hold.add be.getFilterOrReturn act.qid

       # Merge additional journal filters into top filter
       for w in act.qid+1 .. act.xid:
+        dbClear.add w
         let lower = be.getFilterOrReturn w
         hold[^1] = hold[^1].joinFiltersOrReturn lower
@@ -115,6 +118,9 @@ proc fifosStore*(
       let upper = hold.pop
       lower = upper.joinFiltersOrReturn lower
       instr.put.add (act.qid, lower)
+      for qid in dbClear:
+        instr.put.add (qid, FilterRef(nil))
+      dbClear.setLen(0)

   if not saved:
     return err(FilExecSaveMissing)
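Aside: this hunk is the journal-growth fix from the commit message. Every
queue slot consumed while folding filters (`HoldQid`) is now remembered in
`dbClear` and re-emitted as a `nil` put, which the backend translates into a
delete; previously the folded-away slots were simply left behind. A
self-contained sketch with simplified stand-in types (not part of this
diff):

  var
    dbClear: seq[int]          # queue ids consumed by the merge
    puts: seq[(int, string)]   # (qid, payload); "" plays FilterRef(nil)

  for qid in [7, 8, 9]:        # HoldQid 7..9: fold three filters into one
    dbClear.add qid
  puts.add (6, "merged")       # DequQid 6: store the folded filter
  for qid in dbClear:          # the fix: without this, slots 7..9 went stale
    puts.add (qid, "")
  dbClear.setLen(0)

  doAssert puts == @[(6, "merged"), (7, ""), (8, ""), (9, "")]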


@@ -127,8 +127,8 @@ proc getFilterOverlap*(
   ## Return the number of journal filters in the leading chain that is
   ## reverted by the argument `filter`. A heuristic approach is used here
   ## for an argument `filter` with a valid filter ID when the chain is
-  ## longer than one items. So only single chain overlaps a guaranteed to
-  ## be found.
+  ## longer than one item. Only single step filter overlaps are guaranteed
+  ## to be found.
   ##
   # Check against the top-fifo entry
   let qid = be.journal[0]
@@ -148,7 +148,7 @@ proc getFilterOverlap*(
     if filter.trg == top.trg:
       return 1

-  # Check against sme stored filter IDs
+  # Check against some stored filter IDs
   if filter.isValid:
     let rc = be.getFilterFromFifo(filter.fid, earlierOK=true)
     if rc.isOk:


@@ -75,7 +75,7 @@ proc merge*(
       if vtx.isValid or not newFilter.sTab.hasKey vid:
         newFilter.sTab[vid] = vtx
       elif newFilter.sTab.getOrVoid(vid).isValid:
-        let rc = db.getVtxUBE vid
+        let rc = db.getVtxUbe vid
         if rc.isOk:
           newFilter.sTab[vid] = vtx # VertexRef(nil)
         elif rc.error == GetVtxNotFound:
@@ -87,7 +87,7 @@ proc merge*(
       if key.isValid or not newFilter.kMap.hasKey vid:
         newFilter.kMap[vid] = key
       elif newFilter.kMap.getOrVoid(vid).isValid:
-        let rc = db.getKeyUBE vid
+        let rc = db.getKeyUbe vid
         if rc.isOk:
           newFilter.kMap[vid] = key
         elif rc.error == GetKeyNotFound:


@@ -1,5 +1,5 @@
 # nimbus-eth1
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -34,7 +34,7 @@ proc revFilter*(
   # Get vid generator state on backend
   block:
-    let rc = db.getIdgUBE()
+    let rc = db.getIdgUbe()
     if rc.isOk:
       rev.vGen = rc.value
     elif rc.error != GetIdgNotFound:
@@ -42,7 +42,7 @@ proc revFilter*(
   # Calculate reverse changes for the `sTab[]` structural table
   for vid in filter.sTab.keys:
-    let rc = db.getVtxUBE vid
+    let rc = db.getVtxUbe vid
     if rc.isOk:
       rev.sTab[vid] = rc.value
     elif rc.error == GetVtxNotFound:
@@ -52,7 +52,7 @@ proc revFilter*(
   # Calculate reverse changes for the `kMap` sequence.
   for vid in filter.kMap.keys:
-    let rc = db.getKeyUBE vid
+    let rc = db.getKeyUbe vid
     if rc.isOk:
       rev.kMap[vid] = rc.value
     elif rc.error == GetKeyNotFound:


@@ -261,7 +261,7 @@ func capacity(
 func capacity*(
     ctx: openArray[tuple[size, width, wrap: int]];  # Schedule layout
       ): tuple[maxQueue: int, minCovered: int, maxCovered: int] =
-  ## Variant of `capacity()` below.
+  ## Variant of `capacity()`.
   ctx.toSeq.mapIt((it[0],it[1])).capacity
func capacity*( func capacity*(
@ -289,16 +289,17 @@ func addItem*(
## SaveQid <queue-id> -- Store a new item under the address ## SaveQid <queue-id> -- Store a new item under the address
## -- <queue-id> on the database. ## -- <queue-id> on the database.
## ##
## HoldQid <from-id>..<to-id> -- Move the records accessed by the argument ## HoldQid <from-id>..<to-id> -- Move the records referred to by the
## -- addresses from the database to the right ## -- argument addresses from the database to
## -- end of the local hold queue. The age of ## -- the right end of the local hold queue.
## -- the items on the hold queue increases ## -- The age of the items on the hold queue
## -- left to right. ## -- increases left to right.
## ##
## DequQid <queue-id> -- Merge items from the hold queue into a ## DequQid <queue-id> -- Merge items from the hold queue into a
## -- new item and store it under the address ## -- new item and store it under the address
## -- <queue-id> on the database. Clear the ## -- <queue-id> on the database. Clear the
## -- the hold queue. ## -- the hold queue and the corresponding
## -- items on the database.
## ##
## DelQid <queue-id> -- Delete item. This happens if the last ## DelQid <queue-id> -- Delete item. This happens if the last
## -- oberflow queue needs to make space for ## -- oberflow queue needs to make space for
@@ -557,7 +558,7 @@ func `[]`*(
     bix: BackwardsIndex;            # Index into latest items
       ): QueueID =
   ## Variant of `[]` for providing `[^bix]`.
-  fifo[fifo.state.len - bix.distinctBase]
+  fifo[fifo.len - bix.distinctBase]

 func `[]`*(

@@ -22,7 +22,7 @@ import
 # Public functions
 # ------------------------------------------------------------------------------

-proc getIdgUBE*(
+proc getIdgUbe*(
     db: AristoDbRef;
       ): Result[seq[VertexID],AristoError] =
   ## Get the ID generator state from the unfiltered backend if available.
@@ -31,7 +31,7 @@ proc getIdgUBE*(
     return be.getIdgFn()
   err(GetIdgNotFound)

-proc getFqsUBE*(
+proc getFqsUbe*(
     db: AristoDbRef;
       ): Result[seq[(QueueID,QueueID)],AristoError] =
   ## Get the list of filter IDs from the unfiltered backend if available.
@@ -40,7 +40,7 @@ proc getFqsUBE*(
     return be.getFqsFn()
   err(GetFqsNotFound)

-proc getVtxUBE*(
+proc getVtxUbe*(
     db: AristoDbRef;
     vid: VertexID;
       ): Result[VertexRef,AristoError] =
@@ -50,17 +50,17 @@ proc getVtxUBE*(
     return be.getVtxFn vid
   err GetVtxNotFound

-proc getKeyUBE*(
+proc getKeyUbe*(
     db: AristoDbRef;
     vid: VertexID;
       ): Result[HashKey,AristoError] =
-  ## Get the merkle hash/key from the unfiltered backend if available.
+  ## Get the Merkle hash/key from the unfiltered backend if available.
   let be = db.backend
   if not be.isNil:
     return be.getKeyFn vid
   err GetKeyNotFound

-proc getFilUBE*(
+proc getFilUbe*(
     db: AristoDbRef;
     qid: QueueID;
       ): Result[FilterRef,AristoError] =
@@ -78,7 +78,7 @@ proc getIdgBE*(
   ## Get the ID generator state from the `backend` layer if available.
   if not db.roFilter.isNil:
     return ok(db.roFilter.vGen)
-  db.getIdgUBE()
+  db.getIdgUbe()

 proc getVtxBE*(
     db: AristoDbRef;
@@ -90,7 +90,7 @@ proc getVtxBE*(
     if vtx.isValid:
       return ok(vtx)
     return err(GetVtxNotFound)
-  db.getVtxUBE vid
+  db.getVtxUbe vid

 proc getKeyBE*(
     db: AristoDbRef;
@@ -102,7 +102,7 @@ proc getKeyBE*(
     if key.isValid:
       return ok(key)
     return err(GetKeyNotFound)
-  db.getKeyUBE vid
+  db.getKeyUbe vid

 # ------------------


@@ -279,7 +279,7 @@ proc forkWith*(
   # Try `(vid,key)` on unfiltered backend
   block:
-    let beKey = db.getKeyUBE(vid).valueOr: VOID_HASH_KEY
+    let beKey = db.getKeyUbe(vid).valueOr: VOID_HASH_KEY
     if beKey == key:
       let rc = db.fork(noFilter = true)
       if rc.isOk:
@@ -451,7 +451,7 @@ proc stow*(
   if db.roFilter.isValid:
     db.top.final.vGen = db.roFilter.vGen
   else:
-    let rc = db.getIdgUBE()
+    let rc = db.getIdgUbe()
     if rc.isOk:
       db.top.final.vGen = rc.value
     else:


@@ -242,6 +242,11 @@ func toAristo*(mBe: CoreDbMptBackendRef): AristoDbRef =
   if not mBe.isNil and mBe.parent.isAristo:
     return mBe.AristoCoreDbMptBE.adb

+proc toAristoOldestStateRoot*(mBe: CoreDbMptBackendRef): Hash256 =
+  if not mBe.isNil and mBe.parent.isAristo:
+    return mBe.parent.AristoCoreDbRef.adbBase.toJournalOldestStateRoot()
+  EMPTY_ROOT_HASH
+
 # ------------------------------------------------------------------------------
 # Public aristo iterators
 # ------------------------------------------------------------------------------


@@ -17,6 +17,7 @@ import
   stew/byteutils,
   results,
   ../../../aristo,
+  ../../../aristo/aristo_filter/filter_scheduler,
   ../../base,
   ../../base/base_desc,
   ./common_desc
@@ -566,6 +567,20 @@ func toVoidRc*[T](
     return ok()
   err((VoidVID,rc.error).toError(base, info, error))

+proc toJournalOldestStateRoot*(base: AristoBaseRef): Hash256 =
+  let
+    adb = base.ctx.mpt
+    be = adb.backend
+  if not be.isNil:
+    let jrn = be.journal
+    if not jrn.isNil:
+      let qid = jrn[^1]
+      if qid.isValid:
+        let rc = base.api.getFilUbe(adb, qid)
+        if rc.isOk:
+          return rc.value.trg
+  EMPTY_ROOT_HASH
+
 # ---------------------

 func to*(dsc: CoreDxMptRef, T: type AristoDbRef): T =


@@ -18,9 +18,10 @@ import
   chronicles,
   eth/[common, rlp],
   results,
-  stew/byteutils,
+  stew/[byteutils, endians2],
   "../.."/[errors, constants],
   ".."/[aristo, storage_types],
+  ./backend/aristo_db,
   "."/base

 logScope:
@@ -88,6 +89,23 @@ template discardRlpException(info: static[string]; code: untyped) =
   except RlpError as e:
     warn logTxt info, error=($e.name), msg=e.msg

+# ---------
+
+func to(bn: BlockNumber; T: type Blob): T =
+  if bn <= high(uint64).toBlockNumber:
+    bn.truncate(uint64).toBytesBE.toSeq
+  else:
+    bn.toBytesBE.toSeq
+
+func to(data: openArray[byte]; T: type BlockNumber): T =
+  case data.len:
+  of 8:
+    return uint64.fromBytesBE(data).toBlockNumber
+  of 32:
+    return UInt256.fromBytesBE(data).toBlockNumber
+  else:
+    discard
+
 # ------------------------------------------------------------------------------
 # Private iterators
 # ------------------------------------------------------------------------------
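Aside: the two private converters above store anything that fits into
`uint64` as a compact 8-byte big-endian blob and fall back to the full 32
bytes otherwise; the decoder dispatches on the length. Round-trip sketch
(hypothetical value; same-module visibility assumed since the helpers are
not exported):

  let bn = 12_345_678.toBlockNumber
  doAssert bn.to(Blob).len == 8               # fits uint64: 8-byte arm
  doAssert bn.to(Blob).to(BlockNumber) == bn  # decodes back via that arm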
@@ -320,7 +338,6 @@ proc markCanonicalChain(
     return true

 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------
@@ -330,6 +347,27 @@ proc exists*(db: CoreDbRef, hash: Hash256): bool =
     warn logTxt "exisis()", hash, action="hasKey()", error=($$error)
     return false

+proc getBlockNumber*(db: CoreDbRef; stateRoot: Hash256): BlockNumber =
+  const info = "getBlockNumber()"
+  if stateRoot != EMPTY_ROOT_HASH:
+    let
+      kvt = db.newKvt()
+      data = kvt.get(stRootToBlockNumKey(stateRoot).toOpenArray).valueOr:
+        if error.error != KvtNotFound:
+          warn logTxt info, stateRoot, action="get()", error=($$error)
+        return
+    return data.to(BlockNumber)
+
+proc getOldestJournalBlockNumber*(db: CoreDbRef): BlockNumber =
+  ## Returns the block number implied by the database journal if there is
+  ## any, or `BlockNumber(0)`. At the moment, only the `Aristo` database
+  ## has a journal.
+  ##
+  let be = db.ctx.getMpt(CtGeneric).backend
+  if be.parent.isAristo:
+    return db.getBlockNumber be.toAristoOldestStateRoot()
+
 proc getBlockHeader*(
     db: CoreDbRef;
     blockHash: Hash256;
@@ -523,8 +561,18 @@ proc getAncestorsHashes*(
     dec ancestorCount

 proc addBlockNumberToHashLookup*(db: CoreDbRef; header: BlockHeader) =
-  let blockNumberKey = blockNumberToHashKey(header.blockNumber)
-  db.newKvt.put(blockNumberKey.toOpenArray, rlp.encode(header.hash)).isOkOr:
+  ## This function stores the lookup chain
+  ## ::
+  ##   header.stateRoot -> header.blockNumber -> header.hash()
+  ##
+  let
+    blockNumberKey = blockNumberToHashKey(header.blockNumber)
+    stRootKey = stRootToBlockNumKey(header.stateRoot)
+    kvt = db.newKvt()
+  kvt.put(stRootKey.toOpenArray, header.blockNumber.to(Blob)).isOkOr:
+    warn logTxt "addBlockNumberToHashLookup()",
+      stRootKey, action="put()", `error`=($$error)
+  kvt.put(blockNumberKey.toOpenArray, rlp.encode(header.hash)).isOkOr:
     warn logTxt "addBlockNumberToHashLookup()",
       blockNumberKey, action="put()", error=($$error)
@@ -558,6 +606,26 @@ proc persistTransactions*(
     warn logTxt info, action="state()"
     return EMPTY_ROOT_HASH

+proc forgetHistory*(
+    db: CoreDbRef;
+    blockNum: BlockNumber;
+      ): bool
+      {.gcsafe, raises: [RlpError].} =
+  ## Remove all data related to the argument block number `blockNum`. This
+  ## function returns `true` if some history was available and deleted.
+  var blockHash: Hash256
+  if db.getBlockHash(blockNum, blockHash):
+    let kvt = db.newKvt()
+    # delete blockNum->blockHash
+    discard kvt.del(blockNumberToHashKey(blockNum).toOpenArray)
+    result = true
+
+    var header: BlockHeader
+    if db.getBlockHeader(blockHash, header):
+      # delete blockHash->header, stateRoot->blockNum
+      discard kvt.del(genericHashKey(blockHash).toOpenArray)
+      discard kvt.del(stRootToBlockNumKey(header.stateRoot).toOpenArray)
+
 proc getTransaction*(
     db: CoreDbRef;
     txRoot: Hash256;
@@ -858,6 +926,12 @@ proc persistHeaderToDbWithoutSetHead*(
   let
     kvt = db.newKvt()
     scoreKey = blockHashToScoreKey(headerHash)

+  # This extra call `addBlockNumberToHashLookup()` has been added in order
+  # to access the current block by its state root. So it can be deleted
+  # if not needed anymore.
+  db.addBlockNumberToHashLookup(header)
+
   kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
     warn logTxt "persistHeaderToDbWithoutSetHead()",
       scoreKey, action="put()", `error`=($$error)


@@ -34,6 +34,7 @@ export
   isAristo,
   toAristo,
   toAristoProfData,
+  toAristoOldestStateRoot,

   # see `legacy_db`
   isLegacy,


@@ -32,6 +32,7 @@ type
     snapSyncStorageSlot
     snapSyncStateRoot
     blockHashToBlockWitness
+    stRootToBlockNum

   DbKey* = object
     # The first byte stores the key type. The rest are key-specific values
@@ -135,6 +136,11 @@ proc blockHashToBlockWitnessKey*(h: Hash256): DbKey {.inline.} =
   result.data[1 .. 32] = h.data
   result.dataEndPos = uint8 32

+proc stRootToBlockNumKey*(h: Hash256): DbKey =
+  result.data[0] = byte ord(stRootToBlockNum)
+  result.data[1 .. 32] = h.data
+  result.dataEndPos = uint8 32
+
 template toOpenArray*(k: DbKey): openArray[byte] =
   k.data.toOpenArray(0, int(k.dataEndPos))
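Aside: like the sibling key constructors above it, the new key occupies 33
bytes: a one-byte type tag followed by the 32-byte state root. Sketch:

  let key = stRootToBlockNumKey(EMPTY_ROOT_HASH)
  doAssert key.toOpenArray.len == 33              # tag byte + 32 data bytes
  doAssert key.toOpenArray[0] == byte ord(stRootToBlockNum)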


@@ -74,8 +74,9 @@ proc setErrorLevel {.used.} =

 proc miscRunner(
     noisy = true;
-    qidSampleSize = QidSample;
+    layout = LyoSamples[0];
       ) =
+  let (lyo,qidSampleSize) = layout

   suite "Aristo: Miscellaneous tests":

@@ -83,10 +84,10 @@ proc miscRunner(
       check noisy.testVidRecycleLists()

     test &"Low level cascaded fifos API (sample size: {qidSampleSize})":
-      check noisy.testQidScheduler(sampleSize = qidSampleSize)
+      check noisy.testQidScheduler(layout = lyo, sampleSize = qidSampleSize)

     test &"High level cascaded fifos API (sample size: {qidSampleSize})":
-      check noisy.testFilterFifo(sampleSize = qidSampleSize)
+      check noisy.testFilterFifo(layout = lyo, sampleSize = qidSampleSize)

     test "Short keys and other patholgical cases":
       check noisy.testShortKeys()

@@ -197,7 +198,8 @@ when isMainModule:
     noisy.accountsRunner(persistent=false)

   when true: # and false:
-    noisy.miscRunner(qidSampleSize = 1_000)
+    for n,w in LyoSamples:
+      noisy.miscRunner() # layouts = (w[0], 1_000))

   # This one uses dumps from the external `nimbus-eth1-blob` repo
   when true and false:


@@ -64,7 +64,10 @@ proc fifos(be: BackendRef): seq[seq[(QueueID,FilterRef)]] =
     discard
   check be.kind == BackendMemory or be.kind == BackendRocksDB

-func flatten(a: seq[seq[(QueueID,FilterRef)]]): seq[(QueueID,FilterRef)] {.used.} =
+func flatten(
+    a: seq[seq[(QueueID,FilterRef)]];
+      ): seq[(QueueID,FilterRef)]
+      {.used.} =
   for w in a:
     result &= w

@@ -238,7 +241,7 @@ proc isDbEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool =
         if aVtx.isValid and bVtx.isValid:
           return false
         # The valid one must match the backend data
-        let rc = db.getVtxUBE vid
+        let rc = db.getVtxUbe vid
         if rc.isErr:
           return false
         let vtx = if aVtx.isValid: aVtx else: bVtx
@@ -246,7 +249,7 @@ proc isDbEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool =
           return false

       elif not vid.isValid and not bTab.hasKey vid:
-        let rc = db.getVtxUBE vid
+        let rc = db.getVtxUbe vid
         if rc.isOk:
           return false # Exists on backend but missing on `bTab[]`
         elif rc.error != GetKeyNotFound:
@@ -268,7 +271,7 @@ proc isDbEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool =
         if aKey.isValid and bKey.isValid:
           return false
         # The valid one must match the backend data
-        let rc = db.getKeyUBE vid
+        let rc = db.getKeyUbe vid
         if rc.isErr:
           return false
         let key = if aKey.isValid: aKey else: bKey
@@ -276,7 +279,7 @@ proc isDbEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool =
           return false

       elif not vid.isValid and not bMap.hasKey vid:
-        let rc = db.getKeyUBE vid
+        let rc = db.getKeyUbe vid
         if rc.isOk:
           return false # Exists on backend but missing on `bMap[]`
         elif rc.error != GetKeyNotFound:
@@ -348,18 +351,24 @@ proc checkBeOk(
     dx: DbTriplet;
     relax = false;
     forceCache = false;
+    fifos = true;
     noisy = true;
       ): bool =
   ## ..
   for n in 0 ..< dx.len:
-    let
-      cache = if forceCache: true else: dx[n].dirty.len == 0
-      rc = dx[n].checkBE(relax=relax, cache=cache)
-    xCheckRc rc.error == (0,0):
-      noisy.say "***", "db check failed",
-        " n=", n, "/", dx.len-1,
-        " cache=", cache
+    let cache = if forceCache: true else: dx[n].dirty.len == 0
+    block:
+      let rc = dx[n].checkBE(relax=relax, cache=cache, fifos=fifos)
+      xCheckRc rc.error == (0,0):
+        noisy.say "***", "db checkBE failed",
+          " n=", n, "/", dx.len-1,
+          " cache=", cache
+    if fifos:
+      let rc = dx[n].checkJournal()
+      xCheckRc rc.error == (0,0):
+        noisy.say "***", "db checkJournal failed",
+          " n=", n, "/", dx.len-1,
+          " cache=", cache
   true

 proc checkFilterTrancoderOk(
@@ -602,7 +611,7 @@ proc testDistributedAccess*(
   # Check/verify backends
   block:
-    let ok = dx.checkBeOk(noisy=noisy)
+    let ok = dx.checkBeOk(noisy=noisy,fifos=true)
     xCheck ok:
       noisy.say "*** testDistributedAccess (4)", "n=", n, "db3".dump db3
   block:
@@ -661,7 +670,7 @@ proc testDistributedAccess*(
   # Check/verify backends
   block:
-    let ok = dy.checkBeOk(noisy=noisy)
+    let ok = dy.checkBeOk(noisy=noisy,fifos=true)
     xCheck ok
   block:
     let ok = dy.checkFilterTrancoderOk(noisy=noisy)
@@ -675,8 +684,8 @@ proc testDistributedAccess*(

 proc testFilterFifo*(
     noisy = true;
-    layout = QidSlotLyo;             # Backend fifos layout
-    sampleSize = QidSample;          # Synthetic filters generation
+    layout = LyoSamples[0][0];       # Backend fifos layout
+    sampleSize = LyoSamples[0][1];   # Synthetic filters generation
     reorgPercent = 40;               # To be deleted and re-filled
     rdbPath = "";                    # Optional Rocks DB storage directory
       ): bool =

@@ -710,9 +719,21 @@ proc testFilterFifo*(

   # -------------------

+  block:
+    let rc = db.checkJournal()
+    xCheckRc rc.error == (0,0)
+
   for n in 1 .. sampleSize:
-    let storeFilterOK = be.storeFilter(serial=n)
-    xCheck storeFilterOK
-    let validateFifoOk = be.validateFifo(serial=n)
-    xCheck validateFifoOk
+    #let trigger = n in {7,8}
+    #if trigger: show(n, be.journal.addItem.exec)
+    block:
+      let storeFilterOK = be.storeFilter(serial=n)
+      xCheck storeFilterOK
+    block:
+      #if trigger: show(n)
+      let rc = db.checkJournal()
+      xCheckRc rc.error == (0,0)
+    block:
+      let validateFifoOk = be.validateFifo(serial=n)
+      xCheck validateFifoOk

@@ -739,6 +760,9 @@ proc testFilterFifo*(
       #show(n)
       let validateFifoOk = be.validateFifo(serial=n)
       xCheck validateFifoOk
+    block:
+      let rc = db.checkJournal()
+      xCheckRc rc.error == (0,0)

   true
@@ -746,7 +770,7 @@ proc testFilterFifo*(
 proc testFilterBacklog*(
     noisy: bool;
     list: openArray[ProofTrieData];  # Sample data for generating filters
-    layout = QidSlotLyo;             # Backend fifos layout
+    layout = LyoSamples[0][0];       # Backend fifos layout
     reorgPercent = 40;               # To be deleted and re-filled
     rdbPath = "";                    # Optional Rocks DB storage directory
     sampleSize = 777;                # Truncate `list`

@@ -786,6 +810,9 @@ proc testFilterBacklog*(
     block:
       let rc = db.stow(persistent=true)
       xCheckRc rc.error == 0
+    block:
+      let rc = db.checkJournal()
+      xCheckRc rc.error == (0,0)
     let validateFifoOk = be.validateFifo(serial=n, hashesOk=true)
     xCheck validateFifoOk
   when false: # or true:

@@ -845,6 +872,9 @@ proc testFilterBacklog*(
     block:
       let rc = xb.check(relax=false)
      xCheckRc rc.error == (0,0)
+    block:
+      let rc = db.checkJournal()
+      xCheckRc rc.error == (0,0)

     #show(episode, "testFilterBacklog (3)")


@@ -31,8 +31,12 @@ type
     kvpLst*: seq[LeafTiePayload]

 const
-  QidSlotLyo* = [(4,0,10),(3,3,10),(3,4,10),(3,5,10)]
-  QidSample* = (3 * QidSlotLyo.capacity.minCovered) div 2
+  samples = [
+    [ (4,0,10), (3,3,10), (3,4,10), (3,5,10)],
+    [(2,0,high int),(1,1,high int),(1,1,high int),(1,1,high int)],
+  ]
+
+  LyoSamples* = samples.mapIt((it, (3 * it.capacity.minCovered) div 2))

 # ------------------------------------------------------------------------------
 # Private helpers
@@ -106,7 +110,7 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
 func `==`*[T: AristoError|VertexID](a: T, b: int): bool =
   a == T(b)

-func `==`*(a: (VertexID,AristoError), b: (int,int)): bool =
+func `==`*(a: (VertexID|QueueID,AristoError), b: (int,int)): bool =
   (a[0].int,a[1].int) == b

 func `==`*(a: (VertexID,AristoError), b: (int,AristoError)): bool =


@@ -335,8 +335,8 @@ proc testVidRecycleLists*(noisy = true; seed = 42): bool =

 proc testQidScheduler*(
     noisy = true;
-    layout = QidSlotLyo;
-    sampleSize = QidSample;
+    layout = LyoSamples[0][0];
+    sampleSize = LyoSamples[0][1];
     reorgPercent = 40
       ): bool =
   ##


@@ -43,7 +43,7 @@ const
   ## Policy setting for `pack()`

 let
-  TxQidLyo = QidSlotLyo.to(QidLayoutRef)
+  TxQidLyo = LyoSamples[0][0].to(QidLayoutRef)
   ## Cascaded filter slots layout for testing

 # ------------------------------------------------------------------------------