From 0d4ef023ed7e1f230c38d82e77893ba3f9bbb476 Mon Sep 17 00:00:00 2001 From: Jordan Hrycaj Date: Fri, 26 Apr 2024 13:43:52 +0000 Subject: [PATCH] Update aristo journal functionality (#2155) * Aristo: Code cosmetics, e.g. update some CamelCase names * CoreDb+Aristo: Provide oldest known state root implied details: The Aristo journal allows to recover earlier but not all state roots. * Aristo: Fix journal backward index operator, e.g. `[^1]` * Aristo: Fix journal updater why: The `fifosStore()` store function slightly misinterpreted the update instructions when translation is to database `put()` functions. The effect was that the journal was ever growing due to stale entries which were never deleted. * CoreDb+Aristo: Provide utils for purging stale data from the KVT details: See earlier patch, not all state roots are available. This patch provides a mapping from some state root to a block number and allows to remove all KVT data related to a particular block number * Aristo+Kvt: Implement a clean up schedule for expired data in KVT why: For a single state ledger like `Aristo`, there is only a limited backlog of states. So KVT data (i.e. headers etc.) 
are cleaned up regularly * Fix copyright year --- nimbus/core/chain/persist_blocks.nim | 32 ++- nimbus/db/aristo/aristo_api.nim | 17 ++ nimbus/db/aristo/aristo_check.nim | 22 +- nimbus/db/aristo/aristo_check/check_be.nim | 2 +- .../db/aristo/aristo_check/check_journal.nim | 203 ++++++++++++++++++ nimbus/db/aristo/aristo_constants.nim | 19 +- nimbus/db/aristo/aristo_desc/desc_error.nim | 6 + .../db/aristo/aristo_desc/desc_structural.nim | 12 +- nimbus/db/aristo/aristo_filter.nim | 2 +- .../db/aristo/aristo_filter/filter_fifos.nim | 6 + .../aristo/aristo_filter/filter_helpers.nim | 6 +- .../db/aristo/aristo_filter/filter_merge.nim | 4 +- .../aristo/aristo_filter/filter_reverse.nim | 8 +- .../aristo/aristo_filter/filter_scheduler.nim | 17 +- nimbus/db/aristo/aristo_get.nim | 18 +- nimbus/db/aristo/aristo_tx.nim | 4 +- nimbus/db/core_db/backend/aristo_db.nim | 5 + .../backend/aristo_db/handlers_aristo.nim | 15 ++ nimbus/db/core_db/core_apps_newapi.nim | 82 ++++++- nimbus/db/core_db/memory_only.nim | 1 + nimbus/db/storage_types.nim | 6 + tests/test_aristo.nim | 12 +- tests/test_aristo/test_filter.nim | 74 +++++-- tests/test_aristo/test_helpers.nim | 10 +- tests/test_aristo/test_misc.nim | 4 +- tests/test_aristo/test_tx.nim | 2 +- 26 files changed, 506 insertions(+), 83 deletions(-) create mode 100644 nimbus/db/aristo/aristo_check/check_journal.nim diff --git a/nimbus/core/chain/persist_blocks.nim b/nimbus/core/chain/persist_blocks.nim index cbe6b97d1..35b92d144 100644 --- a/nimbus/core/chain/persist_blocks.nim +++ b/nimbus/core/chain/persist_blocks.nim @@ -36,6 +36,10 @@ type PersistBlockFlags = set[PersistBlockFlag] +const + CleanUpEpoch = 30_000.u256 + ## Regular checks for history clean up (applies to single state DB) + # ------------------------------------------------------------------------------ # Private # ------------------------------------------------------------------------------ @@ -53,12 +57,22 @@ proc getVmState(c: ChainRef, header: BlockHeader): return 
err() return ok(vmState) +proc purgeExpiredBlocks(db: CoreDbRef) {.inline, raises: [RlpError].} = + ## Remove non-reachable blocks from KVT database + var blkNum = db.getOldestJournalBlockNumber() + if 0 < blkNum: + blkNum = blkNum - 1 + while 0 < blkNum: + if not db.forgetHistory blkNum: + break + blkNum = blkNum - 1 + + proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader]; bodies: openArray[BlockBody], flags: PersistBlockFlags = {}): ValidationResult # wildcard exception, wrapped below in public section {.inline, raises: [CatchableError].} = - let dbTx = c.db.beginTransaction() defer: dbTx.dispose() @@ -71,10 +85,13 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader]; let vmState = c.getVmState(headers[0]).valueOr: return ValidationResult.Error - trace "Persisting blocks", - fromBlock = headers[0].blockNumber, - toBlock = headers[^1].blockNumber + # Check point + let stateRootChpt = vmState.parent.stateRoot + # Needed for figuring out whether KVT cleanup is due (see at the end) + let (fromBlock, toBlock) = (headers[0].blockNumber, headers[^1].blockNumber) + + trace "Persisting blocks", fromBlock, toBlock for i in 0 ..< headers.len: let (header, body) = (headers[i], bodies[i]) @@ -177,6 +194,13 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader]; # The `c.db.persistent()` call is ignored by the legacy DB which # automatically saves persistently when reaching the zero level transaction c.db.persistent() + + # For a single state ledger, there is only a limited backlog. So clean up + # regularly (the `CleanUpEpoch` should not be too small as each lookup pulls + # a journal entry from disk.) 
+ if (fromBlock mod CleanUpEpoch) <= (toBlock - fromBlock): + c.db.purgeExpiredBlocks() + ValidationResult.OK # ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_api.nim b/nimbus/db/aristo/aristo_api.nim index 88385d837..cdf32be89 100644 --- a/nimbus/db/aristo/aristo_api.nim +++ b/nimbus/db/aristo/aristo_api.nim @@ -148,6 +148,13 @@ type ## pair was found on the filter or the backend, this transaction is ## empty. + AristoApiGetFilUbeFn* = + proc(db: AristoDbRef; + qid: QueueID; + ): Result[FilterRef,AristoError] + {.noRaise.} + ## Get the filter from the unfiltered backened if available. + AristoApiGetKeyRcFn* = proc(db: AristoDbRef; vid: VertexID; @@ -352,6 +359,7 @@ type forget*: AristoApiForgetFn forkTop*: AristoApiForkTopFn forkWith*: AristoApiForkWithFn + getFilUbe*: AristoApiGetFilUbeFn getKeyRc*: AristoApiGetKeyRcFn hashify*: AristoApiHashifyFn hasPath*: AristoApiHasPathFn @@ -384,6 +392,7 @@ type AristoApiProfForgetFn = "forget" AristoApiProfForkTopFn = "forkTop" AristoApiProfForkWithFn = "forkWith" + AristoApiProfGetFilUbeFn = "getFilUBE" AristoApiProfGetKeyRcFn = "getKeyRc" AristoApiProfHashifyFn = "hashify" AristoApiProfHasPathFn = "hasPath" @@ -434,6 +443,7 @@ when AutoValidateApiHooks: doAssert not api.forget.isNil doAssert not api.forkTop.isNil doAssert not api.forkWith.isNil + doAssert not api.getFilUbe.isNil doAssert not api.getKeyRc.isNil doAssert not api.hashify.isNil doAssert not api.hasPath.isNil @@ -486,6 +496,7 @@ func init*(api: var AristoApiObj) = api.forget = forget api.forkTop = forkTop api.forkWith = forkWith + api.getFilUbe = getFilUbe api.getKeyRc = getKeyRc api.hashify = hashify api.hasPath = hasPath @@ -521,6 +532,7 @@ func dup*(api: AristoApiRef): AristoApiRef = forget: api.forget, forkTop: api.forkTop, forkWith: api.forkWith, + getFilUbe: api.getFilUbe, getKeyRc: api.getKeyRc, hashify: api.hashify, hasPath: api.hasPath, @@ -609,6 +621,11 @@ func init*( 
AristoApiProfForkWithFn.profileRunner: result = api.forkWith(a, b, c, d) + profApi.getFilUbe = + proc(a: AristoDbRef; b: QueueID): auto = + AristoApiProfGetFilUbeFn.profileRunner: + result = api.getFilUbe(a, b) + profApi.getKeyRc = proc(a: AristoDbRef; b: VertexID): auto = AristoApiProfGetKeyRcFn.profileRunner: diff --git a/nimbus/db/aristo/aristo_check.nim b/nimbus/db/aristo/aristo_check.nim index e6f8ca918..e856ad47d 100644 --- a/nimbus/db/aristo/aristo_check.nim +++ b/nimbus/db/aristo/aristo_check.nim @@ -20,7 +20,7 @@ import results, ./aristo_walk/persistent, "."/[aristo_desc, aristo_get, aristo_init, aristo_utils], - ./aristo_check/[check_be, check_top] + ./aristo_check/[check_be, check_journal, check_top] # ------------------------------------------------------------------------------ # Public functions @@ -55,7 +55,7 @@ proc checkBE*( cache = true; # Also verify against top layer cache fifos = false; # Also verify cascaded filter fifos ): Result[void,(VertexID,AristoError)] = - ## Veryfy database backend structure. If the argument `relax` is set `false`, + ## Verify database backend structure. If the argument `relax` is set `false`, ## all necessary Merkle hashes are compiled and verified. If the argument ## `cache` is set `true`, the cache is also checked so that a safe operation ## (like `resolveBackendFilter()`) will leave the backend consistent. @@ -79,6 +79,18 @@ proc checkBE*( of BackendVoid: return VoidBackendRef.checkBE(db, cache=cache, relax=relax) +proc checkJournal*( + db: AristoDbRef; # Database, top layer + ): Result[void,(QueueID,AristoError)] = + ## Verify database backend journal. 
+ case db.backend.kind: + of BackendMemory: + return MemBackendRef.checkJournal(db) + of BackendRocksDB: + return RdbBackendRef.checkJournal(db) + of BackendVoid: + return ok() # no journal + proc check*( db: AristoDbRef; # Database, top layer @@ -89,7 +101,11 @@ proc check*( ): Result[void,(VertexID,AristoError)] = ## Shortcut for running `checkTop()` followed by `checkBE()` ? db.checkTop(proofMode = proofMode) - ? db.checkBE(relax = relax, cache = cache) + ? db.checkBE(relax = relax, cache = cache, fifos = fifos) + if fifos: + let rc = db.checkJournal() + if rc.isErr: + return err((VertexID(0),rc.error[1])) ok() # ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_check/check_be.nim b/nimbus/db/aristo/aristo_check/check_be.nim index 33358a802..da00ad1a7 100644 --- a/nimbus/db/aristo/aristo_check/check_be.nim +++ b/nimbus/db/aristo/aristo_check/check_be.nim @@ -185,7 +185,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef]( if fifos and not db.backend.isNil and not db.backend.journal.isNil: - var lastTrg = db.getKeyUBE(VertexID(1)).get(otherwise = VOID_HASH_KEY) + var lastTrg = db.getKeyUbe(VertexID(1)).get(otherwise = VOID_HASH_KEY) .to(Hash256) for (qid,filter) in db.backend.T.walkFifoBe: # walk in fifo order if filter.src != lastTrg: diff --git a/nimbus/db/aristo/aristo_check/check_journal.nim b/nimbus/db/aristo/aristo_check/check_journal.nim new file mode 100644 index 000000000..86c9bbdc5 --- /dev/null +++ b/nimbus/db/aristo/aristo_check/check_journal.nim @@ -0,0 +1,203 @@ +# nimbus-eth1 +# Copyright (c) 2023-2024 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. 
This file may not be copied, modified, or distributed +# except according to those terms. + +{.push raises: [].} + +import + std/[algorithm, sequtils, sets, tables], + eth/common, + results, + ../aristo_filter/filter_scheduler, + ../aristo_walk/persistent, + ".."/[aristo_desc, aristo_blobify] + +const + ExtraDebugMessages = false + +type + JrnRec = tuple + src: Hash256 + trg: Hash256 + size: int + +when ExtraDebugMessages: + import + ../aristo_debug + +# ------------------------------------------------------------------------------ +# Private functions and helpers +# ------------------------------------------------------------------------------ + +template noValueError(info: static[string]; code: untyped) = + try: + code + except ValueError as e: + raiseAssert info & ", name=\"" & $e.name & "\", msg=\"" & e.msg & "\"" + +when ExtraDebugMessages: + proc pp(t: var Table[QueueID,JrnRec]): string = + result = "{" + for qid in t.keys.toSeq.sorted: + t.withValue(qid,w): + result &= qid.pp & "#" & $w[].size & "," + if result[^1] == '{': + result &= "}" + else: + result[^1] = '}' + + proc pp(t: seq[QueueID]): string = + result = "{" + var list = t + for n in 2 ..< list.len: + if list[n-1] == list[n] - 1 and + (list[n-2] == QueueID(0) or list[n-2] == list[n] - 2): + list[n-1] = QueueID(0) + for w in list: + if w != QueueID(0): + result &= w.pp & "," + elif result[^1] == ',': + result[^1] = '.' + result &= "." + if result[^1] == '{': + result &= "}" + else: + result[^1] = '}' + + proc pp(t: HashSet[QueueID]): string = + result = "{" + var list = t.toSeq.sorted + for n in 2 ..< list.len: + if list[n-1] == list[n] - 1 and + (list[n-2] == QueueID(0) or list[n-2] == list[n] - 2): + list[n-1] = QueueID(0) + for w in list: + if w != QueueID(0): + result &= w.pp & "," + elif result[^1] == ',': + result[^1] = '.' + result &= "." 
+ if result[^1] == '{': + result &= "}" + else: + result[^1] = '}' + +# ------------------------------------------------------------------------------ +# Public functions +# ------------------------------------------------------------------------------ + +proc checkJournal*[T: RdbBackendRef|MemBackendRef]( + _: type T; + db: AristoDbRef; + ): Result[void,(QueueID,AristoError)] = + let jrn = db.backend.journal + if jrn.isNil: return ok() + + var + nToQid: seq[QueueID] # qids sorted by history/age + cached: HashSet[QueueID] # `nToQid[]` as set + saved: Table[QueueID,JrnRec] + error: (QueueID,AristoError) + + when ExtraDebugMessages: + var + sizeTally = 0 + maxBlock = 0 + + proc moan(n = -1, s = "", listOk = true) = + var txt = "" + if 0 <= n: + txt &= " (" & $n & ")" + if error[1] != AristoError(0): + txt &= " oops" + txt &= + " jLen=" & $jrn.len & + " tally=" & $sizeTally & + " maxBlock=" & $maxBlock & + "" + if 0 < s.len: + txt &= " " & s + if error[1] != AristoError(0): + txt &= + " errQid=" & error[0].pp & + " error=" & $error[1] & + "" + if listOk: + txt &= + "\n cached=" & cached.pp & + "\n saved=" & saved.pp & + "" + debugEcho "*** checkJournal", txt + else: + template moan(n = -1, s = "", listOk = true) = + discard + + # Collect cached handles + for n in 0 ..< jrn.len: + let qid = jrn[n] + # Must be no overlap + if qid in cached: + error = (qid,CheckJrnCachedQidOverlap) + moan(2) + return err(error) + cached.incl qid + nToQid.add qid + + # Collect saved data + for (qid,fil) in db.backend.T.walkFilBe(): + var jrnRec: JrnRec + jrnRec.src = fil.src + jrnRec.trg = fil.trg + + when ExtraDebugMessages: + let rc = fil.blobify + if rc.isErr: + moan(5) + return err((qid,rc.error)) + jrnRec.size = rc.value.len + if maxBlock < jrnRec.size: + maxBlock = jrnRec.size + sizeTally += jrnRec.size + + saved[qid] = jrnRec + + # Compare cached against saved data + let + savedQids = saved.keys.toSeq.toHashSet + unsavedQids = cached - savedQids + staleQids = savedQids - cached + + 
if 0 < unsavedQids.len: + error = (unsavedQids.toSeq.sorted[0],CheckJrnSavedQidMissing) + moan(6) + return err(error) + + if 0 < staleQids.len: + error = (staleQids.toSeq.sorted[0], CheckJrnSavedQidStale) + moan(7) + return err(error) + + # Compare whether journal records link together + if 1 < nToQid.len: + noValueError("linked journal records"): + var prvRec = saved[nToQid[0]] + for n in 1 ..< nToQid.len: + let thisRec = saved[nToQid[n]] + if prvRec.trg != thisRec.src: + error = (nToQid[n],CheckJrnLinkingGap) + moan(8, "qidInx=" & $n) + return err(error) + prvRec = thisRec + + moan(9, listOk=false) + ok() + +# ------------------------------------------------------------------------------ +# End +# ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_constants.nim b/nimbus/db/aristo/aristo_constants.nim index d143979ad..dffcf880f 100644 --- a/nimbus/db/aristo/aristo_constants.nim +++ b/nimbus/db/aristo/aristo_constants.nim @@ -41,10 +41,21 @@ const ## Useful shortcut DEFAULT_QID_QUEUES* = [ - (128, 0), ## Consecutive list of 128 filter slots - ( 64, 63), ## Overflow list, 64 filters, skipping 63 filters in-between - ( 64, 127), ## .. - ( 64, 255)] + (128, 0), # Consecutive list of (at least) 128 filter slots + ( 16, 3), # Overflow list with (at least) 16 filter slots (with gap size 3) + # each slot covering 4 filters from previous list + ( 1, 1), # .. + ( 1, 1)] + ## The `DEFAULT_QID_QUEUES` schedule has the following properties: + ## * most recent consecutive slots: 128 + ## * maximal slots used: 151 + ## * covered backlog savings: between 216..231 + ## This was calculated via the `capacity()` function from the + ## `filter_scheduler.nim` source. So, saving each block after executing + ## it, the previous 128 block chain states will be directly accessible. 
+ ## For older block chain states (of at least back to 216), the system can + ## be positioned before the desired state and block by block executed + ## forward. SUB_TREE_DISPOSAL_MAX* = 200_000 ## Some limit for disposing sub-trees in one go using `delete()`. diff --git a/nimbus/db/aristo/aristo_desc/desc_error.nim b/nimbus/db/aristo/aristo_desc/desc_error.nim index 6d050c74b..8515429fd 100644 --- a/nimbus/db/aristo/aristo_desc/desc_error.nim +++ b/nimbus/db/aristo/aristo_desc/desc_error.nim @@ -169,6 +169,12 @@ type CheckBeFifoSrcTrgMismatch CheckBeFifoTrgNotStateRoot + # Jornal check `checkJournal()` + CheckJrnCachedQidOverlap + CheckJrnSavedQidMissing + CheckJrnSavedQidStale + CheckJrnLinkingGap + # Neighbour vertex, tree traversal `nearbyRight()` and `nearbyLeft()` NearbyBeyondRange NearbyBranchError diff --git a/nimbus/db/aristo/aristo_desc/desc_structural.nim b/nimbus/db/aristo/aristo_desc/desc_structural.nim index 3df6f829e..7d4bca236 100644 --- a/nimbus/db/aristo/aristo_desc/desc_structural.nim +++ b/nimbus/db/aristo/aristo_desc/desc_structural.nim @@ -324,7 +324,7 @@ func to*(node: NodeRef; T: type VertexRef): T = node.VertexRef.dup func to*(a: array[4,tuple[size, width: int]]; T: type QidLayoutRef): T = - ## Convert a size-width array to a `QidLayoutRef` layout. Over large + ## Convert a size-width array to a `QidLayoutRef` layout. Overly large ## array field values are adjusted to its maximal size. var q: array[4,QidSpec] for n in 0..3: @@ -335,18 +335,20 @@ func to*(a: array[4,tuple[size, width: int]]; T: type QidLayoutRef): T = T(q: q) func to*(a: array[4,tuple[size, width, wrap: int]]; T: type QidLayoutRef): T = - ## Convert a size-width-wrap array to a `QidLayoutRef` layout. Over large + ## Convert a size-width-wrap array to a `QidLayoutRef` layout. Overly large ## array field values are adjusted to its maximal size. Too small `wrap` - ## values are adjusted to its minimal size. + ## field values are adjusted to its minimal size. 
var q: array[4,QidSpec] for n in 0..2: q[n] = (min(a[n].size.uint, QidSpecSizeMax), min(a[n].width.uint, QidSpecWidthMax), - QueueID(max(a[n].size + a[n+1].width, a[n].width+1, a[n].wrap))) + QueueID(max(a[n].size + a[n+1].width, a[n].width+1, + min(a[n].wrap, DefaultQidWrap.int)))) q[0].width = 0 q[3] = (min(a[3].size.uint, QidSpecSizeMax), min(a[3].width.uint, QidSpecWidthMax), - QueueID(max(a[3].size, a[3].width, a[3].wrap))) + QueueID(max(a[3].size, a[3].width, + min(a[3].wrap, DefaultQidWrap.int)))) T(q: q) # ------------------------------------------------------------------------------ diff --git a/nimbus/db/aristo/aristo_filter.nim b/nimbus/db/aristo/aristo_filter.nim index b40d08281..1eb2bb4b3 100644 --- a/nimbus/db/aristo/aristo_filter.nim +++ b/nimbus/db/aristo/aristo_filter.nim @@ -73,7 +73,7 @@ proc merge*( ## argument `filter`, all the `top` and `stack` layers should be cleared. ## let ubeRoot = block: - let rc = db.getKeyUBE VertexID(1) + let rc = db.getKeyUbe VertexID(1) if rc.isOk: rc.value.to(Hash256) elif rc.error == GetKeyNotFound: diff --git a/nimbus/db/aristo/aristo_filter/filter_fifos.nim b/nimbus/db/aristo/aristo_filter/filter_fifos.nim index f2f42892d..2dc5670c9 100644 --- a/nimbus/db/aristo/aristo_filter/filter_fifos.nim +++ b/nimbus/db/aristo/aristo_filter/filter_fifos.nim @@ -75,6 +75,7 @@ proc fifosStore*( # Update journal filters and calculate database update var instr = FifoInstr(scd: upd.fifo) + dbClear: seq[QueueID] hold: seq[FilterRef] saved = false @@ -100,10 +101,12 @@ proc fifosStore*( of HoldQid: # Push filter + dbClear.add act.qid hold.add be.getFilterOrReturn act.qid # Merge additional journal filters into top filter for w in act.qid+1 .. 
act.xid: + dbClear.add w let lower = be.getFilterOrReturn w hold[^1] = hold[^1].joinFiltersOrReturn lower @@ -115,6 +118,9 @@ proc fifosStore*( let upper = hold.pop lower = upper.joinFiltersOrReturn lower instr.put.add (act.qid, lower) + for qid in dbClear: + instr.put.add (qid, FilterRef(nil)) + dbClear.setLen(0) if not saved: return err(FilExecSaveMissing) diff --git a/nimbus/db/aristo/aristo_filter/filter_helpers.nim b/nimbus/db/aristo/aristo_filter/filter_helpers.nim index 68d23e0ef..13ed421f8 100644 --- a/nimbus/db/aristo/aristo_filter/filter_helpers.nim +++ b/nimbus/db/aristo/aristo_filter/filter_helpers.nim @@ -127,8 +127,8 @@ proc getFilterOverlap*( ## Return the number of journal filters in the leading chain that is ## reverted by the argument `filter`. A heuristc approach is used here ## for an argument `filter` with a valid filter ID when the chain is - ## longer than one items. So only single chain overlaps a guaranteed to - ## be found. + ## longer than one items. Only single step filter overlaps are guaranteed + ## to be found. 
## # Check against the top-fifo entry let qid = be.journal[0] @@ -148,7 +148,7 @@ proc getFilterOverlap*( if filter.trg == top.trg: return 1 - # Check against sme stored filter IDs + # Check against some stored filter IDs if filter.isValid: let rc = be.getFilterFromFifo(filter.fid, earlierOK=true) if rc.isOk: diff --git a/nimbus/db/aristo/aristo_filter/filter_merge.nim b/nimbus/db/aristo/aristo_filter/filter_merge.nim index 32f16c3b4..99bca3b45 100644 --- a/nimbus/db/aristo/aristo_filter/filter_merge.nim +++ b/nimbus/db/aristo/aristo_filter/filter_merge.nim @@ -75,7 +75,7 @@ proc merge*( if vtx.isValid or not newFilter.sTab.hasKey vid: newFilter.sTab[vid] = vtx elif newFilter.sTab.getOrVoid(vid).isValid: - let rc = db.getVtxUBE vid + let rc = db.getVtxUbe vid if rc.isOk: newFilter.sTab[vid] = vtx # VertexRef(nil) elif rc.error == GetVtxNotFound: @@ -87,7 +87,7 @@ proc merge*( if key.isValid or not newFilter.kMap.hasKey vid: newFilter.kMap[vid] = key elif newFilter.kMap.getOrVoid(vid).isValid: - let rc = db.getKeyUBE vid + let rc = db.getKeyUbe vid if rc.isOk: newFilter.kMap[vid] = key elif rc.error == GetKeyNotFound: diff --git a/nimbus/db/aristo/aristo_filter/filter_reverse.nim b/nimbus/db/aristo/aristo_filter/filter_reverse.nim index 89d0aae2e..12d25e4ec 100644 --- a/nimbus/db/aristo/aristo_filter/filter_reverse.nim +++ b/nimbus/db/aristo/aristo_filter/filter_reverse.nim @@ -1,5 +1,5 @@ # nimbus-eth1 -# Copyright (c) 2023 Status Research & Development GmbH +# Copyright (c) 2023-2024 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -34,7 +34,7 @@ proc revFilter*( # Get vid generator state on backend block: - let rc = db.getIdgUBE() + let rc = db.getIdgUbe() if rc.isOk: rev.vGen = rc.value elif rc.error != GetIdgNotFound: @@ -42,7 +42,7 @@ proc revFilter*( # Calculate reverse changes for the `sTab[]` structural table for vid in 
filter.sTab.keys: - let rc = db.getVtxUBE vid + let rc = db.getVtxUbe vid if rc.isOk: rev.sTab[vid] = rc.value elif rc.error == GetVtxNotFound: @@ -52,7 +52,7 @@ proc revFilter*( # Calculate reverse changes for the `kMap` sequence. for vid in filter.kMap.keys: - let rc = db.getKeyUBE vid + let rc = db.getKeyUbe vid if rc.isOk: rev.kMap[vid] = rc.value elif rc.error == GetKeyNotFound: diff --git a/nimbus/db/aristo/aristo_filter/filter_scheduler.nim b/nimbus/db/aristo/aristo_filter/filter_scheduler.nim index 6f6fcab23..bdba4f91c 100644 --- a/nimbus/db/aristo/aristo_filter/filter_scheduler.nim +++ b/nimbus/db/aristo/aristo_filter/filter_scheduler.nim @@ -261,7 +261,7 @@ func capacity( func capacity*( ctx: openArray[tuple[size, width, wrap: int]]; # Schedule layout ): tuple[maxQueue: int, minCovered: int, maxCovered: int] = - ## Variant of `capacity()` below. + ## Variant of `capacity()`. ctx.toSeq.mapIt((it[0],it[1])).capacity func capacity*( @@ -289,16 +289,17 @@ func addItem*( ## SaveQid -- Store a new item under the address ## -- on the database. ## - ## HoldQid .. -- Move the records accessed by the argument - ## -- addresses from the database to the right - ## -- end of the local hold queue. The age of - ## -- the items on the hold queue increases - ## -- left to right. + ## HoldQid .. -- Move the records referred to by the + ## -- argument addresses from the database to + ## -- the right end of the local hold queue. + ## -- The age of the items on the hold queue + ## -- increases left to right. ## ## DequQid -- Merge items from the hold queue into a ## -- new item and store it under the address ## -- on the database. Clear the - ## -- the hold queue. + ## -- the hold queue and the corresponding + ## -- items on the database. ## ## DelQid -- Delete item. This happens if the last ## -- oberflow queue needs to make space for @@ -557,7 +558,7 @@ func `[]`*( bix: BackwardsIndex; # Index into latest items ): QueueID = ## Variant of `[]` for provifing `[^bix]`. 
- fifo[fifo.state.len - bix.distinctBase] + fifo[fifo.len - bix.distinctBase] func `[]`*( diff --git a/nimbus/db/aristo/aristo_get.nim b/nimbus/db/aristo/aristo_get.nim index 0a1041292..b800d586f 100644 --- a/nimbus/db/aristo/aristo_get.nim +++ b/nimbus/db/aristo/aristo_get.nim @@ -22,7 +22,7 @@ import # Public functions # ------------------------------------------------------------------------------ -proc getIdgUBE*( +proc getIdgUbe*( db: AristoDbRef; ): Result[seq[VertexID],AristoError] = ## Get the ID generator state from the unfiltered backened if available. @@ -31,7 +31,7 @@ proc getIdgUBE*( return be.getIdgFn() err(GetIdgNotFound) -proc getFqsUBE*( +proc getFqsUbe*( db: AristoDbRef; ): Result[seq[(QueueID,QueueID)],AristoError] = ## Get the list of filter IDs unfiltered backened if available. @@ -40,7 +40,7 @@ proc getFqsUBE*( return be.getFqsFn() err(GetFqsNotFound) -proc getVtxUBE*( +proc getVtxUbe*( db: AristoDbRef; vid: VertexID; ): Result[VertexRef,AristoError] = @@ -50,17 +50,17 @@ proc getVtxUBE*( return be.getVtxFn vid err GetVtxNotFound -proc getKeyUBE*( +proc getKeyUbe*( db: AristoDbRef; vid: VertexID; ): Result[HashKey,AristoError] = - ## Get the merkle hash/key from the unfiltered backend if available. + ## Get the Merkle hash/key from the unfiltered backend if available. let be = db.backend if not be.isNil: return be.getKeyFn vid err GetKeyNotFound -proc getFilUBE*( +proc getFilUbe*( db: AristoDbRef; qid: QueueID; ): Result[FilterRef,AristoError] = @@ -78,7 +78,7 @@ proc getIdgBE*( ## Get the ID generator state the `backened` layer if available. 
if not db.roFilter.isNil: return ok(db.roFilter.vGen) - db.getIdgUBE() + db.getIdgUbe() proc getVtxBE*( db: AristoDbRef; @@ -90,7 +90,7 @@ proc getVtxBE*( if vtx.isValid: return ok(vtx) return err(GetVtxNotFound) - db.getVtxUBE vid + db.getVtxUbe vid proc getKeyBE*( db: AristoDbRef; @@ -102,7 +102,7 @@ proc getKeyBE*( if key.isValid: return ok(key) return err(GetKeyNotFound) - db.getKeyUBE vid + db.getKeyUbe vid # ------------------ diff --git a/nimbus/db/aristo/aristo_tx.nim b/nimbus/db/aristo/aristo_tx.nim index e048825bf..d26a770fd 100644 --- a/nimbus/db/aristo/aristo_tx.nim +++ b/nimbus/db/aristo/aristo_tx.nim @@ -279,7 +279,7 @@ proc forkWith*( # Try `(vid,key)` on unfiltered backend block: - let beKey = db.getKeyUBE(vid).valueOr: VOID_HASH_KEY + let beKey = db.getKeyUbe(vid).valueOr: VOID_HASH_KEY if beKey == key: let rc = db.fork(noFilter = true) if rc.isOk: @@ -451,7 +451,7 @@ proc stow*( if db.roFilter.isValid: db.top.final.vGen = db.roFilter.vGen else: - let rc = db.getIdgUBE() + let rc = db.getIdgUbe() if rc.isOk: db.top.final.vGen = rc.value else: diff --git a/nimbus/db/core_db/backend/aristo_db.nim b/nimbus/db/core_db/backend/aristo_db.nim index dee4ccec9..d63881fa4 100644 --- a/nimbus/db/core_db/backend/aristo_db.nim +++ b/nimbus/db/core_db/backend/aristo_db.nim @@ -242,6 +242,11 @@ func toAristo*(mBe: CoreDbMptBackendRef): AristoDbRef = if not mBe.isNil and mBe.parent.isAristo: return mBe.AristoCoreDbMptBE.adb +proc toAristoOldestStateRoot*(mBe: CoreDbMptBackendRef): Hash256 = + if not mBe.isNil and mBe.parent.isAristo: + return mBe.parent.AristoCoreDbRef.adbBase.toJournalOldestStateRoot() + EMPTY_ROOT_HASH + # ------------------------------------------------------------------------------ # Public aristo iterators # ------------------------------------------------------------------------------ diff --git a/nimbus/db/core_db/backend/aristo_db/handlers_aristo.nim b/nimbus/db/core_db/backend/aristo_db/handlers_aristo.nim index 52d971dc9..1cf7538ad 
100644 --- a/nimbus/db/core_db/backend/aristo_db/handlers_aristo.nim +++ b/nimbus/db/core_db/backend/aristo_db/handlers_aristo.nim @@ -17,6 +17,7 @@ import stew/byteutils, results, ../../../aristo, + ../../../aristo/aristo_filter/filter_scheduler, ../../base, ../../base/base_desc, ./common_desc @@ -566,6 +567,20 @@ func toVoidRc*[T]( return ok() err((VoidVID,rc.error).toError(base, info, error)) +proc toJournalOldestStateRoot*(base: AristoBaseRef): Hash256 = + let + adb = base.ctx.mpt + be = adb.backend + if not be.isNil: + let jrn = be.journal + if not jrn.isNil: + let qid = jrn[^1] + if qid.isValid: + let rc = base.api.getFilUbe(adb, qid) + if rc.isOk: + return rc.value.trg + EMPTY_ROOT_HASH + # --------------------- func to*(dsc: CoreDxMptRef, T: type AristoDbRef): T = diff --git a/nimbus/db/core_db/core_apps_newapi.nim b/nimbus/db/core_db/core_apps_newapi.nim index abe5d2d20..bc3a265a0 100644 --- a/nimbus/db/core_db/core_apps_newapi.nim +++ b/nimbus/db/core_db/core_apps_newapi.nim @@ -18,9 +18,10 @@ import chronicles, eth/[common, rlp], results, - stew/byteutils, + stew/[byteutils, endians2], "../.."/[errors, constants], ".."/[aristo, storage_types], + ./backend/aristo_db, "."/base logScope: @@ -88,6 +89,23 @@ template discardRlpException(info: static[string]; code: untyped) = except RlpError as e: warn logTxt info, error=($e.name), msg=e.msg +# --------- + +func to(bn: BlockNumber; T: type Blob): T = + if bn <= high(uint64).toBlockNumber: + bn.truncate(uint64).toBytesBE.toSeq + else: + bn.toBytesBE.toSeq + +func to(data: openArray[byte]; T: type BlockNumber): T = + case data.len: + of 8: + return uint64.fromBytesBE(data).toBlockNumber + of 32: + return UInt256.fromBytesBE(data).toBlockNumber + else: + discard + # ------------------------------------------------------------------------------ # Private iterators # ------------------------------------------------------------------------------ @@ -320,7 +338,6 @@ proc markCanonicalChain( return true - # 
------------------------------------------------------------------------------ # Public functions # ------------------------------------------------------------------------------ @@ -330,6 +347,27 @@ proc exists*(db: CoreDbRef, hash: Hash256): bool = warn logTxt "exisis()", hash, action="hasKey()", error=($$error) return false +proc getBlockNumber*(db: CoreDbRef; stateRoot: Hash256): BlockNumber = + const info = "getBlockNumber()" + if stateRoot != EMPTY_ROOT_HASH: + let + kvt = db.newKvt() + data = kvt.get(stRootToBlockNumKey(stateRoot).toOpenArray).valueOr: + if error.error != KvtNotFound: + warn logTxt info, stateRoot, action="get()", error=($$error) + return + return data.to(BlockNumber) + +proc getOldestJournalBlockNumber*(db: CoreDbRef): BlockNumber = + ## Returns the block number implied by the database journal if there is any, + ## or `BlockNumber(0)`. At the moment, only the `Aristo` database has a + ## journal. + ## + let be = db.ctx.getMpt(CtGeneric).backend + if be.parent.isAristo: + return db.getBlockNumber be.toAristoOldestStateRoot() + + proc getBlockHeader*( db: CoreDbRef; blockHash: Hash256; @@ -523,8 +561,18 @@ proc getAncestorsHashes*( dec ancestorCount proc addBlockNumberToHashLookup*(db: CoreDbRef; header: BlockHeader) = - let blockNumberKey = blockNumberToHashKey(header.blockNumber) - db.newKvt.put(blockNumberKey.toOpenArray, rlp.encode(header.hash)).isOkOr: + ## The function stores lookup for + ## :: + ## header.stateRoot -> header.blockNumber -> header.hash() + ## + let + blockNumberKey = blockNumberToHashKey(header.blockNumber) + stRootKey = stRootToBlockNumKey(header.stateRoot) + kvt = db.newKvt() + kvt.put(stRootKey.toOpenArray, header.blockNumber.to(Blob)).isOkOr: + warn logTxt "addBlockNumberToHashLookup()", + stRootKey, action="put()", `error`=($$error) + kvt.put(blockNumberKey.toOpenArray, rlp.encode(header.hash)).isOkOr: warn logTxt "addBlockNumberToHashLookup()", blockNumberKey, action="put()", error=($$error) @@ -558,6 +606,26 @@ 
proc persistTransactions*( warn logTxt info, action="state()" return EMPTY_ROOT_HASH +proc forgetHistory*( + db: CoreDbRef; + blockNum: BlockNumber; + ): bool + {.gcsafe, raises: [RlpError].} = + ## Remove all data related to the block number argument `num`. This function + ## returns `true`, if some history was available and deleted. + var blockHash: Hash256 + if db.getBlockHash(blockNum, blockHash): + let kvt = db.newKvt() + # delete blockNum->blockHash + discard kvt.del(blockNumberToHashKey(blockNum).toOpenArray) + result = true + + var header: BlockHeader + if db.getBlockHeader(blockHash, header): + # delete blockHash->header, stateRoot->blockNum + discard kvt.del(genericHashKey(blockHash).toOpenArray) + discard kvt.del(stRootToBlockNumKey(header.stateRoot).toOpenArray) + proc getTransaction*( db: CoreDbRef; txRoot: Hash256; @@ -858,6 +926,12 @@ proc persistHeaderToDbWithoutSetHead*( let kvt = db.newKvt() scoreKey = blockHashToScoreKey(headerHash) + + # This extra call `addBlockNumberToHashLookup()` has been added in order + # to access the current block by the state root. So it can be deleted + # if it is no longer needed. + db.addBlockNumberToHashLookup(header) + kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr: warn logTxt "persistHeaderToDbWithoutSetHead()", scoreKey, action="put()", `error`=($$error) diff --git a/nimbus/db/core_db/memory_only.nim b/nimbus/db/core_db/memory_only.nim index 7d99fe73f..aed60e757 100644 --- a/nimbus/db/core_db/memory_only.nim +++ b/nimbus/db/core_db/memory_only.nim @@ -34,6 +34,7 @@ export isAristo, toAristo, toAristoProfData, + toAristoOldestStateRoot, # see `legacy_db` isLegacy, diff --git a/nimbus/db/storage_types.nim b/nimbus/db/storage_types.nim index 1cf756e05..3d5e90f84 100644 --- a/nimbus/db/storage_types.nim +++ b/nimbus/db/storage_types.nim @@ -32,6 +32,7 @@ type snapSyncStorageSlot snapSyncStateRoot blockHashToBlockWitness + stRootToBlockNum DbKey* = object # The first byte stores the key type.
The rest are key-specific values @@ -135,6 +136,11 @@ proc blockHashToBlockWitnessKey*(h: Hash256): DbKey {.inline.} = result.data[1 .. 32] = h.data result.dataEndPos = uint8 32 +proc stRootToBlockNumKey*(h: Hash256): DbKey = + result.data[0] = byte ord(stRootToBlockNum) + result.data[1 .. 32] = h.data + result.dataEndPos = uint8 32 + template toOpenArray*(k: DbKey): openArray[byte] = k.data.toOpenArray(0, int(k.dataEndPos)) diff --git a/tests/test_aristo.nim b/tests/test_aristo.nim index b715d30a5..1028137cf 100644 --- a/tests/test_aristo.nim +++ b/tests/test_aristo.nim @@ -74,8 +74,9 @@ proc setErrorLevel {.used.} = proc miscRunner( noisy = true; - qidSampleSize = QidSample; - ) = + layout = LyoSamples[0]; + ) = + let (lyo,qidSampleSize) = layout suite "Aristo: Miscellaneous tests": @@ -83,10 +84,10 @@ proc miscRunner( check noisy.testVidRecycleLists() test &"Low level cascaded fifos API (sample size: {qidSampleSize})": - check noisy.testQidScheduler(sampleSize = qidSampleSize) + check noisy.testQidScheduler(layout = lyo, sampleSize = qidSampleSize) test &"High level cascaded fifos API (sample size: {qidSampleSize})": - check noisy.testFilterFifo(sampleSize = qidSampleSize) + check noisy.testFilterFifo(layout = lyo, sampleSize = qidSampleSize) test "Short keys and other patholgical cases": check noisy.testShortKeys() @@ -197,7 +198,8 @@ when isMainModule: noisy.accountsRunner(persistent=false) when true: # and false: - noisy.miscRunner(qidSampleSize = 1_000) + for n,w in LyoSamples: + noisy.miscRunner() # layouts = (w[0], 1_000)) # This one uses dumps from the external `nimbus-eth1-blob` repo when true and false: diff --git a/tests/test_aristo/test_filter.nim b/tests/test_aristo/test_filter.nim index d02151336..0abfbafcc 100644 --- a/tests/test_aristo/test_filter.nim +++ b/tests/test_aristo/test_filter.nim @@ -64,7 +64,10 @@ proc fifos(be: BackendRef): seq[seq[(QueueID,FilterRef)]] = discard check be.kind == BackendMemory or be.kind == BackendRocksDB -func 
flatten(a: seq[seq[(QueueID,FilterRef)]]): seq[(QueueID,FilterRef)] {.used.} = +func flatten( + a: seq[seq[(QueueID,FilterRef)]]; + ): seq[(QueueID,FilterRef)] + {.used.} = for w in a: result &= w @@ -238,7 +241,7 @@ proc isDbEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool = if aVtx.isValid and bVtx.isValid: return false # The valid one must match the backend data - let rc = db.getVtxUBE vid + let rc = db.getVtxUbe vid if rc.isErr: return false let vtx = if aVtx.isValid: aVtx else: bVtx @@ -246,7 +249,7 @@ proc isDbEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool = return false elif not vid.isValid and not bTab.hasKey vid: - let rc = db.getVtxUBE vid + let rc = db.getVtxUbe vid if rc.isOk: return false # Exists on backend but missing on `bTab[]` elif rc.error != GetKeyNotFound: @@ -268,7 +271,7 @@ proc isDbEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool = if aKey.isValid and bKey.isValid: return false # The valid one must match the backend data - let rc = db.getKeyUBE vid + let rc = db.getKeyUbe vid if rc.isErr: return false let key = if aKey.isValid: aKey else: bKey @@ -276,7 +279,7 @@ proc isDbEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool = return false elif not vid.isValid and not bMap.hasKey vid: - let rc = db.getKeyUBE vid + let rc = db.getKeyUbe vid if rc.isOk: return false # Exists on backend but missing on `bMap[]` elif rc.error != GetKeyNotFound: @@ -348,18 +351,24 @@ proc checkBeOk( dx: DbTriplet; relax = false; forceCache = false; + fifos = true; noisy = true; ): bool = ## .. 
for n in 0 ..< dx.len: - let - cache = if forceCache: true else: dx[n].dirty.len == 0 - rc = dx[n].checkBE(relax=relax, cache=cache) - xCheckRc rc.error == (0,0): - noisy.say "***", "db check failed", - " n=", n, "/", dx.len-1, - " cache=", cache - + let cache = if forceCache: true else: dx[n].dirty.len == 0 + block: + let rc = dx[n].checkBE(relax=relax, cache=cache, fifos=fifos) + xCheckRc rc.error == (0,0): + noisy.say "***", "db checkBE failed", + " n=", n, "/", dx.len-1, + " cache=", cache + if fifos: + let rc = dx[n].checkJournal() + xCheckRc rc.error == (0,0): + noisy.say "***", "db checkJournal failed", + " n=", n, "/", dx.len-1, + " cache=", cache true proc checkFilterTrancoderOk( @@ -602,7 +611,7 @@ proc testDistributedAccess*( # Check/verify backends block: - let ok = dx.checkBeOk(noisy=noisy) + let ok = dx.checkBeOk(noisy=noisy,fifos=true) xCheck ok: noisy.say "*** testDistributedAccess (4)", "n=", n, "db3".dump db3 block: @@ -661,7 +670,7 @@ proc testDistributedAccess*( # Check/verify backends block: - let ok = dy.checkBeOk(noisy=noisy) + let ok = dy.checkBeOk(noisy=noisy,fifos=true) xCheck ok block: let ok = dy.checkFilterTrancoderOk(noisy=noisy) @@ -675,8 +684,8 @@ proc testDistributedAccess*( proc testFilterFifo*( noisy = true; - layout = QidSlotLyo; # Backend fifos layout - sampleSize = QidSample; # Synthetic filters generation + layout = LyoSamples[0][0]; # Backend fifos layout + sampleSize = LyoSamples[0][1]; # Synthetic filters generation reorgPercent = 40; # To be deleted and re-filled rdbPath = ""; # Optional Rocks DB storage directory ): bool = @@ -710,11 +719,23 @@ proc testFilterFifo*( # ------------------- + block: + let rc = db.checkJournal() + xCheckRc rc.error == (0,0) + for n in 1 .. 
sampleSize: - let storeFilterOK = be.storeFilter(serial=n) - xCheck storeFilterOK - let validateFifoOk = be.validateFifo(serial=n) - xCheck validateFifoOk + #let trigger = n in {7,8} + #if trigger: show(n, be.journal.addItem.exec) + block: + let storeFilterOK = be.storeFilter(serial=n) + xCheck storeFilterOK + block: + #if trigger: show(n) + let rc = db.checkJournal() + xCheckRc rc.error == (0,0) + block: + let validateFifoOk = be.validateFifo(serial=n) + xCheck validateFifoOk # Squash some entries on the fifo block: @@ -739,6 +760,9 @@ proc testFilterFifo*( #show(n) let validateFifoOk = be.validateFifo(serial=n) xCheck validateFifoOk + block: + let rc = db.checkJournal() + xCheckRc rc.error == (0,0) true @@ -746,7 +770,7 @@ proc testFilterFifo*( proc testFilterBacklog*( noisy: bool; list: openArray[ProofTrieData]; # Sample data for generating filters - layout = QidSlotLyo; # Backend fifos layout + layout = LyoSamples[0][0]; # Backend fifos layout reorgPercent = 40; # To be deleted and re-filled rdbPath = ""; # Optional Rocks DB storage directory sampleSize = 777; # Truncate `list` @@ -786,6 +810,9 @@ proc testFilterBacklog*( block: let rc = db.stow(persistent=true) xCheckRc rc.error == 0 + block: + let rc = db.checkJournal() + xCheckRc rc.error == (0,0) let validateFifoOk = be.validateFifo(serial=n, hashesOk=true) xCheck validateFifoOk when false: # or true: @@ -845,6 +872,9 @@ proc testFilterBacklog*( block: let rc = xb.check(relax=false) xCheckRc rc.error == (0,0) + block: + let rc = db.checkJournal() + xCheckRc rc.error == (0,0) #show(episode, "testFilterBacklog (3)") diff --git a/tests/test_aristo/test_helpers.nim b/tests/test_aristo/test_helpers.nim index dccb45b6c..e7e22f22c 100644 --- a/tests/test_aristo/test_helpers.nim +++ b/tests/test_aristo/test_helpers.nim @@ -31,8 +31,12 @@ type kvpLst*: seq[LeafTiePayload] const - QidSlotLyo* = [(4,0,10),(3,3,10),(3,4,10),(3,5,10)] - QidSample* = (3 * QidSlotLyo.capacity.minCovered) div 2 + samples = [ + [ (4,0,10), 
(3,3,10), (3,4,10), (3,5,10)], + [(2,0,high int),(1,1,high int),(1,1,high int),(1,1,high int)], + ] + + LyoSamples* = samples.mapIt((it, (3 * it.capacity.minCovered) div 2)) # ------------------------------------------------------------------------------ # Private helpers @@ -106,7 +110,7 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) = func `==`*[T: AristoError|VertexID](a: T, b: int): bool = a == T(b) -func `==`*(a: (VertexID,AristoError), b: (int,int)): bool = +func `==`*(a: (VertexID|QueueID,AristoError), b: (int,int)): bool = (a[0].int,a[1].int) == b func `==`*(a: (VertexID,AristoError), b: (int,AristoError)): bool = diff --git a/tests/test_aristo/test_misc.nim b/tests/test_aristo/test_misc.nim index 09eb1b2fe..8c244b56d 100644 --- a/tests/test_aristo/test_misc.nim +++ b/tests/test_aristo/test_misc.nim @@ -335,8 +335,8 @@ proc testVidRecycleLists*(noisy = true; seed = 42): bool = proc testQidScheduler*( noisy = true; - layout = QidSlotLyo; - sampleSize = QidSample; + layout = LyoSamples[0][0]; + sampleSize = LyoSamples[0][1]; reorgPercent = 40 ): bool = ## diff --git a/tests/test_aristo/test_tx.nim b/tests/test_aristo/test_tx.nim index 29bc16b3d..b7eb5561f 100644 --- a/tests/test_aristo/test_tx.nim +++ b/tests/test_aristo/test_tx.nim @@ -43,7 +43,7 @@ const ## Policy settig for `pack()` let - TxQidLyo = QidSlotLyo.to(QidLayoutRef) + TxQidLyo = LyoSamples[0][0].to(QidLayoutRef) ## Cascaded filter slots layout for testing # ------------------------------------------------------------------------------