Core db and aristo updates for destructor and tx logic (#1894)
* Disable `TransactionID` related functions from `state_db.nim`
why:
Functions `getCommittedStorage()` and `updateOriginalRoot()` from
the `state_db` module are nowhere used. The emulation of a legacy
`TransactionID` type functionality is administratively expensive to
provide by `Aristo` (the legacy DB version is only partially
implemented, anyway).
As there is no other place where `TransactionID`s are used, they will
not be provided by the `Aristo` variant of the `CoreDb`. For the
legacy DB API, nothing will change.
* Fix copyright headers in source code
* Get rid of compiler warning
* Update Aristo code, remove unused `merge()` variant, export `hashify()`
why:
Adapt to upcoming `CoreDb` wrapper
* Remove synced tx feature from `Aristo`
why:
+ This feature allowed to synchronise transaction methods like begin,
commit, and rollback for a group of descriptors.
+ The feature is over engineered and not needed for `CoreDb`, neither
is it complete (some convergence features missing.)
* Add debugging helpers to `Kvt`
also:
Update database iterator, add count variable yield argument similar
to `Aristo`.
* Provide optional destructors for `CoreDb` API
why:
For the upcoming Aristo wrapper, this allows to control when certain
smart destruction and update can take place. The auto destructor works
fine in general when the storage/cache strategy is known and acceptable
when creating descriptors.
* Add update option for `CoreDb` API function `hash()`
why:
The hash function is typically used to get the state root of the MPT.
Due to lazy hashing, this might be not available on the `Aristo` DB.
So the `update` function asks for re-hashing the current state changes
if needed.
* Update API tracking log mode: `info` => `debug`
* Use shared `Kvt` descriptor in new Ledger API
why:
No need to create a new descriptor all the time
2023-11-16 19:35:03 +00:00
|
|
|
# Nimbus
|
2024-02-01 21:27:48 +00:00
|
|
|
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
2023-05-11 14:25:29 +00:00
|
|
|
# Licensed under either of
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
# at your option. This file may not be copied, modified, or
|
|
|
|
# distributed except according to those terms.
|
|
|
|
|
|
|
|
## Aristo (aka Patricia) DB trancoder test
|
|
|
|
|
|
|
|
import
|
2023-08-30 17:08:39 +00:00
|
|
|
std/[algorithm, sequtils, sets, strutils],
|
2023-05-11 14:25:29 +00:00
|
|
|
eth/common,
|
2023-08-21 14:58:30 +00:00
|
|
|
results,
|
2023-05-11 14:25:29 +00:00
|
|
|
stew/byteutils,
|
2023-09-13 02:32:38 +00:00
|
|
|
stew/endians2,
|
2023-05-11 14:25:29 +00:00
|
|
|
unittest2,
|
2023-08-21 14:58:30 +00:00
|
|
|
../../nimbus/db/aristo,
|
2023-08-25 22:53:59 +00:00
|
|
|
../../nimbus/db/aristo/[
|
2023-12-19 12:39:23 +00:00
|
|
|
aristo_check, aristo_debug, aristo_desc, aristo_blobify, aristo_layers,
|
|
|
|
aristo_vid],
|
2023-09-11 20:38:49 +00:00
|
|
|
../../nimbus/db/aristo/aristo_filter/filter_scheduler,
|
2023-10-11 19:09:11 +00:00
|
|
|
../replay/xcheck,
|
2023-08-21 14:58:30 +00:00
|
|
|
./test_helpers
|
2023-05-11 14:25:29 +00:00
|
|
|
|
|
|
|
type
  TesterDesc = object
    prng: uint32                     ## random state

  QValRef = ref object
    ## Mock database entry for scheduler tests
    fid: FilterID                    ## set from the test serial number
    width: uint32                    ## number of extra entries merged in
                                     ## (grows by `width+1` per merge, see
                                     ## `exec()` below)

  QTabRef = TableRef[QueueID,QValRef]
    ## Mock database, maps queue IDs to entries
|
2023-08-25 22:53:59 +00:00
|
|
|
|
2023-05-11 14:25:29 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Private helpers
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
proc posixPrngRand(state: var uint32): byte =
  ## Advance `state` by the POSIX.1-2001 sample `rand()` recurrence (see
  ## manual page rand(3)) and return one pseudo-random byte.
  state = 1103515245u32 * state + 12345u32
  # Keep the better-quality 15 bits (RAND_MAX == 32767), i.e. bits 16..30
  let sample = (state shr 16) and 0x7fff
  byte(sample shr 8) # second byte of the 15 bit sample
|
|
|
|
|
|
|
|
proc rand[W: SomeInteger|VertexID](ap: var TesterDesc; T: type W): T =
  ## Produce a pseudo-random value of type `T` from the tester's PRNG
  ## byte stream.
  var a: array[sizeof T,byte]
  for n in 0 ..< sizeof T:
    a[n] = ap.prng.posixPrngRand().byte
  # Fix: the original used two free-standing `when sizeof(T) == 1/2`
  # statements followed by `when sizeof(T) == 4: ... else:`, so for 1 and
  # 2 byte types both the specific branch *and* the trailing `else`
  # (uint64) would define `w` -- a compile-time duplicate definition.
  # A single `when`/`elif`/`else` chain selects exactly one branch.
  when sizeof(T) == 1:
    let w = uint8.fromBytesBE(a).T
  elif sizeof(T) == 2:
    let w = uint16.fromBytesBE(a).T
  elif sizeof(T) == 4:
    let w = uint32.fromBytesBE(a).T
  else:
    let w = uint64.fromBytesBE(a).T
  when T is SomeUnsignedInt:
    # That way, `fromBytesBE()` can be applied to `uint`
    result = w
  else:
    # That way the result is independent of endianness
    (addr result).copyMem(unsafeAddr w, sizeof w)
|
|
|
|
|
|
|
|
proc vidRand(td: var TesterDesc; bits = 19): VertexID =
  ## Random vertex ID restricted to `bits` bits of entropy (full 64 bit
  ## range when `bits` is 64 or larger.)
  if 64 <= bits:
    return td.rand VertexID
  let lowBits = (1u64 shl max(1,bits)) - 1
  VertexID(td.rand(uint64) and lowBits)
|
|
|
|
|
|
|
|
proc init(T: type TesterDesc; seed: int): TesterDesc =
  ## Tester descriptor with the PRNG seeded from the low 31 bits of `seed`.
  T(prng: uint32(seed and 0x7fffffff))
|
|
|
|
|
|
|
|
proc `+`(a: VertexID, b: int): VertexID =
  ## Offset a vertex ID by an integer distance.
  VertexID(a.uint64 + b.uint64)
|
|
|
|
|
2023-08-25 22:53:59 +00:00
|
|
|
# ---------------------
|
|
|
|
|
2023-09-05 13:57:20 +00:00
|
|
|
iterator walkFifo(qt: QTabRef;scd: QidSchedRef): (QueueID,QValRef) =
  ## Walk the round-robin queue intervals of the scheduler `scd`, yielding
  ## for each slot the fully qualified queue ID and the matching table
  ## entry from `qt` (`nil` when the table holds no such entry.)

  proc kvp(chn: int, qid: QueueID): (QueueID,QValRef) =
    # Qualify `qid` with the channel number in the two top bits, then
    # look it up in the table
    let cid = QueueID((chn.uint64 shl 62) or qid.uint64)
    (cid, qt.getOrDefault(cid, QValRef(nil)))

  if not scd.isNil:
    for i in 0 ..< scd.state.len:
      let (left, right) = scd.state[i]
      if left == 0:
        # Queue `i` is empty
        discard
      elif left <= right:
        # Contiguous interval, walk it downwards
        for j in right.countDown left:
          yield kvp(i, j)
      else:
        # Wrapped-around interval: upper part down to 1, then from the
        # wrap value down to `left`
        for j in right.countDown QueueID(1):
          yield kvp(i, j)
        for j in scd.ctx.q[i].wrap.countDown left:
          yield kvp(i, j)
|
|
|
|
|
|
|
|
proc fifos(qt: QTabRef; scd: QidSchedRef): seq[seq[(QueueID,QValRef)]] =
  ## Group the `walkFifo()` stream into one sub-sequence per channel, the
  ## channel number being encoded in the two top bits of each queue ID.
  var topChn = -1
  for (qid,val) in qt.walkFifo scd:
    let chn = int(qid.uint64 shr 62)
    # Open (possibly empty) channel buckets up to and including `chn`
    while topChn < chn:
      result.add newSeq[(QueueID,QValRef)]()
      topChn.inc
    result[^1].add (qid,val)
|
2023-08-25 22:53:59 +00:00
|
|
|
|
2023-09-05 13:57:20 +00:00
|
|
|
func sortedPairs(qt: QTabRef): seq[(QueueID,QValRef)] =
  ## Table content as a pair list, sorted by ascending queue ID.
  var ids = qt.keys.toSeq.mapIt(it.uint64)
  ids.sort
  ids.mapIt((it.QueueID, qt[it.QueueID]))
|
2023-08-25 22:53:59 +00:00
|
|
|
|
|
|
|
func flatten(a: seq[seq[(QueueID,QValRef)]]): seq[(QueueID,QValRef)] =
  ## Concatenate the per-channel pair lists into a single list.
  for sub in a:
    for kvp in sub:
      result.add kvp
|
|
|
|
|
|
|
|
func pp(val: QValRef): string =
  ## Pretty print an entry as `<fid>` or `<fid>:<width>`; `ø` when nil.
  if val.isNil:
    "ø"
  elif val.width == 0:
    $val.fid.uint64
  else:
    $val.fid.uint64 & ":" & $val.width
|
|
|
|
|
|
|
|
func pp(kvp: (QueueID,QValRef)): string =
  ## Pretty print a key/value pair as `<qid>=<value>`.
  let (qid, val) = kvp
  qid.pp & "=" & val.pp
|
|
|
|
|
2023-08-30 17:08:39 +00:00
|
|
|
func pp(qt: QTabRef): string =
  ## Pretty print the whole table, entries sorted by queue ID.
  var parts: seq[string]
  for w in qt.sortedPairs:
    parts.add w.pp
  "{" & parts.join(",") & "}"
|
|
|
|
|
|
|
|
func pp(qt: QTabRef; scd: QidSchedRef): string =
  ## Pretty print the scheduler queues as `[..,..]`, one chunk per
  ## channel, `ø` standing for an empty channel.
  var chunks: seq[string]
  for queue in qt.fifos scd:
    if queue.len == 0:
      chunks.add "ø"
    else:
      chunks.add queue.mapIt(it.pp).join(",")
  "[" & chunks.join(",") & "]"
|
|
|
|
|
|
|
|
# ------------------
|
|
|
|
|
|
|
|
proc exec(db: QTabRef; serial: int; instr: seq[QidAction]; relax: bool): bool =
  ## Apply the scheduler action list `instr` to the mock database `db`.
  ## `SaveQid` stores a fresh entry labelled `FilterID(serial)`; a run of
  ## `HoldQid` actions followed by `DequQid` merges the held intervals
  ## into a single entry. With `relax` set, missing entries and merely
  ## increasing (rather than strictly contiguous) filter IDs are
  ## tolerated. Returns true unless an `xCheck` fails.
  var
    saved: bool                      # `SaveQid` must occur exactly once
    hold: seq[(QueueID,QueueID)]     # intervals collected by `HoldQid`

  for act in instr:
    case act.op:
    of Oops:
      # Instruction list must not contain the error marker
      xCheck act.op != Oops

    of SaveQid:
      xCheck not saved
      db[act.qid] = QValRef(fid: FilterID(serial))
      saved = true

    of DelQid:
      # Only existing entries may be deleted
      let val = db.getOrDefault(act.qid, QValRef(nil))
      xCheck not val.isNil
      db.del act.qid

    of HoldQid:
      # Remember the interval; consumed by a later `DequQid`
      hold.add (act.qid, act.xid)

    of DequQid:
      # Merge all held intervals into a single entry stored at `act.qid`
      var merged = QValRef(nil)
      for w in hold:
        for qid in w[0] .. w[1]:
          let val = db.getOrDefault(qid, QValRef(nil))
          if not relax:
            xCheck not val.isNil
          if not val.isNil:
            if merged.isNil:
              merged = val
            else:
              # Filter IDs must be ordered, strictly contiguous unless
              # in relaxed mode
              if relax:
                xCheck merged.fid + merged.width + 1 <= val.fid
              else:
                xCheck merged.fid + merged.width + 1 == val.fid
              merged.width += val.width + 1
          db.del qid
      if not relax:
        xCheck not merged.isNil
      if not merged.isNil:
        db[act.qid] = merged
      hold.setLen(0)

  xCheck saved               # exactly one `SaveQid` must have occurred
  xCheck hold.len == 0       # all held intervals must have been consumed

  true
|
|
|
|
|
|
|
|
|
|
|
|
proc validate(db: QTabRef; scd: QidSchedRef; serial: int; relax: bool): bool =
  ## Verify that the round-robin queues in `db` are consecutive and in the
  ## right order.
  var
    step = 1u                          # per-channel item distance
    lastVal = FilterID(serial+1)       # upper bound of the first entry

  for chn,queue in db.fifos scd:
    step *= scd.ctx.q[chn].width + 1 # defined by schedule layout
    for kvp in queue:
      let val = kvp[1]
      if not relax:
        xCheck not val.isNil             # Entries must exist
        xCheck val.fid + step == lastVal # Item distances must match
      if not val.isNil:
        xCheck val.fid + step <= lastVal # Item distances must decrease
        xCheck val.width + 1 == step     # Must correspond to `step` size
        lastVal = val.fid

  # Compare database against expected fill state
  if relax:
    xCheck db.len <= scd.len
  else:
    xCheck db.len == scd.len

  proc qFn(qid: QueueID): FilterID =
    # Lookup callback for `scd.le()`/`scd.eq()`; default `FilterID(0)`
    # when the entry is missing
    let val = db.getOrDefault(qid, QValRef(nil))
    if not val.isNil:
      return val.fid

  # Test filter ID selection
  var lastFid = FilterID(serial + 1)

  xCheck scd.le(lastFid + 0, qFn) == scd[0] # Test fringe condition
  xCheck scd.le(lastFid + 1, qFn) == scd[0] # Test fringe condition

  for (qid,val) in db.fifos(scd).flatten:
    xCheck scd.eq(val.fid, qFn) == qid
    xCheck scd.le(val.fid, qFn) == qid
    # Filter IDs strictly between two entries resolve to the lower entry
    # for `le()` and to no entry at all for `eq()`
    for w in val.fid+1 ..< lastFid:
      xCheck scd.le(w, qFn) == qid
      xCheck scd.eq(w, qFn) == QueueID(0)
    lastFid = val.fid

  if FilterID(1) < lastFid: # Test fringe condition
    xCheck scd.le(lastFid - 1, qFn) == QueueID(0)

  if FilterID(2) < lastFid: # Test fringe condition
    xCheck scd.le(lastFid - 2, qFn) == QueueID(0)

  true
|
2023-08-25 22:53:59 +00:00
|
|
|
|
2023-05-11 14:25:29 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Public test function
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2023-08-25 22:53:59 +00:00
|
|
|
proc testVidRecycleLists*(noisy = true; seed = 42): bool =
  ## Transcode VID lists held in `AristoDb` descriptor
  ##
  var td = TesterDesc.init seed
  let db = AristoDbRef.init()

  # Add some random numbers
  block:
    let first = td.vidRand()
    db.vidDispose first

    var
      expectedVids = 1
      count = 1
    # Feed some numbers used and some discarded (only IDs smaller than
    # `first` grow the recycle list)
    while expectedVids < 5 or count < 5 + expectedVids:
      count.inc
      let vid = td.vidRand()
      expectedVids += (vid < first).ord
      db.vidDispose vid

    xCheck db.vGen.len == expectedVids:
      noisy.say "***", "vids=", db.vGen.len, " discarded=", count-expectedVids

  # Serialise/deserialise
  block:
    let dbBlob = db.vGen.blobify

    # Deserialise
    let
      db1 = AristoDbRef.init()
      rc = dbBlob.deblobify seq[VertexID]
    xCheckRc rc.error == 0
    db1.top.final.vGen = rc.value

    # Round trip must preserve the ID generator state
    xCheck db.vGen == db1.vGen

  # Make sure that recycled numbers are fetched first
  let topVid = db.vGen[^1]
  while 1 < db.vGen.len:
    let w = db.vidFetch()
    xCheck w < topVid
  xCheck db.vGen.len == 1 and db.vGen[0] == topVid

  # Get some consecutive vertex IDs
  for n in 0 .. 5:
    let w = db.vidFetch()
    xCheck w == topVid + n
    xCheck db.vGen.len == 1

  # Repeat last test after clearing the cache
  db.top.final.vGen.setLen(0)
  for n in 0 .. 5:
    let w = db.vidFetch()
    xCheck w == VertexID(LEAST_FREE_VID) + n # VertexID(1) is default root ID
    xCheck db.vGen.len == 1

  # Recycling and re-org tests
  func toVQ(a: seq[int]): seq[VertexID] = a.mapIt(VertexID(LEAST_FREE_VID+it))

  # Heuristic prevents from re-org
  xCheck @[8, 7, 3, 4, 5, 9]    .toVQ.vidReorg == @[8, 7, 3, 4, 5, 9]   .toVQ
  xCheck @[8, 7, 6, 3, 4, 5, 9] .toVQ.vidReorg == @[8, 7, 6, 3, 4, 5, 9].toVQ
  xCheck @[5, 4, 3, 7]          .toVQ.vidReorg == @[5, 4, 3, 7]         .toVQ
  xCheck @[5]                   .toVQ.vidReorg == @[5]                  .toVQ
  xCheck @[3, 5]                .toVQ.vidReorg == @[3, 5]               .toVQ
  xCheck @[4, 5]                .toVQ.vidReorg == @[4, 5]               .toVQ

  # performing re-org
  xCheck @[5, 7, 3, 4, 8, 9]    .toVQ.vidReorg == @[5, 4, 3, 7]         .toVQ
  xCheck @[5, 7, 6, 3, 4, 8, 9] .toVQ.vidReorg == @[3]                  .toVQ
  xCheck @[3, 4, 5, 7]          .toVQ.vidReorg == @[5, 4, 3, 7]         .toVQ

  # Empty list must stay empty
  xCheck newSeq[VertexID](0).vidReorg().len == 0

  true
|
|
|
|
|
|
|
|
|
|
|
|
proc testQidScheduler*(
    noisy = true;
    layout = QidSlotLyo;
    sampleSize = QidSample;
    reorgPercent = 40
      ): bool =
  ## Run the queue ID scheduler for `sampleSize` cycles, validating the
  ## mock database against the schedule after every step; then delete
  ## `reorgPercent` percent of the entries and keep adding items, first
  ## in relaxed and finally again in strict mode.
  ##
  ## Example table for `QidSlotLyo` layout after 10_000 cycles
  ## ::
  ##    QueueID |       QValRef      |
  ##            | FilterID   | width | comment
  ##    --------+------------+-------+----------------------------------
  ##    %a      |      10000 |     0 | %a stands for QueueID(10)
  ##    %9      |       9999 |     0 |
  ##    %8      |       9998 |     0 |
  ##    %7      |       9997 |     0 |
  ##            |            |       |
  ##    %1:9    |       9993 |     3 | 9993 + 3 + 1 => 9997, see %7
  ##    %1:8    |       9989 |     3 |
  ##    %1:7    |       9985 |     3 |
  ##    %1:6    |       9981 |     3 | %1:6 stands for QueueID((1 shl 62) + 6)
  ##            |            |       |
  ##    %2:9    |       9961 |    19 | 9961 + 19 + 1 => 9981, see %1:6
  ##    %2:8    |       9941 |    19 |
  ##    %2:7    |       9921 |    19 |
  ##    %2:6    |       9901 |    19 |
  ##    %2:5    |       9881 |    19 |
  ##    %2:4    |       9861 |    19 |
  ##    %2:3    |       9841 |    19 |
  ##            |            |       |
  ##    %3:2    |       9721 |   119 | 9721 + 119 + 1 => 9871, see %2:3
  ##    %3:1    |       9601 |   119 |
  ##    %3:a    |       9481 |   119 |
  ##
  var
    debug = false # or true
  let
    list = newTable[QueueID,QValRef]()
    scd = QidSchedRef.init layout
    ctx = scd.ctx.q

  proc show(serial = 0; exec: seq[QidAction] = @[]) =
    # Debugging aid: dump scheduler state, table, and fifo view
    var s = ""
    if 0 < serial:
      s &= "n=" & $serial
    if 0 < exec.len:
      s &= " exec=" & exec.pp
    s &= "" &
      "\n state=" & scd.state.pp &
      "\n list=" & list.pp &
      "\n fifo=" & list.pp(scd) &
      "\n"
    noisy.say "***", s

  if debug:
    noisy.say "***", "sampleSize=", sampleSize,
      " ctx=", ctx, " stats=", scd.ctx.stats

  # Fill the schedule, strict validation after each added item
  for n in 1 .. sampleSize:
    let w = scd.addItem()
    let execOk = list.exec(serial=n, instr=w.exec, relax=false)
    xCheck execOk
    scd[] = w.fifo[]
    let validateOk = list.validate(scd, serial=n, relax=false)
    xCheck validateOk:
      show(serial=n, exec=w.exec)

    let fifoID = list.fifos(scd).flatten.mapIt(it[0])
    for j in 0 ..< list.len:
      # Check fifo order
      xCheck fifoID[j] == scd[j]:
        noisy.say "***", "n=", n, " exec=", w.exec.pp,
          " fifoID[", j, "]=", fifoID[j].pp,
          " scd[", j, "]=", scd[j].pp,
          "\n fifo=", list.pp scd
      # Check random access and reverse
      let qid = scd[j]
      xCheck j == scd[qid]

    if debug:
      show(exec=w.exec)

  # -------------------

  # Mark deleted some entries from database
  var
    nDel = (list.len * reorgPercent) div 100
    delIDs: HashSet[QueueID]
  for n in 0 ..< nDel:
    delIDs.incl scd[n]

  # Delete these entries
  let fetch = scd.fetchItems nDel
  for act in fetch.exec:
    xCheck act.op == HoldQid
    for qid in act.qid .. act.xid:
      xCheck qid in delIDs
      xCheck list.hasKey qid
      delIDs.excl qid
      list.del qid

  # Fetched instructions must have covered exactly the marked IDs
  xCheck delIDs.len == 0
  scd[] = fetch.fifo[]

  # -------------------

  # Continue adding items
  for n in sampleSize + 1 .. 2 * sampleSize:
    let w = scd.addItem()
    let execOk = list.exec(serial=n, instr=w.exec, relax=true)
    xCheck execOk
    scd[] = w.fifo[]
    let validateOk = list.validate(scd, serial=n, relax=true)
    xCheck validateOk:
      show(serial=n, exec=w.exec)

  # Continue adding items, now strictly
  for n in 2 * sampleSize + 1 .. 3 * sampleSize:
    let w = scd.addItem()
    let execOk = list.exec(serial=n, instr=w.exec, relax=false)
    xCheck execOk
    scd[] = w.fifo[]
    let validateOk = list.validate(scd, serial=n, relax=false)
    xCheck validateOk

  if debug:
    show()

  true
|
2023-05-11 14:25:29 +00:00
|
|
|
|
2023-11-08 12:18:32 +00:00
|
|
|
|
|
|
|
proc testShortKeys*(
    noisy = true;
      ): bool =
  ## Check for some pathological cases
  func x(s: string): Blob = s.hexToSeqByte           # hex -> byte blob
  func k(s: string): HashKey = HashKey.fromBytes(s.x).value

  # Triples of (key, value, expected state root after adding the pair)
  let samples = [
    # From InvalidBlocks/bc4895-withdrawals/twoIdenticalIndex.json
    [("80".x,
      "da808094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
      "27f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973".k),
     ("01".x,
      "da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
      "81eac5f476f48feb289af40ee764015f6b49036760438ea45df90d5342b6ae61".k),
     ("02".x,
      "da018094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
      "463769ae507fcc6d6231c8888425191c5622f330fdd4b78a7b24c4521137b573".k),
     ("03".x,
      "da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x,
      "a95b9a7b58a6b3cb4001eb0be67951c5517141cb0183a255b5cae027a7b10b36".k)]]

  let gossip = false # or noisy

  for n,sample in samples:
    let sig = merkleSignBegin()
    var inx = -1
    for (k,v,r) in sample:
      inx.inc
      sig.merkleSignAdd(k,v)
      gossip.say "*** testShortkeys (1)", "n=", n, " inx=", inx,
        "\n k=", k.toHex, " v=", v.toHex,
        "\n r=", r.pp(sig),
        "\n ", sig.pp(),
        "\n"
      # Verify the intermediate state root after each added pair
      let w = sig.merkleSignCommit().value
      gossip.say "*** testShortkeys (2)", "n=", n, " inx=", inx,
        "\n k=", k.toHex, " v=", v.toHex,
        "\n r=", r.pp(sig),
        "\n R=", w.pp(sig),
        "\n ", sig.pp(),
        "\n ----------------",
        "\n"
      let rc = sig.db.check
      xCheckRc rc.error == (0,0)
      xCheck r == w

  true
|
|
|
|
|
2023-05-11 14:25:29 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# End
|
|
|
|
# ------------------------------------------------------------------------------
|