# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.

## Aristo (aka Patricia) DB records merge test
import
  std/tables,
  eth/common,
  stew/[byteutils, results],
  unittest2,
  ../../nimbus/db/aristo/aristo_init/aristo_rocksdb,
  ../../nimbus/db/aristo/[
    aristo_check, aristo_desc, aristo_debug, aristo_get, aristo_hashify,
    aristo_init, aristo_hike, aristo_layer, aristo_merge],
  ./test_helpers
type
  KnownHasherFailure* = seq[(string,(int,AristoError))]
    ## Table of expected `hashify()` failures, keyed by test ID.
    ## Entry layout: (<sample-name> & "#" <instance>, (<vertex-id>,<error-symbol>))

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc pp(w: tuple[merged: int, dups: int, error: AristoError]): string =
  ## Pretty printer for a merge result triplet. The `error` field is only
  ## shown when it differs from the zero (i.e. "no error") value.
  result = "(merged: " & $w.merged & ", dups: " & $w.dups
  if w.error != AristoError(0):
    result &= ", error: " & $w.error
  result &= ")"
proc mergeStepwise(
    db: AristoDbRef;                     # Database under test
    leafs: openArray[LeafTiePayload];    # Leaf records to merge, one by one
    noisy = false;                       # Enable debug logging
      ): tuple[merged: int, dups: int, error: AristoError] =
  ## Merge `leafs` into `db` one entry at a time, re-checking the leaf
  ## table, payload round-trip and cache hashes after every single step.
  ## Returns the count of merged and duplicate entries together with the
  ## first error encountered (zero if none). Stops early on failure.
  let
    lTabLen = db.top.lTab.len
  var
    (merged, dups, error) = (0, 0, AristoError(0))

  for n,leaf in leafs:
    var
      event = false                # flip manually to trace a particular step
      dumpOk = false or event
      stopOk = false

    when true: # and false:
      noisy.say "***", "step <", n, "/", leafs.len-1, "> leaf=", leaf.pp(db)

    let
      preState = db.pp
      hike = db.merge leaf
      ekih = leaf.leafTie.hikeUp(db)   # read the leaf back via its path

    case hike.error:
    of AristoError(0):
      merged.inc
    of MergeLeafPathCachedAlready:
      dups.inc
    else:
      error = hike.error
      dumpOk = true
      stopOk = true

    # The merged leaf must be retrievable again with the expected payload
    if ekih.error != AristoError(0) or
       ekih.legs[^1].wp.vtx.lData.blob != leaf.payload.blob:
      dumpOk = true
      stopOk = true

    # Verify hash labels on the cache; keep the first error seen
    let hashesOk = block:
      let rc = db.checkCache(relax=true)
      if rc.isOk:
        (VertexID(0),AristoError(0))
      else:
        dumpOk = true
        stopOk = true
        if error == AristoError(0):
          error = rc.error[1]
        rc.error

    # The leaf table may only grow by the number of merges so far
    if db.top.lTab.len < lTabLen + merged:
      dumpOk = true

    if dumpOk:
      noisy.say "***", "<", n, "/", leafs.len-1, ">",
        " merged=", merged,
        " dups=", dups,
        " leaf=", leaf.pp(db),
        "\n --------",
        "\n hike\n ", hike.pp(db),
        "\n ekih\n ", ekih.pp(db),
        "\n pre-DB\n ", preState,
        "\n --------",
        "\n cache\n ", db.pp,
        "\n backend\n ", db.to(RdbBackendRef).pp(db),
        "\n --------"

    check hike.error in {AristoError(0), MergeLeafPathCachedAlready}
    check ekih.error == AristoError(0)
    check hashesOk == (VertexID(0),AristoError(0))

    if ekih.legs.len == 0:
      check 0 < ekih.legs.len
    elif ekih.legs[^1].wp.vtx.vType != Leaf:
      check ekih.legs[^1].wp.vtx.vType == Leaf
    elif hike.error != MergeLeafPathCachedAlready:
      check ekih.legs[^1].wp.vtx.lData.blob.toHex == leaf.payload.blob.toHex

    if db.top.lTab.len < lTabLen + merged:
      check lTabLen + merged <= db.top.lTab.len
      error = GenericError
      stopOk = true # makes no sense to go on

    if stopOk:
      noisy.say "***", "<", n, "/", leafs.len-1, "> stop"
      break

  (merged,dups,error)

# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------

proc test_mergeKvpList*(
    noisy: bool;
    list: openArray[ProofTrieData];
    rdbPath: string;                     # Rocks DB storage directory
    resetDb = false;
      ): bool =
  ## For every sample in `list`, merge its key-value pairs into the main
  ## trie of a RocksDB backed Aristo DB, then hashify, verify the cache
  ## and save to the backend. Returns `true` on success, `false` after
  ## flagging the first failed `check`.
  var
    db = AristoDbRef()
  defer:
    db.finish(flush=true)
  for n,w in list:
    if resetDb or db.top.isNil:
      # (Re-)open a fresh RocksDB backed database
      db.finish(flush=true)
      db = block:
        let rc = AristoDbRef.init(BackendRocksDB,rdbPath)
        if rc.isErr:
          check rc.error == AristoError(0)
          return
        rc.value
    let
      lstLen = list.len
      lTabLen = db.top.lTab.len
      leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie

    when true and false:
      if true and 40 <= n:
        noisy.say "*** kvp(1)", "<", n, "/", lstLen-1, ">",
          " nLeafs=", leafs.len,
          "\n cache\n ", db.pp,
          "\n backend\n ", db.to(RdbBackendRef).pp(db),
          "\n --------"
    let
      added = db.merge leafs
      #added = db.mergeStepwise(leafs) #, noisy=40 <= n)

    if added.error != AristoError(0):
      check added.error == AristoError(0)
      return
    # There might be an extra leaf in the cache after inserting a Branch
    # which forks a previous leaf node and a new one.
    check lTabLen + added.merged <= db.top.lTab.len
    check added.merged + added.dups == leafs.len

    let
      preDb = db.pp

    block:
      let rc = db.hashify # (noisy=(0 < n))
      if rc.isErr: # or true:
        noisy.say "*** kvp(2)", "<", n, "/", lstLen-1, ">",
          " added=", added,
          "\n pre-DB\n ", preDb,
          "\n --------",
          "\n cache\n ", db.pp,
          "\n backend\n ", db.to(RdbBackendRef).pp(db),
          "\n --------"
      if rc.isErr:
        check rc.error == (VertexID(0),AristoError(0)) # force message
        return

    when true and false:
      noisy.say "*** kvp(3)", "<", n, "/", lstLen-1, ">",
        "\n cache\n ", db.pp,
        "\n backend\n ", db.to(RdbBackendRef).pp(db),
        "\n --------"

    block:
      let rc = db.checkCache()
      if rc.isErr:
        noisy.say "*** kvp(4)", "<", n, "/", lstLen-1, "> db dump",
          "\n pre-DB\n ", preDb,
          "\n --------",
          "\n cache\n ", db.pp,
          "\n backend\n ", db.to(RdbBackendRef).pp(db),
          "\n --------"
      if rc.isErr:
        check rc == Result[void,(VertexID,AristoError)].ok()
        return

    block:
      # Flush the cache to the RocksDB backend
      let rc = db.save
      if rc.isErr:
        check rc.error == (0,0)
        return

    when true and false:
      noisy.say "*** kvp(5)", "<", n, "/", lstLen-1, ">",
        "\n cache\n ", db.pp,
        "\n backend\n ", db.to(RdbBackendRef).pp(db),
        "\n --------"

    when true and false:
      noisy.say "*** kvp(9)", "sample ", n, "/", lstLen-1,
        " merged=", added.merged,
        " dup=", added.dups
  true
proc test_mergeProofAndKvpList*(
    noisy: bool;
    list: openArray[ProofTrieData];
    rdbPath: string;                     # Rocks DB storage directory
    resetDb = false;
    idPfx = "";
    oops: KnownHasherFailure = @[];
      ): bool =
  ## For every sample in `list`, merge proof nodes (when present) and
  ## key-value pairs into a RocksDB backed Aristo DB, then hashify and
  ## save. Known `hashify()` failures can be whitelisted via `oops`,
  ## keyed by the per-sample test ID. Returns `true` on success.
  let
    oopsTab = oops.toTable
  var
    db = AristoDbRef()
    rootKey = HashKey.default
    count = 0
  defer:
    db.finish(flush=true)
  for n,w in list:
    if resetDb or w.root != rootKey or w.proof.len == 0:
      # (Re-)open a fresh database whenever the sample root changes
      db.finish(flush=true)
      db = block:
        let rc = AristoDbRef.init(BackendRocksDB,rdbPath)
        if rc.isErr:
          check rc.error == 0
          return
        rc.value
      rootKey = w.root
      count = 0
    count.inc

    let
      testId = idPfx & "#" & $w.id & "." & $n
      lstLen = list.len
      sTabLen = db.top.sTab.len
      lTabLen = db.top.lTab.len
      leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie

    when true and false:
      noisy.say "***", "proofs(1) <", n, "/", lstLen-1, ">",
        " groups=", count, " nLeafs=", leafs.len,
        "\n cache\n ", db.pp,
        "\n backend\n ", db.to(RdbBackendRef).pp(db),
        "\n --------"

    var
      proved: tuple[merged: int, dups: int, error: AristoError]
      preDb: string
    if 0 < w.proof.len:
      # Register the root key, then merge the proof nodes
      let rc = db.merge(rootKey, VertexID(1))
      if rc.isErr:
        check rc.error == 0
        return

      preDb = db.pp
      proved = db.merge(w.proof, rc.value) # , noisy)

      check proved.error in {AristoError(0),MergeHashKeyCachedAlready}
      check w.proof.len == proved.merged + proved.dups
      check db.top.lTab.len == lTabLen
      check db.top.sTab.len <= proved.merged + sTabLen
      check proved.merged < db.top.pAmk.len

    when true and false:
      if 0 < w.proof.len:
        noisy.say "***", "proofs(2) <", n, "/", lstLen-1, ">",
          " groups=", count,
          " nLeafs=", leafs.len,
          " proved=", proved,
          "\n pre-DB\n ", preDb,
          "\n --------",
          "\n cache\n ", db.pp,
          "\n backend\n ", db.to(RdbBackendRef).pp(db),
          "\n --------"
        return

    let
      merged = db.merge leafs
      #merged = db.mergeStepwise(leafs, noisy=false)

    check db.top.lTab.len == lTabLen + merged.merged
    check merged.merged + merged.dups == leafs.len

    block:
      if merged.error notin {AristoError(0), MergeLeafPathCachedAlready}:
        noisy.say "***", "<", n, "/", lstLen-1, ">\n ", db.pp
        check merged.error in {AristoError(0), MergeLeafPathCachedAlready}
        return

    when true and false:
      noisy.say "***", "proofs(3) <", n, "/", lstLen-1, ">",
        " groups=", count, " nLeafs=", leafs.len, " merged=", merged,
        "\n cache\n ", db.pp,
        "\n backend\n ", db.to(RdbBackendRef).pp(db),
        "\n --------"

    block:
      let
        preDb = db.pp(xTabOk=false)
        rc = db.hashify() # noisy=true)

      # Handle known errors
      if oopsTab.hasKey testId:
        if rc.isOk:
          check rc.isErr
          return
        let oops = (VertexID(oopsTab[testId][0]), oopsTab[testId][1])
        if oops != rc.error:
          check oops == rc.error
          return

      # Otherwise, check for correctness
      elif rc.isErr:
        noisy.say "***", "proofs(4) <", n, "/", lstLen-1, ">",
          " testId=", testId,
          " groups=", count,
          "\n pre-DB",
          "\n ", preDb,
          "\n --------",
          "\n cache\n ", db.pp,
          "\n backend\n ", db.to(RdbBackendRef).pp(db),
          "\n --------"
        check rc.error == (VertexID(0),AristoError(0))
        return

    block:
      # Flush the cache to the RocksDB backend
      let rc = db.save
      if rc.isErr:
        check rc.error == (0,0)
        return

    when true and false:
      noisy.say "***", "proofs(5) <", n, "/", lstLen-1, ">",
        " groups=", count,
        "\n cache\n ", db.pp,
        "\n backend\n ", db.to(RdbBackendRef).pp(db),
        "\n --------"

    when true and false:
      noisy.say "***", "proofs(6) <", n, "/", lstLen-1, ">",
        " groups=", count, " proved=", proved.pp, " merged=", merged.pp
  true
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------