# Nimbus
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.

## Aristo (aka Patricia) DB records merge test

import
  std/[algorithm, hashes, sequtils, sets, strutils, tables],
  eth/common,
  results,
  unittest2,
  ../../nimbus/sync/protocol,
  ../../nimbus/db/aristo,
  ../../nimbus/db/aristo/[
    aristo_debug,
    aristo_desc,
    aristo_desc/desc_backend,
    aristo_hashify,
    aristo_init/memory_db,
    aristo_init/rocks_db,
    aristo_persistent,
    aristo_transcode,
    aristo_vid],
  ./test_helpers

const
  BlindHash = EmptyBlob.hash

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

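# Implementation note: `!&` mixes a value into a hash and `!$` finishes the
# computation (both from `std/hashes`). Table entries are folded in ascending
# `VertexID` order so the digest does not depend on the tables' internal
# layout.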
func hash(filter: FilterRef): Hash =
  ## Unique hash/filter -- cannot use de/blobify as the expressions
  ## `filter.blobify` and `filter.blobify.value.deblobify.value.blobify` are
  ## not necessarily the same binaries due to unsorted tables.
  ##
  var h = BlindHash
  if not filter.isNil:
    h = h !& filter.src.ByteArray32.hash
    h = h !& filter.trg.ByteArray32.hash

    for w in filter.vGen.vidReorg:
      h = h !& w.uint64.hash

    for w in filter.sTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
      let data = filter.sTab.getOrVoid(w).blobify.get(otherwise = EmptyBlob)
      h = h !& (w.uint64.toBytesBE.toSeq & data).hash

    for w in filter.kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
      let data = filter.kMap.getOrVoid(w).ByteArray32.toSeq
      h = h !& (w.uint64.toBytesBE.toSeq & data).hash

  !$h

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

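# `mergeData()` below replays one work item on the given database: register
# the root key (when a proof is present), merge the proof nodes, merge the
# leaf key/value pairs, and finally rebuild the Merkle keys via `hashify()`.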
proc mergeData(
    db: AristoDbRef;
    rootKey: HashKey;
    rootVid: VertexID;
    proof: openArray[SnapProof];
    leafs: openArray[LeafTiePayload];
    noisy: bool;
      ): bool =
  ## Simplified loop body of `test_mergeProofAndKvpList()`
  if 0 < proof.len:
    let rc = db.merge(rootKey, rootVid)
    if rc.isErr:
      check rc.error == AristoError(0)
      return

    let proved = db.merge(proof, rc.value)
    if proved.error notin {AristoError(0),MergeHashKeyCachedAlready}:
      check proved.error in {AristoError(0),MergeHashKeyCachedAlready}
      return

  let merged = db.merge leafs
  if merged.error notin {AristoError(0), MergeLeafPathCachedAlready}:
    check merged.error in {AristoError(0), MergeLeafPathCachedAlready}
    return

  block:
    let rc = db.hashify # (noisy, true)
    if rc.isErr:
      when true: # and false:
        noisy.say "***", "dataMerge(9)",
          " nLeafs=", leafs.len,
          "\n cache dump\n ", db.pp,
          "\n backend dump\n ", db.to(TypedBackendRef).pp(db)
      check rc.error == (VertexID(0),AristoError(0))
      return

  true

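# `verify()` compares a cache layer against a backend: every vertex found by
# walking the backend must match the layer's `sTab` entry, and the sizes of
# the `sTab`/`kMap` tables must agree on both sides.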
proc verify(
    ly: LayerRef;                            # Database layer
    be: MemBackendRef|RdbBackendRef;         # Backend
    noisy: bool;
      ): bool =
  ## Compare the contents of a cache layer against a backend

  let
    beSTab = be.walkVtx.toSeq.mapIt((it[1],it[2])).toTable
    beKMap = be.walkKey.toSeq.mapIt((it[1],it[2])).toTable

  for vid in beSTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
    let
      nVtx = ly.sTab.getOrVoid vid
      mVtx = beSTab.getOrVoid vid
    if not nVtx.isValid and not mVtx.isValid:
      check nVtx != VertexRef(nil)
      check mVtx != VertexRef(nil)
      return
    if nVtx != mVtx:
      noisy.say "***", "verify",
        " beType=", be.typeof,
        " vid=", vid.pp,
        " nVtx=", nVtx.pp,
        " mVtx=", mVtx.pp
      check nVtx == mVtx
      return

  if beSTab.len != ly.sTab.len or
     beKMap.len != ly.kMap.len:
    check beSTab.len == ly.sTab.len
    check beKMap.len == ly.kMap.len
    return

  true

# -----------

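# Filter bookkeeping: `collectFilter()` stores a filter on the backend under a
# synthetic `QueueID` and records its digest, `verifyFiltersImpl()` later
# walks the backend filters and checks them against the recorded digests.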
proc collectFilter(
    db: AristoDbRef;
    filter: FilterRef;
    tab: var Table[QueueID,Hash];
    noisy: bool;
      ): bool =
  ## Store filter on permanent BE and register digest
  if not filter.isNil:
    let
      fid = QueueID(7 * (tab.len + 1)) # just some number
      be = db.backend
      tx = be.putBegFn()

    be.putFilFn(tx, @[(fid,filter)])
    let endOk = be.putEndFn tx
    if endOk != AristoError(0):
      check endOk == AristoError(0)
      return

    tab[fid] = filter.hash

  true

proc verifyFiltersImpl[T: MemBackendRef|RdbBackendRef](
    _: type T;
    db: AristoDbRef;
    tab: Table[QueueID,Hash];
    noisy: bool;
      ): bool =
  ## Compare stored filters against registered ones
  var n = 0
  for (_,fid,filter) in T.walkFilBe db:
    let
      filterHash = filter.hash
      registered = tab.getOrDefault(fid, BlindHash)
    if registered == BlindHash:
      check (fid,registered) != (0,BlindHash)
      return
    if filterHash != registered:
      noisy.say "***", "verifyFiltersImpl",
        " n=", n+1,
        " fid=", fid.pp,
        " filterHash=", filterHash.int.toHex,
        " registered=", registered.int.toHex
      check (fid,filterHash) == (fid,registered)
      return
    n.inc

  if n != tab.len:
    check n == tab.len
    return

  true

proc verifyFilters(
    db: AristoDbRef;
    tab: Table[QueueID,Hash];
    noisy: bool;
      ): bool =
  ## Wrapper, dispatching on the backend type
  let
    be = db.to(TypedBackendRef)
    kind = (if be.isNil: BackendVoid else: be.kind)
  case kind:
  of BackendMemory:
    return MemBackendRef.verifyFiltersImpl(db, tab, noisy)
  of BackendRocksDB:
    return RdbBackendRef.verifyFiltersImpl(db, tab, noisy)
  else:
    discard
  check kind == BackendMemory or kind == BackendRocksDB

# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------

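# `test_backendConsistency()` replays the same merge sequence on three
# databases: `ndb` (no backend, used as reference), `mdb` (memory backend),
# and optionally `rdb` (RocksDB backend). After each work item it checks that
# cache layers, saved backends and stored filters all agree.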
proc test_backendConsistency*(
    noisy: bool;
    list: openArray[ProofTrieData];          # Test data
    rdbPath: string;                         # Rocks DB storage directory
    resetDb = false;
    doRdbOk = true;
      ): bool =
  ## Import accounts and check that the backends remain consistent
  var
    filTab: Table[QueueID,Hash]              # Filter register
    ndb = AristoDbRef()                      # Reference cache
    mdb = AristoDbRef()                      # Memory backend database
    rdb = AristoDbRef()                      # Rocks DB backend database
    rootKey = HashKey.default
    count = 0

  defer:
    rdb.finish(flush=true)

  for n,w in list:
    if w.root != rootKey or resetDb:
      rootKey = w.root
      count = 0
      ndb = newAristoDbRef BackendVoid
      mdb = newAristoDbRef BackendMemory
      if doRdbOk:
        if not rdb.backend.isNil: # ignore bootstrap
          let verifyFiltersOk = rdb.verifyFilters(filTab, noisy)
          if not verifyFiltersOk:
            check verifyFiltersOk
            return
          filTab.clear
        rdb.finish(flush=true)
        let rc = newAristoDbRef(BackendRocksDB,rdbPath)
        if rc.isErr:
          check rc.error == 0
          return
        rdb = rc.value
    count.inc

    check ndb.backend.isNil
    check not mdb.backend.isNil
    check doRdbOk or not rdb.backend.isNil

    when true and false:
      noisy.say "***", "beCon(1) <", n, "/", list.len-1, ">", " groups=", count

    block:
      let
        rootVid = VertexID(1)
        leafs = w.kvpLst.mapRootVid VertexID(1) # for merging it into main trie

      block:
        let ndbOk = ndb.mergeData(
          rootKey, rootVid, w.proof, leafs, noisy=false)
        if not ndbOk:
          check ndbOk
          return
      block:
        let mdbOk = mdb.mergeData(
          rootKey, rootVid, w.proof, leafs, noisy=false)
        if not mdbOk:
          check mdbOk
          return
      if doRdbOk: # optional
        let rdbOk = rdb.mergeData(
          rootKey, rootVid, w.proof, leafs, noisy=false)
        if not rdbOk:
          check rdbOk
          return

      when true and false:
        noisy.say "***", "beCon(2) <", n, "/", list.len-1, ">",
          " groups=", count,
          "\n cache dump\n ", ndb.pp,
          "\n backend dump\n ", ndb.to(TypedBackendRef).pp(ndb),
          "\n -------------",
          "\n mdb cache\n ", mdb.pp,
          "\n mdb backend\n ", mdb.to(TypedBackendRef).pp(ndb),
          "\n -------------",
          "\n rdb cache\n ", rdb.pp,
          "\n rdb backend\n ", rdb.to(TypedBackendRef).pp(ndb),
          "\n -------------"

    when true and false:
      noisy.say "***", "beCon(4) <", n, "/", list.len-1, ">", " groups=", count

    var
      mdbPreSaveCache, mdbPreSaveBackend: string
      rdbPreSaveCache, rdbPreSaveBackend: string
    when true: # and false:
      #mdbPreSaveCache = mdb.pp
      #mdbPreSaveBackend = mdb.to(MemBackendRef).pp(ndb)
      rdbPreSaveCache = rdb.pp
      rdbPreSaveBackend = rdb.to(RdbBackendRef).pp(ndb)

    # Provide filter, store filter on permanent BE, and register filter digest
    block:
      let rc = mdb.stow(persistent=false, dontHashify=true, chunkedMpt=true)
      if rc.isErr:
        check rc.error == (0,0)
        return
      let collectFilterOk = rdb.collectFilter(mdb.roFilter, filTab, noisy)
      if not collectFilterOk:
        check collectFilterOk
        return

    # Store onto backend database
    block:
      #noisy.say "***", "db-dump\n ", mdb.pp
      let rc = mdb.stow(persistent=true, dontHashify=true, chunkedMpt=true)
      if rc.isErr:
        check rc.error == (0,0)
        return

    if doRdbOk:
      let rc = rdb.stow(persistent=true, dontHashify=true, chunkedMpt=true)
      if rc.isErr:
        check rc.error == (0,0)
        return

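    # After stowing, the reference cache `ndb.top` must match what can be
    # walked back from the memory and RocksDB backends.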
    if not ndb.top.verify(mdb.to(MemBackendRef), noisy):
      when true and false:
        noisy.say "***", "beCon(4) <", n, "/", list.len-1, ">",
          " groups=", count,
          "\n ndb cache\n ", ndb.pp,
          "\n ndb backend=", ndb.backend.isNil.not,
          #"\n -------------",
          #"\n mdb pre-save cache\n ", mdbPreSaveCache,
          #"\n mdb pre-save backend\n ", mdbPreSaveBackend,
          "\n -------------",
          "\n mdb cache\n ", mdb.pp,
          "\n mdb backend\n ", mdb.to(TypedBackendRef).pp(ndb),
          "\n -------------"
      return

    if doRdbOk:
      if not ndb.top.verify(rdb.to(RdbBackendRef), noisy):
        when true and false:
          noisy.say "***", "beCon(4) <", n, "/", list.len-1, ">",
            " groups=", count,
            "\n ndb cache\n ", ndb.pp,
            "\n ndb backend=", ndb.backend.isNil.not,
            "\n -------------",
            "\n rdb pre-save cache\n ", rdbPreSaveCache,
            "\n rdb pre-save backend\n ", rdbPreSaveBackend,
            "\n -------------",
            "\n rdb cache\n ", rdb.pp,
            "\n rdb backend\n ", rdb.to(TypedBackendRef).pp(ndb),
            #"\n -------------",
            #"\n mdb cache\n ", mdb.pp,
            #"\n mdb backend\n ", mdb.to(TypedBackendRef).pp(ndb),
            "\n -------------"
        return

    when true and false:
      noisy.say "***", "beCon(9) <", n, "/", list.len-1, ">", " groups=", count

    # Finally ...
    block:
      let verifyFiltersOk = rdb.verifyFilters(filTab, noisy)
      if not verifyFiltersOk:
        check verifyFiltersOk
        return

  true

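# A minimal usage sketch (assuming a `unittest2` suite and placeholder
# helpers `sampleList` and `sampleRdbPath` which are not part of this module):
#
#   suite "Aristo backend consistency":
#     test "Memory and RocksDB backends against reference cache":
#       check noisy.test_backendConsistency(sampleList, sampleRdbPath)
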
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------