Remove obsolete tests (#2307)

* Remove `test_sync_snap`

why:
  Snap sync needs to be refactored. All the interesting database parts
  from this test suite have been recycled into `Aristo`

* Remove `test_rocksdb_timing`

* Update `all_tests`
Jordan Hrycaj 2024-06-06 09:29:38 +00:00 committed by GitHub
parent 0268093fcc
commit 1e65093b3e
24 changed files with 67 additions and 3319 deletions


@@ -12,8 +12,6 @@ import ./all_tests_macro
 cliBuilder:
 import ./test_code_stream,
 #./test_accounts_cache, -- does not compile
-#./test_sync_snap, -- temporarily suspended
-#./test_rocksdb_timing, -- probably redundant
 #./test_jwt_auth, -- rpc is Aristo incompatible
 ./test_gas_meter,
 ./test_memory,


@@ -18,13 +18,12 @@ import
 unittest2,
 ../nimbus/db/aristo/[aristo_desc, aristo_merge],
 ./replay/[pp, undump_accounts, undump_storages],
-./test_sync_snap/[snap_test_xx, test_types],
-./test_aristo/[test_filter, test_helpers, test_misc, test_tx]
+./test_aristo/[test_samples_xx, test_filter, test_helpers, test_misc, test_tx]
 const
 baseDir = [".", "..", ".."/"..", $DirSep]
 repoDir = [".", "tests", "nimbus-eth1-blobs"]
-subDir = ["replay", "test_sync_snap", "replay"/"snap"]
+subDir = ["replay", "test_aristo", "replay"/"snap"]
 # Reference file for finding the database directory
 sampleDirRefFile = "sample0.txt.gz"
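Aside: the three constant lists above are combined multiplicatively when locating a test sample on disk. A minimal sketch of that lookup, mirroring the `findFilePath` helper that appears further below (the proc name here is illustrative, not from the patch):

import std/os

# Try every <base>/<repo>/<sub>/<file> combination; first hit wins,
# an empty string is returned when nothing matches.
proc findSample(file: string): string =
  const
    baseDir = [".", "..", ".."/"..", $DirSep]
    repoDir = [".", "tests", "nimbus-eth1-blobs"]
    subDir  = ["replay", "test_aristo", "replay"/"snap"]
  for base in baseDir:
    for repo in repoDir:
      for sub in subDir:
        let path = base / repo / sub / file
        if path.fileExists:
          return path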


@@ -17,8 +17,8 @@ import
 aristo_hashify, aristo_hike, aristo_merge],
 ../../nimbus/db/kvstore_rocksdb,
 ../../nimbus/sync/protocol/snap/snap_types,
-../test_sync_snap/test_types,
-../replay/[pp, undump_accounts, undump_storages]
+../replay/[pp, undump_accounts, undump_storages],
+./test_samples_xx
 from ../../nimbus/sync/snap/range_desc
 import NodeKey, ByteArray32


@@ -10,9 +10,65 @@
 import
 std/os,
-./test_types
+eth/common
+type
+AccountsSample* = object
+name*: string ## sample name, also used as sub-directory for db separation
+file*: string
+firstItem*: int
+lastItem*: int
+CaptureSpecs* = object
+name*: string ## sample name, also used as sub-directory for db separation
+network*: NetworkId
+file*: string ## name of capture file
+numBlocks*: int ## Number of blocks to load
+SnapSyncSpecs* = object
+name*: string
+network*: NetworkId
+snapDump*: string
+tailBlocks*: string
+pivotBlock*: uint64
+nItems*: int
 const
+snapTest0* = AccountsSample(
+name: "sample0",
+file: "sample0.txt.gz",
+firstItem: 0,
+lastItem: high(int))
+snapTest1* = AccountsSample(
+name: "test1",
+file: snapTest0.file,
+lastItem: 0) # Only the first `snap/1` reply from the sample
+snapTest2* = AccountsSample(
+name: "sample1",
+file: "sample1.txt.gz",
+lastItem: high(int))
+snapTest3* = AccountsSample(
+name: "test3",
+file: snapTest2.file,
+lastItem: 0) # Only the first `snap/1` reply from the sample
+# Also for storage tests
+snapTest4* = AccountsSample(
+name: "sample2",
+file: "sample2.txt.gz",
+lastItem: high(int))
+# Also for storage tests
+snapTest5* = AccountsSample(
+name: "sample3",
+file: "sample3.txt.gz",
+lastItem: high(int))
 # ----------------------
 snapOther0a* = AccountsSample(
 name: "Other0a",
 file: "account" / "account0_00_06_dump.txt.gz",
@@ -125,6 +181,12 @@ const
 # ------------------------
+snapTestList* = [
+snapTest0, snapTest1, snapTest2, snapTest3]
+snapTestStorageList* = [
+snapTest4, snapTest5]
 snapOtherHealingList* = [
 @[snapOther0b, snapOther2, snapOther4],
 @[snapOther0a, snapOther1a, snapOther5]]
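Aside: the `firstItem`/`lastItem` fields window the items taken from a dump file; `lastItem: 0` keeps only the first `snap/1` reply and `lastItem: high(int)` keeps everything. A simplified, hypothetical selector showing the semantics (the real converter is the `to(sample, seq[UndumpAccounts])` proc further below):

proc selectWindow[T](firstItem, lastItem: int; dump: seq[T]): seq[T] =
  # Keep items with firstItem <= index <= lastItem, in dump order.
  for n, w in dump:
    if n < firstItem:
      continue
    if lastItem < n:
      break
    result.add w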


@@ -1,303 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Snap sync components tester and TDD environment
import
std/[os, strformat, strutils, tables],
chronicles,
eth/[common, p2p],
rocksdb,
unittest2,
../nimbus/db/kvstore_rocksdb,
../nimbus/db/core_db/persistent,
../nimbus/core/chain,
../nimbus/sync/snap/range_desc,
../nimbus/sync/snap/worker/db/hexary_desc,
./replay/pp,
./test_rocksdb_timing/[bulk_test_xx, test_db_timing]
when not defined(windows):
import distros
const
baseDir = [".", "..", ".."/"..", $DirSep]
repoDir = [".", "tests"/"replay", "tests"/"test_sync_snap",
"nimbus-eth1-blobs"/"replay"]
# Reference file for finding the database directory
sampleDirRefFile = "sample0.txt.gz"
# Standard test samples
bChainCapture = bulkTest0
# Number of database slots (needed for timing tests)
nTestDbInstances = 9
type
TestDbs = object
## Provide enough spare empty databases
persistent: bool
dbDir: string
baseDir: string # for cleanup
subDir: string # for cleanup
cdb: array[nTestDbInstances,CoreDbRef]
when defined(linux):
# The `detectOs(Ubuntu)` directive is not Windows compatible; it causes an
# error when running the system command `lsb_release -d` in the background.
let isUbuntu32bit = detectOs(Ubuntu) and int.sizeof == 4
else:
const isUbuntu32bit = false
let
# There was a problem with the Github/CI which results in spurious crashes
# when leaving the `runner()` if the persistent ChainDBRef initialisation
# was present, see `test_custom_network` for more details.
disablePersistentDB = isUbuntu32bit
var
xTmpDir: string
xDbs: TestDbs # for repeated storage/overwrite tests
xTab32: Table[ByteArray32,Blob] # extracted data
xTab33: Table[ByteArray33,Blob]
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
proc findFilePath(file: string;
baseDir, repoDir: openArray[string]): Result[string,void] =
for dir in baseDir:
for repo in repoDir:
let path = dir / repo / file
if path.fileExists:
return ok(path)
echo "*** File not found \"", file, "\"."
err()
proc getTmpDir(sampleDir = sampleDirRefFile): string =
sampleDir.findFilePath(baseDir,repoDir).value.splitFile.dir
proc setTraceLevel {.used.} =
discard
when defined(chronicles_runtime_filtering) and loggingEnabled:
setLogLevel(LogLevel.TRACE)
proc setErrorLevel {.used.} =
discard
when defined(chronicles_runtime_filtering) and loggingEnabled:
setLogLevel(LogLevel.ERROR)
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc flushDbDir(s: string; subDir = "") =
if s != "":
let baseDir = s / "tmp"
for n in 0 ..< nTestDbInstances:
let instDir = if subDir == "": baseDir / $n else: baseDir / subDir / $n
if (instDir / "nimbus" / "data").dirExists:
# Typically under Windows: there might be stale file locks.
try: instDir.removeDir except CatchableError: discard
try: (baseDir / subDir).removeDir except CatchableError: discard
block dontClearUnlessEmpty:
for w in baseDir.walkDir:
break dontClearUnlessEmpty
try: baseDir.removeDir except CatchableError: discard
proc flushDbs(db: TestDbs) =
if db.persistent:
for n in 0 ..< nTestDbInstances:
if db.cdb[n].isNil or db.cdb[n].dbType != LegacyDbPersistent:
break
db.cdb[n].newKvt.backend.toRocksStoreRef.close()
db.baseDir.flushDbDir(db.subDir)
proc testDbs(
workDir: string;
subDir: string;
instances: int;
persistent: bool;
): TestDbs =
if disablePersistentDB or workDir == "" or not persistent:
result.persistent = false
result.dbDir = "*notused*"
else:
result.persistent = true
result.baseDir = workDir
result.subDir = subDir
if subDir != "":
result.dbDir = workDir / "tmp" / subDir
else:
result.dbDir = workDir / "tmp"
if result.persistent:
workDir.flushDbDir(subDir)
for n in 0 ..< min(result.cdb.len, instances):
result.cdb[n] = newCoreDbRef(LegacyDbPersistent, result.dbDir / $n)
# ------------------------------------------------------------------------------
# Test Runners: database timing tests
# ------------------------------------------------------------------------------
proc importRunner(noisy = true; persistent = true; capture = bChainCapture) =
let
fileInfo = capture.file.splitFile.name.split(".")[0]
filePath = capture.file.findFilePath(baseDir,repoDir).value
tmpDir = getTmpDir()
db = tmpDir.testDbs(capture.name & "-import", instances=1, persistent)
numBlocks = capture.numBlocks
numBlocksInfo = if numBlocks == high(int): "" else: $numBlocks & " "
loadNoise = noisy
defer:
db.flushDbs
suite &"RocksDB: using {fileInfo} capture for testing db timings":
var ddb: CommonRef # persistent DB on disk
test &"Create persistent ChainDBRef on {tmpDir}":
ddb = CommonRef.new(
db = if db.persistent: db.cdb[0] else: newCoreDbRef(LegacyDbMemory),
networkId = capture.network,
pruneTrie = true,
params = capture.network.networkParams)
ddb.initializeEmptyDb
test &"Storing {numBlocksInfo}persistent blocks from dump":
noisy.test_dbTimingUndumpBlocks(filePath, ddb, numBlocks, loadNoise)
test "Extract key-value records into memory tables via rocksdb iterator":
if db.cdb[0].newKvt.backend.toRocksStoreRef.isNil:
skip() # not persistent => db.cdb[0] is nil
else:
noisy.test_dbTimingRockySetup(xTab32, xTab33, db.cdb[0])
proc dbTimingRunner(noisy = true; persistent = true; cleanUp = true) =
let
fullNoise = false
var
emptyDb = "empty"
# Allows repeated storing on existing data
if not xDbs.cdb[0].isNil:
emptyDb = "pre-loaded"
else:
xTmpDir = getTmpDir()
xDbs = xTmpDir.testDbs(
"timing-runner", instances=nTestDbInstances, persistent)
defer:
if cleanUp:
xDbs.flushDbs
xDbs.reset
suite &"RocksDB: storage tests on {emptyDb} databases":
#
# `xDbs` instance slots layout:
#
# * cdb[0] -- direct db, key length 32, no transaction
# * cdb[1] -- direct db, key length 32 as 33, no transaction
#
# * cdb[2] -- direct db, key length 32, transaction based
# * cdb[3] -- direct db, key length 32 as 33, transaction based
#
# * cdb[4] -- direct db, key length 33, no transaction
# * cdb[5] -- direct db, key length 33, transaction based
#
# * cdb[6] -- rocksdb, key length 32
# * cdb[7] -- rocksdb, key length 32 as 33
# * cdb[8] -- rocksdb, key length 33
#
doAssert 9 <= nTestDbInstances
doAssert not xDbs.cdb[8].isNil
let
storeDir32 = &"Directly store {xTab32.len} records"
storeDir33 = &"Directly store {xTab33.len} records"
storeTx32 = &"Transactionally store directly {xTab32.len} records"
storeTx33 = &"Transactionally store directly {xTab33.len} records"
intoTrieDb = &"into {emptyDb} trie db"
storeRks32 = &"Store {xTab32.len} records"
storeRks33 = &"Store {xTab33.len} records"
intoRksDb = &"into {emptyDb} rocksdb table"
if xTab32.len == 0 or xTab33.len == 0:
test &"Both tables with 32 byte keys(size={xTab32.len}), " &
&"33 byte keys(size={xTab32.len}) must be non-empty":
skip()
else:
test &"{storeDir32} (key length 32) {intoTrieDb}":
noisy.test_dbTimingStoreDirect32(xTab32, xDbs.cdb[0])
test &"{storeDir32} (key length 33) {intoTrieDb}":
noisy.test_dbTimingStoreDirectly32as33(xTab32, xDbs.cdb[1])
test &"{storeTx32} (key length 32) {intoTrieDb}":
noisy.test_dbTimingStoreTx32(xTab32, xDbs.cdb[2])
test &"{storeTx32} (key length 33) {intoTrieDb}":
noisy.test_dbTimingStoreTx32as33(xTab32, xDbs.cdb[3])
test &"{storeDir33} (key length 33) {intoTrieDb}":
noisy.test_dbTimingDirect33(xTab33, xDbs.cdb[4])
test &"{storeTx33} (key length 33) {intoTrieDb}":
noisy.test_dbTimingTx33(xTab33, xDbs.cdb[5])
if xDbs.cdb[0].newKvt.backend.toRocksStoreRef.isNil:
test "The rocksdb interface must be available": skip()
else:
test &"{storeRks32} (key length 32) {intoRksDb}":
noisy.test_dbTimingRocky32(xTab32, xDbs.cdb[6], fullNoise)
test &"{storeRks32} (key length 33) {intoRksDb}":
noisy.test_dbTimingRocky32as33(xTab32, xDbs.cdb[7], fullNoise)
test &"{storeRks33} (key length 33) {intoRksDb}":
noisy.test_dbTimingRocky33(xTab33, xDbs.cdb[8], fullNoise)
# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------
proc rocksDbTimingMain*(noisy = defined(debug)) =
noisy.importRunner() # small sample, just verify functionality
noisy.dbTimingRunner()
when isMainModule:
const
noisy = defined(debug) or true
#setTraceLevel()
setErrorLevel()
# This one uses the readily available dump: `bulkTest0` and some huge replay
# dumps `bulkTest2`, `bulkTest3`, .. from the `nimbus-eth1-blobs` package.
# For specs see `tests/test_rocksdb_timing/bulk_test_xx.nim`.
var testList = @[bulkTest0]
when true and false:
testList &= @[bulkTest1, bulkTest2, bulkTest3]
for test in testList:
noisy.showElapsed("importRunner()"):
noisy.importRunner(capture = test)
noisy.showElapsed("dbTimingRunner()"):
true.dbTimingRunner(cleanUp = false)
true.dbTimingRunner()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
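Aside: the `showElapsed` template used throughout these runners comes from `./replay/pp`. A hedged stand-in, assuming it merely times its body and reports when `noisy` is set (name and signature here are guesses for illustration):

import std/[monotimes, times]

template showElapsedSketch(noisy: bool; label: string; body: untyped) =
  # Time `body` with the monotonic clock and report when noisy.
  let t0 = getMonoTime()
  body
  if noisy:
    echo "*** ", label, ": ", getMonoTime() - t0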


@@ -1,47 +0,0 @@
#
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
eth/common,
../../nimbus/common/chain_config
type
CaptureSpecs* = object
name*: string ## sample name, also used as sub-directory for db separation
network*: NetworkId
file*: string ## name of capture file
numBlocks*: int ## Number of blocks to load
const
bulkTest0* = CaptureSpecs(
name: "some-goerli",
network: GoerliNet,
file: "goerli68161.txt.gz",
numBlocks: 1_000)
bulkTest1* = CaptureSpecs(
name: "full-goerli",
network: bulkTest0.network,
file: bulkTest0.file,
numBlocks: high(int))
bulkTest2* = CaptureSpecs(
name: "more-goerli",
network: GoerliNet,
file: "goerli68161.txt.gz",
numBlocks: high(int))
bulkTest3* = CaptureSpecs(
name: "mainnet",
network: MainNet,
file: "mainnet332160.txt.gz",
numBlocks: high(int))
# End


@@ -1,506 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Snap sync components tester and TDD environment
import
std/[algorithm, math, sequtils, strformat, times],
stew/byteutils,
rocksdb/lib/librocksdb,
rocksdb,
unittest2,
../../nimbus/core/chain,
../../nimbus/db/kvstore_rocksdb,
../../nimbus/db/core_db,
../../nimbus/db/core_db/persistent,
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[hexary_desc, rocky_bulk_load],
../../nimbus/utils/prettify,
../replay/[pp, undump_blocks]
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc to*(b: openArray[byte]; T: type ByteArray32): T =
## Convert to other representation (or exception)
if b.len == 32:
(addr result[0]).copyMem(unsafeAddr b[0], 32)
else:
doAssert b.len == 32
proc to*(b: openArray[byte]; T: type ByteArray33): T =
## Convert to other representation (or exception)
if b.len == 33:
(addr result[0]).copyMem(unsafeAddr b[0], 33)
else:
doAssert b.len == 33
proc to*(b: ByteArray32|ByteArray33; T: type Blob): T =
b.toSeq
proc to*(b: openArray[byte]; T: type NodeTag): T =
## Convert from serialised equivalent
UInt256.fromBytesBE(b).T
proc to*(w: (byte, NodeTag); T: type Blob): T =
let (b,t) = w
@[b] & toSeq(t.UInt256.toBytesBE)
proc to*(t: NodeTag; T: type Blob): T =
toSeq(t.UInt256.toBytesBE)
# ----------------
proc thisRecord(r: ptr rocksdb_iterator_t): (Blob,Blob) =
var kLen, vLen: csize_t
let
kData = r.rocksdb_iter_key(addr kLen)
vData = r.rocksdb_iter_value(addr vLen)
if not kData.isNil and not vData.isNil:
let
key = string.fromBytes(toOpenArrayByte(kData,0,int(kLen)-1))
value = string.fromBytes(toOpenArrayByte(vData,0,int(vLen)-1))
return (key.mapIt(it.byte),value.mapIt(it.byte))
proc meanStdDev(sum, sqSum: float; length: int): (float,float) =
if 0 < length:
result[0] = sum / length.float
result[1] = sqrt(sqSum / length.float - result[0] * result[0])
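# Editor's note, worked example of the shortcut used above, where
# Var(X) = E[X^2] - E[X]^2:
#   values 1,2,3,4 -> sum=10, sqSum=30, length=4
#   mean = 10/4 = 2.5
#   stdv = sqrt(30/4 - 2.5^2) = sqrt(1.25) ~ 1.118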
# ------------------------------------------------------------------------------
# Public functions, pretty printing
# ------------------------------------------------------------------------------
proc pp*(d: Duration): string =
if 40 < d.inSeconds:
d.ppMins
elif 200 < d.inMilliseconds:
d.ppSecs
elif 200 < d.inMicroseconds:
d.ppMs
else:
d.ppUs
proc ppKvPc*(w: openArray[(string,int)]): string =
w.mapIt(&"{it[0]}={it[1]}%").join(", ")
proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
if noisy:
if args.len == 0:
echo "*** ", pfx
elif 0 < pfx.len and pfx[^1] != ' ':
echo pfx, " ", args.toSeq.join
else:
echo pfx, args.toSeq.join
# ------------------------------------------------------------------------------
# Public test function: setup
# ------------------------------------------------------------------------------
proc test_dbTimingUndumpBlocks*(
noisy: bool;
filePath: string;
com: CommonRef;
numBlocks: int;
loadNoise = false;
) =
## Store persistent blocks from dump into chain DB
let chain = com.newChain
for w in filePath.undumpBlocks:
let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)
if fromBlock == 0.u256:
doAssert w[0][0] == com.db.getBlockHeader(0.u256)
continue
# Message if [fromBlock,toBlock] contains a multiple of 900
if fromBlock + (toBlock mod 900) <= toBlock:
loadNoise.say "***", &"processing ...[#{fromBlock},#{toBlock}]..."
check chain.persistBlocks(w[0], w[1]).isOk()
if numBlocks.toBlockNumber <= w[0][^1].blockNumber:
break
proc test_dbTimingRockySetup*(
noisy: bool;
t32: var Table[ByteArray32,Blob],
t33: var Table[ByteArray33,Blob],
cdb: CoreDbRef;
) =
## Extract key-value records into memory tables via rocksdb iterator
let
rdb = cdb.newKvt.backend.toRocksStoreRef
rop = rocksdb_readoptions_create()
rit = rdb.rocksDb.cPtr.rocksdb_create_iterator(rop)
check not rit.isNil
var
v32Sum, v32SqSum: float # statistics
v33Sum, v33SqSum: float
t32.clear
t33.clear
rit.rocksdb_iter_seek_to_first()
while rit.rocksdb_iter_valid() != 0:
let (key,val) = rit.thisRecord()
rit.rocksdb_iter_next()
if key.len == 32:
t32[key.to(ByteArray32)] = val
v32Sum += val.len.float
v32SqSum += val.len.float * val.len.float
check key.to(ByteArray32).to(Blob) == key
elif key.len == 33:
t33[key.to(ByteArray33)] = val
v33Sum += val.len.float
v33SqSum += val.len.float * val.len.float
check key.to(ByteArray33).to(Blob) == key
else:
noisy.say "***", "ignoring key=", key.toHex
rit.rocksdb_iter_destroy()
rop.rocksdb_readoptions_destroy()
var
(mean32, stdv32) = meanStdDev(v32Sum, v32SqSum, t32.len)
(mean33, stdv33) = meanStdDev(v33Sum, v33SqSum, t33.len)
noisy.say "***",
"key 32 table: ",
&"size={t32.len} valLen={(mean32+0.5).int}({(stdv32+0.5).int})",
", key 33 table: ",
&"size={t33.len} valLen={(mean33+0.5).int}({(stdv33+0.5).int})"
# ------------------------------------------------------------------------------
# Public test function: timing
# ------------------------------------------------------------------------------
proc test_dbTimingStoreDirect32*(
noisy: bool;
t32: Table[ByteArray32,Blob];
cdb: CoreDbRef;
) =
## Direct db, key length 32, no transaction
var ela: Duration
let tdb = cdb.kvt
if noisy: echo ""
noisy.showElapsed("Standard db loader(keyLen 32)", ela):
for (key,val) in t32.pairs:
tdb.put(key, val)
if ela.inNanoseconds != 0:
let
elaNs = ela.inNanoseconds.float
perRec = ((elaNs / t32.len.float) + 0.5).int.initDuration
noisy.say "***",
"nRecords=", t32.len, ", ",
"perRecord=", perRec.pp
proc test_dbTimingStoreDirectly32as33*(
noisy: bool;
t32: Table[ByteArray32,Blob],
cdb: CoreDbRef;
) =
## Direct db, key length 32 as 33, no transaction
var ela = initDuration()
let tdb = cdb.kvt
if noisy: echo ""
noisy.showElapsed("Standard db loader(keyLen 32 as 33)", ela):
for (key,val) in t32.pairs:
tdb.put(@[99.byte] & key.toSeq, val)
if ela.inNanoseconds != 0:
let
elaNs = ela.inNanoseconds.float
perRec = ((elaNs / t32.len.float) + 0.5).int.initDuration
noisy.say "***",
"nRecords=", t32.len, ", ",
"perRecord=", perRec.pp
proc test_dbTimingStoreTx32*(
noisy: bool;
t32: Table[ByteArray32,Blob],
cdb: CoreDbRef;
) =
## Direct db, key length 32, transaction based
var ela: Duration
let tdb = cdb.kvt
if noisy: echo ""
noisy.showElapsed("Standard db loader(tx,keyLen 32)", ela):
let dbTx = cdb.beginTransaction
defer: dbTx.commit
for (key,val) in t32.pairs:
tdb.put(key, val)
if ela.inNanoseconds != 0:
let
elaNs = ela.inNanoseconds.float
perRec = ((elaNs / t32.len.float) + 0.5).int.initDuration
noisy.say "***",
"nRecords=", t32.len, ", ",
"perRecord=", perRec.pp
proc test_dbTimingStoreTx32as33*(
noisy: bool;
t32: Table[ByteArray32,Blob],
cdb: CoreDbRef;
) =
## Direct db, key length 32 as 33, transaction based
var ela: Duration
let tdb = cdb.kvt
if noisy: echo ""
noisy.showElapsed("Standard db loader(tx,keyLen 32 as 33)", ela):
let dbTx = cdb.beginTransaction
defer: dbTx.commit
for (key,val) in t32.pairs:
tdb.put(@[99.byte] & key.toSeq, val)
if ela.inNanoseconds != 0:
let
elaNs = ela.inNanoseconds.float
perRec = ((elaNs / t32.len.float) + 0.5).int.initDuration
noisy.say "***",
"nRecords=", t32.len, ", ",
"perRecord=", perRec.pp
proc test_dbTimingDirect33*(
noisy: bool;
t33: Table[ByteArray33,Blob],
cdb: CoreDbRef;
) =
## Direct db, key length 33, no transaction
var ela: Duration
let tdb = cdb.kvt
if noisy: echo ""
noisy.showElapsed("Standard db loader(keyLen 33)", ela):
for (key,val) in t33.pairs:
tdb.put(key, val)
if ela.inNanoseconds != 0:
let
elaNs = ela.inNanoseconds.float
perRec = ((elaNs / t33.len.float) + 0.5).int.initDuration
noisy.say "***",
"nRecords=", t33.len, ", ",
"perRecord=", perRec.pp
proc test_dbTimingTx33*(
noisy: bool;
t33: Table[ByteArray33,Blob],
cdb: CoreDbRef;
) =
## Direct db, key length 33, transaction based
var ela: Duration
let tdb = cdb.kvt
if noisy: echo ""
noisy.showElapsed("Standard db loader(tx,keyLen 33)", ela):
let dbTx = cdb.beginTransaction
defer: dbTx.commit
for (key,val) in t33.pairs:
tdb.put(key, val)
if ela.inNanoseconds != 0:
let
elaNs = ela.inNanoseconds.float
perRec = ((elaNs / t33.len.float) + 0.5).int.initDuration
noisy.say "***",
"nRecords=", t33.len, ", ",
"perRecord=", perRec.pp
proc test_dbTimingRocky32*(
noisy: bool;
t32: Table[ByteArray32,Blob],
cdb: CoreDbRef;
fullNoise = false;
) =
## Rocksdb, key length 32
var
ela: array[4,Duration]
size: int64
let
rdb = cdb.newKvt.backend.toRocksStoreRef
# Note that 32 and 33 size keys cannot be usefully merged into the same SST
# file. The keys must be added in a sorted mode. So playing safe, key sizes
# should be of equal length.
if noisy: echo ""
noisy.showElapsed("Rocky bulk loader(keyLen 32)", ela[0]):
let bulker = RockyBulkLoadRef.init(rdb)
defer: bulker.destroy()
check bulker.begin("rocky-bulk-cache")
var
keyList = newSeq[NodeTag](t32.len)
fullNoise.showElapsed("Rocky bulk loader/32, sorter", ela[1]):
var inx = 0
for key in t32.keys:
keyList[inx] = key.to(NodeTag)
inx.inc
keyList.sort(cmp)
fullNoise.showElapsed("Rocky bulk loader/32, append", ela[2]):
for n,nodeTag in keyList:
let key = nodeTag.to(Blob)
check bulker.add(key, t32[key.to(ByteArray32)])
fullNoise.showElapsed("Rocky bulk loader/32, slurp", ela[3]):
let rc = bulker.finish()
if rc.isOk:
size = rc.value
else:
check bulker.lastError == "" # force printing error
fullNoise.say "***", " ela[]=", $ela.toSeq.mapIt(it.pp)
if ela[0].inNanoseconds != 0:
let
elaNs = ela.toSeq.mapIt(it.inNanoseconds.float)
elaPc = elaNs.mapIt(((it / elaNs[0]) * 100 + 0.5).int)
perRec = ((elaNs[0] / t32.len.float) + 0.5).int.initDuration
noisy.say "***",
"nRecords=", t32.len, ", ",
"perRecord=", perRec.pp, ", ",
"sstSize=", size.uint64.toSI, ", ",
"perRecord=", ((size.float / t32.len.float) + 0.5).int, ", ",
["Total","Sorter","Append","Ingest"].zip(elaPc).ppKvPc
proc test_dbTimingRocky32as33*(
noisy: bool;
t32: Table[ByteArray32,Blob],
cdb: CoreDbRef;
fullNoise = false;
) =
## Rocksdb, key length 32 as 33
var
ela: array[4,Duration]
size: int64
let
rdb = cdb.newKvt.backend.toRocksStoreRef
# Note that 32 and 33 size keys cannot be usefully merged into the same SST
# file. The keys must be added in a sorted mode. So playing safe, key sizes
# should be of equal length.
if noisy: echo ""
noisy.showElapsed("Rocky bulk loader(keyLen 32 as 33)", ela[0]):
let bulker = RockyBulkLoadRef.init(rdb)
defer: bulker.destroy()
check bulker.begin("rocky-bulk-cache")
var
keyList = newSeq[NodeTag](t32.len)
fullNoise.showElapsed("Rocky bulk loader/32 as 33, sorter", ela[1]):
var inx = 0
for key in t32.keys:
keyList[inx] = key.to(NodeTag)
inx.inc
keyList.sort(cmp)
fullNoise.showElapsed("Rocky bulk loader/32 as 33, append", ela[2]):
for n,nodeTag in keyList:
let key = nodeTag.to(Blob)
check bulker.add(@[99.byte] & key, t32[key.to(ByteArray32)])
fullNoise.showElapsed("Rocky bulk loader/32 as 33, slurp", ela[3]):
let rc = bulker.finish()
if rc.isOk:
size = rc.value
else:
check bulker.lastError == "" # force printing error
fullNoise.say "***", " ela[]=", $ela.toSeq.mapIt(it.pp)
if ela[0].inNanoseconds != 0:
let
elaNs = ela.toSeq.mapIt(it.inNanoseconds.float)
elaPc = elaNs.mapIt(((it / elaNs[0]) * 100 + 0.5).int)
perRec = ((elaNs[0] / t32.len.float) + 0.5).int.initDuration
noisy.say "***",
"nRecords=", t32.len, ", ",
"perRecord=", perRec.pp, ", ",
"sstSize=", size.uint64.toSI, ", ",
"perRecord=", ((size.float / t32.len.float) + 0.5).int, ", ",
["Total","Sorter","Append","Ingest"].zip(elaPc).ppKvPc
proc test_dbTimingRocky33*(
noisy: bool;
t33: Table[ByteArray33,Blob],
cdb: CoreDbRef;
fullNoise = false;
) =
## Rocksdb, key length 33
var
ela: array[4,Duration]
size: int64
let rdb = cdb.newKvt.backend.toRocksStoreRef
# Note that 32 and 33 size keys cannot be usefully merged into the same SST
# file. The keys must be added in a sorted mode. So playing safe, key sizes
# should be of equal length.
if noisy: echo ""
noisy.showElapsed("Rocky bulk loader(keyLen 33)", ela[0]):
let bulker = RockyBulkLoadRef.init(rdb)
defer: bulker.destroy()
check bulker.begin("rocky-bulk-cache")
var
kKeys: seq[byte] # need to cascade
kTab: Table[byte,seq[NodeTag]]
fullNoise.showElapsed("Rocky bulk loader/33, sorter", ela[1]):
for key in t33.keys:
if kTab.hasKey(key[0]):
kTab[key[0]].add key.toOpenArray(1,32).to(NodeTag)
else:
kTab[key[0]] = @[key.toOpenArray(1,32).to(NodeTag)]
kKeys = toSeq(kTab.keys).sorted
for w in kKeys:
kTab[w].sort(cmp)
fullNoise.showElapsed("Rocky bulk loader/33, append", ela[2]):
for w in kKeys:
fullNoise.say "***", " prefix=", w, " entries=", kTab[w].len
for n,nodeTag in kTab[w]:
let key = (w,nodeTag).to(Blob)
check bulker.add(key, t33[key.to(ByteArray33)])
fullNoise.showElapsed("Rocky bulk loader/33, slurp", ela[3]):
let rc = bulker.finish()
if rc.isOk:
size = rc.value
else:
check bulker.lastError == "" # force printing error
fullNoise.say "***", " ela[]=", $ela.toSeq.mapIt(it.pp)
if ela[0].inNanoseconds != 0:
let
elaNs = ela.toSeq.mapIt(it.inNanoseconds.float)
elaPc = elaNs.mapIt(((it / elaNs[0]) * 100 + 0.5).int)
perRec = ((elaNs[0] / t33.len.float) + 0.5).int.initDuration
noisy.say "***",
"nRecords=", t33.len, ", ",
"perRecord=", perRec.pp, ", ",
"sstSize=", size.uint64.toSI, ", ",
"perRecord=", ((size.float / t33.len.float) + 0.5).int, ", ",
["Total","Cascaded-Sorter","Append","Ingest"].zip(elaPc).ppKvPc
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
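Aside: the "cascaded" sorter above handles 33-byte keys by bucketing on the leading byte and ordering the 32-byte tails within each bucket. A self-contained sketch of the same idea with simplified types (editor's illustration, not the original code):

import std/[algorithm, sequtils, tables]

proc cmpBytes(a, b: seq[byte]): int =
  # Lexicographic byte-wise comparison, shorter sequence first on ties.
  for i in 0 ..< min(a.len, b.len):
    if a[i] != b[i]:
      return cmp(a[i], b[i])
  cmp(a.len, b.len)

proc cascadedSort(keys: seq[seq[byte]]): seq[seq[byte]] =
  # Bucket by leading byte, sort tails per bucket, emit in prefix order.
  var buckets: Table[byte, seq[seq[byte]]]
  for k in keys:
    buckets.mgetOrPut(k[0], @[]).add k[1 .. ^1]
  for pfx in toSeq(buckets.keys).sorted:
    for tail in buckets[pfx].sorted(cmpBytes):
      result.add @[pfx] & tail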


@@ -1,502 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Snap sync components tester and TDD environment
import
std/[os, sets, sequtils, strformat, strutils, tables],
chronicles,
eth/[common, p2p],
rocksdb,
unittest2,
../nimbus/db/[core_db, kvstore_rocksdb],
../nimbus/db/core_db/persistent,
../nimbus/core/chain,
../nimbus/sync/types,
../nimbus/sync/snap/range_desc,
../nimbus/sync/snap/worker/db/[
hexary_desc, hexary_envelope, hexary_error, hexary_inspect, hexary_nearby,
hexary_paths, rocky_bulk_load, snapdb_accounts, snapdb_debug, snapdb_desc],
./replay/[pp, undump_accounts, undump_storages],
./test_sync_snap/[
snap_test_xx,
test_accounts, test_calc, test_helpers, test_node_range, test_inspect,
test_pivot, test_storage, test_syncdb, test_types]
const
baseDir = [".", "..", ".."/"..", $DirSep]
repoDir = [".", "tests", "nimbus-eth1-blobs"]
subDir = ["replay", "test_sync_snap", "replay"/"snap"]
# Reference file for finding the database directory
sampleDirRefFile = "sample0.txt.gz"
# Standard test samples
accSample = snapTest0
storSample = snapTest4
# Number of database slots available
nTestDbInstances = 9
type
TestDbs = object
## Provide enough spare empty databases
persistent: bool
dbDir: string
baseDir: string # for cleanup
subDir: string # for cleanup
cdb: array[nTestDbInstances,CoreDbRef]
SnapRunDesc = object
id: int
info: string
file: string
chn: ChainRef
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
proc findFilePath(
file: string;
baseDir: openArray[string] = baseDir;
repoDir: openArray[string] = repoDir;
subDir: openArray[string] = subDir;
): Result[string,void] =
for dir in baseDir:
if dir.dirExists:
for repo in repoDir:
if (dir / repo).dirExists:
for sub in subDir:
if (dir / repo / sub).dirExists:
let path = dir / repo / sub / file
if path.fileExists:
return ok(path)
echo "*** File not found \"", file, "\"."
err()
proc getTmpDir(sampleDir = sampleDirRefFile): string =
sampleDir.findFilePath.value.splitFile.dir
proc setTraceLevel {.used.} =
discard
when defined(chronicles_runtime_filtering) and loggingEnabled:
setLogLevel(LogLevel.TRACE)
proc setErrorLevel {.used.} =
discard
when defined(chronicles_runtime_filtering) and loggingEnabled:
setLogLevel(LogLevel.ERROR)
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc to(sample: AccountsSample; T: type seq[UndumpAccounts]): T =
## Convert test data into usable in-memory format
let file = sample.file.findFilePath.value
var root: Hash256
for w in file.undumpNextAccount:
let n = w.seenAccounts - 1
if n < sample.firstItem:
continue
if sample.lastItem < n:
break
if sample.firstItem == n:
root = w.root
elif w.root != root:
break
result.add w
proc to(sample: AccountsSample; T: type seq[UndumpStorages]): T =
## Convert test data into usable in-memory format
let file = sample.file.findFilePath.value
var root: Hash256
for w in file.undumpNextStorages:
let n = w.seenAccounts - 1 # storages selector based on accounts
if n < sample.firstItem:
continue
if sample.lastItem < n:
break
if sample.firstItem == n:
root = w.root
elif w.root != root:
break
result.add w
proc flushDbDir(s: string; subDir = "") =
if s != "":
let baseDir = s / "tmp"
for n in 0 ..< nTestDbInstances:
let instDir = if subDir == "": baseDir / $n else: baseDir / subDir / $n
if (instDir / "nimbus" / "data").dirExists:
# Typically under Windows: there might be stale file locks.
try: instDir.removeDir except CatchableError: discard
try: (baseDir / subDir).removeDir except CatchableError: discard
block dontClearUnlessEmpty:
for w in baseDir.walkDir:
break dontClearUnlessEmpty
try: baseDir.removeDir except CatchableError: discard
proc flushDbs(db: TestDbs) =
if db.persistent:
for n in 0 ..< nTestDbInstances:
if db.cdb[n].isNil or db.cdb[n].dbType != LegacyDbPersistent:
break
db.cdb[n].newKvt.backend.toRocksStoreRef.close()
db.baseDir.flushDbDir(db.subDir)
proc testDbs(
workDir: string;
subDir: string;
instances: int;
persistent: bool;
): TestDbs =
if workDir == "" or not persistent:
result.persistent = false
result.dbDir = "*notused*"
else:
result.persistent = true
result.baseDir = workDir
result.subDir = subDir
if subDir != "":
result.dbDir = workDir / "tmp" / subDir
else:
result.dbDir = workDir / "tmp"
if result.persistent:
workDir.flushDbDir(subDir)
for n in 0 ..< min(result.cdb.len, instances):
result.cdb[n] = newCoreDbRef(LegacyDbPersistent, result.dbDir / $n)
proc snapDbRef(cdb: CoreDbRef; pers: bool): SnapDbRef =
if pers: SnapDbRef.init(cdb)
else: SnapDbRef.init(newCoreDbRef LegacyDbMemory)
proc snapDbAccountsRef(
cdb: CoreDbRef;
root: Hash256;
pers: bool;
):SnapDbAccountsRef =
SnapDbAccountsRef.init(cdb.snapDbRef(pers), root, Peer())
# ------------------------------------------------------------------------------
# Test Runners: accounts and accounts storages
# ------------------------------------------------------------------------------
proc accountsRunner(noisy = true; persistent = true; sample = accSample) =
let
accLst = sample.to(seq[UndumpAccounts])
root = accLst[0].root
tmpDir = getTmpDir()
db = tmpDir.testDbs(sample.name & "-accounts", instances=3, persistent)
info = if db.persistent: &"persistent db on \"{db.baseDir}\""
else: "in-memory db"
fileInfo = sample.file.splitPath.tail.replace(".txt.gz","")
defer:
db.flushDbs
suite &"SyncSnap: {fileInfo} accounts and proofs for {info}":
block:
# New common descriptor for this sub-group of tests
let
desc = db.cdb[0].snapDbAccountsRef(root, db.persistent)
hexaDb = desc.hexaDb
getFn = desc.getAccountFn
dbg = if noisy: hexaDb else: nil
test &"Proofing {accLst.len} list items for state root ..{root.pp}":
accLst.test_accountsImport(desc, db.persistent)
# debugging, make sure that state root ~ "$0"
hexaDb.assignPrettyKeys(root.to(NodeKey))
# Beware: dumping a large database is not recommended
# true.say "***", "database dump\n ", hexaDb.pp(root.to(NodeKey))
test &"Retrieve accounts & proofs for previous account ranges":
if db.persistent:
accLst.test_NodeRangeProof(getFn, dbg)
else:
accLst.test_NodeRangeProof(hexaDb, dbg)
test &"Verify left boundary checks":
if db.persistent:
accLst.test_NodeRangeLeftBoundary(getFn, dbg)
else:
accLst.test_NodeRangeLeftBoundary(hexaDb, dbg)
block:
# List of keys to be shared by sub-group
var accKeys: seq[NodeKey]
# New common descriptor for this sub-group of tests
let desc = db.cdb[1].snapDbAccountsRef(root, db.persistent)
test &"Merging {accLst.len} accounts/proofs lists into single list":
accLst.test_accountsMergeProofs(desc, accKeys) # set up `accKeys`
test &"Revisiting {accKeys.len} stored items on ChainDBRef":
accKeys.test_accountsRevisitStoredItems(desc, noisy)
test &"Decompose path prefix envelopes on {info}":
let hexaDb = desc.hexaDb
if db.persistent:
accKeys.test_NodeRangeDecompose(root, desc.getAccountFn, hexaDb)
else:
accKeys.test_NodeRangeDecompose(root, hexaDb, hexaDb)
# This one works with a new clean database in order to avoid some
# problems on observed qemu/Win7.
test &"Storing/retrieving {accKeys.len} stored items " &
"on persistent pivot/checkpoint registry":
if db.persistent:
accKeys.test_pivotStoreRead(db.cdb[2])
else:
skip()
proc storagesRunner(
noisy = true;
persistent = true;
sample = storSample;
knownFailures: seq[(string,seq[(int,HexaryError)])] = @[]) {.used.} =
let
accLst = sample.to(seq[UndumpAccounts])
stoLst = sample.to(seq[UndumpStorages])
tmpDir = getTmpDir()
db = tmpDir.testDbs(sample.name & "-storages", instances=1, persistent)
info = if db.persistent: &"persistent db" else: "in-memory db"
idPfx = sample.file.splitPath.tail.replace(".txt.gz","")
defer:
db.flushDbs
suite &"SyncSnap: {idPfx} accounts storage for {info}":
let xdb = db.cdb[0].snapDbRef(db.persistent)
test &"Merging {accLst.len} accounts for state root ..{accLst[0].root.pp}":
accLst.test_storageAccountsImport(xdb, db.persistent)
test &"Merging {stoLst.len} storages lists":
stoLst.test_storageSlotsImport(xdb, db.persistent, knownFailures,idPfx)
test &"Inspecting {stoLst.len} imported storages lists sub-tries":
stoLst.test_storageSlotsTries(xdb, db.persistent, knownFailures,idPfx)
proc inspectionRunner(
noisy = true;
persistent = true;
cascaded = true;
sample: openArray[AccountsSample] = snapTestList) =
let
inspectList = sample.mapIt(it.to(seq[UndumpAccounts]))
tmpDir = getTmpDir()
db = tmpDir.testDbs(
sample[0].name & "-inspection", instances=nTestDbInstances, persistent)
info = if db.persistent: &"persistent db" else: "in-memory db"
fileInfo = "[" & sample[0].file.splitPath.tail.replace(".txt.gz","") & "..]"
defer:
db.flushDbs
suite &"SyncSnap: inspect {fileInfo} lists for {info} for healing":
var
singleStats: seq[(int,TrieNodeStat)]
accuStats: seq[(int,TrieNodeStat)]
let
ingerprinting = &"ingerprinting {inspectList.len}"
singleAcc = &"F{ingerprinting} single accounts lists"
accumAcc = &"F{ingerprinting} accumulated accounts"
cascAcc = &"Cascaded f{ingerprinting} accumulated accounts lists"
memBase = SnapDbRef.init(newCoreDbRef LegacyDbMemory)
dbSlot = proc(n: int): SnapDbRef =
if 2+n < nTestDbInstances and
not db.cdb[2+n].newKvt.backend.toRocksStoreRef.isNil:
return SnapDbRef.init(db.cdb[2+n])
test &"{singleAcc} for in-memory-db":
inspectList.test_inspectSingleAccountsMemDb(memBase, singleStats)
test &"{singleAcc} for persistent db":
if persistent:
inspectList.test_inspectSingleAccountsPersistent(dbSlot, singleStats)
else:
skip()
test &"{accumAcc} for in-memory-db":
inspectList.test_inspectAccountsInMemDb(memBase, accuStats)
test &"{accumAcc} for persistent db":
if persistent:
inspectList.test_inspectAccountsPersistent(db.cdb[0], accuStats)
else:
skip()
test &"{cascAcc} for in-memory-db":
if cascaded:
inspectList.test_inspectCascadedMemDb()
else:
skip()
test &"{cascAcc} for persistent db":
if cascaded and persistent:
inspectList.test_inspectCascadedPersistent(db.cdb[1])
else:
skip()
# ------------------------------------------------------------------------------
# Other test Runners
# ------------------------------------------------------------------------------
proc miscRunner(noisy = true) =
suite "SyncSnap: Verify setup, constants, limits":
test "RLP accounts list sizes":
test_calcAccountsListSizes()
test "RLP proofs list sizes":
test_calcProofsListSizes()
test "RLP en/decode GetTrieNodes arguments list":
test_calcTrieNodeTranscode()
test "RLP en/decode BockBody arguments list":
test_calcBlockBodyTranscode()
proc snapRunner(noisy = true; specs: SnapSyncSpecs) {.used.} =
let
tailInfo = specs.tailBlocks.splitPath.tail.replace(".txt.gz","")
tailPath = specs.tailBlocks.findFilePath.value
# allFile = "mainnet332160.txt.gz".findFilePath.value
pivot = specs.pivotBlock
updateSize = specs.nItems
tmpDir = getTmpDir()
db = tmpDir.testDbs(specs.name, instances=1, true)
defer:
db.flushDbs()
var dsc = SnapRunDesc(
info: specs.snapDump.splitPath.tail.replace(".txt.gz",""),
file: specs.snapDump.findFilePath.value,
chn: CommonRef.new(
db.cdb[0],
networkId = specs.network,
pruneTrie = true,
params = specs.network.networkParams).newChain)
dsc.chn.com.initializeEmptyDb()
suite &"SyncSnap: verify \"{dsc.info}\" snapshot against full sync":
#test "Import block chain":
# if dsc.chn.db.toLegacyBackend.rocksStoreRef.isNil:
# skip()
# else:
# noisy.showElapsed("import block chain"):
# check dsc.chn.test_syncdbImportChainBlocks(allFile, pivot) == pivot
# noisy.showElapsed("dump db"):
# dsc[1].chn.db.toLegacyBackend.rocksStoreRef.dumpAllDb()
test "Import snapshot dump":
if dsc.chn.db.newKvt.backend.toRocksStoreRef.isNil:
skip()
else:
noisy.showElapsed(&"undump \"{dsc.info}\""):
let
(a,b,c) = dsc.chn.test_syncdbImportSnapshot(dsc.file, noisy=noisy)
aSum = a[0] + a[1]
bSum = b.foldl(a + b)
cSum = c.foldl(a + b)
noisy.say "***", "[", dsc.info, "]",
" undumped ", aSum + bSum + cSum, " snapshot records",
" (key32=", aSum, ",",
" key33=", bSum, ",",
" other=", cSum, ")" #, " b=",b.pp, " c=", c.pp
when false: # or true:
noisy.showElapsed(&"dump db \"{dsc.info}\""):
dsc.chn.db.toLegacyBackend.rocksStoreRef.dumpAllDb()
test &"Append block chain from \"{tailInfo}\"":
if dsc.chn.db.newKvt.backend.toRocksStoreRef.isNil:
skip()
else:
dsc.chn.test_syncdbAppendBlocks(tailPath,pivot,updateSize,noisy)
# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------
proc syncSnapMain*(noisy = defined(debug)) =
noisy.miscRunner()
noisy.accountsRunner(persistent=true)
noisy.accountsRunner(persistent=false)
noisy.inspectionRunner()
when isMainModule:
const
noisy = defined(debug) or true
#setTraceLevel()
setErrorLevel()
# Test constants, calculations etc.
when true: # and false:
noisy.miscRunner()
# Test database snapshot handling. The test samples are too big for
# `nimbus-eth1` so they are available on `nimbus-eth1-blobs`.
when true: # or false
import ./test_sync_snap/snap_syncdb_xx
for n,sam in snapSyncdbList:
false.snapRunner(sam)
# This one uses dumps from the external `nimbus-eth1-blobs` repo
when true and false:
import ./test_sync_snap/snap_other_xx
noisy.showElapsed("accountsRunner()"):
for n,sam in snapOtherList:
false.accountsRunner(persistent=true, sam)
noisy.showElapsed("inspectRunner()"):
for n,sam in snapOtherHealingList:
false.inspectionRunner(persistent=true, cascaded=false, sam)
# This one uses dumps from the external `nimbus-eth1-blobs` repo
when true and false:
import ./test_sync_snap/snap_storage_xx
let knownFailures: KnownStorageFailure = @[
("storages5__34__41_dump#10", @[( 508, RootNodeMismatch)]),
]
noisy.showElapsed("storageRunner()"):
for n,sam in snapStorageList:
false.storagesRunner(persistent=true, sam, knownFailures)
# This one uses readily available dumps
when true: # and false:
false.inspectionRunner()
for n,sam in snapTestList:
false.accountsRunner(persistent=false, sam)
false.accountsRunner(persistent=true, sam)
for n,sam in snapTestStorageList:
false.accountsRunner(persistent=false, sam)
false.accountsRunner(persistent=true, sam)
false.storagesRunner(persistent=true, sam)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@@ -1,124 +0,0 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
std/os,
./test_types
const
snapStorage0* = AccountsSample(
name: "Storage0",
file: "storage" / "storages0___0___1_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorage1* = AccountsSample(
name: "Storage1",
file: "storage" / "storages1___2___9_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorage2* = AccountsSample(
name: "Storage2",
file: "storage" / "storages2__10__17_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorage3* = AccountsSample(
name: "Storage3",
file: "storage" / "storages3__18__25_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorage4* = AccountsSample(
name: "Storage4",
file: "storage" / "storages4__26__33_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorage5* = AccountsSample(
name: "Storage5",
file: "storage" / "storages5__34__41_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorage6* = AccountsSample(
name: "Storage6",
file: "storage" / "storages6__42__50_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorage7* = AccountsSample(
name: "Storage7",
file: "storage" / "storages7__51__59_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorage8* = AccountsSample(
name: "Storage8",
file: "storage" / "storages8__60__67_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorage9* = AccountsSample(
name: "Storage9",
file: "storage" / "storages9__68__75_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorageA* = AccountsSample(
name: "StorageA",
file: "storage" / "storagesA__76__83_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorageB* = AccountsSample(
name: "StorageB",
file: "storage" / "storagesB__84__92_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorageC* = AccountsSample(
name: "StorageC",
file: "storage" / "storagesC__93_101_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorageD* = AccountsSample(
name: "StorageD",
file: "storage" / "storagesD_102_109_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorageE* = AccountsSample(
name: "StorageE",
file: "storage" / "storagesE_110_118_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorageF* = AccountsSample(
name: "StorageF",
file: "storage" / "storagesF_119_126_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorageG* = AccountsSample(
name: "StorageG",
file: "storage" / "storagesG_127_129_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapStorageList* = [
snapStorage0, snapStorage1, snapStorage2, snapStorage3, snapStorage4,
snapStorage5, snapStorage6, snapStorage7, snapStorage8, snapStorage9,
snapStorageA, snapStorageB, snapStorageC, snapStorageD, snapStorageE,
snapStorageF, snapStorageG]
# End


@@ -1,59 +0,0 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
../../nimbus/core/chain,
./test_types
const
snapSyncdb0* = SnapSyncSpecs(
name: "main-snap",
network: MainNet,
snapDump: "mainnet=64.txt.gz",
tailBlocks: "mainnet332160.txt.gz",
pivotBlock: 64u64,
nItems: 100)
snapSyncdb1* = SnapSyncSpecs(
name: "main-snap",
network: MainNet,
snapDump: "mainnet=128.txt.gz",
tailBlocks: "mainnet332160.txt.gz",
pivotBlock: 128u64,
nItems: 500)
snapSyncdb2* = SnapSyncSpecs(
name: "main-snap",
network: MainNet,
snapDump: "mainnet=500.txt.gz",
tailBlocks: "mainnet332160.txt.gz",
pivotBlock: 500u64,
nItems: 500)
snapSyncdb3* = SnapSyncSpecs(
name: "main-snap",
network: MainNet,
snapDump: "mainnet=1000.txt.gz",
tailBlocks: "mainnet332160.txt.gz",
pivotBlock: 1000u64,
nItems: 500)
snapSyncdb4* = SnapSyncSpecs(
name: "main-snap",
network: MainNet,
snapDump: "mainnet=300000.txt.gz",
tailBlocks: "mainnet299905-332160.txt.gz",
pivotBlock: 300000u64,
nItems: 500)
snapSyncdbList* = [
snapSyncdb0, snapSyncdb1, snapSyncdb2, snapSyncdb3, snapSyncdb4]
# End


@@ -1,54 +0,0 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
./test_types
const
snapTest0* = AccountsSample(
name: "sample0",
file: "sample0.txt.gz",
firstItem: 0,
lastItem: high(int))
snapTest1* = AccountsSample(
name: "test1",
file: snapTest0.file,
lastItem: 0) # Only the first `snap/1` reply from the sample
snapTest2* = AccountsSample(
name: "sample1",
file: "sample1.txt.gz",
lastItem: high(int))
snapTest3* = AccountsSample(
name: "test3",
file: snapTest2.file,
lastItem: 0) # Only the first `snap/1` reply from the sample
# Also for storage tests
snapTest4* = AccountsSample(
name: "sample2",
file: "sample2.txt.gz",
lastItem: high(int))
# Also for storage tests
snapTest5* = AccountsSample(
name: "sample3",
file: "sample3.txt.gz",
lastItem: high(int))
snapTestList* = [
snapTest0, snapTest1, snapTest2, snapTest3]
snapTestStorageList* = [
snapTest4, snapTest5]
# End


@@ -1,185 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Snap sync components tester and TDD environment
##
## This module provides test bodies for storing chain data directly
## rather than derive them by executing the EVM. Here, only accounts are
## considered.
##
## The `snap/1` protocol allows fetching data for a certain account range. The
## following boundary conditions apply to the received data:
##
## * `State root`: All data are relative to the same state root.
##
## * `Accounts`: There is an accounts interval sorted in strictly increasing
## order. The accounts are required to be consecutive, i.e. without holes in
## between, although this cannot be verified immediately.
##
## * `Lower bound`: There is a start value which might be lower than the first
## account hash. There must be no other account between this start value and
## the first account (not verifiable yet.) For all practical purposes, this
## value is mostly ignored but carried through.
##
## * `Proof`: There is a list of hexary nodes which allow building a partial
## Patricia-Merkle trie starting at the state root with all the account
## leaves. There are enough nodes that show that there is no account before
## the least account (which is currently ignored.)
##
## There are test data samples in the sub-directory `test_sync_snap`. These
## are complete replies for some (admittedly small) test requests from a
## `kiln#` session.
##
## There are three tests:
##
## 1. Run the `test_accountsImport()` function which is the all-in-one
## production function processing the data described above. The test
## applies it sequentially to all argument data sets.
##
## 2. With `test_accountsMergeProofs()` individual items are tested that are
## hidden in test 1 while merging the sample data.
## * Load/accumulate `proofs` data from several samples
## * Load/accumulate accounts (needs some unique sorting)
## * Build/complete hexary trie for accounts
## * Save/bulk-store hexary trie on disk. If rocksdb is available, data
## are bulk stored via sst.
##
## 3. The function `test_accountsRevisitStoredItems()` traverses trie nodes
## stored earlier. The accounts from test 2 are re-visited using the account
## hash as access path.
##
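# Editor's sketch of the reply shape described above; field names are
# illustrative only (the real declarations live in the snap protocol
# and range_desc modules):
#
#   AccountRangeReply = object
#     root:     Hash256            # state root all data relate to
#     base:     NodeTag            # lower bound, may precede first account
#     accounts: seq[PackedAccount] # strictly increasing account hashes
#     proof:    seq[SnapProof]     # nodes proving the range boundaries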
import
std/algorithm,
eth/[common, p2p],
unittest2,
../../nimbus/sync/protocol,
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[
hexary_debug, hexary_desc, hexary_error,
snapdb_accounts, snapdb_debug, snapdb_desc],
../replay/[pp, undump_accounts],
./test_helpers
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc flatten(list: openArray[seq[SnapProof]]): seq[SnapProof] =
for w in list:
result.add w
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc test_accountsImport*(
inList: seq[UndumpAccounts];
desc: SnapDbAccountsRef;
persistent: bool;
) =
## Import accounts
for n,w in inList:
check desc.importAccounts(w.base, w.data, persistent).isImportOk
proc test_accountsMergeProofs*(
inList: seq[UndumpAccounts];
desc: SnapDbAccountsRef;
accKeys: var seq[NodeKey];
) =
## Merge account proofs
# Load/accumulate data from several samples (needs some particular sort)
let
getFn = desc.getAccountFn
baseTag = inList.mapIt(it.base).sortMerge
packed = PackedAccountRange(
accounts: inList.mapIt(it.data.accounts).sortMerge,
proof: inList.mapIt(it.data.proof).flatten)
nAccounts = packed.accounts.len
# Merging intervals will produce gaps, so the result is expected OK but
# different from `.isImportOk`
check desc.importAccounts(baseTag, packed, true).isOk
# for debugging, make sure that state root ~ "$0"
desc.hexaDb.assignPrettyKeys(desc.root)
# Update list of accounts. There might be additional accounts in the set
# of proof nodes, typically before the `lowerBound` of each block. As
# there is a list of account ranges (that were merged for testing), one
# needs to check for additional records only on either end of a range.
var keySet = packed.accounts.mapIt(it.accKey).toHashSet
for w in inList:
var key = desc.prevAccountsChainDbKey(w.data.accounts[0].accKey, getFn)
while key.isOk and key.value notin keySet:
keySet.incl key.value
let newKey = desc.prevAccountsChainDbKey(key.value, getFn)
check newKey != key
key = newKey
key = desc.nextAccountsChainDbKey(w.data.accounts[^1].accKey, getFn)
while key.isOk and key.value notin keySet:
keySet.incl key.value
let newKey = desc.nextAccountsChainDbKey(key.value, getFn)
check newKey != key
key = newKey
accKeys = toSeq(keySet).mapIt(it.to(NodeTag)).sorted(cmp)
.mapIt(it.to(NodeKey))
# Some database samples have a few more account keys which come in via
# the proof nodes.
check nAccounts <= accKeys.len
# Verify against table importer
let
xDb = HexaryTreeDbRef.init() # Can dump database with `.pp(xDb)`
rc = xDb.fromPersistent(desc.root, getFn, accKeys.len + 100)
check rc == Result[int,HexaryError].ok(accKeys.len)
proc test_accountsRevisitStoredItems*(
accKeys: seq[NodeKey];
desc: SnapDbAccountsRef;
noisy = false;
) =
## Revisit stored items on ChainDBRef
let
getFn = desc.getAccountFn
var
nextAccount = accKeys[0]
prevAccount: NodeKey
count = 0
for accKey in accKeys:
count.inc
let
pfx = $count & "#"
byChainDB = desc.getAccountsData(accKey, persistent=true)
byNextKey = desc.nextAccountsChainDbKey(accKey, getFn)
byPrevKey = desc.prevAccountsChainDbKey(accKey, getFn)
if byChainDB.isErr:
noisy.say "*** find",
"<", count, "> byChainDb=", byChainDB.pp
check byChainDB.isOk
# Check `next` traversal functionality. If `byNextKey.isOk` fails, the
# `nextAccount` value is still the old one and will be different from
# the account in the next for-loop cycle (if any.)
check pfx & accKey.pp(false) == pfx & nextAccount.pp(false)
if byNextKey.isOk:
nextAccount = byNextKey.get(otherwise = NodeKey.default)
# Check `prev` traversal functionality
if prevAccount != NodeKey.default:
check byPrevKey.isOk
if byPrevKey.isOk:
check pfx & byPrevKey.value.pp(false) == pfx & prevAccount.pp(false)
prevAccount = accKey
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@@ -1,210 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Snap sync components tester and TDD environment
import
std/[random, sequtils],
eth/common,
stew/byteutils,
unittest2,
../../nimbus/sync/[handlers, protocol],
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[hexary_desc, hexary_range],
./test_helpers
const
accObjRlpMin = 70 # min size of an encoded `Account()` obj
accObjRlpMax = 110 # max size of an encoded `Account()` obj
var
accBlobs: array[accObjRlpMax - accObjRlpMin + 1, Blob]
brNode = XNodeObj(kind: Branch)
nodeBlob: Blob
# ------------------------------------------------------------------------------
# Private helpers for `test_calcAccountsListSizes()`
# ------------------------------------------------------------------------------
proc `==`(a,b: ChainId): bool {.borrow.}
## helper for `test_calcBlockBodyTranscode()`
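# Editor's aside: `{.borrow.}` lifts the underlying type's operator
# onto a distinct type (here `ChainId`), e.g.
#   type Meters = distinct int
#   proc `==`(a, b: Meters): bool {.borrow.}
#   doAssert Meters(3) == Meters(3)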
# ------------------
proc randAccSize(r: var Rand): int =
## Pick a random account size
accObjRlpMin + r.rand(accBlobs.len - 1)
proc accBlob(n: int): Blob =
let inx = n - accObjRlpMin
if 0 <= inx and inx < accBlobs.len:
accBlobs[inx]
else:
@[]
proc initAccBlobs() =
if accBlobs[0].len == 0:
let ffAccLen = Account(
storageRoot: Hash256(data: high(UInt256).toBytesBE),
codeHash: Hash256(data: high(UInt256).toBytesBE),
nonce: high(uint64),
balance: high(UInt256)).encode.len
check accObjRlpMin == Account().encode.len
check accObjRlpMax == ffAccLen
# Initialise
for n in 0 ..< accBlobs.len:
accBlobs[n] = 5.byte.repeat(accObjRlpMin + n)
# Verify
for n in 0 .. (accObjRlpMax + 2):
if accObjRlpMin <= n and n <= accObjRlpMax:
check n == accBlob(n).len
else:
check 0 == accBlob(n).len
proc accRndChain(r: var Rand; nItems: int): seq[RangeLeaf] =
for n in 0 ..< nItems:
result.add RangeLeaf(data: accBlob(r.randAccSize()))
discard result[^1].key.init (n mod 256).byte.repeat(32)
proc accRndChain(seed: int; nItems: int): seq[RangeLeaf] =
var prng = initRand(seed)
prng.accRndChain(nItems)
# ------------------------------------------------------------------------------
# Private helpers for `test_calcProofsListSizes()`
# ------------------------------------------------------------------------------
proc initBranchNodeSample() =
if nodeBlob.len == 0:
for n in 0 .. 15:
brNode.bLink[n] = high(NodeTag).to(Blob)
nodeBlob = brNode.convertTo(Blob)
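# Note that `brNode` above has all 16 branch links populated with the
# maximal 32 byte key blob, so `nodeBlob` is a worst-case (largest)
# encoding. This is what makes the `hexaryRangeRlpNodesListSizeMax()`
# comparison in `test_calcProofsListSizes()` a genuine upper-bound check.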
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc test_calcAccountsListSizes*() =
## Verify accounts size calculation for `hexaryRangeLeafsProof()`.
initAccBlobs()
let chain = 42.accRndChain(123)
# Emulate `hexaryRangeLeafsProof()` size calculations
var sizeAccu = 0
for n in 0 ..< chain.len:
let (pairLen,listLen) =
chain[n].data.len.hexaryRangeRlpLeafListSize(sizeAccu)
check listLen == chain[0 .. n].encode.len
sizeAccu += pairLen
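# A reading of the loop above (illustration only): given the payload
# size accumulated so far in `sizeAccu`, `hexaryRangeRlpLeafListSize()`
# returns the encoded size of the current pair (`pairLen`) and the size
# of the complete RLP list once this pair is appended (`listLen`); the
# `check` compares the latter against an actual `encode` of the prefix
# `chain[0 .. n]`.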
proc test_calcProofsListSizes*() =
## RLP proof list sizes cannot be checked statically, so verify at runtime ..
initBranchNodeSample()
for n in [0, 1, 2, 126, 127]:
let
nodeSample = nodeBlob.to(SnapProof).repeat(n)
nodeBlobsEncoded = nodeSample.proofEncode
nodeBlobsDecoded = nodeBlobsEncoded.proofDecode
nodeBlobsHex = nodeBlobsEncoded.toHex
brNodesHex = brNode.repeat(n).convertTo(Blob).toHex
#echo "+++ ", n, " ", nodeBlobsEncoded.rlpFromBytes.inspect
#echo ">>> ", n, " ", nodeBlobsHex
#echo "<<< ", n, " ", brNodesHex
check nodeBlobsEncoded.len == n.hexaryRangeRlpNodesListSizeMax
check nodeBlobsDecoded == nodeSample
check nodeBlobsHex == brNodesHex
proc test_calcTrieNodeTranscode*() =
## RLP encode/decode a list of `SnapTriePaths` objects
let
raw = @[
# Accounts
SnapTriePaths(accPath: @[1.byte]),
SnapTriePaths(accPath: @[2.byte]),
SnapTriePaths(accPath: @[3.byte]),
# Storage slots
SnapTriePaths(
accPath: 4.u256.NodeTag.to(Blob),
slotPaths: @[@[4.byte,1.byte], @[4.byte,2.byte], @[4.byte,3.byte]]),
SnapTriePaths(
accPath: 5.u256.NodeTag.to(Blob),
slotPaths: @[@[5.byte,4.byte], @[5.byte,5.byte], @[5.byte,6.byte]]),
SnapTriePaths(
accPath: 6.u256.NodeTag.to(Blob),
slotPaths: @[@[6.byte,7.byte], @[6.byte,8.byte], @[6.byte,9.byte]]),
# Accounts contd.
SnapTriePaths(accPath: @[7.byte]),
SnapTriePaths(accPath: @[8.byte]),
SnapTriePaths(accPath: @[9.byte])]
cured = @[
@[@[1.byte]],
@[@[2.byte]],
@[@[3.byte]],
@[4.u256.NodeTag.to(Blob),
@[4.byte,1.byte], @[4.byte,2.byte], @[4.byte,3.byte]],
@[5.u256.NodeTag.to(Blob),
@[5.byte,4.byte], @[5.byte,5.byte], @[5.byte,6.byte]],
@[6.u256.NodeTag.to(Blob),
@[6.byte,7.byte], @[6.byte,8.byte], @[6.byte,9.byte]],
@[@[7.byte]],
@[@[8.byte]],
@[@[9.byte]]]
# cook it
proc append(w: var RlpWriter; p: SnapTriePaths) {.used.} =
w.snapAppend p
let cooked = rlp.encode raw
check cooked == rlp.encode cured
# reverse
proc read(rlp: var Rlp; T: type SnapTriePaths): T {.used.} =
rlp.snapRead T
check raw == rlp.decode(cooked, seq[SnapTriePaths])
check cured == rlp.decode(cooked, seq[seq[Blob]])
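# The locally scoped `append`/`read` overloads above are the usual
# nim-eth idiom for injecting a custom wire encoding: `rlp.encode` and
# `rlp.decode` pick up overloads in scope at compile time, so the snap
# specific `snapAppend`/`snapRead` codecs are used without touching the
# generic serialiser.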
proc test_calcBlockBodyTranscode*() =
## RLP encode/decode a list of `BlockBody` objects. Note that there is/was a
## problem in `eth/common/eth_types_rlp.append()` for `BlockBody` encoding.
let blkSeq = @[
BlockBody(
transactions: @[
Transaction(nonce: 1)]),
BlockBody(
uncles: @[
BlockHeader(nonce: [0x20u8,0,0,0,0,0,0,0])]),
BlockBody(),
BlockBody(
transactions: @[
Transaction(nonce: 3),
Transaction(nonce: 4)])]
let trBlkSeq = blkSeq.encode.decode(typeof blkSeq)
check trBlkSeq.len == blkSeq.len
for n in 0 ..< min(trBlkSeq.len, blkSeq.len):
check (n, trBlkSeq[n]) == (n, blkSeq[n])
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,78 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
std/times,
eth/common,
stew/interval_set,
results,
unittest2,
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/hexary_error,
../../nimbus/sync/snap/worker/db/[hexary_desc, snapdb_accounts],
../replay/pp
type
KnownStorageFailure* = seq[(string,seq[(int,HexaryError)])]
## (<sample-name> & "#" <instance>, @[(<slot-id>, <error-symbol>), ..])
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
template isImportOk*(rc: Result[SnapAccountsGaps,HexaryError]): bool =
if rc.isErr:
check rc.error == NothingSerious # prints an error if different
false
elif 0 < rc.value.innerGaps.len:
check rc.value.innerGaps == seq[NodeSpecs].default
false
else:
true
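# Usage sketch (illustration only): the template is meant to be fed
# straight into `check` so that a failing import prints the offending
# error or gap list instead of a bare `false`:
#
#   check desc.importAccounts(w.base, w.data, persistent=false).isImportOk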
proc lastTwo*(a: openArray[string]): seq[string] =
if 1 < a.len: @[a[^2],a[^1]] else: a.toSeq
proc isOk*(rc: ValidationResult): bool =
rc == ValidationResult.OK
# ------------------------------------------------------------------------------
# Public type conversions
# ------------------------------------------------------------------------------
proc to*(t: NodeTag; T: type Blob): T =
toSeq(t.UInt256.toBytesBE)
proc convertTo*(key: RepairKey; T: type NodeKey): T =
## Might be lossy. Check before use (best avoided altogether unless debugging)
(addr result.ByteArray32[0]).copyMem(unsafeAddr key.ByteArray33[1], 32)
# ------------------------------------------------------------------------------
# Public functions, pretty printing
# ------------------------------------------------------------------------------
proc pp*(rc: Result[Account,HexaryError]): string =
if rc.isErr: $rc.error else: rc.value.pp
proc pp*(a: NodeKey; collapse = true): string =
a.to(Hash256).pp(collapse)
proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
if noisy:
if args.len == 0:
echo "*** ", pfx
elif 0 < pfx.len and pfx[^1] != ' ':
echo pfx, " ", args.toSeq.join
else:
echo pfx, args.toSeq.join
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,230 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Snap sync components tester and TDD environment
import
std/sequtils,
eth/[common, p2p],
unittest2,
../../nimbus/db/core_db,
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[
hexary_desc, hexary_inspect, hexary_paths, snapdb_accounts, snapdb_desc],
../replay/[pp, undump_accounts],
./test_helpers
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc test_inspectSingleAccountsMemDb*(
inList: seq[seq[UndumpAccounts]];
memBase: SnapDbRef;
singleStats: var seq[(int,TrieNodeStat)];
) =
## Fingerprinting single accounts lists for in-memory-db (modifies
## `singleStats`)
for n,accList in inList:
# Separate storage
let
root = accList[0].root
rootKey = root.to(NodeKey)
desc = SnapDbAccountsRef.init(memBase, root, Peer())
for w in accList:
check desc.importAccounts(w.base, w.data, persistent=false).isImportOk
let stats = desc.hexaDb.hexaryInspectTrie(rootKey)
check not stats.stopped
let
dangling = stats.dangling.mapIt(it.partialPath)
keys = dangling.hexaryPathNodeKeys(
rootKey, desc.hexaDb, missingOk=true)
check dangling.len == keys.len
singleStats.add (desc.hexaDb.tab.len,stats)
# Verify piecemeal approach for `hexaryInspectTrie()` ...
var
ctx = TrieNodeStatCtxRef()
piecemeal: HashSet[Blob]
while not ctx.isNil:
let stat2 = desc.hexaDb.hexaryInspectTrie(
rootKey, resumeCtx=ctx, suspendAfter=128)
check not stat2.stopped
ctx = stat2.resumeCtx
piecemeal.incl stat2.dangling.mapIt(it.partialPath).toHashSet
# Must match earlier all-in-one result
check dangling.len == piecemeal.len
check dangling.toHashSet == piecemeal
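# The resume pattern above is the general template for segmented trie
# scans: seed the loop with a fresh `TrieNodeStatCtxRef`, re-invoke
# `hexaryInspectTrie()` with `resumeCtx` and a `suspendAfter` budget,
# and stop once the returned context comes back `nil`. The union of the
# per-segment dangling sets must equal the single-pass result.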
proc test_inspectSingleAccountsPersistent*(
inList: seq[seq[UndumpAccounts]];
dbSlotCb: proc(n: int): SnapDbRef;
singleStats: seq[(int,TrieNodeStat)];
) =
## Fingerprinting single accounts lists for persistent db
for n,accList in inList:
let
root = accList[0].root
rootKey = root.to(NodeKey)
dbBase = n.dbSlotCb
if dbBase.isNil:
break
# Separate storage on persistent DB (leaving first db slot empty)
let desc = SnapDbAccountsRef.init(dbBase, root, Peer())
for w in accList:
check desc.importAccounts(w.base,w.data, persistent=true).isImportOk
let stats = desc.getAccountFn.hexaryInspectTrie(rootKey)
check not stats.stopped
let
dangling = stats.dangling.mapIt(it.partialPath)
keys = dangling.hexaryPathNodeKeys(
rootKey, desc.hexaDb, missingOk=true)
check dangling.len == keys.len
# Must be the same as the in-memory fingerprint
let ssn1 = singleStats[n][1].dangling.mapIt(it.partialPath)
check ssn1.toHashSet == dangling.toHashSet
# Verify piecemeal approach for `hexaryInspectTrie()` ...
var
ctx = TrieNodeStatCtxRef()
piecemeal: HashSet[Blob]
while not ctx.isNil:
let stat2 = desc.getAccountFn.hexaryInspectTrie(
rootKey, resumeCtx=ctx, suspendAfter=128)
check not stat2.stopped
ctx = stat2.resumeCtx
piecemeal.incl stat2.dangling.mapIt(it.partialPath).toHashSet
# Must match earlier all-in-one result
check dangling.len == piecemeal.len
check dangling.toHashSet == piecemeal
proc test_inspectAccountsInMemDb*(
inList: seq[seq[UndumpAccounts]];
memBase: SnapDbRef;
accuStats: var seq[(int,TrieNodeStat)];
) =
## Fingerprinting accumulated accounts for in-memory-db (updates `accuStats`)
let memDesc = SnapDbAccountsRef.init(memBase, Hash256(), Peer())
for n,accList in inList:
# Accumulated storage
let
root = accList[0].root
rootKey = root.to(NodeKey)
desc = memDesc.dup(root,Peer())
for w in accList:
check desc.importAccounts(w.base, w.data, persistent=false).isImportOk
let stats = desc.hexaDb.hexaryInspectTrie(rootKey)
check not stats.stopped
let
dangling = stats.dangling.mapIt(it.partialPath)
keys = dangling.hexaryPathNodeKeys(
rootKey, desc.hexaDb, missingOk=true)
check dangling.len == keys.len
accuStats.add (desc.hexaDb.tab.len, stats)
proc test_inspectAccountsPersistent*(
inList: seq[seq[UndumpAccounts]];
cdb: CoreDbRef;
accuStats: seq[(int,TrieNodeStat)];
) =
## Fingerprinting accumulated accounts for persistent db
let
perBase = SnapDbRef.init(cdb)
perDesc = SnapDbAccountsRef.init(perBase, Hash256(), Peer())
for n,accList in inList:
# Accumulated storage on persistent DB (using first db slot)
let
root = accList[0].root
rootKey = root.to(NodeKey)
desc = perDesc.dup(root,Peer())
for w in accList:
check desc.importAccounts(w.base, w.data, persistent=true).isImportOk
let stats = desc.getAccountFn.hexaryInspectTrie(rootKey)
check not stats.stopped
let
dangling = stats.dangling.mapIt(it.partialPath)
keys = dangling.hexaryPathNodeKeys(
rootKey, desc.hexaDb, missingOk=true)
check dangling.len == keys.len
check accuStats[n][1] == stats
proc test_inspectCascadedMemDb*(
inList: seq[seq[UndumpAccounts]];
) =
## Cascaded fingerprinting accounts for in-memory-db
let
cscBase = SnapDbRef.init(newCoreDbRef LegacyDbMemory)
cscDesc = SnapDbAccountsRef.init(cscBase, Hash256(), Peer())
var
cscStep: Table[NodeKey,(int,seq[Blob])]
for n,accList in inList:
# Accumulated storage
let
root = accList[0].root
rootKey = root.to(NodeKey)
desc = cscDesc.dup(root,Peer())
for w in accList:
check desc.importAccounts(w.base, w.data, persistent=false).isImportOk
if cscStep.hasKeyOrPut(rootKey, (1, seq[Blob].default)):
cscStep[rootKey][0].inc
let
stat0 = desc.hexaDb.hexaryInspectTrie(rootKey)
stats = desc.hexaDb.hexaryInspectTrie(rootKey, cscStep[rootKey][1])
check not stat0.stopped
check not stats.stopped
let
accumulated = stat0.dangling.mapIt(it.partialPath).toHashSet
cascaded = stats.dangling.mapIt(it.partialPath).toHashSet
check accumulated == cascaded
# Make sure that there are no trivial cases
let trivialCases = toSeq(cscStep.values).filterIt(it[0] <= 1).len
check trivialCases == 0
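# Note on the counting idiom above (used again in the persistent variant
# below): `hasKeyOrPut()` returns true when the key was already present,
# leaving the stored value untouched, so the `inc` only fires for roots
# seen before. This is also why the trivial-case filter `it[0] <= 1` can
# detect samples that never exercised the cascaded path.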
proc test_inspectCascadedPersistent*(
inList: seq[seq[UndumpAccounts]];
cdb: CoreDbRef;
) =
## Cascaded fingerprinting accounts for persistent db
let
cscBase = SnapDbRef.init(cdb)
cscDesc = SnapDbAccountsRef.init(cscBase, Hash256(), Peer())
var
cscStep: Table[NodeKey,(int,seq[Blob])]
for n,accList in inList:
# Accumulated storage
let
root = accList[0].root
rootKey = root.to(NodeKey)
desc = cscDesc.dup(root, Peer())
for w in accList:
check desc.importAccounts(w.base, w.data, persistent=true).isImportOk
if cscStep.hasKeyOrPut(rootKey, (1, seq[Blob].default)):
cscStep[rootKey][0].inc
let
stat0 = desc.getAccountFn.hexaryInspectTrie(rootKey)
stats = desc.getAccountFn.hexaryInspectTrie(rootKey, cscStep[rootKey][1])
check not stat0.stopped
check not stats.stopped
let
accumulated = stat0.dangling.mapIt(it.partialPath).toHashSet
cascaded = stats.dangling.mapIt(it.partialPath).toHashSet
check accumulated == cascaded
# Make sure that there are no trivial cases
let trivialCases = toSeq(cscStep.values).filterIt(it[0] <= 1).len
check trivialCases == 0
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,565 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Snap sync components tester and TDD environment
import
std/[sequtils, sets, strformat, strutils],
eth/[common, p2p, trie/nibbles],
stew/[byteutils, interval_set],
results,
unittest2,
../../nimbus/sync/[handlers, protocol, types],
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[
hexary_debug, hexary_desc, hexary_envelope, hexary_error,
hexary_interpolate, hexary_nearby, hexary_paths, hexary_range,
snapdb_accounts, snapdb_debug, snapdb_desc],
../replay/[pp, undump_accounts],
./test_helpers
const
cmaNlSp0 = ",\n" & repeat(" ",12)
cmaNlSpc = ",\n" & repeat(" ",13)
# ------------------------------------------------------------------------------
# Private functions, pretty printing
# ------------------------------------------------------------------------------
proc ppNodeKeys(a: openArray[SnapProof]; dbg = HexaryTreeDbRef(nil)): string =
result = "["
if dbg.isNil:
result &= a.mapIt(it.to(Blob).digestTo(NodeKey).pp(collapse=true)).join(",")
else:
result &= a.mapIt(it.to(Blob).digestTo(NodeKey).pp(dbg)).join(",")
result &= "]"
proc ppHexPath(p: RPath|XPath; dbg = HexaryTreeDbRef(nil)): string =
if dbg.isNil:
"*pretty printing disabled*"
else:
p.pp(dbg)
proc pp(a: NodeTag; collapse = true): string =
a.to(NodeKey).pp(collapse)
proc pp(iv: NodeTagRange; collapse = false): string =
"(" & iv.minPt.pp(collapse) & "," & iv.maxPt.pp(collapse) & ")"
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc print_data(
pfx: Blob;
pfxLen: int;
ivMin: NibblesSeq;
firstTag: NodeTag;
lastTag: NodeTag;
ivMax: NibblesSeq;
gaps: NodeTagRangeSet;
gapPaths: seq[NodeTagRange];
info: string;
) =
echo ">>>", info, " pfxMax=", pfxLen,
"\n pfx=", pfx, "/", ivMin.slice(0,pfxLen).hexPrefixEncode,
"\n ivMin=", ivMin,
"\n firstTag=", firstTag,
"\n lastTag=", lastTag,
"\n ivMax=", ivMax,
"\n gaps=@[", toSeq(gaps.increasing)
.mapIt(&"[{it.minPt}{cmaNlSpc}{it.maxPt}]")
.join(cmaNlSp0), "]",
"\n gapPaths=@[", gapPaths
.mapIt(&"[{it.minPt}{cmaNlSpc}{it.maxPt}]")
.join(cmaNlSp0), "]"
proc print_data(
pfx: Blob;
qfx: seq[NodeSpecs];
iv: NodeTagRange;
firstTag: NodeTag;
lastTag: NodeTag;
rootKey: NodeKey;
db: HexaryTreeDbRef|HexaryGetFn;
dbg: HexaryTreeDbRef;
) =
echo "***",
"\n qfx=@[", qfx
.mapIt(&"({it.partialPath.toHex},{it.nodeKey.pp(dbg)})")
.join(cmaNlSpc), "]",
"\n ivMin=", iv.minPt,
"\n ", iv.minPt.hexaryPath(rootKey,db).pp(dbg), "\n",
"\n firstTag=", firstTag,
"\n ", firstTag.hexaryPath(rootKey,db).pp(dbg), "\n",
"\n lastTag=", lastTag,
"\n ", lastTag.hexaryPath(rootKey,db).pp(dbg), "\n",
"\n ivMax=", iv.maxPt,
"\n ", iv.maxPt.hexaryPath(rootKey,db).pp(dbg), "\n",
"\n pfxMax=", pfx.hexaryEnvelope.maxPt,
"\n ", pfx.hexaryEnvelope.maxPt.hexaryPath(rootKey,db).pp(dbg)
proc printCompareRightLeafs(
rootKey: NodeKey;
baseTag: NodeTag;
accounts: seq[PackedAccount];
leafs: seq[RangeLeaf];
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
dbg: HexaryTreeDbRef; ## Debugging env
) =
let
noisy = not dbg.isNil
var
top = 0
nMax = min(accounts.len, leafs.len)
step = nMax div 2
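# Bisection-style scan: advance `top` in decreasing `step` sized jumps
# while `accounts` and `leafs` still agree, so the first mismatching
# index is located without comparing every entry; the linear pass below
# then reports the offending item.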
while top < nMax:
while 1 < step and accounts[top+step].accKey != leafs[top+step].key:
#noisy.say "***", "i=", top+step, " fail"
step = max(1, step div 2)
if accounts[top+step].accKey == leafs[top+step].key:
top += step
step = max(1, step div 2)
noisy.say "***", "i=", top, " step=", step, " ok"
continue
let start = top
top = nMax
for i in start ..< top:
if accounts[i].accKey == leafs[i].key:
noisy.say "***", "i=", i, " skip, ok"
continue
# Diagnostics and return
check (i,accounts[i].accKey) == (i,leafs[i].key)
let
lfsKey = leafs[i].key
accKey = accounts[i].accKey
prdKey = if 0 < i: accounts[i-1].accKey else: baseTag.to(NodeKey)
nxtTag = if 0 < i: prdKey.to(NodeTag) + 1.u256 else: baseTag
nxtPath = nxtTag.hexaryPath(rootKey,db)
rightRc = nxtPath.hexaryNearbyRight(db)
if rightRc.isOk:
check lfsKey == rightRc.value.getPartialPath.convertTo(NodeKey)
else:
check rightRc.error == HexaryError(0) # force error printing
if noisy: true.say "\n***", "i=", i, "/", accounts.len,
"\n\n prdKey=", prdKey,
"\n ", prdKey.hexaryPath(rootKey,db).pp(dbg),
"\n\n nxtKey=", nxtTag,
"\n ", nxtPath.pp(dbg),
"\n\n accKey=", accKey,
"\n ", accKey.hexaryPath(rootKey,db).pp(dbg),
"\n\n lfsKey=", lfsKey,
"\n ", lfsKey.hexaryPath(rootKey,db).pp(dbg),
"\n"
return
proc printCompareLeftNearby(
rootKey: NodeKey;
leftKey: NodeKey;
rightKey: NodeKey;
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
dbg: HexaryTreeDbRef; ## Debugging env
) =
let
noisy = not dbg.isNil
rightPath = rightKey.hexaryPath(rootKey,db)
toLeftRc = rightPath.hexaryNearbyLeft(db)
var
toLeftKey: NodeKey
if toLeftRc.isErr:
check toLeftRc.error == HexaryError(0) # force error printing
else:
toLeftKey = toLeftRc.value.getPartialPath.convertTo(NodeKey)
if toLeftKey == leftKey:
return
if noisy: true.say "\n***",
" rightKey=", rightKey,
"\n ", rightKey.hexaryPath(rootKey,db).pp(dbg),
"\n\n leftKey=", leftKey,
"\n ", leftKey.hexaryPath(rootKey,db).pp(dbg),
"\n\n toLeftKey=", toLeftKey,
"\n ", toLeftKey.hexaryPath(rootKey,db).pp(dbg),
"\n"
proc verifyRangeProof(
rootKey: NodeKey;
baseTag: NodeTag;
leafs: seq[RangeLeaf];
proof: seq[SnapProof];
dbg = HexaryTreeDbRef(nil);
leafBeforeBase = true;
): Result[void,HexaryError] =
## Re-build temporary database and prove or disprove
let
noisy = dbg.isNil.not
xDb = HexaryTreeDbRef()
if not dbg.isNil:
xDb.keyPp = dbg.keyPp
result = ok()
block verify:
let leaf0Tag = leafs[0].key.to(NodeTag)
# Import proof nodes
result = xDb.mergeProofs(rootKey, proof)
if result.isErr:
check result == Result[void,HexaryError].ok()
break verify
# Build tree
var lItems = leafs.mapIt(RLeafSpecs(
pathTag: it.key.to(NodeTag),
payload: it.data))
result = xDb.hexaryInterpolate(rootKey, lItems)
if result.isErr:
check result == Result[void,HexaryError].ok()
break verify
# Left proof
result = xDb.verifyLowerBound(rootKey, baseTag, leaf0Tag)
if result.isErr:
check result == Result[void,HexaryError].ok()
break verify
# Inflated interval around first point
block:
let iv0 = xDb.hexaryRangeInflate(rootKey, leaf0Tag)
# Verify left end
if baseTag == low(NodeTag):
if iv0.minPt != low(NodeTag):
check iv0.minPt == low(NodeTag)
result = Result[void,HexaryError].err(NearbyFailed)
break verify
elif leafBeforeBase:
check iv0.minPt < baseTag
# Verify right end
if 1 < leafs.len:
if iv0.maxPt + 1.u256 != leafs[1].key.to(NodeTag):
check iv0.maxPt + 1.u256 == leafs[1].key.to(NodeTag)
result = Result[void,HexaryError].err(NearbyFailed)
break verify
# Inflated interval around last point
if 1 < leafs.len:
let
uPt = leafs[^1].key.to(NodeTag)
ivX = xDb.hexaryRangeInflate(rootKey, uPt)
# Verify left end
if leafs[^2].key.to(NodeTag) != ivX.minPt - 1.u256:
check leafs[^2].key.to(NodeTag) == ivX.minPt - 1.u256
result = Result[void,HexaryError].err(NearbyFailed)
break verify
# Verify right end
if uPt < high(NodeTag):
let
uPt1 = uPt + 1.u256
rx = uPt1.hexaryPath(rootKey,xDb).hexaryNearbyRightMissing(xDb)
ry = uPt1.hexaryNearbyRight(rootKey, xDb)
if rx.isErr:
if ry.isOk:
check rx.isErr and ry.isErr
result = Result[void,HexaryError].err(NearbyFailed)
break verify
elif rx.value != ry.isErr:
check rx.value == ry.isErr
result = Result[void,HexaryError].err(NearbyFailed)
break verify
if rx.get(otherwise=false):
if ivX.minPt + 1.u256 != high(NodeTag):
check ivX.minPt + 1.u256 == high(NodeTag)
result = Result[void,HexaryError].err(NearbyFailed)
break verify
return ok()
if noisy:
true.say "\n***", "error=", result.error,
#"\n",
#"\n unrefs=[", unrefs.toSeq.mapIt(it.pp(dbg)).join(","), "]",
#"\n refs=[", refs.toSeq.mapIt(it.pp(dbg)).join(","), "]",
"\n\n proof=", proof.ppNodeKeys(dbg),
"\n\n first=", leafs[0].key,
"\n ", leafs[0].key.hexaryPath(rootKey,xDb).pp(dbg),
"\n\n last=", leafs[^1].key,
"\n ", leafs[^1].key.hexaryPath(rootKey,xDb).pp(dbg),
"\n\n database dump",
"\n ", xDb.pp(rootKey),
"\n"
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc test_NodeRangeDecompose*(
accKeys: seq[NodeKey]; ## Accounts key range
root: Hash256; ## State root
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
dbg: HexaryTreeDbRef; ## Debugging env
) =
## Testing body for `hexary_nearby` and `hexary_envelope` tests
# The base data from above cannot be relied upon as there might be
# stray account nodes in the proof *before* the left boundary.
doAssert 2 < accKeys.len
let
rootKey = root.to(NodeKey)
baseTag = accKeys[0].to(NodeTag) + 1.u256
firstTag = baseTag.hexaryNearbyRight(rootKey, db).get(
otherwise = low(NodeTag))
lastTag = accKeys[^2].to(NodeTag)
topTag = accKeys[^1].to(NodeTag) - 1.u256
# Verify set up
check baseTag < firstTag
check firstTag < lastTag
check lastTag < topTag
# Verify right boundary proof function (left boundary is
# correct by definition of `firstTag`.)
check lastTag == topTag.hexaryNearbyLeft(rootKey, db).get(
otherwise = high(NodeTag))
# Construct test range
let
iv = NodeTagRange.new(baseTag, topTag)
ivMin = iv.minPt.to(NodeKey).ByteArray32.toSeq.initNibbleRange
ivMax = iv.maxPt.to(NodeKey).ByteArray32.toSeq.initNibbleRange
pfxLen = ivMin.sharedPrefixLen ivMax
# Use some overlapping prefixes. Note that a prefix must refer to
# an existing node
for n in 0 .. pfxLen:
let
pfx = ivMin.slice(0, pfxLen - n).hexPrefixEncode
qfx = block:
let rc = pfx.hexaryEnvelopeDecompose(rootKey, iv, db)
check rc.isOk
if rc.isOk:
rc.value
else:
seq[NodeSpecs].default
# Assemble possible gaps in decomposed envelope `qfx`
let gaps = NodeTagRangeSet.init()
# Start with full envelope and remove decomposed enveloped from `qfx`
discard gaps.merge pfx.hexaryEnvelope
# There are no node points between `iv.minPt` (aka base) and the first
# account `firstTag`, and none between `lastTag` and `iv.maxPt`. So only
# the interval `[firstTag,lastTag]` is to be fully covered by `gaps`.
block:
let iw = NodeTagRange.new(firstTag, lastTag)
check iw.len == gaps.reduce iw
for w in qfx:
# The envelope of `w` must be fully contained in `gaps`
let iw = w.partialPath.hexaryEnvelope
check iw.len == gaps.reduce iw
# Remove the space between the start of `iv` and the first account
# key (if any).
if iv.minPt < firstTag:
discard gaps.reduce(iv.minPt, firstTag-1.u256)
# There are no node points between `lastTag` and `iv.maxPt`
if lastTag < iv.maxPt:
discard gaps.reduce(lastTag+1.u256, iv.maxPt)
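# At this point the set algebra should have cancelled out completely:
# the envelope of `pfx` minus the envelopes of all decomposed nodes in
# `qfx` minus the two provably empty edge ranges must leave nothing
# behind, which the loop below verifies by probing for leaf nodes.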
# All gaps must be empty intervals
var gapPaths: seq[NodeTagRange]
for w in gaps.increasing:
let rc = w.minPt.hexaryPath(rootKey,db).hexaryNearbyRight(db)
if rc.isOk:
var firstTag = rc.value.getPartialPath.convertTo(NodeTag)
# The point `firstTag` might be zero if there is a missing node in
# between, so the advance to the next key could not be completed.
if w.minPt <= firstTag:
# The interval `w` starts before the first interval
if firstTag <= w.maxPt:
# Make sure that there is no leaf node in the range
gapPaths.add w
continue
# Some sub-tries might not exist, which leads to gaps
let
wMin = w.minPt.to(NodeKey).ByteArray32.toSeq.initNibbleRange
wMax = w.maxPt.to(NodeKey).ByteArray32.toSeq.initNibbleRange
nPfx = wMin.sharedPrefixLen wMax
for nibble in wMin[nPfx] .. wMax[nPfx]:
let wPfy = wMin.slice(0,nPfx) & @[nibble].initNibbleRange.slice(1)
if wPfy.hexaryPathNodeKey(rootKey, db, missingOk=true).isOk:
gapPaths.add wPfy.hexPrefixEncode.hexaryEnvelope
# Verify :)
check gapPaths == seq[NodeTagRange].default
when false: # or true:
print_data(
pfx, pfxLen, ivMin, firstTag, lastTag, ivMax, gaps, gapPaths, "n=" & $n)
print_data(
pfx, qfx, iv, firstTag, lastTag, rootKey, db, dbg)
if true: quit()
proc test_NodeRangeProof*(
inLst: seq[UndumpAccounts];
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
dbg = HexaryTreeDbRef(nil); ## Debugging env
) =
## Partition range and provide proofs suitable for the `GetAccountRange`
## message from the `snap/1` protocol.
let
rootKey = inLst[0].root.to(NodeKey)
noisy = not dbg.isNil
maxLen = high(int) # set it lower for debugging (e.g. 5 for a small sample)
# Assuming the `inLst` entries have been stored in the DB already
for n,w in inLst:
doAssert 1 < w.data.accounts.len
let
first = w.data.accounts[0].accKey.to(NodeTag)
delta = (w.data.accounts[1].accKey.to(NodeTag) - first) div 2
# Use the middle of the first two points as base unless `w.base` is zero.
# This is needed as the range extractor needs the node before the `base`
# (if there is any) in order to assemble the proof. But this node might
# not be present in the partial database.
(base, start) = if w.base == low(NodeTag): (w.base, 0)
else: (first + delta, 1)
# Assemble accounts list starting at the second item
accounts = w.data.accounts[start ..< min(w.data.accounts.len,maxLen)]
iv = NodeTagRange.new(base, accounts[^1].accKey.to(NodeTag))
rc = db.hexaryRangeLeafsProof(rootKey, iv)
check rc.isOk
if rc.isErr:
return
# Run over sub-samples of the given account range
var subCount = 0
for cutOff in {0, 2, 5, 10, 16, 23, 77}:
# Take sub-samples but not too small
if 0 < cutOff and rc.value.leafs.len < cutOff + 5:
break # remaining cases ignored
subCount.inc
let
leafs = rc.value.leafs[0 ..< rc.value.leafs.len - cutOff]
leafsRlpLen = leafs.encode.len
var
proof: seq[SnapProof]
# Calculate proof
if cutOff == 0:
if leafs.len != accounts.len or accounts[^1].accKey != leafs[^1].key:
noisy.say "***", "n=", n, " something went wrong .."
check (n,leafs.len) == (n,accounts.len)
rootKey.printCompareRightLeafs(base, accounts, leafs, db, dbg)
return
proof = rc.value.proof
# Some sizes to verify (full data list)
check rc.value.proofSize == proof.proofEncode.len
check rc.value.leafsSize == leafsRlpLen
else:
# Make sure that the size calculation delivers the expected number
# of entries.
let rx = db.hexaryRangeLeafsProof(rootKey, iv, leafsRlpLen + 1)
check rx.isOk
if rx.isErr:
return
check rx.value.leafs.len == leafs.len
# Some size to verify (truncated data list)
check rx.value.proofSize == rx.value.proof.proofEncode.len
# Re-adjust proof
proof = db.hexaryRangeLeafsProof(rootKey, rx.value).proof
# Import proof nodes and build trie
block:
var rx = rootKey.verifyRangeProof(base, leafs, proof)
if rx.isErr:
rx = rootKey.verifyRangeProof(base, leafs, proof, dbg)
let
baseNbls = iv.minPt.to(NodeKey).to(NibblesSeq)
lastNbls = iv.maxPt.to(NodeKey).to(NibblesSeq)
nPfxNblsLen = baseNbls.sharedPrefixLen lastNbls
pfxNbls = baseNbls.slice(0, nPfxNblsLen)
noisy.say "***", "n=", n,
" cutOff=", cutOff,
" leafs=", leafs.len,
" proof=", proof.ppNodeKeys(dbg),
"\n\n ",
" base=", iv.minPt,
"\n ", iv.minPt.hexaryPath(rootKey,db).ppHexPath(dbg),
"\n\n ",
" pfx=", pfxNbls,
" nPfx=", nPfxNblsLen,
"\n ", pfxNbls.hexaryPath(rootKey,db).ppHexPath(dbg),
"\n"
check rx == typeof(rx).ok()
return
noisy.say "***", "n=", n,
" leafs=", rc.value.leafs.len,
" proof=", rc.value.proof.len, "/", w.data.proof.len,
" sub-samples=", subCount
proc test_NodeRangeLeftBoundary*(
inLst: seq[UndumpAccounts];
db: HexaryTreeDbRef|HexaryGetFn; ## Database abstraction
dbg = HexaryTreeDbRef(nil); ## Debugging env
) =
## Verify left side boundary checks
let
rootKey = inLst[0].root.to(NodeKey)
noisy {.used.} = not dbg.isNil
# Assuming the `inLst` entries have been stored in the DB already
for n,w in inLst:
let accounts = w.data.accounts
for i in 1 ..< accounts.len:
let
leftKey = accounts[i-1].accKey
rightKey = (accounts[i].accKey.to(NodeTag) - 1.u256).to(NodeKey)
toLeftRc = rightKey.hexaryPath(rootKey,db).hexaryNearbyLeft(db)
if toLeftRc.isErr:
check toLeftRc.error == HexaryError(0) # force error printing
return
let toLeftKey = toLeftRc.value.getPartialPath.convertTo(NodeKey)
if leftKey != toLeftKey:
let j = i-1
check (n, j, leftKey) == (n, j, toLeftKey)
rootKey.printCompareLeftNearby(leftKey, rightKey, db, dbg)
return
# noisy.say "***", "n=", n, " accounts=", accounts.len
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,92 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Snap sync components tester and TDD environment
import
eth/[common, p2p],
unittest2,
../../nimbus/db/core_db,
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[snapdb_desc, snapdb_pivot]
when defined(windows):
import std/os
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc test_pivotStoreRead*(
accKeys: seq[NodeKey];
cdb: CoreDbRef;
) =
## Storing/retrieving items on persistent pivot/checkpoint registry
let
dbBase = SnapDbRef.init(cdb)
processed = @[(1.to(NodeTag),2.to(NodeTag)),
(4.to(NodeTag),5.to(NodeTag)),
(6.to(NodeTag),7.to(NodeTag))]
slotAccounts = seq[NodeKey].default
for n in 0 ..< accKeys.len:
let w = accKeys[n]
check dbBase.pivotSaveDB(
SnapDbPivotRegistry(
header: BlockHeader(stateRoot: w.to(Hash256)),
nAccounts: n.uint64,
nSlotLists: n.uint64,
processed: processed,
slotAccounts: slotAccounts)).isOk
when defined(windows):
# There might be a race condition on Windows (seen on qemu/win7)
sleep(50)
# verify latest state root
block:
let rc = dbBase.pivotRecoverDB()
check rc.isOk
if rc.isOk:
check rc.value.nAccounts == n.uint64
check rc.value.nSlotLists == n.uint64
check rc.value.processed == processed
# Stop gossiping (happens with a corrupted database)
if rc.value.nAccounts != n.uint64 or
rc.value.nSlotLists != n.uint64 or
rc.value.processed != processed:
return
for n in 0 ..< accKeys.len:
let w = accKeys[n]
block:
let rc = dbBase.pivotRecoverDB(w)
check rc.isOk
if rc.isOk:
check rc.value.nAccounts == n.uint64
check rc.value.nSlotLists == n.uint64
# Update record in place
check dbBase.pivotSaveDB(
SnapDbPivotRegistry(
header: BlockHeader(stateRoot: w.to(Hash256)),
nAccounts: n.uint64,
nSlotLists: 0,
processed: @[],
slotAccounts: @[])).isOk
when defined(windows):
# There might be a race condition on Windows (seen on qemu/win7)
sleep(50)
block:
let rc = dbBase.pivotRecoverDB(w)
check rc.isOk
if rc.isOk:
check rc.value.nAccounts == n.uint64
check rc.value.nSlotLists == 0
check rc.value.processed == seq[(NodeTag,NodeTag)].default
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,106 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Snap sync components tester and TDD environment
import
std/[sequtils, tables],
eth/[common, p2p],
unittest2,
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[
hexary_desc, hexary_error, hexary_inspect,
snapdb_accounts, snapdb_desc, snapdb_storage_slots],
../replay/[pp, undump_accounts, undump_storages],
./test_helpers
let
# Forces `check()` to print the error (as opposed to when using `isOk()`)
OkStoDb = Result[void,seq[(int,HexaryError)]].ok()
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc toStoDbRc(r: seq[HexaryNodeReport]): Result[void,seq[(int,HexaryError)]]=
## Kludge: map error report to (older version) return code
if r.len != 0:
return err(r.mapIt((it.slot.get(otherwise = -1),it.error)))
ok()
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc test_storageAccountsImport*(
inList: seq[UndumpAccounts];
dbBase: SnapDbRef;
persistent: bool;
) =
## Import and merge accounts lists
let
root = inList[0].root
for w in inList:
let desc = SnapDbAccountsRef.init(dbBase, root, Peer())
check desc.importAccounts(w.base, w.data, persistent).isImportOk
proc test_storageSlotsImport*(
inList: seq[UndumpStorages];
dbBase: SnapDbRef;
persistent: bool;
ignore: KnownStorageFailure;
idPfx: string;
) =
## Import and merge storages lists
let
skipEntry = ignore.toTable
dbDesc = SnapDbStorageSlotsRef.init(
dbBase, NodeKey.default, Hash256(), Peer())
for n,w in inList:
let
testId = idPfx & "#" & $n
expRc = if skipEntry.hasKey(testId):
Result[void,seq[(int,HexaryError)]].err(skipEntry[testId])
else:
OkStoDb
check dbDesc.importStorageSlots(w.data, persistent).toStoDbRc == expRc
proc test_storageSlotsTries*(
inList: seq[UndumpStorages];
dbBase: SnapDbRef;
persistent: bool;
ignore: KnownStorageFailure;
idPfx: string;
) =
## Inspecting imported storages lists sub-tries
let
skipEntry = ignore.toTable
for n,w in inList:
let
testId = idPfx & "#" & $n
errInx = if skipEntry.hasKey(testId): skipEntry[testId][0][0]
else: high(int)
for m in 0 ..< w.data.storages.len:
let
accKey = w.data.storages[m].account.accKey
root = w.data.storages[m].account.storageRoot
dbDesc = SnapDbStorageSlotsRef.init(dbBase, accKey, root, Peer())
rc = dbDesc.inspectStorageSlotsTrie(persistent=persistent)
if m == errInx:
check rc == Result[TrieNodeStat,HexaryError].err(TrieIsEmpty)
else:
check rc.isOk # ok => level > 0 and not stopped
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,215 +0,0 @@
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Snap sync components tester and TDD environment
import
std/[sequtils, strutils],
eth/[common, trie/db],
stew/byteutils,
unittest2,
../../nimbus/common as nimbus_common,
../../nimbus/core/chain,
../../nimbus/db/storage_types,
../../nimbus/sync/snap/worker/db/snapdb_desc,
../replay/[pp, undump_blocks, undump_kvp],
./test_helpers
type
UndumpDBKeySubType* = array[DBKeyKind.high.ord+2,int]
proc pp*(a: UndumpDBKeySubType): string
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc pp(a: ((int,int),UndumpDBKeySubType,UndumpDBKeySubType)): string =
"([" & $a[0][0] & "," & $a[0][1] & "]," & a[1].pp & "," & a[2].pp & ")"
proc pairJoin[H,B](a: openArray[(seq[H],seq[B])]): (seq[H],seq[B]) =
for w in a:
result[0] &= w[0]
result[1] &= w[1]
proc pairSplit[H,B](a: (seq[H],seq[B]); start,size: int): seq[(seq[H],seq[B])] =
let
a0Len = a[0].len
a1Len = a[1].len
minLen = min(a0Len,a1Len)
var n = start
while n < minLen:
let top = min(n + size, minLen)
result.add (a[0][n ..< top], a[1][n ..< top])
n = top
if minLen < a0Len:
result.add (a[0][minLen ..< a0Len], seq[B].default)
elif minLen < a1Len:
result.add (seq[H].default, a[1][minLen ..< a1Len])
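# Usage sketch for the two helpers above (illustration only):
#
#   let joined = @[(@[1, 2], @["a", "b"]), (@[3], @["c"])].pairJoin
#   doAssert joined == (@[1, 2, 3], @["a", "b", "c"])
#   doAssert joined.pairSplit(0, 2) == @[(@[1, 2], @["a", "b"]),
#                                        (@[3], @["c"])]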
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
proc pp*(a: UndumpDBKeySubType): string =
result = ($a).replace(" 0,",",")
.replace(" 0]","]")
.replace("[0,","[,")
.replace(", ",",")
let n = result.len
if 3 < n and result[0] == '[' and result[^1] == ']':
if result[^3] == ',' and result[^2] == ',':
var p = n-4
while result[p] == ',':
p.dec
if p == 0:
result = "[]"
else:
result = result[0 .. p] & ",]"
elif result[1] == ',' and result[2] == ',' and result[^2] != ',':
var p = 3
while result[p] == ',':
p.inc
result = "[," & result[p ..< n]
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc test_syncdbImportChainBlocks*(
chn: ChainRef;
filePath: string;
lastNumber: uint64;
noisy = true;
): uint64
{.discardable.} =
## Import block chain (intended for preparing database dumps)
var count = 0
for (h,b) in filePath.undumpBlocks:
if h.len == 1 and h[0].blockNumber.isZero:
continue
if h[^1].blockNumber < lastNumber.toBlockNumber:
check chn.persistBlocks(h,b).isOk
count.inc
if 70 < count:
noisy.say "*** import", " #", h[^1].blockNumber, ".."
count = 0
continue
var
sh: seq[BlockHeader]
sb: seq[BlockBody]
for n in 0 ..< h.len:
if lastNumber.toBlockNumber < h[n].blockNumber:
break
sh.add h[n]
sb.add b[n]
if 0 < sh.len:
check chn.persistBlocks(sh,sb).isOk
result = sh[^1].blockNumber.truncate(typeof result)
noisy.say "*** import", "ok #", result
break
proc test_syncdbImportSnapshot*(
chn: ChainRef;
filePath: string;
select = ChainRef(nil);
noisy = true;
): ((int,int), UndumpDBKeySubType, UndumpDBKeySubType)
{.discardable.} =
## Store snapshot dump. If the argument `select` is not `nil`, some
## data records are stored selectively, i.e. only if they exist in the
## database addressed by the `select` argument.
var count = 0
for w in filePath.undumpKVP():
var
key: Blob
storeOk = true
case w.kind:
of UndumpKey32:
key = w.key32.toSeq
if select.isNil or 0 < select.com.db.newKvt.backend.toLegacy.get(key).len:
result[0][0].inc
else:
storeOk = false
result[0][1].inc
of UndumpKey33:
key = w.key33.toSeq
let inx = min(w.key33[0], DBKeyKind.high.ord+1)
#if inx == contractHash.ord:
# let digest = w.data.keccakHash.data.toSeq
# check (contractHash, digest) == (contractHash, key[1..32])
#if not select.isNil:
# if inx in {3,4,5,18}:
# storeOk = false
# elif inx in {0,1,2,6} and select.com.db.db.get(key).len == 0:
# storeOk = false
if storeOk:
result[1][inx].inc
of UndumpOther:
key = w.other
let inx = min(w.other[0], DBKeyKind.high.ord+1)
result[2][inx].inc
count.inc
if (count mod 23456) == 0:
noisy.say "*** import", result.pp, ".. "
if storeOk:
chn.com.db.newKvt.backend.toLegacy.put(key, w.data)
if (count mod 23456) != 0:
noisy.say "*** import", result.pp, " ok"
proc test_syncdbAppendBlocks*(
chn: ChainRef;
filePath: string;
pivotBlock: uint64;
nItemsMax: int;
noisy = true;
) =
## Verify `seqHdr[0]` as pivot and add persistent blocks following
# Make sure that pivot header is in database
let
blkLen = 33
lastBlock = pivotBlock + max(1,nItemsMax).uint64
kvt = chn.com.db.newKvt.backend.toLegacy
# Join (headers,blocks) pair in the range pivotBlock..lastBlock
q = toSeq(filePath.undumpBlocks(pivotBlock,lastBlock)).pairJoin
pivHash = q[0][0].blockHash
pivNum = q[0][0].blockNumber
# Verify pivot
check 0 < kvt.get(pivHash.toBlockHeaderKey.toOpenArray).len
check pivHash == kvt.get(pivNum.toBlockNumberKey.toOpenArray).decode(Hash256)
# Set up genesis deputy.
chn.com.startOfHistory = pivHash
# Start after pivot and re-partition
for (h,b) in q.pairSplit(1,blkLen):
let persistentBlocksOk = chn.persistBlocks(h,b).isOk
if not persistentBlocksOk:
let (first,last) = ("#" & $h[0].blockNumber, "#" & $h[^1].blockNumber)
check (persistentBlocksOk,first,last) == (true,first,last)
break
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -1,35 +0,0 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
eth/common
type
AccountsSample* = object
name*: string ## sample name, also used as sub-directory for db separation
file*: string
firstItem*: int
lastItem*: int
CaptureSpecs* = object
name*: string ## sample name, also used as sub-directory for db separation
network*: NetworkId
file*: string ## name of capture file
numBlocks*: int ## Number of blocks to load
SnapSyncSpecs* = object
name*: string
network*: NetworkId
snapDump*: string
tailBlocks*: string
pivotBlock*: uint64
nItems*: int
# End