nimbus-eth1/tests/test_coredb.nim


# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Testing `CoreDB` wrapper implementation
import
std/[os, strformat, strutils, times],
chronicles,
eth/common,
results,
unittest2,
../nimbus/db/core_db/persistent,
../nimbus/db/ledger,
../nimbus/core/chain,
./replay/pp,
./test_coredb/[coredb_test_xx, test_chainsync, test_helpers]
const
# If `true`, this compile-time option sets up `unittest2` for manual parsing
unittest2DisableParamFiltering {.booldefine.} = false
baseDir = [".", "..", ".."/"..", $DirSep]
repoDir = [".", "tests", "nimbus-eth1-blobs"]
subDir = ["replay", "test_coredb", "custom-network", "customgenesis"]
# Reference file for finding some database directory base
sampleDirRefFile = "coredb_test_xx.nim"
dbTypeDefault = LegacyDbMemory
ldgTypeDefault = LegacyAccountsCache
let
# Standard test sample
bChainCapture = bulkTest0
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
when unittest2DisableParamFiltering:
import algorithm
# Filter out local options and pass on the rest to `unittest2`
proc cmdLineConfig(): tuple[samples: seq[CaptureSpecs]] {.used.} =
## This helper allows passing additional command line options to the
## unit test.
##
## Example:
## ::
## nim c -r ...\
## -d:unittest2DisableParamFiltering \
## ./tests/test_coredb.nim \
## --output-level=VERBOSE \
## --sample=goerli-lp,goerli-ar
## or
## ::
## nim c ... -d:unittest2DisableParamFiltering ./tests/test_coredb.nim
## ./tests/test_coredb.out --output-level=VERBOSE --sample=goerli-ar
## ...
##
## At the moment, only the `--sample=` additional option is provided.
##
# Define sample list from the command line (if any)
const optPfx = "--sample=" # Custom option with sample list
proc parseError(s = "") =
let msg = if 0 < s.len: "Unsupported \"" & optPfx & "\" list item: " & s
else: "Empty \"" & optPfx & " list"
echo "*** ", getAppFilename().splitFile.name, ": ", msg
echo " Available: ", allSamples.mapIt(it.name).sorted.join(" ")
quit(99)
var other: seq[string] # Options for manual parsing by `unittest2`
for arg in commandLineParams():
if optPfx.len <= arg.len and arg[0 ..< optPfx.len] == optPfx:
for w in arg[optPfx.len ..< arg.len].split(",").mapIt(it.strip):
block findSample:
for sample in allSamples:
if w.cmpIgnoreCase(sample.name) == 0:
result.samples.add sample
break findSample
w.parseError()
if result.samples.len == 0:
parseError()
else:
other.add arg
# Setup `unittest2`
other.parseParameters
else:
# Kill the compilation process iff the directive `cmdLineConfig()` is used
template cmdLineConfig(): untyped {.used.} =
{.error: "cmdLineConfig() needs compiler option "&
" -d:unittest2DisableParamFiltering".}
proc findFilePath(
file: string;
baseDir: openArray[string] = baseDir;
repoDir: openArray[string] = repoDir;
subDir: openArray[string] = subDir;
): Result[string,void] =
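## Wrapper around `findFilePathHelper()`: search `file` in all combinations
## of the given base, repo, and sub directories.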
file.findFilePathHelper(baseDir, repoDir, subDir)
proc getTmpDir(sampleDir = sampleDirRefFile): string =
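## Use the directory of the reference sample file as base for temporary
## test databases.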
sampleDir.findFilePath.value.splitFile.dir
proc flushDbDir(s: string) =
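## Remove a database directory left over from a previous test run, then
## delete the base directory unless it still holds other entries.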
if s != "":
let dataDir = s / "nimbus"
if (dataDir / "data").dirExists:
# Typically under Windows: there might be stale file locks.
try: dataDir.removeDir except CatchableError: discard
block dontClearUnlessEmpty:
for w in s.walkDir:
break dontClearUnlessEmpty
try: s.removeDir except CatchableError: discard
# ----------------
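# Logging helpers, only effective when `chronicles` runtime filtering is
# compiled in.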
proc setTraceLevel {.used.} =
discard
when defined(chronicles_runtime_filtering) and loggingEnabled:
setLogLevel(LogLevel.TRACE)
proc setDebugLevel {.used.} =
discard
when defined(chronicles_runtime_filtering) and loggingEnabled:
setLogLevel(LogLevel.DEBUG)
proc setErrorLevel {.used.} =
discard
when defined(chronicles_runtime_filtering) and loggingEnabled:
setLogLevel(LogLevel.ERROR)
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc initRunnerDB(
path: string;
specs: CaptureSpecs;
dbType: CoreDbType;
ldgType: LedgerType;
): CommonRef =
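## Set up a `CommonRef` descriptor on the requested `CoreDb` backend and
## ledger type, resolve the network parameters and initialise the genesis
## state.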
let coreDB =
# Resolve for static `dbType`
case dbType:
of LegacyDbMemory: LegacyDbMemory.newCoreDbRef()
of LegacyDbPersistent: LegacyDbPersistent.newCoreDbRef path
of AristoDbMemory: AristoDbMemory.newCoreDbRef()
of AristoDbRocks: AristoDbRocks.newCoreDbRef path
of AristoDbVoid: AristoDbVoid.newCoreDbRef()
else: raiseAssert "Oops"
when false: # or true:
setDebugLevel()
coreDB.trackLegaApi = true
coreDB.trackNewApi = true
coreDB.localDbOnly = true
var
params: NetworkParams
networkId: NetworkId
if specs.builtIn:
networkId = specs.network
params = networkId.networkParams()
else:
doAssert specs.genesis.findFilePath.value.loadNetworkParams(params)
networkId = params.config.chainId.NetworkId
result = CommonRef.new(
db = coreDB,
networkId = networkId,
params = params,
ldgType = ldgType)
result.initializeEmptyDb
setErrorLevel()
coreDB.trackLegaApi = false
coreDB.trackNewApi = false
coreDB.localDbOnly = false
# ------------------------------------------------------------------------------
# Test Runners: accounts and accounts storages
# ------------------------------------------------------------------------------
proc chainSyncRunner(
noisy = true;
capture = bChainCapture;
dbType = CoreDbType(0);
ldgType = ldgTypeDefault;
profilingOk = false;
finalDiskCleanUpOk = true;
enaLoggingOk = false;
lastOneExtraOk = true;
) =
## Test backend database and ledger
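##
## The capture file(s) are replayed against a freshly initialised database
## backend `dbType` with ledger `ldgType`. With `profilingOk` set, API
## timing statistics are printed after the run.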
let
fileInfo = capture.files[0]
.splitFile.name.split(".")[0]
.strip(leading=false, chars={'0'..'9'})
filePaths = capture.files.mapIt(it.findFilePath(baseDir,repoDir).value)
baseDir = getTmpDir() / capture.name & "-chain-sync"
dbDir = baseDir / "tmp"
numBlocks = capture.numBlocks
numBlocksInfo = if numBlocks == high(int): "all" else: $numBlocks
dbType = block:
# Decreasing priority: dbType, capture.dbType, dbTypeDefault
var effDbType = dbTypeDefault
if dbType != CoreDbType(0):
effDbType = dbType
elif capture.dbType != CoreDbType(0):
effDbType = capture.dbType
effDbType
persistent = dbType in CoreDbPersistentTypes
defer:
if persistent: baseDir.flushDbDir
suite &"CoreDB and LedgerRef API on {fileInfo}, {dbType}, {ldgType}":
test &"Ledger API {ldgType}, {numBlocksInfo} blocks":
let
com = initRunnerDB(dbDir, capture, dbType, ldgType)
defer:
com.db.finish(flush = finalDiskCleanUpOk)
if profilingOk: noisy.test_chainSyncProfilingPrint numBlocks
if persistent and finalDiskCleanUpOk: dbDir.flushDbDir
if noisy:
com.db.trackNewApi = true
com.db.trackLedgerApi = true
com.db.localDbOnly = true
check noisy.test_chainSync(filePaths, com, numBlocks,
lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk)
# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------
proc coreDbMain*(noisy = defined(debug)) =
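## Externally callable entry point, running the chain-sync test with
## default settings.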
noisy.chainSyncRunner(ldgType=LedgerCache)
when isMainModule:
const
noisy = defined(debug) or true
var
sampleList: seq[CaptureSpecs]
setErrorLevel()
# This one uses the readily available dump: `bulkTest0` and some huge replay
# dumps `bulkTest2`, `bulkTest3`, .. from the `nimbus-eth1-blobs` package.
# For specs see `tests/test_coredb/coredb_test_xx.nim`.
sampleList = cmdLineConfig().samples
if sampleList.len == 0:
sampleList = @[bulkTest0]
when true:
sampleList = @[bulkTest2, bulkTest3]
sampleList = @[ariTest1] # debugging
var state: (Duration, int)
for n,capture in sampleList:
noisy.profileSection("@sample #" & $n, state):
noisy.chainSyncRunner(
capture = capture,
#dbType = ..,
ldgType=LedgerCache,
#profilingOk = true,
#finalDiskCleanUpOk = false,
#enaLoggingOk = ..,
#lastOneExtraOk = ..,
)
noisy.say "***", "total: ", state[0].pp, " sections: ", state[1]
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------