nimbus-eth1/tests/test_coredb.nim


# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.

## Testing `CoreDB` wrapper implementation

import
  std/[os, sequtils, strformat, strutils],
  chronicles,
  eth/common,
  results,
  unittest2,
  ../nimbus/db/opts,
  ../nimbus/db/core_db/persistent,
  ../nimbus/core/chain,
  ./replay/pp,
  ./test_coredb/[
    coredb_test_xx, test_chainsync, test_coredb_helpers, test_helpers]

const
  # If `true`, this compile time option sets up `unittest2` for manual parsing
  unittest2DisableParamFiltering {.booldefine.} = false

  baseDir = [".", "..", ".."/"..", $DirSep]
  repoDir = [".", "tests"]
  subDir = ["replay", "test_coredb", "custom-network", "main-era1"]

  # Reference file for finding some database directory base
  sampleDirRefFile = "coredb_test_xx.nim"

  dbTypeDefault = AristoDbMemory

let
  # Standard test sample
  memorySampleDefault = mainTest0m
  persistentSampleDefault = mainTest2r

# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------

when unittest2DisableParamFiltering:
  import algorithm

  # Filter out local options and pass on the rest to `unittest2`
  proc cmdLineConfig(): tuple[samples: seq[CaptureSpecs]] {.used.} =
    ## This helper allows passing additional command line options to the
    ## unit test.
    ##
    ## Example:
    ## ::
    ##   nim c -r ...\
    ##     -d:unittest2DisableParamFiltering \
    ##     ./tests/test_coredb.nim \
    ##       --output-level=VERBOSE \
    ##       --sample=main-am,main-ar
    ## or
    ## ::
    ##   nim c ... -d:unittest2DisableParamFiltering ./tests/test_coredb.nim
    ##   ./tests/test_coredb.out --output-level=VERBOSE --sample=goerli-ar
    ##   ...
    ##
    ## At the moment, only the `--sample=` additional option is provided.
    ##
    # Define sample list from the command line (if any)
    const optPfx = "--sample=" # Custom option with sample list

    proc parseError(s = "") =
      let msg = if 0 < s.len: "Unsupported \"" & optPfx & "\" list item: " & s
                else: "Empty \"" & optPfx & "\" list"
      echo "*** ", getAppFilename().splitFile.name, ": ", msg
      echo "    Available: ", allSamples.mapIt(it.name).sorted.join(" ")
      quit(99)

    var other: seq[string] # Options for manual parsing by `unittest2`

    for arg in commandLineParams():
      if optPfx.len <= arg.len and arg[0 ..< optPfx.len] == optPfx:
        for w in arg[optPfx.len ..< arg.len].split(",").mapIt(it.strip):
          block findSample:
            for sample in allSamples:
              if w.cmpIgnoreCase(sample.name) == 0:
                result.samples.add sample
                break findSample
            w.parseError()
        if result.samples.len == 0:
          parseError()
      else:
        other.add arg

    # Setup `unittest2`
    other.parseParameters

else:
  # Kill the compilation process iff the directive `cmdLineConfig()` is used
  template cmdLineConfig(): untyped {.used.} =
    {.error: "cmdLineConfig() needs compiler option " &
      "-d:unittest2DisableParamFiltering".}

proc findFilePath(
    file: string;
    baseDir: openArray[string] = baseDir;
    repoDir: openArray[string] = repoDir;
    subDir: openArray[string] = subDir;
      ): Result[string,void] =
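  ## Locate `file` in one of the search paths composed from `baseDir`,
  ## `repoDir` and `subDir`.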
  file.findFilePathHelper(baseDir, repoDir, subDir)

proc getTmpDir(sampleDir = sampleDirRefFile): string =
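  ## Use the directory where the reference sample file lives as the base
  ## for scratch space.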
  sampleDir.findFilePath.value.splitFile.dir
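
# Remove the `nimbus` data area of a test directory (stale file locks under
# Windows are ignored), then drop the directory itself only if it ends up
# empty.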
proc flushDbDir(s: string) =
  if s != "":
    let dataDir = s / "nimbus"
    if (dataDir / "data").dirExists:
      # Typically under Windows: there might be stale file locks.
      try: dataDir.removeDir except CatchableError: discard
    block dontClearUnlessEmpty:
      for w in s.walkDir:
        break dontClearUnlessEmpty
      try: s.removeDir except CatchableError: discard

# ----------------
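
# Chronicles log-level helpers: these compile to no-ops unless the build
# defines `chronicles_runtime_filtering` and `loggingEnabled` is set.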
proc setTraceLevel {.used.} =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.TRACE)

proc setDebugLevel {.used.} =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.DEBUG)

proc setErrorLevel {.used.} =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc initRunnerDB(
    path: string;
    specs: CaptureSpecs;
    dbType: CdbTypeEx;
    pruneHistory: bool;
      ): CommonRef =
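  ## Open a `CommonRef` on the backend selected by `dbType`. Network
  ## parameters are taken from the built-in `specs.network`, or loaded
  ## from the capture's genesis file.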
  let coreDB =
    # Resolve for static `dbType`
    case dbType:
    of CdbAristoMemory: AristoDbMemory.newCoreDbRef()
    of CdbAristoRocks: AristoDbRocks.newCoreDbRef(path, DbOptions.init())
    of CdbAristoDualRocks: newCdbAriAristoDualRocks(path, DbOptions.init())
    of CdbAristoVoid: AristoDbVoid.newCoreDbRef()
    of CdbOoops: raiseAssert "Ooops"

  when false: # or true:
    setDebugLevel()
    coreDB.trackLegaApi = true
    coreDB.trackNewApi = true

  var
    params: NetworkParams
    networkId: NetworkId
  if specs.builtIn:
    networkId = specs.network
    params = networkId.networkParams()
  else:
    doAssert specs.genesis.findFilePath.value.loadNetworkParams(params)
    networkId = params.config.chainId.NetworkId

  result = CommonRef.new(
    db = coreDB,
    taskpool = nil,
    networkId = networkId,
    params = params,
    pruneHistory = pruneHistory)

  setErrorLevel()

  when CoreDbEnableApiTracking:
    coreDB.trackCoreDbApi = false
    coreDB.trackLedgerApi = false

# ------------------------------------------------------------------------------
# Test Runners: accounts and accounts storages
# ------------------------------------------------------------------------------

proc chainSyncRunner(
    noisy = true;
    capture = memorySampleDefault;
    dbType = CdbTypeEx(0);
    pruneHistory = false;
    profilingOk = false;
    finalDiskCleanUpOk = true;
    enaLoggingOk = false;
    lastOneExtraOk = true;
    oldLogAlign = false;
      ) =
  ## Backend database and ledger test, replaying a captured block dump
  ## via `test_chainSync()`.
  let
    fileInfo = capture.files[0]
                      .splitFile.name.split(".")[0]
                      .strip(leading=false, chars={'0'..'9'})
    filePaths = capture.files.mapIt(it.findFilePath(baseDir,repoDir).value)
    baseDir = getTmpDir() / capture.dbName & "-chain-sync"
    dbDir = baseDir / "tmp"
    numBlocks = capture.numBlocks
    numBlocksInfo = if numBlocks == high(int): "all" else: $numBlocks

    dbType = block:
      # Decreasing priority: dbType, capture.dbType, dbTypeDefault
      var effDbType = dbTypeDefault.to(CdbTypeEx)
      if dbType != CdbTypeEx(0):
        effDbType = dbType
      elif capture.dbType != CoreDbType(0):
        effDbType = capture.dbType.to(CdbTypeEx)
      effDbType

    persistent = dbType in CdbTypeExPersistent

  defer:
    if persistent: baseDir.flushDbDir

  suite &"CoreDB and LedgerRef API on {fileInfo}, {dbType}":

    test &"Ledger API {numBlocksInfo} blocks":
      let
        com = initRunnerDB(dbDir, capture, dbType, pruneHistory)
      defer:
        com.db.finish(eradicate = finalDiskCleanUpOk)
        if profilingOk: noisy.test_chainSyncProfilingPrint numBlocks
        if persistent and finalDiskCleanUpOk: dbDir.flushDbDir

      when CoreDbEnableApiTracking:
        if noisy:
          com.db.trackCoreDbApi = true
          com.db.trackLedgerApi = true

      check noisy.test_chainSync(filePaths, com, numBlocks,
        lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk,
        oldLogAlign=oldLogAlign)

proc persistentSyncPreLoadAndResumeRunner(
    noisy = true;
    capture = persistentSampleDefault;
    dbType = CdbTypeEx(0);
    profilingOk = false;
    pruneHistory = false;
    finalDiskCleanUpOk = true;
    enaLoggingOk = false;
    lastOneExtraOk = true;
    oldLogAlign = false;
      ) =
  ## Backend database and ledger test: pre-load part of a persistent
  ## database from a captured block dump, then resume with the rest.
  let
    filePaths = capture.files.mapIt(it.findFilePath(baseDir,repoDir).value)
    baseDir = getTmpDir() / capture.dbName & "-chain-sync"
    dbDir = baseDir / "tmp"

    dbType = block:
      # Decreasing priority: dbType, capture.dbType, dbTypeDefault
      var effDbType = dbTypeDefault.to(CdbTypeEx)
      if dbType != CdbTypeEx(0):
        effDbType = dbType
      elif capture.dbType != CoreDbType(0):
        effDbType = capture.dbType.to(CdbTypeEx)
      effDbType

  doAssert dbType in CdbTypeExPersistent
  defer: baseDir.flushDbDir

  let
    firstPart = min(capture.numBlocks div 2, 200_000)
    secndPart = capture.numBlocks
    secndPartInfo = if secndPart == high(int): "all" else: $secndPart

  suite &"CoreDB pre-load and resume test ..{firstPart}..{secndPartInfo}":

    test "Populate db by initial sample parts":
      let
        com = initRunnerDB(dbDir, capture, dbType, pruneHistory)
      defer:
        com.db.finish(eradicate = finalDiskCleanUpOk)
        if profilingOk: noisy.test_chainSyncProfilingPrint firstPart

      when CoreDbEnableApiTracking:
        if noisy:
          com.db.trackCoreDbApi = true
          com.db.trackLedgerApi = true

      check noisy.test_chainSync(filePaths, com, firstPart,
        lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk,
        oldLogAlign=oldLogAlign)

    test "Continue with rest of sample":
      let
        com = initRunnerDB(dbDir, capture, dbType, pruneHistory)
      defer:
        com.db.finish(eradicate = finalDiskCleanUpOk)
        if profilingOk: noisy.test_chainSyncProfilingPrint secndPart
        if finalDiskCleanUpOk: dbDir.flushDbDir

      when CoreDbEnableApiTracking:
        if noisy:
          com.db.trackCoreDbApi = true
          com.db.trackLedgerApi = true

      check noisy.test_chainSync(filePaths, com, secndPart,
        lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk,
        oldLogAlign=oldLogAlign)

# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------

proc coreDbMain*(noisy = defined(debug)) =
  noisy.chainSyncRunner()
  noisy.persistentSyncPreLoadAndResumeRunner()

when isMainModule:
  const
    noisy {.used.} = defined(debug) or true

  var
    sampleList: seq[CaptureSpecs]

  setErrorLevel()
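
  # Disabled debug run: change `true and false` to `true` below to enable it.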
  when true and false:
    false.coreDbMain()

  # This one uses the readily available dump: `bulkTest0` and some huge replay
  # dumps `bulkTest2`, `bulkTest3`, .. from the `nimbus-eth1-blobs` package.
  # For specs see `tests/test_coredb/bulk_test_xx.nim`.
  sampleList = cmdLineConfig().samples
  if sampleList.len == 0:
    sampleList = @[memorySampleDefault]

  when true: # and false:
    import std/times
    var state: (Duration, int)
    for n,capture in sampleList:
      noisy.profileSection("@sample #" & $n, state):
        noisy.chainSyncRunner(
          #dbType = CdbAristoDualRocks,
          capture = capture,
          #pruneHistory = true,
          #profilingOk = true,
          #finalDiskCleanUpOk = false,
          oldLogAlign = true
        )

    noisy.say "***", "total: ", state[0].pp, " sections: ", state[1]

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------