# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.

## Testing `CoreDB` wrapper implementation
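
# Usage sketch: assuming this file lives at `tests/test_coredb.nim` (as the
# relative imports suggest) and the compiler is configured as for the rest of
# the test suite, it can be compiled and run stand-alone, e.g.
#
#   nim c -r tests/test_coredb.nim
#
# Alternatively, the exported `coreDbMain()` below drives the same runner
# programmatically.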
import
  std/[os, strformat, strutils, times],
  chronicles,
  eth/common,
  results,
  unittest2,
  ../../nimbus/db/[core_db/persistent, ledger],
  ../../nimbus/core/chain,
  ./replay/pp,
  ./test_coredb/[coredb_test_xx, test_chainsync, test_helpers]
const
  baseDir = [".", "..", ".."/"..", $DirSep]
  repoDir = [".", "tests", "nimbus-eth1-blobs"]
  subDir = ["replay", "test_coredb"]

  # Reference file for finding some database directory base
  sampleDirRefFile = "coredb_test_xx.nim"

  # Standard test sample
  bChainCapture = bulkTest0
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
proc findFilePath(
    file: string;
    baseDir: openArray[string] = baseDir;
    repoDir: openArray[string] = repoDir;
    subDir: openArray[string] = subDir;
      ): Result[string,void] =
  for dir in baseDir:
    if dir.dirExists:
      for repo in repoDir:
        if (dir / repo).dirExists:
          for sub in subDir:
            if (dir / repo / sub).dirExists:
              let path = dir / repo / sub / file
              if path.fileExists:
                return ok(path)
  echo "*** File not found \"", file, "\"."
  err()
proc getTmpDir(sampleDir = sampleDirRefFile): string =
  sampleDir.findFilePath.value.splitFile.dir
proc flushDbDir(s: string) =
  if s != "":
    let dataDir = s / "nimbus"
    if (dataDir / "data").dirExists:
      # Typically under Windows: there might be stale file locks.
      try: dataDir.removeDir except CatchableError: discard
    block dontClearUnlessEmpty:
      # Remove the base directory only if it turns out to be empty by now.
      for w in s.walkDir:
        break dontClearUnlessEmpty
      try: s.removeDir except CatchableError: discard
# ----------------
proc setTraceLevel {.used.} =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.TRACE)

proc setDebugLevel {.used.} =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.DEBUG)

proc setErrorLevel {.used.} =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc initRunnerDB(
    path: string;
    network: NetworkId;
    dbType: CoreDbType;
    ldgType: LedgerType;
      ): CommonRef =
  let coreDB =
    # Resolve for static `dbType`
    case dbType:
    of LegacyDbMemory: LegacyDbMemory.newCoreDbRef()
    of LegacyDbPersistent: LegacyDbPersistent.newCoreDbRef path
    of AristoDbMemory: AristoDbMemory.newCoreDbRef()
    of AristoDbRocks: AristoDbRocks.newCoreDbRef path
    of AristoDbVoid: AristoDbVoid.newCoreDbRef()
    else: raiseAssert "Oops"

  when false: # or true:
    setDebugLevel()
    coreDB.trackLegaApi = true
    coreDB.trackNewApi = true
    coreDB.localDbOnly = true

  result = CommonRef.new(
    db = coreDB,
    networkId = network,
    params = network.networkParams,
    ldgType = ldgType)
  result.initializeEmptyDb
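
# Hypothetical stand-alone call, for illustration only (`MainNet` stands in
# for whatever `NetworkId` value the caller has at hand):
#
#   let com = initRunnerDB("", MainNet, LegacyDbMemory, LegacyAccountsCache)
#
# The path argument only matters for the persistent backends; the chain-sync
# runner below builds the same object from a capture sample.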
# ------------------------------------------------------------------------------
# Test Runners: accounts and accounts storages
# ------------------------------------------------------------------------------
proc chainSyncRunner(
    noisy = true;
    capture = bChainCapture;
    dbType = LegacyDbMemory;
    ldgType = LegacyAccountsCache;
    enaLogging = false;
    lastOneExtra = true;
      ) =
  ## Test backend database and ledger
  let
    fileInfo = capture.file.splitFile.name.split(".")[0]
    filePath = capture.file.findFilePath(baseDir,repoDir).value
    baseDir = getTmpDir() / capture.name & "-chain-sync"
    dbDir = baseDir / "tmp"
    numBlocks = capture.numBlocks
    numBlocksInfo = if numBlocks == high(int): "all" else: $numBlocks
    persistent = dbType in CoreDbPersistentTypes

  defer:
    if persistent: baseDir.flushDbDir

  suite &"CoreDB and LedgerRef API on {fileInfo}, {dbType}, {ldgType}":

    test &"Ledger API {ldgType}, {numBlocksInfo} blocks":
      let
        com = initRunnerDB(dbDir, capture.network, dbType, ldgType)
      defer:
        com.db.finish(flush = true)
        noisy.testChainSyncProfilingPrint numBlocks
        if persistent: dbDir.flushDbDir

      if noisy:
        com.db.trackNewApi = true
        com.db.trackLedgerApi = true

      check noisy.testChainSync(filePath, com, numBlocks,
        lastOneExtra=lastOneExtra, enaLogging=enaLogging)
# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------
proc coreDbMain*(noisy = defined(debug)) =
  noisy.chainSyncRunner(ldgType=LedgerCache)
when isMainModule:
  const
    noisy = defined(debug) or true

  setErrorLevel()

  # This one uses the readily available dump: `bulkTest0` and some huge replay
  # dumps `bulkTest2`, `bulkTest3`, .. from the `nimbus-eth1-blobs` package.
  # For specs see `tests/test_coredb/bulk_test_xx.nim`.
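  # (The larger dumps are only picked up if the `nimbus-eth1-blobs` package is
  # checked out where `findFilePath()` can see it, i.e. under one of the
  # `baseDir`/`repoDir` combinations defined at the top of this module.)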
  var testList = @[bulkTest0] # This test is superseded by `bulkTest1` and `2`
  #testList = @[failSample0]
  when true and false:
    testList = @[bulkTest2, bulkTest3]

  var state: (Duration, int)
  for n,capture in testList:
    noisy.profileSection("@testList #" & $n, state):
      noisy.chainSyncRunner(
        capture=capture,
        #dbType=AristoDbMemory,
        ldgType=LedgerCache,
        #enaLogging=true
      )

  noisy.say "***", "total elapsed: ", state[0].pp, " sections: ", state[1]
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------