Core db and aristo updates for destructor and tx logic (#1894)
* Disable `TransactionID` related functions from `state_db.nim`
why:
Functions `getCommittedStorage()` and `updateOriginalRoot()` from
the `state_db` module are nowhere used. The emulation of a legacy
`TransactionID` type functionality is administratively expensive to
provide by `Aristo` (the legacy DB version is only partially
implemented, anyway).
As there is no other place where `TransactionID`s are used, they will
not be provided by the `Aristo` variant of the `CoreDb`. For the
legacy DB API, nothing will change.
* Fix copyright headers in source code
* Get rid of compiler warning
* Update Aristo code, remove unused `merge()` variant, export `hashify()`
why:
Adapt to upcoming `CoreDb` wrapper
* Remove synced tx feature from `Aristo`
why:
+ This feature allowed to synchronise transaction methods like begin,
commit, and rollback for a group of descriptors.
+ The feature is over engineered and not needed for `CoreDb`, neither
is it complete (some convergence features missing.)
* Add debugging helpers to `Kvt`
also:
Update database iterator, add count variable yield argument similar
to `Aristo`.
* Provide optional destructors for `CoreDb` API
why:
For the upcoming Aristo wrapper, this allows to control when certain
smart destruction and update can take place. The auto destructor works
fine in general when the storage/cache strategy is known and acceptable
when creating descriptors.
* Add update option for `CoreDb` API function `hash()`
why:
The hash function is typically used to get the state root of the MPT.
Due to lazy hashing, this might be not available on the `Aristo` DB.
So the `update` function asks for re-hashing the current state changes
if needed.
* Update API tracking log mode: `info` => `debug`
* Use shared `Kvt` descriptor in new Ledger API
why:
No need to create a new descriptor all the time
2023-11-16 19:35:03 +00:00
|
|
|
# Nimbus
|
2024-02-01 21:27:48 +00:00
|
|
|
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
2023-05-11 14:25:29 +00:00
|
|
|
# Licensed under either of
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
# at your option. This file may not be copied, modified, or
|
|
|
|
# distributed except according to those terms.
|
|
|
|
|
|
|
|
## Re-invented implementation for Merkle Patricia Tree named as Aristo Trie
|
|
|
|
|
|
|
|
import
|
|
|
|
std/[os, strformat, strutils],
|
|
|
|
chronicles,
|
2023-08-21 14:58:30 +00:00
|
|
|
eth/common,
|
|
|
|
results,
|
2023-05-11 14:25:29 +00:00
|
|
|
unittest2,
|
2023-06-09 11:17:37 +00:00
|
|
|
../nimbus/db/aristo/[aristo_desc, aristo_merge],
|
2023-06-02 10:04:29 +00:00
|
|
|
./replay/[pp, undump_accounts, undump_storages],
|
2023-08-21 14:58:30 +00:00
|
|
|
./test_sync_snap/[snap_test_xx, test_types],
|
|
|
|
./test_aristo/[test_backend, test_filter, test_helpers, test_misc, test_tx]
|
2023-05-11 14:25:29 +00:00
|
|
|
|
|
|
|
const
  # Candidate directory components combined as `base / repo / sub` when
  # searching for test sample files (see `findFilePath()` below.)
  baseDir = [".", "..", ".."/"..", $DirSep]
  repoDir = [".", "tests", "nimbus-eth1-blobs"]
  subDir = ["replay", "test_sync_snap", "replay"/"snap"]

  # Reference file for finding the database directory
  sampleDirRefFile = "sample0.txt.gz"

  # Standard test samples
  accSample = snapTest0
  storSample = snapTest4
|
2023-05-11 14:25:29 +00:00
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Helpers
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
proc findFilePath(
    file: string;
    baseDir: openArray[string] = baseDir;
    repoDir: openArray[string] = repoDir;
    subDir: openArray[string] = subDir;
      ): Result[string,void] =
  ## Probe all `base / repo / sub` directory combinations for `file` and
  ## return the first full path that exists. Prints a diagnostic and
  ## returns `err()` when the file cannot be located anywhere.
  for base in baseDir:
    if not base.dirExists:
      continue                              # skip non-existing roots early
    for repo in repoDir:
      if not (base / repo).dirExists:
        continue
      for sub in subDir:
        if not (base / repo / sub).dirExists:
          continue
        let candidate = base / repo / sub / file
        if candidate.fileExists:
          return ok(candidate)
  echo "*** File not found \"", file, "\"."
  err()
|
|
|
|
|
|
|
|
proc getTmpDir(sampleDir = sampleDirRefFile): string =
  ## Directory of the reference sample file, used as scratch/tmp area for
  ## the persistent DB tests. Raises via `value` if the sample is missing.
  let refFile = sampleDir.findFilePath.value
  refFile.splitFile.dir
|
|
|
|
|
|
|
|
proc setTraceLevel {.used.} =
  ## Lower the chronicles log threshold to TRACE. This is a no-op unless
  ## the binary was compiled with runtime log filtering enabled.
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.TRACE)
  else:
    discard
|
|
|
|
|
|
|
|
proc setErrorLevel {.used.} =
  ## Raise the chronicles log threshold to ERROR, silencing routine output.
  ## No-op unless compiled with runtime log filtering enabled.
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)
  else:
    discard
|
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Test Runners: accounts and accounts storages
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2023-08-25 22:53:59 +00:00
|
|
|
proc miscRunner(
    noisy = true;
    qidSampleSize = QidSample;
      ) =
  ## Run the stand-alone `Aristo` unit tests that need no data dump:
  ## vertex ID recycling, the cascaded-fifo (qid) scheduler, and some
  ## pathological short-key cases.
  ##
  ## `noisy`         -- pass-through verbosity flag for the test helpers
  ## `qidSampleSize` -- number of samples fed to the fifo scheduler tests

  suite "Aristo: Miscellaneous tests":

    # Fixed typo in test name: "recyling" => "recycling"
    test "VertexID recycling lists":
      check noisy.testVidRecycleLists()

    test &"Low level cascaded fifos API (sample size: {qidSampleSize})":
      check noisy.testQidScheduler(sampleSize = qidSampleSize)

    test &"High level cascaded fifos API (sample size: {qidSampleSize})":
      check noisy.testFilterFifo(sampleSize = qidSampleSize)

    # Fixed typo in test name: "patholgical" => "pathological"
    test "Short keys and other pathological cases":
      check noisy.testShortKeys()
|
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
|
2023-06-22 11:13:24 +00:00
|
|
|
proc accountsRunner(
    noisy = true;
    sample = accSample;
    resetDb = false;
    cmpBackends = true;
    persistent = true;
      ) =
  ## Replay an accounts dump `sample` against the `Aristo` DB: merge,
  ## backend comparison, deletion, distributed access and filter backlog.
  ##
  ## `resetDb`     -- start from a fresh DB for every list (no merged dumps)
  ## `cmpBackends` -- also cross-check against the persistent backend
  ## `persistent`  -- use an on-disk DB below a tmp directory
  let
    accLst = sample.to(seq[UndumpAccounts]).to(seq[ProofTrieData])
    fileInfo = sample.file.splitPath.tail.replace(".txt.gz","")
    listMode = if resetDb: "" else: ", merged dumps"
    baseDir = getTmpDir() / sample.name & "-accounts"
    dbDir = if persistent: baseDir / "tmp" else: ""
    isPersistent = if persistent: "persistent DB" else: "mem DB only"
    doRdbOk = (cmpBackends and 0 < dbDir.len)
    cmpBeInfo = if doRdbOk: "persistent" else: "memory"

  defer:
    # Best-effort cleanup of the scratch directory
    try: baseDir.removeDir except CatchableError: discard

  suite &"Aristo: accounts data dump from {fileInfo}{listMode}, {isPersistent}":

    test &"Merge {accLst.len} proof & account lists to database":
      check noisy.testTxMergeProofAndKvpList(accLst, dbDir, resetDb)

    test &"Compare {accLst.len} account lists on {cmpBeInfo}" &
        " db backend vs. cache":
      check noisy.testBackendConsistency(accLst, dbDir, resetDb)

    test &"Delete accounts database successively, {accLst.len} lists":
      check noisy.testTxMergeAndDeleteOneByOne(accLst, dbDir)

    test &"Delete accounts database sub-trees, {accLst.len} lists":
      check noisy.testTxMergeAndDeleteSubTree(accLst, dbDir)

    test &"Distributed backend access {accLst.len} entries":
      check noisy.testDistributedAccess(accLst, dbDir)

    test &"Filter backlog management {accLst.len} entries":
      check noisy.testFilterBacklog(accLst, rdbPath=dbDir)
|
|
|
|
|
2023-06-02 10:04:29 +00:00
|
|
|
|
|
|
|
proc storagesRunner(
    noisy = true;
    sample = storSample;
    resetDb = false;
    oops: KnownHasherFailure = @[];
    cmpBackends = true;
    persistent = true;
      ) =
  ## Replay a storage-slots dump `sample` against the `Aristo` DB,
  ## mirroring `accountsRunner()` for storage tries.
  ##
  ## `oops` -- expected/known hasher failures keyed by dump file name
  let
    stoLst = sample.to(seq[UndumpStorages]).to(seq[ProofTrieData])
    fileInfo = sample.file.splitPath.tail.replace(".txt.gz","")
    listMode = if resetDb: "" else: ", merged dumps"
    baseDir = getTmpDir() / sample.name & "-storage"
    dbDir = if persistent: baseDir / "tmp" else: ""
    isPersistent = if persistent: "persistent DB" else: "mem DB only"
    doRdbOk = (cmpBackends and 0 < dbDir.len)
    cmpBeInfo = if doRdbOk: "persistent" else: "memory"

  defer:
    # Best-effort cleanup of the scratch directory
    try: baseDir.removeDir except CatchableError: discard

  suite &"Aristo: storages data dump from {fileInfo}{listMode}, {isPersistent}":

    test &"Merge {stoLst.len} proof & slots lists to database":
      check noisy.testTxMergeProofAndKvpList(
        stoLst, dbDir, resetDb, fileInfo, oops)

    test &"Compare {stoLst.len} slot lists on {cmpBeInfo}" &
        " db backend vs. cache":
      check noisy.testBackendConsistency(stoLst, dbDir, resetDb)

    test &"Delete storage database successively, {stoLst.len} lists":
      check noisy.testTxMergeAndDeleteOneByOne(stoLst, dbDir)

    test &"Delete storage database sub-trees, {stoLst.len} lists":
      check noisy.testTxMergeAndDeleteSubTree(stoLst, dbDir)

    test &"Distributed backend access {stoLst.len} entries":
      check noisy.testDistributedAccess(stoLst, dbDir)

    test &"Filter backlog management {stoLst.len} entries":
      check noisy.testFilterBacklog(stoLst, rdbPath=dbDir)
|
|
|
|
|
2023-05-11 14:25:29 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Main function(s)
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
|
|
|
proc aristoMain*(noisy = defined(debug)) =
  ## Entry point for the full `Aristo` test battery with default samples.
  noisy.miscRunner()
  noisy.accountsRunner()
  noisy.storagesRunner()
|
2023-05-11 14:25:29 +00:00
|
|
|
|
|
|
|
when isMainModule:
  const
    noisy = defined(debug) or true

  setErrorLevel()

  when true: # and false:
    noisy.miscRunner(qidSampleSize = 1_000)

  # This one uses dumps from the external `nimbus-eth1-blobs` repo
  when true and false:
    import ./test_sync_snap/snap_other_xx
    noisy.showElapsed("@snap_other_xx"):
      for n,sam in snapOtherList:
        noisy.accountsRunner(sam, resetDb=true)

  # This one uses dumps from the external `nimbus-eth1-blobs` repo
  when true and false:
    import ./test_sync_snap/snap_storage_xx
    # Dump entries known to fail hashing, keyed by dump file name
    let knownFailures: KnownHasherFailure = @[
      ("storages3__18__25_dump#12.27367",(3,HashifyExistingHashMismatch)),
      ("storages4__26__33_dump#12.23924",(6,HashifyExistingHashMismatch)),
      ("storages5__34__41_dump#10.20512",(1,HashifyRootHashMismatch)),
      ("storagesB__84__92_dump#7.9709", (7,HashifyExistingHashMismatch)),
      ("storagesD_102_109_dump#18.28287",(9,HashifyExistingHashMismatch)),
    ]
    noisy.showElapsed("@snap_storage_xx"):
      for n,sam in snapStorageList:
        noisy.accountsRunner(sam, resetDb=true)
        noisy.storagesRunner(sam, resetDb=true, oops=knownFailures)

  when true: # and false:
    let persistent = false # or true
    noisy.showElapsed("@snap_test_list"):
      for n,sam in snapTestList:
        noisy.accountsRunner(sam, persistent=persistent)
    noisy.showElapsed("@snap_test_storage_list"):
      for n,sam in snapTestStorageList:
        noisy.accountsRunner(sam, persistent=persistent)
        noisy.storagesRunner(sam, persistent=persistent)
|
2023-05-11 14:25:29 +00:00
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# End
|
|
|
|
# ------------------------------------------------------------------------------
|