2023-10-03 11:56:13 +00:00
|
|
|
# Nimbus - Types, data structures and shared utilities used in network sync
|
|
|
|
#
|
Core db and aristo updates for destructor and tx logic (#1894)
* Disable `TransactionID` related functions from `state_db.nim`
why:
Functions `getCommittedStorage()` and `updateOriginalRoot()` from
the `state_db` module are nowhere used. The emulation of a legacy
`TransactionID` type functionality is administratively expensive to
provide by `Aristo` (the legacy DB version is only partially
implemented, anyway).
As there is no other place where `TransactionID`s are used, they will
not be provided by the `Aristo` variant of the `CoreDb`. For the
legacy DB API, nothing will change.
* Fix copyright headers in source code
* Get rid of compiler warning
* Update Aristo code, remove unused `merge()` variant, export `hashify()`
why:
Adapt to upcoming `CoreDb` wrapper
* Remove synced tx feature from `Aristo`
why:
+ This feature allowed to synchronise transaction methods like begin,
commit, and rollback for a group of descriptors.
+ The feature is over engineered and not needed for `CoreDb`, neither
is it complete (some convergence features missing.)
* Add debugging helpers to `Kvt`
also:
Update database iterator, add count variable yield argument similar
to `Aristo`.
* Provide optional destructors for `CoreDb` API
why:
For the upcoming Aristo wrapper, this allows to control when certain
smart destruction and update can take place. The auto destructor works
fine in general when the storage/cache strategy is known and acceptable
when creating descriptors.
* Add update option for `CoreDb` API function `hash()`
why:
The hash function is typically used to get the state root of the MPT.
Due to lazy hashing, this might be not available on the `Aristo` DB.
So the `update` function asks for re-hashing the current state changes
if needed.
* Update API tracking log mode: `info` => `debug`
* Use shared `Kvt` descriptor in new Ledger API
why:
No need to create a new descriptor all the time
2023-11-16 19:35:03 +00:00
|
|
|
# Copyright (c) 2023 Status Research & Development GmbH
|
2023-10-03 11:56:13 +00:00
|
|
|
# Licensed under either of
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
# at your option. This file may not be copied, modified, or
|
|
|
|
# distributed except according to those terms.
|
|
|
|
|
|
|
|
import
|
|
|
|
std/strformat,
|
2023-10-18 19:27:22 +00:00
|
|
|
chronicles,
|
2023-10-03 11:56:13 +00:00
|
|
|
eth/common,
|
|
|
|
results,
|
|
|
|
unittest2,
|
2023-12-12 17:47:41 +00:00
|
|
|
../../nimbus/core/chain,
|
|
|
|
../../nimbus/db/ledger,
|
2023-10-11 19:09:11 +00:00
|
|
|
../replay/[undump_blocks, xcheck],
|
2023-10-03 11:56:13 +00:00
|
|
|
./test_helpers
|
|
|
|
|
2023-12-12 17:47:41 +00:00
|
|
|
# Formally reference `LedgerType` so the compiler does not flag the
# `../../nimbus/db/ledger` import as unused (hence the tongue-in-cheek name.)
type StopMoaningAboutLedger {.used.} = LedgerType

when CoreDbEnableApiProfiling or LedgerEnableApiProfiling:
  # Extra helpers only needed for rendering profiling statistics
  import std/[algorithm, sequtils, strutils], ../replay/pp
|
|
|
|
|
2023-10-03 11:56:13 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Private helpers
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2023-10-18 19:27:22 +00:00
|
|
|
proc setTraceLevel {.used.} =
  ## Raise the chronicles log level to `TRACE`. Only effective when the
  ## binary was compiled with `chronicles_runtime_filtering` and logging
  ## is enabled; otherwise this is a no-op.
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.TRACE)
|
|
|
|
|
|
|
|
proc setErrorLevel {.used.} =
  ## Restrict chronicles logging to `ERROR` level. Only effective when the
  ## binary was compiled with `chronicles_runtime_filtering` and logging
  ## is enabled; otherwise this is a no-op.
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)
|
|
|
|
|
2023-12-12 17:47:41 +00:00
|
|
|
# --------------
|
|
|
|
|
|
|
|
proc coreDbProfResults(info: string; indent = 4): string =
  ## Render the accumulated `CoreDb` API profiling statistics as printable
  ## text, once sorted by elapsed time and once by number of visits.
  ## Returns the empty string unless compiled with `CoreDbEnableApiProfiling`.
  when CoreDbEnableApiProfiling:
    let
      pfx = indent.toPfx
      pfx2 = pfx & " "
    result = "CoreDb profiling results" & info & ":"
    result &= "\n" & pfx & "by accumulated duration per procedure"
    for (ela,w) in coreDbProfTab.byElapsed:
      result &= pfx2 & ela.pp & ": " &
        w.mapIt($it & coreDbProfTab.stats(it).pp(true)).sorted.join(", ")
    result &= "\n" & pfx & "by number of visits"
    for (count,w) in coreDbProfTab.byVisits:
      result &= pfx2 & $count & ": " &
        w.mapIt($it & coreDbProfTab.stats(it).pp).sorted.join(", ")
|
|
|
|
|
|
|
|
proc ledgerProfResults(info: string; indent = 4): string =
  ## Render the accumulated `Ledger` API profiling statistics as printable
  ## text, once sorted by elapsed time and once by number of visits.
  ## Returns the empty string unless compiled with `LedgerEnableApiProfiling`.
  when LedgerEnableApiProfiling:
    let
      pfx = indent.toPfx
      pfx2 = pfx & " "
    result = "Ledger profiling results" & info & ":"
    result &= "\n" & pfx & "by accumulated duration per procedure"
    for (ela,w) in ledgerProfTab.byElapsed:
      result &= pfx2 & ela.pp & ": " &
        w.mapIt($it & ledgerProfTab.stats(it).pp(true)).sorted.join(", ")
    result &= "\n" & pfx & "by number of visits"
    for (count,w) in ledgerProfTab.byVisits:
      result &= pfx2 & $count & ": " &
        w.mapIt($it & ledgerProfTab.stats(it).pp).sorted.join(", ")
|
|
|
|
|
2023-10-03 11:56:13 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Public test function
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2023-12-12 17:47:41 +00:00
|
|
|
proc test_chainSyncProfilingPrint*(
    noisy = false;
    nBlocks: int;
      ) =
  ## Print the accumulated `CoreDb` and `Ledger` API profiling results.
  ## Output is suppressed unless `noisy` is set; each table is printed
  ## only if the corresponding profiling mode was compiled in (otherwise
  ## the result string is empty and skipped.)
  if noisy:
    let info =
      # Annotate with the block count unless it is the `high(int)` default
      if 0 < nBlocks and nBlocks < high(int): " (" & $nBlocks & " blocks)"
      else: ""
    block:
      let s = info.coreDbProfResults()
      if 0 < s.len: true.say "***", s, "\n"
    block:
      let s = info.ledgerProfResults()
      if 0 < s.len: true.say "***", s, "\n"
|
|
|
|
|
|
|
|
|
2023-10-25 14:03:09 +00:00
|
|
|
proc test_chainSync*(
    noisy: bool;
    filePath: string;
    com: CommonRef;
    numBlocks = high(int);
    lastOneExtra = true
      ): bool =
  ## Store persistent blocks from dump into chain DB.
  ##
  ## Blocks are read in batches from the dump file `filePath` and persisted
  ## via `chain.persistBlocks()` up to block number `numBlocks - 1`. When
  ## `lastOneExtra` is set, that final block is executed as a separate
  ## single-block batch with API tracking enabled (when `noisy`), which is
  ## handy for tracing/profiling exactly one block.
  let
    sayBlocks = 900.u256
    chain = com.newChain
    lastBlock = max(1, numBlocks - 1).toBlockNumber
    # Remember tracking flags so they can be restored before returning
    save = (com.db.trackLegaApi, com.db.trackNewApi, com.db.trackLedgerApi)

  for w in filePath.undumpBlocks:
    let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)
    if fromBlock == 0.u256:
      # The genesis header must agree with what is in the database already
      xCheck w[0][0] == com.db.getBlockHeader(0.u256)
      continue

    if toBlock < lastBlock:
      # Message if `[fromBlock,toBlock]` contains a multiple of `sayBlocks`
      if fromBlock + (toBlock mod sayBlocks) <= toBlock:
        noisy.say "***", &"processing ...[#{fromBlock},#{toBlock}]..."
      let runPersistBlocksRc = chain.persistBlocks(w[0], w[1])
      xCheck runPersistBlocksRc == ValidationResult.OK:
        if noisy:
          # Re-run with logging enabled
          setTraceLevel()
          discard chain.persistBlocks(w[0], w[1])
      continue

    # Make sure that the `lastBlock` is the first item of the argument batch.
    # So it might be necessary to split off all blocks smaller than
    # `lastBlock` and execute them first. Then the next batch starts with
    # the `lastBlock`.
    let
      pivot = (lastBlock - fromBlock).truncate(uint)
      headers9 = w[0][pivot .. ^1]
      bodies9 = w[1][pivot .. ^1]
    doAssert lastBlock == headers9[0].blockNumber

    # Process leading batch before `lastBlock` (if any)
    var dotsOrSpace = "..."
    if fromBlock < lastBlock:
      let
        headers1 = w[0][0 ..< pivot]
        bodies1 = w[1][0 ..< pivot]
      noisy.say "***", &"processing {dotsOrSpace}[#{fromBlock},#{lastBlock-1}]"
      let runPersistBlocks1Rc = chain.persistBlocks(headers1, bodies1)
      xCheck runPersistBlocks1Rc == ValidationResult.OK
      dotsOrSpace = " "

    if noisy:
      # Enable verbose API tracking for the final batch only
      setTraceLevel()
      com.db.trackLegaApi = true
      com.db.trackNewApi = true
      com.db.trackLedgerApi = true
      com.db.localDbOnly = true
    if lastOneExtra:
      # Execute `lastBlock` on its own as a single-block batch
      let
        headers0 = headers9[0..0]
        bodies0 = bodies9[0..0]
      noisy.say "***", &"processing {dotsOrSpace}[#{lastBlock},#{lastBlock}]"
      let runPersistBlocks0Rc = chain.persistBlocks(headers0, bodies0)
      xCheck runPersistBlocks0Rc == ValidationResult.OK
    else:
      noisy.say "***", &"processing {dotsOrSpace}[#{lastBlock},#{toBlock}]"
      let runPersistBlocks9Rc = chain.persistBlocks(headers9, bodies9)
      xCheck runPersistBlocks9Rc == ValidationResult.OK

    # `lastBlock` has been processed -- done
    break

  # Restore the previously saved API tracking flags
  (com.db.trackLegaApi, com.db.trackNewApi, com.db.trackLedgerApi) = save
  true
|
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# End
|
|
|
|
# ------------------------------------------------------------------------------
|