Core db and aristo updates for destructor and tx logic (#1894)

* Disable `TransactionID` related functions from `state_db.nim`
  why:
    The functions `getCommittedStorage()` and `updateOriginalRoot()` from
    the `state_db` module are used nowhere. Emulating the legacy
    `TransactionID` type functionality would be administratively expensive
    for `Aristo` to provide (the legacy DB version is only partially
    implemented, anyway.)
    As there is no other place where `TransactionID`s are used, they will
    not be provided by the `Aristo` variant of the `CoreDb`. For the
    legacy DB API, nothing changes.

* Fix copyright headers in source code

* Get rid of compiler warning

* Update Aristo code, remove unused `merge()` variant, export `hashify()`
  why:
    Adapt to the upcoming `CoreDb` wrapper.

* Remove synced tx feature from `Aristo`
  why:
    + This feature allowed transaction methods like begin, commit, and
      rollback to be synchronised for a group of descriptors.
    + The feature is over-engineered and not needed for `CoreDb`, nor is
      it complete (some convergence features are missing.)

* Add debugging helpers to `Kvt`
  also:
    Update the database iterator, adding a count variable yield argument
    similar to `Aristo`.

* Provide optional destructors for the `CoreDb` API
  why:
    For the upcoming Aristo wrapper, this allows controlling when certain
    smart destruction and update actions take place. The auto destructor
    works fine in general when the storage/cache strategy is known and
    acceptable at descriptor creation time.

* Add an `update` option to the `CoreDb` API function `hash()`; see the
  sketch after this list
  why:
    The `hash()` function is typically used to get the state root of the
    MPT. Due to lazy hashing, this might not yet be available on the
    `Aristo` DB, so the `update` option asks for re-hashing the current
    state changes if needed.

* Update API tracking log mode: `info` => `debug`

* Use a shared `Kvt` descriptor in the new Ledger API
  why:
    No need to create a new descriptor all the time.
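
A minimal sketch of the lazy-hashing idea behind that `update` option
(illustrative only; `MptSketch`, `dirty`, and `rehash()` are hypothetical
names, not the actual `CoreDb` API):

  type
    MptSketch = ref object
      dirty: bool         # pending changes not yet folded into the root
      cachedRoot: string  # last computed state root

  proc rehash(db: MptSketch): string =
    # stand-in for real Merkle re-hashing of the current state changes
    db.dirty = false
    db.cachedRoot = "0x<recomputed>"
    db.cachedRoot

  proc hash(db: MptSketch; update = false): string =
    # with lazy hashing the cached root may be stale; `update = true`
    # asks for the pending changes to be re-hashed first
    if db.dirty and update:
      return db.rehash()
    db.cachedRoot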

# Nimbus
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

## This test has two different parts:
##
## :CI:
##   This was roughly inspired by repeated failures of running nimbus
##   similar to
##   ::
##     nimbus \
##       --data-dir:./kintsugi/tmp \
##       --custom-network:kintsugi-network.json \
##       --bootstrap-file:kintsugi-bootnodes.txt \
##       --prune-mode:full ...
##
##   from `issue 932 <https://github.com/status-im/nimbus-eth1/issues/932>`_.
##
## :TDD (invoked as local executable):
##   Test driven development to prepare for The Merge using real data, in
##   particular studying TTD.
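##
## The TTD condition exercised in the TDD part is simply (cf. the
## `ttdReached()` helper below):
## ::
##   ttd <= headTotalDifficulty
##
## i.e. the merge point counts as reached once the accumulated proof-of-work
## block difficulty meets the configured terminal total difficulty.
##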
import
  std/[os, sequtils, strformat, strutils],
  chronicles,
  results,
  unittest2,
  ../nimbus/core/chain, # must be early (compilation annoyance)
  ../nimbus/config,
  ../nimbus/common/common,
  ../nimbus/db/core_db/persistent,
  ./replay/[undump_blocks, pp]

type
  ReplaySession = object
    fancyName: string     # display name
    genesisFile: string   # JSON file base name
    termTotalDff: UInt256 # terminal total difficulty (to verify)
    mergeFork: uint64     # block number of the merge fork (to verify)
    captures: seq[string] # list of gzipped RLP data dumps
    ttdReachedAt: uint64  # block number where the terminal total difficulty is reached
    failBlockAt: uint64   # stop here and expect that block to fail

const
  baseDir = [".", "..", ".."/"..", $DirSep]
  repoDir = [".", "tests"/"replay", "tests"/"customgenesis",
             "nimbus-eth1-blobs"/"replay",
             "nimbus-eth1-blobs"/"custom-network"]

  devnet4 = ReplaySession(
    fancyName: "Devnet4",
    genesisFile: "devnet4.json",
    captures: @["devnetfour5664.txt.gz"],
    termTotalDff: 5_000_000_000.u256,
    mergeFork: 100,
    ttdReachedAt: 5645,
    # Previously failed at `ttdReachedAt` (needed `state.nim` fix/update)
    failBlockAt: 99999999)

  devnet5 = ReplaySession(
    fancyName: "Devnet5",
    genesisFile: "devnet5.json",
    captures: @["devnetfive43968.txt.gz"],
    termTotalDff: 500_000_000_000.u256,
    mergeFork: 1000,
    ttdReachedAt: 43711,
    failBlockAt: 99999999)

  kiln = ReplaySession(
    fancyName: "Kiln",
    genesisFile: "kiln.json",
    captures: @[
      "kiln048000.txt.gz",
      "kiln048001-55296.txt.gz",
      # "kiln055297-109056.txt.gz",
      # "kiln109057-119837.txt.gz",
      ],
    termTotalDff: 20_000_000_000_000.u256,
    mergeFork: 1000,
    ttdReachedAt: 55127,
    failBlockAt: 1000) # Kludge, some change in the `merge` logic?

# Block chains shared between test suites
var
  mcom: CommonRef      # memory DB
  dcom: CommonRef      # persistent DB on disk
  ddbDir: string       # data directory for disk database
  sSpcs: ReplaySession # current replay session specs

const
  # FIXED: Persistent database crash on `Devnet4` replay if the database
  # directory was accidentally deleted (due to a stray `defer:` directive.)
  ddbCrashBlockNumber = 2105
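
# A sketch of the bug pattern behind the fix above (hypothetical code, for
# illustration only; the real clean-up logic lives in `flushDbDir()` below):
#
#   proc openSessionBuggy(dir: string) =
#     defer: dir.flushDbDir # stray `defer:` fires when this proc returns,
#                           # deleting the directory while the DB is live
#     # ... allocate the persistent DB in `dir` and hand it out ...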
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------

proc findFilePath(file: string): Result[string,void] =
  for dir in baseDir:
    for repo in repoDir:
      let path = dir / repo / file
      if path.fileExists:
        return ok(path)
  err()
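
# Example use (cf. `genesisLoadRunner` below):
#   let gFilePath = sSpcs.genesisFile.findFilePath.value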
proc flushDbDir(s: string) =
  if s != "":
    let dataDir = s / "nimbus"
    if (dataDir / "data").dirExists:
      # Typically under Windows: there might be stale file locks.
      try: dataDir.removeDir except CatchableError: discard
    # Remove the top-level directory only if it is empty, i.e. the loop
    # below yields no entry at all.
    block dontClearUnlessEmpty:
      for w in s.walkDir:
        break dontClearUnlessEmpty
      try: s.removeDir except CatchableError: discard

proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  if noisy:
    if args.len == 0:
      echo "*** ", pfx
    elif 0 < pfx.len and pfx[^1] != ' ':
      echo pfx, " ", args.toSeq.join
    else:
      echo pfx, args.toSeq.join

proc setTraceLevel =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.TRACE)

proc setErrorLevel =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc ddbCleanUp(dir: string) =
  ddbDir = dir
  dir.flushDbDir

proc ddbCleanUp =
  ddbDir.ddbCleanUp

proc isOk(rc: ValidationResult): bool =
  rc == ValidationResult.OK

proc ttdReached(com: CommonRef): bool =
  ## `true` iff the head total difficulty has reached the configured
  ## terminal total difficulty (if any.)
  if com.ttd.isSome:
    return com.ttd.get <= com.db.headTotalDifficulty()
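
# Worked example: for the `devnet4` session above (ttd = 5_000_000_000),
# `ttdReached()` flips to `true` at block #5645, the session's
# `ttdReachedAt` value.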
proc importBlocks(c: ChainRef; h: seq[BlockHeader]; b: seq[BlockBody];
                  noisy = false): bool =
  ## Import the argument blocks `h`/`b`, returning `true` on success.
  let
    (first, last) = (h[0].blockNumber, h[^1].blockNumber)
    nTxs = b.mapIt(it.transactions.len).foldl(a+b)
    nUnc = b.mapIt(it.uncles.len).foldl(a+b)
    tddOk = c.com.ttdReached
    bRng = if 1 < h.len: &"s [#{first}..#{last}]={h.len}" else: &" #{first}"
    blurb = &"persistBlocks([#{first}..#{last}])"

  catchException(blurb, trace = true):
    if c.persistBlocks(h, b).isOk:
      noisy.say "***", &"block{bRng} #txs={nTxs} #uncles={nUnc}"
      if not tddOk and c.com.ttdReached:
        noisy.say "***", &"block{bRng} => tddReached"
      return true

  noisy.say "***", &"block{bRng} #txs={nTxs} #uncles={nUnc} -- failed"

# ------------------------------------------------------------------------------
# Test Runner
# ------------------------------------------------------------------------------

proc genesisLoadRunner(noisy = true;
                       captureSession = devnet4;
                       persistPruneTrie = true) =
  sSpcs = captureSession

  let
    gFileInfo = sSpcs.genesisFile.splitFile.name.split(".")[0]
    gFilePath = sSpcs.genesisFile.findFilePath.value

    tmpDir = gFilePath.splitFile.dir / "tmp"

    persistPruneInfo = if persistPruneTrie: "pruning enabled"
                       else: "no pruning"

  suite &"{sSpcs.fancyName} custom network genesis & database setup":
    var
      params: NetworkParams

    test &"Load params from {gFileInfo}":
      noisy.say "***", "custom-file=", gFilePath
      check gFilePath.loadNetworkParams(params)

    test "Construct in-memory ChainDBRef, pruning enabled":
      mcom = CommonRef.new(
        newCoreDbRef LegacyDbMemory,
        networkId = params.config.chainId.NetworkId,
        params = params)

      check mcom.ttd.get == sSpcs.termTotalDff
      check mcom.toHardFork(sSpcs.mergeFork.toBlockNumber.forkDeterminationInfo) == MergeFork

    test &"Construct persistent ChainDBRef on {tmpDir}, {persistPruneInfo}":
      # Before allocating the database, the data directory needs to be
      # cleared. There might be leftovers from a previous crash, or file
      # locks under Windows might have prevented a previous clean-up.
      tmpDir.ddbCleanUp

      # Constructor ...
      dcom = CommonRef.new(
        newCoreDbRef(LegacyDbPersistent, tmpDir),
        networkId = params.config.chainId.NetworkId,
        pruneTrie = persistPruneTrie,
        params = params)

      check dcom.ttd.get == sSpcs.termTotalDff
      check dcom.toHardFork(sSpcs.mergeFork.toBlockNumber.forkDeterminationInfo) == MergeFork

    test "Initialise in-memory Genesis":
      mcom.initializeEmptyDb

      # Verify variant of `toBlockHeader()`. The function `pp()` is used
      # (rather than `blockHash()`) for a readable error report (if any).
      let
        storedHeaderPP = mcom.db.getBlockHeader(0.u256).pp
        onTheFlyHeaderPP = mcom.genesisHeader.pp
      check storedHeaderPP == onTheFlyHeaderPP

    test "Initialise persistent Genesis":
      dcom.initializeEmptyDb

      # Must be the same as the in-memory DB value
      check dcom.db.getBlockHash(0.u256) == mcom.db.getBlockHash(0.u256)

      let
        storedHeaderPP = dcom.db.getBlockHeader(0.u256).pp
        onTheFlyHeaderPP = dcom.genesisHeader.pp
      check storedHeaderPP == onTheFlyHeaderPP

proc testnetChainRunner(noisy = true;
                        memoryDB = true;
                        stopAfterBlock = 999999999) =
  let
    cFileInfo = sSpcs.captures[0].splitFile.name.split(".")[0]
    cFilePath = sSpcs.captures.mapIt(it.findFilePath.value)
    dbInfo = if memoryDB: "in-memory" else: "persistent"

    pivotBlockNumber = sSpcs.failBlockAt.u256
    lastBlockNumber = stopAfterBlock.u256
    ttdBlockNumber = sSpcs.ttdReachedAt.u256

  suite &"Block chain DB inspector for {sSpcs.fancyName}":
    var
      bcom: CommonRef
      chn: ChainRef
      pivotHeader: BlockHeader
      pivotBody: BlockBody

    test &"Inherit {dbInfo} block chain DB from previous session":
      check not mcom.isNil
      check not dcom.isNil

      # Whatever DB suits, mcom: in-memory, dcom: persistent/on-disk
      bcom = if memoryDB: mcom else: dcom

      chn = bcom.newChain
      noisy.say "***", "ttd",
        " db.config.TTD=", chn.com.ttd
        # " db.arrowGlacierBlock=0x", chn.db.config.arrowGlacierBlock.toHex

    test &"Replay {cFileInfo} capture, may fail ~#{pivotBlockNumber} "&
        &"(slow -- time for coffee break)":
      noisy.say "***", "capture-files=[", cFilePath.join(","), "]"
      discard

    test &"Processing {sSpcs.fancyName} blocks":
      for w in cFilePath.mapIt(it.string).undumpBlocks:
        let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)

        # Install & verify Genesis
        if w[0][0].blockNumber == 0.u256:
          doAssert w[0][0] == bcom.db.getBlockHeader(0.u256)
          continue

        # Persist blocks, full range before `pivotBlockNumber`
        if toBlock < pivotBlockNumber:
          if not chn.importBlocks(w[0], w[1], noisy):
            # Just a guess -- might be any block in that range
            (pivotHeader, pivotBody) = (w[0][0], w[1][0])
            break
          if chn.com.ttdReached:
            check ttdBlockNumber <= toBlock
          else:
            check toBlock < ttdBlockNumber
          if lastBlockNumber <= toBlock:
            break

        else:
          let top = (pivotBlockNumber - fromBlock).truncate(uint64).int

          # Load the blocks before the pivot block
          if 0 < top:
            check chn.importBlocks(w[0][0 ..< top], w[1][0 ..< top], noisy)

          (pivotHeader, pivotBody) = (w[0][top], w[1][top])
          break

    test &"Processing {sSpcs.fancyName} block #{pivotHeader.blockNumber}, "&
        &"persistBlocks() will fail":

      setTraceLevel()

      if pivotHeader.blockNumber == 0:
        skip()
      else:
        # Expecting that the import fails at the current block ...
        check not chn.importBlocks(@[pivotHeader], @[pivotBody], noisy)

# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------

proc customNetworkMain*(noisy = defined(debug)) =
  defer: ddbCleanUp()
  noisy.genesisLoadRunner

when isMainModule:
  let noisy = defined(debug) or true
  setErrorLevel()

  noisy.showElapsed("customNetwork"):
    defer: ddbCleanUp()

    noisy.genesisLoadRunner(
      # any of: devnet4, devnet5, kiln, etc.
      captureSession = kiln)

    # Note that `testnetChainRunner()` finds the replay dump files
    # typically in the `nimbus-eth1-blobs` module.
    noisy.testnetChainRunner(
      stopAfterBlock = 999999999)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------