Core db+aristo update storage trie handling (#2023)
* CoreDb: Test module with additional sample selector cmd line options

* Aristo: Do not automatically remove a storage trie with the account
  why: This is an unnecessary side effect. Rather than relying on an
       automatism, a storage root must be deleted manually.

* Aristo: Can handle stale storage root vertex IDs as empty IDs.
  why: This is currently needed for the ledger API supporting both a
       legacy and the `Aristo` database backend. This feature can be
       disabled at compile time by re-setting the
       `LOOSE_STORAGE_TRIE_COUPLING` flag in the `aristo_constants` module.

* CoreDb+Aristo: Flush/delete storage trie when deleting account
  why: On either backend, a deleted account leaves a dangling storage trie
       on the database. For consistency, on the legacy backend storage
       tries must not be deleted as they might be shared by several
       accounts, whereas on `Aristo` they are always unique.
This commit is contained in:
parent 35131b6d55, commit 9e50af839f
@@ -59,6 +59,17 @@ const
   ## functions with fixed assignments of the type of a state root (e.g. for
   ## a receipt or a transaction root.)

+  LOOSE_STORAGE_TRIE_COUPLING* = true
+    ## With the `LOOSE_STORAGE_TRIE_COUPLING` flag enabled, a sub-trie is
+    ## considered empty if the root vertex ID is zero, or if it is at least
+    ## `LEAST_FREE_VID` while there is no vertex available. Whether a
+    ## non-zero vertex ID should be considered empty in this way affects the
+    ## calculation of the Merkle hash node key for an account leaf of
+    ## payload type `AccountData`.
+    ##
+    ## Setting this flag `true` might be helpful for running an API
+    ## supporting both a legacy and the `Aristo` database backend.
+    ##
+
 static:
   doAssert 1 < LEAST_FREE_VID # must stay away from `VertexID(1)`
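For illustration, the emptiness rule described by `LOOSE_STORAGE_TRIE_COUPLING` can be spelled out as a predicate. This is a sketch only, not part of the commit; `subTrieSeemsEmpty` is a hypothetical name, while `AristoDbRef`, `VertexID`, `LEAST_FREE_VID`, `getVtx` and `isValid` are taken from the modules touched in this diff:

    # Hypothetical helper, assuming the Aristo module layout shown above.
    import std/typetraits
    import "."/[aristo_constants, aristo_desc, aristo_get]

    proc subTrieSeemsEmpty(db: AristoDbRef; root: VertexID): bool =
      # A zero root vertex ID always counts as empty.
      if not root.isValid:
        return true
      when LOOSE_STORAGE_TRIE_COUPLING:
        # A stale storage root (dynamically allocated ID without a vertex)
        # is also treated as empty.
        if LEAST_FREE_VID <= root.distinctBase and not db.getVtx(root).isValid:
          return true
      false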
@@ -298,10 +298,16 @@ proc collapseLeaf(
 proc delSubTree(
     db: AristoDbRef;                   # Database, top layer
     root: VertexID;                    # Root vertex
+    accPath: PathID;                   # Needed for real storage tries
       ): Result[void,(VertexID,AristoError)] =
   ## Implementation of *delete* sub-trie.
   if not root.isValid:
     return err((root,DelSubTreeVoidRoot))
+
+  if LEAST_FREE_VID <= root.distinctBase:
+    db.registerAccount(root, accPath).isOkOr:
+      return err((root,error))
+
   var
     dispose = @[root]
     rootVtx = db.getVtxRc(root).valueOr:
@@ -333,7 +339,7 @@ proc deleteImpl(
     hike: Hike;                        # Fully expanded path
     lty: LeafTie;                      # `Patricia Trie` path root-to-leaf
     accPath: PathID;                   # Needed for accounts payload
-      ): Result[void,(VertexID,AristoError)] =
+      ): Result[bool,(VertexID,AristoError)] =
   ## Implementation of *delete* functionality.

   if LEAST_FREE_VID <= lty.root.distinctBase:
@@ -347,15 +353,13 @@ proc deleteImpl(
   if lf.vid in db.pPrf:
     return err((lf.vid, DelLeafLocked))

-  # Will be needed at the end. Just detect an error early enough
-  let leafVidBe = block:
-    let rc = db.getVtxBE lf.vid
-    if rc.isErr:
-      if rc.error != GetVtxNotFound:
-        return err((lf.vid, rc.error))
-      VertexRef(nil)
-    else:
-      rc.value
+  # Verify that there is no dangling storage trie
+  block:
+    let data = lf.vtx.lData
+    if data.pType == AccountData:
+      let vid = data.account.storageID
+      if vid.isValid and db.getVtx(vid).isValid:
+        return err((vid,DelDanglingStoTrie))

   db.disposeOfVtx lf.vid
@@ -407,14 +411,7 @@ proc deleteImpl(
     # at a later state.
     db.top.final.lTab[lty] = VertexID(0)

-  # Delete dependent leaf node storage tree if there is any
-  let data = lf.vtx.lData
-  if data.pType == AccountData:
-    let vid = data.account.storageID
-    if vid.isValid:
-      return db.delSubTree vid
-
-  ok()
+  ok(not db.getVtx(hike.root).isValid)

 # ------------------------------------------------------------------------------
 # Public functions
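With `deleteImpl()` now returning `ok(not db.getVtx(hike.root).isValid)`, callers learn whether the whole trie became empty. A minimal sketch of consuming that flag, assuming `hike`, `lty` and `accPath` are set up as elsewhere in this module:

    # Sketch only; `deleteImpl` is module-private, so in practice this runs
    # through the public `delete()` wrappers further below.
    let becameEmpty = db.deleteImpl(hike, lty, accPath).valueOr:
      return err(error)
    if becameEmpty:
      # The root vertex is gone; any cached root reference can be reset.
      discard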
@@ -423,22 +420,25 @@ proc deleteImpl(
 proc delete*(
     db: AristoDbRef;                   # Database, top layer
     root: VertexID;                    # Root vertex
+    accPath: PathID;                   # Needed for real storage tries
       ): Result[void,(VertexID,AristoError)] =
   ## Delete sub-trie below `root`. The maximum supported sub-tree size is
   ## `SUB_TREE_DISPOSAL_MAX`. Larger tries must be disposed by walk-deleting
   ## leaf nodes using `left()` or `right()` traversal functions.
   ##
   ## Caveat:
   ##   There is no way to quickly verify that the `root` argument is isolated.
   ##   Deleting random sub-trees might lead to an inconsistent database.
   ##
+  ## For a `root` argument greater than `LEAST_FREE_VID`, the sub-tree spanned
+  ## by `root` is considered a storage trie linked to an account leaf referred
+  ## to by a valid `accPath` (i.e. different from `VOID_PATH_ID`.) In that
+  ## case, an account must exist. If there is a payload of type `AccountData`,
+  ## its `storageID` field must be unset or equal to the `hike.root` vertex ID.
+  ##
-  db.delSubTree root
+  db.delSubTree(root, accPath)

 proc delete*(
     db: AristoDbRef;                   # Database, top layer
     hike: Hike;                        # Fully expanded chain of vertices
     accPath: PathID;                   # Needed for accounts payload
-      ): Result[void,(VertexID,AristoError)] =
+      ): Result[bool,(VertexID,AristoError)] =
   ## Delete argument `hike` chain of vertices from the database.
   ##
   ## For a `hike.root` with `VertexID` greater than `LEAST_FREE_VID`, the
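A usage sketch for the extended signature, assuming `db` is an `AristoDbRef` and `accPath` a valid account path for the storage trie in question (illustrative values only):

    # Storage tries (roots at or above LEAST_FREE_VID) need a real account path.
    let stoRoot = VertexID(LEAST_FREE_VID)   # illustrative storage root
    db.delete(stoRoot, accPath).isOkOr:
      echo "delete failed on vertex ", error[0], ": ", error[1]

    # Static, non-storage roots are deleted with a void path instead.
    db.delete(VertexID(2), VOID_PATH_ID).isOkOr:
      echo "delete failed: ", error[1]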
@@ -448,9 +448,7 @@ proc delete*(
   ## of type `AccountData`, its `storageID` field must be unset or equal to the
   ## `hike.root` vertex ID.
   ##
-  ## Note:
-  ##   If the leaf node has an account payload referring to a storage sub-trie,
-  ##   this one will be deleted as well.
+  ## The return code is `true` iff the trie has become empty.
   ##
   # Need path in order to remove it from `lTab[]`
   let lty = LeafTie(
@@ -462,7 +460,7 @@ proc delete*(
     db: AristoDbRef;                   # Database, top layer
     lty: LeafTie;                      # `Patricia Trie` path root-to-leaf
     accPath: PathID;                   # Needed for accounts payload
-      ): Result[void,(VertexID,AristoError)] =
+      ): Result[bool,(VertexID,AristoError)] =
   ## Variant of `delete()`
   ##
   db.deleteImpl(? lty.hikeUp(db).mapErr toVae, lty, accPath)
@@ -472,7 +470,7 @@ proc delete*(
     root: VertexID;
     path: openArray[byte];
     accPath: PathID;                   # Needed for accounts payload
-      ): Result[void,(VertexID,AristoError)] =
+      ): Result[bool,(VertexID,AristoError)] =
   ## Variant of `delete()`
   ##
   let rc = path.initNibbleRange.hikeUp(root, db)
@@ -186,18 +186,19 @@ type
     NearbyVidInvalid

     # Deletion of vertices, `delete()`
-    DelPathTagError
-    DelLeafExpexted
-    DelLeafLocked
-    DelLeafUnexpected
     DelBranchExpexted
     DelBranchLocked
     DelBranchWithoutRefs
+    DelDanglingStoTrie
     DelExtLocked
-    DelVidStaleVtx
+    DelLeafExpexted
+    DelLeafLocked
+    DelLeafUnexpected
+    DelPathNotFound
+    DelPathTagError
     DelSubTreeTooBig
     DelSubTreeVoidRoot
-    DelPathNotFound
+    DelVidStaleVtx

     # Functions from `aristo_filter.nim`
     FilBackendMissing
@@ -366,9 +366,7 @@ proc hashify*(
       if not vtx.isValid:
         # This might happen when proof nodes (see `snap` protocol) are on
         # an incomplete trie where this `vid` has a key but no vertex yet.
-        # Also, the key (as part of the proof data) must be on the backend
-        # by the way `leafToRootCrawler()` works. So it is enough to verify
-        # the key there.
+        # Also, the key (as part of the proof data) must be on the backend.
         discard db.getKeyBE(vid).valueOr:
           return err((vid,HashifyNodeUnresolved))
       else:
@@ -14,10 +14,10 @@
 {.push raises: [].}

 import
-  std/[sequtils, tables],
+  std/[sequtils, tables, typetraits],
   eth/common,
   results,
-  "."/[aristo_desc, aristo_get, aristo_hike, aristo_layers]
+  "."/[aristo_constants, aristo_desc, aristo_get, aristo_hike, aristo_layers]

 # ------------------------------------------------------------------------------
 # Public functions, converters
@@ -128,10 +128,16 @@ proc toNode*(
       let vid = vtx.lData.account.storageID
       if vid.isValid:
         let key = db.getKey vid
-        if key.isValid:
-          node.key[0] = key
-        else:
-          return err(@[vid])
+        if not key.isValid:
+          block looseCoupling:
+            when LOOSE_STORAGE_TRIE_COUPLING:
+              # Stale storage trie?
+              if LEAST_FREE_VID <= vid.distinctBase and
+                 not db.getVtx(vid).isValid:
+                node.lData.account.storageID = VertexID(0)
+                break looseCoupling
+            # Otherwise this is a stale storage trie.
+            return err(@[vid])
+        node.key[0] = key
       return ok node
@@ -447,6 +447,11 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
         if rc.error[1] == DelPathNotFound:
           return err(rc.error.toError(db, info, MptNotFound))
         return err(rc.error.toError(db, info))
+
+      if rc.value:
+        # Trie has become empty
+        cMpt.root = VoidTrieID
+
       ok()

   proc mptHasPath(
@@ -575,6 +580,27 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
       return err(rc.error.toError(db, info))
     ok()

+  proc accStoFlush(
+      cAcc: AristoChildDbRef;
+      address: EthAddress;
+      info: static[string];
+        ): CoreDbRc[void] =
+    let
+      db = cAcc.base.parent
+      mpt = cAcc.mpt
+      key = address.keccakHash.data
+      pyl = mpt.fetchPayload(cAcc.root, key).valueOr:
+        return ok()
+
+    # Use storage ID from account and delete that sub-trie
+    if pyl.pType == AccountData:
+      let stoID = pyl.account.storageID
+      if stoID.isValid:
+        let rc = mpt.delete(stoID, address.to(PathID))
+        if rc.isErr:
+          return err(rc.error.toError(db, info))
+    ok()

   proc accHasPath(
       cAcc: AristoChildDbRef;
       address: EthAddress;
@@ -602,6 +628,9 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
     deleteFn: proc(address: EthAddress): CoreDbRc[void] =
       cAcc.accDelete(address, "deleteFn()"),

+    stoFlushFn: proc(address: EthAddress): CoreDbRc[void] =
+      cAcc.accStoFlush(address, "stoFlushFn()"),
+
     mergeFn: proc(acc: CoreDbAccount): CoreDbRc[void] =
       cAcc.accMerge(acc, "mergeFn()"),
@@ -869,7 +898,11 @@ proc newMptHandler*(
     if rc.isErr:
       return err(rc.error[1].toError(db, info, AccNotFound))
   if trie.reset:
-    let rc = trie.ctx.mpt.delete(trie.root)
+    # Note that `reset` only applies to non-dynamic trie roots with vertex ID
+    # between `VertexID(2) ..< LEAST_FREE_VID`. At the moment, this applies to
+    # `GenericTrie` type sub-tries somehow emulating the behaviour of a new
+    # empty MPT on the legacy database (handle with care, though.)
+    let rc = trie.ctx.mpt.delete(trie.root, VOID_PATH_ID)
    if rc.isErr:
       return err(rc.error.toError(db, info, AutoFlushFailed))
     trie.reset = false
@@ -327,6 +327,9 @@ proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
         mpt.trie.del(k.keccakHash.data)
       ok(),

+    stoFlushFn: proc(k: EthAddress): CoreDbRc[void] =
+      ok(),
+
     mergeFn: proc(v: CoreDbAccount): CoreDbRc[void] =
       db.mapRlpException("mergeFn()"):
         mpt.trie.put(v.address.keccakHash.data, rlp.encode v.toAccount)
@@ -911,6 +911,21 @@ proc delete*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
   result = acc.methods.deleteFn address
   acc.ifTrackNewApi: debug newApiTxt, ctx, elapsed, address, result

+proc stoFlush*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
+  ## Recursively delete all data elements from the storage trie associated to
+  ## the account identified by the argument `address`. After a successful run,
+  ## the storage trie will be empty.
+  ##
+  ## Caveat:
+  ##   This function currently has no effect on the legacy backend, so it must
+  ##   not be relied upon in general. On the legacy backend, storage tries
+  ##   might be shared by several accounts whereas they are unique on the
+  ##   `Aristo` backend.
+  ##
+  acc.setTrackNewApi AccStoFlushFn
+  result = acc.methods.stoFlushFn address
+  acc.ifTrackNewApi: debug newApiTxt, ctx, elapsed, address, result
+
 proc merge*(
     acc: CoreDxAccRef;
     account: CoreDbAccount;
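A sketch of the intended call order at the API level, assuming `acc` is a `CoreDxAccRef` and `address` an `EthAddress` available from the surrounding context:

    # Flush the storage trie first, then remove the account itself; on
    # `Aristo` this avoids leaving a dangling storage trie behind.
    acc.stoFlush(address).isOkOr:
      raiseAssert "stoFlush() failed: " & $$error
    acc.delete(address).isOkOr:
      raiseAssert "delete() failed: " & $$error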
@@ -34,6 +34,7 @@ type
     AccMergeFn = "acc/merge"
     AccNewMptFn = "acc/newMpt"
     AccPersistentFn = "acc/persistent"
+    AccStoFlushFn = "acc/stoFlush"
     AccToMptFn = "acc/toMpt"

     AnyBackendFn = "any/backend"
@@ -188,7 +188,6 @@ type
     persistentFn*: CoreDbMptPersistentFn
     forgetFn*: CoreDbMptForgetFn

-
   # ----------------------------------------------------
   # Sub-descriptor: Mpt/hexary trie methods for accounts
   # ------------------------------------------------------
@@ -196,6 +195,7 @@ type
   CoreDbAccNewMptFn* = proc(): CoreDbRc[CoreDxMptRef] {.noRaise.}
   CoreDbAccFetchFn* = proc(k: EthAddress): CoreDbRc[CoreDbAccount] {.noRaise.}
   CoreDbAccDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
+  CoreDbAccStoFlushFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
   CoreDbAccMergeFn* = proc(v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
   CoreDbAccHasPathFn* = proc(k: EthAddress): CoreDbRc[bool] {.noRaise.}
   CoreDbAccGetTrieFn* = proc(): CoreDbTrieRef {.noRaise.}
@@ -209,6 +209,7 @@ type
     newMptFn*: CoreDbAccNewMptFn
     fetchFn*: CoreDbAccFetchFn
     deleteFn*: CoreDbAccDeleteFn
+    stoFlushFn*: CoreDbAccStoFlushFn
     mergeFn*: CoreDbAccMergeFn
     hasPathFn*: CoreDbAccHasPathFn
     getTrieFn*: CoreDbAccGetTrieFn
@@ -72,6 +72,7 @@ proc validateMethodsDesc(fns: CoreDbAccFns) =
   doAssert not fns.newMptFn.isNil
   doAssert not fns.fetchFn.isNil
   doAssert not fns.deleteFn.isNil
+  doAssert not fns.stoFlushFn.isNil
   doAssert not fns.mergeFn.isNil
   doAssert not fns.hasPathFn.isNil
   doAssert not fns.getTrieFn.isNil
@@ -114,6 +114,10 @@ proc merge*(al: AccountLedger; account: CoreDbAccount) =

 proc delete*(al: AccountLedger, eAddr: EthAddress) =
   const info = "AccountLedger/delete()"
+  # Flush associated storage trie
+  al.distinctBase.stoFlush(eAddr).isOkOr:
+    raiseAssert info & $$error
+  # Clear account
   al.distinctBase.delete(eAddr).isOkOr:
     if error.error == MptNotFound:
       return
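Design note: flushing the storage trie before clearing the account matters here, as `Aristo`'s `deleteImpl()` now returns `DelDanglingStoTrie` when an account leaf still references a live storage sub-trie.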
@@ -476,7 +476,7 @@ proc testTxMergeAndDeleteSubTree*(
           ""
     # Delete sub-tree
     block:
-      let rc = db.delete VertexID(1)
+      let rc = db.delete(VertexID(1), VOID_PATH_ID)
       xCheckRc rc.error == (0,0):
         noisy.say "***", "del(2)",
           " n=", n, "/", list.len,
@@ -11,7 +11,7 @@
 ## Testing `CoreDB` wrapper implementation

 import
-  std/[os, strformat, strutils, times],
+  std/[algorithm, os, strformat, strutils, times],
   chronicles,
   eth/common,
   results,
@@ -22,6 +22,9 @@ import
   ./test_coredb/[coredb_test_xx, test_chainsync, test_helpers]

 const
+  # If `true`, this compile time option sets up `unittest2` for manual parsing
+  unittest2DisableParamFiltering {.booldefine.} = false
+
   baseDir = [".", "..", ".."/"..", $DirSep]
   repoDir = [".", "tests", "nimbus-eth1-blobs"]
   subDir = ["replay", "test_coredb", "custom-network", "customgenesis"]
@@ -29,6 +32,9 @@ const
   # Reference file for finding some database directory base
   sampleDirRefFile = "coredb_test_xx.nim"

+  dbTypeDefault = LegacyDbMemory
+  ldgTypeDefault = LegacyAccountsCache
+
 let
   # Standard test sample
   bChainCapture = bulkTest0
@@ -37,27 +43,59 @@ let
 # Helpers
 # ------------------------------------------------------------------------------

+when unittest2DisableParamFiltering:
+  # Filter out local options and pass on the rest to `unittest2`
+  proc cmdLineConfig(): tuple[samples: seq[CaptureSpecs]] =
+
+    # Define sample list from the command line (if any)
+    const optPfx = "--sample=" # Custom option with sample list
+
+    proc parseError(s = "") =
+      let msg = if 0 < s.len: "Unsupported \"" & optPfx & "\" list item: " & s
+                else: "Empty \"" & optPfx & "\" list"
+      echo "*** ", getAppFilename().splitFile.name, ": ", msg
+      echo "    Available: ", allSamples.mapIt(it.name).sorted.join(" ")
+      quit(99)
+
+    var other: seq[string] # Options for manual parsing by `unittest2`
+
+    for arg in commandLineParams():
+      if optPfx.len <= arg.len and arg[0 ..< optPfx.len] == optPfx:
+        for w in arg[optPfx.len ..< arg.len].split(",").mapIt(it.strip):
+          block findSample:
+            for sample in allSamples:
+              if w.cmpIgnoreCase(sample.name) == 0:
+                result.samples.add sample
+                break findSample
+            w.parseError()
+        if result.samples.len == 0:
+          parseError()
+      else:
+        other.add arg
+
+    # Setup `unittest2`
+    other.parseParameters
+
+else:
+  # Kill the compilation process iff the directive `cmdLineConfig()` is used
+  template cmdLineConfig(): untyped =
+    {.error: "cmdLineConfig() needs compiler option " &
+      " -d:unittest2DisableParamFiltering".}
+

 proc findFilePath(
     file: string;
     baseDir: openArray[string] = baseDir;
     repoDir: openArray[string] = repoDir;
     subDir: openArray[string] = subDir;
       ): Result[string,void] =
-  for dir in baseDir:
-    if dir.dirExists:
-      for repo in repoDir:
-        if (dir / repo).dirExists:
-          for sub in subDir:
-            if (dir / repo / sub).dirExists:
-              let path = dir / repo / sub / file
-              if path.fileExists:
-                return ok(path)
-  echo "*** File not found \"", file, "\"."
-  err()
+  file.findFilePathHelper(baseDir, repoDir, subDir)


 proc getTmpDir(sampleDir = sampleDirRefFile): string =
   sampleDir.findFilePath.value.splitFile.dir


 proc flushDbDir(s: string) =
   if s != "":
     let dataDir = s / "nimbus"
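Usage note, inferred from the parsing logic above: with the test binary compiled using `-d:unittest2DisableParamFiltering`, samples are selected via the custom option, e.g.

    # Names must match entries of `allSamples` (case-insensitive); unknown
    # names abort with exit code 99 and a list of the available samples.
    #
    #   ./test_coredb --sample=goerli-some,main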
@@ -142,8 +180,8 @@ proc initRunnerDB(
 proc chainSyncRunner(
     noisy = true;
     capture = bChainCapture;
-    dbType = LegacyDbMemory;
-    ldgType = LegacyAccountsCache;
+    dbType = CoreDbType(0);
+    ldgType = ldgTypeDefault;
     enaLogging = false;
     lastOneExtra = true;
       ) =
@@ -156,6 +194,16 @@ proc chainSyncRunner(
     dbDir = baseDir / "tmp"
     numBlocks = capture.numBlocks
     numBlocksInfo = if numBlocks == high(int): "all" else: $numBlocks

+    dbType = block:
+      # Decreasing priority: dbType, capture.dbType, dbTypeDefault
+      var effDbType = dbTypeDefault
+      if dbType != CoreDbType(0):
+        effDbType = dbType
+      elif capture.dbType != CoreDbType(0):
+        effDbType = capture.dbType
+      effDbType
+
     persistent = dbType in CoreDbPersistentTypes

   defer:
@@ -168,7 +216,7 @@ proc chainSyncRunner(
     com = initRunnerDB(dbDir, capture, dbType, ldgType)
   defer:
     com.db.finish(flush = true)
-    noisy.testChainSyncProfilingPrint numBlocks
+    #noisy.testChainSyncProfilingPrint numBlocks
     if persistent: dbDir.flushDbDir

   if noisy:
@@ -190,25 +238,30 @@ proc coreDbMain*(noisy = defined(debug)) =
 when isMainModule:
   const
     noisy = defined(debug) or true
+  var
+    sampleList: seq[CaptureSpecs]

   setErrorLevel()

   # This one uses the readily available dump: `bulkTest0` and some huge replay
   # dumps `bulkTest2`, `bulkTest3`, .. from the `nimbus-eth1-blobs` package.
   # For specs see `tests/test_coredb/bulk_test_xx.nim`.
-  var testList = @[bulkTest0] # This test is superseded by `bulkTest1` and `2`
-  #testList = @[failSample0]
-  when true and false:
-    testList = @[bulkTest2, bulkTest3]
+  sampleList = cmdLineConfig().samples
+  if sampleList.len == 0:
+    sampleList = @[bulkTest0]
+    when true:
+      sampleList = @[bulkTest2, bulkTest3]
+      sampleList = @[ariTest1] # debugging

   var state: (Duration, int)
-  for n,capture in testList:
+  for n,capture in sampleList:
     noisy.profileSection("@testList #" & $n, state):
       noisy.chainSyncRunner(
         capture=capture,
-        dbType=AristoDbMemory,
+        #dbType = ...,
         ldgType=LedgerCache,
-        #enaLogging=true
+        #enaLogging = true
       )

   noisy.say "***", "total elapsed: ", state[0].pp, " sections: ", state[1]
@@ -9,7 +9,9 @@
 # distributed except according to those terms.

 import
+  std/strutils,
   eth/common,
+  ../../nimbus/db/core_db,
   ../../nimbus/common/chain_config

 type
@@ -22,28 +24,29 @@ type
     genesis*: string                   ## Optional config file (instead of `network`)
     files*: seq[string]                ## Names of capture files
     numBlocks*: int                    ## Number of blocks to load
+    dbType*: CoreDbType                ## Use `CoreDbType(0)` for default

 # Must not use `const` here, see `//github.com/nim-lang/Nim/issues/23295`
 # Waiting for fix `//github.com/nim-lang/Nim/pull/23297` (or similar) to
 # appear on local `Nim` compiler version.
 let
   bulkTest0* = CaptureSpecs(
-    name:      "some-goerli",
     builtIn:   true,
+    name:      "goerli-some",
     network:   GoerliNet,
     files:     @["goerli68161.txt.gz"],
     numBlocks: 1_000)

   bulkTest1* = CaptureSpecs(
-    name:      "more-goerli",
     builtIn:   true,
+    name:      "goerli-more",
     network:   GoerliNet,
     files:     @["goerli68161.txt.gz"],
     numBlocks: high(int))

   bulkTest2* = CaptureSpecs(
-    name:      "much-goerli",
     builtIn:   true,
+    name:      "goerli",
     network:   GoerliNet,
     files: @[
       "goerli482304.txt.gz", # on nimbus-eth1-blobs/replay
@@ -51,8 +54,8 @@ let
     numBlocks: high(int))

   bulkTest3* = CaptureSpecs(
-    name:      "mainnet",
     builtIn:   true,
+    name:      "main",
     network:   MainNet,
     files: @[
       "mainnet332160.txt.gz", # on nimbus-eth1-blobs/replay
@@ -62,18 +65,53 @@ let
     numBlocks: high(int))


-  failSample0* = CaptureSpecs(
-    name:      "fail-goerli",
+  # Test samples with all the problems one can expect
+  ariTest0* = CaptureSpecs(
     builtIn:   true,
-    network:   GoerliNet,
+    name:      bulkTest2.name & "-am",
+    network:   bulkTest2.network,
     files:     bulkTest2.files,
-    numBlocks: 301_375 + 1) # +1 => crash on Aristo only
+    numBlocks: high(int),
+    dbType:    AristoDbMemory)

-  failSample1* = CaptureSpecs(
-    name:      "fail-main",
+  ariTest1* = CaptureSpecs(
     builtIn:   true,
-    network:   MainNet,
+    name:      bulkTest2.name & "-ar",
+    network:   bulkTest2.network,
     files:     bulkTest2.files,
+    numBlocks: high(int),
+    dbType:    AristoDbRocks)
+
+  ariTest2* = CaptureSpecs(
+    builtIn:   true,
+    name:      bulkTest3.name & "-ar",
+    network:   bulkTest3.network,
+    files:     bulkTest3.files,
-    numBlocks: 257_280 + 512)
+    numBlocks: high(int),
+    dbType:    AristoDbRocks)
+
+
+  # To be compared against the proof-of-concept implementation as reference
+  legaTest1* = CaptureSpecs(
+    builtIn:   true,
+    name:      ariTest1.name.replace("-ar", "-lp"),
+    network:   ariTest1.network,
+    files:     ariTest1.files,
+    numBlocks: ariTest1.numBlocks,
+    dbType:    LegacyDbPersistent)
+
+  legaTest2* = CaptureSpecs(
+    builtIn:   true,
+    name:      ariTest2.name.replace("-ar", "-lp"),
+    files:     ariTest2.files,
+    numBlocks: ariTest2.numBlocks,
+    dbType:    LegacyDbPersistent)
+
+  # ------------------
+
+  allSamples* = [
+    bulkTest0, bulkTest1, bulkTest2, bulkTest3,
+    ariTest0, ariTest1, ariTest2,
+    legaTest1, legaTest2]

 # End
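For local experiments, a further sample can be declared alongside the ones above. A sketch with an illustrative name and capture file (neither is shipped with `nimbus-eth1-blobs`); remember to append it to `allSamples` so that `--sample=` can find it:

    myTest* = CaptureSpecs(
      builtIn:   false,
      name:      "goerli-tiny",            # hypothetical name
      network:   GoerliNet,
      files:     @["goerli1000.txt.gz"],   # hypothetical capture file
      numBlocks: 500,
      dbType:    AristoDbMemory)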
@@ -213,7 +213,6 @@ proc test_chainSync*(
         dotsOrSpace = "         "

     noisy.startLogging(headers9[0].blockNumber)
-
     if lastOneExtra:
       let
         headers0 = headers9[0..0]
@@ -229,7 +228,6 @@ proc test_chainSync*(
       xCheck runPersistBlocks9Rc == ValidationResult.OK
       break
-
   true

 # ------------------------------------------------------------------------------
@@ -9,8 +9,9 @@
 # distributed except according to those terms.

 import
-  std/[sequtils, times],
+  std/[os, sequtils, times],
   eth/common,
+  results,
   ../../nimbus/utils/prettify,
   ../replay/pp
@@ -53,6 +54,24 @@ func pp*(
 # Public helpers
 # ------------------------------------------------------------------------------

+proc findFilePathHelper*(
+    file: string;
+    baseDir: openArray[string];
+    repoDir: openArray[string];
+    subDir: openArray[string];
+      ): Result[string,void] =
+  for dir in baseDir:
+    if dir.dirExists:
+      for repo in repoDir:
+        if (dir / repo).dirExists:
+          for sub in subDir:
+            if (dir / repo / sub).dirExists:
+              let path = dir / repo / sub / file
+              if path.fileExists:
+                return ok(path)
+  echo "*** File not found \"", file, "\"."
+  err()
+
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
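A usage sketch for the extracted helper, mirroring how `findFilePath` in the test module now delegates to it (directory lists are illustrative):

    # `findFilePathHelper` echoes a note and returns `err()` if the file
    # cannot be located in any base/repo/sub directory combination.
    let path = "goerli68161.txt.gz".findFilePathHelper(
        [".", ".."], ["tests"], ["replay"]).valueOr:
      quit 1
    echo "using capture ", path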