Core db+aristo update storage trie handling (#2023)

* CoreDb: Test module with additional sample selector cmd line options

* Aristo: Do not automatically remove a storage trie with the account

why:
  This is an unnecessary side effect. Rather than using an automatism,
  a storage root must be deleted manually.

* Aristo: Can handle stale storage root vertex IDs as empty IDs.

why:
  This is currently needed for the ledger API supporting both, a legacy
  and the `Aristo` database backend.

  This feature can be disabled at compile time by re-setting the
  `LOOSE_STORAGE_TRIE_COUPLING` flag in the `aristo_constants` module.

* CoreDb+Aristo: Flush/delete storage trie when deleting account

why:
  On either backend, a deleted account leaves a dangling storage trie on
  the database.

  For consistency on the legacy backend, storage tries must not be
  deleted as they might be shared by several accounts whereas on `Aristo`
  they are always unique.
This commit is contained in:
Jordan Hrycaj 2024-02-12 19:37:00 +00:00 committed by GitHub
parent 35131b6d55
commit 9e50af839f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
17 changed files with 263 additions and 83 deletions

View File

@ -59,6 +59,17 @@ const
## functions with fixed assignments of the type of a state root (e.g. for ## functions with fixed assignments of the type of a state root (e.g. for
## a receipt or a transaction root.) ## a receipt or a transaction root.)
LOOSE_STORAGE_TRIE_COUPLING* = true
## Enabling the `LOOSE_STORAGE_TRIE_COUPLING` flag a sub-trie is considered
## empty if the root vertex ID is zero or at least `LEAST_FREE_VID` and
## there is no vertex available. If the vertex ID is not zero and should
## be considered as such will affect calculating the Merkle hash node key
## for an account leaf of payload type `AccountData`.
##
## Setting this flag `true` might be helpful for running an API supporting
## both, a legacy and# the `Aristo` database backend.
##
static: static:
doAssert 1 < LEAST_FREE_VID # must stay away from `VertexID(1)` doAssert 1 < LEAST_FREE_VID # must stay away from `VertexID(1)`

View File

@ -298,10 +298,16 @@ proc collapseLeaf(
proc delSubTree( proc delSubTree(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
root: VertexID; # Root vertex root: VertexID; # Root vertex
accPath: PathID; # Needed for real storage tries
): Result[void,(VertexID,AristoError)] = ): Result[void,(VertexID,AristoError)] =
## Implementation of *delete* sub-trie. ## Implementation of *delete* sub-trie.
if not root.isValid: if not root.isValid:
return err((root,DelSubTreeVoidRoot)) return err((root,DelSubTreeVoidRoot))
if LEAST_FREE_VID <= root.distinctBase:
db.registerAccount(root, accPath).isOkOr:
return err((root,error))
var var
dispose = @[root] dispose = @[root]
rootVtx = db.getVtxRc(root).valueOr: rootVtx = db.getVtxRc(root).valueOr:
@ -333,7 +339,7 @@ proc deleteImpl(
hike: Hike; # Fully expanded path hike: Hike; # Fully expanded path
lty: LeafTie; # `Patricia Trie` path root-to-leaf lty: LeafTie; # `Patricia Trie` path root-to-leaf
accPath: PathID; # Needed for accounts payload accPath: PathID; # Needed for accounts payload
): Result[void,(VertexID,AristoError)] = ): Result[bool,(VertexID,AristoError)] =
## Implementation of *delete* functionality. ## Implementation of *delete* functionality.
if LEAST_FREE_VID <= lty.root.distinctBase: if LEAST_FREE_VID <= lty.root.distinctBase:
@ -347,15 +353,13 @@ proc deleteImpl(
if lf.vid in db.pPrf: if lf.vid in db.pPrf:
return err((lf.vid, DelLeafLocked)) return err((lf.vid, DelLeafLocked))
# Will be needed at the end. Just detect an error early enough # Verify that there is no dangling storage trie
let leafVidBe = block: block:
let rc = db.getVtxBE lf.vid let data = lf.vtx.lData
if rc.isErr: if data.pType == AccountData:
if rc.error != GetVtxNotFound: let vid = data.account.storageID
return err((lf.vid, rc.error)) if vid.isValid and db.getVtx(vid).isValid:
VertexRef(nil) return err((vid,DelDanglingStoTrie))
else:
rc.value
db.disposeOfVtx lf.vid db.disposeOfVtx lf.vid
@ -407,14 +411,7 @@ proc deleteImpl(
# at a later state. # at a later state.
db.top.final.lTab[lty] = VertexID(0) db.top.final.lTab[lty] = VertexID(0)
# Delete dependent leaf node storage tree if there is any ok(not db.getVtx(hike.root).isValid)
let data = lf.vtx.lData
if data.pType == AccountData:
let vid = data.account.storageID
if vid.isValid:
return db.delSubTree vid
ok()
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
@ -423,22 +420,25 @@ proc deleteImpl(
proc delete*( proc delete*(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
root: VertexID; # Root vertex root: VertexID; # Root vertex
accPath: PathID; # Needed for real storage tries
): Result[void,(VertexID,AristoError)] = ): Result[void,(VertexID,AristoError)] =
## Delete sub-trie below `root`. The maximum supported sub-tree size is ## Delete sub-trie below `root`. The maximum supported sub-tree size is
## `SUB_TREE_DISPOSAL_MAX`. Larger tries must be disposed by walk-deleting ## `SUB_TREE_DISPOSAL_MAX`. Larger tries must be disposed by walk-deleting
## leaf nodes using `left()` or `right()` traversal functions. ## leaf nodes using `left()` or `right()` traversal functions.
## ##
## Caveat: ## For a `root` argument greater than `LEAST_FREE_VID`, the sub-tree spanned
## There is no way to quickly verify that the `root` argument is isolated. ## by `root` is considered a storage trie linked to an account leaf referred
## Deleting random sub-trees might lead to an inconsistent database. ## to by a valid `accPath` (i.e. different from `VOID_PATH_ID`.) In that
## case, an account must exist. If there is payload of type `AccountData`,
## its `storageID` field must be unset or equal to the `hike.root` vertex ID.
## ##
db.delSubTree root db.delSubTree(root, accPath)
proc delete*( proc delete*(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
hike: Hike; # Fully expanded chain of vertices hike: Hike; # Fully expanded chain of vertices
accPath: PathID; # Needed for accounts payload accPath: PathID; # Needed for accounts payload
): Result[void,(VertexID,AristoError)] = ): Result[bool,(VertexID,AristoError)] =
## Delete argument `hike` chain of vertices from the database. ## Delete argument `hike` chain of vertices from the database.
## ##
## For a `hike.root` with `VertexID` greater than `LEAST_FREE_VID`, the ## For a `hike.root` with `VertexID` greater than `LEAST_FREE_VID`, the
@ -448,9 +448,7 @@ proc delete*(
## of type `AccountData`, its `storageID` field must be unset or equal to the ## of type `AccountData`, its `storageID` field must be unset or equal to the
## `hike.root` vertex ID. ## `hike.root` vertex ID.
## ##
## Note: ## The return code is `true` iff the trie has become empty.
## If the leaf node has an account payload referring to a storage sub-trie,
## this one will be deleted as well.
## ##
# Need path in order to remove it from `lTab[]` # Need path in order to remove it from `lTab[]`
let lty = LeafTie( let lty = LeafTie(
@ -462,7 +460,7 @@ proc delete*(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
lty: LeafTie; # `Patricia Trie` path root-to-leaf lty: LeafTie; # `Patricia Trie` path root-to-leaf
accPath: PathID; # Needed for accounts payload accPath: PathID; # Needed for accounts payload
): Result[void,(VertexID,AristoError)] = ): Result[bool,(VertexID,AristoError)] =
## Variant of `delete()` ## Variant of `delete()`
## ##
db.deleteImpl(? lty.hikeUp(db).mapErr toVae, lty, accPath) db.deleteImpl(? lty.hikeUp(db).mapErr toVae, lty, accPath)
@ -472,7 +470,7 @@ proc delete*(
root: VertexID; root: VertexID;
path: openArray[byte]; path: openArray[byte];
accPath: PathID; # Needed for accounts payload accPath: PathID; # Needed for accounts payload
): Result[void,(VertexID,AristoError)] = ): Result[bool,(VertexID,AristoError)] =
## Variant of `delete()` ## Variant of `delete()`
## ##
let rc = path.initNibbleRange.hikeUp(root, db) let rc = path.initNibbleRange.hikeUp(root, db)

View File

@ -186,18 +186,19 @@ type
NearbyVidInvalid NearbyVidInvalid
# Deletion of vertices, `delete()` # Deletion of vertices, `delete()`
DelPathTagError
DelLeafExpexted
DelLeafLocked
DelLeafUnexpected
DelBranchExpexted DelBranchExpexted
DelBranchLocked DelBranchLocked
DelBranchWithoutRefs DelBranchWithoutRefs
DelDanglingStoTrie
DelExtLocked DelExtLocked
DelVidStaleVtx DelLeafExpexted
DelLeafLocked
DelLeafUnexpected
DelPathNotFound
DelPathTagError
DelSubTreeTooBig DelSubTreeTooBig
DelSubTreeVoidRoot DelSubTreeVoidRoot
DelPathNotFound DelVidStaleVtx
# Functions from `aristo_filter.nim` # Functions from `aristo_filter.nim`
FilBackendMissing FilBackendMissing

View File

@ -366,9 +366,7 @@ proc hashify*(
if not vtx.isValid: if not vtx.isValid:
# This might happen when proof nodes (see `snap` protocol) are on # This might happen when proof nodes (see `snap` protocol) are on
# an incomplete trie where this `vid` has a key but no vertex yet. # an incomplete trie where this `vid` has a key but no vertex yet.
# Also, the key (as part of the proof data) must be on the backend # Also, the key (as part of the proof data) must be on the backend.
# by the way `leafToRootCrawler()` works. So it is enough to verify
# the key there.
discard db.getKeyBE(vid).valueOr: discard db.getKeyBE(vid).valueOr:
return err((vid,HashifyNodeUnresolved)) return err((vid,HashifyNodeUnresolved))
else: else:

View File

@ -14,10 +14,10 @@
{.push raises: [].} {.push raises: [].}
import import
std/[sequtils, tables], std/[sequtils, tables, typetraits],
eth/common, eth/common,
results, results,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_layers] "."/[aristo_constants, aristo_desc, aristo_get, aristo_hike, aristo_layers]
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions, converters # Public functions, converters
@ -128,9 +128,15 @@ proc toNode*(
let vid = vtx.lData.account.storageID let vid = vtx.lData.account.storageID
if vid.isValid: if vid.isValid:
let key = db.getKey vid let key = db.getKey vid
if key.isValid: if not key.isValid:
node.key[0] = key block looseCoupling:
else: when LOOSE_STORAGE_TRIE_COUPLING:
# Stale storage trie?
if LEAST_FREE_VID <= vid.distinctBase and
not db.getVtx(vid).isValid:
node.lData.account.storageID = VertexID(0)
break looseCoupling
# Otherwise this is a stale storage trie.
return err(@[vid]) return err(@[vid])
node.key[0] = key node.key[0] = key
return ok node return ok node

View File

@ -447,6 +447,11 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
if rc.error[1] == DelPathNotFound: if rc.error[1] == DelPathNotFound:
return err(rc.error.toError(db, info, MptNotFound)) return err(rc.error.toError(db, info, MptNotFound))
return err(rc.error.toError(db, info)) return err(rc.error.toError(db, info))
if rc.value:
# Trie has become empty
cMpt.root = VoidTrieID
ok() ok()
proc mptHasPath( proc mptHasPath(
@ -575,6 +580,27 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
return err(rc.error.toError(db, info)) return err(rc.error.toError(db, info))
ok() ok()
proc accStoFlush(
cAcc: AristoChildDbRef;
address: EthAddress;
info: static[string];
): CoreDbRc[void] =
let
db = cAcc.base.parent
mpt = cAcc.mpt
key = address.keccakHash.data
pyl = mpt.fetchPayload(cAcc.root, key).valueOr:
return ok()
# Use storage ID from account and delete that sub-trie
if pyl.pType == AccountData:
let stoID = pyl.account.storageID
if stoID.isValid:
let rc = mpt.delete(stoID, address.to(PathID))
if rc.isErr:
return err(rc.error.toError(db, info))
ok()
proc accHasPath( proc accHasPath(
cAcc: AristoChildDbRef; cAcc: AristoChildDbRef;
address: EthAddress; address: EthAddress;
@ -602,6 +628,9 @@ proc accMethods(cAcc: AristoChildDbRef): CoreDbAccFns =
deleteFn: proc(address: EthAddress): CoreDbRc[void] = deleteFn: proc(address: EthAddress): CoreDbRc[void] =
cAcc.accDelete(address, "deleteFn()"), cAcc.accDelete(address, "deleteFn()"),
stoFlushFn: proc(address: EthAddress): CoreDbRc[void] =
cAcc.accStoFlush(address, "stoFlushFn()"),
mergeFn: proc(acc: CoreDbAccount): CoreDbRc[void] = mergeFn: proc(acc: CoreDbAccount): CoreDbRc[void] =
cAcc.accMerge(acc, "mergeFn()"), cAcc.accMerge(acc, "mergeFn()"),
@ -869,7 +898,11 @@ proc newMptHandler*(
if rc.isErr: if rc.isErr:
return err(rc.error[1].toError(db, info, AccNotFound)) return err(rc.error[1].toError(db, info, AccNotFound))
if trie.reset: if trie.reset:
let rc = trie.ctx.mpt.delete(trie.root) # Note that `reset` only applies to non-dynamic trie roots with vertex ID
# between `VertexID(2) ..< LEAST_FREE_VID`. At the moment, this applies to
# `GenericTrie` type sub-tries somehow emulating the behaviour of a new
# empty MPT on the legacy database (handle with care, though.)
let rc = trie.ctx.mpt.delete(trie.root, VOID_PATH_ID)
if rc.isErr: if rc.isErr:
return err(rc.error.toError(db, info, AutoFlushFailed)) return err(rc.error.toError(db, info, AutoFlushFailed))
trie.reset = false trie.reset = false

View File

@ -327,6 +327,9 @@ proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
mpt.trie.del(k.keccakHash.data) mpt.trie.del(k.keccakHash.data)
ok(), ok(),
stoFlushFn: proc(k: EthAddress): CoreDbRc[void] =
ok(),
mergeFn: proc(v: CoreDbAccount): CoreDbRc[void] = mergeFn: proc(v: CoreDbAccount): CoreDbRc[void] =
db.mapRlpException("mergeFn()"): db.mapRlpException("mergeFn()"):
mpt.trie.put(v.address.keccakHash.data, rlp.encode v.toAccount) mpt.trie.put(v.address.keccakHash.data, rlp.encode v.toAccount)

View File

@ -911,6 +911,21 @@ proc delete*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
result = acc.methods.deleteFn address result = acc.methods.deleteFn address
acc.ifTrackNewApi: debug newApiTxt, ctx, elapsed, address, result acc.ifTrackNewApi: debug newApiTxt, ctx, elapsed, address, result
proc stoFlush*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
## Recursively delete all data elements from the storage trie associated to
## the account identified by the argument `address`. After successful run,
## the storage trie will be empty.
##
## caveat:
## This function has currently no effect on the legacy backend so it must
## not be relied upon in general. On the legacy backend, storage tries
## might be shared by several accounts whereas they are unique on the
## `Aristo` backend.
##
acc.setTrackNewApi AccStoFlushFn
result = acc.methods.stoFlushFn address
acc.ifTrackNewApi: debug newApiTxt, ctx, elapsed, address, result
proc merge*( proc merge*(
acc: CoreDxAccRef; acc: CoreDxAccRef;
account: CoreDbAccount; account: CoreDbAccount;

View File

@ -34,6 +34,7 @@ type
AccMergeFn = "acc/merge" AccMergeFn = "acc/merge"
AccNewMptFn = "acc/newMpt" AccNewMptFn = "acc/newMpt"
AccPersistentFn = "acc/persistent" AccPersistentFn = "acc/persistent"
AccStoFlushFn = "acc/stoFlush"
AccToMptFn = "acc/toMpt" AccToMptFn = "acc/toMpt"
AnyBackendFn = "any/backend" AnyBackendFn = "any/backend"

View File

@ -188,7 +188,6 @@ type
persistentFn*: CoreDbMptPersistentFn persistentFn*: CoreDbMptPersistentFn
forgetFn*: CoreDbMptForgetFn forgetFn*: CoreDbMptForgetFn
# ---------------------------------------------------- # ----------------------------------------------------
# Sub-descriptor: Mpt/hexary trie methods for accounts # Sub-descriptor: Mpt/hexary trie methods for accounts
# ------------------------------------------------------ # ------------------------------------------------------
@ -196,6 +195,7 @@ type
CoreDbAccNewMptFn* = proc(): CoreDbRc[CoreDxMptRef] {.noRaise.} CoreDbAccNewMptFn* = proc(): CoreDbRc[CoreDxMptRef] {.noRaise.}
CoreDbAccFetchFn* = proc(k: EthAddress): CoreDbRc[CoreDbAccount] {.noRaise.} CoreDbAccFetchFn* = proc(k: EthAddress): CoreDbRc[CoreDbAccount] {.noRaise.}
CoreDbAccDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.} CoreDbAccDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
CoreDbAccStoFlushFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
CoreDbAccMergeFn* = proc(v: CoreDbAccount): CoreDbRc[void] {.noRaise.} CoreDbAccMergeFn* = proc(v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
CoreDbAccHasPathFn* = proc(k: EthAddress): CoreDbRc[bool] {.noRaise.} CoreDbAccHasPathFn* = proc(k: EthAddress): CoreDbRc[bool] {.noRaise.}
CoreDbAccGetTrieFn* = proc(): CoreDbTrieRef {.noRaise.} CoreDbAccGetTrieFn* = proc(): CoreDbTrieRef {.noRaise.}
@ -209,6 +209,7 @@ type
newMptFn*: CoreDbAccNewMptFn newMptFn*: CoreDbAccNewMptFn
fetchFn*: CoreDbAccFetchFn fetchFn*: CoreDbAccFetchFn
deleteFn*: CoreDbAccDeleteFn deleteFn*: CoreDbAccDeleteFn
stoFlushFn*: CoreDbAccStoFlushFn
mergeFn*: CoreDbAccMergeFn mergeFn*: CoreDbAccMergeFn
hasPathFn*: CoreDbAccHasPathFn hasPathFn*: CoreDbAccHasPathFn
getTrieFn*: CoreDbAccGetTrieFn getTrieFn*: CoreDbAccGetTrieFn

View File

@ -72,6 +72,7 @@ proc validateMethodsDesc(fns: CoreDbAccFns) =
doAssert not fns.newMptFn.isNil doAssert not fns.newMptFn.isNil
doAssert not fns.fetchFn.isNil doAssert not fns.fetchFn.isNil
doAssert not fns.deleteFn.isNil doAssert not fns.deleteFn.isNil
doAssert not fns.stoFlushFn.isNil
doAssert not fns.mergeFn.isNil doAssert not fns.mergeFn.isNil
doAssert not fns.hasPathFn.isNil doAssert not fns.hasPathFn.isNil
doAssert not fns.getTrieFn.isNil doAssert not fns.getTrieFn.isNil

View File

@ -114,6 +114,10 @@ proc merge*(al: AccountLedger; account: CoreDbAccount) =
proc delete*(al: AccountLedger, eAddr: EthAddress) = proc delete*(al: AccountLedger, eAddr: EthAddress) =
const info = "AccountLedger/delete()" const info = "AccountLedger/delete()"
# Flush associated storage trie
al.distinctBase.stoFlush(eAddr).isOkOr:
raiseAssert info & $$error
# Clear account
al.distinctBase.delete(eAddr).isOkOr: al.distinctBase.delete(eAddr).isOkOr:
if error.error == MptNotFound: if error.error == MptNotFound:
return return

View File

@ -476,7 +476,7 @@ proc testTxMergeAndDeleteSubTree*(
"" ""
# Delete sub-tree # Delete sub-tree
block: block:
let rc = db.delete VertexID(1) let rc = db.delete(VertexID(1), VOID_PATH_ID)
xCheckRc rc.error == (0,0): xCheckRc rc.error == (0,0):
noisy.say "***", "del(2)", noisy.say "***", "del(2)",
" n=", n, "/", list.len, " n=", n, "/", list.len,

View File

@ -11,7 +11,7 @@
## Testing `CoreDB` wrapper implementation ## Testing `CoreDB` wrapper implementation
import import
std/[os, strformat, strutils, times], std/[algorithm, os, strformat, strutils, times],
chronicles, chronicles,
eth/common, eth/common,
results, results,
@ -22,6 +22,9 @@ import
./test_coredb/[coredb_test_xx, test_chainsync, test_helpers] ./test_coredb/[coredb_test_xx, test_chainsync, test_helpers]
const const
# If `true`, this compile time option set up `unittest2` for manual parsing
unittest2DisableParamFiltering {.booldefine.} = false
baseDir = [".", "..", ".."/"..", $DirSep] baseDir = [".", "..", ".."/"..", $DirSep]
repoDir = [".", "tests", "nimbus-eth1-blobs"] repoDir = [".", "tests", "nimbus-eth1-blobs"]
subDir = ["replay", "test_coredb", "custom-network", "customgenesis"] subDir = ["replay", "test_coredb", "custom-network", "customgenesis"]
@ -29,6 +32,9 @@ const
# Reference file for finding some database directory base # Reference file for finding some database directory base
sampleDirRefFile = "coredb_test_xx.nim" sampleDirRefFile = "coredb_test_xx.nim"
dbTypeDefault = LegacyDbMemory
ldgTypeDefault = LegacyAccountsCache
let let
# Standard test sample # Standard test sample
bChainCapture = bulkTest0 bChainCapture = bulkTest0
@ -37,27 +43,59 @@ let
# Helpers # Helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
when unittest2DisableParamFiltering:
# Filter out local options and pass on the rest to `unittest2`
proc cmdLineConfig(): tuple[samples: seq[CaptureSpecs]] =
# Define sample list from the command line (if any)
const optPfx = "--sample=" # Custom option with sample list
proc parseError(s = "") =
let msg = if 0 < s.len: "Unsupported \"" & optPfx & "\" list item: " & s
else: "Empty \"" & optPfx & " list"
echo "*** ", getAppFilename().splitFile.name, ": ", msg
echo " Available: ", allSamples.mapIt(it.name).sorted.join(" ")
quit(99)
var other: seq[string] # Options for manual parsing by `unittest2`
for arg in commandLineParams():
if optPfx.len <= arg.len and arg[0 ..< optPfx.len] == optPfx:
for w in arg[optPfx.len ..< arg.len].split(",").mapIt(it.strip):
block findSample:
for sample in allSamples:
if w.cmpIgnoreCase(sample.name) == 0:
result.samples.add sample
break findSample
w.parseError()
if result.samples.len == 0:
parseError()
else:
other.add arg
# Setup `unittest2`
other.parseParameters
else:
# Kill the compilation process iff the directive `cmdLineConfig()` is used
template cmdLineConfig(): untyped =
{.error: "cmdLineConfig() needs compiler option "&
" -d:unittest2DisableParamFiltering".}
proc findFilePath( proc findFilePath(
file: string; file: string;
baseDir: openArray[string] = baseDir; baseDir: openArray[string] = baseDir;
repoDir: openArray[string] = repoDir; repoDir: openArray[string] = repoDir;
subDir: openArray[string] = subDir; subDir: openArray[string] = subDir;
): Result[string,void] = ): Result[string,void] =
for dir in baseDir: file.findFilePathHelper(baseDir, repoDir, subDir)
if dir.dirExists:
for repo in repoDir:
if (dir / repo).dirExists:
for sub in subDir:
if (dir / repo / sub).dirExists:
let path = dir / repo / sub / file
if path.fileExists:
return ok(path)
echo "*** File not found \"", file, "\"."
err()
proc getTmpDir(sampleDir = sampleDirRefFile): string = proc getTmpDir(sampleDir = sampleDirRefFile): string =
sampleDir.findFilePath.value.splitFile.dir sampleDir.findFilePath.value.splitFile.dir
proc flushDbDir(s: string) = proc flushDbDir(s: string) =
if s != "": if s != "":
let dataDir = s / "nimbus" let dataDir = s / "nimbus"
@ -142,8 +180,8 @@ proc initRunnerDB(
proc chainSyncRunner( proc chainSyncRunner(
noisy = true; noisy = true;
capture = bChainCapture; capture = bChainCapture;
dbType = LegacyDbMemory; dbType = CoreDbType(0);
ldgType = LegacyAccountsCache; ldgType = ldgTypeDefault;
enaLogging = false; enaLogging = false;
lastOneExtra = true; lastOneExtra = true;
) = ) =
@ -156,6 +194,16 @@ proc chainSyncRunner(
dbDir = baseDir / "tmp" dbDir = baseDir / "tmp"
numBlocks = capture.numBlocks numBlocks = capture.numBlocks
numBlocksInfo = if numBlocks == high(int): "all" else: $numBlocks numBlocksInfo = if numBlocks == high(int): "all" else: $numBlocks
dbType = block:
# Decreasing priority: dbType, capture.dbType, dbTypeDefault
var effDbType = dbTypeDefault
if dbType != CoreDbType(0):
effDbType = dbType
elif capture.dbType != CoreDbType(0):
effDbType = capture.dbType
effDbType
persistent = dbType in CoreDbPersistentTypes persistent = dbType in CoreDbPersistentTypes
defer: defer:
@ -168,7 +216,7 @@ proc chainSyncRunner(
com = initRunnerDB(dbDir, capture, dbType, ldgType) com = initRunnerDB(dbDir, capture, dbType, ldgType)
defer: defer:
com.db.finish(flush = true) com.db.finish(flush = true)
noisy.testChainSyncProfilingPrint numBlocks #noisy.testChainSyncProfilingPrint numBlocks
if persistent: dbDir.flushDbDir if persistent: dbDir.flushDbDir
if noisy: if noisy:
@ -190,23 +238,28 @@ proc coreDbMain*(noisy = defined(debug)) =
when isMainModule: when isMainModule:
const const
noisy = defined(debug) or true noisy = defined(debug) or true
var
sampleList: seq[CaptureSpecs]
setErrorLevel() setErrorLevel()
# This one uses the readily available dump: `bulkTest0` and some huge replay # This one uses the readily available dump: `bulkTest0` and some huge replay
# dumps `bulkTest2`, `bulkTest3`, .. from the `nimbus-eth1-blobs` package. # dumps `bulkTest2`, `bulkTest3`, .. from the `nimbus-eth1-blobs` package.
# For specs see `tests/test_coredb/bulk_test_xx.nim`. # For specs see `tests/test_coredb/bulk_test_xx.nim`.
var testList = @[bulkTest0] # This test is superseded by `bulkTest1` and `2`
#testList = @[failSample0] sampleList = cmdLineConfig().samples
when true and false: if sampleList.len == 0:
testList = @[bulkTest2, bulkTest3] sampleList = @[bulkTest0]
when true:
sampleList = @[bulkTest2, bulkTest3]
sampleList = @[ariTest1] # debugging
var state: (Duration, int) var state: (Duration, int)
for n,capture in testList: for n,capture in sampleList:
noisy.profileSection("@testList #" & $n, state): noisy.profileSection("@testList #" & $n, state):
noisy.chainSyncRunner( noisy.chainSyncRunner(
capture=capture, capture=capture,
dbType=AristoDbMemory, #dbType = ...,
ldgType=LedgerCache, ldgType=LedgerCache,
#enaLogging = true #enaLogging = true
) )

View File

@ -9,7 +9,9 @@
# distributed except according to those terms. # distributed except according to those terms.
import import
std/strutils,
eth/common, eth/common,
../../nimbus/db/core_db,
../../nimbus/common/chain_config ../../nimbus/common/chain_config
type type
@ -22,28 +24,29 @@ type
genesis*: string ## Optional config file (instead of `network`) genesis*: string ## Optional config file (instead of `network`)
files*: seq[string] ## Names of capture files files*: seq[string] ## Names of capture files
numBlocks*: int ## Number of blocks to load numBlocks*: int ## Number of blocks to load
dbType*: CoreDbType ## Use `CoreDbType(0)` for default
# Must not use `const` here, see `//github.com/nim-lang/Nim/issues/23295` # Must not use `const` here, see `//github.com/nim-lang/Nim/issues/23295`
# Waiting for fix `//github.com/nim-lang/Nim/pull/23297` (or similar) to # Waiting for fix `//github.com/nim-lang/Nim/pull/23297` (or similar) to
# appear on local `Nim` compiler version. # appear on local `Nim` compiler version.
let let
bulkTest0* = CaptureSpecs( bulkTest0* = CaptureSpecs(
name: "some-goerli",
builtIn: true, builtIn: true,
name: "goerli-some",
network: GoerliNet, network: GoerliNet,
files: @["goerli68161.txt.gz"], files: @["goerli68161.txt.gz"],
numBlocks: 1_000) numBlocks: 1_000)
bulkTest1* = CaptureSpecs( bulkTest1* = CaptureSpecs(
name: "more-goerli",
builtIn: true, builtIn: true,
name: "goerli-more",
network: GoerliNet, network: GoerliNet,
files: @["goerli68161.txt.gz"], files: @["goerli68161.txt.gz"],
numBlocks: high(int)) numBlocks: high(int))
bulkTest2* = CaptureSpecs( bulkTest2* = CaptureSpecs(
name: "much-goerli",
builtIn: true, builtIn: true,
name: "goerli",
network: GoerliNet, network: GoerliNet,
files: @[ files: @[
"goerli482304.txt.gz", # on nimbus-eth1-blobs/replay "goerli482304.txt.gz", # on nimbus-eth1-blobs/replay
@ -51,8 +54,8 @@ let
numBlocks: high(int)) numBlocks: high(int))
bulkTest3* = CaptureSpecs( bulkTest3* = CaptureSpecs(
name: "mainnet",
builtIn: true, builtIn: true,
name: "main",
network: MainNet, network: MainNet,
files: @[ files: @[
"mainnet332160.txt.gz", # on nimbus-eth1-blobs/replay "mainnet332160.txt.gz", # on nimbus-eth1-blobs/replay
@ -62,18 +65,53 @@ let
numBlocks: high(int)) numBlocks: high(int))
failSample0* = CaptureSpecs( # Test samples with all the problems one can expect
name: "fail-goerli", ariTest0* = CaptureSpecs(
builtIn: true, builtIn: true,
network: GoerliNet, name: bulkTest2.name & "-am",
network: bulkTest2.network,
files: bulkTest2.files, files: bulkTest2.files,
numBlocks: 301_375 + 1) # +1 => crash on Aristo only numBlocks: high(int),
dbType: AristoDbMemory)
failSample1* = CaptureSpecs( ariTest1* = CaptureSpecs(
name: "fail-main",
builtIn: true, builtIn: true,
network: MainNet, name: bulkTest2.name & "-ar",
network: bulkTest2.network,
files: bulkTest2.files,
numBlocks: high(int),
dbType: AristoDbRocks)
ariTest2* = CaptureSpecs(
builtIn: true,
name: bulkTest3.name & "-ar",
network: bulkTest3.network,
files: bulkTest3.files, files: bulkTest3.files,
numBlocks: 257_280 + 512) numBlocks: high(int),
dbType: AristoDbRocks)
# To be compared against the proof-of-concept implementation as reference
legaTest1* = CaptureSpecs(
builtIn: true,
name: ariTest1.name.replace("-ar", "-lp"),
network: ariTest1.network,
files: ariTest1.files,
numBlocks: ariTest1.numBlocks,
dbType: LegacyDbPersistent)
legaTest2* = CaptureSpecs(
builtIn: true,
name: ariTest2.name.replace("-ar", "-lp"),
files: ariTest2.files,
numBlocks: ariTest2.numBlocks,
dbType: LegacyDbPersistent)
# ------------------
allSamples* = [
bulkTest0, bulkTest1, bulkTest2, bulkTest3,
ariTest0, ariTest1, ariTest2,
legaTest1, legaTest2]
# End # End

View File

@ -213,7 +213,6 @@ proc test_chainSync*(
dotsOrSpace = " " dotsOrSpace = " "
noisy.startLogging(headers9[0].blockNumber) noisy.startLogging(headers9[0].blockNumber)
if lastOneExtra: if lastOneExtra:
let let
headers0 = headers9[0..0] headers0 = headers9[0..0]
@ -229,7 +228,6 @@ proc test_chainSync*(
xCheck runPersistBlocks9Rc == ValidationResult.OK xCheck runPersistBlocks9Rc == ValidationResult.OK
break break
true true
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -9,8 +9,9 @@
# distributed except according to those terms. # distributed except according to those terms.
import import
std/[sequtils, times], std/[os, sequtils, times],
eth/common, eth/common,
results,
../../nimbus/utils/prettify, ../../nimbus/utils/prettify,
../replay/pp ../replay/pp
@ -53,6 +54,24 @@ func pp*(
# Public helpers # Public helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc findFilePathHelper*(
file: string;
baseDir: openArray[string];
repoDir: openArray[string];
subDir: openArray[string];
): Result[string,void] =
for dir in baseDir:
if dir.dirExists:
for repo in repoDir:
if (dir / repo).dirExists:
for sub in subDir:
if (dir / repo / sub).dirExists:
let path = dir / repo / sub / file
if path.fileExists:
return ok(path)
echo "*** File not found \"", file, "\"."
err()
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------