restore a few tests, cleanup (#2234)

* remove `compensateLegacySetup`, `localDbOnly`
* enable trivially fixable tests
This commit is contained in:
Jacek Sieka 2024-05-28 14:49:35 +02:00 committed by GitHub
parent 741fcca9b6
commit 08e98eb385
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
18 changed files with 17 additions and 53 deletions

View File

@ -286,8 +286,7 @@ func toHardFork*(
toHardFork(com.forkTransitionTable, forkDeterminer) toHardFork(com.forkTransitionTable, forkDeterminer)
proc hardForkTransition( proc hardForkTransition(
com: CommonRef, forkDeterminer: ForkDeterminationInfo) com: CommonRef, forkDeterminer: ForkDeterminationInfo) =
{.gcsafe, raises: [].} =
## When consensus type already transitioned to POS, ## When consensus type already transitioned to POS,
## the storage can choose not to store TD anymore, ## the storage can choose not to store TD anymore,
## at that time, TD is no longer needed to find a fork ## at that time, TD is no longer needed to find a fork
@ -302,8 +301,7 @@ proc hardForkTransition*(
com: CommonRef, com: CommonRef,
number: BlockNumber, number: BlockNumber,
td: Option[DifficultyInt], td: Option[DifficultyInt],
time: Option[EthTime]) time: Option[EthTime]) =
{.gcsafe, raises: [].} =
com.hardForkTransition(ForkDeterminationInfo( com.hardForkTransition(ForkDeterminationInfo(
blockNumber: number, time: time, td: td)) blockNumber: number, time: time, td: td))
@ -311,8 +309,7 @@ proc hardForkTransition*(
com: CommonRef, com: CommonRef,
parentHash: Hash256, parentHash: Hash256,
number: BlockNumber, number: BlockNumber,
time: Option[EthTime]) time: Option[EthTime]) =
{.gcsafe, raises: [].} =
com.hardForkTransition(number, getTdIfNecessary(com, parentHash), time) com.hardForkTransition(number, getTdIfNecessary(com, parentHash), time)
proc hardForkTransition*( proc hardForkTransition*(
@ -341,8 +338,7 @@ func forkGTE*(com: CommonRef, fork: HardFork): bool =
com.currentFork >= fork com.currentFork >= fork
# TODO: move this consensus code to where it belongs # TODO: move this consensus code to where it belongs
func minerAddress*(com: CommonRef; header: BlockHeader): EthAddress func minerAddress*(com: CommonRef; header: BlockHeader): EthAddress =
{.gcsafe, raises: [CatchableError].} =
# POW and POS return header.coinbase # POW and POS return header.coinbase
return header.coinbase return header.coinbase

View File

@ -158,9 +158,6 @@ proc baseMethods(db: AristoCoreDbRef): CoreDbBaseFns =
errorPrintFn: proc(e: CoreDbErrorRef): string = errorPrintFn: proc(e: CoreDbErrorRef): string =
e.errorPrint(), e.errorPrint(),
legacySetupFn: proc() =
discard,
newKvtFn: proc(offSite: bool): CoreDbRc[CoreDxKvtRef] = newKvtFn: proc(offSite: bool): CoreDbRc[CoreDxKvtRef] =
kBase.newKvtHandler(offSite, "newKvtFn()"), kBase.newKvtHandler(offSite, "newKvtFn()"),

View File

@ -269,15 +269,6 @@ proc dbType*(db: CoreDbRef): CoreDbType =
result = db.dbType result = db.dbType
db.ifTrackNewApi: debug newApiTxt, api, elapsed, result db.ifTrackNewApi: debug newApiTxt, api, elapsed, result
proc compensateLegacySetup*(db: CoreDbRef) =
## On the persistent legacy hexary trie, this function is needed for
## bootstrapping and Genesis setup when the `purge` flag is activated.
## Otherwise the database backend may defect on an internal inconsistency.
##
db.setTrackNewApi BaseLegacySetupFn
db.methods.legacySetupFn()
db.ifTrackNewApi: debug newApiTxt, api, elapsed
proc parent*[T: CoreDxKvtRef | proc parent*[T: CoreDxKvtRef |
CoreDbColRef | CoreDbColRef |
CoreDbCtxRef | CoreDxMptRef | CoreDxPhkRef | CoreDxAccRef | CoreDbCtxRef | CoreDxMptRef | CoreDxPhkRef | CoreDxAccRef |

View File

@ -49,7 +49,6 @@ type
BaseColStateFn = "state" BaseColStateFn = "state"
BaseDbTypeFn = "dbType" BaseDbTypeFn = "dbType"
BaseFinishFn = "finish" BaseFinishFn = "finish"
BaseLegacySetupFn = "compensateLegacySetup"
BaseLevelFn = "level" BaseLevelFn = "level"
BaseNewCaptureFn = "newCapture" BaseNewCaptureFn = "newCapture"
BaseNewCtxFn = "ctx" BaseNewCtxFn = "ctx"

View File

@ -102,7 +102,6 @@ type
col: CoreDbColRef): CoreDbRc[Hash256] {.noRaise.} col: CoreDbColRef): CoreDbRc[Hash256] {.noRaise.}
CoreDbBaseColPrintFn* = proc(vid: CoreDbColRef): string {.noRaise.} CoreDbBaseColPrintFn* = proc(vid: CoreDbColRef): string {.noRaise.}
CoreDbBaseErrorPrintFn* = proc(e: CoreDbErrorRef): string {.noRaise.} CoreDbBaseErrorPrintFn* = proc(e: CoreDbErrorRef): string {.noRaise.}
CoreDbBaseInitLegaSetupFn* = proc() {.noRaise.}
CoreDbBaseLevelFn* = proc(): int {.noRaise.} CoreDbBaseLevelFn* = proc(): int {.noRaise.}
CoreDbBaseNewKvtFn* = proc(offSite: bool): CoreDbRc[CoreDxKvtRef] {.noRaise.} CoreDbBaseNewKvtFn* = proc(offSite: bool): CoreDbRc[CoreDxKvtRef] {.noRaise.}
CoreDbBaseNewCtxFn* = proc(): CoreDbCtxRef {.noRaise.} CoreDbBaseNewCtxFn* = proc(): CoreDbCtxRef {.noRaise.}
@ -121,7 +120,6 @@ type
colStateFn*: CoreDbBaseColStateFn colStateFn*: CoreDbBaseColStateFn
colPrintFn*: CoreDbBaseColPrintFn colPrintFn*: CoreDbBaseColPrintFn
errorPrintFn*: CoreDbBaseErrorPrintFn errorPrintFn*: CoreDbBaseErrorPrintFn
legacySetupFn*: CoreDbBaseInitLegaSetupFn
levelFn*: CoreDbBaseLevelFn levelFn*: CoreDbBaseLevelFn
# Kvt constructor # Kvt constructor
@ -281,7 +279,6 @@ type
trackLegaApi*: bool ## Debugging, support trackLegaApi*: bool ## Debugging, support
trackNewApi*: bool ## Debugging, support trackNewApi*: bool ## Debugging, support
trackLedgerApi*: bool ## Debugging, suggestion for subsequent ledger trackLedgerApi*: bool ## Debugging, suggestion for subsequent ledger
localDbOnly*: bool ## Debugging, suggestion to ignore async fetch
profTab*: CoreDbProfListRef ## Profiling data (if any) profTab*: CoreDbProfListRef ## Profiling data (if any)
ledgerHook*: RootRef ## Debugging/profiling, to be used by ledger ledgerHook*: RootRef ## Debugging/profiling, to be used by ledger
methods*: CoreDbBaseFns methods*: CoreDbBaseFns

View File

@ -32,7 +32,6 @@ proc validateMethodsDesc(base: CoreDbBaseFns) =
doAssert not base.colStateFn.isNil doAssert not base.colStateFn.isNil
doAssert not base.colPrintFn.isNil doAssert not base.colPrintFn.isNil
doAssert not base.errorPrintFn.isNil doAssert not base.errorPrintFn.isNil
doAssert not base.legacySetupFn.isNil
doAssert not base.levelFn.isNil doAssert not base.levelFn.isNil
doAssert not base.newKvtFn.isNil doAssert not base.newKvtFn.isNil
doAssert not base.newCtxFn.isNil doAssert not base.newCtxFn.isNil

View File

@ -577,8 +577,7 @@ proc persistTransactions*(
let let
mpt = db.ctx.getMpt(CtTxs) mpt = db.ctx.getMpt(CtTxs)
kvt = db.newKvt() kvt = db.newKvt()
# Prevent DB from coughing.
db.compensateLegacySetup()
for idx, tx in transactions: for idx, tx in transactions:
let let
encodedKey = rlp.encode(idx) encodedKey = rlp.encode(idx)

View File

@ -333,8 +333,6 @@ proc persistStorage(acc: AccountRef, ac: AccountsLedgerRef, clearCache: bool) =
if not clearCache and acc.originalStorage.isNil: if not clearCache and acc.originalStorage.isNil:
acc.originalStorage = newTable[UInt256, UInt256]() acc.originalStorage = newTable[UInt256, UInt256]()
ac.ledger.db.compensateLegacySetup()
# Make sure that there is an account column on the database. This is needed # Make sure that there is an account column on the database. This is needed
# for saving the account-linked storage column on the Aristo database. # for saving the account-linked storage column on the Aristo database.
if acc.statement.storage.isNil: if acc.statement.storage.isNil:

View File

@ -241,7 +241,6 @@ proc start(nimbus: NimbusNode, conf: NimbusConf) =
params = conf.networkParams) params = conf.networkParams)
com.initializeEmptyDb() com.initializeEmptyDb()
com.db.compensateLegacySetup()
let protocols = conf.getProtocolFlags() let protocols = conf.getProtocolFlags()

View File

@ -65,7 +65,6 @@ proc main() {.used.} =
if canonicalHeadHashKey().toOpenArray notin com.db.kvt: if canonicalHeadHashKey().toOpenArray notin com.db.kvt:
persistToDb(com.db): persistToDb(com.db):
com.initializeEmptyDb() com.initializeEmptyDb()
com.db.compensateLegacySetup()
doAssert(canonicalHeadHashKey().toOpenArray in com.db.kvt) doAssert(canonicalHeadHashKey().toOpenArray in com.db.kvt)
var head = com.db.getCanonicalHead() var head = com.db.getCanonicalHead()

View File

@ -19,7 +19,7 @@ cliBuilder:
./test_memory, ./test_memory,
./test_stack, ./test_stack,
./test_genesis, ./test_genesis,
#./test_precompiles, -- fails ./test_precompiles,
#./test_generalstate_json, -- fails #./test_generalstate_json, -- fails
./test_tracer_json, ./test_tracer_json,
#./test_persistblock_json, -- fails #./test_persistblock_json, -- fails
@ -49,7 +49,7 @@ cliBuilder:
#./test_merge, -- fails #./test_merge, -- fails
./test_eip4844, ./test_eip4844,
./test_beacon/test_skeleton, ./test_beacon/test_skeleton,
#./test_overflow, -- fails ./test_overflow,
#./test_getproof_json, -- fails #./test_getproof_json, -- fails
#./test_rpc_experimental_json, -- fails #./test_rpc_experimental_json, -- fails
#./test_persistblock_witness_json -- fails #./test_persistblock_witness_json -- fails

View File

@ -299,8 +299,6 @@ proc initVMEnv*(network: string): BaseVMState =
gasLimit: 100_000 gasLimit: 100_000
) )
# Disable opportunistic DB layer features
com.db.localDbOnly = true
com.initializeEmptyDb() com.initializeEmptyDb()
BaseVMState.new(parent, header, com) BaseVMState.new(parent, header, com)
@ -347,7 +345,6 @@ proc verifyAsmResult(vmState: BaseVMState, boa: Assembler, asmResult: CallResult
var stateDB = vmState.stateDB var stateDB = vmState.stateDB
stateDB.persist() stateDB.persist()
var var
storageRoot = stateDB.getStorageRoot(codeAddress) storageRoot = stateDB.getStorageRoot(codeAddress)
trie = initStorageTrie(com.db, storageRoot) trie = initStorageTrie(com.db, storageRoot)

View File

@ -167,7 +167,6 @@ proc initRunnerDB(
setDebugLevel() setDebugLevel()
coreDB.trackLegaApi = true coreDB.trackLegaApi = true
coreDB.trackNewApi = true coreDB.trackNewApi = true
coreDB.localDbOnly = true
var var
params: NetworkParams params: NetworkParams
@ -191,7 +190,6 @@ proc initRunnerDB(
coreDB.trackLegaApi = false coreDB.trackLegaApi = false
coreDB.trackNewApi = false coreDB.trackNewApi = false
coreDB.trackLedgerApi =false coreDB.trackLedgerApi =false
coreDB.localDbOnly = false
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Test Runners: accounts and accounts storages # Test Runners: accounts and accounts storages
@ -247,7 +245,6 @@ proc chainSyncRunner(
com.db.trackNewApi = true com.db.trackNewApi = true
com.db.trackNewApi = true com.db.trackNewApi = true
com.db.trackLedgerApi = true com.db.trackLedgerApi = true
com.db.localDbOnly = true
check noisy.test_chainSync(filePaths, com, numBlocks, check noisy.test_chainSync(filePaths, com, numBlocks,
lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk) lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk)
@ -302,7 +299,6 @@ proc persistentSyncPreLoadAndResumeRunner(
com.db.trackNewApi = true com.db.trackNewApi = true
com.db.trackNewApi = true com.db.trackNewApi = true
com.db.trackLedgerApi = true com.db.trackLedgerApi = true
com.db.localDbOnly = true
check noisy.test_chainSync(filePaths, com, firstPart, check noisy.test_chainSync(filePaths, com, firstPart,
lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk) lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk)
@ -319,7 +315,6 @@ proc persistentSyncPreLoadAndResumeRunner(
com.db.trackNewApi = true com.db.trackNewApi = true
com.db.trackNewApi = true com.db.trackNewApi = true
com.db.trackLedgerApi = true com.db.trackLedgerApi = true
com.db.localDbOnly = true
check noisy.test_chainSync(filePaths, com, secndPart, check noisy.test_chainSync(filePaths, com, secndPart,
lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk) lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk)

View File

@ -43,7 +43,7 @@ const
EnableExtraLoggingControl = true EnableExtraLoggingControl = true
var var
logStartTime {.used.} = Time() logStartTime {.used.} = Time()
logSavedEnv {.used.}: (bool,bool,bool,bool) logSavedEnv {.used.}: (bool,bool,bool)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private helpers # Private helpers
@ -70,18 +70,17 @@ template initLogging(noisy: bool, com: CommonRef) =
debug "start undumping into persistent blocks" debug "start undumping into persistent blocks"
logStartTime = Time() logStartTime = Time()
logSavedEnv = (com.db.trackLegaApi, com.db.trackNewApi, logSavedEnv = (com.db.trackLegaApi, com.db.trackNewApi,
com.db.trackLedgerApi, com.db.localDbOnly) com.db.trackLedgerApi)
setErrorLevel() setErrorLevel()
com.db.trackLegaApi = true com.db.trackLegaApi = true
com.db.trackNewApi = true com.db.trackNewApi = true
com.db.trackLedgerApi = true com.db.trackLedgerApi = true
com.db.localDbOnly = true
proc finishLogging(com: CommonRef) = proc finishLogging(com: CommonRef) =
when EnableExtraLoggingControl: when EnableExtraLoggingControl:
setErrorLevel() setErrorLevel()
(com.db.trackLegaApi, com.db.trackNewApi, (com.db.trackLegaApi, com.db.trackNewApi,
com.db.trackLedgerApi, com.db.localDbOnly) = logSavedEnv com.db.trackLedgerApi) = logSavedEnv
template startLogging(noisy: bool; num: BlockNumber) = template startLogging(noisy: bool; num: BlockNumber) =

View File

@ -8,7 +8,7 @@
# at your option. This file may not be copied, modified, or distributed except # at your option. This file may not be copied, modified, or distributed except
# according to those terms. # according to those terms.
import eth/keys import eth/[keys, trie]
import stew/byteutils import stew/byteutils
import unittest2 import unittest2
import ../nimbus/common import ../nimbus/common
@ -38,6 +38,7 @@ const
proc overflowMain*() = proc overflowMain*() =
test "GasCall unhandled overflow": test "GasCall unhandled overflow":
let header = BlockHeader( let header = BlockHeader(
stateRoot: emptyRlpHash,
blockNumber: u256(1150000), blockNumber: u256(1150000),
coinBase: coinbase, coinBase: coinbase,
gasLimit: 30000000, gasLimit: 30000000,
@ -45,7 +46,6 @@ proc overflowMain*() =
) )
let com = CommonRef.new(newCoreDbRef(DefaultDbMemory), config = chainConfigForNetwork(MainNet)) let com = CommonRef.new(newCoreDbRef(DefaultDbMemory), config = chainConfigForNetwork(MainNet))
let s = BaseVMState.new( let s = BaseVMState.new(
header, header,
header, header,

View File

@ -8,7 +8,7 @@
import import
std/[strformat, strutils, json, os, tables, macros], std/[strformat, strutils, json, os, tables, macros],
unittest2, stew/byteutils, unittest2, stew/byteutils,
eth/keys, eth/[keys, trie],
../nimbus/common/common, ../nimbus/common/common,
../nimbus/[vm_computation, ../nimbus/[vm_computation,
vm_state, vm_state,
@ -69,10 +69,11 @@ proc testFixture(fixtures: JsonNode, testStatusIMPL: var TestStatus) =
fork = parseFork(fixtures["fork"].getStr) fork = parseFork(fixtures["fork"].getStr)
data = fixtures["data"] data = fixtures["data"]
privateKey = PrivateKey.fromHex("7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d")[] privateKey = PrivateKey.fromHex("7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d")[]
com = CommonRef.new(newCoreDbRef DefaultDbMemory, config = ChainConfig())
vmState = BaseVMState.new( vmState = BaseVMState.new(
BlockHeader(blockNumber: 1.u256), BlockHeader(blockNumber: 1.u256, stateRoot: emptyRlpHash),
BlockHeader(), BlockHeader(),
CommonRef.new(newCoreDbRef DefaultDbMemory, config = ChainConfig()) com
) )
case toLowerAscii(label) case toLowerAscii(label)

View File

@ -147,7 +147,6 @@ proc rpcGetProofsTrackStateChangesMain*() =
let com = CommonRef.new(newCoreDbRef(DefaultDbPersistent, DATABASE_PATH)) let com = CommonRef.new(newCoreDbRef(DefaultDbPersistent, DATABASE_PATH))
com.initializeEmptyDb() com.initializeEmptyDb()
com.db.compensateLegacySetup()
let let
blockHeader = waitFor client.eth_getBlockByNumber(blockId(START_BLOCK), false) blockHeader = waitFor client.eth_getBlockByNumber(blockId(START_BLOCK), false)

View File

@ -436,7 +436,6 @@ proc snapRunner(noisy = true; specs: SnapSyncSpecs) {.used.} =
if dsc.chn.db.newKvt.backend.toRocksStoreRef.isNil: if dsc.chn.db.newKvt.backend.toRocksStoreRef.isNil:
skip() skip()
else: else:
dsc.chn.db.compensateLegacySetup
dsc.chn.test_syncdbAppendBlocks(tailPath,pivot,updateSize,noisy) dsc.chn.test_syncdbAppendBlocks(tailPath,pivot,updateSize,noisy)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------