From a375720c16327c7466123dfecbe5ef45ad96db03 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 31 May 2024 09:13:56 +0200 Subject: [PATCH] import: read from era files (#2254) This PR extends the `nimbus import` command to also allow reading from era files - this command allows creating or topping up an existing database with data coming from era files instead of network sync. * add `--era1-dir` and `--max-blocks` options to command line * make `persistBlocks` report basic stats like transactions and gas * improve error reporting in several API * allow importing multiple RLP files in one go * clean up logging options to match nimbus-eth2 * make sure database is closed properly on shutdown --- hive_integration/nodocker/engine/node.nim | 4 +- nimbus/beacon/api_handler/api_forkchoice.nim | 13 +-- nimbus/beacon/api_handler/api_newpayload.nim | 4 +- nimbus/beacon/api_handler/api_utils.nim | 24 ++-- nimbus/common/common.nim | 2 +- nimbus/config.nim | 26 ++++- nimbus/core/block_import.nim | 13 +-- nimbus/core/chain/chain_desc.nim | 7 +- nimbus/core/chain/persist_blocks.nim | 110 +++++++++-------- nimbus/nim.cfg | 9 +- nimbus/nimbus.nim | 66 +++++------ nimbus/nimbus_import.nim | 117 +++++++++++++++++++ nimbus/sync/beacon/skeleton_db.nim | 11 +- nimbus/sync/full/worker.nim | 2 +- premix/persist.nim | 11 +- scripts/check_copyright_year.sh | 2 +- tests/nim.cfg | 4 +- tests/test_accounts_cache.nim | 4 +- tests/test_blockchain_json.nim | 4 +- tests/test_configuration.nim | 2 +- tests/test_coredb/test_chainsync.nim | 8 +- tests/test_graphql.nim | 2 +- tests/test_persistblock_json.nim | 2 +- tests/test_persistblock_witness_json.nim | 2 +- tests/test_rocksdb_timing/test_db_timing.nim | 2 +- tests/test_rpc_experimental_json.nim | 2 +- tests/test_txpool/setup.nim | 2 +- tests/test_txpool2.nim | 4 +- 28 files changed, 298 insertions(+), 161 deletions(-) create mode 100644 nimbus/nimbus_import.nim diff --git a/hive_integration/nodocker/engine/node.nim b/hive_integration/nodocker/engine/node.nim index 3a60c09de..42cab8573 100644 --- a/hive_integration/nodocker/engine/node.nim +++ b/hive_integration/nodocker/engine/node.nim @@ -86,8 +86,7 @@ proc processBlock( ValidationResult.OK proc getVmState(c: ChainRef, header: BlockHeader): - Result[BaseVMState, void] - {.gcsafe, raises: [CatchableError].} = + Result[BaseVMState, void] = if c.vmState.isNil.not: return ok(c.vmState) @@ -96,6 +95,7 @@ proc getVmState(c: ChainRef, header: BlockHeader): debug "Cannot initialise VmState", number = header.blockNumber return err() + return ok(vmState) # A stripped down version of persistBlocks without validation diff --git a/nimbus/beacon/api_handler/api_forkchoice.nim b/nimbus/beacon/api_handler/api_forkchoice.nim index 75f5bd610..ea865fade 100644 --- a/nimbus/beacon/api_handler/api_forkchoice.nim +++ b/nimbus/beacon/api_handler/api_forkchoice.nim @@ -118,14 +118,14 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, return simpleFCU(PayloadExecutionStatus.invalid, "TDs unavailable for TDD check") if td < ttd or (blockNumber > 0'u64 and ptd > ttd): - error "Refusing beacon update to pre-merge", + notice "Refusing beacon update to pre-merge", number = blockNumber, hash = blockHash.short, diff = header.difficulty, ptd = ptd, ttd = ttd - return invalidFCU() + return invalidFCU("Refusing beacon update to pre-merge") # If the head block is already in our canonical chain, the beacon client is # probably resyncing. Ignore the update. 
@@ -133,11 +133,10 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, if db.getBlockHash(header.blockNumber, canonHash) and canonHash == blockHash: # TODO should this be possible? # If we allow these types of reorgs, we will do lots and lots of reorgs during sync - debug "Reorg to previous block" - if chain.setCanonical(header) != ValidationResult.OK: - return invalidFCU(com, header) - elif chain.setCanonical(header) != ValidationResult.OK: - return invalidFCU(com, header) + notice "Reorg to previous block", blockHash + + chain.setCanonical(header).isOkOr: + return invalidFCU(error, com, header) # If the beacon client also advertised a finalized block, mark the local # chain final and completely in PoS mode. diff --git a/nimbus/beacon/api_handler/api_newpayload.nim b/nimbus/beacon/api_handler/api_newpayload.nim index 1ec838919..0b293fd47 100644 --- a/nimbus/beacon/api_handler/api_newpayload.nim +++ b/nimbus/beacon/api_handler/api_newpayload.nim @@ -187,10 +187,10 @@ proc newPayload*(ben: BeaconEngineRef, hash = blockHash, number = header.blockNumber let body = blockBody(payload) let vres = ben.chain.insertBlockWithoutSetHead(header, body) - if vres != ValidationResult.OK: + if vres.isErr: ben.setInvalidAncestor(header, blockHash) let blockHash = latestValidHash(db, parent, ttd) - return invalidStatus(blockHash, "Failed to insert block") + return invalidStatus(blockHash, vres.error()) # We've accepted a valid payload from the beacon client. Mark the local # chain transitions to notify other subsystems (e.g. downloader) of the diff --git a/nimbus/beacon/api_handler/api_utils.nim b/nimbus/beacon/api_handler/api_utils.nim index 8bbd52a55..2c81a1bda 100644 --- a/nimbus/beacon/api_handler/api_utils.nim +++ b/nimbus/beacon/api_handler/api_utils.nim @@ -85,11 +85,14 @@ proc simpleFCU*(status: PayloadExecutionStatus, ) ) -proc invalidFCU*(hash = common.Hash256()): ForkchoiceUpdatedResponse = +proc invalidFCU*( + validationError: string, + hash = common.Hash256()): ForkchoiceUpdatedResponse = ForkchoiceUpdatedResponse(payloadStatus: PayloadStatusV1( status: PayloadExecutionStatus.invalid, - latestValidHash: toValidHash(hash) + latestValidHash: toValidHash(hash), + validationError: some validationError ) ) @@ -183,13 +186,16 @@ proc latestValidHash*(db: CoreDbRef, # latestValidHash MUST be set to ZERO common.Hash256() -proc invalidFCU*(com: CommonRef, - header: common.BlockHeader): ForkchoiceUpdatedResponse - {.gcsafe, raises: [RlpError].} = +proc invalidFCU*(validationError: string, + com: CommonRef, + header: common.BlockHeader): ForkchoiceUpdatedResponse = var parent: common.BlockHeader if not com.db.getBlockHeader(header.parentHash, parent): - return invalidFCU(common.Hash256()) + return invalidFCU(validationError) - let blockHash = latestValidHash(com.db, parent, - com.ttd.get(high(common.BlockNumber))) - invalidFCU(blockHash) \ No newline at end of file + let blockHash = try: + latestValidHash(com.db, parent, com.ttd.get(high(common.BlockNumber))) + except RlpError: + default(common.Hash256) + + invalidFCU(validationError, blockHash) \ No newline at end of file diff --git a/nimbus/common/common.nim b/nimbus/common/common.nim index 9872c6e8e..85b6c80b9 100644 --- a/nimbus/common/common.nim +++ b/nimbus/common/common.nim @@ -369,7 +369,7 @@ proc initializeEmptyDb*(com: CommonRef) {.gcsafe, raises: [CatchableError].} = let kvt = com.db.kvt() if canonicalHeadHashKey().toOpenArray notin kvt: - trace "Writing genesis to DB" + info "Writing genesis to DB" doAssert(com.genesisHeader.blockNumber.isZero, 
"can't commit genesis block with number > 0") discard com.db.persistHeaderToDb(com.genesisHeader, diff --git a/nimbus/config.nim b/nimbus/config.nim index 05f90fdfd..c646c10c0 100644 --- a/nimbus/config.nim +++ b/nimbus/config.nim @@ -145,6 +145,11 @@ type abbr: "d" name: "data-dir" }: OutDir + era1DirOpt* {. + desc: "Directory where era1 (pre-merge) archive can be found" + defaultValueDesc: "/era1" + name: "era1-dir" }: Option[OutDir] + keyStore* {. desc: "Load one or more keystore files from this directory" defaultValue: defaultKeystoreDir() @@ -166,7 +171,7 @@ type syncMode* {. desc: "Specify particular blockchain sync mode." longDesc: - "- default -- legacy sync mode\n" & + "- default -- beacon sync mode\n" & "- full -- full blockchain archive\n" & # "- snap -- experimental snap mode (development only)\n" & "" @@ -475,12 +480,20 @@ type name: "trusted-setup-file" .}: Option[string] of `import`: - blocksFile* {. argument - desc: "Import RLP encoded block(s) from a file, validate, write to database and quit" - defaultValue: "" - name: "blocks-file" }: InputFile + desc: "One or more RLP encoded block(s) files" + name: "blocks-file" }: seq[InputFile] + + maxBlocks* {. + desc: "Maximum number of blocks to import" + defaultValue: uint64.high() + name: "max-blocks" .}: uint64 + + chunkSize* {. + desc: "Number of blocks per database transaction" + defaultValue: 8192 + name: "chunk-size" .}: uint64 func parseCmdArg(T: type NetworkId, p: string): T {.gcsafe, raises: [ValueError].} = @@ -735,6 +748,9 @@ func httpServerEnabled*(conf: NimbusConf): bool = conf.wsEnabled or conf.rpcEnabled +func era1Dir*(conf: NimbusConf): OutDir = + conf.era1DirOpt.get(OutDir(conf.dataDir.string & "/era1")) + # KLUDGE: The `load()` template does currently not work within any exception # annotated environment. {.pop.} diff --git a/nimbus/core/block_import.nim b/nimbus/core/block_import.nim index 3a2ff0d0e..69a16196a 100644 --- a/nimbus/core/block_import.nim +++ b/nimbus/core/block_import.nim @@ -7,6 +7,8 @@ # This file may not be copied, modified, or distributed except according to # those terms. +{.push raises: [].} + import chronicles, eth/rlp, stew/io2, @@ -30,9 +32,6 @@ proc importRlpBlock*(blocksRlp: openArray[byte]; com: CommonRef; importFile: str while rlp.hasData: try: rlp.decompose(header, body) - if chain.persistBlocks([header], [body]) == ValidationResult.Error: - # register one more error and continue - errorCount.inc except RlpError as e: # terminate if there was a decoding error error "rlp error", @@ -40,12 +39,12 @@ proc importRlpBlock*(blocksRlp: openArray[byte]; com: CommonRef; importFile: str msg = e.msg, exception = e.name return false - except CatchableError as e: - # otherwise continue + + chain.persistBlocks([header], [body]).isOkOr(): + # register one more error and continue error "import error", fileName = importFile, - msg = e.msg, - exception = e.name + error errorCount.inc return errorCount == 0 diff --git a/nimbus/core/chain/chain_desc.nim b/nimbus/core/chain/chain_desc.nim index c500c85bc..8c67a4910 100644 --- a/nimbus/core/chain/chain_desc.nim +++ b/nimbus/core/chain/chain_desc.nim @@ -41,16 +41,12 @@ type ## First block to when `extraValidation` will be applied (only ## effective if `extraValidation` is true.) - vmState: BaseVMState - ## If it's not nil, block validation will use this - ## If it's nil, a new vmState state will be created. 
- # ------------------------------------------------------------------------------ # Public constructors # ------------------------------------------------------------------------------ proc newChain*(com: CommonRef, - extraValidation: bool, vmState = BaseVMState(nil)): ChainRef = + extraValidation: bool): ChainRef = ## Constructor for the `Chain` descriptor object. ## The argument `extraValidation` enables extra block ## chain validation if set `true`. @@ -58,7 +54,6 @@ proc newChain*(com: CommonRef, com: com, validateBlock: true, extraValidation: extraValidation, - vmState: vmState, ) func newChain*(com: CommonRef): ChainRef = diff --git a/nimbus/core/chain/persist_blocks.nim b/nimbus/core/chain/persist_blocks.nim index 66b048af0..2886af5a2 100644 --- a/nimbus/core/chain/persist_blocks.nim +++ b/nimbus/core/chain/persist_blocks.nim @@ -11,6 +11,7 @@ {.push raises: [].} import + results, ../../db/ledger, ../../vm_state, ../../vm_types, @@ -25,6 +26,8 @@ when not defined(release): ../../tracer, ../../utils/utils +export results + type PersistBlockFlag = enum NoPersistHeader @@ -34,6 +37,11 @@ type PersistBlockFlags = set[PersistBlockFlag] + PersistStats = tuple + blocks: int + txs: int + gas: GasInt + const CleanUpEpoch = 30_000.u256 ## Regular checks for history clean up (applies to single state DB). This @@ -45,17 +53,16 @@ const # ------------------------------------------------------------------------------ proc getVmState(c: ChainRef, header: BlockHeader): - Result[BaseVMState, void] = - if c.vmState.isNil.not: - return ok(c.vmState) - + Result[BaseVMState, string] = let vmState = BaseVMState() - if not vmState.init(header, c.com): - debug "Cannot initialise VmState", - number = header.blockNumber - return err() - return ok(vmState) + try: + # TODO clean up exception handling + if not vmState.init(header, c.com): + return err("Could not initialise VMState") + except CatchableError as exc: + return err("Error while initializing VMState: " & exc.msg) + ok(vmState) proc purgeOutOfJournalBlocks(db: CoreDbRef) {.inline, raises: [RlpError].} = ## Remove non-reachable blocks from KVT database @@ -67,24 +74,22 @@ proc purgeOutOfJournalBlocks(db: CoreDbRef) {.inline, raises: [RlpError].} = break blkNum = blkNum - 1 - proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader]; bodies: openArray[BlockBody], - flags: PersistBlockFlags = {}): ValidationResult - # wildcard exception, wrapped below in public section - {.inline, raises: [CatchableError].} = + flags: PersistBlockFlags = {}): Result[PersistStats, string] + {.raises: [CatchableError] .} = let dbTx = c.db.beginTransaction() defer: dbTx.dispose() c.com.hardForkTransition(headers[0]) # Note that `0 < headers.len`, assured when called from `persistBlocks()` - let vmState = c.getVmState(headers[0]).valueOr: - return ValidationResult.Error + let vmState = ?c.getVmState(headers[0]) let (fromBlock, toBlock) = (headers[0].blockNumber, headers[^1].blockNumber) trace "Persisting blocks", fromBlock, toBlock + var txs = 0 for i in 0 ..< headers.len: let (header, body) = (headers[i], bodies[i]) @@ -99,19 +104,15 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader]; debug "Cannot update VmState", blockNumber = header.blockNumber, item = i - return ValidationResult.Error + return err("Cannot update VmState to block " & $header.blockNumber) if c.validateBlock and c.extraValidation and c.verifyFrom <= header.blockNumber: - let res = c.com.validateHeaderAndKinship( + ? 
c.com.validateHeaderAndKinship( header, body, checkSealOK = false) # TODO: how to checkseal from here - if res.isErr: - debug "block validation error", - msg = res.error - return ValidationResult.Error if c.generateWitness: vmState.generateWitness = true @@ -128,7 +129,7 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader]; warn "Validation error. Debugging metadata dumped." if validationResult != ValidationResult.OK: - return validationResult + return err("Failed to validate block") if c.generateWitness: let dbTx = c.db.beginTransaction() @@ -144,7 +145,6 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader]; c.db.setBlockWitness(header.blockHash(), witness) - if NoPersistHeader notin flags: discard c.db.persistHeaderToDb( header, c.com.consensus == ConsensusType.POS, c.com.startOfHistory) @@ -166,6 +166,8 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader]; # Done with this block lapTx.commit() + txs += body.transactions.len + dbTx.commit() # The `c.db.persistent()` call is ignored by the legacy DB which @@ -183,60 +185,66 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader]; if(fromBlock mod CleanUpEpoch) <= (toBlock - fromBlock): c.db.purgeOutOfJournalBlocks() - ValidationResult.OK + ok((headers.len, txs, vmState.cumulativeGasUsed)) # ------------------------------------------------------------------------------ # Public `ChainDB` methods # ------------------------------------------------------------------------------ proc insertBlockWithoutSetHead*(c: ChainRef, header: BlockHeader, - body: BlockBody): ValidationResult - {.gcsafe, raises: [CatchableError].} = - result = c.persistBlocksImpl( - [header], [body], {NoPersistHeader, NoSaveReceipts}) - if result == ValidationResult.OK: + body: BlockBody): Result[void, string] = + try: + discard ? c.persistBlocksImpl( + [header], [body], {NoPersistHeader, NoSaveReceipts}) + c.db.persistHeaderToDbWithoutSetHead(header, c.com.startOfHistory) + ok() + except CatchableError as exc: + err(exc.msg) -proc setCanonical*(c: ChainRef, header: BlockHeader): ValidationResult - {.gcsafe, raises: [CatchableError].} = +proc setCanonical*(c: ChainRef, header: BlockHeader): Result[void, string] = + try: + if header.parentHash == Hash256(): + discard c.db.setHead(header.blockHash) + return ok() + + var body: BlockBody + if not c.db.getBlockBody(header, body): + debug "Failed to get BlockBody", + hash = header.blockHash + return err("Could not get block body") + + discard ? 
c.persistBlocksImpl([header], [body], {NoPersistHeader, NoSaveTxs}) - if header.parentHash == Hash256(): discard c.db.setHead(header.blockHash) - return ValidationResult.OK + ok() + except CatchableError as exc: + err(exc.msg) - var body: BlockBody - if not c.db.getBlockBody(header, body): - debug "Failed to get BlockBody", - hash = header.blockHash - return ValidationResult.Error - - result = c.persistBlocksImpl([header], [body], {NoPersistHeader, NoSaveTxs}) - if result == ValidationResult.OK: - discard c.db.setHead(header.blockHash) - -proc setCanonical*(c: ChainRef, blockHash: Hash256): ValidationResult - {.gcsafe, raises: [CatchableError].} = +proc setCanonical*(c: ChainRef, blockHash: Hash256): Result[void, string] = var header: BlockHeader if not c.db.getBlockHeader(blockHash, header): debug "Failed to get BlockHeader", hash = blockHash - return ValidationResult.Error + return err("Could not get block header") setCanonical(c, header) proc persistBlocks*(c: ChainRef; headers: openArray[BlockHeader]; - bodies: openArray[BlockBody]): ValidationResult - {.gcsafe, raises: [CatchableError].} = + bodies: openArray[BlockBody]): Result[PersistStats, string] = # Run the VM here if headers.len != bodies.len: debug "Number of headers not matching number of bodies" - return ValidationResult.Error + return err("Mismatching headers and bodies") if headers.len == 0: debug "Nothing to do" - return ValidationResult.OK + return ok(default(PersistStats)) # TODO not nice to return nil - c.persistBlocksImpl(headers,bodies) + try: + c.persistBlocksImpl(headers,bodies) + except CatchableError as exc: + err(exc.msg) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/nim.cfg b/nimbus/nim.cfg index ac9563326..39cfc86be 100644 --- a/nimbus/nim.cfg +++ b/nimbus/nim.cfg @@ -1,5 +1,10 @@ --d:chronicles_line_numbers +-d:"chronicles_runtime_filtering=on" +-d:"chronicles_disable_thread_id" + +@if release: + -d:"chronicles_line_numbers:0" +@end + -d:"chronicles_sinks=textlines[file]" -d:"chronicles_runtime_filtering=on" -d:nimDebugDlOpen - diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index 7560adf24..359cf895c 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -22,8 +22,8 @@ import ./version, ./constants, ./nimbus_desc, + ./nimbus_import, ./core/eip4844, - ./core/block_import, ./db/core_db/persistent, ./sync/protocol, ./sync/handlers @@ -36,14 +36,6 @@ when defined(evmc_enabled): ## * No multiple bind addresses support ## * No database support -proc importBlocks(conf: NimbusConf, com: CommonRef) = - if string(conf.blocksFile).len > 0: - # success or not, we quit after importing blocks - if not importRlpBlock(string conf.blocksFile, com): - quit(QuitFailure) - else: - quit(QuitSuccess) - proc basicServices(nimbus: NimbusNode, conf: NimbusConf, com: CommonRef) = @@ -218,7 +210,7 @@ proc localServices(nimbus: NimbusNode, conf: NimbusConf, nimbus.metricsServer = res.get waitFor nimbus.metricsServer.start() -proc start(nimbus: NimbusNode, conf: NimbusConf) = +proc run(nimbus: NimbusNode, conf: NimbusConf) = ## logging setLogLevel(conf.logLevel) if conf.logFile.isSome: @@ -229,6 +221,19 @@ proc start(nimbus: NimbusNode, conf: NimbusConf) = when defined(evmc_enabled): evmcSetLibraryPath(conf.evm) + # Trusted setup is needed for processing Cancun+ blocks + if conf.trustedSetupFile.isSome: + let fileName = conf.trustedSetupFile.get() + let res = Kzg.loadTrustedSetup(fileName) + if res.isErr: + fatal "Cannot load Kzg trusted setup from file", msg=res.error + 
quit(QuitFailure) + else: + let res = loadKzgTrustedSetup() + if res.isErr: + fatal "Cannot load baked in Kzg trusted setup", msg=res.error + quit(QuitFailure) + createDir(string conf.dataDir) let coreDB = # Resolve statically for database type @@ -241,26 +246,17 @@ proc start(nimbus: NimbusNode, conf: NimbusConf) = networkId = conf.networkId, params = conf.networkParams) + defer: + com.db.finish() + com.initializeEmptyDb() - let protocols = conf.getProtocolFlags() - - if conf.cmd != NimbusCmd.`import` and conf.trustedSetupFile.isSome: - let fileName = conf.trustedSetupFile.get() - let res = Kzg.loadTrustedSetup(fileName) - if res.isErr: - fatal "Cannot load Kzg trusted setup from file", msg=res.error - quit(QuitFailure) - else: - let res = loadKzgTrustedSetup() - if res.isErr: - fatal "Cannot load baked in Kzg trusted setup", msg=res.error - quit(QuitFailure) - case conf.cmd of NimbusCmd.`import`: importBlocks(conf, com) else: + let protocols = conf.getProtocolFlags() + basicServices(nimbus, conf, com) manageAccounts(nimbus, conf) setupP2P(nimbus, conf, com, protocols) @@ -282,17 +278,16 @@ proc start(nimbus: NimbusNode, conf: NimbusConf) = # it might have been set to "Stopping" with Ctrl+C nimbus.state = NimbusState.Running -proc process*(nimbus: NimbusNode, conf: NimbusConf) = - # Main event loop - while nimbus.state == NimbusState.Running: - try: - poll() - except CatchableError as e: - debug "Exception in poll()", exc = e.name, err = e.msg - discard e # silence warning when chronicles not activated + # Main event loop + while nimbus.state == NimbusState.Running: + try: + poll() + except CatchableError as e: + debug "Exception in poll()", exc = e.name, err = e.msg + discard e # silence warning when chronicles not activated - # Stop loop - waitFor nimbus.stop(conf) + # Stop loop + waitFor nimbus.stop(conf) when isMainModule: var nimbus = NimbusNode(state: NimbusState.Starting, ctx: newEthContext()) @@ -312,5 +307,4 @@ when isMainModule: ## Processing command line arguments let conf = makeConfig() - nimbus.start(conf) - nimbus.process(conf) + nimbus.run(conf) diff --git a/nimbus/nimbus_import.nim b/nimbus/nimbus_import.nim new file mode 100644 index 000000000..bc3aedf3f --- /dev/null +++ b/nimbus/nimbus_import.nim @@ -0,0 +1,117 @@ +# Nimbus +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +# * MIT license ([LICENSE-MIT](LICENSE-MIT)) +# at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. 
+
+import
+  chronicles,
+  std/[monotimes, strformat, times],
+  stew/io2,
+  ./config,
+  ./common/common,
+  ./core/[block_import, chain],
+  ./db/era1_db,
+  beacon_chain/era_db
+
+var running {.volatile.} = true
+
+proc importBlocks*(conf: NimbusConf, com: CommonRef) =
+  # ## Ctrl+C handling
+  proc controlCHandler() {.noconv.} =
+    when defined(windows):
+      # workaround for https://github.com/nim-lang/Nim/issues/4057
+      setupForeignThreadGc()
+    running = false
+
+  setControlCHook(controlCHandler)
+
+  let
+    start = com.db.getLatestJournalBlockNumber().truncate(uint64) + 1
+    chain = com.newChain()
+
+  var
+    imported = 0'u64
+    gas = 0.u256
+    txs = 0
+    time0 = getMonoTime()
+  template blockNumber(): uint64 =
+    start + imported
+
+  if isDir(conf.era1Dir.string):
+    doAssert conf.networkId == MainNet, "Only mainnet era1 currently supported"
+
+    const
+      # TODO the merge block number could be fetched from the era1 file instead,
+      #      especially if the accumulator is added to the chain metadata
+      lastEra1Block = 15537393
+
+    if start <= lastEra1Block:
+      notice "Importing era1 archive",
+        start, dataDir = conf.dataDir.string, era1Dir = conf.era1Dir.string
+      var
+        headers: seq[BlockHeader]
+        bodies: seq[BlockBody]
+
+      func f(value: float): string =
+        &"{value:4.3f}"
+
+      template process() =
+        let
+          time1 = getMonoTime()
+          statsRes = chain.persistBlocks(headers, bodies)
+        if statsRes.isErr():
+          error "Failed to persist blocks", error = statsRes.error
+          quit(QuitFailure)
+
+        txs += statsRes[].txs
+        gas += uint64 statsRes[].gas
+        let
+          time2 = getMonoTime()
+          diff1 = (time2 - time1).inNanoseconds().float / 1000000000
+          diff0 = (time2 - time0).inNanoseconds().float / 1000000000
+
+        # TODO generate csv with import statistics
+        info "Imported blocks",
+          blockNumber,
+          gas,
+          bps = f(headers.len.float / diff1),
+          tps = f(statsRes[].txs.float / diff1),
+          gps = f(statsRes[].gas.float / diff1),
+          avgBps = f(imported.float / diff0),
+          avgTps = f(txs.float / diff0),
+          avgGps = f(gas.truncate(uint64).float / diff0) # TODO fix truncate
+        headers.setLen(0)
+        bodies.setLen(0)
+
+      let db =
+        Era1DbRef.init(conf.era1Dir.string, "mainnet").expect("Era files present")
+      defer:
+        db.dispose()
+
+      while running and imported < conf.maxBlocks and blockNumber <= lastEra1Block:
+        var blk = db.getBlockTuple(blockNumber).valueOr:
+          error "Could not load block from era1", blockNumber, error
+          break
+
+        imported += 1
+
+        headers.add move(blk.header)
+        bodies.add move(blk.body)
+
+        if headers.lenu64 mod conf.chunkSize == 0:
+          process()
+
+      if headers.len > 0:
+        process() # last chunk, if any
+
+  for blocksFile in conf.blocksFile:
+    if isFile(string blocksFile):
+      # success or not, we quit after importing blocks
+      if not importRlpBlock(string blocksFile, com):
+        quit(QuitFailure)
+      else:
+        quit(QuitSuccess)
diff --git a/nimbus/sync/beacon/skeleton_db.nim b/nimbus/sync/beacon/skeleton_db.nim
index 4022b936d..71f749de7 100644
--- a/nimbus/sync/beacon/skeleton_db.nim
+++ b/nimbus/sync/beacon/skeleton_db.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
 # Licensed and distributed under either of
 #   * MIT license (license terms in the root directory or at
 #     https://opensource.org/licenses/MIT).
@@ -197,13 +197,8 @@ proc insertBlocks*(sk: SkeletonRef, headers: openArray[BlockHeader], body: openArray[BlockBody], fromEngine: bool): Result[uint64, string] = - try: - let res = sk.chain.persistBlocks(headers, body) - if res != ValidationResult.OK: - return err("insertBlocks validation error") - ok(headers.len.uint64) - except CatchableError as ex: - err(ex.msg) + discard ? sk.chain.persistBlocks(headers, body) + ok(headers.len.uint64) proc insertBlock*(sk: SkeletonRef, header: BlockHeader, diff --git a/nimbus/sync/full/worker.nim b/nimbus/sync/full/worker.nim index 2fdd1136a..530d73c38 100644 --- a/nimbus/sync/full/worker.nim +++ b/nimbus/sync/full/worker.nim @@ -122,7 +122,7 @@ proc processStaged(buddy: FullBuddyRef): bool = # Store in persistent database try: - if chain.persistBlocks(wi.headers, wi.bodies) == ValidationResult.OK: + if chain.persistBlocks(wi.headers, wi.bodies).isOk(): bq.blockQueueAccept(wi) return true except CatchableError as e: diff --git a/premix/persist.nim b/premix/persist.nim index 8d9566385..5825c5126 100644 --- a/premix/persist.nim +++ b/premix/persist.nim @@ -82,7 +82,6 @@ proc main() {.used.} = var retryCount = 0 while true: - var thisBlock: Block try: thisBlock = requestBlock(blockNumber, { DownloadAndValidate }) @@ -104,8 +103,9 @@ proc main() {.used.} = if numBlocks == numBlocksToCommit: persistToDb(com.db): - if chain.persistBlocks(headers, bodies) != ValidationResult.OK: - raise newException(ValidationError, "Error when validating blocks") + let res = chain.persistBlocks(headers, bodies) + res.isOkOr: + raise newException(ValidationError, "Error when validating blocks: " & res.error) numBlocks = 0 headers.setLen(0) bodies.setLen(0) @@ -116,8 +116,9 @@ proc main() {.used.} = if numBlocks > 0: persistToDb(com.db): - if chain.persistBlocks(headers, bodies) != ValidationResult.OK: - raise newException(ValidationError, "Error when validating blocks") + let res = chain.persistBlocks(headers, bodies) + res.isOkOr: + raise newException(ValidationError, "Error when validating blocks: " & res.error) when isMainModule: var message: string diff --git a/scripts/check_copyright_year.sh b/scripts/check_copyright_year.sh index 79cfea75f..129c60c8f 100644 --- a/scripts/check_copyright_year.sh +++ b/scripts/check_copyright_year.sh @@ -8,7 +8,7 @@ # according to those terms. excluded_files="config.yaml|.gitmodules" -excluded_extensions="json|md|png|txt|toml|gz|key|rlp|era1" +excluded_extensions="json|md|png|txt|toml|gz|key|rlp|era1|cfg" current_year=$(date +"%Y") outdated_files=() diff --git a/tests/nim.cfg b/tests/nim.cfg index e1aa9c227..a63cbec0d 100644 --- a/tests/nim.cfg +++ b/tests/nim.cfg @@ -9,7 +9,9 @@ # according to those terms. 
-d:chronicles_line_numbers --d:"chronicles_sinks=textblocks" +-d:"chronicles_sinks=textlines" # comment this out, to run the tests in a serial manner: #-d:nimtestParallel +-d:"chronicles_disable_thread_id" +-d:"chronicles_runtime_filtering=on" diff --git a/tests/test_accounts_cache.nim b/tests/test_accounts_cache.nim index 8b88c17b8..31b2ac3a3 100644 --- a/tests/test_accounts_cache.nim +++ b/tests/test_accounts_cache.nim @@ -86,7 +86,7 @@ when false: discard when defined(chronicles_runtime_filtering) and loggingEnabled: setLogLevel(LogLevel.TRACE) - + proc setErrorLevel = discard when defined(chronicles_runtime_filtering) and loggingEnabled: @@ -104,7 +104,7 @@ proc blockChainForTesting*(network: NetworkId): CommonRef = initializeEmptyDb(result) proc importBlocks(com: CommonRef; h: seq[BlockHeader]; b: seq[BlockBody]) = - if com.newChain.persistBlocks(h,b) != ValidationResult.OK: + if com.newChain.persistBlocks(h,b).isErr: raiseAssert "persistBlocks() failed at block #" & $h[0].blockNumber proc getVmState(com: CommonRef; number: BlockNumber): BaseVMState = diff --git a/tests/test_blockchain_json.nim b/tests/test_blockchain_json.nim index ad61f4d7c..b8094ed07 100644 --- a/tests/test_blockchain_json.nim +++ b/tests/test_blockchain_json.nim @@ -261,8 +261,8 @@ proc importBlock(ctx: var TestCtx, com: CommonRef, chain = newChain(com, extraValidation = true, ctx.vmState) res = chain.persistBlocks([tb.header], [tb.body]) - if res == ValidationResult.Error: - raise newException(ValidationError, "persistBlocks validation") + if res.isErr() + raise newException(ValidationError, res.error()) else: blockWitness(chain.vmState, com.db) testGetBlockWitness(chain, chain.vmState.parent, tb.header) diff --git a/tests/test_configuration.nim b/tests/test_configuration.nim index 9a7b0abac..fecaa41c9 100644 --- a/tests/test_configuration.nim +++ b/tests/test_configuration.nim @@ -52,7 +52,7 @@ proc configurationMain*() = let bb = makeConfig(@["import", genesisFile]) check bb.cmd == NimbusCmd.`import` - check bb.blocksFile.string == genesisFile + check bb.blocksFile[0].string == genesisFile test "custom-network loading config file with no genesis data": # no genesis will fallback to geth compatibility mode diff --git a/tests/test_coredb/test_chainsync.nim b/tests/test_coredb/test_chainsync.nim index 347c09f4f..b1f048fbe 100644 --- a/tests/test_coredb/test_chainsync.nim +++ b/tests/test_coredb/test_chainsync.nim @@ -234,7 +234,7 @@ proc test_chainSync*( noisy.stopLoggingAfter(): let runPersistBlocksRc = chain.persistBlocks(w[0], w[1]) - xCheck runPersistBlocksRc == ValidationResult.OK: + xCheck runPersistBlocksRc.isOk(): if noisy: noisy.whisper "***", "Re-run with logging enabled...\n" setTraceLevel() @@ -269,7 +269,7 @@ proc test_chainSync*( noisy.whisper "***", &"processing {dotsOrSpace}[#{fromBlock:>8},#{(lastBlock-1):>8}]" let runPersistBlocks1Rc = chain.persistBlocks(headers1, bodies1) - xCheck runPersistBlocks1Rc == ValidationResult.OK + xCheck runPersistBlocks1Rc.isOk() dotsOrSpace = " " noisy.startLogging(headers9[0].blockNumber) @@ -286,7 +286,7 @@ proc test_chainSync*( &"processing {dotsOrSpace}[#{lastBlock:>8},#{lastBlock:>8}]" noisy.stopLoggingAfter(): let runPersistBlocks0Rc = chain.persistBlocks(headers0, bodies0) - xCheck runPersistBlocks0Rc == ValidationResult.OK + xCheck runPersistBlocks0Rc.isOk() else: if oldLogAlign: noisy.whisper "***", @@ -297,7 +297,7 @@ proc test_chainSync*( &"processing {dotsOrSpace}[#{lastBlock:>8},#{toBlock:>8}]" noisy.stopLoggingAfter(): let runPersistBlocks9Rc = 
chain.persistBlocks(headers9, bodies9) - xCheck runPersistBlocks9Rc == ValidationResult.OK + xCheck runPersistBlocks9Rc.isOk() break if not oldLogAlign: sayPerf diff --git a/tests/test_graphql.nim b/tests/test_graphql.nim index 4742f0c01..eaacc3800 100644 --- a/tests/test_graphql.nim +++ b/tests/test_graphql.nim @@ -88,7 +88,7 @@ proc setupChain(): CommonRef = let chain = newChain(com) let res = chain.persistBlocks(headers, bodies) - assert(res == ValidationResult.OK) + assert res.isOk(), res.error() com diff --git a/tests/test_persistblock_json.nim b/tests/test_persistblock_json.nim index e9feedf73..9d581dcdc 100644 --- a/tests/test_persistblock_json.nim +++ b/tests/test_persistblock_json.nim @@ -40,7 +40,7 @@ proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus) = # it's ok if setHead fails here because of missing ancestors discard com.db.setHead(parent, true) let validationResult = chain.persistBlocks(headers, bodies) - check validationResult == ValidationResult.OK + check validationResult.isOk() proc persistBlockJsonMain*() = suite "persist block json tests": diff --git a/tests/test_persistblock_witness_json.nim b/tests/test_persistblock_witness_json.nim index ed85d97b8..af4e26263 100644 --- a/tests/test_persistblock_witness_json.nim +++ b/tests/test_persistblock_witness_json.nim @@ -43,7 +43,7 @@ proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus) = # it's ok if setHead fails here because of missing ancestors discard com.db.setHead(parent, true) let validationResult = chain.persistBlocks(headers, bodies) - check validationResult == ValidationResult.OK + check validationResult.isOk() let blockHash = memoryDB.getBlockHash(blockNumber) diff --git a/tests/test_rocksdb_timing/test_db_timing.nim b/tests/test_rocksdb_timing/test_db_timing.nim index 7eddfd703..f699758a0 100644 --- a/tests/test_rocksdb_timing/test_db_timing.nim +++ b/tests/test_rocksdb_timing/test_db_timing.nim @@ -123,7 +123,7 @@ proc test_dbTimingUndumpBlocks*( # Message if [fromBlock,toBlock] contains a multiple of 700 if fromBlock + (toBlock mod 900) <= toBlock: loadNoise.say "***", &"processing ...[#{fromBlock},#{toBlock}]..." - check chain.persistBlocks(w[0], w[1]) == ValidationResult.OK + check chain.persistBlocks(w[0], w[1]).isOk() if numBlocks.toBlockNumber <= w[0][^1].blockNumber: break diff --git a/tests/test_rpc_experimental_json.nim b/tests/test_rpc_experimental_json.nim index 2fbe3204f..71c6a6468 100644 --- a/tests/test_rpc_experimental_json.nim +++ b/tests/test_rpc_experimental_json.nim @@ -52,7 +52,7 @@ proc importBlockData(node: JsonNode): (CommonRef, Hash256, Hash256, UInt256) {. 
# it's ok if setHead fails here because of missing ancestors discard com.db.setHead(parent, true) let validationResult = chain.persistBlocks(headers, bodies) - doAssert validationResult == ValidationResult.OK + doAssert validationResult.isOk() return (com, parent.stateRoot, header.stateRoot, blockNumber) diff --git a/tests/test_txpool/setup.nim b/tests/test_txpool/setup.nim index 68d56c606..55b099dee 100644 --- a/tests/test_txpool/setup.nim +++ b/tests/test_txpool/setup.nim @@ -31,7 +31,7 @@ proc setStatus(xp: TxPoolRef; item: TxItemRef; status: TxItemStatus) discard xp.txDB.reassign(item, status) proc importBlocks(c: ChainRef; h: seq[BlockHeader]; b: seq[BlockBody]): int = - if c.persistBlocks(h,b) != ValidationResult.OK: + if c.persistBlocks(h,b).isErr(): raiseAssert "persistBlocks() failed at block #" & $h[0].blockNumber for body in b: result += body.transactions.len diff --git a/tests/test_txpool2.nim b/tests/test_txpool2.nim index f452d4d2e..c0ad50b2d 100644 --- a/tests/test_txpool2.nim +++ b/tests/test_txpool2.nim @@ -173,7 +173,7 @@ proc runTxPoolPosTest() = test "PoS persistBlocks": let rr = chain.persistBlocks([blk.header], [body]) - check rr == ValidationResult.OK + check rr.isOk() test "validate TxPool prevRandao setter": var sdb = LedgerRef.init(com.db, blk.header.stateRoot) @@ -236,7 +236,7 @@ proc runTxPoolBlobhashTest() = test "Blobhash persistBlocks": let rr = chain.persistBlocks([blk.header], [body]) - check rr == ValidationResult.OK + check rr.isOk() test "validate TxPool prevRandao setter": var sdb = LedgerRef.init(com.db, blk.header.stateRoot)
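
Usage sketch (hypothetical, not part of the patch): based on the options this change adds to `nimbus import` (`--era1-dir`, `--max-blocks`, `--chunk-size`, and one or more positional RLP block files), an invocation might look roughly like the following. The binary path, data/era1 directories, block count, and file names are placeholders.

    # import up to 1,000,000 pre-merge blocks from era1 files,
    # committing one database transaction per 8192 blocks (the default chunk size)
    nimbus import --data-dir=/data/nimbus --era1-dir=/data/era1 --max-blocks=1000000

    # import one or more RLP-encoded block files (positional blocks-file arguments)
    nimbus import --data-dir=/data/nimbus blocks1.rlp blocks2.rlp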