diff --git a/fluffy/database/era1_db.nim b/fluffy/database/era1_db.nim
index 3ea7c8f8a..4eaaaa482 100644
--- a/fluffy/database/era1_db.nim
+++ b/fluffy/database/era1_db.nim
@@ -50,6 +50,11 @@ proc new*(
 ): Era1DB =
   Era1DB(path: path, network: network, accumulator: accumulator)
 
+proc getEthBlock*(db: Era1DB, blockNumber: uint64): Result[EthBlock, string] =
+  let f = ?db.getEra1File(blockNumber.era)
+
+  f.getEthBlock(blockNumber)
+
 proc getBlockTuple*(db: Era1DB, blockNumber: uint64): Result[BlockTuple, string] =
   let f = ?db.getEra1File(blockNumber.era)
 
diff --git a/fluffy/eth_data/era1.nim b/fluffy/eth_data/era1.nim
index cc13043e7..d05b92b75 100644
--- a/fluffy/eth_data/era1.nim
+++ b/fluffy/eth_data/era1.nim
@@ -338,6 +338,30 @@ proc getTotalDifficulty(f: Era1File): Result[UInt256, string] =
 
   ok(UInt256.fromBytesLE(bytes))
 
+proc getNextEthBlock*(f: Era1File): Result[EthBlock, string] =
+  doAssert not isNil(f) and f[].handle.isSome
+
+  var
+    header = ?getBlockHeader(f)
+    body = ?getBlockBody(f)
+  ?skipRecord(f) # receipts
+  ?skipRecord(f) # totalDifficulty
+
+  ok(EthBlock.init(move(header), move(body)))
+
+proc getEthBlock*(f: Era1File, blockNumber: uint64): Result[EthBlock, string] =
+  doAssert not isNil(f) and f[].handle.isSome
+  doAssert(
+    blockNumber >= f[].blockIdx.startNumber and blockNumber <= f[].blockIdx.endNumber,
+    "Wrong era1 file for selected block number",
+  )
+
+  let pos = f[].blockIdx.offsets[blockNumber - f[].blockIdx.startNumber]
+
+  ?f[].handle.get().setFilePos(pos, SeekPosition.SeekBegin).mapErr(ioErrorMsg)
+
+  getNextEthBlock(f)
+
 proc getNextBlockTuple*(f: Era1File): Result[BlockTuple, string] =
   doAssert not isNil(f) and f[].handle.isSome
 
diff --git a/fluffy/tools/portal_bridge/portal_bridge_history.nim b/fluffy/tools/portal_bridge/portal_bridge_history.nim
index 3e1ab449a..9852cba35 100644
--- a/fluffy/tools/portal_bridge/portal_bridge_history.nim
+++ b/fluffy/tools/portal_bridge/portal_bridge_history.nim
@@ -37,7 +37,7 @@ func asEthBlock(blockObject: BlockObject): EthBlock =
     transactions = toTransactions(blockObject.transactions)
     withdrawals = toWithdrawals(blockObject.withdrawals)
 
-  EthBlock(header: header, txs: transactions, withdrawals: withdrawals)
+  EthBlock(header: header, transactions: transactions, withdrawals: withdrawals)
 
 func asPortalBlock(
     ethBlock: EthBlock
diff --git a/hive_integration/nodocker/engine/cancun/customizer.nim b/hive_integration/nodocker/engine/cancun/customizer.nim
index c8c8872c9..650c355f9 100644
--- a/hive_integration/nodocker/engine/cancun/customizer.nim
+++ b/hive_integration/nodocker/engine/cancun/customizer.nim
@@ -400,13 +400,13 @@ proc customizePayload*(cust: CustomPayloadData, data: ExecutableData): Executabl
 
   var blk = EthBlock(
     header: customHeader,
+    transactions:
+      if cust.transactions.isSome:
+        cust.transactions.get
+      else:
+        ethTxs data.basePayload.transactions
   )
 
-  if cust.transactions.isSome:
-    blk.txs = cust.transactions.get
-  else:
-    blk.txs = ethTxs data.basePayload.transactions
-
   if cust.removeWithdrawals:
     blk.withdrawals = none(seq[Withdrawal])
   elif cust.withdrawals.isSome:
diff --git a/hive_integration/nodocker/engine/engine/invalid_ancestor.nim b/hive_integration/nodocker/engine/engine/invalid_ancestor.nim
index fcafc0cc8..bbf094b6c 100644
--- a/hive_integration/nodocker/engine/engine/invalid_ancestor.nim
+++ b/hive_integration/nodocker/engine/engine/invalid_ancestor.nim
@@ -323,7 +323,7 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
             invalidHeader = blockHeader(shadow.payloads[i])
            invalidBody = blockBody(shadow.payloads[i])
 
-          testCond sec.setBlock(invalidHeader, invalidBody):
+          testCond sec.setBlock(EthBlock.init(invalidHeader, invalidBody)):
            fatal "TEST ISSUE - Failed to set invalid block"
          info "Invalid block successfully set",
            idx=i,
diff --git a/hive_integration/nodocker/engine/engine_client.nim b/hive_integration/nodocker/engine/engine_client.nim
index a2ac7edf0..ea96f8638 100644
--- a/hive_integration/nodocker/engine/engine_client.nim
+++ b/hive_integration/nodocker/engine/engine_client.nim
@@ -236,7 +236,7 @@ proc newPayload*(client: RpcClient,
   case version
   of Version.V1: return client.newPayloadV1(payload)
   of Version.V2: return client.newPayloadV2(payload)
-  of Version.V3: 
+  of Version.V3:
     let versionedHashes = collectBlobHashes(payload.transactions)
     return client.newPayloadV3(payload,
       some(versionedHashes),
@@ -246,7 +246,7 @@ proc newPayload*(client: RpcClient,
     return client.newPayloadV4(payload,
       some(versionedHashes),
       w3Hash beaconRoot)
-    
+
 proc newPayload*(client: RpcClient,
                  version: Version,
                  payload: ExecutableData): Result[PayloadStatusV1, string] =
@@ -518,7 +518,7 @@ proc latestBlock*(client: RpcClient): Result[common.EthBlock, string] =
     return err("failed to get latest blockHeader")
   let output = EthBlock(
     header: toBlockHeader(res),
-    txs: toTransactions(res.transactions),
+    transactions: toTransactions(res.transactions),
     withdrawals: toWithdrawals(res.withdrawals),
   )
   return ok(output)
diff --git a/hive_integration/nodocker/engine/engine_env.nim b/hive_integration/nodocker/engine/engine_env.nim
index 27d6029df..507ca27c3 100644
--- a/hive_integration/nodocker/engine/engine_env.nim
+++ b/hive_integration/nodocker/engine/engine_env.nim
@@ -218,5 +218,5 @@ func version*(env: EngineEnv, time: Web3Quantity): Version =
 func version*(env: EngineEnv, time: uint64): Version =
   env.version(time.EthTime)
 
-proc setBlock*(env: EngineEnv, header: common.BlockHeader, body: common.BlockBody): bool =
-  env.chain.setBlock(header, body) == ValidationResult.OK
+proc setBlock*(env: EngineEnv, blk: common.EthBlock): bool =
+  env.chain.setBlock(blk) == ValidationResult.OK
diff --git a/hive_integration/nodocker/engine/node.nim b/hive_integration/nodocker/engine/node.nim
index eaa923448..77ac49592 100644
--- a/hive_integration/nodocker/engine/node.nim
+++ b/hive_integration/nodocker/engine/node.nim
@@ -32,8 +32,8 @@ import
 
 proc processBlock(
     vmState: BaseVMState;  ## Parent environment of header/body block
-    header:  BlockHeader;  ## Header/body block to add to the blockchain
-    body:    BlockBody): ValidationResult
+    blk:     EthBlock;     ## Header/body block to add to the blockchain
+    ): ValidationResult
     {.gcsafe, raises: [CatchableError].} =
   ## Generalised function to processes `(header,body)` pair for any network,
   ## regardless of PoA or not.
@@ -43,7 +43,7 @@ proc processBlock(
   ## the `poa` descriptor is currently unused and only provided for later
   ## implementations (but can be savely removed, as well.)
   ## variant of `processBlock()` where the `header` argument is explicitely set.
-
+  template header: BlockHeader = blk.header
   var dbTx = vmState.com.db.newTransaction()
   defer: dbTx.dispose()
 
@@ -57,20 +57,20 @@ proc processBlock(
     if r.isErr:
       error("error in processing beaconRoot", err=r.error)
 
-  let r = processTransactions(vmState, header, body.transactions)
+  let r = processTransactions(vmState, header, blk.transactions)
   if r.isErr:
     error("error in processing transactions", err=r.error)
 
   if vmState.determineFork >= FkShanghai:
-    for withdrawal in body.withdrawals.get:
+    for withdrawal in blk.withdrawals.get:
       vmState.stateDB.addBalance(withdrawal.address, withdrawal.weiAmount)
 
   if header.ommersHash != EMPTY_UNCLE_HASH:
-    discard vmState.com.db.persistUncles(body.uncles)
+    discard vmState.com.db.persistUncles(blk.uncles)
 
   # EIP-3675: no reward for miner in POA/POS
   if vmState.com.consensus == ConsensusType.POW:
-    vmState.calculateReward(header, body)
+    vmState.calculateReward(header, blk.uncles)
 
   vmState.mutateStateDB:
     let clearEmptyAccount = vmState.determineFork >= FkSpurious
@@ -95,9 +95,9 @@ proc getVmState(c: ChainRef, header: BlockHeader):
 
 # A stripped down version of persistBlocks without validation
 # intended to accepts invalid block
-proc setBlock*(c: ChainRef; header: BlockHeader;
-               body: BlockBody): ValidationResult
+proc setBlock*(c: ChainRef; blk: EthBlock): ValidationResult
                              {.inline, raises: [CatchableError].} =
+  template header: BlockHeader = blk.header
   let dbTx = c.db.newTransaction()
   defer: dbTx.dispose()
 
@@ -108,18 +108,18 @@ proc setBlock*(c: ChainRef; header: BlockHeader;
     vmState = c.getVmState(header).valueOr:
       return ValidationResult.Error
     stateRootChpt = vmState.parent.stateRoot # Check point
-    validationResult = vmState.processBlock(header, body)
+    validationResult = vmState.processBlock(blk)
 
   if validationResult != ValidationResult.OK:
     return validationResult
 
-  discard c.db.persistHeaderToDb(
+  c.db.persistHeaderToDb(
     header, c.com.consensus == ConsensusType.POS, c.com.startOfHistory)
-  discard c.db.persistTransactions(header.blockNumber, body.transactions)
+  discard c.db.persistTransactions(header.blockNumber, blk.transactions)
   discard c.db.persistReceipts(vmState.receipts)
 
-  if body.withdrawals.isSome:
-    discard c.db.persistWithdrawals(body.withdrawals.get)
+  if blk.withdrawals.isSome:
+    discard c.db.persistWithdrawals(blk.withdrawals.get)
 
   # update currentBlock *after* we persist it
   # so the rpc return consistent result
diff --git a/hive_integration/nodocker/pyspec/test_env.nim b/hive_integration/nodocker/pyspec/test_env.nim
index b6adb4d06..0796db6b8 100644
--- a/hive_integration/nodocker/pyspec/test_env.nim
+++ b/hive_integration/nodocker/pyspec/test_env.nim
@@ -61,7 +61,7 @@ proc setupELClient*(t: TestEnv, conf: ChainConfig, node: JsonNode) =
 
   doAssert stateDB.rootHash == genesisHeader.stateRoot
 
-  discard t.com.db.persistHeaderToDb(genesisHeader,
+  t.com.db.persistHeaderToDb(genesisHeader,
     t.com.consensus == ConsensusType.POS)
   doAssert(t.com.db.getCanonicalHead().blockHash == genesisHeader.blockHash)
 
diff --git a/nimbus/beacon/api_handler/api_newpayload.nim b/nimbus/beacon/api_handler/api_newpayload.nim
index 0b293fd47..2513fe29a 100644
--- a/nimbus/beacon/api_handler/api_newpayload.nim
+++ b/nimbus/beacon/api_handler/api_newpayload.nim
@@ -115,7 +115,8 @@ proc newPayload*(ben: BeaconEngineRef,
   validatePayload(apiVersion, version, payload)
   validateVersion(com, timestamp, version, apiVersion)
 
-  var header = blockHeader(payload, beaconRoot = ethHash beaconRoot)
+  var blk = ethBlock(payload, beaconRoot = ethHash beaconRoot)
+  template header: BlockHeader = blk.header
 
   if apiVersion >= Version.V3:
     if versionedHashes.isNone:
@@ -185,8 +186,7 @@ proc newPayload*(ben: BeaconEngineRef,
 
       trace "Inserting block without sethead",
         hash = blockHash, number = header.blockNumber
-      let body = blockBody(payload)
-      let vres = ben.chain.insertBlockWithoutSetHead(header, body)
+      let vres = ben.chain.insertBlockWithoutSetHead(blk)
       if vres.isErr:
         ben.setInvalidAncestor(header, blockHash)
         let blockHash = latestValidHash(db, parent, ttd)
diff --git a/nimbus/beacon/payload_conv.nim b/nimbus/beacon/payload_conv.nim
index 379fd772c..dc36e9602 100644
--- a/nimbus/beacon/payload_conv.nim
+++ b/nimbus/beacon/payload_conv.nim
@@ -7,6 +7,8 @@
 # This file may not be copied, modified, or distributed except according to
 # those terms.
 
+{.push raises: [].}
+
 import
   ./web3_eth_conv,
   web3/execution_types,
@@ -81,7 +83,7 @@ func executionPayloadV1V2*(blk: EthBlock): ExecutionPayloadV1OrV2 =
 
 func blockHeader*(p: ExecutionPayload,
                   beaconRoot: Option[common.Hash256]):
-                    common.BlockHeader {.gcsafe, raises:[CatchableError].} =
+                    common.BlockHeader {.gcsafe, raises:[RlpError].} =
   common.BlockHeader(
     parentHash     : ethHash p.parentHash,
     ommersHash     : EMPTY_UNCLE_HASH,
@@ -115,10 +117,10 @@ func blockBody*(p: ExecutionPayload):
 
 func ethBlock*(p: ExecutionPayload,
                beaconRoot: Option[common.Hash256]):
-                 common.EthBlock {.gcsafe, raises:[CatchableError].} =
+                 common.EthBlock {.gcsafe, raises:[RlpError].} =
   common.EthBlock(
-    header     : blockHeader(p, beaconRoot),
-    uncles     : @[],
-    txs        : ethTxs p.transactions,
+    header      : blockHeader(p, beaconRoot),
+    uncles      : @[],
+    transactions: ethTxs p.transactions,
     withdrawals: ethWithdrawals p.withdrawals,
   )
diff --git a/nimbus/common/common.nim b/nimbus/common/common.nim
index 97248d40e..3a5506490 100644
--- a/nimbus/common/common.nim
+++ b/nimbus/common/common.nim
@@ -374,7 +374,7 @@ proc initializeEmptyDb*(com: CommonRef)
     info "Writing genesis to DB"
     doAssert(com.genesisHeader.blockNumber.isZero,
       "can't commit genesis block with number > 0")
-    discard com.db.persistHeaderToDb(com.genesisHeader,
+    com.db.persistHeaderToDb(com.genesisHeader,
       com.consensusType == ConsensusType.POS)
     doAssert(canonicalHeadHashKey().toOpenArray in kvt)
 
diff --git a/nimbus/core/block_import.nim b/nimbus/core/block_import.nim
index 69a16196a..254572c24 100644
--- a/nimbus/core/block_import.nim
+++ b/nimbus/core/block_import.nim
@@ -22,16 +22,17 @@ proc importRlpBlock*(blocksRlp: openArray[byte]; com: CommonRef; importFile: str
     rlp = rlpFromBytes(blocksRlp)
     chain = newChain(com, extraValidation = true)
     errorCount = 0
-    header: BlockHeader
-    body: BlockBody
+    blk: array[1, EthBlock]
 
   # even though the new imported blocks have block number
   # smaller than head, we keep importing it.
   # it maybe a side chain.
-
+  # TODO the above is no longer true with a single-state database - to deal with
+  # that scenario the code needs to be rewritten to not persist the blocks
+  # to the state database until all have been processed
   while rlp.hasData:
-    try:
-      rlp.decompose(header, body)
+    blk[0] = try:
+      rlp.read(EthBlock)
     except RlpError as e:
       # terminate if there was a decoding error
       error "rlp error",
@@ -40,7 +41,7 @@ proc importRlpBlock*(blocksRlp: openArray[byte]; com: CommonRef; importFile: str
         exception = e.name
       return false
 
-    chain.persistBlocks([header], [body]).isOkOr():
+    chain.persistBlocks(blk).isOkOr():
       # register one more error and continue
      error "import error",
        fileName = importFile,
diff --git a/nimbus/core/chain/persist_blocks.nim b/nimbus/core/chain/persist_blocks.nim
index 53cb41a8b..1cdb70bf0 100644
--- a/nimbus/core/chain/persist_blocks.nim
+++ b/nimbus/core/chain/persist_blocks.nim
@@ -71,24 +71,25 @@ proc purgeOlderBlocksFromHistory(
       break
     blkNum = blkNum - 1
 
-proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
-                       bodies: openArray[BlockBody],
+proc persistBlocksImpl(c: ChainRef; blocks: openArray[EthBlock];
                        flags: PersistBlockFlags = {}): Result[PersistStats, string]
                          {.raises: [CatchableError] .} =
   let dbTx = c.db.newTransaction()
   defer: dbTx.dispose()
 
-  c.com.hardForkTransition(headers[0])
+  c.com.hardForkTransition(blocks[0].header)
 
   # Note that `0 < headers.len`, assured when called from `persistBlocks()`
-  let vmState = ?c.getVmState(headers[0])
+  let vmState = ?c.getVmState(blocks[0].header)
 
-  let (fromBlock, toBlock) = (headers[0].blockNumber, headers[^1].blockNumber)
+  let
+    fromBlock = blocks[0].header.blockNumber
+    toBlock = blocks[blocks.high()].header.blockNumber
   trace "Persisting blocks", fromBlock, toBlock
 
   var txs = 0
-  for i in 0 ..< headers.len:
-    let (header, body) = (headers[i], bodies[i])
+  for blk in blocks:
+    template header: BlockHeader = blk.header
 
     # # This transaction keeps the current state open for inspection
     # # if an error occurs (as needed for `Aristo`.).
@@ -98,22 +99,18 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
     c.com.hardForkTransition(header)
 
     if not vmState.reinit(header):
-      debug "Cannot update VmState",
-        blockNumber = header.blockNumber,
-        item = i
+      debug "Cannot update VmState", blockNumber = header.blockNumber
       return err("Cannot update VmState to block " & $header.blockNumber)
 
     if c.validateBlock and c.extraValidation and
       c.verifyFrom <= header.blockNumber:
-      ? c.com.validateHeaderAndKinship(
-        header,
-        body,
-        checkSealOK = false) # TODO: how to checkseal from here
+      # TODO: how to checkseal from here
+      ? c.com.validateHeaderAndKinship(blk, checkSealOK = false)
 
    let
      validationResult =
        if c.validateBlock:
-          vmState.processBlock(header, body)
+          vmState.processBlock(blk)
        else:
          ValidationResult.OK
@@ -127,17 +124,17 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
      return err("Failed to validate block")
 
    if NoPersistHeader notin flags:
-      discard c.db.persistHeaderToDb(
+      c.db.persistHeaderToDb(
        header, c.com.consensus == ConsensusType.POS, c.com.startOfHistory)
 
    if NoSaveTxs notin flags:
-      discard c.db.persistTransactions(header.blockNumber, body.transactions)
+      discard c.db.persistTransactions(header.blockNumber, blk.transactions)
 
    if NoSaveReceipts notin flags:
      discard c.db.persistReceipts(vmState.receipts)
 
-    if NoSaveWithdrawals notin flags and body.withdrawals.isSome:
-      discard c.db.persistWithdrawals(body.withdrawals.get)
+    if NoSaveWithdrawals notin flags and blk.withdrawals.isSome:
+      discard c.db.persistWithdrawals(blk.withdrawals.get)
 
    # update currentBlock *after* we persist it
    # so the rpc return consistent result
@@ -147,12 +144,12 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
    # Done with this block
    # lapTx.commit()
 
-    txs += body.transactions.len
+    txs += blk.transactions.len
 
  dbTx.commit()
 
  # Save and record the block number before the last saved block state.
-  c.db.persistent(headers[^1].blockNumber)
+  c.db.persistent(toBlock)
 
  if c.com.pruneHistory:
    # There is a feature for test systems to regularly clean up older blocks
@@ -162,19 +159,18 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
    # Starts at around `2 * CleanUpEpoch`
    c.db.purgeOlderBlocksFromHistory(fromBlock - CleanUpEpoch)
 
-  ok((headers.len, txs, vmState.cumulativeGasUsed))
+  ok((blocks.len, txs, vmState.cumulativeGasUsed))
 
 # ------------------------------------------------------------------------------
 # Public `ChainDB` methods
 # ------------------------------------------------------------------------------
 
-proc insertBlockWithoutSetHead*(c: ChainRef, header: BlockHeader,
-                                body: BlockBody): Result[void, string] =
+proc insertBlockWithoutSetHead*(c: ChainRef, blk: EthBlock): Result[void, string] =
  try:
    discard ? c.persistBlocksImpl(
-      [header], [body], {NoPersistHeader, NoSaveReceipts})
+      [blk], {NoPersistHeader, NoSaveReceipts})
 
-    c.db.persistHeaderToDbWithoutSetHead(header, c.com.startOfHistory)
+    c.db.persistHeaderToDbWithoutSetHead(blk.header, c.com.startOfHistory)
    ok()
  except CatchableError as exc:
    err(exc.msg)
@@ -191,7 +187,7 @@ proc setCanonical*(c: ChainRef, header: BlockHeader): Result[void, string] =
      hash = header.blockHash
    return err("Could not get block body")
 
-  discard ? c.persistBlocksImpl([header], [body], {NoPersistHeader, NoSaveTxs})
+  discard ? c.persistBlocksImpl([EthBlock.init(header, move(body))], {NoPersistHeader, NoSaveTxs})
 
   discard c.db.setHead(header.blockHash)
   ok()
@@ -207,19 +203,15 @@ proc setCanonical*(c: ChainRef, blockHash: Hash256): Result[void, string] =
 
   setCanonical(c, header)
 
-proc persistBlocks*(c: ChainRef; headers: openArray[BlockHeader];
-                    bodies: openArray[BlockBody]): Result[PersistStats, string] =
+proc persistBlocks*(
+    c: ChainRef; blocks: openArray[EthBlock]): Result[PersistStats, string] =
   # Run the VM here
-  if headers.len != bodies.len:
-    debug "Number of headers not matching number of bodies"
-    return err("Mismatching headers and bodies")
-
-  if headers.len == 0:
+  if blocks.len == 0:
     debug "Nothing to do"
     return ok(default(PersistStats)) # TODO not nice to return nil
 
   try:
-    c.persistBlocksImpl(headers,bodies)
+    c.persistBlocksImpl(blocks)
   except CatchableError as exc:
     err(exc.msg)
diff --git a/nimbus/core/executor/calculate_reward.nim b/nimbus/core/executor/calculate_reward.nim
index 9f06895d6..75d004181 100644
--- a/nimbus/core/executor/calculate_reward.nim
+++ b/nimbus/core/executor/calculate_reward.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2023 Status Research & Development GmbH
+# Copyright (c) 2018-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -35,7 +35,7 @@ proc calculateReward*(vmState: BaseVMState; account: EthAddress;
 
 proc calculateReward*(vmState: BaseVMState;
-                      header: BlockHeader; body: BlockBody) =
-  vmState.calculateReward(header.coinbase, header.blockNumber, body.uncles)
+                      header: BlockHeader; uncles: openArray[BlockHeader]) =
+  vmState.calculateReward(header.coinbase, header.blockNumber, uncles)
 
 # End
diff --git a/nimbus/core/executor/process_block.nim b/nimbus/core/executor/process_block.nim
index 9033c4aa8..e543dfc22 100644
--- a/nimbus/core/executor/process_block.nim
+++ b/nimbus/core/executor/process_block.nim
@@ -45,16 +45,15 @@ proc processTransactions*(
     vmState.receipts[txIndex] = vmState.makeReceipt(tx.txType)
   ok()
 
-proc procBlkPreamble(vmState: BaseVMState;
-                     header: BlockHeader; body: BlockBody): bool
-    {.gcsafe, raises: [CatchableError].} =
-
+proc procBlkPreamble(vmState: BaseVMState; blk: EthBlock): bool
+    {.raises: [CatchableError].} =
+  template header: BlockHeader = blk.header
   if vmState.com.daoForkSupport and
      vmState.com.daoForkBlock.get == header.blockNumber:
     vmState.mutateStateDB:
       db.applyDAOHardFork()
 
-  if body.transactions.calcTxRoot != header.txRoot:
+  if blk.transactions.calcTxRoot != header.txRoot:
     debug "Mismatched txRoot",
       blockNumber = header.blockNumber
     return false
@@ -72,27 +71,27 @@ proc procBlkPreamble(vmState: BaseVMState;
       error("error in processing beaconRoot", err=r.error)
 
   if header.txRoot != EMPTY_ROOT_HASH:
-    if body.transactions.len == 0:
+    if blk.transactions.len == 0:
       debug "No transactions in body",
         blockNumber = header.blockNumber
       return false
    else:
-      let r = processTransactions(vmState, header, body.transactions)
+      let r = processTransactions(vmState, header, blk.transactions)
      if r.isErr:
        error("error in processing transactions", err=r.error)
 
   if vmState.determineFork >= FkShanghai:
     if header.withdrawalsRoot.isNone:
       raise ValidationError.newException("Post-Shanghai block header must have withdrawalsRoot")
-    if body.withdrawals.isNone:
+    if blk.withdrawals.isNone:
       raise ValidationError.newException("Post-Shanghai block body must have withdrawals")
 
-    for withdrawal in body.withdrawals.get:
+    for withdrawal in blk.withdrawals.get:
      vmState.stateDB.addBalance(withdrawal.address, withdrawal.weiAmount)
   else:
     if header.withdrawalsRoot.isSome:
       raise ValidationError.newException("Pre-Shanghai block header must not have withdrawalsRoot")
-    if body.withdrawals.isSome:
+    if blk.withdrawals.isSome:
       raise ValidationError.newException("Pre-Shanghai block body must not have withdrawals")
 
   if vmState.cumulativeGasUsed != header.gasUsed:
@@ -102,15 +101,14 @@ proc procBlkPreamble(vmState: BaseVMState;
     return false
 
   if header.ommersHash != EMPTY_UNCLE_HASH:
-    let h = vmState.com.db.persistUncles(body.uncles)
+    let h = vmState.com.db.persistUncles(blk.uncles)
     if h != header.ommersHash:
       debug "Uncle hash mismatch"
       return false
 
   true
 
-proc procBlkEpilogue(vmState: BaseVMState;
-                     header: BlockHeader; body: BlockBody): bool
+proc procBlkEpilogue(vmState: BaseVMState, header: BlockHeader): bool
     {.gcsafe, raises: [].} =
   # Reward beneficiary
   vmState.mutateStateDB:
@@ -150,30 +148,20 @@ proc procBlkEpilogue(vmState: BaseVMState;
 
 proc processBlock*(
     vmState: BaseVMState;  ## Parent environment of header/body block
-    header:  BlockHeader;  ## Header/body block to add to the blockchain
-    body:    BlockBody;
-      ): ValidationResult
-    {.gcsafe, raises: [CatchableError].} =
-  ## Generalised function to processes `(header,body)` pair for any network,
-  ## regardless of PoA or not.
-  ##
-  ## Rather than calculating the PoA state change here, it is done with the
-  ## verification in the `chain/persist_blocks.persistBlocks()` method. So
-  ## the `poa` descriptor is currently unused and only provided for later
-  ## implementations (but can be savely removed, as well.)
-  ## variant of `processBlock()` where the `header` argument is explicitely set.
-
+    blk:     EthBlock;     ## Header/body block to add to the blockchain
+      ): ValidationResult {.raises: [CatchableError].} =
+  ## Generalised function to process `blk` for any network.
   var dbTx = vmState.com.db.newTransaction()
   defer: dbTx.dispose()
 
-  if not vmState.procBlkPreamble(header, body):
+  if not vmState.procBlkPreamble(blk):
     return ValidationResult.Error
 
   # EIP-3675: no reward for miner in POA/POS
   if vmState.com.consensus == ConsensusType.POW:
-    vmState.calculateReward(header, body)
+    vmState.calculateReward(blk.header, blk.uncles)
 
-  if not vmState.procBlkEpilogue(header, body):
+  if not vmState.procBlkEpilogue(blk.header):
     return ValidationResult.Error
 
   dbTx.commit()
diff --git a/nimbus/core/validate.nim b/nimbus/core/validate.nim
index 525935c7f..6717193e5 100644
--- a/nimbus/core/validate.nim
+++ b/nimbus/core/validate.nim
@@ -65,13 +65,13 @@ proc validateSeal(pow: PowRef; header: BlockHeader): Result[void,string] =
 
 proc validateHeader(
     com: CommonRef;
-    header: BlockHeader;
+    blk: EthBlock;
     parentHeader: BlockHeader;
-    body: BlockBody;
     checkSealOK: bool;
-      ): Result[void,string]
-      {.gcsafe, raises: [].} =
-
+      ): Result[void,string] =
+  template header: BlockHeader = blk.header
+  # TODO this code is used for validating uncles also, though these get passed
+  # an empty body - avoid this by separating header and block validation
   template inDAOExtraRange(blockNumber: BlockNumber): bool =
     # EIP-799
     # Blocks with block numbers in the range [1_920_000, 1_920_009]
@@ -84,7 +84,7 @@ proc validateHeader(
   if header.extraData.len > 32:
     return err("BlockHeader.extraData larger than 32 bytes")
 
-  if header.gasUsed == 0 and 0 < body.transactions.len:
+  if header.gasUsed == 0 and 0 < blk.transactions.len:
     return err("zero gasUsed but transactions present");
 
   if header.gasUsed < 0 or header.gasUsed > header.gasLimit:
@@ -121,8 +121,8 @@ proc validateHeader(
   if checkSealOK:
     return com.pow.validateSeal(header)
 
-  ? com.validateWithdrawals(header, body)
-  ? com.validateEip4844Header(header, parentHeader, body.transactions)
+  ? com.validateWithdrawals(header, blk.withdrawals)
+  ? com.validateEip4844Header(header, parentHeader, blk.transactions)
   ? com.validateGasLimitOrBaseFee(header, parentHeader)
 
   ok()
@@ -197,21 +197,17 @@ proc validateUncles(com: CommonRef; header: BlockHeader;
 
     # Now perform VM level validation of the uncle
     if checkSealOK:
-      result = com.pow.validateSeal(uncle)
-      if result.isErr:
-        return
+      ? com.pow.validateSeal(uncle)
 
     let uncleParent = try:
       chainDB.getBlockHeader(uncle.parentHash)
     except BlockNotFound:
       return err("Uncle parent not found")
 
-    result = com.validateHeader(uncle, uncleParent,
-      BlockBody(), checkSealOK)
-    if result.isErr:
-      return
+    ? com.validateHeader(
+      EthBlock.init(uncle, BlockBody()), uncleParent, checkSealOK)
 
-  result = ok()
+  ok()
 
 # ------------------------------------------------------------------------------
 # Public function, extracted from executor
@@ -376,11 +372,12 @@ proc validateTransaction*(
 
 proc validateHeaderAndKinship*(
     com: CommonRef;
-    header: BlockHeader;
-    body: BlockBody;
+    blk: EthBlock;
     checkSealOK: bool;
       ): Result[void, string]
       {.gcsafe, raises: [].} =
+  template header: BlockHeader = blk.header
+
   if header.isGenesis:
     if header.extraData.len > 32:
       return err("BlockHeader.extraData larger than 32 bytes")
@@ -392,16 +389,15 @@ proc validateHeaderAndKinship*(
   except CatchableError as err:
     return err("Failed to load block header from DB")
 
-  result = com.validateHeader(
-    header, parent, body, checkSealOK)
-  if result.isErr:
-    return
+  ? com.validateHeader(blk, parent, checkSealOK)
 
-  if body.uncles.len > MAX_UNCLES:
+  if blk.uncles.len > MAX_UNCLES:
     return err("Number of uncles exceed limit.")
 
   if com.consensus != ConsensusType.POS:
-    result = com.validateUncles(header, body.uncles, checkSealOK)
+    ? com.validateUncles(header, blk.uncles, checkSealOK)
+
+  ok()
 
 # ------------------------------------------------------------------------------
 # End
diff --git a/nimbus/core/withdrawals.nim b/nimbus/core/withdrawals.nim
index 9e6ef0228..2eae50a1d 100644
--- a/nimbus/core/withdrawals.nim
+++ b/nimbus/core/withdrawals.nim
@@ -8,35 +8,29 @@
 # at your option. This file may not be copied, modified, or distributed except
 # according to those terms.
 
-import
-  results,
-  ../common/common
-
 {.push raises: [].}
 
+import results, ../common/common
+
 # https://eips.ethereum.org/EIPS/eip-4895
 
 proc validateWithdrawals*(
-    com: CommonRef,
-    header: BlockHeader,
-    body: BlockBody
-      ): Result[void, string]
-      {.gcsafe, raises: [].} =
-
+    com: CommonRef, header: BlockHeader, withdrawals: Option[seq[Withdrawal]]
+): Result[void, string] =
   if com.forkGTE(Shanghai):
     if header.withdrawalsRoot.isNone:
       return err("Post-Shanghai block header must have withdrawalsRoot")
-    elif body.withdrawals.isNone:
+    elif withdrawals.isNone:
       return err("Post-Shanghai block body must have withdrawals")
     else:
       try:
-        if body.withdrawals.get.calcWithdrawalsRoot != header.withdrawalsRoot.get:
+        if withdrawals.get.calcWithdrawalsRoot != header.withdrawalsRoot.get:
          return err("Mismatched withdrawalsRoot blockNumber =" & $header.blockNumber)
      except RlpError as ex:
        return err(ex.msg)
   else:
     if header.withdrawalsRoot.isSome:
       return err("Pre-Shanghai block header must not have withdrawalsRoot")
-    elif body.withdrawals.isSome:
+    elif withdrawals.isSome:
       return err("Pre-Shanghai block body must not have withdrawals")
   return ok()
diff --git a/nimbus/db/core_db/core_apps_newapi.nim b/nimbus/db/core_db/core_apps_newapi.nim
index 559d1ba7d..d9d89eba3 100644
--- a/nimbus/db/core_db/core_apps_newapi.nim
+++ b/nimbus/db/core_db/core_apps_newapi.nim
@@ -248,8 +248,7 @@ proc removeTransactionFromCanonicalChain(
 proc setAsCanonicalChainHead(
     db: CoreDbRef;
     headerHash: Hash256;
-      ): seq[BlockHeader]
-      {.gcsafe, raises: [RlpError,BlockNotFound].} =
+      ) {.gcsafe, raises: [RlpError,BlockNotFound].} =
   ## Sets the header as the canonical chain HEAD.
   let header = db.getBlockHeader(headerHash)
@@ -273,8 +272,6 @@ proc setAsCanonicalChainHead(
       warn logTxt "setAsCanonicalChainHead()",
         canonicalHeadHash, action="put()", error=($$error)
 
-  return newCanonicalHeaders
-
 proc markCanonicalChain(
     db: CoreDbRef;
     header: BlockHeader;
@@ -720,13 +717,28 @@ proc getWithdrawals*(
     for encodedWd in db.getWithdrawalsData(withdrawalsRoot):
       result.add(rlp.decode(encodedWd, Withdrawal))
 
+proc getTransactions*(
+    db: CoreDbRef;
+    header: BlockHeader;
+    output: var seq[Transaction])
+    {.gcsafe, raises: [RlpError].} =
+  for encodedTx in db.getBlockTransactionData(header.txRoot):
+    output.add(rlp.decode(encodedTx, Transaction))
+
+proc getTransactions*(
+    db: CoreDbRef;
+    header: BlockHeader;
+      ): seq[Transaction]
+    {.gcsafe, raises: [RlpError].} =
+  db.getTransactions(header, result)
+
 proc getBlockBody*(
     db: CoreDbRef;
     header: BlockHeader;
     output: var BlockBody;
       ): bool
       {.gcsafe, raises: [RlpError].} =
-  output.transactions = @[]
+  db.getTransactions(header, output.transactions)
   output.uncles = @[]
   for encodedTx in db.getBlockTransactionData(header.txRoot):
     output.transactions.add(rlp.decode(encodedTx, Transaction))
@@ -763,6 +775,27 @@ proc getBlockBody*(
   if not db.getBlockBody(hash, result):
     raise newException(ValueError, "Error when retrieving block body")
 
+proc getEthBlock*(
+    db: CoreDbRef;
+    hash: Hash256;
+      ): EthBlock
+      {.gcsafe, raises: [BlockNotFound, RlpError,ValueError].} =
+  var
+    header = db.getBlockHeader(hash)
+    blockBody = db.getBlockBody(hash)
+  EthBlock.init(move(header), move(blockBody))
+
+proc getEthBlock*(
+    db: CoreDbRef;
+    blockNumber: BlockNumber;
+      ): EthBlock
+      {.gcsafe, raises: [BlockNotFound, RlpError,ValueError].} =
+  var
+    header = db.getBlockHeader(blockNumber)
+    headerHash = header.blockHash
+    blockBody = db.getBlockBody(headerHash)
+  EthBlock.init(move(header), move(blockBody))
+
 proc getUncleHashes*(
     db: CoreDbRef;
     blockHashes: openArray[Hash256];
@@ -876,8 +909,7 @@ proc persistHeaderToDb*(
     header: BlockHeader;
     forceCanonical: bool;
     startOfHistory = GENESIS_PARENT_HASH;
-      ): seq[BlockHeader]
-      {.gcsafe, raises: [RlpError,EVMError].} =
+      ) {.gcsafe, raises: [RlpError,EVMError].} =
   let isStartOfHistory = header.parentHash == startOfHistory
   let headerHash = header.blockHash
   if not isStartOfHistory and not db.headerExists(header.parentHash):
@@ -887,7 +919,7 @@ proc persistHeaderToDb*(
   kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
     warn logTxt "persistHeaderToDb()",
       headerHash, action="put()", `error`=($$error)
-    return @[]
+    return
 
   let score = if isStartOfHistory: header.difficulty
               else: db.getScore(header.parentHash) + header.difficulty
@@ -895,17 +927,18 @@ proc persistHeaderToDb*(
   kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr:
     warn logTxt "persistHeaderToDb()",
       scoreKey, action="put()", `error`=($$error)
-    return @[]
+    return
 
   db.addBlockNumberToHashLookup(header)
 
-  var canonHeader: BlockHeader
-  if not db.getCanonicalHead canonHeader:
-    return db.setAsCanonicalChainHead(headerHash)
+  if not forceCanonical:
+    var canonHeader: BlockHeader
+    if db.getCanonicalHead canonHeader:
+      let headScore = db.getScore(canonHeader.hash)
+      if score <= headScore:
+        return
 
-  let headScore = db.getScore(canonHeader.hash)
-  if score > headScore or forceCanonical:
-    return db.setAsCanonicalChainHead(headerHash)
+  db.setAsCanonicalChainHead(headerHash)
 
 proc persistHeaderToDbWithoutSetHead*(
     db: CoreDbRef;
diff --git a/nimbus/db/era1_db/db_desc.nim b/nimbus/db/era1_db/db_desc.nim
index 0c29811ad..d409f3ded 100644
--- a/nimbus/db/era1_db/db_desc.nim
+++ b/nimbus/db/era1_db/db_desc.nim
@@ -80,6 +80,11 @@ proc init*(
 
   ok Era1DbRef(path: path, network: network, filenames: filenames)
 
+proc getEthBlock*(db: Era1DbRef, blockNumber: uint64): Result[EthBlock, string] =
+  let f = ?db.getEra1File(blockNumber.era)
+
+  f.getEthBlock(blockNumber)
+
 proc getBlockTuple*(db: Era1DbRef, blockNumber: uint64): Result[BlockTuple, string] =
   let f = ?db.getEra1File(blockNumber.era)
 
diff --git a/nimbus/evm/state.nim b/nimbus/evm/state.nim
index 7f6bf6fb0..f1f32cd0f 100644
--- a/nimbus/evm/state.nim
+++ b/nimbus/evm/state.nim
@@ -12,6 +12,7 @@
 
 import
   std/[options, sets, strformat],
+  stew/assign2,
   eth/keys,
   ../db/ledger,
   ../common/[common, evmforks],
@@ -28,7 +29,7 @@ proc init(
     tracer: TracerRef,
     flags: set[VMFlag] = self.flags) =
   ## Initialisation helper
-  self.parent = parent
+  assign(self.parent, parent)
   self.blockCtx = blockCtx
   self.gasPool = blockCtx.gasLimit
   self.com = com
diff --git a/nimbus/nimbus_import.nim b/nimbus/nimbus_import.nim
index 169098ee7..7f2793f30 100644
--- a/nimbus/nimbus_import.nim
+++ b/nimbus/nimbus_import.nim
@@ -104,9 +104,7 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
   if start <= lastEra1Block:
     notice "Importing era1 archive",
       start, dataDir = conf.dataDir.string, era1Dir = conf.era1Dir.string
-    var
-      headers: seq[BlockHeader]
-      bodies: seq[BlockBody]
+    var blocks: seq[EthBlock]
 
     func f(value: float): string =
       try:
@@ -117,7 +115,7 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
     template process() =
       let
         time1 = Moment.now()
-        statsRes = chain.persistBlocks(headers, bodies)
+        statsRes = chain.persistBlocks(blocks)
      if statsRes.isErr():
        error "Failed to persist blocks", error = statsRes.error
        quit(QuitFailure)
@@ -134,7 +132,7 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
        blocks = imported,
        txs,
        gas,
-        bps = f(headers.len.float / diff1),
+        bps = f(blocks.len.float / diff1),
        tps = f(statsRes[].txs.float / diff1),
        gps = f(statsRes[].gas.float / diff1),
        avgBps = f(imported.float / diff0),
@@ -150,7 +148,7 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
          csv.writeLine(
            [
              $blockNumber,
-              $headers.len,
+              $blocks.len,
              $statsRes[].txs,
              $statsRes[].gas,
              $(time2 - time1).nanoseconds(),
@@ -159,8 +157,7 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
          csv.flushFile()
        except IOError as exc:
          warn "Could not write csv", err = exc.msg
-      headers.setLen(0)
-      bodies.setLen(0)
+      blocks.setLen(0)
 
    let db =
      Era1DbRef.init(conf.era1Dir.string, "mainnet").expect("Era files present")
@@ -168,19 +165,17 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
      db.dispose()
 
    while running and imported < conf.maxBlocks and blockNumber <= lastEra1Block:
-      var blk = db.getBlockTuple(blockNumber).valueOr:
+      var blk = db.getEthBlock(blockNumber).valueOr:
        error "Could not load block from era1", blockNumber, error
        break
 
      imported += 1
+      blocks.add move(blk)
 
-      headers.add move(blk.header)
-      bodies.add move(blk.body)
-
-      if headers.lenu64 mod conf.chunkSize == 0:
+      if blocks.lenu64 mod conf.chunkSize == 0:
        process()
 
-    if headers.len > 0:
+    if blocks.len > 0:
      process() # last chunk, if any
 
  for blocksFile in conf.blocksFile:
diff --git a/nimbus/rpc/debug.nim b/nimbus/rpc/debug.nim
index c7fdf41c7..baa5b5e28 100644
--- a/nimbus/rpc/debug.nim
+++ b/nimbus/rpc/debug.nim
@@ -61,12 +61,11 @@ proc setupDebugRpc*(com: CommonRef, txPool: TxPoolRef, rpcsrv: RpcServer) =
     let
       txHash = ethHash(data)
       txDetails = chainDB.getTransactionKey(txHash)
-      blockHeader = chainDB.getBlockHeader(txDetails.blockNumber)
-      blockHash = chainDB.getBlockHash(txDetails.blockNumber)
-      blockBody = chainDB.getBlockBody(blockHash)
+      header = chainDB.getBlockHeader(txDetails.blockNumber)
+      transactions = chainDB.getTransactions(header)
       flags = traceOptionsToFlags(options)
 
-    result = traceTransaction(com, blockHeader, blockBody, txDetails.index, flags)
+    traceTransaction(com, header, transactions, txDetails.index, flags)
 
   rpcsrv.rpc("debug_dumpBlockStateByNumber") do(quantityTag: BlockTag) -> JsonNode:
     ## Retrieves the state that corresponds to the block number and returns
     ## a list of accounts (including storage and code).
     ##
     ## quantityTag: integer of a block number, or the string "earliest",
     ## "latest" or "pending", as in the default block parameter.
-    let
+    var
       header = chainDB.headerFromTag(quantityTag)
       blockHash = chainDB.getBlockHash(header.blockNumber)
       body = chainDB.getBlockBody(blockHash)
 
-    result = dumpBlockState(com, header, body)
+    dumpBlockState(com, EthBlock.init(move(header), move(body)))
 
   rpcsrv.rpc("debug_dumpBlockStateByHash") do(data: Web3Hash) -> JsonNode:
     ## Retrieves the state that corresponds to the block number and returns
     ## a list of accounts (including storage and code).
     ##
     ## data: Hash of a block.
-    let
+    var
       h = data.ethHash
-      header = chainDB.getBlockHeader(h)
-      blockHash = chainDB.getBlockHash(header.blockNumber)
-      body = chainDB.getBlockBody(blockHash)
+      blk = chainDB.getEthBlock(h)
 
-    result = dumpBlockState(com, header, body)
+    dumpBlockState(com, blk)
 
   rpcsrv.rpc("debug_traceBlockByNumber") do(quantityTag: BlockTag, options: Option[TraceOptions]) -> JsonNode:
     ## The traceBlock method will return a full stack trace of all invoked opcodes of all transaction
@@ -101,13 +98,13 @@ proc setupDebugRpc*(com: CommonRef, txPool: TxPoolRef, rpcsrv: RpcServer) =
     ## quantityTag: integer of a block number, or the string "earliest",
     ## "latest" or "pending", as in the default block parameter.
     ## options: see debug_traceTransaction
-    let
+    var
       header = chainDB.headerFromTag(quantityTag)
       blockHash = chainDB.getBlockHash(header.blockNumber)
       body = chainDB.getBlockBody(blockHash)
       flags = traceOptionsToFlags(options)
 
-    result = traceBlock(com, header, body, flags)
+    traceBlock(com, EthBlock.init(move(header), move(body)), flags)
 
   rpcsrv.rpc("debug_traceBlockByHash") do(data: Web3Hash, options: Option[TraceOptions]) -> JsonNode:
     ## The traceBlock method will return a full stack trace of all invoked opcodes of all transaction
@@ -115,14 +112,14 @@ proc setupDebugRpc*(com: CommonRef, txPool: TxPoolRef, rpcsrv: RpcServer) =
     ##
     ## data: Hash of a block.
     ## options: see debug_traceTransaction
-    let
+    var
      h = data.ethHash
      header = chainDB.getBlockHeader(h)
      blockHash = chainDB.getBlockHash(header.blockNumber)
      body = chainDB.getBlockBody(blockHash)
      flags = traceOptionsToFlags(options)
 
-    result = traceBlock(com, header, body, flags)
+    traceBlock(com, EthBlock.init(move(header), move(body)), flags)
 
   rpcsrv.rpc("debug_setHead") do(quantityTag: BlockTag) -> bool:
     ## Sets the current head of the local chain by block number.
@@ -130,29 +127,21 @@ proc setupDebugRpc*(com: CommonRef, txPool: TxPoolRef, rpcsrv: RpcServer) =
     ## Use with extreme caution.
     let header = chainDB.headerFromTag(quantityTag)
-    result = chainDB.setHead(header)
+    chainDB.setHead(header)
 
   rpcsrv.rpc("debug_getRawBlock") do(quantityTag: BlockTag) -> seq[byte]:
     ## Returns an RLP-encoded block.
-    let
+    var
       header = chainDB.headerFromTag(quantityTag)
       blockHash = chainDB.getBlockHash(header.blockNumber)
-
-    var
       body = chainDB.getBlockBody(blockHash)
-      ethBlock = EthBlock(
-        header: header,
-        txs: system.move(body.transactions),
-        uncles: system.move(body.uncles),
-        withdrawals: system.move(body.withdrawals),
-      )
-    result = rlp.encode(ethBlock)
+    rlp.encode(EthBlock.init(move(header), move(body)))
 
   rpcsrv.rpc("debug_getRawHeader") do(quantityTag: BlockTag) -> seq[byte]:
     ## Returns an RLP-encoded header.
     let header = chainDB.headerFromTag(quantityTag)
-    result = rlp.encode(header)
+    rlp.encode(header)
 
   rpcsrv.rpc("debug_getRawReceipts") do(quantityTag: BlockTag) -> seq[seq[byte]]:
     ## Returns an array of EIP-2718 binary-encoded receipts.
diff --git a/nimbus/rpc/experimental.nim b/nimbus/rpc/experimental.nim
index f2c9e6fed..7fd985782 100644
--- a/nimbus/rpc/experimental.nim
+++ b/nimbus/rpc/experimental.nim
@@ -37,12 +37,11 @@ proc getMultiKeys*(
   let
     chainDB = com.db
-    blockHash = chainDB.getBlockHash(blockHeader.blockNumber)
-    blockBody = chainDB.getBlockBody(blockHash)
+    blk = chainDB.getEthBlock(blockHeader.blockNumber)
     # Initializing the VM will throw a Defect if the state doesn't exist.
     # Once we enable pruning we will need to check if the block state has been pruned
     # before trying to initialize the VM as we do here.
-    vmState = BaseVMState.new(blockHeader, com).valueOr:
+    vmState = BaseVMState.new(blk.header, com).valueOr:
       raise newException(ValueError, "Cannot create vm state")
 
   vmState.collectWitnessData = true # Enable saving witness data
@@ -52,7 +51,7 @@ proc getMultiKeys*(
   defer: dbTx.dispose()
 
   # Execute the block of transactions and collect the keys of the touched account state
-  let processBlockResult = processBlock(vmState, blockHeader, blockBody)
+  let processBlockResult = processBlock(vmState, blk)
   doAssert processBlockResult == ValidationResult.OK
 
   let mkeys = vmState.stateDB.makeMultiKeys()
diff --git a/nimbus/sync/beacon/skeleton_db.nim b/nimbus/sync/beacon/skeleton_db.nim
index 70c29f9f9..248b7050e 100644
--- a/nimbus/sync/beacon/skeleton_db.nim
+++ b/nimbus/sync/beacon/skeleton_db.nim
@@ -196,11 +196,10 @@ proc resetCanonicalHead*(sk: SkeletonRef, newHead, oldHead: uint64) =
   sk.chain.com.syncCurrent = newHead.toBlockNumber
 
 proc insertBlocks*(sk: SkeletonRef,
-                   headers: openArray[BlockHeader],
-                   body: openArray[BlockBody],
+                   blocks: openArray[EthBlock],
                    fromEngine: bool): Result[uint64, string] =
-  discard ? sk.chain.persistBlocks(headers, body)
-  ok(headers.len.uint64)
+  discard ? sk.chain.persistBlocks(blocks)
+  ok(blocks.len.uint64)
 
 proc insertBlock*(sk: SkeletonRef,
                   header: BlockHeader,
@@ -209,4 +208,4 @@ proc insertBlock*(sk: SkeletonRef,
     return err(error)
   if maybeBody.isNone:
     return err("insertBlock: Block body not found: " & $header.u64)
-  sk.insertBlocks([header], [maybeBody.get], fromEngine)
+  sk.insertBlocks([EthBlock.init(header, maybeBody.get)], fromEngine)
diff --git a/nimbus/tracer.nim b/nimbus/tracer.nim
index f56c1ada2..7f0e58ce9 100644
--- a/nimbus/tracer.nim
+++ b/nimbus/tracer.nim
@@ -110,7 +110,8 @@ const
   internalTxName = "internalTx"
 
 proc traceTransaction*(com: CommonRef, header: BlockHeader,
-                       body: BlockBody, txIndex: int, tracerFlags: set[TracerFlags] = {}): JsonNode =
+                       transactions: openArray[Transaction], txIndex: int,
+                       tracerFlags: set[TracerFlags] = {}): JsonNode =
   let
     # we add a memory layer between backend/lower layer db
     # and capture state db snapshot during transaction execution
@@ -128,8 +129,8 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
     capture.forget()
 
   if header.txRoot == EMPTY_ROOT_HASH: return newJNull()
-  doAssert(body.transactions.calcTxRoot == header.txRoot)
-  doAssert(body.transactions.len != 0)
+  doAssert(transactions.calcTxRoot == header.txRoot)
+  doAssert(transactions.len != 0)
 
   var
     gasUsed: GasInt
@@ -142,7 +143,7 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
   let
     miner = vmState.coinbase()
 
-  for idx, tx in body.transactions:
+  for idx, tx in transactions:
     let sender = tx.getSender
     let recipient = tx.getRecipient(sender)
@@ -191,7 +192,8 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
   if TracerFlags.DisableState notin tracerFlags:
     result.dumpMemoryDB(capture)
 
-proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpState = false): JsonNode =
+proc dumpBlockState*(com: CommonRef, blk: EthBlock, dumpState = false): JsonNode =
+  template header: BlockHeader = blk.header
   let
     parent = com.db.getParentHeader(header)
     capture = com.db.newCapture.value
@@ -213,7 +215,7 @@ proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpS
     after = newJArray()
     stateBefore = LedgerRef.init(capture.recorder, parent.stateRoot)
 
-  for idx, tx in body.transactions:
+  for idx, tx in blk.transactions:
     let sender = tx.getSender
     let recipient = tx.getRecipient(sender)
     before.captureAccount(stateBefore, sender, senderName & $idx)
@@ -221,14 +223,14 @@ proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpS
 
   before.captureAccount(stateBefore, miner, minerName)
 
-  for idx, uncle in body.uncles:
+  for idx, uncle in blk.uncles:
     before.captureAccount(stateBefore, uncle.coinbase, uncleName & $idx)
 
-  discard vmState.processBlock(header, body)
+  discard vmState.processBlock(blk)
 
   var stateAfter = vmState.stateDB
 
-  for idx, tx in body.transactions:
+  for idx, tx in blk.transactions:
     let sender = tx.getSender
     let recipient = tx.getRecipient(sender)
     after.captureAccount(stateAfter, sender, senderName & $idx)
@@ -238,7 +240,7 @@ proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpS
   after.captureAccount(stateAfter, miner, minerName)
   tracerInst.removeTracedAccounts(miner)
 
-  for idx, uncle in body.uncles:
+  for idx, uncle in blk.uncles:
     after.captureAccount(stateAfter, uncle.coinbase, uncleName & $idx)
     tracerInst.removeTracedAccounts(uncle.coinbase)
 
@@ -254,7 +256,8 @@ proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpS
   if dumpState:
     result.dumpMemoryDB(capture)
 
-proc traceBlock*(com: CommonRef, header: BlockHeader, body: BlockBody, tracerFlags: set[TracerFlags] = {}): JsonNode =
+proc traceBlock*(com: CommonRef, blk: EthBlock, tracerFlags: set[TracerFlags] = {}): JsonNode =
+  template header: BlockHeader = blk.header
   let
     capture = com.db.newCapture.value
     captureCom = com.clone(capture.recorder)
@@ -269,12 +272,12 @@ proc traceBlock*(com: CommonRef, header: BlockHeader, body: BlockBody, tracerFla
     capture.forget()
 
   if header.txRoot == EMPTY_ROOT_HASH: return newJNull()
-  doAssert(body.transactions.calcTxRoot == header.txRoot)
-  doAssert(body.transactions.len != 0)
+  doAssert(blk.transactions.calcTxRoot == header.txRoot)
+  doAssert(blk.transactions.len != 0)
 
   var gasUsed = GasInt(0)
 
-  for tx in body.transactions:
+  for tx in blk.transactions:
     let
       sender = tx.getSender
       rc = vmState.processTransaction(tx, sender, header)
@@ -287,14 +290,14 @@ proc traceBlock*(com: CommonRef, header: BlockHeader, body: BlockBody, tracerFla
   if TracerFlags.DisableState notin tracerFlags:
     result.dumpMemoryDB(capture)
 
-proc traceTransactions*(com: CommonRef, header: BlockHeader, blockBody: BlockBody): JsonNode =
+proc traceTransactions*(com: CommonRef, header: BlockHeader, transactions: openArray[Transaction]): JsonNode =
   result = newJArray()
-  for i in 0 ..< blockBody.transactions.len:
-    result.add traceTransaction(com, header, blockBody, i, {DisableState})
+  for i in 0 ..< transactions.len:
+    result.add traceTransaction(com, header, transactions, i, {DisableState})
 
-proc dumpDebuggingMetaData*(vmState: BaseVMState, header: BlockHeader,
-                            blockBody: BlockBody, launchDebugger = true) =
+proc dumpDebuggingMetaData*(vmState: BaseVMState, blk: EthBlock, launchDebugger = true) =
+  template header: BlockHeader = blk.header
   let
     com = vmState.com
     blockNumber = header.blockNumber
@@ -312,9 +315,9 @@ proc dumpDebuggingMetaData*(vmState: BaseVMState, header: BlockHeader,
   var metaData = %{
     "blockNumber": %blockNumber.toHex,
-    "txTraces": traceTransactions(captureCom, header, blockBody),
-    "stateDump": dumpBlockState(captureCom, header, blockBody),
-    "blockTrace": traceBlock(captureCom, header, blockBody, {DisableState}),
+    "txTraces": traceTransactions(captureCom, header, blk.transactions),
+    "stateDump": dumpBlockState(captureCom, blk),
+    "blockTrace": traceBlock(captureCom, blk, {DisableState}),
     "receipts": toJson(vmState.receipts),
     "block": blockSummary
   }
diff --git a/premix/debug.nim b/premix/debug.nim
index 4f56c1d2c..ec0c24c54 100644
--- a/premix/debug.nim
+++ b/premix/debug.nim
@@ -29,19 +29,17 @@ proc prepareBlockEnv(node: JsonNode, memoryDB: CoreDbRef) =
     raiseAssert "prepareBlockEnv(): put() (loop) failed " & $$error
 
 proc executeBlock(blockEnv: JsonNode, memoryDB: CoreDbRef, blockNumber: UInt256) =
-  let
+  var
     parentNumber = blockNumber - 1
     com = CommonRef.new(memoryDB)
     parent = com.db.getBlockHeader(parentNumber)
-    header = com.db.getBlockHeader(blockNumber)
-    body = com.db.getBlockBody(header.blockHash)
-
+    blk = com.db.getEthBlock(blockNumber)
   let transaction = memoryDB.newTransaction()
   defer: transaction.dispose()
 
   let
-    vmState = BaseVMState.new(parent, header, com)
-    validationResult = vmState.processBlock(header, body)
+    vmState = BaseVMState.new(parent, blk.header, com)
+    validationResult = vmState.processBlock(blk)
 
   if validationResult != ValidationResult.OK:
     error "block validation error", validationResult
@@ -49,7 +47,7 @@ proc executeBlock(blockEnv: JsonNode, memoryDB: CoreDbRef, blockNumber: UInt256)
     info "block validation success", validationResult, blockNumber
 
   transaction.rollback()
-  vmState.dumpDebuggingMetaData(header, body, false)
+  vmState.dumpDebuggingMetaData(blk, false)
 
   let
     fileName = "debug" & $blockNumber & ".json"
     nimbus = json.parseFile(fileName)
@@ -62,7 +60,7 @@ proc executeBlock(blockEnv: JsonNode, memoryDB: CoreDbRef, blockNumber: UInt256)
 
   # prestate data goes to debug tool and contains data
   # needed to execute single block
-  generatePrestate(nimbus, geth, blockNumber, parent, header, body)
+  generatePrestate(nimbus, geth, blockNumber, parent, blk)
 
 proc main() =
   if paramCount() == 0:
diff --git a/premix/dumper.nim b/premix/dumper.nim
index 97989a9e2..6f2d1804a 100644
--- a/premix/dumper.nim
+++ b/premix/dumper.nim
@@ -33,19 +33,17 @@ proc dumpDebug(com: CommonRef, blockNumber: UInt256) =
   defer: transaction.dispose()
 
-  let
+  var
     parentNumber = blockNumber - 1
     parent = captureCom.db.getBlockHeader(parentNumber)
-    header = captureCom.db.getBlockHeader(blockNumber)
-    headerHash = header.blockHash
-    body = captureCom.db.getBlockBody(headerHash)
-    vmState = BaseVMState.new(parent, header, captureCom)
+    blk = captureCom.db.getEthBlock(blockNumber)
+    vmState = BaseVMState.new(parent, blk.header, captureCom)
 
   discard captureCom.db.setHead(parent, true)
-  discard vmState.processBlock(header, body)
+  discard vmState.processBlock(blk)
   transaction.rollback()
-  vmState.dumpDebuggingMetaData(header, body, false)
+  vmState.dumpDebuggingMetaData(blk, false)
 
 proc main() {.used.} =
   let conf = getConfiguration()
diff --git a/premix/persist.nim b/premix/persist.nim
index 121b171b6..943b7db88 100644
--- a/premix/persist.nim
+++ b/premix/persist.nim
@@ -78,9 +78,8 @@ proc main() {.used.} =
 
   let numBlocksToCommit = conf.numCommits
 
-  var headers = newSeqOfCap[BlockHeader](numBlocksToCommit)
-  var bodies = newSeqOfCap[BlockBody](numBlocksToCommit)
-  var one = 1.u256
+  var blocks = newSeqOfCap[EthBlock](numBlocksToCommit)
+  var one = 1.u256
 
   var numBlocks = 0
   var counter = 0
@@ -99,8 +98,7 @@ proc main() {.used.} =
       else:
         raise e
 
-    headers.add thisBlock.header
-    bodies.add thisBlock.body
+    blocks.add EthBlock.init(thisBlock.header, thisBlock.body)
     info "REQUEST HEADER", blockNumber=blockNumber,
       txs=thisBlock.body.transactions.len
 
     inc numBlocks
@@ -108,12 +106,11 @@ proc main() {.used.} =
     if numBlocks == numBlocksToCommit:
       persistToDb(com.db):
-        let res = chain.persistBlocks(headers, bodies)
+        let res = chain.persistBlocks(blocks)
         res.isOkOr:
           raise newException(ValidationError, "Error when validating blocks: " & res.error)
       numBlocks = 0
-      headers.setLen(0)
-      bodies.setLen(0)
+      blocks.setLen(0)
 
     inc counter
     if conf.maxBlocks != 0 and counter >= conf.maxBlocks:
@@ -121,7 +118,7 @@ proc main() {.used.} =
 
   if numBlocks > 0:
     persistToDb(com.db):
-      let res = chain.persistBlocks(headers, bodies)
+      let res = chain.persistBlocks(blocks)
       res.isOkOr:
         raise newException(ValidationError, "Error when validating blocks: " & res.error)
 
diff --git a/premix/premix.nim b/premix/premix.nim
index 5e8fd07dc..2fdb78b7c 100644
--- a/premix/premix.nim
+++ b/premix/premix.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2020-2023 Status Research & Development GmbH
+# Copyright (c) 2020-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -62,7 +62,9 @@ proc main() =
 
       # prestate data goes to debug tool and contains data
       # needed to execute single block
-      generatePrestate(nimbus, geth, blockNumber, parentBlock.header, thisBlock.header, thisBlock.body)
+      generatePrestate(
+        nimbus, geth, blockNumber, parentBlock.header,
+        EthBlock.init(thisBlock.header, thisBlock.body))
 
       printDebugInstruction(blockNumber)
   except CatchableError:
diff --git a/premix/prestate.nim b/premix/prestate.nim
index dfc3d04d3..2cb6e8739 100644
--- a/premix/prestate.nim
+++ b/premix/prestate.nim
@@ -14,7 +14,8 @@ import
   ../nimbus/db/[core_db, storage_types],
   eth/[rlp, common], ../nimbus/tracer
 
-proc generatePrestate*(nimbus, geth: JsonNode, blockNumber: UInt256, parent, header: BlockHeader, body: BlockBody) =
+proc generatePrestate*(nimbus, geth: JsonNode, blockNumber: UInt256, parent: BlockHeader, blk: EthBlock) =
+  template header: BlockHeader = blk.header
   let
     state = nimbus["state"]
     headerHash = rlpHash(header)
@@ -22,8 +23,8 @@ proc generatePrestate*(nimbus, geth: JsonNode, blockNumber: UInt256, parent, hea
     kvt = chainDB.newKvt()
 
   discard chainDB.setHead(parent, true)
-  discard chainDB.persistTransactions(blockNumber, body.transactions)
-  discard chainDB.persistUncles(body.uncles)
+  discard chainDB.persistTransactions(blockNumber, blk.transactions)
+  discard chainDB.persistUncles(blk.uncles)
 
   kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr:
     raiseAssert "generatePrestate(): put() failed " & $$error
diff --git a/premix/regress.nim b/premix/regress.nim
index 2c8d9d020..284db8e29 100644
--- a/premix/regress.nim
+++ b/premix/regress.nim
@@ -24,12 +24,10 @@ proc validateBlock(com: CommonRef, blockNumber: BlockNumber): BlockNumber =
   var
     parentNumber = blockNumber - 1
     parent = com.db.getBlockHeader(parentNumber)
-    headers = newSeq[BlockHeader](numBlocks)
-    bodies = newSeq[BlockBody](numBlocks)
+    blocks = newSeq[EthBlock](numBlocks)
 
   for i in 0 ..< numBlocks:
-    headers[i] = com.db.getBlockHeader(blockNumber + i.u256)
-    bodies[i] = com.db.getBlockBody(headers[i].blockHash)
+    blocks[i] = com.db.getEthBlock(blockNumber + i.u256)
 
   let transaction = com.db.newTransaction()
   defer: transaction.dispose()
@@ -39,13 +37,13 @@ proc validateBlock(com: CommonRef, blockNumber: BlockNumber): BlockNumber =
     stdout.write "\r"
 
     let
-      vmState = BaseVMState.new(parent, headers[i], com)
-      validationResult = vmState.processBlock(headers[i], bodies[i])
+      vmState = BaseVMState.new(parent, blocks[i].header, com)
+      validationResult = vmState.processBlock(blocks[i])
 
     if validationResult != ValidationResult.OK:
       error "block validation error",
        validationResult, blockNumber = blockNumber + i.u256
 
-    parent = headers[i]
+    parent = blocks[i].header
 
   transaction.rollback()
   result = blockNumber + numBlocks.u256
diff --git a/tests/persistBlockTestGen.nim b/tests/persistBlockTestGen.nim
index fa3340267..f9076414d 100644
--- a/tests/persistBlockTestGen.nim
+++ b/tests/persistBlockTestGen.nim
@@ -28,15 +28,11 @@ proc dumpTest(com: CommonRef, blockNumber: int) =
 
   let
     parent = captureCom.db.getBlockHeader(parentNumber)
-    header = captureCom.db.getBlockHeader(blockNumber)
-    headerHash = header.blockHash
-    blockBody = captureCom.db.getBlockBody(headerHash)
+    blk = captureCom.db.getEthBlock(blockNumber)
     chain = newChain(captureCom)
-    headers = @[header]
-    bodies = @[blockBody]
 
   discard captureCom.db.setHead(parent, true)
-  discard chain.persistBlocks(headers, bodies)
+  discard chain.persistBlocks([blk])
 
   var metaData = %{
     "blockNumber": %blockNumber.toHex
diff --git a/tests/replay/undump_blocks.nim b/tests/replay/undump_blocks.nim
index b34c13afd..822cf3bab 100644
--- a/tests/replay/undump_blocks.nim
+++ b/tests/replay/undump_blocks.nim
@@ -21,7 +21,7 @@ iterator undumpBlocks*(
     file: string;
     least = low(uint64);          # First block to extract
     stopAfter = high(uint64);     # Last block to extract
-      ): (seq[BlockHeader],seq[BlockBody]) =
+      ): seq[EthBlock] =
   if file.dirExists:
     for w in file.undumpBlocksEra1(least, stopAfter):
       yield w
@@ -38,7 +38,7 @@ iterator undumpBlocks*(
     files: seq[string];
     least = low(uint64);          # First block to extract
     stopAfter = high(uint64);     # Last block to extract
-      ): (seq[BlockHeader],seq[BlockBody]) =
+      ): seq[EthBlock] =
   for f in files:
     for w in f.undumpBlocks(least, stopAfter):
       yield w
diff --git a/tests/replay/undump_blocks_era1.nim b/tests/replay/undump_blocks_era1.nim
index 886424003..f02dcdf54 100644
--- a/tests/replay/undump_blocks_era1.nim
+++ b/tests/replay/undump_blocks_era1.nim
@@ -20,7 +20,7 @@ iterator undumpBlocksEra1*(
     dir: string,
     least = low(uint64),          # First block to extract
     stopAfter = high(uint64),     # Last block to extract
-): (seq[BlockHeader], seq[BlockBody]) =
+): seq[EthBlock] =
   let db = Era1DbRef.init(dir, "mainnet").expect("Era files present")
   defer:
     db.dispose()
@@ -28,25 +28,22 @@ iterator undumpBlocksEra1*(
   # TODO it would be a lot more natural for this iterator to return 1 block at
   # a time and let the consumer do the chunking
   const blocksPerYield = 192
-  var tmp =
-    (newSeqOfCap[BlockHeader](blocksPerYield), newSeqOfCap[BlockBody](blocksPerYield))
+  var tmp = newSeqOfCap[EthBlock](blocksPerYield)
   for i in 0 ..< stopAfter:
-    var bck = db.getBlockTuple(least + i).valueOr:
+    var bck = db.getEthBlock(least + i).valueOr:
       doAssert i > 0, "expected at least one block"
       break
 
-    tmp[0].add move(bck[0])
-    tmp[1].add move(bck[1])
+    tmp.add move(bck)
 
     # Genesis block requires a chunk of its own, for compatibility with current
     # test setup (a bit weird, that...)
-    if tmp[0].len mod blocksPerYield == 0 or tmp[0][0].blockNumber == 0:
+    if tmp.len mod blocksPerYield == 0 or tmp[0].header.blockNumber == 0:
       yield tmp
-      tmp[0].setLen(0)
-      tmp[1].setLen(0)
+      tmp.setLen(0)
 
-  if tmp[0].len > 0:
+  if tmp.len > 0:
     yield tmp
 
 # ------------------------------------------------------------------------------
diff --git a/tests/replay/undump_blocks_gz.nim b/tests/replay/undump_blocks_gz.nim
index e02c82df8..c3f2d50a4 100644
--- a/tests/replay/undump_blocks_gz.nim
+++ b/tests/replay/undump_blocks_gz.nim
@@ -82,10 +82,9 @@ proc dumpBlocksNl*(db: CoreDbRef; headers: openArray[BlockHeader];
 # Public undump
 # ------------------------------------------------------------------------------
 
-iterator undumpBlocksGz*(gzFile: string): (seq[BlockHeader],seq[BlockBody]) =
+iterator undumpBlocksGz*(gzFile: string): seq[EthBlock] =
   var
-    headerQ: seq[BlockHeader]
-    bodyQ: seq[BlockBody]
+    blockQ: seq[EthBlock]
     current = 0u
     start = 0u
     top = 0u
@@ -109,8 +108,7 @@ iterator undumpBlocksGz*(gzFile: string): (seq[BlockHeader],seq[BlockBody]) =
           top = start + flds[2].parseUInt
           current = start
           waitFor = ""
-          headerQ.reset
-          bodyQ.reset
+          blockQ.reset
           continue
         else:
           echo &"*** Ignoring line({lno}): {line}."
@@ -123,8 +121,8 @@ iterator undumpBlocksGz*(gzFile: string): (seq[BlockHeader],seq[BlockBody]) = var rlpHeader = flds[1].rlpFromHex rlpBody = flds[2].rlpFromHex - headerQ.add rlpHeader.read(BlockHeader) - bodyQ.add rlpBody.read(BlockBody) + blockQ.add EthBlock.init( + rlpHeader.read(BlockHeader), rlpBody.read(BlockBody)) current.inc continue else: @@ -135,14 +133,14 @@ iterator undumpBlocksGz*(gzFile: string): (seq[BlockHeader],seq[BlockBody]) = say &"*** commit({lno}) #{start}..{top-1}" else: echo &"*** commit({lno}) error, current({current}) should be {top}" - yield (headerQ, bodyQ) + yield blockQ waitFor = "transaction" continue echo &"*** Ignoring line({lno}): {line}." waitFor = "transaction" -iterator undumpBlocksGz*(gzs: seq[string]): (seq[BlockHeader],seq[BlockBody]) = +iterator undumpBlocksGz*(gzs: seq[string]): seq[EthBlock] = ## Variant of `undumpBlocks()` for f in gzs: for w in f.undumpBlocksGz: @@ -152,14 +150,14 @@ iterator undumpBlocksGz*( gzFile: string; # Data dump file least: uint64; # First block to extract stopAfter = high(uint64); # Last block to extract - ): (seq[BlockHeader],seq[BlockBody]) = + ): seq[EthBlock] = ## Variant of `undumpBlocks()` - for (seqHdr,seqBdy) in gzFile.undumpBlocksGz: - let (h,b) = startAt(seqHdr, seqBdy, least) - if h.len == 0: + for seqBlock in gzFile.undumpBlocksGz: + let b = startAt(seqBlock, least) + if b.len == 0: continue - let w = stopAfter(h, b, stopAfter) - if w[0].len == 0: + let w = stopAfter(b, stopAfter) + if w.len == 0: break yield w diff --git a/tests/replay/undump_helpers.nim b/tests/replay/undump_helpers.nim index 199c7dd36..051753c99 100644 --- a/tests/replay/undump_helpers.nim +++ b/tests/replay/undump_helpers.nim @@ -17,34 +17,32 @@ import # ------------------------------------------------------------------------------ proc startAt*( - h: openArray[BlockHeader]; - b: openArray[BlockBody]; + h: openArray[EthBlock]; start: uint64; - ): (seq[BlockHeader],seq[BlockBody]) = + ): seq[EthBlock] = ## Filter out blocks with smaller `blockNumber` - if start.toBlockNumber <= h[0].blockNumber: - return (h.toSeq,b.toSeq) - if start.toBlockNumber <= h[^1].blockNumber: + if start.toBlockNumber <= h[0].header.blockNumber: + return h.toSeq() + if start.toBlockNumber <= h[^1].header.blockNumber: # There are at least two headers, find the least acceptable one var n = 1 - while h[n].blockNumber < start.toBlockNumber: + while h[n].header.blockNumber < start.toBlockNumber: n.inc - return (h[n ..< h.len], b[n ..< b.len]) + return h[n ..< h.len] proc stopAfter*( - h: openArray[BlockHeader]; - b: openArray[BlockBody]; + h: openArray[EthBlock]; last: uint64; - ): (seq[BlockHeader],seq[BlockBody]) = + ): seq[EthBlock] = ## Filter out blocks with larger `blockNumber` - if h[^1].blockNumber <= last.toBlockNumber: - return (h.toSeq,b.toSeq) - if h[0].blockNumber <= last.toBlockNumber: + if h[^1].header.blockNumber <= last.toBlockNumber: + return h.toSeq() + if h[0].header.blockNumber <= last.toBlockNumber: # There are at least two headers, find the last acceptable one var n = 1 - while h[n].blockNumber <= last.toBlockNumber: + while h[n].header.blockNumber <= last.toBlockNumber: n.inc - return (h[0 ..< n], b[0 ..< n]) + return h[0 ..< n] # ------------------------------------------------------------------------------ # End diff --git a/tests/test_accounts_cache.nim b/tests/test_accounts_cache.nim index abec620cc..dac3c86f5 100644 --- a/tests/test_accounts_cache.nim +++ b/tests/test_accounts_cache.nim @@ -295,8 +295,8 @@ proc runner(noisy = true; capture 
= goerliCapture) = test &"Import from {fileInfo}": # Import minimum amount of blocks, then collect transactions for chain in filePath.undumpBlocks: - let leadBlkNum = chain[0][0].blockNumber - topNumber = chain[0][^1].blockNumber + let leadBlkNum = chain[0].header.blockNumber + topNumber = chain[^1].header.blockNumber if loadTxs <= txs.len: break @@ -308,16 +308,16 @@ proc runner(noisy = true; capture = goerliCapture) = # Import block chain blocks if leadBlkNum < loadBlocks: - com.importBlocks(chain[0],chain[1]) + com.importBlocks(chain) continue # Import transactions - for inx in 0 ..< chain[0].len: - let blkTxs = chain[1][inx].transactions + for inx in 0 ..< chain.len: + let blkTxs = chain[inx].transactions # Continue importing up until first non-trivial block if txs.len == 0 and blkTxs.len == 0: - com.importBlocks(@[chain[0][inx]],@[chain[1][inx]]) + com.importBlocks([chain[inx]]) continue # Load transactions diff --git a/tests/test_beacon/test_5_canonical_past_genesis.nim b/tests/test_beacon/test_5_canonical_past_genesis.nim index 622e5848e..e2d073511 100644 --- a/tests/test_beacon/test_5_canonical_past_genesis.nim +++ b/tests/test_beacon/test_5_canonical_past_genesis.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2023 Status Research & Development GmbH +# Copyright (c) 2023-2024 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at # https://opensource.org/licenses/MIT). @@ -19,7 +19,7 @@ proc test5*() = suite "should fill the canonical chain after being linked to a canonical block past genesis": let env = setupEnv() let skel = SkeletonRef.new(env.chain) - + test "skel open ok": let res = skel.open() check res.isOk @@ -43,7 +43,8 @@ proc test5*() = check res.isOk test "canonical height should be at block 2": - let r = skel.insertBlocks([block1, block2], [emptyBody, emptyBody], false) + let r = skel.insertBlocks([ + EthBlock.init(block1, emptyBody), EthBlock.init(block2, emptyBody)], false) check r.isOk check r.get == 2 diff --git a/tests/test_blockchain_json.nim b/tests/test_blockchain_json.nim index 34527efb2..71606b729 100644 --- a/tests/test_blockchain_json.nim +++ b/tests/test_blockchain_json.nim @@ -254,7 +254,7 @@ proc collectDebugData(ctx: var TestCtx) = } proc runTestCtx(ctx: var TestCtx, com: CommonRef, testStatusIMPL: var TestStatus) = - discard com.db.persistHeaderToDb(ctx.genesisHeader, + com.db.persistHeaderToDb(ctx.genesisHeader, com.consensus == ConsensusType.POS) check com.db.getCanonicalHead().blockHash == ctx.genesisHeader.blockHash let checkSeal = ctx.shouldCheckSeal diff --git a/tests/test_coredb/test_chainsync.nim b/tests/test_coredb/test_chainsync.nim index 0767bfcf9..014c80dff 100644 --- a/tests/test_coredb/test_chainsync.nim +++ b/tests/test_coredb/test_chainsync.nim @@ -213,9 +213,9 @@ proc test_chainSync*( sample = done for w in files.undumpBlocks(least = start): - let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber) + let (fromBlock, toBlock) = (w[0].header.blockNumber, w[^1].header.blockNumber) if fromBlock == 0.u256: - xCheck w[0][0] == com.db.getBlockHeader(0.u256) + xCheck w[0].header == com.db.getBlockHeader(0.u256) continue # Process groups of blocks ... @@ -230,10 +230,10 @@ proc test_chainSync*( noisy.whisper "***", &"processing ...[#{fromBlock:>8},#{toBlock:>8}]..." 
if enaLogging: - noisy.startLogging(w[0][0].blockNumber) + noisy.startLogging(w[0].header.blockNumber) noisy.stopLoggingAfter(): - let runPersistBlocksRc = chain.persistBlocks(w[0], w[1]) + let runPersistBlocksRc = chain.persistBlocks(w) xCheck runPersistBlocksRc.isOk(): if noisy: noisy.whisper "***", "Re-run with logging enabled...\n" @@ -241,8 +241,8 @@ proc test_chainSync*( com.db.trackLegaApi = false com.db.trackNewApi = false com.db.trackLedgerApi = false - discard chain.persistBlocks(w[0], w[1]) - blocks += w[0].len + discard chain.persistBlocks(w) + blocks += w.len continue # Last group or single block @@ -252,31 +252,28 @@ proc test_chainSync*( # and execute them first. Then the next batch starts with the `lastBlock`. let pivot = (lastBlock - fromBlock).truncate(uint) - headers9 = w[0][pivot .. ^1] - bodies9 = w[1][pivot .. ^1] - doAssert lastBlock == headers9[0].blockNumber + blocks9 = w[pivot .. ^1] + doAssert lastBlock == blocks9[0].header.blockNumber # Process leading batch before `lastBlock` (if any) var dotsOrSpace = "..." if fromBlock < lastBlock: let - headers1 = w[0][0 ..< pivot] - bodies1 = w[1][0 ..< pivot] + blocks1 = w[0 ..< pivot] if oldLogAlign: noisy.whisper "***", &"processing ...[#{fromBlock},#{toBlock}]...\n" else: sayPerf noisy.whisper "***", &"processing {dotsOrSpace}[#{fromBlock:>8},#{(lastBlock-1):>8}]" - let runPersistBlocks1Rc = chain.persistBlocks(headers1, bodies1) + let runPersistBlocks1Rc = chain.persistBlocks(blocks1) xCheck runPersistBlocks1Rc.isOk() dotsOrSpace = " " - noisy.startLogging(headers9[0].blockNumber) + noisy.startLogging(blocks9[0].header.blockNumber) if lastOneExtra: let - headers0 = headers9[0..0] - bodies0 = bodies9[0..0] + blocks0 = blocks9[0..0] if oldLogAlign: noisy.whisper "***", &"processing {dotsOrSpace}[#{fromBlock},#{lastBlock-1}]\n" @@ -285,7 +282,7 @@ proc test_chainSync*( noisy.whisper "***", &"processing {dotsOrSpace}[#{lastBlock:>8},#{lastBlock:>8}]" noisy.stopLoggingAfter(): - let runPersistBlocks0Rc = chain.persistBlocks(headers0, bodies0) + let runPersistBlocks0Rc = chain.persistBlocks(blocks0) xCheck runPersistBlocks0Rc.isOk() else: if oldLogAlign: @@ -296,7 +293,7 @@ proc test_chainSync*( noisy.whisper "***", &"processing {dotsOrSpace}[#{lastBlock:>8},#{toBlock:>8}]" noisy.stopLoggingAfter(): - let runPersistBlocks9Rc = chain.persistBlocks(headers9, bodies9) + let runPersistBlocks9Rc = chain.persistBlocks(blocks9) xCheck runPersistBlocks9Rc.isOk() break if not oldLogAlign: diff --git a/tests/test_persistblock_json.nim b/tests/test_persistblock_json.nim index 9d581dcdc..1eba2b040 100644 --- a/tests/test_persistblock_json.nim +++ b/tests/test_persistblock_json.nim @@ -30,16 +30,12 @@ proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus) = let parentNumber = blockNumber - 1 parent = com.db.getBlockHeader(parentNumber) - header = com.db.getBlockHeader(blockNumber) - headerHash = header.blockHash - blockBody = com.db.getBlockBody(headerHash) + blk = com.db.getEthBlock(blockNumber) chain = newChain(com) - headers = @[header] - bodies = @[blockBody] # it's ok if setHead fails here because of missing ancestors discard com.db.setHead(parent, true) - let validationResult = chain.persistBlocks(headers, bodies) + let validationResult = chain.persistBlocks([blk]) check validationResult.isOk() proc persistBlockJsonMain*() = diff --git a/tests/test_rpc.nim b/tests/test_rpc.nim index 85b383b5a..53c30579e 100644 --- a/tests/test_rpc.nim +++ b/tests/test_rpc.nim @@ -192,7 +192,7 @@ proc setupEnv(com: CommonRef, signer, ks2: 
EthAddress, ctx: EthContext): TestEnv let uncles = [header] header.ommersHash = com.db.persistUncles(uncles) - discard com.db.persistHeaderToDb(header, + com.db.persistHeaderToDb(header, com.consensus == ConsensusType.POS) com.db.persistFixtureBlock() result = TestEnv( diff --git a/tests/test_rpc_experimental_json.nim b/tests/test_rpc_experimental_json.nim index f8efc1c2a..aa1973480 100644 --- a/tests/test_rpc_experimental_json.nim +++ b/tests/test_rpc_experimental_json.nim @@ -41,19 +41,15 @@ proc importBlockData(node: JsonNode): (CommonRef, Hash256, Hash256, UInt256) {. let parentNumber = blockNumber - 1 parent = com.db.getBlockHeader(parentNumber) - header = com.db.getBlockHeader(blockNumber) - headerHash = header.blockHash - blockBody = com.db.getBlockBody(headerHash) + blk = com.db.getEthBlock(blockNumber) chain = newChain(com) - headers = @[header] - bodies = @[blockBody] # it's ok if setHead fails here because of missing ancestors discard com.db.setHead(parent, true) - let validationResult = chain.persistBlocks(headers, bodies) + let validationResult = chain.persistBlocks([blk]) doAssert validationResult.isOk() - return (com, parent.stateRoot, header.stateRoot, blockNumber) + return (com, parent.stateRoot, blk.header.stateRoot, blockNumber) proc checkAndValidateProofs( db: CoreDbRef, diff --git a/tests/test_tracer_json.nim b/tests/test_tracer_json.nim index 72114d690..e41d0943b 100644 --- a/tests/test_tracer_json.nim +++ b/tests/test_tracer_json.nim @@ -92,13 +92,11 @@ proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: C # Some hack for `Aristo` using the `snap` protocol proof-loader memoryDB.preLoadAristoDb(state, blockNumber) - var header = com.db.getBlockHeader(blockNumber) - var headerHash = header.blockHash - var blockBody = com.db.getBlockBody(headerHash) + var blk = com.db.getEthBlock(blockNumber) - let txTraces = traceTransactions(com, header, blockBody) - let stateDump = dumpBlockState(com, header, blockBody) - let blockTrace = traceBlock(com, header, blockBody, {DisableState}) + let txTraces = traceTransactions(com, blk.header, blk.transactions) + let stateDump = dumpBlockState(com, blk) + let blockTrace = traceBlock(com, blk, {DisableState}) check node["txTraces"] == txTraces check node["stateDump"] == stateDump diff --git a/tests/test_txpool2.nim b/tests/test_txpool2.nim index c0ad50b2d..8690cd959 100644 --- a/tests/test_txpool2.nim +++ b/tests/test_txpool2.nim @@ -172,7 +172,7 @@ proc runTxPoolPosTest() = check blk.txs.len == 1 test "PoS persistBlocks": - let rr = chain.persistBlocks([blk.header], [body]) + let rr = chain.persistBlocks([EthBlock.init(blk.header, body)]) check rr.isOk() test "validate TxPool prevRandao setter": @@ -235,7 +235,7 @@ proc runTxPoolBlobhashTest() = check blk.txs.len == 2 test "Blobhash persistBlocks": - let rr = chain.persistBlocks([blk.header], [body]) + let rr = chain.persistBlocks([EthBlock.init(blk.header, body)]) check rr.isOk() test "validate TxPool prevRandao setter": @@ -317,7 +317,7 @@ proc runTxHeadDelta(noisy = true) = uncles: blk.uncles) # Commit to block chain - check chain.persistBlocks([blk.header], [body]).isOk + check chain.persistBlocks([EthBlock.init(blk.header, body)]).isOk # Synchronise TxPool against new chain head, register txs differences. 
# In this particular case, these differences will simply flush the diff --git a/tests/tracerTestGen.nim b/tests/tracerTestGen.nim index 1ed2dbc42..3967a7505 100644 --- a/tests/tracerTestGen.nim +++ b/tests/tracerTestGen.nim @@ -25,13 +25,11 @@ proc dumpTest(com: CommonRef, blockNumber: int) = captureCom = com.clone(capture.recorder) let - header = captureCom.db.getBlockHeader(blockNumber) - headerHash = header.blockHash - blockBody = captureCom.db.getBlockBody(headerHash) - txTrace = traceTransactions(captureCom, header, blockBody) - stateDump = dumpBlockState(captureCom, header, blockBody) - blockTrace = traceBlock(captureCom, header, blockBody, {DisableState}) - receipts = dumpReceipts(captureCom.db, header) + blk = captureCom.db.getEthBlock(blockNumber) + txTrace = traceTransactions(captureCom, blk.header, blk.transactions) + stateDump = dumpBlockState(captureCom, blk) + blockTrace = traceBlock(captureCom, blk, {DisableState}) + receipts = dumpReceipts(captureCom.db, blk.header) var metaData = %{ "blockNumber": %blockNumber.toHex, diff --git a/vendor/nim-eth b/vendor/nim-eth index c02e050db..559943590 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit c02e050db8c60010b1e779d81c9d0f033f88d410 +Subproject commit 55994359018e6d3e7d45e8f8d211fa819f3843cf
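For orientation only, and not part of the patch above: a minimal Nim sketch of the call-site pattern the diff converges on, assuming the `EthBlock.init` constructor and the `header` / `transactions` fields exercised in the hunks; field values and the `persistBlocks` comment are illustrative.

# Illustrative sketch (not part of the patch). Assumes eth/common from the
# bumped nim-eth submodule provides EthBlock.init plus the header and
# transactions members used throughout the diff.
import eth/common

let
  header = BlockHeader(gasLimit: 30_000_000.GasInt)
  body   = BlockBody()                  # no transactions, uncles or withdrawals
  blk    = EthBlock.init(header, body)  # replaces carrying (header, body) pairs

doAssert blk.header.gasLimit == 30_000_000.GasInt
doAssert blk.transactions.len == 0      # formerly body.transactions

# Call sites then pass blocks as a single sequence, e.g.
# `chain.persistBlocks([blk])` instead of `([header], [body])`.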