diff --git a/nimbus/config.nim b/nimbus/config.nim index 12bee380c..eac008c65 100644 --- a/nimbus/config.nim +++ b/nimbus/config.nim @@ -503,6 +503,11 @@ type defaultValue: defaultAdminListenAddress defaultValueDesc: $defaultAdminListenAddressDesc name: "metrics-address" }: ValidIpAddress + + statelessModeDataSourceUrl* {. + desc: "URL of the node to use as a data source for on-demand data fetching via the JSON-RPC API" + defaultValue: "" + name: "stateless-data-source-url" .}: string of `import`: diff --git a/nimbus/core/executor/process_block.nim b/nimbus/core/executor/process_block.nim index 9594be2be..c3b0de726 100644 --- a/nimbus/core/executor/process_block.nim +++ b/nimbus/core/executor/process_block.nim @@ -34,6 +34,24 @@ import func gwei(n: uint64): UInt256 = n.u256 * (10 ^ 9).u256 +# Factored this out of procBlkPreamble so that it can be used directly for +# stateless execution of specific transactions. +proc processTransactions*(vmState: BaseVMState; + header: BlockHeader; + transactions: seq[Transaction]): Result[void, string] + {.gcsafe, raises: [Defect,CatchableError].} = + vmState.receipts = newSeq[Receipt](transactions.len) + vmState.cumulativeGasUsed = 0 + for txIndex, tx in transactions: + var sender: EthAddress + if not tx.getSender(sender): + return err("Could not get sender for tx with index " & $(txIndex) & ": " & $(tx)) + let rc = vmState.processTransaction(tx, sender, header) + if rc.isErr: + return err("Error processing tx with index " & $(txIndex) & ": " & $(tx)) + vmState.receipts[txIndex] = vmState.makeReceipt(tx.txType) + ok() + proc procBlkPreamble(vmState: BaseVMState; header: BlockHeader; body: BlockBody): bool {.gcsafe, raises: [CatchableError].} = @@ -57,18 +75,9 @@ proc procBlkPreamble(vmState: BaseVMState; #trace "Has transactions", # blockNumber = header.blockNumber, # blockHash = header.blockHash - vmState.receipts = newSeq[Receipt](body.transactions.len) - vmState.cumulativeGasUsed = 0 - for txIndex, tx in body.transactions: - var sender: EthAddress - if not tx.getSender(sender): - debug "Could not get sender", - txIndex, tx - return false - let rc = vmState.processTransaction(tx, sender, header) - if rc.isErr: - return false - vmState.receipts[txIndex] = vmState.makeReceipt(tx.txType) + let r = processTransactions(vmState, header, body.transactions) + if r.isErr: + error("error in processing transactions", err=r.error) if vmState.determineFork >= FkShanghai: if header.withdrawalsRoot.isNone: diff --git a/nimbus/core/executor/process_transaction.nim b/nimbus/core/executor/process_transaction.nim index 9be72bdcb..263df070a 100644 --- a/nimbus/core/executor/process_transaction.nim +++ b/nimbus/core/executor/process_transaction.nim @@ -17,8 +17,10 @@ import ../../transaction, ../../vm_state, ../../vm_types, + ../../evm/async/operations, ../validate, chronicles, + chronos, stew/results # ------------------------------------------------------------------------------ @@ -32,30 +34,52 @@ proc eip1559BaseFee(header: BlockHeader; fork: EVMFork): UInt256 = if FkLondon <= fork: result = header.baseFee -proc processTransactionImpl( +proc commitOrRollbackDependingOnGasUsed(vmState: BaseVMState, accTx: SavePoint, gasLimit: GasInt, gasBurned: GasInt, priorityFee: GasInt): Result[GasInt, void] {.raises: [Defect, RlpError].} = + # Make sure that the tx does not exceed the maximum cumulative limit as + # set in the block header. Again, the eip-1559 reference does not mention + # an early stop. 
It would rather detect differing values for the block + # header `gasUsed` and the `vmState.cumulativeGasUsed` at a later stage. + if gasLimit < vmState.cumulativeGasUsed + gasBurned: + vmState.stateDB.rollback(accTx) + debug "invalid tx: block header gasLimit reached", + maxLimit = gasLimit, + gasUsed = vmState.cumulativeGasUsed, + addition = gasBurned + return err() + else: + # Accept transaction and collect mining fee. + vmState.stateDB.commit(accTx) + vmState.stateDB.addBalance(vmState.coinbase(), gasBurned.u256 * priorityFee.u256) + vmState.cumulativeGasUsed += gasBurned + return ok(gasBurned) + +proc asyncProcessTransactionImpl( vmState: BaseVMState; ## Parent accounts environment for transaction tx: Transaction; ## Transaction to validate sender: EthAddress; ## tx.getSender or tx.ecRecover header: BlockHeader; ## Header for the block containing the current tx - fork: EVMFork): Result[GasInt,void] + fork: EVMFork): Future[Result[GasInt,void]] # wildcard exception, wrapped below - {.gcsafe, raises: [CatchableError].} = + {.async, gcsafe.} = ## Modelled after `https://eips.ethereum.org/EIPS/eip-1559#specification`_ ## which provides a backward compatible framwork for EIP1559. #trace "Sender", sender #trace "txHash", rlpHash = ty.rlpHash - + let roDB = vmState.readOnlyStateDB baseFee256 = header.eip1559BaseFee(fork) baseFee = baseFee256.truncate(GasInt) tx = eip1559TxNormalization(tx, baseFee, fork) priorityFee = min(tx.maxPriorityFee, tx.maxFee - baseFee) - miner = vmState.coinbase() # Return failure unless explicitely set `ok()` - result = err() + var res: Result[GasInt,void] = err() + + await ifNecessaryGetAccounts(vmState, @[sender, vmState.coinbase()]) + if tx.to.isSome: + await ifNecessaryGetCode(vmState, tx.to.get) # Actually, the eip-1559 reference does not mention an early exit. # @@ -70,22 +94,7 @@ proc processTransactionImpl( accTx = vmState.stateDB.beginSavepoint gasBurned = tx.txCallEvm(sender, vmState, fork) - # Make sure that the tx does not exceed the maximum cumulative limit as - # set in the block header. Again, the eip-1559 reference does not mention - # an early stop. It would rather detect differing values for the block - # header `gasUsed` and the `vmState.cumulativeGasUsed` at a later stage. - if header.gasLimit < vmState.cumulativeGasUsed + gasBurned: - vmState.stateDB.rollback(accTx) - debug "invalid tx: block header gasLimit reached", - maxLimit = header.gasLimit, - gasUsed = vmState.cumulativeGasUsed, - addition = gasBurned - else: - # Accept transaction and collect mining fee. 
- vmState.stateDB.commit(accTx) - vmState.stateDB.addBalance(miner, gasBurned.u256 * priorityFee.u256) - vmState.cumulativeGasUsed += gasBurned - result = ok(gasBurned) + res = commitOrRollbackDependingOnGasUsed(vmState, accTx, header.gasLimit, gasBurned, priorityFee) if vmState.generateWitness: vmState.stateDB.collectWitnessData() @@ -93,10 +102,35 @@ proc processTransactionImpl( clearEmptyAccount = fork >= FkSpurious, clearCache = false) + return res + # ------------------------------------------------------------------------------ # Public functions # ------------------------------------------------------------------------------ +proc asyncProcessTransaction*( + vmState: BaseVMState; ## Parent accounts environment for transaction + tx: Transaction; ## Transaction to validate + sender: EthAddress; ## tx.getSender or tx.ecRecover + header: BlockHeader; ## Header for the block containing the current tx + fork: EVMFork): Future[Result[GasInt,void]] + {.async, gcsafe.} = + ## Process the transaction, write the results to accounts db. The function + ## returns the amount of gas burned if executed. + return await vmState.asyncProcessTransactionImpl(tx, sender, header, fork) + +# FIXME-duplicatedForAsync +proc asyncProcessTransaction*( + vmState: BaseVMState; ## Parent accounts environment for transaction + tx: Transaction; ## Transaction to validate + sender: EthAddress; ## tx.getSender or tx.ecRecover + header: BlockHeader): Future[Result[GasInt,void]] + {.async, gcsafe.} = + ## Variant of `asyncProcessTransaction()` with `*fork* derived + ## from the `vmState` argument. + let fork = vmState.com.toEVMFork(header.forkDeterminationInfoForHeader) + return await vmState.asyncProcessTransaction(tx, sender, header, fork) + proc processTransaction*( vmState: BaseVMState; ## Parent accounts environment for transaction tx: Transaction; ## Transaction to validate @@ -104,9 +138,7 @@ proc processTransaction*( header: BlockHeader; ## Header for the block containing the current tx fork: EVMFork): Result[GasInt,void] {.gcsafe, raises: [CatchableError].} = - ## Process the transaction, write the results to accounts db. The function - ## returns the amount of gas burned if executed. - vmState.processTransactionImpl(tx, sender, header, fork) + return waitFor(vmState.asyncProcessTransaction(tx, sender, header, fork)) proc processTransaction*( vmState: BaseVMState; ## Parent accounts environment for transaction @@ -114,10 +146,7 @@ proc processTransaction*( sender: EthAddress; ## tx.getSender or tx.ecRecover header: BlockHeader): Result[GasInt,void] {.gcsafe, raises: [CatchableError].} = - ## Variant of `processTransaction()` with `*fork* derived - ## from the `vmState` argument. - let fork = vmState.com.toEVMFork(header.forkDeterminationInfoForHeader) - vmState.processTransaction(tx, sender, header, fork) + return waitFor(vmState.asyncProcessTransaction(tx, sender, header)) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/core/tx_pool/tx_tasks/tx_classify.nim b/nimbus/core/tx_pool/tx_tasks/tx_classify.nim index adea557dc..be1ad4a2d 100644 --- a/nimbus/core/tx_pool/tx_tasks/tx_classify.nim +++ b/nimbus/core/tx_pool/tx_tasks/tx_classify.nim @@ -223,7 +223,7 @@ proc classifyValidatePacked*(xp: TxPoolRef; vmState: BaseVMState; item: TxItemRef): bool = ## Verify the argument `item` against the accounts database. This function ## is a wrapper around the `verifyTransaction()` call to be used in a similar - ## fashion as in `processTransactionImpl()`. 
+ ## fashion as in `asyncProcessTransactionImpl()`. let roDB = vmState.readOnlyStateDB baseFee = xp.chain.baseFee.uint64.u256 diff --git a/nimbus/db/db_chain.nim b/nimbus/db/db_chain.nim index d6a047591..bbb620d96 100644 --- a/nimbus/db/db_chain.nim +++ b/nimbus/db/db_chain.nim @@ -440,6 +440,13 @@ proc persistHeaderToDbWithoutSetHead*(db: ChainDBRef; header: BlockHeader) = db.db.put(blockHashToScoreKey(headerHash).toOpenArray, rlp.encode(score)) db.db.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)) +# FIXME-Adam: This seems like a bad idea. I don't see a way to get the score +# in stateless mode, but it seems dangerous to just shove the header into +# the DB *without* also storing the score. +proc persistHeaderToDbWithoutSetHeadOrScore*(db: ChainDBRef; header: BlockHeader) = + db.addBlockNumberToHashLookup(header) + db.db.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header)) + proc persistUncles*(db: ChainDBRef, uncles: openArray[BlockHeader]): Hash256 = ## Persists the list of uncles to the database. ## Returns the uncles hash. diff --git a/nimbus/db/distinct_tries.nim b/nimbus/db/distinct_tries.nim index c5e216d7e..aec32ba5f 100644 --- a/nimbus/db/distinct_tries.nim +++ b/nimbus/db/distinct_tries.nim @@ -35,6 +35,9 @@ template initAccountsTrie*(db: DB, isPruning = true): AccountsTrie = proc getAccountBytes*(trie: AccountsTrie, address: EthAddress): seq[byte] = SecureHexaryTrie(trie).get(address) +proc maybeGetAccountBytes*(trie: AccountsTrie, address: EthAddress): Option[seq[byte]] = + SecureHexaryTrie(trie).maybeGet(address) + proc putAccountBytes*(trie: var AccountsTrie, address: EthAddress, value: openArray[byte]) = SecureHexaryTrie(trie).put(address, value) @@ -49,11 +52,31 @@ template initStorageTrie*(db: DB, rootHash: KeccakHash, isPruning = true): Stora template initStorageTrie*(db: DB, isPruning = true): StorageTrie = StorageTrie(initSecureHexaryTrie(db, isPruning)) +template createTrieKeyFromSlot*(slot: UInt256): auto = + # XXX: This is too expensive. Similar to `createRangeFromAddress` + # Converts a number to hex big-endian representation including + # prefix and leading zeros: + slot.toByteArrayBE + # Original py-evm code: + # pad32(int_to_big_endian(slot)) + # morally equivalent to toByteRange_Unnecessary but with different types + proc getSlotBytes*(trie: StorageTrie, slotAsKey: openArray[byte]): seq[byte] = SecureHexaryTrie(trie).get(slotAsKey) +proc maybeGetSlotBytes*(trie: StorageTrie, slotAsKey: openArray[byte]): Option[seq[byte]] = + SecureHexaryTrie(trie).maybeGet(slotAsKey) + proc putSlotBytes*(trie: var StorageTrie, slotAsKey: openArray[byte], value: openArray[byte]) = SecureHexaryTrie(trie).put(slotAsKey, value) proc delSlotBytes*(trie: var StorageTrie, slotAsKey: openArray[byte]) = SecureHexaryTrie(trie).del(slotAsKey) + +proc storageTrieForAccount*(trie: AccountsTrie, account: Account, isPruning = true): StorageTrie = + # TODO: implement `prefix-db` to solve issue #228 permanently. + # the `prefix-db` will automatically insert account address to the + # underlying-db key without disturb how the trie works. + # it will create virtual container for each account. 
+ # see nim-eth#9 + initStorageTrie(SecureHexaryTrie(trie).db, account.storageRoot, isPruning) diff --git a/nimbus/db/incomplete_db.nim b/nimbus/db/incomplete_db.nim new file mode 100644 index 000000000..24a6bfe3a --- /dev/null +++ b/nimbus/db/incomplete_db.nim @@ -0,0 +1,94 @@ +#[ +FIXME-Adam: I feel like this and distinct_tries should either be combined or more clearly separated. + +The points of these two files are: + - Have distinct types for the two kinds of tries, because we really don't want to mix them up. + - Have an interface with functions like getAccountBytes rather than just get. (But still just a super-thin wrapper.) + - Have maybeGetWhatever instead of just getWhatever. (Also assertions.) + - Notice that this makes sense at both the bytes level and the Account/UInt256 level. + +]# + +import + chronicles, + eth/[common, rlp], + eth/trie/[hexary, db, trie_defs], + storage_types, + ./values_from_bytes, + ./distinct_tries + + + +# Useful for debugging. +const shouldDoAssertionsForMissingNodes* = false + +proc ifNodesExistGetAccountBytes*(trie: AccountsTrie, address: EthAddress): Option[seq[byte]] = + trie.maybeGetAccountBytes(address) + +proc ifNodesExistGetStorageBytesWithinAccount*(storageTrie: StorageTrie, slotAsKey: openArray[byte]): Option[seq[byte]] = + storageTrie.maybeGetSlotBytes(slotAsKey) + + +proc populateDbWithNodes*(db: TrieDatabaseRef, nodes: seq[seq[byte]]) = + error("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG AARDVARK: populateDbWithNodes received nodes, about to populate", nodes) # AARDVARK not an error, I just want it to stand out + for nodeBytes in nodes: + let nodeHash = keccakHash(nodeBytes) + info("AARDVARK: populateDbWithNodes about to add node", nodeHash, nodeBytes) + db.put(nodeHash.data, nodeBytes) + +# AARDVARK: just make the callers call populateDbWithNodes directly? +proc populateDbWithBranch*(db: TrieDatabaseRef, branch: seq[seq[byte]]) = + for nodeBytes in branch: + let nodeHash = keccakHash(nodeBytes) + db.put(nodeHash.data, nodeBytes) + +# Returns a none if there are missing nodes; if the account itself simply +# doesn't exist yet, that's fine and it returns some(newAccount()). 
+proc ifNodesExistGetAccount*(trie: AccountsTrie, address: EthAddress): Option[Account] = + ifNodesExistGetAccountBytes(trie, address).map(accountFromBytes) + +proc maybeGetCode*(db: TrieDatabaseRef, codeHash: Hash256): Option[seq[byte]] = + when defined(geth): + return db.maybeGet(codeHash.data) + else: + return db.maybeGet(contractHashKey(codeHash).toOpenArray) + +proc maybeGetCode*(trie: AccountsTrie, address: EthAddress): Option[seq[byte]] = + let maybeAcc = trie.ifNodesExistGetAccount(address) + if maybeAcc.isNone: + none[seq[byte]]() + else: + maybeGetCode(SecureHexaryTrie(trie).db, maybeAcc.get.codeHash) + +proc checkingForMissingNodes_getCode*(trie: AccountsTrie, address: EthAddress): seq[byte] = + let m = maybeGetCode(trie, address) + doAssert(m.isSome, "missing code for account at " & $(address)) + m.get + +proc assertFetchedCode*(trie: AccountsTrie, address: EthAddress) = + if shouldDoAssertionsForMissingNodes: + let m = maybeGetCode(trie, address) + doAssert(m.isSome, "missing code for account at " & $(address)) + + +proc ifNodesExistGetStorageWithinAccount*(storageTrie: StorageTrie, slot: UInt256): Option[UInt256] = + ifNodesExistGetStorageBytesWithinAccount(storageTrie, createTrieKeyFromSlot(slot)).map(slotValueFromBytes) + +proc ifNodesExistGetStorage*(trie: AccountsTrie, address: EthAddress, slot: UInt256): Option[UInt256] = + let maybeAcc = ifNodesExistGetAccount(trie, address) + if maybeAcc.isNone: + none[UInt256]() + else: + ifNodesExistGetStorageWithinAccount(storageTrieForAccount(trie, maybeAcc.get), slot) + +proc hasAllNodesForAccount*(trie: AccountsTrie, address: EthAddress): bool = + ifNodesExistGetAccountBytes(trie, address).isSome + +proc hasAllNodesForCode*(trie: AccountsTrie, address: EthAddress): bool = + maybeGetCode(trie, address).isSome + +proc hasAllNodesForStorageSlot*(trie: AccountsTrie, address: EthAddress, slot: UInt256): bool = + ifNodesExistGetStorage(trie, address, slot).isSome + +proc assertFetchedStorage*(trie: AccountsTrie, address: EthAddress, slot: UInt256) = + doAssert(hasAllNodesForStorageSlot(trie, address, slot)) diff --git a/nimbus/db/state_db.nim b/nimbus/db/state_db.nim index 6aa35b325..4596c8dc1 100644 --- a/nimbus/db/state_db.nim +++ b/nimbus/db/state_db.nim @@ -126,12 +126,7 @@ template createTrieKeyFromSlot(slot: UInt256): auto = # morally equivalent to toByteRange_Unnecessary but with different types template getStorageTrie(db: AccountStateDB, account: Account): auto = - # TODO: implement `prefix-db` to solve issue #228 permanently. - # the `prefix-db` will automatically insert account address to the - # underlying-db key without disturb how the trie works. - # it will create virtual container for each account. - # see nim-eth#9 - initStorageTrie(trieDB(db), account.storageRoot, false) + storageTrieForAccount(db.trie, account, false) proc clearStorage*(db: AccountStateDB, address: EthAddress) = var account = db.getAccount(address) diff --git a/nimbus/db/values_from_bytes.nim b/nimbus/db/values_from_bytes.nim new file mode 100644 index 000000000..476ae06cf --- /dev/null +++ b/nimbus/db/values_from_bytes.nim @@ -0,0 +1,18 @@ +# This code was duplicated enough times around the codebase +# that it seemed worth factoring it out. 
+ +import + stint, + eth/[common, rlp] + +proc accountFromBytes*(accountBytes: seq[byte]): Account = + if accountBytes.len > 0: + rlp.decode(accountBytes, Account) + else: + newAccount() + +proc slotValueFromBytes*(rec: seq[byte]): UInt256 = + if rec.len > 0: + rlp.decode(rec, UInt256) + else: + UInt256.zero() diff --git a/nimbus/evm/async/data_sources/json_rpc_data_source.nim b/nimbus/evm/async/data_sources/json_rpc_data_source.nim index 466152282..317d06127 100644 --- a/nimbus/evm/async/data_sources/json_rpc_data_source.nim +++ b/nimbus/evm/async/data_sources/json_rpc_data_source.nim @@ -1,5 +1,6 @@ import std/[sequtils, typetraits, options], + times, chronicles, chronos, nimcrypto, @@ -15,14 +16,263 @@ import ../../../sync/protocol, ../../../sync/protocol/eth66 as proto_eth66, ../../../db/[db_chain, distinct_tries, incomplete_db, storage_types], - ../data_fetching, - ../data_sources, - ../rpc_api + ../data_sources + +from ../../../sync/protocol/eth66 import getNodeData + +from ../../../rpc/rpc_utils import toHash +from web3 import Web3, BlockHash, BlockObject, FixedBytes, Address, ProofResponse, StorageProof, newWeb3, fromJson, fromHex, eth_getBlockByHash, eth_getBlockByNumber, eth_getCode, eth_getProof, blockId, `%` +#from ../../../premix/downloader import request +#from ../../../premix/parser import prefixHex, parseBlockHeader, parseReceipt, parseTransaction + +from eth/common import BlockHeader + +# Trying to do things the new web3 way: +from ../../../nimbus_verified_proxy/validate_proof import getAccountFromProof export AsyncOperationFactory, AsyncDataSource +var durationSpentDoingFetches*: times.Duration +var fetchCounter*: int + + +func toHash*(s: string): Hash256 {.raises: [Defect, ValueError].} = + hexToPaddedByteArray[32](s).toHash + +func toHash*(h: BlockHash): Hash256 {.raises: [Defect, ValueError].} = + distinctBase(h).toHash + +func toWeb3BlockHash*(h: Hash256): BlockHash = + BlockHash(h.data) + +func web3AddressToEthAddress(a: web3.Address): EthAddress = + distinctBase(a) + + +proc makeAnRpcClient*(web3Url: string): Future[RpcClient] {.async.} = + let myWeb3: Web3 = waitFor(newWeb3(web3Url)) + return myWeb3.provider + + +#[ + BlockObject* = ref object + number*: Quantity # the block number. null when its pending block. + hash*: Hash256 # hash of the block. null when its pending block. + parentHash*: Hash256 # hash of the parent block. + sha3Uncles*: Hash256 # SHA3 of the uncles data in the block. + logsBloom*: FixedBytes[256] # the bloom filter for the logs of the block. null when its pending block. + transactionsRoot*: Hash256 # the root of the transaction trie of the block. + stateRoot*: Hash256 # the root of the final state trie of the block. + receiptsRoot*: Hash256 # the root of the receipts trie of the block. + miner*: Address # the address of the beneficiary to whom the mining rewards were given. + difficulty*: UInt256 # integer of the difficulty for this block. + extraData*: DynamicBytes[0, 32] # the "extra data" field of this block. + gasLimit*: Quantity # the maximum gas allowed in this block. + gasUsed*: Quantity # the total used gas by all transactions in this block. + timestamp*: Quantity # the unix timestamp for when the block was collated. + nonce*: Option[FixedBytes[8]] # hash of the generated proof-of-work. null when its pending block. + size*: Quantity # integer the size of this block in bytes. + totalDifficulty*: UInt256 # integer of the total difficulty of the chain until this block. 
+ transactions*: seq[TxHash] # list of transaction objects, or 32 Bytes transaction hashes depending on the last given parameter. + uncles*: seq[Hash256] # list of uncle hashes. + baseFeePerGas*: Option[UInt256] # EIP-1559 + withdrawalsRoot*: Option[Hash256] # EIP-4895 + excessDataGas*: Option[UInt256] # EIP-4844 +]# + +func blockHeaderFromBlockObject(o: BlockObject): BlockHeader = + let nonce: BlockNonce = if o.nonce.isSome: distinctBase(o.nonce.get) else: default(BlockNonce) + BlockHeader( + parentHash: o.parentHash.toHash, + ommersHash: o.sha3Uncles.toHash, + coinbase: o.miner.web3AddressToEthAddress, + stateRoot: o.stateRoot.toHash, + txRoot: o.transactionsRoot.toHash, + receiptRoot: o.receiptsRoot.toHash, + bloom: distinctBase(o.logsBloom), + difficulty: o.difficulty, + blockNumber: distinctBase(o.number).u256, + gasLimit: int64(distinctBase(o.gasLimit)), + gasUsed: int64(distinctBase(o.gasUsed)), + timestamp: initTime(int64(distinctBase(o.timestamp)), 0), + extraData: distinctBase(o.extraData), + #mixDigest: o.mixHash.toHash, # AARDVARK what's this? + nonce: nonce, + fee: o.baseFeePerGas, + withdrawalsRoot: o.withdrawalsRoot.map(toHash), + excessDataGas: o.excessDataGas + ) + +proc fetchBlockHeaderWithHash*(rpcClient: RpcClient, h: Hash256): Future[BlockHeader] {.async.} = + let t0 = now() + let blockObject: BlockObject = await rpcClient.eth_getBlockByHash(h.toWeb3BlockHash, false) + durationSpentDoingFetches += now() - t0 + fetchCounter += 1 + return blockHeaderFromBlockObject(blockObject) + +proc fetchBlockHeaderWithNumber*(rpcClient: RpcClient, n: BlockNumber): Future[BlockHeader] {.async.} = + let t0 = now() + let bid = blockId(n.truncate(uint64)) + let blockObject: BlockObject = await rpcClient.eth_getBlockByNumber(bid, false) + durationSpentDoingFetches += now() - t0 + fetchCounter += 1 + return blockHeaderFromBlockObject(blockObject) + +#[ +proc parseBlockBodyAndFetchUncles(rpcClient: RpcClient, r: JsonNode): Future[BlockBody] {.async.} = + var body: BlockBody + for tn in r["transactions"].getElems: + body.transactions.add(parseTransaction(tn)) + for un in r["uncles"].getElems: + let uncleHash: Hash256 = un.getStr.toHash + let uncleHeader = await fetchBlockHeaderWithHash(rpcClient, uncleHash) + body.uncles.add(uncleHeader) + return body + +proc fetchBlockHeaderAndBodyWithHash*(rpcClient: RpcClient, h: Hash256): Future[(BlockHeader, BlockBody)] {.async.} = + let t0 = now() + let r = request("eth_getBlockByHash", %[%h.prefixHex, %true], some(rpcClient)) + durationSpentDoingFetches += now() - t0 + fetchCounter += 1 + if r.kind == JNull: + error "requested block not available", blockHash=h + raise newException(ValueError, "Error when retrieving block header and body") + let header = parseBlockHeader(r) + let body = await parseBlockBodyAndFetchUncles(rpcClient, r) + return (header, body) + +proc fetchBlockHeaderAndBodyWithNumber*(rpcClient: RpcClient, n: BlockNumber): Future[(BlockHeader, BlockBody)] {.async.} = + let t0 = now() + let r = request("eth_getBlockByNumber", %[%n.prefixHex, %true], some(rpcClient)) + durationSpentDoingFetches += now() - t0 + fetchCounter += 1 + if r.kind == JNull: + error "requested block not available", blockNumber=n + raise newException(ValueError, "Error when retrieving block header and body") + let header = parseBlockHeader(r) + let body = await parseBlockBodyAndFetchUncles(rpcClient, r) + return (header, body) +]# + +proc fetchBlockHeaderAndBodyWithHash*(rpcClient: RpcClient, h: Hash256): Future[(BlockHeader, BlockBody)] {.async.} = + 
doAssert(false, "AARDVARK not implemented") + +proc fetchBlockHeaderAndBodyWithNumber*(rpcClient: RpcClient, n: BlockNumber): Future[(BlockHeader, BlockBody)] {.async.} = + doAssert(false, "AARDVARK not implemented") + +func mdigestFromFixedBytes*(arg: FixedBytes[32]): MDigest[256] = + MDigest[256](data: distinctBase(arg)) + +func mdigestFromString*(s: string): MDigest[256] = + mdigestFromFixedBytes(FixedBytes[32].fromHex(s)) + +type + AccountProof* = seq[seq[byte]] + +proc fetchAccountAndSlots*(rpcClient: RpcClient, address: EthAddress, slots: seq[UInt256], blockNumber: BlockNumber): Future[(Account, AccountProof, seq[StorageProof])] {.async.} = + let t0 = now() + debug "Got to fetchAccountAndSlots", address=address, slots=slots, blockNumber=blockNumber + #let blockNumberHexStr: HexQuantityStr = encodeQuantity(blockNumber) + let blockNumberUint64 = blockNumber.truncate(uint64) + let a = web3.Address(address) + let bid = blockId(blockNumber.truncate(uint64)) + debug "About to call eth_getProof", address=address, slots=slots, blockNumber=blockNumber + let proofResponse: ProofResponse = await rpcClient.eth_getProof(a, slots, bid) + debug "Received response to eth_getProof", proofResponse=proofResponse + + let acc = Account( + nonce: distinctBase(proofResponse.nonce), + balance: proofResponse.balance, + storageRoot: mdigestFromFixedBytes(proofResponse.storageHash), + codeHash: mdigestFromFixedBytes(proofResponse.codeHash) + ) + debug "Parsed response to eth_getProof", acc=acc + let mptNodesBytes: seq[seq[byte]] = proofResponse.accountProof.mapIt(distinctBase(it)) + durationSpentDoingFetches += now() - t0 + fetchCounter += 1 + return (acc, mptNodesBytes, proofResponse.storageProof) + +proc fetchCode*(client: RpcClient, blockNumber: BlockNumber, address: EthAddress): Future[seq[byte]] {.async.} = + let t0 = now() + let a = web3.Address(address) + let bid = blockId(blockNumber.truncate(uint64)) + let fetchedCode: seq[byte] = await client.eth_getCode(a, bid) + durationSpentDoingFetches += now() - t0 + fetchCounter += 1 + return fetchedCode + + + + + + +const bytesLimit = 2 * 1024 * 1024 +const maxNumberOfPeersToAttempt = 3 + +proc fetchUsingGetTrieNodes(peer: Peer, stateRoot: Hash256, paths: seq[SnapTriePaths]): Future[seq[seq[byte]]] {.async.} = + let r = await peer.getTrieNodes(stateRoot, paths, bytesLimit) + if r.isNone: + raise newException(CatchableError, "AARDVARK: received None in GetTrieNodes response") + else: + return r.get.nodes + +proc fetchUsingGetNodeData(peer: Peer, nodeHashes: seq[Hash256]): Future[seq[seq[byte]]] {.async.} = + #[ + let r: Option[seq[seq[byte]]] = none[seq[seq[byte]]]() # AARDVARK await peer.getNodeData(nodeHashes) + if r.isNone: + raise newException(CatchableError, "AARDVARK: received None in GetNodeData response") + else: + echo "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA fetchUsingGetNodeData received nodes: " & $(r.get.data) + return r.get.data + ]# + # AARDVARK whatever + return @[] + +proc findPeersAndMakeSomeCalls[R](peerPool: PeerPool, protocolName: string, protocolType: typedesc, initiateAttempt: (proc(p: Peer): Future[R] {.gcsafe.})): Future[seq[Future[R]]] {.async.} = + var attempts: seq[Future[R]] + while true: + #info("AARDVARK: findPeersAndMakeSomeCalls about to loop through the peer pool", count=peerPool.connectedNodes.len) + for nodeOfSomeSort, peer in peerPool.connectedNodes: + if peer.supports(protocolType): + info("AARDVARK: findPeersAndMakeSomeCalls calling peer", protocolName, peer) + attempts.add(initiateAttempt(peer)) + if 
attempts.len >= maxNumberOfPeersToAttempt: + break + #else: + # info("AARDVARK: peer does not support protocol", protocolName, peer) + if attempts.len == 0: + warn("AARDVARK: findPeersAndMakeSomeCalls did not find any peers; waiting and trying again", protocolName, totalPeerPoolSize=peerPool.connectedNodes.len) + await sleepAsync(5000) + else: + if attempts.len < maxNumberOfPeersToAttempt: + warn("AARDVARK: findPeersAndMakeSomeCalls did not find enough peers, but found some", protocolName, totalPeerPoolSize=peerPool.connectedNodes.len, found=attempts.len) + break + return attempts + +proc findPeersAndMakeSomeAttemptsToCallGetTrieNodes(peerPool: PeerPool, stateRoot: Hash256, paths: seq[SnapTriePaths]): Future[seq[Future[seq[seq[byte]]]]] = + findPeersAndMakeSomeCalls(peerPool, "snap", protocol.snap, (proc(peer: Peer): Future[seq[seq[byte]]] = fetchUsingGetTrieNodes(peer, stateRoot, paths))) + +#[ +proc findPeersAndMakeSomeAttemptsToCallGetNodeData(peerPool: PeerPool, stateRoot: Hash256, nodeHashes: seq[Hash256]): Future[seq[Future[seq[seq[byte]]]]] = + findPeersAndMakeSomeCalls(peerPool, "eth66", eth66, (proc(peer: Peer): Future[seq[seq[byte]]] = fetchUsingGetNodeData(peer, nodeHashes))) +]# + +proc fetchNodes(peerPool: PeerPool, stateRoot: Hash256, paths: seq[SnapTriePaths], nodeHashes: seq[Hash256]): Future[seq[seq[byte]]] {.async.} = + let attempts = await findPeersAndMakeSomeAttemptsToCallGetTrieNodes(peerPool, stateRoot, paths) + #let attempts = await findPeersAndMakeSomeAttemptsToCallGetNodeData(peerPool, stateRoot, nodeHashes) + let completedAttempt = await one(attempts) + let nodes: seq[seq[byte]] = completedAttempt.read + info("AARDVARK: fetchNodes received nodes", nodes) + return nodes + + + + + + + proc verifyFetchedAccount(stateRoot: Hash256, address: EthAddress, acc: Account, accProof: seq[seq[byte]]): Result[void, string] = let accKey = toSeq(keccakHash(address).data) let accEncoded = rlp.encode(acc) @@ -61,6 +311,15 @@ proc fetchAndVerifyCode(client: RpcClient, p: CodeFetchingInfo, desiredCodeHash: error("code hash values do not match", p=p, desiredCodeHash=desiredCodeHash, fetchedCodeHash=fetchedCodeHash) raise newException(CatchableError, "async code received code for " & $(p.address) & " whose hash (" & $(fetchedCodeHash) & ") does not match the desired hash (" & $(desiredCodeHash) & ")") +proc putCode*(db: TrieDatabaseRef, codeHash: Hash256, code: seq[byte]) = + when defined(geth): + db.put(codeHash.data, code) + else: + db.put(contractHashKey(codeHash).toOpenArray, code) + +proc putCode*(trie: AccountsTrie, codeHash: Hash256, code: seq[byte]) = + putCode(distinctBase(trie).db, codeHash, code) + proc storeCode(trie: AccountsTrie, p: CodeFetchingInfo, desiredCodeHash: Hash256, fetchedCode: seq[byte]) = trie.putCode(desiredCodeHash, fetchedCode) @@ -130,7 +389,7 @@ proc assertThatWeHaveStoredBlockHeader(chainDB: ChainDBRef, blockNumber: BlockNu let h = chainDB.getBlockHash(blockNumber) doAssert(h == header.blockHash, "stored the block header for block " & $(blockNumber)) -template raiseExceptionIfError[E](whatAreWeVerifying: untyped, r: Result[void, E]) = +proc raiseExceptionIfError[V, E](whatAreWeVerifying: V, r: Result[void, E]) = if r.isErr: error("async code failed to verify", whatAreWeVerifying=whatAreWeVerifying, err=r.error) raise newException(CatchableError, "async code failed to verify: " & $(whatAreWeVerifying) & ", error is: " & $(r.error)) @@ -170,9 +429,9 @@ proc ifNecessaryGetAccountAndSlots*(client: RpcClient, db: TrieDatabaseRef, bloc 
doAssert(slotsToActuallyFetch.len == storageProofs.len, "We should get back the same number of storage proofs as slots that we asked for. I think.") for storageProof in storageProofs: - let slot = UInt256.fromHex(string(storageProof.key)) - let fetchedVal = UInt256.fromHex(string(storageProof.value)) - let storageMptNodes = storageProof.proof.mapIt(hexToSeqByte(string(it))) + let slot: UInt256 = storageProof.key + let fetchedVal: UInt256 = storageProof.value + let storageMptNodes: seq[seq[byte]] = storageProof.proof.mapIt(distinctBase(it)) let storageVerificationRes = verifyFetchedSlot(acc.storageRoot, slot, fetchedVal, storageMptNodes) let whatAreWeVerifying = ("storage proof", address, acc, slot, fetchedVal) raiseExceptionIfError(whatAreWeVerifying, storageVerificationRes) diff --git a/nimbus/evm/interpreter/op_dispatcher.nim b/nimbus/evm/interpreter/op_dispatcher.nim index a33fde8ed..5322ea102 100644 --- a/nimbus/evm/interpreter/op_dispatcher.nim +++ b/nimbus/evm/interpreter/op_dispatcher.nim @@ -50,7 +50,8 @@ template handleFixedGasCostsDirective(fork: EVMFork; op: Op; k: var Vm2Ctx) = k.cpt.gasMeter.consumeGas(k.cpt.gasCosts[op].cost, reason = $op) vmOpHandlers[fork][op].run(k) - if k.cpt.tracingEnabled: + # If continuation is not nil, traceOpCodeEnded will be called in executeOpcodes. + if k.cpt.tracingEnabled and k.cpt.continuation.isNil: k.cpt.traceOpCodeEnded(op, k.cpt.opIndex) @@ -60,7 +61,8 @@ template handleOtherDirective(fork: EVMFork; op: Op; k: var Vm2Ctx) = vmOpHandlers[fork][op].run(k) - if k.cpt.tracingEnabled: + # If continuation is not nil, traceOpCodeEnded will be called in executeOpcodes. + if k.cpt.tracingEnabled and k.cpt.continuation.isNil: k.cpt.traceOpCodeEnded(op, k.cpt.opIndex) # ------------------------------------------------------------------------------ @@ -98,18 +100,28 @@ proc toCaseStmt(forkArg, opArg, k: NimNode): NimNode = # Wrap innner case/switch into outer case/switch let branchStmt = block: case op - of Create, Create2, Call, CallCode, DelegateCall, StaticCall, Sload: - quote do: - `forkCaseSubExpr` - if not `k`.cpt.continuation.isNil: - break of Stop, Return, Revert, SelfDestruct: quote do: `forkCaseSubExpr` break else: + # FIXME-manyOpcodesNowRequireContinuations + # We used to have another clause in this case statement for various + # opcodes that *don't* need to check for a continuation. But now + # there are many opcodes that need to, because they call asyncChainTo + # (and so they set a pendingAsyncOperation and a continuation that + # needs to be noticed by the interpreter_dispatch loop). And that + # will become even more true once we implement speculative execution, + # because that will mean that even reading from the stack might + # require waiting. + # + # Anyway, the point is that now we might as well just do this check + # for *every* opcode (other than Return/Revert/etc, which need to + # break no matter what). quote do: `forkCaseSubExpr` + if not `k`.cpt.continuation.isNil: + break result.add nnkOfBranch.newTree(asOp, branchStmt) @@ -135,13 +147,12 @@ template genLowMemDispatcher*(fork: EVMFork; op: Op; k: Vm2Ctx) = handleOtherDirective(fork, op, k) case c.instr - of Create, Create2, Call, CallCode, DelegateCall, StaticCall, Sload: - if not k.cpt.continuation.isNil: - break of Return, Revert, SelfDestruct: break else: - discard + # FIXME-manyOpcodesNowRequireContinuations + if not k.cpt.continuation.isNil: + break # ------------------------------------------------------------------------------ # Debugging ... 
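With asyncChainTo now used by most opcode handlers, the dispatcher above can no longer whitelist just a few opcodes: after any non-terminating opcode it has to check whether a continuation was registered and, if so, break out so the interpreter loop can await the pending fetch and then resume. The following is a minimal, self-contained sketch of that pause/resume shape; it is not the actual Nimbus interpreter loop, and the stand-in type below only models the two fields (`continuation`, `pendingAsyncOperation`) that the comments in the hunk refer to.

import chronos

type
  # Stand-in for the real Computation type; only the two fields discussed
  # in the dispatcher comments above are modelled here.
  PausableComputation = ref object
    continuation: proc () {.gcsafe.}
    pendingAsyncOperation: Future[void]
    done: bool

proc executeNextOpcode(c: PausableComputation) =
  # Placeholder: would dispatch one opcode, which may register a
  # continuation via asyncChainTo instead of finishing immediately.
  discard

proc resumeLoop(c: PausableComputation) {.async.} =
  ## Simplified sketch of how the interpreter loop is expected to handle a
  ## pending continuation; the real loop in interpreter_dispatch may differ.
  while not c.done:
    c.executeNextOpcode()              # may register a continuation
    if not c.continuation.isNil:
      await c.pendingAsyncOperation    # e.g. fetch an account, code or slot
      let k = c.continuation
      c.continuation = nil
      k()                              # finish the paused opcode

This is also why the traceOpCodeEnded calls above are skipped while a continuation is pending: tracing for that opcode only completes once the continuation has run.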
diff --git a/nimbus/evm/interpreter/op_handlers/oph_blockdata.nim b/nimbus/evm/interpreter/op_handlers/oph_blockdata.nim index 879d35f63..b0d0f750f 100644 --- a/nimbus/evm/interpreter/op_handlers/oph_blockdata.nim +++ b/nimbus/evm/interpreter/op_handlers/oph_blockdata.nim @@ -16,6 +16,7 @@ import eth/common, ../../computation, ../../stack, + ../../async/operations, ../op_codes, ./oph_defs @@ -31,9 +32,11 @@ when not defined(evmc_enabled): const blockhashOp: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x40, Get the hash of one of the 256 most recent complete blocks. - let (blockNumber) = k.cpt.stack.popInt(1) - k.cpt.stack.push: - k.cpt.getBlockHash(blockNumber) + let cpt = k.cpt + let (blockNumber) = cpt.stack.popInt(1) + cpt.asyncChainTo(ifNecessaryGetBlockHeaderByNumber(cpt.vmState, blockNumber)): + cpt.stack.push: + cpt.getBlockHash(blockNumber) coinBaseOp: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x41, Get the block's beneficiary address. @@ -67,8 +70,10 @@ const selfBalanceOp: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x47, Get current contract's balance. - k.cpt.stack.push: - k.cpt.getBalance(k.cpt.msg.contractAddress) + let cpt = k.cpt + cpt.asyncChainTo(ifNecessaryGetAccount(cpt.vmState, cpt.msg.contractAddress)): + cpt.stack.push: + cpt.getBalance(cpt.msg.contractAddress) baseFeeOp: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x48, Get the block's base fee. diff --git a/nimbus/evm/interpreter/op_handlers/oph_call.nim b/nimbus/evm/interpreter/op_handlers/oph_call.nim index cf8746068..918e40806 100644 --- a/nimbus/evm/interpreter/op_handlers/oph_call.nim +++ b/nimbus/evm/interpreter/op_handlers/oph_call.nim @@ -22,6 +22,7 @@ import ../../memory, ../../stack, ../../types, + ../../async/operations, ../gas_costs, ../gas_meter, ../op_codes, @@ -196,171 +197,176 @@ else: const callOp: Vm2OpFn = proc(k: var Vm2Ctx) = ## 0xf1, Message-Call into an account + let cpt = k.cpt - if emvcStatic == k.cpt.msg.flags and k.cpt.stack[^3, UInt256] > 0.u256: + if emvcStatic == cpt.msg.flags and cpt.stack[^3, UInt256] > 0.u256: raise newException( StaticContextError, "Cannot modify state while inside of a STATICCALL context") - let - p = k.cpt.callParams + + let p = cpt.callParams + cpt.asyncChainTo(ifNecessaryGetAccounts(cpt.vmState, @[p.sender])): + cpt.asyncChainTo(ifNecessaryGetCodeForAccounts(cpt.vmState, @[p.contractAddress, p.codeAddress])): + var (gasCost, childGasLimit) = cpt.gasCosts[Call].c_handler( + p.value, + GasParams( + kind: Call, + c_isNewAccount: not cpt.accountExists(p.contractAddress), + c_gasBalance: cpt.gasMeter.gasRemaining, + c_contractGas: p.gas, + c_currentMemSize: cpt.memory.len, + c_memOffset: p.memOffset, + c_memLength: p.memLength)) - var (gasCost, childGasLimit) = k.cpt.gasCosts[Call].c_handler( - p.value, - GasParams( - kind: Call, - c_isNewAccount: not k.cpt.accountExists(p.contractAddress), - c_gasBalance: k.cpt.gasMeter.gasRemaining, - c_contractGas: p.gas, - c_currentMemSize: k.cpt.memory.len, - c_memOffset: p.memOffset, - c_memLength: p.memLength)) + # EIP 2046: temporary disabled + # reduce gas fee for precompiles + # from 700 to 40 + if gasCost >= 0: + cpt.gasMeter.consumeGas(gasCost, reason = $Call) - # EIP 2046: temporary disabled - # reduce gas fee for precompiles - # from 700 to 40 - if gasCost >= 0: - k.cpt.gasMeter.consumeGas(gasCost, reason = $Call) + cpt.returnData.setLen(0) - k.cpt.returnData.setLen(0) + if cpt.msg.depth >= MaxCallDepth: + debug "Computation Failure", + reason = "Stack too deep", + maximumDepth = MaxCallDepth, + depth = cpt.msg.depth + 
cpt.gasMeter.returnGas(childGasLimit) + return - if k.cpt.msg.depth >= MaxCallDepth: - debug "Computation Failure", - reason = "Stack too deep", - maximumDepth = MaxCallDepth, - depth = k.cpt.msg.depth - k.cpt.gasMeter.returnGas(childGasLimit) - return + if gasCost < 0 and childGasLimit <= 0: + raise newException( + OutOfGas, "Gas not enough to perform calculation (call)") - if gasCost < 0 and childGasLimit <= 0: - raise newException( - OutOfGas, "Gas not enough to perform calculation (call)") + cpt.memory.extend(p.memInPos, p.memInLen) + cpt.memory.extend(p.memOutPos, p.memOutLen) - k.cpt.memory.extend(p.memInPos, p.memInLen) - k.cpt.memory.extend(p.memOutPos, p.memOutLen) + let senderBalance = cpt.getBalance(p.sender) + if senderBalance < p.value: + #debug "Insufficient funds", + # available = senderBalance, + # needed = cpt.msg.value + cpt.gasMeter.returnGas(childGasLimit) + return - let senderBalance = k.cpt.getBalance(p.sender) - if senderBalance < p.value: - #debug "Insufficient funds", - # available = senderBalance, - # needed = k.cpt.msg.value - k.cpt.gasMeter.returnGas(childGasLimit) - return - - when evmc_enabled: - let - msg = new(nimbus_message) - c = k.cpt - msg[] = nimbus_message( - kind : evmcCall.ord.evmc_call_kind, - depth : (k.cpt.msg.depth + 1).int32, - gas : childGasLimit, - sender : p.sender, - recipient : p.contractAddress, - code_address: p.codeAddress, - input_data : k.cpt.memory.readPtr(p.memInPos), - input_size : p.memInLen.uint, - value : toEvmc(p.value), - flags : p.flags.uint32 - ) - c.execSubCall(msg, p) - else: - k.cpt.execSubCall( - memPos = p.memOutPos, - memLen = p.memOutLen, - childMsg = Message( - kind: evmcCall, - depth: k.cpt.msg.depth + 1, - gas: childGasLimit, - sender: p.sender, - contractAddress: p.contractAddress, - codeAddress: p.codeAddress, - value: p.value, - data: k.cpt.memory.read(p.memInPos, p.memInLen), - flags: p.flags)) + when evmc_enabled: + let + msg = new(nimbus_message) + c = cpt + msg[] = nimbus_message( + kind : evmcCall.ord.evmc_call_kind, + depth : (cpt.msg.depth + 1).int32, + gas : childGasLimit, + sender : p.sender, + recipient : p.contractAddress, + code_address: p.codeAddress, + input_data : cpt.memory.readPtr(p.memInPos), + input_size : p.memInLen.uint, + value : toEvmc(p.value), + flags : p.flags.uint32 + ) + c.execSubCall(msg, p) + else: + cpt.execSubCall( + memPos = p.memOutPos, + memLen = p.memOutLen, + childMsg = Message( + kind: evmcCall, + depth: cpt.msg.depth + 1, + gas: childGasLimit, + sender: p.sender, + contractAddress: p.contractAddress, + codeAddress: p.codeAddress, + value: p.value, + data: cpt.memory.read(p.memInPos, p.memInLen), + flags: p.flags)) # --------------------- callCodeOp: Vm2OpFn = proc(k: var Vm2Ctx) = ## 0xf2, Message-call into this account with an alternative account's code. 
let - p = k.cpt.callCodeParams + cpt = k.cpt + p = cpt.callCodeParams - var (gasCost, childGasLimit) = k.cpt.gasCosts[CallCode].c_handler( - p.value, - GasParams( - kind: CallCode, - c_isNewAccount: not k.cpt.accountExists(p.contractAddress), - c_gasBalance: k.cpt.gasMeter.gasRemaining, - c_contractGas: p.gas, - c_currentMemSize: k.cpt.memory.len, - c_memOffset: p.memOffset, - c_memLength: p.memLength)) + cpt.asyncChainTo(ifNecessaryGetAccounts(cpt.vmState, @[p.sender])): + cpt.asyncChainTo(ifNecessaryGetCodeForAccounts(cpt.vmState, @[p.contractAddress, p.codeAddress])): + var (gasCost, childGasLimit) = cpt.gasCosts[CallCode].c_handler( + p.value, + GasParams( + kind: CallCode, + c_isNewAccount: not cpt.accountExists(p.contractAddress), + c_gasBalance: cpt.gasMeter.gasRemaining, + c_contractGas: p.gas, + c_currentMemSize: cpt.memory.len, + c_memOffset: p.memOffset, + c_memLength: p.memLength)) - # EIP 2046: temporary disabled - # reduce gas fee for precompiles - # from 700 to 40 - if gasCost >= 0: - k.cpt.gasMeter.consumeGas(gasCost, reason = $CallCode) + # EIP 2046: temporary disabled + # reduce gas fee for precompiles + # from 700 to 40 + if gasCost >= 0: + cpt.gasMeter.consumeGas(gasCost, reason = $CallCode) - k.cpt.returnData.setLen(0) + cpt.returnData.setLen(0) - if k.cpt.msg.depth >= MaxCallDepth: - debug "Computation Failure", - reason = "Stack too deep", - maximumDepth = MaxCallDepth, - depth = k.cpt.msg.depth - k.cpt.gasMeter.returnGas(childGasLimit) - return + if cpt.msg.depth >= MaxCallDepth: + debug "Computation Failure", + reason = "Stack too deep", + maximumDepth = MaxCallDepth, + depth = cpt.msg.depth + cpt.gasMeter.returnGas(childGasLimit) + return - # EIP 2046: temporary disabled - # reduce gas fee for precompiles - # from 700 to 40 - if gasCost < 0 and childGasLimit <= 0: - raise newException( - OutOfGas, "Gas not enough to perform calculation (callCode)") + # EIP 2046: temporary disabled + # reduce gas fee for precompiles + # from 700 to 40 + if gasCost < 0 and childGasLimit <= 0: + raise newException( + OutOfGas, "Gas not enough to perform calculation (callCode)") - k.cpt.memory.extend(p.memInPos, p.memInLen) - k.cpt.memory.extend(p.memOutPos, p.memOutLen) + cpt.memory.extend(p.memInPos, p.memInLen) + cpt.memory.extend(p.memOutPos, p.memOutLen) - let senderBalance = k.cpt.getBalance(p.sender) - if senderBalance < p.value: - #debug "Insufficient funds", - # available = senderBalance, - # needed = k.cpt.msg.value - k.cpt.gasMeter.returnGas(childGasLimit) - return + let senderBalance = cpt.getBalance(p.sender) + if senderBalance < p.value: + #debug "Insufficient funds", + # available = senderBalance, + # needed = cpt.msg.value + cpt.gasMeter.returnGas(childGasLimit) + return - when evmc_enabled: - let - msg = new(nimbus_message) - c = k.cpt - msg[] = nimbus_message( - kind : evmcCallCode.ord.evmc_call_kind, - depth : (k.cpt.msg.depth + 1).int32, - gas : childGasLimit, - sender : p.sender, - recipient : p.contractAddress, - code_address: p.codeAddress, - input_data : k.cpt.memory.readPtr(p.memInPos), - input_size : p.memInLen.uint, - value : toEvmc(p.value), - flags : p.flags.uint32 - ) - c.execSubCall(msg, p) - else: - k.cpt.execSubCall( - memPos = p.memOutPos, - memLen = p.memOutLen, - childMsg = Message( - kind: evmcCallCode, - depth: k.cpt.msg.depth + 1, - gas: childGasLimit, - sender: p.sender, - contractAddress: p.contractAddress, - codeAddress: p.codeAddress, - value: p.value, - data: k.cpt.memory.read(p.memInPos, p.memInLen), - flags: p.flags)) + when evmc_enabled: 
+ let + msg = new(nimbus_message) + c = cpt + msg[] = nimbus_message( + kind : evmcCallCode.ord.evmc_call_kind, + depth : (cpt.msg.depth + 1).int32, + gas : childGasLimit, + sender : p.sender, + recipient : p.contractAddress, + code_address: p.codeAddress, + input_data : cpt.memory.readPtr(p.memInPos), + input_size : p.memInLen.uint, + value : toEvmc(p.value), + flags : p.flags.uint32 + ) + c.execSubCall(msg, p) + else: + cpt.execSubCall( + memPos = p.memOutPos, + memLen = p.memOutLen, + childMsg = Message( + kind: evmcCallCode, + depth: cpt.msg.depth + 1, + gas: childGasLimit, + sender: p.sender, + contractAddress: p.contractAddress, + codeAddress: p.codeAddress, + value: p.value, + data: cpt.memory.read(p.memInPos, p.memInLen), + flags: p.flags)) # --------------------- @@ -368,72 +374,75 @@ const ## 0xf4, Message-call into this account with an alternative account's ## code, but persisting the current values for sender and value. let - p = k.cpt.delegateCallParams + cpt = k.cpt + p = cpt.delegateCallParams - var (gasCost, childGasLimit) = k.cpt.gasCosts[DelegateCall].c_handler( - p.value, - GasParams( - kind: DelegateCall, - c_isNewAccount: not k.cpt.accountExists(p.contractAddress), - c_gasBalance: k.cpt.gasMeter.gasRemaining, - c_contractGas: p.gas, - c_currentMemSize: k.cpt.memory.len, - c_memOffset: p.memOffset, - c_memLength: p.memLength)) + cpt.asyncChainTo(ifNecessaryGetAccounts(cpt.vmState, @[p.sender])): + cpt.asyncChainTo(ifNecessaryGetCodeForAccounts(cpt.vmState, @[p.contractAddress, p.codeAddress])): + var (gasCost, childGasLimit) = cpt.gasCosts[DelegateCall].c_handler( + p.value, + GasParams( + kind: DelegateCall, + c_isNewAccount: not cpt.accountExists(p.contractAddress), + c_gasBalance: cpt.gasMeter.gasRemaining, + c_contractGas: p.gas, + c_currentMemSize: cpt.memory.len, + c_memOffset: p.memOffset, + c_memLength: p.memLength)) - # EIP 2046: temporary disabled - # reduce gas fee for precompiles - # from 700 to 40 - if gasCost >= 0: - k.cpt.gasMeter.consumeGas(gasCost, reason = $DelegateCall) + # EIP 2046: temporary disabled + # reduce gas fee for precompiles + # from 700 to 40 + if gasCost >= 0: + cpt.gasMeter.consumeGas(gasCost, reason = $DelegateCall) - k.cpt.returnData.setLen(0) - if k.cpt.msg.depth >= MaxCallDepth: - debug "Computation Failure", - reason = "Stack too deep", - maximumDepth = MaxCallDepth, - depth = k.cpt.msg.depth - k.cpt.gasMeter.returnGas(childGasLimit) - return + cpt.returnData.setLen(0) + if cpt.msg.depth >= MaxCallDepth: + debug "Computation Failure", + reason = "Stack too deep", + maximumDepth = MaxCallDepth, + depth = cpt.msg.depth + cpt.gasMeter.returnGas(childGasLimit) + return - if gasCost < 0 and childGasLimit <= 0: - raise newException( - OutOfGas, "Gas not enough to perform calculation (delegateCall)") + if gasCost < 0 and childGasLimit <= 0: + raise newException( + OutOfGas, "Gas not enough to perform calculation (delegateCall)") - k.cpt.memory.extend(p.memInPos, p.memInLen) - k.cpt.memory.extend(p.memOutPos, p.memOutLen) + cpt.memory.extend(p.memInPos, p.memInLen) + cpt.memory.extend(p.memOutPos, p.memOutLen) - when evmc_enabled: - let - msg = new(nimbus_message) - c = k.cpt - msg[] = nimbus_message( - kind : evmcDelegateCall.ord.evmc_call_kind, - depth : (k.cpt.msg.depth + 1).int32, - gas : childGasLimit, - sender : p.sender, - recipient : p.contractAddress, - code_address: p.codeAddress, - input_data : k.cpt.memory.readPtr(p.memInPos), - input_size : p.memInLen.uint, - value : toEvmc(p.value), - flags : p.flags.uint32 - ) - 
c.execSubCall(msg, p) - else: - k.cpt.execSubCall( - memPos = p.memOutPos, - memLen = p.memOutLen, - childMsg = Message( - kind: evmcDelegateCall, - depth: k.cpt.msg.depth + 1, - gas: childGasLimit, - sender: p.sender, - contractAddress: p.contractAddress, - codeAddress: p.codeAddress, - value: p.value, - data: k.cpt.memory.read(p.memInPos, p.memInLen), - flags: p.flags)) + when evmc_enabled: + let + msg = new(nimbus_message) + c = cpt + msg[] = nimbus_message( + kind : evmcDelegateCall.ord.evmc_call_kind, + depth : (cpt.msg.depth + 1).int32, + gas : childGasLimit, + sender : p.sender, + recipient : p.contractAddress, + code_address: p.codeAddress, + input_data : cpt.memory.readPtr(p.memInPos), + input_size : p.memInLen.uint, + value : toEvmc(p.value), + flags : p.flags.uint32 + ) + c.execSubCall(msg, p) + else: + cpt.execSubCall( + memPos = p.memOutPos, + memLen = p.memOutLen, + childMsg = Message( + kind: evmcDelegateCall, + depth: cpt.msg.depth + 1, + gas: childGasLimit, + sender: p.sender, + contractAddress: p.contractAddress, + codeAddress: p.codeAddress, + value: p.value, + data: cpt.memory.read(p.memInPos, p.memInLen), + flags: p.flags)) # --------------------- @@ -441,77 +450,80 @@ const ## 0xfa, Static message-call into an account. let - p = k.cpt.staticCallParams + cpt = k.cpt + p = cpt.staticCallParams - var (gasCost, childGasLimit) = k.cpt.gasCosts[StaticCall].c_handler( - p.value, - GasParams( - kind: StaticCall, - c_isNewAccount: not k.cpt.accountExists(p.contractAddress), - c_gasBalance: k.cpt.gasMeter.gasRemaining, - c_contractGas: p.gas, - c_currentMemSize: k.cpt.memory.len, - c_memOffset: p.memOffset, - c_memLength: p.memLength)) + cpt.asyncChainTo(ifNecessaryGetAccounts(cpt.vmState, @[p.sender])): + cpt.asyncChainTo(ifNecessaryGetCodeForAccounts(cpt.vmState, @[p.contractAddress, p.codeAddress])): + var (gasCost, childGasLimit) = cpt.gasCosts[StaticCall].c_handler( + p.value, + GasParams( + kind: StaticCall, + c_isNewAccount: not cpt.accountExists(p.contractAddress), + c_gasBalance: cpt.gasMeter.gasRemaining, + c_contractGas: p.gas, + c_currentMemSize: cpt.memory.len, + c_memOffset: p.memOffset, + c_memLength: p.memLength)) - # EIP 2046: temporary disabled - # reduce gas fee for precompiles - # from 700 to 40 - # - # when opCode == StaticCall: - # if k.cpt.fork >= FkBerlin and codeAddress.toInt <= MaxPrecompilesAddr: - # gasCost = gasCost - 660.GasInt - if gasCost >= 0: - k.cpt.gasMeter.consumeGas(gasCost, reason = $StaticCall) + # EIP 2046: temporary disabled + # reduce gas fee for precompiles + # from 700 to 40 + # + # when opCode == StaticCall: + # if cpt.fork >= FkBerlin and codeAddress.toInt <= MaxPrecompilesAddr: + # gasCost = gasCost - 660.GasInt + if gasCost >= 0: + cpt.gasMeter.consumeGas(gasCost, reason = $StaticCall) - k.cpt.returnData.setLen(0) + cpt.returnData.setLen(0) - if k.cpt.msg.depth >= MaxCallDepth: - debug "Computation Failure", - reason = "Stack too deep", - maximumDepth = MaxCallDepth, - depth = k.cpt.msg.depth - k.cpt.gasMeter.returnGas(childGasLimit) - return + if cpt.msg.depth >= MaxCallDepth: + debug "Computation Failure", + reason = "Stack too deep", + maximumDepth = MaxCallDepth, + depth = cpt.msg.depth + cpt.gasMeter.returnGas(childGasLimit) + return - if gasCost < 0 and childGasLimit <= 0: - raise newException( - OutOfGas, "Gas not enough to perform calculation (staticCall)") + if gasCost < 0 and childGasLimit <= 0: + raise newException( + OutOfGas, "Gas not enough to perform calculation (staticCall)") - k.cpt.memory.extend(p.memInPos, 
p.memInLen) - k.cpt.memory.extend(p.memOutPos, p.memOutLen) + cpt.memory.extend(p.memInPos, p.memInLen) + cpt.memory.extend(p.memOutPos, p.memOutLen) - when evmc_enabled: - let - msg = new(nimbus_message) - c = k.cpt - msg[] = nimbus_message( - kind : evmcCall.ord.evmc_call_kind, - depth : (k.cpt.msg.depth + 1).int32, - gas : childGasLimit, - sender : p.sender, - recipient : p.contractAddress, - code_address: p.codeAddress, - input_data : k.cpt.memory.readPtr(p.memInPos), - input_size : p.memInLen.uint, - value : toEvmc(p.value), - flags : p.flags.uint32 - ) - c.execSubCall(msg, p) - else: - k.cpt.execSubCall( - memPos = p.memOutPos, - memLen = p.memOutLen, - childMsg = Message( - kind: evmcCall, - depth: k.cpt.msg.depth + 1, - gas: childGasLimit, - sender: p.sender, - contractAddress: p.contractAddress, - codeAddress: p.codeAddress, - value: p.value, - data: k.cpt.memory.read(p.memInPos, p.memInLen), - flags: p.flags)) + when evmc_enabled: + let + msg = new(nimbus_message) + c = cpt + msg[] = nimbus_message( + kind : evmcCall.ord.evmc_call_kind, + depth : (cpt.msg.depth + 1).int32, + gas : childGasLimit, + sender : p.sender, + recipient : p.contractAddress, + code_address: p.codeAddress, + input_data : cpt.memory.readPtr(p.memInPos), + input_size : p.memInLen.uint, + value : toEvmc(p.value), + flags : p.flags.uint32 + ) + c.execSubCall(msg, p) + else: + cpt.execSubCall( + memPos = p.memOutPos, + memLen = p.memOutLen, + childMsg = Message( + kind: evmcCall, + depth: cpt.msg.depth + 1, + gas: childGasLimit, + sender: p.sender, + contractAddress: p.contractAddress, + codeAddress: p.codeAddress, + value: p.value, + data: cpt.memory.read(p.memInPos, p.memInLen), + flags: p.flags)) # ------------------------------------------------------------------------------ # Public, op exec table entries diff --git a/nimbus/evm/interpreter/op_handlers/oph_envinfo.nim b/nimbus/evm/interpreter/op_handlers/oph_envinfo.nim index 10430fd72..08cb728cb 100644 --- a/nimbus/evm/interpreter/op_handlers/oph_envinfo.nim +++ b/nimbus/evm/interpreter/op_handlers/oph_envinfo.nim @@ -18,6 +18,7 @@ import ../../computation, ../../memory, ../../stack, + ../../async/operations, ../gas_costs, ../gas_meter, ../op_codes, @@ -71,17 +72,21 @@ const balanceOp: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x31, Get balance of the given account. - let address = k.cpt.stack.popAddress - k.cpt.stack.push: - k.cpt.getBalance(address) + let cpt = k.cpt + let address = cpt.stack.popAddress + cpt.asyncChainTo(ifNecessaryGetAccount(cpt.vmState, address)): + cpt.stack.push: + cpt.getBalance(address) balanceEIP2929Op: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x31, EIP292: Get balance of the given account for Berlin and later - let address = k.cpt.stack.popAddress() + let cpt = k.cpt + let address = cpt.stack.popAddress() - k.cpt.gasEip2929AccountCheck(address) - k.cpt.stack.push: - k.cpt.getBalance(address) + cpt.asyncChainTo(ifNecessaryGetAccount(cpt.vmState, address)): + cpt.gasEip2929AccountCheck(address) + cpt.stack.push: + cpt.getBalance(address) # ------------------ @@ -144,23 +149,27 @@ const codeSizeOp: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x38, Get size of code running in current environment. - k.cpt.stack.push: - k.cpt.code.len + let cpt = k.cpt + cpt.asyncChainTo(ifNecessaryGetCode(cpt.vmState, cpt.msg.contractAddress)): + cpt.stack.push: + cpt.code.len codeCopyOp: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x39, Copy code running in current environment to memory. 
- let (memStartPos, copyStartPos, size) = k.cpt.stack.popInt(3) + let cpt = k.cpt + cpt.asyncChainTo(ifNecessaryGetCode(cpt.vmState, cpt.msg.contractAddress)): + let (memStartPos, copyStartPos, size) = cpt.stack.popInt(3) - # TODO tests: https://github.com/status-im/nimbus/issues/67 - let (memPos, copyPos, len) = - (memStartPos.cleanMemRef, copyStartPos.cleanMemRef, size.cleanMemRef) + # TODO tests: https://github.com/status-im/nimbus/issues/67 + let (memPos, copyPos, len) = + (memStartPos.cleanMemRef, copyStartPos.cleanMemRef, size.cleanMemRef) - k.cpt.gasMeter.consumeGas( - k.cpt.gasCosts[CodeCopy].m_handler(k.cpt.memory.len, memPos, len), - reason = "CodeCopy fee") + cpt.gasMeter.consumeGas( + cpt.gasCosts[CodeCopy].m_handler(cpt.memory.len, memPos, len), + reason = "CodeCopy fee") - k.cpt.memory.writePaddedResult(k.cpt.code.bytes, memPos, copyPos, len) + cpt.memory.writePaddedResult(cpt.code.bytes, memPos, copyPos, len) gasPriceOp: Vm2OpFn = proc (k: var Vm2Ctx) = @@ -172,51 +181,59 @@ const extCodeSizeOp: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x3b, Get size of an account's code + let cpt = k.cpt let address = k.cpt.stack.popAddress() - k.cpt.stack.push: - k.cpt.getCodeSize(address) + cpt.asyncChainTo(ifNecessaryGetCode(cpt.vmState, address)): + cpt.stack.push: + cpt.getCodeSize(address) extCodeSizeEIP2929Op: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x3b, Get size of an account's code - let address = k.cpt.stack.popAddress() + let cpt = k.cpt + let address = cpt.stack.popAddress() - k.cpt.gasEip2929AccountCheck(address) - k.cpt.stack.push: - k.cpt.getCodeSize(address) + cpt.asyncChainTo(ifNecessaryGetCode(cpt.vmState, address)): + cpt.gasEip2929AccountCheck(address) + cpt.stack.push: + cpt.getCodeSize(address) # ----------- extCodeCopyOp: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x3c, Copy an account's code to memory. - let address = k.cpt.stack.popAddress() + let cpt = k.cpt + let address = cpt.stack.popAddress() - let (memStartPos, codeStartPos, size) = k.cpt.stack.popInt(3) - let (memPos, codePos, len) = - (memStartPos.cleanMemRef, codeStartPos.cleanMemRef, size.cleanMemRef) + cpt.asyncChainTo(ifNecessaryGetCode(cpt.vmState, address)): + let (memStartPos, codeStartPos, size) = cpt.stack.popInt(3) + let (memPos, codePos, len) = + (memStartPos.cleanMemRef, codeStartPos.cleanMemRef, size.cleanMemRef) - k.cpt.gasMeter.consumeGas( - k.cpt.gasCosts[ExtCodeCopy].m_handler(k.cpt.memory.len, memPos, len), - reason = "ExtCodeCopy fee") + cpt.gasMeter.consumeGas( + cpt.gasCosts[ExtCodeCopy].m_handler(cpt.memory.len, memPos, len), + reason = "ExtCodeCopy fee") - let codeBytes = k.cpt.getCode(address) - k.cpt.memory.writePaddedResult(codeBytes, memPos, codePos, len) + let codeBytes = cpt.getCode(address) + cpt.memory.writePaddedResult(codeBytes, memPos, codePos, len) extCodeCopyEIP2929Op: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x3c, Copy an account's code to memory. 
- let address = k.cpt.stack.popAddress() + let cpt = k.cpt + let address = cpt.stack.popAddress() - let (memStartPos, codeStartPos, size) = k.cpt.stack.popInt(3) - let (memPos, codePos, len) = (memStartPos.cleanMemRef, - codeStartPos.cleanMemRef, size.cleanMemRef) - k.cpt.gasMeter.consumeGas( - k.cpt.gasCosts[ExtCodeCopy].m_handler(k.cpt.memory.len, memPos, len), - reason = "ExtCodeCopy fee") + cpt.asyncChainTo(ifNecessaryGetCode(cpt.vmState, address)): + let (memStartPos, codeStartPos, size) = cpt.stack.popInt(3) + let (memPos, codePos, len) = (memStartPos.cleanMemRef, + codeStartPos.cleanMemRef, size.cleanMemRef) + cpt.gasMeter.consumeGas( + cpt.gasCosts[ExtCodeCopy].m_handler(cpt.memory.len, memPos, len), + reason = "ExtCodeCopy fee") - k.cpt.gasEip2929AccountCheck(address) + cpt.gasEip2929AccountCheck(address) - let codeBytes = k.cpt.getCode(address) - k.cpt.memory.writePaddedResult(codeBytes, memPos, codePos, len) + let codeBytes = cpt.getCode(address) + cpt.memory.writePaddedResult(codeBytes, memPos, codePos, len) # ----------- @@ -251,18 +268,22 @@ const extCodeHashOp: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x3f, Returns the keccak256 hash of a contract’s code + let cpt = k.cpt let address = k.cpt.stack.popAddress() - k.cpt.stack.push: - k.cpt.getCodeHash(address) + cpt.asyncChainTo(ifNecessaryGetCode(cpt.vmState, address)): + cpt.stack.push: + cpt.getCodeHash(address) extCodeHashEIP2929Op: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x3f, EIP2929: Returns the keccak256 hash of a contract’s code + let cpt = k.cpt let address = k.cpt.stack.popAddress() - k.cpt.gasEip2929AccountCheck(address) + cpt.asyncChainTo(ifNecessaryGetCode(cpt.vmState, address)): + cpt.gasEip2929AccountCheck(address) - k.cpt.stack.push: - k.cpt.getCodeHash(address) + cpt.stack.push: + cpt.getCodeHash(address) # ------------------------------------------------------------------------------ # Public, op exec table entries diff --git a/nimbus/evm/interpreter/op_handlers/oph_memory.nim b/nimbus/evm/interpreter/op_handlers/oph_memory.nim index f51afc58c..97dcdb66e 100644 --- a/nimbus/evm/interpreter/op_handlers/oph_memory.nim +++ b/nimbus/evm/interpreter/op_handlers/oph_memory.nim @@ -174,91 +174,101 @@ const sloadEIP2929Op: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x54, EIP2929: Load word from storage for Berlin and later - let (slot) = k.cpt.stack.popInt(1) + let cpt = k.cpt + let (slot) = cpt.stack.popInt(1) - when evmc_enabled: - let gasCost = if k.cpt.host.accessStorage(k.cpt.msg.contractAddress, slot) == EVMC_ACCESS_COLD: - ColdSloadCost - else: - WarmStorageReadCost - k.cpt.gasMeter.consumeGas(gasCost, reason = "sloadEIP2929") - else: - k.cpt.vmState.mutateStateDB: - let gasCost = if not db.inAccessList(k.cpt.msg.contractAddress, slot): - db.accessList(k.cpt.msg.contractAddress, slot) + cpt.asyncChainTo(ifNecessaryGetSlot(cpt.vmState, cpt.msg.contractAddress, slot)): + when evmc_enabled: + let gasCost = if cpt.host.accessStorage(cpt.msg.contractAddress, slot) == EVMC_ACCESS_COLD: ColdSloadCost else: WarmStorageReadCost - k.cpt.gasMeter.consumeGas(gasCost, reason = "sloadEIP2929") - k.cpt.stack.push: - k.cpt.getStorage(slot) + cpt.gasMeter.consumeGas(gasCost, reason = "sloadEIP2929") + else: + cpt.vmState.mutateStateDB: + let gasCost = if not db.inAccessList(cpt.msg.contractAddress, slot): + db.accessList(cpt.msg.contractAddress, slot) + ColdSloadCost + else: + WarmStorageReadCost + cpt.gasMeter.consumeGas(gasCost, reason = "sloadEIP2929") + cpt.stack.push: + cpt.getStorage(slot) # ------- sstoreOp: Vm2OpFn = proc (k: var 
Vm2Ctx) = ## 0x55, Save word to storage. - let (slot, newValue) = k.cpt.stack.popInt(2) + let cpt = k.cpt + let (slot, newValue) = cpt.stack.popInt(2) - checkInStaticContext(k.cpt) - when evmc_enabled: - sstoreEvmc(k.cpt, slot, newValue) - else: - sstoreImpl(k.cpt, slot, newValue) + checkInStaticContext(cpt) + cpt.asyncChainTo(ifNecessaryGetSlot(cpt.vmState, cpt.msg.contractAddress, slot)): + when evmc_enabled: + sstoreEvmc(cpt, slot, newValue) + else: + sstoreImpl(cpt, slot, newValue) sstoreEIP1283Op: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x55, EIP1283: sstore for Constantinople and later - let (slot, newValue) = k.cpt.stack.popInt(2) + let cpt = k.cpt + let (slot, newValue) = cpt.stack.popInt(2) - checkInStaticContext(k.cpt) - when evmc_enabled: - sstoreEvmc(k.cpt, slot, newValue) - else: - sstoreNetGasMeteringImpl(k.cpt, slot, newValue) + checkInStaticContext(cpt) + cpt.asyncChainTo(ifNecessaryGetSlot(cpt.vmState, cpt.msg.contractAddress, slot)): + when evmc_enabled: + sstoreEvmc(cpt, slot, newValue) + else: + sstoreNetGasMeteringImpl(cpt, slot, newValue) sstoreEIP2200Op: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x55, EIP2200: sstore for Istanbul and later - let (slot, newValue) = k.cpt.stack.popInt(2) + let cpt = k.cpt + let (slot, newValue) = cpt.stack.popInt(2) - checkInStaticContext(k.cpt) + checkInStaticContext(cpt) const SentryGasEIP2200 = 2300 - if k.cpt.gasMeter.gasRemaining <= SentryGasEIP2200: + if cpt.gasMeter.gasRemaining <= SentryGasEIP2200: raise newException( OutOfGas, "Gas not enough to perform EIP2200 SSTORE") - when evmc_enabled: - sstoreEvmc(k.cpt, slot, newValue) - else: - sstoreNetGasMeteringImpl(k.cpt, slot, newValue) + cpt.asyncChainTo(ifNecessaryGetSlot(cpt.vmState, cpt.msg.contractAddress, slot)): + when evmc_enabled: + sstoreEvmc(cpt, slot, newValue) + else: + sstoreNetGasMeteringImpl(cpt, slot, newValue) sstoreEIP2929Op: Vm2OpFn = proc (k: var Vm2Ctx) = ## 0x55, EIP2929: sstore for Berlin and later - let (slot, newValue) = k.cpt.stack.popInt(2) - checkInStaticContext(k.cpt) + let cpt = k.cpt + let (slot, newValue) = cpt.stack.popInt(2) + checkInStaticContext(cpt) # Minimum gas required to be present for an SSTORE call, not consumed const SentryGasEIP2200 = 2300 - if k.cpt.gasMeter.gasRemaining <= SentryGasEIP2200: + if cpt.gasMeter.gasRemaining <= SentryGasEIP2200: raise newException(OutOfGas, "Gas not enough to perform EIP2200 SSTORE") - when evmc_enabled: - if k.cpt.host.accessStorage(k.cpt.msg.contractAddress, slot) == EVMC_ACCESS_COLD: - k.cpt.gasMeter.consumeGas(ColdSloadCost, reason = "sstoreEIP2929") - else: - k.cpt.vmState.mutateStateDB: - if not db.inAccessList(k.cpt.msg.contractAddress, slot): - db.accessList(k.cpt.msg.contractAddress, slot) - k.cpt.gasMeter.consumeGas(ColdSloadCost, reason = "sstoreEIP2929") + cpt.asyncChainTo(ifNecessaryGetSlot(cpt.vmState, cpt.msg.contractAddress, slot)): + when evmc_enabled: + if cpt.host.accessStorage(cpt.msg.contractAddress, slot) == EVMC_ACCESS_COLD: + cpt.gasMeter.consumeGas(ColdSloadCost, reason = "sstoreEIP2929") + else: + cpt.vmState.mutateStateDB: + if not db.inAccessList(cpt.msg.contractAddress, slot): + db.accessList(cpt.msg.contractAddress, slot) + cpt.gasMeter.consumeGas(ColdSloadCost, reason = "sstoreEIP2929") - when evmc_enabled: - sstoreEvmc(k.cpt, slot, newValue) - else: - sstoreNetGasMeteringImpl(k.cpt, slot, newValue) + when evmc_enabled: + sstoreEvmc(cpt, slot, newValue) + else: + sstoreNetGasMeteringImpl(cpt, slot, newValue) # ------- diff --git a/nimbus/evm/interpreter/op_handlers/oph_sysops.nim 
b/nimbus/evm/interpreter/op_handlers/oph_sysops.nim index 0464d9c49..d9c47618f 100644 --- a/nimbus/evm/interpreter/op_handlers/oph_sysops.nim +++ b/nimbus/evm/interpreter/op_handlers/oph_sysops.nim @@ -18,6 +18,7 @@ import ../../memory, ../../stack, ../../types, + ../../async/operations, ../gas_costs, ../gas_meter, ../op_codes, @@ -77,73 +78,80 @@ const selfDestructOp: Vm2OpFn = proc(k: var Vm2Ctx) = ## 0xff, Halt execution and register account for later deletion. - let beneficiary = k.cpt.stack.popAddress() - k.cpt.selfDestruct(beneficiary) + let cpt = k.cpt + let beneficiary = cpt.stack.popAddress() + cpt.asyncChainTo(ifNecessaryGetAccount(cpt.vmState, beneficiary)): + cpt.selfDestruct(beneficiary) selfDestructEIP150Op: Vm2OpFn = proc(k: var Vm2Ctx) = ## selfDestructEip150 (auto generated comment) - let beneficiary = k.cpt.stack.popAddress() + let cpt = k.cpt + let beneficiary = cpt.stack.popAddress() + cpt.asyncChainTo(ifNecessaryGetAccount(cpt.vmState, beneficiary)): + let gasParams = GasParams( + kind: SelfDestruct, + sd_condition: not cpt.accountExists(beneficiary)) - let gasParams = GasParams( - kind: SelfDestruct, - sd_condition: not k.cpt.accountExists(beneficiary)) - - let gasCost = - k.cpt.gasCosts[SelfDestruct].c_handler(0.u256, gasParams).gasCost - k.cpt.gasMeter.consumeGas( - gasCost, reason = "SELFDESTRUCT EIP150") - k.cpt.selfDestruct(beneficiary) + let gasCost = + cpt.gasCosts[SelfDestruct].c_handler(0.u256, gasParams).gasCost + cpt.gasMeter.consumeGas( + gasCost, reason = "SELFDESTRUCT EIP150") + cpt.selfDestruct(beneficiary) selfDestructEIP161Op: Vm2OpFn = proc(k: var Vm2Ctx) = ## selfDestructEip161 (auto generated comment) - checkInStaticContext(k.cpt) + let cpt = k.cpt + checkInStaticContext(cpt) - let - beneficiary = k.cpt.stack.popAddress() - isDead = not k.cpt.accountExists(beneficiary) - balance = k.cpt.getBalance(k.cpt.msg.contractAddress) + let beneficiary = cpt.stack.popAddress() + cpt.asyncChainTo(ifNecessaryGetAccount(cpt.vmState, beneficiary)): + let + isDead = not cpt.accountExists(beneficiary) + balance = cpt.getBalance(cpt.msg.contractAddress) - let gasParams = GasParams( - kind: SelfDestruct, - sd_condition: isDead and not balance.isZero) + let gasParams = GasParams( + kind: SelfDestruct, + sd_condition: isDead and not balance.isZero) - let gasCost = - k.cpt.gasCosts[SelfDestruct].c_handler(0.u256, gasParams).gasCost - k.cpt.gasMeter.consumeGas( - gasCost, reason = "SELFDESTRUCT EIP161") - k.cpt.selfDestruct(beneficiary) + let gasCost = + cpt.gasCosts[SelfDestruct].c_handler(0.u256, gasParams).gasCost + cpt.gasMeter.consumeGas( + gasCost, reason = "SELFDESTRUCT EIP161") + cpt.selfDestruct(beneficiary) selfDestructEIP2929Op: Vm2OpFn = proc(k: var Vm2Ctx) = ## selfDestructEIP2929 (auto generated comment) - checkInStaticContext(k.cpt) + let cpt = k.cpt + checkInStaticContext(cpt) - let - beneficiary = k.cpt.stack.popAddress() - isDead = not k.cpt.accountExists(beneficiary) - balance = k.cpt.getBalance(k.cpt.msg.contractAddress) + let beneficiary = cpt.stack.popAddress() + cpt.asyncChainTo(ifNecessaryGetAccount(cpt.vmState, beneficiary)): + let + isDead = not cpt.accountExists(beneficiary) + balance = cpt.getBalance(cpt.msg.contractAddress) - let gasParams = GasParams( - kind: SelfDestruct, - sd_condition: isDead and not balance.isZero) + let gasParams = GasParams( + kind: SelfDestruct, + sd_condition: isDead and not balance.isZero) - var gasCost = - k.cpt.gasCosts[SelfDestruct].c_handler(0.u256, gasParams).gasCost + var gasCost = + 
cpt.gasCosts[SelfDestruct].c_handler(0.u256, gasParams).gasCost - when evmc_enabled: - if k.cpt.host.accessAccount(beneficiary) == EVMC_ACCESS_COLD: - gasCost = gasCost + ColdAccountAccessCost - else: - k.cpt.vmState.mutateStateDB: - if not db.inAccessList(beneficiary): - db.accessList(beneficiary) + when evmc_enabled: + if cpt.host.accessAccount(beneficiary) == EVMC_ACCESS_COLD: gasCost = gasCost + ColdAccountAccessCost + else: + cpt.vmState.mutateStateDB: + if not db.inAccessList(beneficiary): + db.accessList(beneficiary) + gasCost = gasCost + ColdAccountAccessCost - k.cpt.gasMeter.consumeGas( - gasCost, reason = "SELFDESTRUCT EIP161") - k.cpt.selfDestruct(beneficiary) + cpt.gasMeter.consumeGas( + gasCost, reason = "SELFDESTRUCT EIP161") + cpt.selfDestruct(beneficiary) # ------------------------------------------------------------------------------ # Public, op exec table entries diff --git a/nimbus/evm/interpreter_dispatch.nim b/nimbus/evm/interpreter_dispatch.nim index d5db76a3c..47cbc149f 100644 --- a/nimbus/evm/interpreter_dispatch.nim +++ b/nimbus/evm/interpreter_dispatch.nim @@ -19,6 +19,7 @@ import ".."/[constants, utils/utils, db/accounts_cache], "."/[code_stream, computation], "."/[message, precompiles, state, types], + ./async/operations, ./interpreter/[op_dispatcher, gas_costs] {.push raises: [].} @@ -198,9 +199,30 @@ proc executeOpcodes*(c: Computation, shouldPrepareTracer: bool = true) break try: - if not c.continuation.isNil: - (c.continuation)() - c.selectVM(fork, shouldPrepareTracer) + let cont = c.continuation + if not cont.isNil: + c.continuation = nil + cont() + let nextCont = c.continuation + if nextCont.isNil: + # FIXME-Adam: I hate how convoluted this is. See also the comment in + # op_dispatcher.nim. The idea here is that we need to call + # traceOpCodeEnded at the end of the opcode (and only if there + # hasn't been an exception thrown); otherwise we run into problems + # if an exception (e.g. out of gas) is thrown during a continuation. + # So this code says, "If we've just run a continuation, but there's + # no *subsequent* continuation, then the opcode is done." + if c.tracingEnabled and not(cont.isNil) and nextCont.isNil: + c.traceOpCodeEnded(c.instr, c.opIndex) + case c.instr + of Return, Revert, SelfDestruct: # FIXME-Adam: HACK, fix this in a clean way; I think the idea is that these are the ones from the "always break" case in op_dispatcher + discard + else: + c.selectVM(fork, shouldPrepareTracer) + else: + # Return up to the caller, which will run the async operation or child + # and then call this proc again. + discard except CatchableError as e: let msg = e.msg @@ -282,6 +304,9 @@ else: # rather use this one. 
--Adam proc asyncExecCallOrCreate*(c: Computation): Future[void] {.async.} = defer: c.dispose() + + await ifNecessaryGetCode(c.vmState, c.msg.contractAddress) + if c.beforeExec(): return c.executeOpcodes() diff --git a/nimbus/evm/state.nim b/nimbus/evm/state.nim index f2a6b6258..f99f7b461 100644 --- a/nimbus/evm/state.nim +++ b/nimbus/evm/state.nim @@ -21,17 +21,18 @@ import ./types proc init( - self: BaseVMState; - ac: AccountsCache; - parent: BlockHeader; - timestamp: EthTime; - gasLimit: GasInt; - fee: Option[UInt256]; - prevRandao: Hash256; - difficulty: UInt256; - miner: EthAddress; - com: CommonRef; - tracer: TransactionTracer) + self: BaseVMState; + ac: AccountsCache; + parent: BlockHeader; + timestamp: EthTime; + gasLimit: GasInt; + fee: Option[UInt256]; + prevRandao: Hash256; + difficulty: UInt256; + miner: EthAddress; + com: CommonRef; + tracer: TransactionTracer, + asyncFactory: AsyncOperationFactory = AsyncOperationFactory(maybeDataSource: none[AsyncDataSource]())) {.gcsafe.} = ## Initialisation helper self.prevHeaders = @[] @@ -45,7 +46,7 @@ proc init( self.tracer = tracer self.stateDB = ac self.minerAddress = miner - self.asyncFactory = AsyncOperationFactory(maybeDataSource: none[AsyncDataSource]()) + self.asyncFactory = asyncFactory proc init( self: BaseVMState; @@ -267,6 +268,30 @@ proc init*( tracerFlags = tracerFlags) return true +proc statelessInit*( + vmState: BaseVMState; + parent: BlockHeader; ## parent header, account sync position + header: BlockHeader; ## header with tx environment data fields + com: CommonRef; ## block chain config + asyncFactory: AsyncOperationFactory; + tracerFlags: set[TracerFlags] = {}): bool + {.gcsafe, raises: [Defect,CatchableError].} = + var tracer: TransactionTracer + tracer.initTracer(tracerFlags) + vmState.init( + ac = AccountsCache.init(com.db.db, parent.stateRoot, com.pruneTrie), + parent = parent, + timestamp = header.timestamp, + gasLimit = header.gasLimit, + fee = header.fee, + prevRandao = header.prevRandao, + difficulty = header.difficulty, + miner = com.minerAddress(header), + com = com, + tracer = tracer, + asyncFactory = asyncFactory) + return true + method coinbase*(vmState: BaseVMState): EthAddress {.base, gcsafe.} = vmState.minerAddress diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index a3c25ebae..5dd8a5d01 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -28,7 +28,8 @@ import clique/clique_sealer, tx_pool, block_import], ./rpc/merge/merger, ./sync/[legacy, full, protocol, snap, stateless, - protocol/les_protocol, handlers, peers] + protocol/les_protocol, handlers, peers], + ./evm/async/data_sources/json_rpc_data_source when defined(evmc_enabled): import transaction/evmc_dynamic_loader @@ -216,6 +217,14 @@ proc setupP2P(nimbus: NimbusNode, conf: NimbusConf, enableDiscovery = conf.discovery != DiscoveryType.None, waitForPeers = waitForPeers) +proc maybeStatelessAsyncDataSource*(nimbus: NimbusNode, conf: NimbusConf): Option[AsyncDataSource] = + if conf.syncMode == SyncMode.Stateless: + let rpcClient = waitFor(makeAnRpcClient(conf.statelessModeDataSourceUrl)) + let asyncDataSource = realAsyncDataSource(nimbus.ethNode.peerPool, rpcClient, false) + some(asyncDataSource) + else: + none[AsyncDataSource]() + proc localServices(nimbus: NimbusNode, conf: NimbusConf, com: CommonRef, protocols: set[ProtocolFlag]) = # metrics logging @@ -357,30 +366,32 @@ proc localServices(nimbus: NimbusNode, conf: NimbusConf, nimbus.sealingEngine.start() if conf.engineApiEnabled: + let maybeAsyncDataSource = 
maybeStatelessAsyncDataSource(nimbus, conf) if conf.engineApiPort != conf.rpcPort: nimbus.engineApiServer = newRpcHttpServer( [initTAddress(conf.engineApiAddress, conf.engineApiPort)], authHooks = @[httpJwtAuthHook, httpCorsHook] ) - setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiServer, nimbus.merger) + setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiServer, nimbus.merger, maybeAsyncDataSource) setupEthRpc(nimbus.ethNode, nimbus.ctx, com, nimbus.txPool, nimbus.engineApiServer) nimbus.engineApiServer.start() else: - setupEngineAPI(nimbus.sealingEngine, nimbus.rpcServer, nimbus.merger) + setupEngineAPI(nimbus.sealingEngine, nimbus.rpcServer, nimbus.merger, maybeAsyncDataSource) info "Starting engine API server", port = conf.engineApiPort if conf.engineApiWsEnabled: + let maybeAsyncDataSource = maybeStatelessAsyncDataSource(nimbus, conf) if conf.engineApiWsPort != conf.wsPort: nimbus.engineApiWsServer = newRpcWebSocketServer( initTAddress(conf.engineApiWsAddress, conf.engineApiWsPort), authHooks = @[wsJwtAuthHook, wsCorsHook] ) - setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiWsServer, nimbus.merger) + setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiWsServer, nimbus.merger, maybeAsyncDataSource) setupEthRpc(nimbus.ethNode, nimbus.ctx, com, nimbus.txPool, nimbus.engineApiWsServer) nimbus.engineApiWsServer.start() else: - setupEngineAPI(nimbus.sealingEngine, nimbus.wsRpcServer, nimbus.merger) + setupEngineAPI(nimbus.sealingEngine, nimbus.wsRpcServer, nimbus.merger, maybeAsyncDataSource) info "Starting WebSocket engine API server", port = conf.engineApiWsPort diff --git a/nimbus/rpc/engine_api.nim b/nimbus/rpc/engine_api.nim index db9e09419..0df0b40e1 100644 --- a/nimbus/rpc/engine_api.nim +++ b/nimbus/rpc/engine_api.nim @@ -17,8 +17,10 @@ import eth/common/eth_types_rlp, ../common/common, ".."/core/chain/[chain_desc, persist_blocks], + ".."/stateless_runner, ../constants, ../core/[tx_pool, sealer], + ../evm/async/data_sources, ./merge/[mergetypes, mergeutils], # put chronicles import last because Nim # compiler resolve `$` for logging @@ -90,7 +92,7 @@ template unsafeQuantityToInt64(q: Quantity): int64 = # null.) --Adam # https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1 -proc handle_newPayload(sealingEngine: SealingEngineRef, api: EngineApiRef, com: CommonRef, payload: ExecutionPayloadV1 | ExecutionPayloadV2): PayloadStatusV1 {.raises: [CatchableError].} = +proc handle_newPayload(sealingEngine: SealingEngineRef, api: EngineApiRef, com: CommonRef, maybeAsyncDataSource: Option[AsyncDataSource], payload: ExecutionPayloadV1 | ExecutionPayloadV2): PayloadStatusV1 {.raises: [CatchableError].} = trace "Engine API request received", meth = "newPayload", number = $(distinctBase payload.blockNumber), hash = payload.blockHash @@ -116,6 +118,17 @@ proc handle_newPayload(sealingEngine: SealingEngineRef, api: EngineApiRef, com: number = header.blockNumber, hash = blockHash return validStatus(blockHash) + # FIXME-Adam - I'm adding this here, but I don't actually think this is the right place. + # For one thing, it won't even persist the new block. But let's worry about persisting + # after I've gotten a block to come out actually correct. 
--Adam + if maybeAsyncDataSource.isSome: + let r = statelesslyRunBlock(maybeAsyncDataSource.get, com, header, toBlockBody(payload)) + if r.isErr: + error "Stateless execution failed", error=r.error + return invalidStatus() + else: + return validStatus(r.get) + # If the parent is missing, we - in theory - could trigger a sync, but that # would also entail a reorg. That is problematic if multiple sibling blocks # are being fed to us, and even moreso, if some semi-distant uncle shortens @@ -460,10 +473,11 @@ const supportedMethods: HashSet[string] = # I'm trying to keep the handlers below very thin, and move the # bodies up to the various procs above. Once we have multiple # versions, they'll need to be able to share code. -proc setupEngineApi*( +proc setupEngineAPI*( sealingEngine: SealingEngineRef, server: RpcServer, - merger: MergerRef) = + merger: MergerRef, + maybeAsyncDataSource: Option[AsyncDataSource] = none[AsyncDataSource]()) = let api = EngineApiRef.new(merger) @@ -474,14 +488,14 @@ proc setupEngineApi*( # cannot use `params` as param name. see https:#github.com/status-im/nim-json-rpc/issues/128 server.rpc("engine_newPayloadV1") do(payload: ExecutionPayloadV1) -> PayloadStatusV1: - return handle_newPayload(sealingEngine, api, com, payload) + return handle_newPayload(sealingEngine, api, com, maybeAsyncDataSource, payload) server.rpc("engine_newPayloadV2") do(payload: ExecutionPayloadV1OrV2) -> PayloadStatusV1: let p = payload.toExecutionPayloadV1OrExecutionPayloadV2 if p.isOk: - return handle_newPayload(sealingEngine, api, com, p.get) + return handle_newPayload(sealingEngine, api, com, maybeAsyncDataSource, p.get) else: - return handle_newPayload(sealingEngine, api, com, p.error) + return handle_newPayload(sealingEngine, api, com, maybeAsyncDataSource, p.error) server.rpc("engine_getPayloadV1") do(payloadId: PayloadID) -> ExecutionPayloadV1: let r = handle_getPayload(api, payloadId) diff --git a/nimbus/stateless_runner.nim b/nimbus/stateless_runner.nim new file mode 100644 index 000000000..6adc1da6a --- /dev/null +++ b/nimbus/stateless_runner.nim @@ -0,0 +1,138 @@ +import + chronos, + options, + sequtils, + times, + nimcrypto, + os, + stew/byteutils, + stew/results, + json_rpc/rpcclient, + eth/[rlp, common/eth_types, p2p], + core/chain/[chain_desc, persist_blocks], + core/executor/process_block, + db/[db_chain, select_backend, storage_types, distinct_tries, incomplete_db, accounts_cache], + eth/trie/[db, trie_defs], + rpc/rpc_utils, + evm/async/[data_sources, operations, data_sources/json_rpc_data_source], + ./vm_state, ./vm_types, + ./sync/stateless, + chronicles + +from strutils import parseInt, startsWith +from common/chain_config import MainNet, networkParams +from common/common import initializeEmptyDb + + +proc coinbasesOfThisBlockAndUncles(header: BlockHeader, body: BlockBody): seq[EthAddress] = + result.add header.coinbase + for uncle in body.uncles: + result.add(uncle.coinbase) + +proc createVmStateForStatelessMode*(com: CommonRef, header: BlockHeader, body: BlockBody, + parentHeader: BlockHeader, asyncFactory: AsyncOperationFactory): Result[BaseVMState, string] + {.inline.} = + let vmState = BaseVMState() + if not vmState.statelessInit(parentHeader, header, com, asyncFactory, {}): + return err("Cannot initialise VmState for block number " & $(header.blockNumber)) + waitFor(ifNecessaryGetAccounts(vmState, coinbasesOfThisBlockAndUncles(header, body))) + ok(vmState) + + + +proc statelesslyRunBlock*(asyncDataSource: AsyncDataSource, com: CommonRef, header: BlockHeader, body: 
BlockBody): Result[Hash256, string] = + try: + let t0 = now() + + # FIXME-Adam: this doesn't feel like the right place for this; where should it go? + com.db.db.put(emptyRlpHash.data, emptyRlp) + + let blockHash: Hash256 = header.blockHash + + let asyncFactory = AsyncOperationFactory(maybeDataSource: some(asyncDataSource)) + + let parentHeader = waitFor(asyncDataSource.fetchBlockHeaderWithHash(header.parentHash)) + com.db.persistHeaderToDbWithoutSetHeadOrScore(parentHeader) + + info("statelessly running block", blockNumber=header.blockNumber, blockHash=blockHash, parentHash=header.parentHash, parentStateRoot=parentHeader.stateRoot, desiredNewStateRoot=header.stateRoot) + + let vmState = createVmStateForStatelessMode(com, header, body, parentHeader, asyncFactory).get + let vres = processBlockNotPoA(vmState, header, body) + + let elapsedTime = now() - t0 + + let headerStateRoot = header.stateRoot + let vmStateRoot = rootHash(vmState.stateDB) + info("finished statelessly running the block", vres=vres, elapsedTime=elapsedTime, durationSpentDoingFetches=durationSpentDoingFetches, fetchCounter=fetchCounter, headerStateRoot=headerStateRoot, vmStateRoot=vmStateRoot) + if headerStateRoot != vmStateRoot: + return err("State roots do not match: header says " & $(headerStateRoot) & ", vmState says " & $(vmStateRoot)) + else: + if vres == ValidationResult.OK: + return ok(blockHash) + else: + return err("Error while statelessly running a block") + except: + let ex = getCurrentException() + echo getStackTrace(ex) + error "Got an exception while statelessly running a block", exMsg = ex.msg + return err("Error while statelessly running a block: " & $(ex.msg)) + +proc statelesslyRunBlock*(asyncDataSource: AsyncDataSource, com: CommonRef, blockHash: Hash256): Result[Hash256, string] = + let (header, body) = waitFor(asyncDataSource.fetchBlockHeaderAndBodyWithHash(blockHash)) + let r = statelesslyRunBlock(asyncDataSource, com, header, body) + if r.isErr: + error("stateless execution failed", hash=blockHash, error=r.error) + else: + info("stateless execution succeeded", hash=blockHash, resultingHash=r.value) + return r + +proc fetchBlockHeaderAndBodyForHashOrNumber(asyncDataSource: AsyncDataSource, hashOrNum: string): Future[(BlockHeader, BlockBody)] {.async.} = + if hashOrNum.startsWith("0x"): + return await asyncDataSource.fetchBlockHeaderAndBodyWithHash(hashOrNum.toHash) + else: + return await asyncDataSource.fetchBlockHeaderAndBodyWithNumber(u256(parseInt(hashOrNum))) + +proc statelesslyRunSequentialBlocks*(asyncDataSource: AsyncDataSource, com: CommonRef, initialBlockNumber: BlockNumber): Result[Hash256, string] = + info("sequential stateless execution beginning", initialBlockNumber=initialBlockNumber) + var n = initialBlockNumber + while true: + let (header, body) = waitFor(asyncDataSource.fetchBlockHeaderAndBodyWithNumber(n)) + let r = statelesslyRunBlock(asyncDataSource, com, header, body) + if r.isErr: + error("stateless execution failed", n=n, h=header.blockHash, error=r.error) + return r + else: + info("stateless execution succeeded", n=n, h=header.blockHash, resultingHash=r.value) + n = n + 1 + +proc statelesslyRunBlock*(asyncDataSource: AsyncDataSource, com: CommonRef, hashOrNum: string): Result[Hash256, string] = + let (header, body) = waitFor(fetchBlockHeaderAndBodyForHashOrNumber(asyncDataSource, hashOrNum)) + return statelesslyRunBlock(asyncDataSource, com, header, body) + + +proc statelesslyRunTransaction*(asyncDataSource: AsyncDataSource, com: CommonRef, headerHash: Hash256, tx: Transaction) 
= + let t0 = now() + + let (header, body) = waitFor(asyncDataSource.fetchBlockHeaderAndBodyWithHash(headerHash)) + + # FIXME-Adam: this doesn't feel like the right place for this; where should it go? + com.db.db.put(emptyRlpHash.data, emptyRlp) + + let blockHash: Hash256 = header.blockHash + + let transaction = com.db.db.beginTransaction() + defer: transaction.rollback() # intentionally throwing away the result of this execution + + let asyncFactory = AsyncOperationFactory(maybeDataSource: some(asyncDataSource)) + let parentHeader = waitFor(asyncDataSource.fetchBlockHeaderWithHash(header.parentHash)) + com.db.persistHeaderToDbWithoutSetHeadOrScore(parentHeader) + + let vmState = createVmStateForStatelessMode(com, header, body, parentHeader, asyncFactory).get + + let r = processTransactions(vmState, header, @[tx]) + if r.isErr: + error("error statelessly running tx", tx=tx, error=r.error) + else: + let elapsedTime = now() - t0 + let gasUsed = vmState.cumulativeGasUsed + info("finished statelessly running the tx", elapsedTime=elapsedTime, gasUsed=gasUsed) diff --git a/nimbus/transaction/call_common.nim b/nimbus/transaction/call_common.nim index a886d19d2..f872fda96 100644 --- a/nimbus/transaction/call_common.nim +++ b/nimbus/transaction/call_common.nim @@ -14,6 +14,7 @@ import ".."/[vm_types, vm_state, vm_computation, vm_state_transactions], ".."/[vm_internals, vm_precompiles, vm_gas_costs], ".."/[db/accounts_cache], + ../evm/async/operations, ../common/evmforks, ./host_types @@ -282,6 +283,10 @@ proc runComputation*(call: CallParams): CallResult # FIXME-duplicatedForAsync proc asyncRunComputation*(call: CallParams): Future[CallResult] {.async.} = + # This has to come before the newComputation call inside setupHost. + if not call.isCreate: + await ifNecessaryGetCodeForAccounts(call.vmState, @[call.to.toEvmc.fromEvmc]) + let host = setupHost(call) prepareToRunComputation(host, call) diff --git a/nimbus/vm_state.nim b/nimbus/vm_state.nim index b88014db4..c0f2e29c4 100644 --- a/nimbus/vm_state.nim +++ b/nimbus/vm_state.nim @@ -32,6 +32,7 @@ export vms.getAndClearLogEntries, vms.getTracingResult, vms.init, + vms.statelessInit, vms.mutateStateDB, vms.new, vms.reinit, diff --git a/vendor/nim-eth b/vendor/nim-eth index 9e89f0dcc..ffc3ef3a7 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit 9e89f0dccc54e4c8a670d073175de720af3423dc +Subproject commit ffc3ef3a7cc622166ce578e9ce0346fc0916bfe3
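
A note on the opcode-handler changes above: every converted handler follows the same two-phase shape. Operands are popped from the stack first, then the remainder of the handler is handed to asyncChainTo, which runs it as a continuation once the ifNecessaryGet* future has delivered any account, code or storage data that was missing locally. A minimal sketch of that shape, written as it would sit inside the handlers' const table; only asyncChainTo, ifNecessaryGetAccount, popAddress, push and getBalance are taken from the patch, the handler itself is hypothetical:

  exampleOp: Vm2OpFn = proc (k: var Vm2Ctx) =
    ## Hypothetical handler: push an account's balance.
    let cpt = k.cpt                       # take the computation once, up front
    let address = cpt.stack.popAddress()  # pop operands *before* suspending
    cpt.asyncChainTo(ifNecessaryGetAccount(cpt.vmState, address)):
      # Everything below runs as a continuation, after the account has been
      # fetched on demand if it was missing from the local database.
      cpt.stack.push:
        cpt.getBalance(address)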
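
The continuation handling in executeOpcodes is easiest to read from the caller's side: when a handler parks itself behind asyncChainTo, executeOpcodes returns instead of dispatching further, the async caller awaits the fetch that was scheduled, and then calls executeOpcodes again so the stored continuation can run (and, if no further continuation was scheduled, normal dispatch resumes). A sketch of that driver loop; the pendingFuture field name is purely illustrative, since the patch only shows the c.continuation side of the handshake:

  proc runUntilDone(c: Computation) {.async.} =
    while true:
      c.executeOpcodes()     # dispatches until done, or until a handler suspends
      if c.continuation.isNil:
        break                # no handler is waiting on remote data
      # A handler called asyncChainTo: await the fetch it scheduled (the field
      # name is hypothetical), then loop so the stored continuation can run.
      await c.pendingFuture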
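
createVmStateForStatelessMode in stateless_runner.nim is essentially a thin wrapper around the new statelessInit; a caller that already has the parent header and an AsyncDataSource can build an on-demand-fetching BaseVMState directly. A short sketch, assuming com, parentHeader, header and dataSource are already in hand:

  let asyncFactory = AsyncOperationFactory(maybeDataSource: some(dataSource))
  let vmState = BaseVMState()
  if not vmState.statelessInit(parentHeader, header, com, asyncFactory):
    raise newException(CatchableError,
      "cannot initialise stateless VM state for block " & $header.blockNumber)
  # From here on, any account, code or storage slot the EVM touches can be
  # pulled lazily through the data source instead of the local database.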
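
On the node side, the data source is built from a JSON-RPC client plus the peer pool, and it stays optional downstream: the new setupEngineAPI parameter defaults to none[AsyncDataSource](), so existing call sites are unchanged. A condensed sketch of the wiring; the URL literal is illustrative (in the patch it comes from the node's configuration) and server stands for whichever RPC server is being set up:

  let rpcClient = waitFor makeAnRpcClient("http://127.0.0.1:8545")  # illustrative URL
  let dataSource = realAsyncDataSource(nimbus.ethNode.peerPool, rpcClient, false)
  setupEngineAPI(nimbus.sealingEngine, server, nimbus.merger, some(dataSource))
  # A node that is not in stateless mode passes none[AsyncDataSource]() (or
  # omits the argument), and handle_newPayload falls through to the existing
  # persist-and-validate path.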
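
stateless_runner.nim layers several entry points over the same core statelesslyRunBlock: by header plus body, by block hash, by a string that is either a 0x-prefixed hash or a decimal block number, and a sequential mode that keeps executing consecutive blocks until one fails. A small driver sketch, assuming dataSource and com are already constructed:

  # One block, identified by hash or by decimal number in string form.
  let r = statelesslyRunBlock(dataSource, com, "0x...")   # placeholder hash
  if r.isErr:
    error "stateless run failed", error = r.error

  # Consecutive blocks starting at a given number; only returns on failure.
  discard statelesslyRunSequentialBlocks(dataSource, com, u256(17_000_000))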
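
Finally, statelesslyRunTransaction reuses the factored-out processTransactions to replay a single transaction against the pre-state of the block named by headerHash, pulling any touched accounts, code and storage slots on demand and rolling the database transaction back afterwards, so nothing is persisted. A usage sketch; tx and headerHash would typically come from an RPC request:

  # Gas used and elapsed time are reported via the
  # "finished statelessly running the tx" log line; all state changes are
  # discarded when the enclosing db transaction is rolled back.
  statelesslyRunTransaction(dataSource, com, headerHash, tx)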