# nimbus-eth1/nimbus/evm/types.nim
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
  chronos,
  json_rpc/rpcclient,
  "."/[stack, memory, code_stream],
  ./interpreter/[gas_costs, op_codes],
  ../db/ledger,
  ../common/[common, evmforks]

# this import not guarded by `when defined(evmc_enabled)`
# because we want to use evmc types such as evmc_call_kind
# and evmc_flags
import
  evmc/evmc
export evmc
{.push raises: [].}

when defined(evmc_enabled):
  import
    ./evmc_api

# Select between small-stack recursion and no recursion. Both are good, fast,
# low resource using methods. Keep both here because true EVMC API requires
# the small-stack method, but Chronos `async` is better without recursion.
const vm_use_recursion* = defined(evmc_enabled)
type
  VMFlag* = enum
    ## Per-execution control flags kept on `BaseVMState`.
    ExecutionOK
    GenerateWitness
    ClearCache

  BlockContext* = object
    ## Block-level inputs to the EVM, derived from the block header.
    timestamp*    : EthTime
    gasLimit*     : GasInt
    fee*          : Option[UInt256]      # base fee; absent pre-EIP-1559
    prevRandao*   : Hash256
    difficulty*   : UInt256
    coinbase*     : EthAddress
    excessBlobGas*: uint64

  TxContext* = object
    ## Transaction-level inputs to the EVM.
    origin*         : EthAddress
    gasPrice*       : GasInt
    versionedHashes*: VersionedHashes
    blobBaseFee*    : UInt256

  BaseVMState* = ref object of RootObj
    ## Shared state for one EVM run: database handle, block/tx context,
    ## fork selection, tracing hooks and accumulated receipts.
    com*              : CommonRef
    stateDB*          : LedgerRef
    gasPool*          : GasInt
    parent*           : BlockHeader
    blockCtx*         : BlockContext
    txCtx*            : TxContext
    flags*            : set[VMFlag]
    fork*             : EVMFork
    tracer*           : TracerRef
    receipts*         : seq[Receipt]
    cumulativeGasUsed*: GasInt
    gasCosts*         : GasCosts

  Computation* = ref object
    ## The execution computation: one EVM call frame (stack, memory,
    ## gas meter, code stream and call linkage).
    vmState*: BaseVMState
    msg*: Message
    memory*: Memory
    stack*: Stack
    returnStack*: seq[int]
    gasMeter*: GasMeter
    code*: CodeStream
    output*: seq[byte]
    returnData*: seq[byte]
    error*: Error
    savePoint*: LedgerSpRef
    instr*: Op
    opIndex*: int
    when defined(evmc_enabled):
      # EVMC mode: child call data travels through the host interface.
      host*: HostContext
      child*: ref nimbus_message
      res*: nimbus_result
    else:
      # Native mode: call frames are linked directly.
      parent*, child*: Computation
    pendingAsyncOperation*: Future[void]
    continuation*: proc() {.gcsafe, raises: [CatchableError].}
    sysCall*: bool

  Error* = ref object
    ## Failure record for a computation.
    evmcStatus*: evmc_status_code
    info*      : string
    burnsGas*  : bool

  GasMeter* = object
    gasRefunded*: GasInt
    gasRemaining*: GasInt

  CallKind* = evmc_call_kind

  MsgFlags* = evmc_flags

  Message* = ref object
    ## Parameters of a single call or create.
    kind*: CallKind
    depth*: int
    gas*: GasInt
    sender*: EthAddress
    contractAddress*: EthAddress
    codeAddress*: EthAddress
    value*: UInt256
    data*: seq[byte]
    flags*: MsgFlags

  TracerFlags* {.pure.} = enum
    ## Switches that limit what a tracer captures.
    DisableStorage
    DisableMemory
    DisableStack
    DisableState
    DisableStateDiff
    EnableAccount
    DisableReturnData

  StructLog* = object
    ## One structured-log entry emitted per executed opcode.
    pc*        : int
    op*        : Op
    gas*       : GasInt
    gasCost*   : GasInt
    memory*    : seq[byte]
    memSize*   : int
    stack*     : seq[UInt256]
    returnData*: seq[byte]
    storage*   : Table[UInt256, UInt256]
    depth*     : int
    refund*    : GasInt
    opName*    : string
    error*     : string

  TracerRef* = ref object of RootObj
    ## Base class for EVM tracers; concrete tracers override the
    ## `capture*` methods below.
    flags*: set[TracerFlags]
# Transaction level
# This is called once for each transaction
method captureTxStart*(ctx: TracerRef, gasLimit: GasInt) {.base, gcsafe.} =
  ## Called once when a transaction starts; base implementation is a no-op.
  discard
method captureTxEnd*(ctx: TracerRef, restGas: GasInt) {.base, gcsafe.} =
  ## Called once when a transaction ends, with the remaining gas;
  ## base implementation is a no-op.
  discard
# Top call frame
method captureStart*(ctx: TracerRef, comp: Computation,
                     sender: EthAddress, to: EthAddress,
                     create: bool, input: openArray[byte],
                     gasLimit: GasInt, value: UInt256) {.base, gcsafe.} =
  ## Called when the top-level call frame begins; base implementation
  ## is a no-op.
  discard
method captureEnd*(ctx: TracerRef, comp: Computation, output: openArray[byte],
                   gasUsed: GasInt, error: Option[string]) {.base, gcsafe.} =
  ## Called when the top-level call frame ends, with its output, gas
  ## usage and optional error; base implementation is a no-op.
  discard
# Rest of call frames
method captureEnter*(ctx: TracerRef, comp: Computation, op: Op,
                     sender: EthAddress, to: EthAddress,
                     input: openArray[byte], gasLimit: GasInt,
                     value: UInt256) {.base, gcsafe.} =
  ## Called when a nested call frame is entered (CALL/CREATE family);
  ## base implementation is a no-op.
  discard
method captureExit*(ctx: TracerRef, comp: Computation, output: openArray[byte],
                    gasUsed: GasInt, error: Option[string]) {.base, gcsafe.} =
  ## Called when a nested call frame returns; base implementation is a no-op.
  discard
# Opcode level
method captureOpStart*(ctx: TracerRef, comp: Computation,
                       fixed: bool, pc: int, op: Op, gas: GasInt,
                       depth: int): int {.base, gcsafe.} =
  ## Called before an opcode executes; returns an opIndex handle passed
  ## back to `captureOpEnd`. Base implementation does nothing and
  ## returns 0 (the zero-initialized `result`).
  discard
method captureGasCost*(ctx: TracerRef, comp: Computation,
                       fixed: bool, op: Op, gasCost: GasInt,
                       gasRemaining: GasInt, depth: int) {.base, gcsafe.} =
  ## Called with the gas cost charged for the current opcode;
  ## base implementation is a no-op.
  discard
method captureOpEnd*(ctx: TracerRef, comp: Computation,
                     fixed: bool, pc: int, op: Op, gas: GasInt, refund: GasInt,
                     rData: openArray[byte],
                     depth: int, opIndex: int) {.base, gcsafe.} =
  ## Called after an opcode executes, with the opIndex previously
  ## returned by `captureOpStart`; base implementation is a no-op.
  discard
method captureFault*(ctx: TracerRef, comp: Computation,
                     fixed: bool, pc: int, op: Op, gas: GasInt, refund: GasInt,
                     rData: openArray[byte],
                     depth: int, error: Option[string]) {.base, gcsafe.} =
  ## Called when opcode execution faults; base implementation is a no-op.
  discard
# Called at the start of EVM interpreter loop
method capturePrepare*(ctx: TracerRef, comp: Computation, depth: int) {.base, gcsafe.} =
  ## Called before the interpreter loop runs for a frame at the given
  ## call depth; base implementation is a no-op.
  discard