nimbus-eth1/nimbus/evm/interpreter_dispatch.nim


# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

{.push raises: [].}

import
  std/[macros, strformat],
  pkg/[chronicles, chronos, stew/byteutils],
  ".."/[constants, db/ledger],
  "."/[code_stream, computation, evm_errors],
"."/[message, precompiles, state, types],
./interpreter/op_dispatcher
logScope:
topics = "vm opcode"
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc runVM(
    c: VmCpt,
    shouldPrepareTracer: bool,
    fork: static EVMFork,
    tracingEnabled: static bool,
): EvmResultVoid =
  ## VM instruction handler main loop - for each fork, a distinct version of
  ## this function is instantiated so that selection of fork-specific
  ## versions of functions happens only once

  # It's important not to re-prepare the tracer after
  # an async operation, only after a call/create.
  #
  # That is, tracingEnabled is checked in many places, and
  # indicates something like, "Do we want tracing to be
  # enabled?", whereas shouldPrepareTracer is more like,
  # "Are we at a spot right now where we want to re-initialize
  # the tracer?"
  when tracingEnabled:
    if shouldPrepareTracer:
      c.prepareTracer()

  while true:
    {.computedGoto.}
    c.instr = c.code.next()
    dispatchInstr(fork, tracingEnabled, c.instr, c)

  ok()
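
# `selectVM` below builds, at compile time, a dispatcher that pins both the
# fork and the tracing flag to literals, so `runVM` is instantiated once per
# (fork, tracing) combination. Roughly, the generated code looks like the
# sketch here (fork names are just illustrative `EVMFork` members):
#
#   case fork
#   of FkFrontier:
#     case tracingEnabled
#     of false: runVM(v, shouldPrepareTracer, FkFrontier, false)
#     of true:  runVM(v, shouldPrepareTracer, FkFrontier, true)
#   of FkHomestead:
#     ...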
macro selectVM(
    v: VmCpt, shouldPrepareTracer: bool, fork: EVMFork, tracingEnabled: bool
): EvmResultVoid =
  # Generate an opcode dispatcher that calls runVM with a literal for each fork:
  #
  #   case fork
  #   of A: runVM(v, A, ...)
  #   ...
  let caseStmt = nnkCaseStmt.newTree(fork)
  for fork in EVMFork:
    let
      forkVal = quote:
        `fork`
      call = quote:
        case `tracingEnabled`
        of false: runVM(`v`, `shouldPrepareTracer`, `forkVal`, `false`)
        of true: runVM(`v`, `shouldPrepareTracer`, `forkVal`, `true`)

    caseStmt.add nnkOfBranch.newTree(forkVal, call)
  caseStmt
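
# Prologue for a message call: take a state snapshot and, for EVMC_CALL
# messages, move `msg.value` from the sender to the target account before any
# code runs. The snapshot is what afterExecCall later commits or rolls back.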
proc beforeExecCall(c: Computation) =
  c.snapshot()
  if c.msg.kind == EVMC_CALL:
    c.vmState.mutateStateDB:
      db.subBalance(c.msg.sender, c.msg.value)
      db.addBalance(c.msg.contractAddress, c.msg.value)

proc afterExecCall(c: Computation) =
  ## Collect all of the accounts that *may* need to be deleted based on EIP161
  ## https://github.com/ethereum/EIPs/blob/master/EIPS/eip-161.md
  ## also see: https://github.com/ethereum/EIPs/issues/716

  if c.isError or c.fork >= FkByzantium:
    if c.msg.contractAddress == RIPEMD_ADDR:
      # Special case to account for geth+parity bug
      c.vmState.stateDB.ripemdSpecial()

  if c.isSuccess:
    c.commit()
  else:
    c.rollback()
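
# Prologue for a CREATE/CREATE2: bump the sender nonce (bailing out on
# overflow), warm the new address per EIP-2929 (Berlin+), snapshot, detect
# address collisions, transfer the endowment, clear any pre-existing storage
# and, from Spurious Dragon on, start the new account's nonce per EIP-161.
# Returns true when the create must be aborted early.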
proc beforeExecCreate(c: Computation): bool =
  c.vmState.mutateStateDB:
    let nonce = db.getNonce(c.msg.sender)
    if nonce + 1 < nonce:
      let sender = c.msg.sender.toHex
      c.setError(
        "Nonce overflow when sender=" & sender & " wants to create contract", false
      )
      return true
    db.setNonce(c.msg.sender, nonce + 1)

    # We add this to the access list _before_ taking a snapshot.
    # Even if the creation fails, the access-list change should not be
    # rolled back (EIP-2929).
    if c.fork >= FkBerlin:
      db.accessList(c.msg.contractAddress)

  c.snapshot()

  if c.vmState.readOnlyStateDB().contractCollision(c.msg.contractAddress):
    let blurb = c.msg.contractAddress.toHex
    c.setError("Address collision when creating contract address=" & blurb, true)
    c.rollback()
    return true

  c.vmState.mutateStateDB:
    db.subBalance(c.msg.sender, c.msg.value)
    db.addBalance(c.msg.contractAddress, c.msg.value)
    db.clearStorage(c.msg.contractAddress)
    if c.fork >= FkSpurious:
      # EIP161 nonce incrementation
      db.incNonce(c.msg.contractAddress)

  return false
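
# Epilogue for a CREATE/CREATE2: try to persist the contract code (which may
# itself flip `isSuccess`, e.g. on an EIP-170 size failure), make sure the
# initcode output is never surfaced as RETURNDATA, then commit or roll back.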
proc afterExecCreate(c: Computation) =
  if c.isSuccess:
    # This can change `c.isSuccess`.
    c.writeContract()
    # Contract code should never be returned to the caller. Only data from
    # `REVERT` is returned after a create. Clearing in this branch covers the
    # right cases, particularly important with EVMC where it must be cleared.
    if c.output.len > 0:
      c.output = @[]

  if c.isSuccess:
    c.commit()
  else:
    c.rollback()
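
# Map the EVMC message kind onto the opcode reported to the tracer; a set
# EVMC_STATIC flag is reported as StaticCall regardless of the message kind.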
const MsgKindToOp: array[CallKind, Op] =
  [Call, DelegateCall, CallCode, Create, Create2, EofCreate]

func msgToOp(msg: Message): Op =
  if EVMC_STATIC in msg.flags:
    return StaticCall
  MsgKindToOp[msg.kind]
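
# Common prologue: report nested frames (depth > 0) to the tracer via
# captureEnter, then run the call- or create-specific preparation. Returns
# true if the frame must not execute (e.g. nonce overflow or address
# collision in beforeExecCreate).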
proc beforeExec(c: Computation): bool =
  if c.msg.depth > 0:
    c.vmState.captureEnter(
      c,
      msgToOp(c.msg),
      c.msg.sender,
      c.msg.contractAddress,
      c.msg.data,
      c.msg.gas,
      c.msg.value,
    )

  if not c.msg.isCreate:
    c.beforeExecCall()
    false
  else:
    c.beforeExecCreate()
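
# Common epilogue: run the call- or create-specific wrap-up, then report gas
# usage, output and any error for nested frames to the tracer via captureExit.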
proc afterExec(c: Computation) =
  if not c.msg.isCreate:
    c.afterExecCall()
  else:
    c.afterExecCreate()

  if c.msg.depth > 0:
    let gasUsed = c.msg.gas - c.gasMeter.gasRemaining
    c.vmState.captureExit(c, c.output, gasUsed, c.errorOpt)

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
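
# Convert a dispatcher error into a computation error; `c` is captured from
# the calling scope (this is a template, not a proc).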
template handleEvmError(x: EvmErrorObj) =
  let
    msg = $x.code
    depth = $(c.msg.depth + 1) # plus one to match tracer depth, and avoid confusion
  c.setError("Opcode Dispatch Error: " & msg & ", depth=" & depth, true)
proc executeOpcodes*(c: Computation, shouldPrepareTracer: bool = true) =
  let fork = c.fork

  block blockOne:
    if c.continuation.isNil and c.execPrecompiles(fork):
      break blockOne

    let cont = c.continuation
    if not cont.isNil:
      c.continuation = nil
      cont().isOkOr:
        handleEvmError(error)
        break blockOne

    let nextCont = c.continuation
    if not nextCont.isNil:
      # Return up to the caller, which will run the child
      # and then call this proc again.
      break blockOne

    # FIXME-Adam: I hate how convoluted this is. See also the comment in
    # op_dispatcher.nim. The idea here is that we need to call
    # traceOpCodeEnded at the end of the opcode (and only if there
    # hasn't been an exception thrown); otherwise we run into problems
    # if an exception (e.g. out of gas) is thrown during a continuation.
    # So this code says, "If we've just run a continuation, but there's
    # no *subsequent* continuation, then the opcode is done."
    if c.tracingEnabled and not (cont.isNil) and nextCont.isNil:
      c.traceOpCodeEnded(c.instr, c.opIndex)

    if c.instr == Return or c.instr == Revert or c.instr == SelfDestruct:
      break blockOne

    c.selectVM(shouldPrepareTracer, fork, c.tracingEnabled).isOkOr:
      handleEvmError(error)
      break blockOne # not strictly needed, but it makes the flow clear

  if c.isError() and c.continuation.isNil:
    if c.tracingEnabled:
      c.traceError()
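
# Two interchangeable drivers for the call/create tree: with
# `vm_use_recursion` nested frames are handled by real recursion (a tiny
# stack frame per level), otherwise the recursion is flattened into an
# explicit loop over the parent/child links of the `Computation` objects.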
when vm_use_recursion:
  # Recursion with tiny stack frame per level.
  proc execCallOrCreate*(c: Computation) =
    if not c.beforeExec():
      c.executeOpcodes()
      while not c.continuation.isNil:
        # If there's a continuation, it's because there's a child
        # (i.e. a call or create) that needs to run first.
        when evmc_enabled:
          c.res = c.host.call(c.child[])
        else:
          execCallOrCreate(c.child)
        c.child = nil
        c.executeOpcodes()
      c.afterExec()
    c.dispose()

else:
  proc execCallOrCreate*(cParam: Computation) =
    var (c, before, shouldPrepareTracer) = (cParam, true, true)

    # No actual recursion, but simulate recursion including before/after/dispose.
    while true:
      while true:
        if before and c.beforeExec():
          break
        c.executeOpcodes(shouldPrepareTracer)
        if c.continuation.isNil:
          c.afterExec()
          break
        (before, shouldPrepareTracer, c.child, c, c.parent) =
          (true, true, nil.Computation, c.child, c)
      if c.parent.isNil:
        break
      c.dispose()
      (before, shouldPrepareTracer, c.parent, c) =
        (false, true, nil.Computation, c.parent)

    while not c.isNil:
      c.dispose()
      c = c.parent

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------