Added basic async capabilities for vm2. (#1260)
* Added basic async capabilities for vm2. This is a whole new Git branch, not the same one as last time (https://github.com/status-im/nimbus-eth1/pull/1250) - there wasn't much worth salvaging. Main differences:

  I didn't do the "each opcode has to specify an async handler" junk that I put in last time. Instead, in oph_memory.nim you can see sloadOp calling asyncChainTo and passing in an async operation. That async operation is then run by the execCallOrCreate (or asyncExecCallOrCreate) code in interpreter_dispatch.nim.

  In the test code, the (previously existing) macro called "assembler" now allows you to add a section called "initialStorage", specifying fake data to be used by the EVM computation run by that test. (In the long run we'll obviously want to write tests that for-real use the JSON-RPC API to asynchronously fetch data; for now, this was just an expedient way to write a basic unit test that exercises the async-EVM code pathway.)

  There's also a new macro called "concurrentAssemblers" that allows you to write a test that runs multiple assemblers concurrently (and then waits for them all to finish). There's one example test using this, in test_op_memory_lazy.nim, though you can't actually see it doing so unless you uncomment some echo statements in async_operations.nim (in which case you can see the two concurrently running EVM computations each printing out what they're doing, and you'll see that they interleave).

  A question: is it possible to make EVMC work asynchronously? (For now, this code compiles and "make test" passes even if ENABLE_EVMC is turned on, but it doesn't actually work asynchronously; it just falls back on doing the usual synchronous EVMC thing. See FIXME-asyncAndEvmc.)

* Moved the AsyncOperationFactory to the BaseVMState object.

* Made the AsyncOperationFactory into a table of fn pointers. Also ditched the plain-data Vm2AsyncOperation type; it wasn't really serving much purpose. Instead, the pendingAsyncOperation field directly contains the Future.

* Removed the hasStorage idea. It's not the right solution to the "how do we know whether we still need to fetch the storage value or not?" problem. I haven't implemented the right solution yet, but at least we're better off not putting in a wrong one.

* Added/modified/removed some comments. (Based on feedback on the PR.)

* Removed the waitFor from execCallOrCreate. There was some back-and-forth in the PR regarding whether nested waitFor calls are acceptable: https://github.com/status-im/nimbus-eth1/pull/1260#discussion_r998587449 The eventual decision was to just change the waitFor to a doAssert (since we probably won't want this extra functionality when running synchronously anyway) to make sure that the Future is already finished.
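For readers skimming the diff below, here is a minimal, self-contained sketch (using chronos) of the chaining mechanism this PR adds: an opcode handler registers a Future plus a continuation via asyncChainTo, and the dispatch loop awaits the Future before resuming. The Computation-like type and the fetchStorage, sloadLike, and run procs are illustrative stand-ins, not code from this PR; the real definitions live in vm2/computation.nim and vm2/interpreter_dispatch.nim.

import chronos

type
  Computation = ref object
    storedValue: int                   # stands in for an EVM storage slot value
    pendingAsyncOperation: Future[void]
    continuation: proc() {.gcsafe.}

# Mirrors asyncChainTo: remember the Future and the code to run once it completes.
template asyncChainTo(c: Computation, asyncOperation: Future[void], after: untyped) =
  c.pendingAsyncOperation = asyncOperation
  c.continuation = proc() =
    c.continuation = nil
    after

proc fetchStorage(c: Computation): Future[void] {.async.} =
  await sleepAsync(10.milliseconds)    # stands in for a JSON-RPC storage fetch
  c.storedValue = 42

proc sloadLike(c: Computation) =
  # The opcode handler only registers the async work plus a continuation;
  # it does not block waiting for the data.
  c.asyncChainTo(fetchStorage(c)):
    echo "loaded value: ", c.storedValue

proc run(c: Computation) {.async.} =
  # Mirrors asyncExecCallOrCreate: keep servicing pending async operations
  # until no continuation remains. In synchronous mode the equivalent loop
  # does a doAssert that the Future is already finished instead of awaiting.
  sloadLike(c)
  while not c.continuation.isNil:
    let p = c.pendingAsyncOperation
    c.pendingAsyncOperation = nil
    await p
    (c.continuation)()

waitFor run(Computation())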
parent a689e9185a
commit e040e2671a

Makefile
@@ -175,6 +175,10 @@ test: | build deps
	$(ENV_SCRIPT) nim test_rocksdb $(NIM_PARAMS) nimbus.nims
	$(ENV_SCRIPT) nim test $(NIM_PARAMS) nimbus.nims

# builds and runs an EVM-related subset of the nimbus test suite
test-evm: | build deps
	$(ENV_SCRIPT) nim test_evm $(NIM_PARAMS) nimbus.nims

# Primitive reproducibility test.
#
# On some platforms, with some GCC versions, it may not be possible to get a

@@ -66,6 +66,9 @@ task test, "Run tests":
task test_rocksdb, "Run rocksdb tests":
  test "tests/db", "test_kvstore_rocksdb", "-d:chronicles_log_level=ERROR -d:unittest2DisableParamFiltering"

task test_evm, "Run EVM tests":
  test "tests", "evm_tests", "-d:chronicles_log_level=ERROR -d:unittest2DisableParamFiltering"

## Fluffy tasks

task fluffy, "Build fluffy":

@@ -8,6 +8,7 @@

import
  eth/common/eth_types, stint, options, stew/ranges/ptr_arith,
  chronos,
  ".."/[vm_types, vm_state, vm_computation, vm_state_transactions],
  ".."/[vm_internals, vm_precompiles, vm_gas_costs],
  ".."/[db/accounts_cache, forks],

@@ -160,6 +161,7 @@ proc setupHost(call: CallParams): TransactionHost =

    let cMsg = hostToComputationMessage(host.msg)
    host.computation = newComputation(vmState, cMsg, code)

    shallowCopy(host.code, code)

  else:

@@ -197,10 +199,12 @@ when defined(evmc_enabled):
    {.gcsafe.}:
      callResult.release(callResult)

proc runComputation*(call: CallParams): CallResult =
  let host = setupHost(call)
  let c = host.computation
# FIXME-awkwardFactoring: the factoring out of the pre and
# post parts feels awkward to me, but for now I'd really like
# not to have too much duplicated code between sync and async.
# --Adam

proc prepareToRunComputation(host: TransactionHost, call: CallParams) =
  # Must come after `setupHost` for correct fork.
  if not call.noAccessList:
    initialAccessListEIP2929(call)

@@ -210,11 +214,9 @@ proc runComputation*(call: CallParams): CallResult =
    host.vmState.mutateStateDB:
      db.subBalance(call.sender, call.gasLimit.u256 * call.gasPrice.u256)

  when defined(evmc_enabled):
    doExecEvmc(host, call)
  else:
    execComputation(host.computation)

proc calculateAndPossiblyRefundGas(host: TransactionHost, call: CallParams): GasInt =
  let c = host.computation

  # EIP-3529: Reduction in refunds
  let MaxRefundQuotient = if host.vmState.fork >= FkLondon:
                            5.GasInt

@@ -222,19 +224,23 @@ proc runComputation*(call: CallParams): CallResult =
                            2.GasInt

  # Calculated gas used, taking into account refund rules.
  var gasRemaining: GasInt = 0
  if call.noRefund:
    gasRemaining = c.gasMeter.gasRemaining
    result = c.gasMeter.gasRemaining
  elif not c.shouldBurnGas:
    let maxRefund = (call.gasLimit - c.gasMeter.gasRemaining) div MaxRefundQuotient
    let refund = min(c.getGasRefund(), maxRefund)
    c.gasMeter.returnGas(refund)
    gasRemaining = c.gasMeter.gasRemaining
    result = c.gasMeter.gasRemaining

  # Refund for unused gas.
  if gasRemaining > 0 and not call.noGasCharge:
  if result > 0 and not call.noGasCharge:
    host.vmState.mutateStateDB:
      db.addBalance(call.sender, gasRemaining.u256 * call.gasPrice.u256)
      db.addBalance(call.sender, result.u256 * call.gasPrice.u256)

proc finishRunningComputation(host: TransactionHost, call: CallParams): CallResult =
  let c = host.computation

  let gasRemaining = calculateAndPossiblyRefundGas(host, call)

  result.isError = c.isError
  result.gasUsed = call.gasLimit - gasRemaining

@@ -244,3 +250,27 @@ proc runComputation*(call: CallParams): CallResult =
  shallowCopy(result.logEntries, c.logEntries)
  result.stack = c.stack
  result.memory = c.memory

proc runComputation*(call: CallParams): CallResult =
  let host = setupHost(call)
  prepareToRunComputation(host, call)

  when defined(evmc_enabled):
    doExecEvmc(host, call)
  else:
    execComputation(host.computation)

  finishRunningComputation(host, call)

# FIXME-duplicatedForAsync
proc asyncRunComputation*(call: CallParams): Future[CallResult] {.async.} =
  let host = setupHost(call)
  prepareToRunComputation(host, call)

  # FIXME-asyncAndEvmc: I'm not sure what to do with EVMC at the moment.
  # when defined(evmc_enabled):
  #   doExecEvmc(host, call)
  # else:
  await asyncExecComputation(host.computation)

  return finishRunningComputation(host, call)

@@ -9,6 +9,7 @@
import
  std/[options, times],
  chronicles,
  chronos,
  eth/[common/eth_types_rlp, trie/db],
  stint,
  ".."/[vm_types, vm_state, vm_gas_costs, forks, constants],

@@ -177,8 +178,10 @@ proc rpcEstimateGas*(cd: RpcCallData, header: BlockHeader, chainDB: BaseChainDB,

  hi

proc txCallEvm*(tx: Transaction, sender: EthAddress, vmState: BaseVMState, fork: Fork): GasInt =
  var call = CallParams(
proc callParamsForTx(tx: Transaction, sender: EthAddress, vmState: BaseVMState, fork: Fork): CallParams =
  # Is there a nice idiom for this kind of thing? Should I
  # just be writing this as a bunch of assignment statements?
  result = CallParams(
    vmState: vmState,
    forkOverride: some(fork),
    gasPrice: tx.gasPrice,

@@ -190,11 +193,10 @@ proc txCallEvm*(tx: Transaction, sender: EthAddress, vmState: BaseVMState, fork:
    input: tx.payload
  )
  if tx.txType > TxLegacy:
    shallowCopy(call.accessList, tx.accessList)
  return runComputation(call).gasUsed
    shallowCopy(result.accessList, tx.accessList)

proc testCallEvm*(tx: Transaction, sender: EthAddress, vmState: BaseVMState, fork: Fork): CallResult =
  var call = CallParams(
proc callParamsForTest(tx: Transaction, sender: EthAddress, vmState: BaseVMState, fork: Fork): CallParams =
  result = CallParams(
    vmState: vmState,
    forkOverride: some(fork),
    gasPrice: tx.gasPrice,

@@ -209,5 +211,17 @@ proc testCallEvm*(tx: Transaction, sender: EthAddress, vmState: BaseVMState, for
    noRefund: true, # Don't apply gas refund/burn rule.
  )
  if tx.txType > TxLegacy:
    shallowCopy(call.accessList, tx.accessList)
    shallowCopy(result.accessList, tx.accessList)

proc txCallEvm*(tx: Transaction, sender: EthAddress, vmState: BaseVMState, fork: Fork): GasInt =
  let call = callParamsForTx(tx, sender, vmState, fork)
  return runComputation(call).gasUsed

proc testCallEvm*(tx: Transaction, sender: EthAddress, vmState: BaseVMState, fork: Fork): CallResult =
  let call = callParamsForTest(tx, sender, vmState, fork)
  runComputation(call)

# FIXME-duplicatedForAsync
proc asyncTestCallEvm*(tx: Transaction, sender: EthAddress, vmState: BaseVMState, fork: Fork): Future[CallResult] {.async.} =
  let call = callParamsForTest(tx, sender, vmState, fork)
  return await asyncRunComputation(call)

@@ -0,0 +1,104 @@
# Nimbus
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

import
  chronos,
  stint,
  json_rpc/rpcclient,
  web3,
  ./computation,
  ./state,
  ./types,
  ../db/accounts_cache


# Used in synchronous mode.
proc noLazyDataSource*(): LazyDataSource =
  LazyDataSource(
    ifNecessaryGetStorage: (proc(c: Computation, slot: UInt256): Future[void] {.async.} =
      discard
    )
  )

# Will be used in asynchronous on-demand-data-fetching mode, once
# that is implemented.
proc realLazyDataSource*(client: RpcClient): LazyDataSource =
  LazyDataSource(
    ifNecessaryGetStorage: (proc(c: Computation, slot: UInt256): Future[void] {.async.} =
      # TODO: find some way to check whether we already have it.
      # This is WRONG, but good enough for now, considering this
      # code is unused except in a few tests. I'm working on
      # doing this properly.
      if not c.getStorage(slot).isZero: return

      # FIXME-onDemandStorageNotImplementedYet
      # (I sketched in this code, but haven't actually tried running it yet.)
      echo("Attempting to for-real fetch slot " & $(slot))
      # ethAddressStr("0xfff33a3bd36abdbd412707b8e310d6011454a7ae")
      # check hexDataStr(0.u256).string == res.string
      let ethAddress = c.msg.contractAddress
      let address: Address = Address(ethAddress)
      let quantity: int = slot.truncate(int) # this is probably wrong; what's the right way to convert this?
      let blockId: BlockIdentifier = blockId(c.vmState.parent.blockNumber.truncate(uint64)) # ditto
      let res = await client.eth_getStorageAt(address, quantity, blockId)
      echo("Fetched slot " & $(slot) & ", result is " & $(res))
      let v = res # will res be the actual value, or do I need to convert or something?

      # Before implementing this, see the note from Zahary here:
      # https://github.com/status-im/nimbus-eth1/pull/1260#discussion_r999669139
      #
      # c.vmState.mutateStateDB:
      #   db.setStorage(c.msg.contractAddress, slot, UInt256.fromBytesBE(v))
    )
  )

# Used for unit testing. Contains some prepopulated data.
proc fakeLazyDataSource*(fakePairs: seq[tuple[key, val: array[32, byte]]]): LazyDataSource =
  LazyDataSource(
    ifNecessaryGetStorage: (proc(c: Computation, slot: UInt256): Future[void] {.async.} =
      # See the comment above.
      if not c.getStorage(slot).isZero: return

      # FIXME-writeAutomatedTestsToShowThatItCanRunConcurrently

      # For now, until I've implemented some more automated way to
      # capture and verify the fact that this can run concurrently,
      # this is useful just to see in the console that the echo
      # statements from multiple Computations can run at the same
      # time and be interleaved.
      # echo("Attempting to fake-fetch slot " & $(slot))
      # await sleepAsync(2.seconds)

      let slotBytes = toBytesBE(slot)
      # The linear search is obviously slow, but doesn't matter
      # for tests with only a few initialStorage entries. Fix
      # this if we ever want to write tests with more.
      for (k, v) in fakePairs:
        if slotBytes == k:
          c.vmState.mutateStateDB:
            db.setStorage(c.msg.contractAddress, slot, UInt256.fromBytesBE(v))
          break

      # echo("Finished fake-fetch of slot " & $(slot))
    )
  )


# Gotta find the place where we're creating a Computation without setting
# its asyncFactory in the first place, but this is fine for now.
proc asyncFactory*(c: Computation): AsyncOperationFactory =
  # Does Nim have an "ifNil" macro/template?
  if isNil(c.vmState.asyncFactory):
    AsyncOperationFactory(lazyDataSource: noLazyDataSource()) # AARDVARK - can I make a singleton?
  else:
    c.vmState.asyncFactory

@@ -21,6 +21,7 @@ import
  ./transaction_tracer,
  ./types,
  chronicles,
  chronos,
  eth/[common, keys],
  options,
  sets

@@ -304,6 +305,13 @@ template chainTo*(c: Computation, toChild: typeof(c.child), after: untyped) =
    c.continuation = nil
    after

# Register an async operation to be performed before the continuation is called.
template asyncChainTo*(c: Computation, asyncOperation: Future[void], after: untyped) =
  c.pendingAsyncOperation = asyncOperation
  c.continuation = proc() =
    c.continuation = nil
    after

proc merge*(c, child: Computation) =
  c.logEntries.add child.logEntries
  c.gasMeter.refundGas(child.gasMeter.gasRefunded)

@@ -98,7 +98,7 @@ proc toCaseStmt(forkArg, opArg, k: NimNode): NimNode =
  # Wrap innner case/switch into outer case/switch
  let branchStmt = block:
    case op
    of Create, Create2, Call, CallCode, DelegateCall, StaticCall:
    of Create, Create2, Call, CallCode, DelegateCall, StaticCall, Sload:
      quote do:
        `forkCaseSubExpr`
        if not `k`.cpt.continuation.isNil:

@@ -135,7 +135,7 @@ template genLowMemDispatcher*(fork: Fork; op: Op; k: Vm2Ctx) =
    handleOtherDirective(fork, op, k)

  case c.instr
  of Create, Create2, Call, CallCode, DelegateCall, StaticCall:
  of Create, Create2, Call, CallCode, DelegateCall, StaticCall, Sload:
    if not k.cpt.continuation.isNil:
      break
  of Return, Revert, SelfDestruct:

@@ -14,6 +14,7 @@

import
  ../../../errors,
  ../../async_operations,
  ../../code_stream,
  ../../computation,
  ../../memory,

@@ -163,10 +164,11 @@ const

  sloadOp: Vm2OpFn = proc (k: var Vm2Ctx) =
    ## 0x54, Load word from storage.
    let (slot) = k.cpt.stack.popInt(1)
    k.cpt.stack.push:
      k.cpt.getStorage(slot)

    let cpt = k.cpt # so it can safely be captured by the asyncChainTo closure below
    let (slot) = cpt.stack.popInt(1)
    cpt.asyncChainTo(asyncFactory(cpt).lazyDataSource.ifNecessaryGetStorage(cpt, slot)):
      cpt.stack.push:
        cpt.getStorage(slot)

  sloadEIP2929Op: Vm2OpFn = proc (k: var Vm2Ctx) =
    ## 0x54, EIP2929: Load word from storage for Berlin and later

@@ -25,6 +25,7 @@ import
  ./state,
  ./types,
  chronicles,
  chronos,
  eth/[common, keys],
  macros,
  options,

@@ -45,12 +46,20 @@ const
# Private functions
# ------------------------------------------------------------------------------

proc selectVM(c: Computation, fork: Fork) {.gcsafe.} =
proc selectVM(c: Computation, fork: Fork, shouldPrepareTracer: bool) {.gcsafe.} =
  ## Op code execution handler main loop.
  var desc: Vm2Ctx
  desc.cpt = c

  if c.tracingEnabled:
  # It's important not to re-prepare the tracer after
  # an async operation, only after a call/create.
  #
  # That is, tracingEnabled is checked in many places, and
  # indicates something like, "Do we want tracing to be
  # enabled?", whereas shouldPrepareTracer is more like,
  # "Are we at a spot right now where we want to re-initialize
  # the tracer?"
  if c.tracingEnabled and shouldPrepareTracer:
    c.prepareTracer()

  while true:

@@ -195,7 +204,7 @@ proc afterExec(c: Computation) =
# Public functions
# ------------------------------------------------------------------------------

proc executeOpcodes*(c: Computation) =
proc executeOpcodes*(c: Computation, shouldPrepareTracer: bool = true) =
  let fork = c.fork

  block:

@@ -205,7 +214,7 @@ proc executeOpcodes*(c: Computation) =
    try:
      if not c.continuation.isNil:
        (c.continuation)()
      c.selectVM(fork)
      c.selectVM(fork, shouldPrepareTracer)
    except CatchableError as e:
      c.setError(
        &"Opcode Dispatch Error msg={e.msg}, depth={c.msg.depth}", true)

@@ -222,17 +231,25 @@ when vm_use_recursion:
      return
    c.executeOpcodes()
    while not c.continuation.isNil:
      when evmc_enabled:
        c.res = c.host.call(c.child[])
      # If there's a continuation, then it's because there's either
      # a child (i.e. call or create) or a pendingAsyncOperation.
      if not c.pendingAsyncOperation.isNil:
        let p = c.pendingAsyncOperation
        c.pendingAsyncOperation = nil
        doAssert(p.finished(), "In synchronous mode, every async operation should be an already-resolved Future.")
        c.executeOpcodes(false)
      else:
        execCallOrCreate(c.child)
        c.child = nil
        c.executeOpcodes()
        when evmc_enabled:
          c.res = c.host.call(c.child[])
        else:
          execCallOrCreate(c.child)
        c.child = nil
        c.executeOpcodes()
    c.afterExec()

else:
  proc execCallOrCreate*(cParam: Computation) =
    var (c, before) = (cParam, true)
    var (c, before, shouldPrepareTracer) = (cParam, true, true)
    defer:
      while not c.isNil:
        c.dispose()

@@ -243,15 +260,60 @@ else:
    while true:
      if before and c.beforeExec():
        break
      c.executeOpcodes()
      c.executeOpcodes(shouldPrepareTracer)
      if c.continuation.isNil:
        c.afterExec()
        break
      (before, c.child, c, c.parent) = (true, nil.Computation, c.child, c)
      if not c.pendingAsyncOperation.isNil:
        before = false
        shouldPrepareTracer = false
        let p = c.pendingAsyncOperation
        c.pendingAsyncOperation = nil
        doAssert(p.finished(), "In synchronous mode, every async operation should be an already-resolved Future.")
      else:
        (before, shouldPrepareTracer, c.child, c, c.parent) = (true, true, nil.Computation, c.child, c)
      if c.parent.isNil:
        break
      c.dispose()
      (before, c.parent, c) = (false, nil.Computation, c.parent)
      (before, shouldPrepareTracer, c.parent, c) = (false, true, nil.Computation, c.parent)

# FIXME-duplicatedForAsync
#
# In the long run I'd like to make some clever macro/template to
# eliminate the duplication between the synchronous and
# asynchronous versions. But for now let's stick with this for
# simplicity.
#
# Also, I've based this on the recursive one (above), which I think
# is okay because the "async" pragma is going to rewrite this whole
# thing to use callbacks anyway. But maybe I'm wrong? It isn't hard
# to write the async version of the iterative one, but this one is
# a bit shorter and feels cleaner, so if it works just as well I'd
# rather use this one. --Adam
proc asyncExecCallOrCreate*(c: Computation): Future[void] {.async.} =
  defer: c.dispose()
  if c.beforeExec():
    return
  c.executeOpcodes()
  while not c.continuation.isNil:
    # If there's a continuation, then it's because there's either
    # a child (i.e. call or create) or a pendingAsyncOperation.
    if not c.pendingAsyncOperation.isNil:
      let p = c.pendingAsyncOperation
      c.pendingAsyncOperation = nil
      await p
      c.executeOpcodes(false)
    else:
      when evmc_enabled:
        # FIXME-asyncAndEvmc
        # Note that this is NOT async. I'm not sure how/whether I
        # can do EVMC asynchronously.
        c.res = c.host.call(c.child[])
      else:
        await asyncExecCallOrCreate(c.child)
      c.child = nil
      c.executeOpcodes()
  c.afterExec()

# ------------------------------------------------------------------------------
# End

@@ -21,6 +21,7 @@ import
  ./state,
  ./types,
  chronicles,
  chronos,
  eth/common,
  eth/common/eth_types,
  options,

@@ -46,13 +47,17 @@ proc refundGas*(c: Computation, tx: Transaction, sender: EthAddress) =
    db.addBalance(sender, c.gasMeter.gasRemaining.u256 * tx.gasPrice.u256)


proc execComputation*(c: Computation) =
# FIXME-awkwardFactoring: the factoring out of the pre and
# post parts feels awkward to me, but for now I'd really like
# not to have too much duplicated code between sync and async.
# --Adam

proc preExecComputation(c: Computation) =
  if not c.msg.isCreate:
    c.vmState.mutateStateDB:
      db.incNonce(c.msg.sender)

  c.execCallOrCreate()

proc postExecComputation(c: Computation) =
  if c.isSuccess:
    if c.fork < FkLondon:
      # EIP-3529: Reduction in refunds

@@ -62,3 +67,14 @@ proc execComputation*(c: Computation) =
    c.vmState.touchedAccounts.incl c.touchedAccounts

  c.vmState.status = c.isSuccess

proc execComputation*(c: Computation) =
  c.preExecComputation()
  c.execCallOrCreate()
  c.postExecComputation()

# FIXME-duplicatedForAsync
proc asyncExecComputation*(c: Computation): Future[void] {.async.} =
  c.preExecComputation()
  await c.asyncExecCallOrCreate()
  c.postExecComputation()

@@ -11,6 +11,8 @@
import
  tables, eth/common,
  options, json, sets,
  chronos, stint,
  json_rpc/rpcclient,
  ./stack, ./memory, ./code_stream, ../forks,
  ./interpreter/[gas_costs, op_codes],
  # TODO - will be hidden at a lower layer

@@ -55,6 +57,7 @@ type
    gasCosts*     : GasCosts
    fork*         : Fork
    minerAddress* : EthAddress
    asyncFactory* : AsyncOperationFactory

  TracerFlags* {.pure.} = enum
    EnableTracing

@@ -96,6 +99,7 @@ type
      res*: nimbus_result
    else:
      parent*, child*: Computation
      pendingAsyncOperation*: Future[void]
    continuation*: proc() {.gcsafe.}

  Error* = ref object

@@ -127,3 +131,9 @@ type
    value*: UInt256
    data*: seq[byte]
    flags*: MsgFlags

  LazyDataSource* = ref object of RootObj
    ifNecessaryGetStorage*: proc(c: Computation, slot: UInt256): Future[void] {.gcsafe.}

  AsyncOperationFactory* = ref object of RootObj
    lazyDataSource*: LazyDataSource

@@ -12,6 +12,7 @@ import
  vm2/state_transactions as vmx

export
  vmx.asyncExecComputation,
  vmx.execComputation

# End

@@ -0,0 +1,27 @@
# Nimbus
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import ../test_macro

{. warning[UnusedImport]:off .}

# This file is just meant to gather a bunch of EVM tests in one
# place. I want to be able to gradually add to this test suite.
# --Adam

# FIXME-asyncAndEvmc
# The test_op_memory_lazy test fails under EVMC.
when not defined(evmc_enabled):
  cliBuilder:
    import ./test_op_arith,
      ./test_op_bit,
      ./test_op_env,
      ./test_op_memory,
      ./test_op_misc,
      ./test_op_custom,
      ./test_tracer_json,
      ./test_op_memory_lazy

@@ -6,11 +6,16 @@ import
import
  options, eth/trie/[db, hexary],
  ../nimbus/db/[db_chain, accounts_cache],
  ../nimbus/vm2/[async_operations, types],
  ../nimbus/vm_internals, ../nimbus/forks,
  ../nimbus/transaction/call_evm,
  ../nimbus/transaction/[call_common, call_evm],
  ../nimbus/[transaction, chain_config, genesis, vm_types, vm_state],
  ../nimbus/utils/difficulty

# Need to exclude ServerCommand because it contains something
# called Stop that interferes with the EVM operation named Stop.
import chronos except ServerCommand

export byteutils
{.experimental: "dynamicBindSym".}

@@ -28,8 +33,11 @@ type

  Assembler* = object
    title*: string
    chainDBIdentName*: string
    vmStateIdentName*: string
    stack*: seq[VMWord]
    memory*: seq[VMWord]
    initialStorage*: seq[Storage]
    storage*: seq[Storage]
    code*: seq[byte]
    logs*: seq[Log]

@@ -40,6 +48,10 @@ type
    output*: seq[byte]
    fork*: Fork

  ConcurrencyTest* = object
    title*: string
    assemblers*: seq[Assembler]

const
  idToOpcode = CacheTable"NimbusMacroAssembler"

@@ -81,6 +93,16 @@ proc parseStorage(list: NimNode): seq[Storage] =
  for val in list:
    result.add validateStorage(val)

proc parseStringLiteral(node: NimNode): string =
  let strNode = node[0]
  strNode.expectKind(nnkStrLit)
  strNode.strVal

proc parseIdent(node: NimNode): string =
  let identNode = node[0]
  identNode.expectKind(nnkIdent)
  identNode.strVal

proc parseSuccess(list: NimNode): bool =
  list.expectKind nnkStmtList
  list[0].expectKind(nnkIdent)

@@ -180,21 +202,114 @@ proc parseGasUsed(gas: NimNode): GasInt =
  gas[0].expectKind(nnkIntLit)
  result = gas[0].intVal

proc generateVMProxy(boa: Assembler): NimNode =
proc parseAssembler(list: NimNode): Assembler =
  result.success = true
  result.fork = FkFrontier
  result.gasUsed = -1
  list.expectKind nnkStmtList
  for callSection in list:
    callSection.expectKind(nnkCall)
    let label = callSection[0].strVal
    let body = callSection[1]
    case label.normalize
    of "title": result.title = parseStringLiteral(body)
    of "vmstate": result.vmStateIdentName = parseIdent(body)
    of "chaindb": result.chainDBIdentName = parseIdent(body)
    of "code" : result.code = parseCode(body)
    of "memory": result.memory = parseVMWords(body)
    of "stack" : result.stack = parseVMWords(body)
    of "storage": result.storage = parseStorage(body)
    of "initialstorage": result.initialStorage = parseStorage(body)
    of "logs": result.logs = parseLogs(body)
    of "success": result.success = parseSuccess(body)
    of "data": result.data = parseData(body)
    of "output": result.output = parseData(body)
    of "fork": result.fork = parseFork(body)
    of "gasused": result.gasUsed = parseGasUsed(body)
    else: error("unknown section '" & label & "'", callSection[0])

proc parseAssemblers(list: NimNode): seq[Assembler] =
  result = @[]
  list.expectKind nnkStmtList
  for callSection in list:
    # Should we do something with the label? Or is the
    # assembler's "title" section good enough?
    # let label = callSection[0].strVal
    let body = callSection[1]
    result.add parseAssembler(body)

proc parseConcurrencyTest(list: NimNode): ConcurrencyTest =
  list.expectKind nnkStmtList
  for callSection in list:
    callSection.expectKind(nnkCall)
    let label = callSection[0].strVal
    let body = callSection[1]
    case label.normalize
    of "title": result.title = parseStringLiteral(body)
    of "assemblers": result.assemblers = parseAssemblers(body)
    else: error("unknown section '" & label & "'", callSection[0])

type VMProxy = tuple[sym: NimNode, pr: NimNode]

proc generateVMProxy(boa: Assembler, shouldBeAsync: bool): VMProxy =
  let
    vmProxy = genSym(nskProc, "vmProxy")
    chainDB = ident("chainDB")
    vmState = ident("vmState")
    title = boa.title
    vmProxySym = genSym(nskProc, "asyncVMProxy")
    chainDB = ident(if boa.chainDBIdentName == "": "chainDB" else: boa.chainDBIdentName)
    vmState = ident(if boa.vmStateIdentName == "": "vmState" else: boa.vmStateIdentName)
    body = newLitFixed(boa)
    returnType = if shouldBeAsync:
                   quote do: Future[bool]
                 else:
                   quote do: bool
    runVMProcName = ident(if shouldBeAsync: "asyncRunVM" else: "runVM")
    vmProxyProc = quote do:
      proc `vmProxySym`(): `returnType` =
        let boa = `body`
        let asyncFactory =
          AsyncOperationFactory(
            lazyDataSource:
              if len(boa.initialStorage) == 0:
                noLazyDataSource()
              else:
                fakeLazyDataSource(boa.initialStorage))
        `runVMProcName`(`vmState`, `chainDB`, boa, asyncFactory)
  (vmProxySym, vmProxyProc)

proc generateAssemblerTest(boa: Assembler): NimNode =
  let
    (vmProxySym, vmProxyProc) = generateVMProxy(boa, false)
    title: string = boa.title

  result = quote do:
    test `title`:
      proc `vmProxy`(): bool =
        let boa = `body`
        runVM(`vmState`, `chainDB`, boa)
      `vmProxyProc`
      {.gcsafe.}:
        check `vmProxy`()
        check `vmProxySym`()

  when defined(macro_assembler_debug):
    echo result.toStrLit.strVal

type
  AsyncVMProxyTestProc* = proc(): Future[bool]

proc generateConcurrencyTest(t: ConcurrencyTest): NimNode =
  let
    vmProxies: seq[VMProxy] = t.assemblers.map(proc(boa: Assembler): VMProxy = generateVMProxy(boa, true))
    vmProxyProcs: seq[NimNode] = vmProxies.map(proc(x: VMProxy): NimNode = x.pr)
    vmProxySyms: seq[NimNode] = vmProxies.map(proc(x: VMProxy): NimNode = x.sym)
    title: string = t.title

  let runVMProxy = quote do:
    {.gcsafe.}:
      let procs: seq[AsyncVMProxyTestProc] = @(`vmProxySyms`)
      let futures: seq[Future[bool]] = procs.map(proc(s: AsyncVMProxyTestProc): Future[bool] = s())
      waitFor(allFutures(futures))

  # Is there a way to use "quote" (or something like it) to splice
  # in a statement list?
  let stmtList = newStmtList(vmProxyProcs)
  stmtList.add(runVMProxy)
  result = newCall("test", newStrLitNode(title), stmtList)

  when defined(macro_assembler_debug):
    echo result.toStrLit.strVal

@@ -220,26 +335,9 @@ proc initDatabase*(networkId = MainNet): (BaseVMState, BaseChainDB) =

  (vmState, db)

proc runVM*(vmState: BaseVMState, chainDB: BaseChainDB, boa: Assembler): bool =
  const codeAddress = hexToByteArray[20]("460121576cc7df020759730751f92bd62fd78dd6")
  let privateKey = PrivateKey.fromHex("7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d")[]

  vmState.mutateStateDB:
    db.setCode(codeAddress, boa.code)
    db.setBalance(codeAddress, 1_000_000.u256)

  let unsignedTx = Transaction(
    txType: TxLegacy,
    nonce: 0,
    gasPrice: 1.GasInt,
    gasLimit: 500_000_000.GasInt,
    to: codeAddress.some,
    value: 500.u256,
    payload: boa.data
  )
  let tx = signTransaction(unsignedTx, privateKey, chainDB.config.chainId, false)
  let asmResult = testCallEvm(tx, tx.getSender, vmState, boa.fork)
const codeAddress = hexToByteArray[20]("460121576cc7df020759730751f92bd62fd78dd6")

proc verifyAsmResult(vmState: BaseVMState, chainDB: BaseChainDB, boa: Assembler, asmResult: CallResult): bool =
  if not asmResult.isError:
    if boa.success == false:
      error "different success value", expected=boa.success, actual=true

@@ -332,30 +430,43 @@ proc runVM*(vmState: BaseVMState, chainDB: BaseChainDB, boa: Assembler): bool =

  result = true

proc createSignedTx(boaData: Blob, chainId: ChainId): Transaction =
  let privateKey = PrivateKey.fromHex("7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d")[]
  let unsignedTx = Transaction(
    txType: TxLegacy,
    nonce: 0,
    gasPrice: 1.GasInt,
    gasLimit: 500_000_000.GasInt,
    to: codeAddress.some,
    value: 500.u256,
    payload: boaData
  )
  signTransaction(unsignedTx, privateKey, chainId, false)

proc runVM*(vmState: BaseVMState, chainDB: BaseChainDB, boa: Assembler, asyncFactory: AsyncOperationFactory): bool =
  vmState.asyncFactory = asyncFactory
  vmState.mutateStateDB:
    db.setCode(codeAddress, boa.code)
    db.setBalance(codeAddress, 1_000_000.u256)
  let tx = createSignedTx(boa.data, chainDB.config.chainId)
  let asmResult = testCallEvm(tx, tx.getSender, vmState, boa.fork)
  verifyAsmResult(vmState, chainDB, boa, asmResult)

# FIXME-duplicatedForAsync
proc asyncRunVM*(vmState: BaseVMState, chainDB: BaseChainDB, boa: Assembler, asyncFactory: AsyncOperationFactory): Future[bool] {.async.} =
  vmState.asyncFactory = asyncFactory
  vmState.mutateStateDB:
    db.setCode(codeAddress, boa.code)
    db.setBalance(codeAddress, 1_000_000.u256)
  let tx = createSignedTx(boa.data, chainDB.config.chainId)
  let asmResult = await asyncTestCallEvm(tx, tx.getSender, vmState, boa.fork)
  return verifyAsmResult(vmState, chainDB, boa, asmResult)

macro assembler*(list: untyped): untyped =
  var boa = Assembler(success: true, fork: FkFrontier, gasUsed: -1)
  list.expectKind nnkStmtList
  for callSection in list:
    callSection.expectKind(nnkCall)
    let label = callSection[0].strVal
    let body = callSection[1]
    case label.normalize
    of "title":
      let title = body[0]
      title.expectKind(nnkStrLit)
      boa.title = title.strVal
    of "code" : boa.code = parseCode(body)
    of "memory": boa.memory = parseVMWords(body)
    of "stack" : boa.stack = parseVMWords(body)
    of "storage": boa.storage = parseStorage(body)
    of "logs": boa.logs = parseLogs(body)
    of "success": boa.success = parseSuccess(body)
    of "data": boa.data = parseData(body)
    of "output": boa.output = parseData(body)
    of "fork": boa.fork = parseFork(body)
    of "gasused": boa.gasUsed = parseGasUsed(body)
    else: error("unknown section '" & label & "'", callSection[0])
  result = boa.generateVMProxy()
  result = parseAssembler(list).generateAssemblerTest()

macro concurrentAssemblers*(list: untyped): untyped =
  result = parseConcurrencyTest(list).generateConcurrencyTest()

macro evmByteCode*(list: untyped): untyped =
  list.expectKind nnkStmtList

@@ -0,0 +1,74 @@
import macro_assembler, unittest2, macros, strutils

proc opMemoryLazyMain*() =
  suite "Lazy Loading With Memory Opcodes":
    let (vmState, chainDB) = initDatabase()

    assembler: # SLOAD OP with (fake) lazy data fetching
      title: "LAZY_SLOAD_1"
      initialStorage:
        "0xAA": "0x42"
      code:
        PUSH1 "0xAA"
        SLOAD
        PUSH1 "0x01"
        ADD
        PUSH1 "0xAA"
        SSTORE
        PUSH1 "0xAA"
        SLOAD
      storage:
        "0xAA": "0x43"
      stack:
        "0x0000000000000000000000000000000000000000000000000000000000000043"

    let (vmState1, chainDB1) = initDatabase()
    let (vmState2, chainDB2) = initDatabase()
    concurrentAssemblers:
      title: "Concurrent Assemblers"
      assemblers:
        asm1:
          title: "asm1"
          vmState: vmState1
          chainDB: chainDB1
          initialStorage:
            "0xBB": "0x42"
            "0xCC": "0x20"
          code:
            PUSH1 "0xBB"
            SLOAD
            PUSH1 "0xCC"
            SLOAD
            ADD
            PUSH1 "0xBB"
            SSTORE
            PUSH1 "0xBB"
            SLOAD
          storage:
            "0xBB": "0x62"
            "0xCC": "0x20"
          stack: "0x0000000000000000000000000000000000000000000000000000000000000062"
        asm2:
          title: "asm2"
          vmState: vmState2
          chainDB: chainDB2
          initialStorage:
            "0xDD": "0x30"
            "0xEE": "0x20"
          code:
            PUSH1 "0xDD"
            SLOAD
            PUSH1 "0xEE"
            SLOAD
            ADD
            PUSH1 "0xEE"
            SSTORE
            PUSH1 "0xEE"
            SLOAD
          storage:
            "0xDD": "0x30"
            "0xEE": "0x50"
          stack: "0x0000000000000000000000000000000000000000000000000000000000000050"

when isMainModule:
  opMemoryLazyMain()