From 8211db1ea8bb4e6e93daa2e336d70c518887678e Mon Sep 17 00:00:00 2001 From: Jamie Lokier Date: Mon, 12 Apr 2021 18:06:31 +0100 Subject: [PATCH] EVM: Small patch that reduces EVM stack usage to almost nothing There's been a lot of talk about the Nimbus EVM "stack problem". I think we assumed changing it would require big changes to the interpreter code, touching a lot of functions. It turned out to be a low hanging fruit. This patch solves the stack problem, but hardly touches anything. The change in EVM stack memory is from 13 MB worst case to just 48 kB, a 250x reduction. I've been doing work on the database/storage/trie code. While looking at the API between the EVM and the database/storage/trie, this stack patch stood out and made itself obvious. As it's tiny, rather than more talk, here it is. Note: This patch is intentionally small, non-invasive, and hopefully easy to understand, so that it doesn't conflict with other work done on the EVM, and can easily be grafted into any other EVM structure. Motivation ========== - We run out of space and crash on some targets, unless the stack limit is raised above its default. Surprise segmentation faults are unhelpful. - Some CI targets have been disabled for months due to this. - Because usage borders on the system limits, when working on database/storage/trie/sync code (called from the EVM), segmentation faults occur and are misleading. They cause lost time due to thinking there's a crash bug in the code being worked on, when there's nothing wrong with it. - Sometimes unrelated, trivial code changes elsewhere trigger CI test failures. It looks like abrupt termination. A simple, recent patch was crashing in `make test` even though it was a trivial refactor. Turns out it pushed the stack over the edge. - A large stack has to be scanned by the Nim garbage collector sometimes. Larger stack means slower GC and memory allocation. 
- The structure of this small patch suggests how to weave async into the EVM with almost no changes to the EVM, and no async transformation overhead. - The patch seemed obvious when working on the API between EVM and storage. Measurements before =================== All these tests were run on Ubuntu 20.04 server, x86-64. This is one of the targets that has been disabled for a while in CI in EVMC mode due to crashing, and excessive stack usage is the cause. Testing commit 0c34a8e3 `2021-04-08 17:46:00 +0200 CI: use MSYS2 on Windows`. $ rm -f build/all_tests && make ENABLE_EVMC=1 test $ ulimit -S -s 16384 # Requires larger stack than default to avoid crash. $ ./build/all_tests 9 | tee tlog [Suite] persist block json tests ... Stack range 38496 depthHigh 3 ... Stack range 13140272 depthHigh 1024 [OK] tests/fixtures/PersistBlockTests/block1431916.json These tests use 13.14 MB of stack to run, and so crash with the default stack limit on Ubuntu Server 20.04 (8MB). Exactly 12832 bytes per EVM call stack frame. It's interesting to see some stack frames take a bit more. $ rm -f build/all_tests && make ENABLE_EVMC=1 test $ ulimit -S -s 16384 # Requires larger stack than default. $ ./build/all_tests 7 | tee tlog [Suite] new generalstate json tests ... Stack range 15488 depthHigh 2 ... Stack range 3539312 depthHigh 457 [OK] tests/fixtures/eth_tests/GeneralStateTests/stRandom2/randomStatetest639.json ... Stack range 3756144 depthHigh 485 [OK] tests/fixtures/eth_tests/GeneralStateTests/stRandom2/randomStatetest458.json ... Stack range 7929968 depthHigh 1024 [OK] tests/fixtures/eth_tests/GeneralStateTests/stCreate2/Create2OnDepth1024.json These tests use 7.92MB of stack to run. About 7264 bytes per EVM call stack frame. It _only just_ avoids crashing with the default Ubuntu Server stack limit of 8 MB. However, it still crashes on Windows x86-64, which is why the CI target is currently disabled. 
On Linux where this passes, this is so borderline that it affects work and testing of storage and sync code, because that's called from the EVM. Which was a motivation for dealing with the stack instead of letting this linger. Also, this stack greatly exceeds the default thread stack size. $ rm -f build/all_tests && make ENABLE_EVMC=0 test $ ulimit -S -s 16384 # Requires larger stack than default to avoid crash. $ ./build/all_tests 9 | tee tlog [Suite] persist block json tests ... Stack range 33216 depthHigh 3 ... Stack range 11338032 depthHigh 1024 [OK] tests/fixtures/PersistBlockTests/block1431916.json These tests use 11.33 MB stack to run, and so crash with a default stack limit of 8MB. Exactly 11072 bytes per EVM call stack frame. It's interesting to see some stack frames take a bit more. $ rm -f build/all_tests && make ENABLE_EVMC=0 test $ ulimit -S -s 16384 # Requires larger stack than default. $ ./build/all_tests 7 | tee tlog [Suite] new generalstate json tests ... Stack range 10224 depthHigh 2 ... Stack range 2471760 depthHigh 457 [OK] tests/fixtures/eth_tests/GeneralStateTests/stRandom2/randomStatetest639.json ... Stack range 2623184 depthHigh 485 [OK] tests/fixtures/eth_tests/GeneralStateTests/stRandom2/randomStatetest458.json ... Stack range 5537824 depthHigh 1024 [OK] tests/fixtures/eth_tests/GeneralStateTests/stCreate2/Create2OnDepth1024.json These tests use 5.54 MB of stack to run, and avoid crashing with a default stack limit of 8 MB. About 5408 bytes per EVM call stack frame. However, this is uncomfortably close to the limit, as the stack frame size is sensitive to changes in the code. Also, this stack greatly exceeds the default thread stack size. Measurements after ================== (This patch doesn't address EVMC mode, which is not our default. EVMC stack usage remains about the same. EVMC mode is addressed in another tiny patch.) $ rm -f build/all_tests && make ENABLE_EVMC=0 test $ ulimit -S -s 80 # Because we can! 80k stack. 
$ ./build/all_tests 9 | tee tlog [Suite] persist block json tests ... Stack range 496 depthHigh 3 ... Stack range 49504 depthHigh 1024 [OK] tests/fixtures/PersistBlockTests/block1431916.json $ rm -f build/all_tests && make ENABLE_EVMC=0 test $ ulimit -S -s 72 # Because we can! 72k stack. $ ./build/all_tests 7 | tee tlog [Suite] new generalstate json tests ... Stack range 448 depthHigh 2 ... Stack range 22288 depthHigh 457 [OK] tests/fixtures/eth_tests/GeneralStateTests/stRandom2/randomStatetest639.json ... Stack range 23632 depthHigh 485 [OK] tests/fixtures/eth_tests/GeneralStateTests/stRandom2/randomStatetest458.json ... Stack range 49504 depthHigh 1024 [OK] tests/fixtures/eth_tests/GeneralStateTests/stCreate2/Create2OnDepth1024.json For both tests, a satisfying *48 bytes* per EVM call stack frame, and EVM takes not much more than 48 kB. With other overheads, both tests run in 80 kB stack total at maximum EVM depth. We must add some headroom on this for database activity called from the EVM, and different compile targets. But it means the EVM itself is no longer a stack burden. This is much smaller than the default thread stack size on Linux (2MB), with plenty of margin. It's even smaller than Linux from a long time ago (128kB), and some small embedded C targets. (Just fyi, though, some JVM environments allocated just 32 kB to thread stacks.) This size is also well suited to running EVMs in threads, if that's useful. Subtle exception handling and `dispose` ======================================= It is important that each `snapshot` has a corresponding `dispose` in the event of an exception being raised. This code does do that, but in a subtle way. 
The pair of functions `execCallOrCreate` and `execCallOrCreateAux` are equivalent to the following code, where you can see `dispose` more clearly: proc execCallOrCreate*(c: Computation) = defer: c.dispose() if c.beforeExec(): return c.executeOpcodes() while not c.continuation.isNil: c.child.execCallOrCreate() c.child = nil (c.continuation)() c.executeOpcodes() c.afterExec() That works fine, but only reduces the stack used to 300-700 kB instead of 48 kB. To get lower we split the above into separate `execCallOrCreate` and `execCallOrCreateAux`. Only the outermost has `defer`, and instead of handling one level, it walks the entire `c.parent` chain calling `dispose` if needed. The inner one avoids `defer`, which greatly reduces the size of its stackframe. `c` is a `var` parameter, at each level of recursion. So the outermost proc sees the temporary changes made by all inner calls. This is why `c` is updated and the `c.parent` chain is maintained at each step. Signed-off-by: Jamie Lokier --- nimbus/vm/computation.nim | 69 +++++++++++++++++++++----- nimbus/vm/evmc_host.nim | 4 +- nimbus/vm/interpreter.nim | 4 +- nimbus/vm/interpreter/opcodes_impl.nim | 39 +++++++-------- nimbus/vm/interpreter_dispatch.nim | 12 ++++- nimbus/vm/state_transactions.nim | 7 ++- nimbus/vm/types.nim | 2 + nimbus/vm_computation.nim | 4 +- 8 files changed, 97 insertions(+), 44 deletions(-) diff --git a/nimbus/vm/computation.nim b/nimbus/vm/computation.nim index 44de3d4c2..580629f11 100644 --- a/nimbus/vm/computation.nim +++ b/nimbus/vm/computation.nim @@ -192,7 +192,8 @@ proc commit*(c: Computation) = c.vmState.accountDb.commit(c.savePoint) proc dispose*(c: Computation) {.inline.} = - c.vmState.accountDb.dispose(c.savePoint) + c.vmState.accountDb.safeDispose(c.savePoint) + c.savePoint = nil proc rollback*(c: Computation) = c.vmState.accountDb.rollback(c.savePoint) @@ -228,18 +229,14 @@ proc initAddress(x: int): EthAddress {.compileTime.} = result[19] = x.byte const ripemdAddr = initAddress(3) 
proc executeOpcodes*(c: Computation) {.gcsafe.} -proc execCall*(c: Computation) = +proc beforeExecCall(c: Computation) = c.snapshot() - defer: - c.dispose() - if c.msg.kind == evmcCall: c.vmState.mutateStateDb: db.subBalance(c.msg.sender, c.msg.value) db.addBalance(c.msg.contractAddress, c.msg.value) - executeOpcodes(c) - +proc afterExecCall(c: Computation) = ## Collect all of the accounts that *may* need to be deleted based on EIP161 ## https://github.com/ethereum/EIPs/blob/master/EIPS/eip-161.md ## also see: https://github.com/ethereum/EIPs/issues/716 @@ -255,7 +252,7 @@ proc execCall*(c: Computation) = else: c.rollback() -proc execCreate*(c: Computation) = +proc beforeExecCreate(c: Computation): bool = c.vmState.mutateStateDB: db.incNonce(c.msg.sender) @@ -266,13 +263,11 @@ proc execCreate*(c: Computation) = db.accessList(c.msg.contractAddress) c.snapshot() - defer: - c.dispose() if c.vmState.readOnlyStateDb().hasCodeOrNonce(c.msg.contractAddress): c.setError("Address collision when creating contract address={c.msg.contractAddress.toHex}", true) c.rollback() - return + return true c.vmState.mutateStateDb: db.subBalance(c.msg.sender, c.msg.value) @@ -282,8 +277,9 @@ proc execCreate*(c: Computation) = # EIP161 nonce incrementation db.incNonce(c.msg.contractAddress) - executeOpcodes(c) + return false +proc afterExecCreate(c: Computation) = if c.isSuccess: let fork = c.fork let contractFailed = not c.writeContract(fork) @@ -295,6 +291,55 @@ proc execCreate*(c: Computation) = else: c.rollback() +proc beforeExec(c: Computation): bool = + if not c.msg.isCreate: + c.beforeExecCall() + false + else: + c.beforeExecCreate() + +proc afterExec(c: Computation) = + if not c.msg.isCreate: + c.afterExecCall() + else: + c.afterExecCreate() + +template chainTo*(c, toChild: Computation, after: untyped) = + c.child = toChild + c.continuation = proc() = + after + +proc execCallOrCreateAux(c: var Computation) {.noinline.} = + # Perform recursion with minimum-size stack per level. 
The exception + # handling is very subtle. Each call to `snapshot` must have a corresponding + # `dispose` on exception. To minimise this proc's stackframe, `defer` is + # moved to the outermost proc only. `{.noinline.}` is also used to make + # extra sure they stay separate. `c` is a `var` parameter at every level of + # recursion, so the outermost proc sees every change to `c`, which is why `c` + # is updated instead of using `let`. On exception, the outermost `defer` + # walks the `c.parent` chain to call `dispose` on each `c`. + if c.beforeExec(): + return + c.executeOpcodes() + while not c.continuation.isNil: + # Parent and child refs are updated and cleared so as to avoid circular + # refs (like a double-linked list) or dangling refs (to finished child). + (c.child, c, c.parent) = (nil.Computation, c.child, c) + execCallOrCreateAux(c) + c.dispose() + (c.parent, c) = (nil.Computation, c.parent) + (c.continuation)() + c.executeOpcodes() + c.afterExec() + +proc execCallOrCreate*(cParam: Computation) = + var c = cParam + defer: + while not c.isNil: + c.dispose() + c = c.parent + execCallOrCreateAux(c) + proc merge*(c, child: Computation) = c.logEntries.add child.logEntries c.gasMeter.refundGas(child.gasMeter.gasRefunded) diff --git a/nimbus/vm/evmc_host.nim b/nimbus/vm/evmc_host.nim index 2a72bbf8b..72b66e6f1 100644 --- a/nimbus/vm/evmc_host.nim +++ b/nimbus/vm/evmc_host.nim @@ -135,7 +135,7 @@ template createImpl(c: Computation, m: nimbus_message, res: nimbus_result) = ) let child = newComputation(c.vmState, childMsg, Uint256.fromEvmc(m.create2_salt)) - child.execCreate() + child.execCallOrCreate() if not child.shouldBurnGas: res.gas_left = child.gasMeter.gasRemaining @@ -167,7 +167,7 @@ template callImpl(c: Computation, m: nimbus_message, res: nimbus_result) = ) let child = newComputation(c.vmState, childMsg) - child.execCall() + child.execCallOrCreate() if not child.shouldBurnGas: res.gas_left = child.gasMeter.gasRemaining diff --git 
a/nimbus/vm/interpreter.nim b/nimbus/vm/interpreter.nim index f12dfb1d7..543949c91 100644 --- a/nimbus/vm/interpreter.nim +++ b/nimbus/vm/interpreter.nim @@ -38,8 +38,8 @@ export vmc.addLogEntry, vmc.commit, vmc.dispose, - vmc.execCall, - vmc.execCreate, + vmc.execCallOrCreate, + vmc.chainTo, vmc.execSelfDestruct, vmc.executeOpcodes, vmc.fork, diff --git a/nimbus/vm/interpreter/opcodes_impl.nim b/nimbus/vm/interpreter/opcodes_impl.nim index 5efe0e246..e8a4b15d3 100644 --- a/nimbus/vm/interpreter/opcodes_impl.nim +++ b/nimbus/vm/interpreter/opcodes_impl.nim @@ -678,15 +678,15 @@ template genCreate(callName: untyped, opCode: Op): untyped = ) var child = newComputation(c.vmState, childMsg, salt) - child.execCreate() - if not child.shouldBurnGas: - c.gasMeter.returnGas(child.gasMeter.gasRemaining) + c.chainTo(child): + if not child.shouldBurnGas: + c.gasMeter.returnGas(child.gasMeter.gasRemaining) - if child.isSuccess: - c.merge(child) - c.stack.top child.msg.contractAddress - else: - c.returnData = child.output + if child.isSuccess: + c.merge(child) + c.stack.top child.msg.contractAddress + else: + c.returnData = child.output genCreate(create, Create) genCreate(create2, Create2) @@ -870,20 +870,19 @@ template genCall(callName: untyped, opCode: Op): untyped = flags: flags) var child = newComputation(c.vmState, msg) - child.execCall() + c.chainTo(child): + if not child.shouldBurnGas: + c.gasMeter.returnGas(child.gasMeter.gasRemaining) - if not child.shouldBurnGas: - c.gasMeter.returnGas(child.gasMeter.gasRemaining) + if child.isSuccess: + c.merge(child) + c.stack.top(1) - if child.isSuccess: - c.merge(child) - c.stack.top(1) - - c.returnData = child.output - let actualOutputSize = min(memOutLen, child.output.len) - if actualOutputSize > 0: - c.memory.write(memOutPos, - child.output.toOpenArray(0, actualOutputSize - 1)) + c.returnData = child.output + let actualOutputSize = min(memOutLen, child.output.len) + if actualOutputSize > 0: + c.memory.write(memOutPos, + 
child.output.toOpenArray(0, actualOutputSize - 1)) genCall(call, Call) genCall(callCode, CallCode) diff --git a/nimbus/vm/interpreter_dispatch.nim b/nimbus/vm/interpreter_dispatch.nim index af7e3f59b..593d76dc1 100644 --- a/nimbus/vm/interpreter_dispatch.nim +++ b/nimbus/vm/interpreter_dispatch.nim @@ -272,6 +272,9 @@ proc opTableToCaseStmt(opTable: array[Op, NimNode], c: NimNode): NimNode = `opImpl`(`c`) if `c`.tracingEnabled: `c`.traceOpCodeEnded(`asOp`, `c`.opIndex) + when `asOp` in {Create, Create2, Call, CallCode, DelegateCall, StaticCall}: + if not `c`.continuation.isNil: + return else: quote do: if `c`.tracingEnabled: @@ -279,6 +282,9 @@ proc opTableToCaseStmt(opTable: array[Op, NimNode], c: NimNode): NimNode = `opImpl`(`c`) if `c`.tracingEnabled: `c`.traceOpCodeEnded(`asOp`, `c`.opIndex) + when `asOp` in {Create, Create2, Call, CallCode, DelegateCall, StaticCall}: + if not `c`.continuation.isNil: + return when `asOp` in {Return, Revert, SelfDestruct}: break @@ -380,7 +386,9 @@ proc executeOpcodes(c: Computation) = let fork = c.fork block: - if c.execPrecompiles(fork): + if not c.continuation.isNil: + c.continuation = nil + elif c.execPrecompiles(fork): break try: @@ -388,6 +396,6 @@ proc executeOpcodes(c: Computation) = except CatchableError as e: c.setError(&"Opcode Dispatch Error msg={e.msg}, depth={c.msg.depth}", true) - if c.isError(): + if c.isError() and c.continuation.isNil: if c.tracingEnabled: c.traceError() debug "executeOpcodes error", msg=c.error.info diff --git a/nimbus/vm/state_transactions.nim b/nimbus/vm/state_transactions.nim index fa17ac4ad..f6e471849 100644 --- a/nimbus/vm/state_transactions.nim +++ b/nimbus/vm/state_transactions.nim @@ -36,12 +36,11 @@ proc setupComputation*(vmState: BaseVMState, tx: Transaction, sender: EthAddress doAssert result.isOriginComputation proc execComputation*(c: Computation) = - if c.msg.isCreate: - c.execCreate() - else: + if not c.msg.isCreate: c.vmState.mutateStateDB: db.incNonce(c.msg.sender) - 
c.execCall() + + c.execCallOrCreate() if c.isSuccess: c.refundSelfDestruct() diff --git a/nimbus/vm/types.nim b/nimbus/vm/types.nim index e5776c812..a1aa9faf6 100644 --- a/nimbus/vm/types.nim +++ b/nimbus/vm/types.nim @@ -85,6 +85,8 @@ type savePoint*: SavePoint instr*: Op opIndex*: int + parent*, child*: Computation + continuation*: proc() {.gcsafe.} Error* = ref object info*: string diff --git a/nimbus/vm_computation.nim b/nimbus/vm_computation.nim index be222385b..06ed1fe16 100644 --- a/nimbus/vm_computation.nim +++ b/nimbus/vm_computation.nim @@ -19,8 +19,8 @@ export vmc.addLogEntry, vmc.commit, vmc.dispose, - vmc.execCall, - vmc.execCreate, + vmc.execCallOrCreate, + vmc.chainTo, vmc.execSelfDestruct, vmc.executeOpcodes, vmc.fork,