From b7220428b35545759f876bc4ff6a83f51214e4b1 Mon Sep 17 00:00:00 2001 From: Jacqueline Nabaglo Date: Fri, 2 Jun 2023 15:51:26 -0700 Subject: [PATCH 1/6] Error handling --- evm/src/cpu/columns/general.rs | 33 ++ evm/src/cpu/columns/ops.rs | 5 +- evm/src/cpu/control_flow.rs | 1 + evm/src/cpu/cpu_stark.rs | 6 +- evm/src/cpu/decode.rs | 27 -- evm/src/cpu/gas.rs | 1 + evm/src/cpu/jumps.rs | 27 +- evm/src/cpu/kernel/aggregator.rs | 2 +- evm/src/cpu/kernel/asm/core/exception.asm | 290 ++++++++++++++++++ evm/src/cpu/kernel/asm/core/invalid.asm | 26 -- evm/src/cpu/kernel/asm/core/util.asm | 18 ++ evm/src/cpu/kernel/constants/exc_bitfields.rs | 53 ++++ evm/src/cpu/kernel/constants/mod.rs | 8 +- evm/src/cpu/mod.rs | 1 + evm/src/cpu/stack.rs | 5 + evm/src/witness/gas.rs | 2 +- evm/src/witness/operation.rs | 99 +++++- evm/src/witness/transition.rs | 153 +++++---- 18 files changed, 627 insertions(+), 130 deletions(-) create mode 100644 evm/src/cpu/kernel/asm/core/exception.asm delete mode 100644 evm/src/cpu/kernel/asm/core/invalid.asm create mode 100644 evm/src/cpu/kernel/constants/exc_bitfields.rs diff --git a/evm/src/cpu/columns/general.rs b/evm/src/cpu/columns/general.rs index 6727eb07..07d5a378 100644 --- a/evm/src/cpu/columns/general.rs +++ b/evm/src/cpu/columns/general.rs @@ -7,6 +7,8 @@ use std::mem::{size_of, transmute}; #[derive(Clone, Copy)] pub(crate) union CpuGeneralColumnsView { arithmetic: CpuArithmeticView, + exception: CpuExceptionView, + exit_kernel: CpuExitKernelView, logic: CpuLogicView, jumps: CpuJumpsView, shift: CpuShiftView, @@ -23,6 +25,26 @@ impl CpuGeneralColumnsView { unsafe { &mut self.arithmetic } } + // SAFETY: Each view is a valid interpretation of the underlying array. + pub(crate) fn exception(&self) -> &CpuExceptionView { + unsafe { &self.exception } + } + + // SAFETY: Each view is a valid interpretation of the underlying array. 
+ pub(crate) fn exception_mut(&mut self) -> &mut CpuExceptionView { + unsafe { &mut self.exception } + } + + // SAFETY: Each view is a valid interpretation of the underlying array. + pub(crate) fn exit_kernel(&self) -> &CpuExitKernelView { + unsafe { &self.exit_kernel } + } + + // SAFETY: Each view is a valid interpretation of the underlying array. + pub(crate) fn exit_kernel_mut(&mut self) -> &mut CpuExitKernelView { + unsafe { &mut self.exit_kernel } + } + // SAFETY: Each view is a valid interpretation of the underlying array. pub(crate) fn logic(&self) -> &CpuLogicView { unsafe { &self.logic } @@ -89,6 +111,17 @@ pub(crate) struct CpuArithmeticView { tmp: T, // temporary, to suppress errors } +#[derive(Copy, Clone)] +pub(crate) struct CpuExceptionView { + // Exception code as little-endian bits. + pub(crate) exc_code_bits: [T; 3], +} + +#[derive(Copy, Clone)] +pub(crate) struct CpuExitKernelView { + pub(crate) stack_len_check_aux: T, +} + #[derive(Copy, Clone)] pub(crate) struct CpuLogicView { // Pseudoinverse of `(input0 - input1)`. Used prove that they are unequal. Assumes 32-bit limbs. diff --git a/evm/src/cpu/columns/ops.rs b/evm/src/cpu/columns/ops.rs index e3dc84d3..c37ddbf3 100644 --- a/evm/src/cpu/columns/ops.rs +++ b/evm/src/cpu/columns/ops.rs @@ -47,12 +47,13 @@ pub struct OpsColumnsView { // TODO: combine GET_CONTEXT and SET_CONTEXT into one flag pub get_context: T, pub set_context: T, - pub exit_kernel: T, + pub exit_kernel: T, // Note: This column must be 0 when is_cpu_cycle = 0. // TODO: combine MLOAD_GENERAL and MSTORE_GENERAL into one flag pub mload_general: T, pub mstore_general: T, - pub syscall: T, // Note: This column must be 0 when is_cpu_cycle = 0. + pub syscall: T, // Note: This column must be 0 when is_cpu_cycle = 0. + pub exception: T, // Note: This column must be 0 when is_cpu_cycle = 0. } // `u8` is guaranteed to have a `size_of` of 1. 
diff --git a/evm/src/cpu/control_flow.rs b/evm/src/cpu/control_flow.rs index 0611744a..55da50c4 100644 --- a/evm/src/cpu/control_flow.rs +++ b/evm/src/cpu/control_flow.rs @@ -45,6 +45,7 @@ const NATIVE_INSTRUCTIONS: [usize; 31] = [ COL_MAP.op.mload_general, COL_MAP.op.mstore_general, // not SYSCALL (performs a jump) + // not exceptions (also jump) ]; pub(crate) fn get_halt_pcs() -> (F, F) { diff --git a/evm/src/cpu/cpu_stark.rs b/evm/src/cpu/cpu_stark.rs index 069a1609..e0d89f02 100644 --- a/evm/src/cpu/cpu_stark.rs +++ b/evm/src/cpu/cpu_stark.rs @@ -13,8 +13,8 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer use crate::cpu::columns::{CpuColumnsView, COL_MAP, NUM_CPU_COLUMNS}; use crate::cpu::membus::NUM_GP_CHANNELS; use crate::cpu::{ - bootstrap_kernel, contextops, control_flow, decode, dup_swap, gas, jumps, membus, memio, - modfp254, pc, shift, simple_logic, stack, stack_bounds, syscalls, + bootstrap_kernel, contextops, control_flow, decode, dup_swap, exceptions, gas, jumps, membus, + memio, modfp254, pc, shift, simple_logic, stack, stack_bounds, syscalls, }; use crate::cross_table_lookup::{Column, TableWithColumns}; use crate::memory::segments::Segment; @@ -190,6 +190,7 @@ impl, const D: usize> Stark for CpuStark, const D: usize> Stark for CpuStark [u8; 32] { - let mut res = [u8::MAX; 32]; // Start with all opcodes marked invalid. - - let mut i = 0; - while i < OPCODES.len() { - let (block_start, lb_block_len, kernel_only, _) = OPCODES[i]; - i += 1; - - if kernel_only { - continue; - } - - let block_len = 1 << lb_block_len; - let block_start = block_start as usize; - let block_end = block_start + block_len; - let mut j = block_start; - while j < block_end { - let byte = j / u8::BITS as usize; - let bit = j % u8::BITS as usize; - res[byte] &= !(1 << bit); // Mark opcode as invalid by zeroing the bit. 
- j += 1; - } - } - res -} - pub fn generate(lv: &mut CpuColumnsView) { let cycle_filter = lv.is_cpu_cycle; if cycle_filter == F::ZERO { diff --git a/evm/src/cpu/gas.rs b/evm/src/cpu/gas.rs index d2105c10..3d9e1fad 100644 --- a/evm/src/cpu/gas.rs +++ b/evm/src/cpu/gas.rs @@ -56,6 +56,7 @@ const SIMPLE_OPCODES: OpsColumnsView> = OpsColumnsView { mload_general: KERNEL_ONLY_INSTR, mstore_general: KERNEL_ONLY_INSTR, syscall: None, + exception: None, }; fn eval_packed_accumulate( diff --git a/evm/src/cpu/jumps.rs b/evm/src/cpu/jumps.rs index 2f2f30a9..9ce5b3bc 100644 --- a/evm/src/cpu/jumps.rs +++ b/evm/src/cpu/jumps.rs @@ -15,7 +15,7 @@ pub fn eval_packed_exit_kernel( yield_constr: &mut ConstraintConsumer

, ) { let input = lv.mem_channels[0].value; - let filter = lv.is_cpu_cycle * lv.op.exit_kernel; + let filter = lv.op.exit_kernel; // If we are executing `EXIT_KERNEL` then we simply restore the program counter, kernel mode // flag, and gas counter. The middle 4 (32-bit) limbs are ignored (this is not part of the spec, @@ -25,6 +25,19 @@ pub fn eval_packed_exit_kernel( yield_constr.constraint_transition(filter * (input[6] - nv.gas)); // High limb of gas must be 0 for convenient detection of overflow. yield_constr.constraint(filter * input[7]); + + // Check that the syscall did not cause a stack overflow. + // A standard EVM instruction increases the length of the user's stack by at most one. We assume + // that the stack length was valid when we entered the syscall (i.e. it was <= 1024). If the + // syscall caused the stack to overflow, then the stack length is 1025 upon exit into userspace; + // that corresponds to a stack length of 1026 before `EXIT_KERNEL` is executed. + // This constraint ensures that the current stack length is not 1026 if we're returning to user + // mode. If we're remaining in kernel mode upon return from this syscall, the RHS will be 0, so + // the constraint is trivially satisfied by setting `stack_len_check_aux = 0`. + let offset_stack_len = lv.stack_len - P::Scalar::from_canonical_u32(1026); + let lhs = offset_stack_len * lv.general.exit_kernel().stack_len_check_aux; + let rhs = P::ONES - input[1]; + yield_constr.constraint(filter * (lhs - rhs)); } pub fn eval_ext_circuit_exit_kernel, const D: usize>( @@ -34,7 +47,7 @@ pub fn eval_ext_circuit_exit_kernel, const D: usize yield_constr: &mut RecursiveConstraintConsumer, ) { let input = lv.mem_channels[0].value; - let filter = builder.mul_extension(lv.is_cpu_cycle, lv.op.exit_kernel); + let filter = lv.op.exit_kernel; // If we are executing `EXIT_KERNEL` then we simply restore the program counter and kernel mode // flag. 
The top 6 (32-bit) limbs are ignored (this is not part of the spec, but we trust the @@ -58,6 +71,16 @@ pub fn eval_ext_circuit_exit_kernel, const D: usize let constr = builder.mul_extension(filter, input[7]); yield_constr.constraint(builder, constr); } + + // Check that the syscall did not cause a stack overflow. + // See detailed comments in the packed implementation. + { + let stack_len_check_aux = lv.general.exit_kernel().stack_len_check_aux; + let lhs = builder.arithmetic_extension(F::ONE, -F::from_canonical_u32(1026), lv.stack_len, stack_len_check_aux, stack_len_check_aux); + let constr = builder.add_extension(lhs, input[1]); + let constr = builder.mul_sub_extension(filter, constr, filter); + yield_constr.constraint(builder, constr); + } } pub fn eval_packed_jump_jumpi( diff --git a/evm/src/cpu/kernel/aggregator.rs b/evm/src/cpu/kernel/aggregator.rs index 1da28e0b..4d4f1da8 100644 --- a/evm/src/cpu/kernel/aggregator.rs +++ b/evm/src/cpu/kernel/aggregator.rs @@ -29,9 +29,9 @@ pub(crate) fn combined_kernel() -> Kernel { include_str!("asm/core/create.asm"), include_str!("asm/core/create_addresses.asm"), include_str!("asm/core/create_contract_account.asm"), + include_str!("asm/core/exception.asm"), include_str!("asm/core/gas.asm"), include_str!("asm/core/intrinsic_gas.asm"), - include_str!("asm/core/invalid.asm"), include_str!("asm/core/jumpdest_analysis.asm"), include_str!("asm/core/nonce.asm"), include_str!("asm/core/process_txn.asm"), diff --git a/evm/src/cpu/kernel/asm/core/exception.asm b/evm/src/cpu/kernel/asm/core/exception.asm new file mode 100644 index 00000000..0fefe29c --- /dev/null +++ b/evm/src/cpu/kernel/asm/core/exception.asm @@ -0,0 +1,290 @@ +// These exception codes are arbitrary and assigned by us.
+global exception_jumptable: + // exception 0: out of gas + JUMPTABLE exc_out_of_gas + + // exception 1: invalid opcode + JUMPTABLE exc_invalid_opcode + + // exception 2: stack underflow + JUMPTABLE exc_stack_underflow + + // exception 3: invalid jump destination + JUMPTABLE exc_invalid_jump_destination + + // exception 4: invalid jumpi destination + JUMPTABLE exc_invalid_jumpi_destination + + // exception 5: stack overflow + JUMPTABLE exc_stack_overflow + + // exceptions 6 and 7: unused + JUMPTABLE panic + JUMPTABLE panic + + +global exc_out_of_gas: + // TODO + %jump(fault_exception) + + +global exc_invalid_opcode: + // stack: trap_info + // check if the opcode that triggered this trap is _actually_ invalid + %opcode_from_exp_trap_info + PUSH @INVALID_OPCODES_USER + // stack: invalid_opcodes_user, opcode + SWAP1 + // stack: opcode, invalid_opcodes_user + SHR + %mod_const(2) + // stack: opcode_is_invalid + // if the opcode is indeed invalid, then perform an exceptional exit + %jumpi(fault_exception) + // otherwise, panic because this trap should not have been entered + PANIC + + +global exc_stack_underflow: + // stack: trap_info + %opcode_from_exp_trap_info + // stack: opcode + %add_const(min_stack_len_for_opcode) + %mload_kernel_code + // stack: min_stack_length + %stack_length + // stack: user_stack_length + 1, min_stack_length + GT + // stack: user_stack_length >= min_stack_length + %jumpi(panic) + %jump(fault_exception) + + +// Debugging note: this will underflow if entered without at least one item on the stack (in +// addition to trap_info). This is expected; it means that the exc_stack_underflow handler should +// have been used instead. 
+global exc_invalid_jump_destination: + // stack: trap_info, jump_dest + // check that the triggering opcode is indeed JUMP + %opcode_from_exp_trap_info + // stack: opcode, jump_dest + %eq_const(0x56) + // if it's JUMP, then verify that we're actually jumping to an invalid address + %jumpi(invalid_jump_jumpi_destination_common) + // otherwise, panic + PANIC + + +// Debugging note: this will underflow if entered without at least two items on the stack (in +// addition to trap_info). This is expected; it means that the exc_stack_underflow handler should +// have been used instead. +global exc_invalid_jumpi_destination: + // stack: trap_info, jump_dest, condition + // check that the triggering opcode is indeed JUMPI + %opcode_from_exp_trap_info + // stack: opcode, jump_dest, condition + %sub_const(0x57) + // if it's not JUMPI, then panic + %jumpi(panic) + // otherwise, verify that the condition is nonzero + // stack: jump_dest, condition + SWAP1 + // if it's nonzero, then verify that we're actually jumping to an invalid address + %jumpi(invalid_jump_jumpi_destination_common) + // otherwise, panic + PANIC + + +global invalid_jump_jumpi_destination_common: + // stack: jump_dest + %mload_current(@SEGMENT_JUMPDEST_BITS) + // stack: is_valid_jumpdest + // if valid, then the trap should never have been entered + %jumpi(panic) + %jump(fault_exception) + + +global exc_stack_overflow: + // stack: trap_info + // check that the triggering opcode _can_ overflow (i.e., it increases the stack size by 1) + %opcode_from_exp_trap_info + PUSH @STACK_LENGTH_INCREASING_OPCODES_USER + // stack: stack_length_increasing_opcodes_user, opcode + SWAP1 + // stack: opcode, stack_length_increasing_opcodes_user + SHR + %mod_const(2) + // stack: opcode_increases_stack_length + // if the opcode indeed increases the stack length, then check whether the stack size is at its + // maximum value + %jumpi(exc_stack_overflow_check_stack_length) + // otherwise, panic because this trap should not have been 
entered + PANIC +global exc_stack_overflow_check_stack_length: + // stack: (empty) + %stack_length + %eq_const(1024) + // if true, stack length is at its maximum allowed value, so the instruction would indeed cause + // an overflow. + %jumpi(fault_exception) + PANIC + + +// Given the exception trap info, load the opcode that caused the exception +%macro opcode_from_exp_trap_info + %mod_const(0x100000000) // get program counter from low 32 bits of trap_info + %mload_current_code +%endmacro + + +min_stack_len_for_opcode: + BYTES 0 // 0x00, STOP + BYTES 2 // 0x01, ADD + BYTES 2 // 0x02, MUL + BYTES 2 // 0x03, SUB + BYTES 2 // 0x04, DIV + BYTES 2 // 0x05, SDIV + BYTES 2 // 0x06, MOD + BYTES 2 // 0x07, SMOD + BYTES 2 // 0x08, ADDMOD + BYTES 2 // 0x09, MULMOD + BYTES 2 // 0x0a, EXP + BYTES 2 // 0x0b, SIGNEXTEND + %rep 4 // 0x0c-0x0f, invalid + BYTES 0 + %endrep + + BYTES 2 // 0x10, LT + BYTES 2 // 0x11, GT + BYTES 2 // 0x12, SLT + BYTES 2 // 0x13, SGT + BYTES 2 // 0x14, EQ + BYTES 1 // 0x15, ISZERO + BYTES 2 // 0x16, AND + BYTES 2 // 0x17, OR + BYTES 2 // 0x18, XOR + BYTES 1 // 0x19, NOT + BYTES 2 // 0x1a, BYTES + BYTES 2 // 0x1b, SHL + BYTES 2 // 0x1c, SHR + BYTES 2 // 0x1d, SAR + BYTES 0 // 0x1e, invalid + BYTES 0 // 0x1f, invalid + + BYTES 2 // 0x20, KECCAK256 + %rep 15 // 0x21-0x2f, invalid + BYTES 0 + %endrep + + BYTES 0 // 0x30, ADDRESS + BYTES 1 // 0x31, BALANCE + BYTES 0 // 0x32, ORIGIN + BYTES 0 // 0x33, CALLER + BYTES 0 // 0x34, CALLVALUE + BYTES 1 // 0x35, CALLDATALOAD + BYTES 0 // 0x36, CALLDATASIZE + BYTES 3 // 0x37, CALLDATACOPY + BYTES 0 // 0x38, CODESIZE + BYTES 3 // 0x39, CODECOPY + BYTES 0 // 0x3a, GASPRICE + BYTES 1 // 0x3b, EXTCODESIZE + BYTES 4 // 0x3c, EXTCODECOPY + BYTES 0 // 0x3d, RETURNDATASIZE + BYTES 3 // 0x3e, RETURNDATACOPY + BYTES 1 // 0x3f, EXTCODEHASH + + BYTES 1 // 0x40, BLOCKHASH + BYTES 0 // 0x41, COINBASE + BYTES 0 // 0x42, TIMESTAMP + BYTES 0 // 0x43, NUMBER + BYTES 0 // 0x44, DIFFICULTY + BYTES 0 // 0x45, GASLIMIT + BYTES 0 // 0x46, 
CHAINID + BYTES 0 // 0x47, SELFBALANCE + BYTES 0 // 0x48, BASEFEE + %rep 7 // 0x49-0x4f, invalid + BYTES 0 + %endrep + + BYTES 1 // 0x50, POP + BYTES 1 // 0x51, MLOAD + BYTES 2 // 0x52, MSTORE + BYTES 2 // 0x53, MSTORE8 + BYTES 1 // 0x54, SLOAD + BYTES 2 // 0x55, SSTORE + BYTES 1 // 0x56, JUMP + BYTES 1 // 0x57, JUMPI + BYTES 0 // 0x58, PC + BYTES 0 // 0x59, MSIZE + BYTES 0 // 0x5a, GAS + BYTES 0 // 0x5b, JUMPDEST + %rep 4 // 0x5c-0x5f, invalid + BYTES 0 + %endrep + + %rep 32 // 0x60-0x7f, PUSH1-PUSH32 + BYTES 0 + %endrep + + BYTES 1 // 0x80, DUP1 + BYTES 2 // 0x81, DUP2 + BYTES 3 // 0x82, DUP3 + BYTES 4 // 0x83, DUP4 + BYTES 5 // 0x84, DUP5 + BYTES 6 // 0x85, DUP6 + BYTES 7 // 0x86, DUP7 + BYTES 8 // 0x87, DUP8 + BYTES 9 // 0x88, DUP9 + BYTES 10 // 0x89, DUP10 + BYTES 11 // 0x8a, DUP11 + BYTES 12 // 0x8b, DUP12 + BYTES 13 // 0x8c, DUP13 + BYTES 14 // 0x8d, DUP14 + BYTES 15 // 0x8e, DUP15 + BYTES 16 // 0x8f, DUP16 + + BYTES 2 // 0x90, SWAP1 + BYTES 3 // 0x91, SWAP2 + BYTES 4 // 0x92, SWAP3 + BYTES 5 // 0x93, SWAP4 + BYTES 6 // 0x94, SWAP5 + BYTES 7 // 0x95, SWAP6 + BYTES 8 // 0x96, SWAP7 + BYTES 9 // 0x97, SWAP8 + BYTES 10 // 0x98, SWAP9 + BYTES 11 // 0x99, SWAP10 + BYTES 12 // 0x9a, SWAP11 + BYTES 13 // 0x9b, SWAP12 + BYTES 14 // 0x9c, SWAP13 + BYTES 15 // 0x9d, SWAP14 + BYTES 16 // 0x9e, SWAP15 + BYTES 17 // 0x9f, SWAP16 + + BYTES 2 // 0xa0, LOG0 + BYTES 3 // 0xa1, LOG1 + BYTES 4 // 0xa2, LOG2 + BYTES 5 // 0xa3, LOG3 + BYTES 6 // 0xa4, LOG4 + %rep 11 // 0xa5-0xaf, invalid + BYTES 0 + %endrep + + %rep 64 // 0xb0-0xef, invalid + BYTES 0 + %endrep + + BYTES 3 // 0xf0, CREATE + BYTES 7 // 0xf1, CALL + BYTES 7 // 0xf2, CALLCODE + BYTES 2 // 0xf3, RETURN + BYTES 6 // 0xf4, DELEGATECALL + BYTES 4 // 0xf5, CREATE2 + %rep 4 // 0xf6-0xf9, invalid + BYTES 0 + %endrep + BYTES 6 // 0xfa, STATICCALL + BYTES 0 // 0xfb, invalid + BYTES 0 // 0xfc, invalid + BYTES 2 // 0xfd, REVERT + BYTES 0 // 0xfe, invalid + BYTES 1 // 0xff, SELFDESTRUCT diff --git 
a/evm/src/cpu/kernel/asm/core/invalid.asm b/evm/src/cpu/kernel/asm/core/invalid.asm deleted file mode 100644 index 6a7f4c17..00000000 --- a/evm/src/cpu/kernel/asm/core/invalid.asm +++ /dev/null @@ -1,26 +0,0 @@ -global handle_invalid: - // stack: trap_info - - // if the kernel is trying to execute an invalid instruction, then we've already screwed up and - // there's no chance of getting a useful proof, so we just panic - DUP1 - // stack: trap_info, trap_info - %shr_const(32) - // stack: is_kernel, trap_info - %jumpi(panic) - - // check if the opcode that triggered this trap is _actually_ invalid - // stack: program_counter (is_kernel == 0, so trap_info == program_counter) - %mload_current_code - // stack: opcode - PUSH @INVALID_OPCODES_USER - // stack: invalid_opcodes_user, opcode - SWAP1 - // stack: opcode, invalid_opcodes_user - SHR - %and_const(1) - // stack: opcode_is_invalid - // if the opcode is indeed invalid, then perform an exceptional exit - %jumpi(fault_exception) - // otherwise, panic because this trap should not have been entered - PANIC diff --git a/evm/src/cpu/kernel/asm/core/util.asm b/evm/src/cpu/kernel/asm/core/util.asm index d64c6059..e737d86a 100644 --- a/evm/src/cpu/kernel/asm/core/util.asm +++ b/evm/src/cpu/kernel/asm/core/util.asm @@ -73,3 +73,21 @@ SWAP1 %is_empty OR %endmacro + +// Gets the size of the stack _before_ the macro is run +// WARNING: this macro is side-effecting. It writes the current stack length to offset +// `CTX_METADATA_STACK_SIZE`, segment `SEGMENT_CONTEXT_METADATA` in the current context. But I can't +// imagine it being an issue unless someone's doing something dumb. +%macro stack_length + // stack: (empty) + GET_CONTEXT + // stack: current_ctx + // It seems odd to switch to the context that we are already in. We do this because SET_CONTEXT + // saves the stack length of the context we are leaving in its metadata segment. + SET_CONTEXT + // stack: (empty) + // We can now read this stack length from memory. 
+ push @CTX_METADATA_STACK_SIZE + %mload_current(@SEGMENT_CONTEXT_METADATA) + // stack: stack_length +%endmacro diff --git a/evm/src/cpu/kernel/constants/exc_bitfields.rs b/evm/src/cpu/kernel/constants/exc_bitfields.rs new file mode 100644 index 00000000..2a6eaf0e --- /dev/null +++ b/evm/src/cpu/kernel/constants/exc_bitfields.rs @@ -0,0 +1,53 @@ +use std::ops::RangeInclusive; + +use ethereum_types::U256; + +/// Create a U256, where the bits at indices inside the specified ranges are set to 1, and all other +/// bits are set to 0. +const fn u256_from_set_index_ranges(ranges: [RangeInclusive; N]) -> U256 { + let mut j = 0; + let mut res_limbs = [0u64; 4]; + while j < ranges.len() { + let range = &ranges[j]; + let mut i = *range.start(); + if i > *range.end() { + continue; + } + loop { + let i_lo = i & 0x3f; + let i_hi = i >> 6; + res_limbs[i_hi as usize] |= 1 << i_lo; + + if i >= *range.end() { + break; + } + i += 1; + } + j += 1; + } + U256(res_limbs) +} + +pub const STACK_LENGTH_INCREASING_OPCODES_USER: U256 = u256_from_set_index_ranges([ + 0x30..=0x30, // ADDRESS + 0x32..=0x34, // ORIGIN, CALLER, CALLVALUE + 0x36..=0x36, // CALLDATASIZE + 0x38..=0x38, // CODESIZE + 0x3a..=0x3a, // GASPRICE + 0x3d..=0x3d, // RETURNDATASIZE + 0x41..=0x48, // COINBASE, TIMESTAMP, NUMBER, DIFFICULTY, GASLIMIT, CHAINID, SELFBALANCE, BASEFEE + 0x58..=0x5a, // PC, MSIZE, GAS + 0x60..=0x8f, // PUSH*, DUP* +]); + +pub const INVALID_OPCODES_USER: U256 = u256_from_set_index_ranges([ + 0x0c..=0x0f, + 0x1e..=0x1f, + 0x21..=0x2f, + 0x49..=0x4f, + 0x5c..=0x5f, + 0xa5..=0xef, + 0xf6..=0xf9, + 0xfb..=0xfc, + 0xfe..=0xfe, +]); diff --git a/evm/src/cpu/kernel/constants/mod.rs b/evm/src/cpu/kernel/constants/mod.rs index ab702798..3da4fdb0 100644 --- a/evm/src/cpu/kernel/constants/mod.rs +++ b/evm/src/cpu/kernel/constants/mod.rs @@ -3,7 +3,6 @@ use std::collections::HashMap; use ethereum_types::U256; use hex_literal::hex; -use crate::cpu::decode::invalid_opcodes_user; use 
crate::cpu::kernel::constants::context_metadata::ContextMetadata; use crate::cpu::kernel::constants::global_metadata::GlobalMetadata; use crate::cpu::kernel::constants::journal_entry::JournalEntry; @@ -16,6 +15,7 @@ pub(crate) mod global_metadata; pub(crate) mod journal_entry; pub(crate) mod trie_type; pub(crate) mod txn_fields; +mod exc_bitfields; /// Constants that are accessible to our kernel assembly code. pub fn evm_constants() -> HashMap { @@ -76,7 +76,11 @@ pub fn evm_constants() -> HashMap { } c.insert( "INVALID_OPCODES_USER".into(), - U256::from_little_endian(&invalid_opcodes_user()), + exc_bitfields::INVALID_OPCODES_USER, + ); + c.insert( + "STACK_LENGTH_INCREASING_OPCODES_USER".into(), + exc_bitfields::STACK_LENGTH_INCREASING_OPCODES_USER, ); c } diff --git a/evm/src/cpu/mod.rs b/evm/src/cpu/mod.rs index 22878e80..a5128599 100644 --- a/evm/src/cpu/mod.rs +++ b/evm/src/cpu/mod.rs @@ -5,6 +5,7 @@ pub(crate) mod control_flow; pub mod cpu_stark; pub(crate) mod decode; mod dup_swap; +mod exceptions; mod gas; mod jumps; pub mod kernel; diff --git a/evm/src/cpu/stack.rs b/evm/src/cpu/stack.rs index f5e9c78c..3fed2587 100644 --- a/evm/src/cpu/stack.rs +++ b/evm/src/cpu/stack.rs @@ -122,6 +122,11 @@ const STACK_BEHAVIORS: OpsColumnsView> = OpsColumnsView { pushes: true, disable_other_channels: false, }), + exception: Some(StackBehavior { + num_pops: 0, + pushes: true, + disable_other_channels: false, + }), }; fn eval_packed_one( diff --git a/evm/src/witness/gas.rs b/evm/src/witness/gas.rs index 98259ff8..9375863a 100644 --- a/evm/src/witness/gas.rs +++ b/evm/src/witness/gas.rs @@ -15,7 +15,7 @@ pub(crate) fn gas_to_charge(op: Operation) -> u64 { Iszero => G_VERYLOW, Not => G_VERYLOW, Byte => G_VERYLOW, - Syscall(_) => KERNEL_ONLY_INSTR, + Syscall(_, _, _) => KERNEL_ONLY_INSTR, Eq => G_VERYLOW, BinaryLogic(_) => G_VERYLOW, BinaryArithmetic(Add) => G_VERYLOW, diff --git a/evm/src/witness/operation.rs b/evm/src/witness/operation.rs index 58c242a6..293ba620 100644 
--- a/evm/src/witness/operation.rs +++ b/evm/src/witness/operation.rs @@ -9,12 +9,13 @@ use crate::cpu::kernel::assembler::BYTES_PER_OFFSET; use crate::cpu::kernel::constants::context_metadata::ContextMetadata; use crate::cpu::membus::NUM_GP_CHANNELS; use crate::cpu::simple_logic::eq_iszero::generate_pinv_diff; +use crate::cpu::stack_bounds::MAX_USER_STACK_SIZE; use crate::generation::state::GenerationState; use crate::memory::segments::Segment; use crate::witness::errors::MemoryError::{ContextTooLarge, SegmentTooLarge, VirtTooLarge}; use crate::witness::errors::ProgramError; use crate::witness::errors::ProgramError::MemoryError; -use crate::witness::memory::{MemoryAddress, MemoryOp}; +use crate::witness::memory::{MemoryAddress, MemoryChannel, MemoryOp, MemoryOpKind}; use crate::witness::util::{ keccak_sponge_log, mem_read_gp_with_log_and_fill, mem_write_gp_log_and_fill, stack_pop_with_log_and_fill, stack_push_log_and_fill, @@ -28,7 +29,7 @@ pub(crate) enum Operation { Byte, Shl, Shr, - Syscall(u8), + Syscall(u8, usize, bool), Eq, BinaryLogic(logic::Op), BinaryArithmetic(arithmetic::BinaryOperator), @@ -309,7 +310,18 @@ pub(crate) fn generate_set_context( let new_sp_addr = MemoryAddress::new(new_ctx, Segment::ContextMetadata, sp_field); let log_write_old_sp = mem_write_gp_log_and_fill(1, old_sp_addr, state, &mut row, sp_to_save); - let (new_sp, log_read_new_sp) = mem_read_gp_with_log_and_fill(2, new_sp_addr, state, &mut row); + let (new_sp, log_read_new_sp) = if old_ctx == new_ctx { + let op = MemoryOp::new( + MemoryChannel::GeneralPurpose(2), + state.traces.clock(), + new_sp_addr, + MemoryOpKind::Read, + sp_to_save, + ); + (sp_to_save, op) + } else { + mem_read_gp_with_log_and_fill(2, new_sp_addr, state, &mut row) + }; state.registers.context = new_ctx; state.registers.stack_len = new_sp.as_usize(); @@ -506,6 +518,8 @@ pub(crate) fn generate_shr( pub(crate) fn generate_syscall( opcode: u8, + stack_values_read: usize, + stack_len_increased: bool, state: &mut 
GenerationState, mut row: CpuColumnsView, ) -> Result<(), ProgramError> { @@ -513,6 +527,13 @@ pub(crate) fn generate_syscall( panic!(); } + if state.registers.stack_len < stack_values_read { + return Err(ProgramError::StackUnderflow); + } + if stack_len_increased && !state.registers.is_kernel && state.registers.stack_len >= MAX_USER_STACK_SIZE { + return Err(ProgramError::StackOverflow); + } + let handler_jumptable_addr = KERNEL.global_labels["syscall_jumptable"]; let handler_addr_addr = handler_jumptable_addr + (opcode as usize) * (BYTES_PER_OFFSET as usize); @@ -591,6 +612,17 @@ pub(crate) fn generate_exit_kernel( panic!(); } + if is_kernel_mode { + row.general.exit_kernel_mut().stack_len_check_aux = F::ZERO; + } else { + let diff = F::from_canonical_usize(state.registers.stack_len + 1) - F::from_canonical_u32(1026); + if let Some(inv) = diff.try_inverse() { + row.general.exit_kernel_mut().stack_len_check_aux = inv; + } else { + panic!("stack overflow; should have been caught before entering the syscall") + } + } + state.registers.program_counter = program_counter; state.registers.is_kernel = is_kernel_mode; state.registers.gas_used = gas_used_val; @@ -659,3 +691,64 @@ pub(crate) fn generate_mstore_general( state.traces.push_cpu(row); Ok(()) } + +pub(crate) fn generate_exception( + exc_code: u8, + state: &mut GenerationState, + mut row: CpuColumnsView, +) -> Result<(), ProgramError> { + if TryInto::::try_into(state.registers.gas_used).is_err() { + panic!(); + } + + row.stack_len_bounds_aux = (row.stack_len + F::ONE).inverse(); + + row.general.exception_mut().exc_code_bits = [ + F::from_bool(exc_code & 1 != 0), + F::from_bool(exc_code & 2 != 0), + F::from_bool(exc_code & 4 != 0), + ]; + + let handler_jumptable_addr = KERNEL.global_labels["exception_jumptable"]; + let handler_addr_addr = + handler_jumptable_addr + (exc_code as usize) * (BYTES_PER_OFFSET as usize); + assert_eq!(BYTES_PER_OFFSET, 3, "Code below assumes 3 bytes per offset"); + let (handler_addr0, 
log_in0) = mem_read_gp_with_log_and_fill( + 0, + MemoryAddress::new(0, Segment::Code, handler_addr_addr), + state, + &mut row, + ); + let (handler_addr1, log_in1) = mem_read_gp_with_log_and_fill( + 1, + MemoryAddress::new(0, Segment::Code, handler_addr_addr + 1), + state, + &mut row, + ); + let (handler_addr2, log_in2) = mem_read_gp_with_log_and_fill( + 2, + MemoryAddress::new(0, Segment::Code, handler_addr_addr + 2), + state, + &mut row, + ); + + let handler_addr = (handler_addr0 << 16) + (handler_addr1 << 8) + handler_addr2; + let new_program_counter = handler_addr.as_usize(); + + let exc_info = U256::from(state.registers.program_counter) + + (U256::from(state.registers.gas_used) << 192); + let log_out = stack_push_log_and_fill(state, &mut row, exc_info)?; + + state.registers.program_counter = new_program_counter; + log::debug!("Exception to {}", KERNEL.offset_name(new_program_counter)); + state.registers.is_kernel = true; + state.registers.gas_used = 0; + + state.traces.push_memory(log_in0); + state.traces.push_memory(log_in1); + state.traces.push_memory(log_in2); + state.traces.push_memory(log_out); + state.traces.push_cpu(row); + + Ok(()) +} diff --git a/evm/src/witness/transition.rs b/evm/src/witness/transition.rs index 0184e183..9fc48aae 100644 --- a/evm/src/witness/transition.rs +++ b/evm/src/witness/transition.rs @@ -29,22 +29,22 @@ fn read_code_memory(state: &mut GenerationState, row: &mut CpuColum fn decode(registers: RegistersState, opcode: u8) -> Result { match (opcode, registers.is_kernel) { - (0x00, _) => Ok(Operation::Syscall(opcode)), + (0x00, _) => Ok(Operation::Syscall(opcode, 0, false)), // STOP (0x01, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Add)), (0x02, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Mul)), (0x03, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Sub)), (0x04, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Div)), - (0x05, _) => 
Ok(Operation::Syscall(opcode)), + (0x05, _) => Ok(Operation::Syscall(opcode, 2, false)), // SDIV (0x06, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Mod)), - (0x07, _) => Ok(Operation::Syscall(opcode)), + (0x07, _) => Ok(Operation::Syscall(opcode, 2, false)), // SMOD (0x08, _) => Ok(Operation::TernaryArithmetic( arithmetic::TernaryOperator::AddMod, )), (0x09, _) => Ok(Operation::TernaryArithmetic( arithmetic::TernaryOperator::MulMod, )), - (0x0a, _) => Ok(Operation::Syscall(opcode)), - (0x0b, _) => Ok(Operation::Syscall(opcode)), + (0x0a, _) => Ok(Operation::Syscall(opcode, 2, false)), // EXP + (0x0b, _) => Ok(Operation::Syscall(opcode, 2, false)), // SIGNEXTEND (0x0c, true) => Ok(Operation::BinaryArithmetic( arithmetic::BinaryOperator::AddFp254, )), @@ -59,8 +59,8 @@ fn decode(registers: RegistersState, opcode: u8) -> Result Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Lt)), (0x11, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Gt)), - (0x12, _) => Ok(Operation::Syscall(opcode)), - (0x13, _) => Ok(Operation::Syscall(opcode)), + (0x12, _) => Ok(Operation::Syscall(opcode, 2, false)), // SLT + (0x13, _) => Ok(Operation::Syscall(opcode, 2, false)), // SGT (0x14, _) => Ok(Operation::Eq), (0x15, _) => Ok(Operation::Iszero), (0x16, _) => Ok(Operation::BinaryLogic(logic::Op::And)), @@ -70,76 +70,76 @@ fn decode(registers: RegistersState, opcode: u8) -> Result Ok(Operation::Byte), (0x1b, _) => Ok(Operation::Shl), (0x1c, _) => Ok(Operation::Shr), - (0x1d, _) => Ok(Operation::Syscall(opcode)), - (0x20, _) => Ok(Operation::Syscall(opcode)), + (0x1d, _) => Ok(Operation::Syscall(opcode, 2, false)), // SAR + (0x20, _) => Ok(Operation::Syscall(opcode, 2, false)), // KECCAK256 (0x21, true) => Ok(Operation::KeccakGeneral), - (0x30, _) => Ok(Operation::Syscall(opcode)), - (0x31, _) => Ok(Operation::Syscall(opcode)), - (0x32, _) => Ok(Operation::Syscall(opcode)), - (0x33, _) => Ok(Operation::Syscall(opcode)), - (0x34, _) => 
Ok(Operation::Syscall(opcode)), - (0x35, _) => Ok(Operation::Syscall(opcode)), - (0x36, _) => Ok(Operation::Syscall(opcode)), - (0x37, _) => Ok(Operation::Syscall(opcode)), - (0x38, _) => Ok(Operation::Syscall(opcode)), - (0x39, _) => Ok(Operation::Syscall(opcode)), - (0x3a, _) => Ok(Operation::Syscall(opcode)), - (0x3b, _) => Ok(Operation::Syscall(opcode)), - (0x3c, _) => Ok(Operation::Syscall(opcode)), - (0x3d, _) => Ok(Operation::Syscall(opcode)), - (0x3e, _) => Ok(Operation::Syscall(opcode)), - (0x3f, _) => Ok(Operation::Syscall(opcode)), - (0x40, _) => Ok(Operation::Syscall(opcode)), - (0x41, _) => Ok(Operation::Syscall(opcode)), - (0x42, _) => Ok(Operation::Syscall(opcode)), - (0x43, _) => Ok(Operation::Syscall(opcode)), - (0x44, _) => Ok(Operation::Syscall(opcode)), - (0x45, _) => Ok(Operation::Syscall(opcode)), - (0x46, _) => Ok(Operation::Syscall(opcode)), - (0x47, _) => Ok(Operation::Syscall(opcode)), - (0x48, _) => Ok(Operation::Syscall(opcode)), - (0x49, _) => Ok(Operation::ProverInput), + (0x30, _) => Ok(Operation::Syscall(opcode, 0, true)), // ADDRESS + (0x31, _) => Ok(Operation::Syscall(opcode, 1, false)), // BALANCE + (0x32, _) => Ok(Operation::Syscall(opcode, 0, true)), // ORIGIN + (0x33, _) => Ok(Operation::Syscall(opcode, 0, true)), // CALLER + (0x34, _) => Ok(Operation::Syscall(opcode, 0, true)), // CALLVALUE + (0x35, _) => Ok(Operation::Syscall(opcode, 1, false)), // CALLDATALOAD + (0x36, _) => Ok(Operation::Syscall(opcode, 0, true)), // CALLDATASIZE + (0x37, _) => Ok(Operation::Syscall(opcode, 3, false)), // CALLDATACOPY + (0x38, _) => Ok(Operation::Syscall(opcode, 0, true)), // CODESIZE + (0x39, _) => Ok(Operation::Syscall(opcode, 3, false)), // CODECOPY + (0x3a, _) => Ok(Operation::Syscall(opcode, 0, true)), // GASPRICE + (0x3b, _) => Ok(Operation::Syscall(opcode, 1, false)), // EXTCODESIZE + (0x3c, _) => Ok(Operation::Syscall(opcode, 4, false)), // EXTCODECOPY + (0x3d, _) => Ok(Operation::Syscall(opcode, 0, true)), // RETURNDATASIZE + 
(0x3e, _) => Ok(Operation::Syscall(opcode, 3, false)), // RETURNDATACOPY + (0x3f, _) => Ok(Operation::Syscall(opcode, 1, false)), // EXTCODEHASH + (0x40, _) => Ok(Operation::Syscall(opcode, 1, false)), // BLOCKHASH + (0x41, _) => Ok(Operation::Syscall(opcode, 0, true)), // COINBASE + (0x42, _) => Ok(Operation::Syscall(opcode, 0, true)), // TIMESTAMP + (0x43, _) => Ok(Operation::Syscall(opcode, 0, true)), // NUMBER + (0x44, _) => Ok(Operation::Syscall(opcode, 0, true)), // DIFFICULTY + (0x45, _) => Ok(Operation::Syscall(opcode, 0, true)), // GASLIMIT + (0x46, _) => Ok(Operation::Syscall(opcode, 0, true)), // CHAINID + (0x47, _) => Ok(Operation::Syscall(opcode, 0, true)), // SELFBALANCE + (0x48, _) => Ok(Operation::Syscall(opcode, 0, true)), // BASEFEE + (0x49, true) => Ok(Operation::ProverInput), (0x50, _) => Ok(Operation::Pop), - (0x51, _) => Ok(Operation::Syscall(opcode)), - (0x52, _) => Ok(Operation::Syscall(opcode)), - (0x53, _) => Ok(Operation::Syscall(opcode)), - (0x54, _) => Ok(Operation::Syscall(opcode)), - (0x55, _) => Ok(Operation::Syscall(opcode)), + (0x51, _) => Ok(Operation::Syscall(opcode, 1, false)), // MLOAD + (0x52, _) => Ok(Operation::Syscall(opcode, 2, false)), // MSTORE + (0x53, _) => Ok(Operation::Syscall(opcode, 2, false)), // MSTORE8 + (0x54, _) => Ok(Operation::Syscall(opcode, 1, false)), // SLOAD + (0x55, _) => Ok(Operation::Syscall(opcode, 2, false)), // SSTORE (0x56, _) => Ok(Operation::Jump), (0x57, _) => Ok(Operation::Jumpi), (0x58, _) => Ok(Operation::Pc), - (0x59, _) => Ok(Operation::Syscall(opcode)), - (0x5a, _) => Ok(Operation::Syscall(opcode)), + (0x59, _) => Ok(Operation::Syscall(opcode, 0, true)), // MSIZE + (0x5a, _) => Ok(Operation::Syscall(opcode, 0, true)), // GAS (0x5b, _) => Ok(Operation::Jumpdest), (0x60..=0x7f, _) => Ok(Operation::Push(opcode & 0x1f)), (0x80..=0x8f, _) => Ok(Operation::Dup(opcode & 0xf)), (0x90..=0x9f, _) => Ok(Operation::Swap(opcode & 0xf)), - (0xa0, _) => Ok(Operation::Syscall(opcode)), - (0xa1, _) => 
Ok(Operation::Syscall(opcode)), - (0xa2, _) => Ok(Operation::Syscall(opcode)), - (0xa3, _) => Ok(Operation::Syscall(opcode)), - (0xa4, _) => Ok(Operation::Syscall(opcode)), - (0xa5, _) => { + (0xa0, _) => Ok(Operation::Syscall(opcode, 2, false)), // LOG0 + (0xa1, _) => Ok(Operation::Syscall(opcode, 3, false)), // LOG1 + (0xa2, _) => Ok(Operation::Syscall(opcode, 4, false)), // LOG2 + (0xa3, _) => Ok(Operation::Syscall(opcode, 5, false)), // LOG3 + (0xa4, _) => Ok(Operation::Syscall(opcode, 6, false)), // LOG4 + (0xa5, true) => { log::warn!( "Kernel panic at {}", KERNEL.offset_name(registers.program_counter), ); Err(ProgramError::KernelPanic) } - (0xf0, _) => Ok(Operation::Syscall(opcode)), - (0xf1, _) => Ok(Operation::Syscall(opcode)), - (0xf2, _) => Ok(Operation::Syscall(opcode)), - (0xf3, _) => Ok(Operation::Syscall(opcode)), - (0xf4, _) => Ok(Operation::Syscall(opcode)), - (0xf5, _) => Ok(Operation::Syscall(opcode)), + (0xf0, _) => Ok(Operation::Syscall(opcode, 3, false)), // CREATE + (0xf1, _) => Ok(Operation::Syscall(opcode, 7, false)), // CALL + (0xf2, _) => Ok(Operation::Syscall(opcode, 7, false)), // CALLCODE + (0xf3, _) => Ok(Operation::Syscall(opcode, 2, false)), // RETURN + (0xf4, _) => Ok(Operation::Syscall(opcode, 6, false)), // DELEGATECALL + (0xf5, _) => Ok(Operation::Syscall(opcode, 4, false)), // CREATE2 (0xf6, true) => Ok(Operation::GetContext), (0xf7, true) => Ok(Operation::SetContext), (0xf9, true) => Ok(Operation::ExitKernel), - (0xfa, _) => Ok(Operation::Syscall(opcode)), + (0xfa, _) => Ok(Operation::Syscall(opcode, 6, false)), // STATICCALL (0xfb, true) => Ok(Operation::MloadGeneral), (0xfc, true) => Ok(Operation::MstoreGeneral), - (0xfd, _) => Ok(Operation::Syscall(opcode)), - (0xff, _) => Ok(Operation::Syscall(opcode)), + (0xfd, _) => Ok(Operation::Syscall(opcode, 2, false)), // REVERT + (0xff, _) => Ok(Operation::Syscall(opcode, 1, false)), // SELFDESTRUCT _ => { log::warn!("Invalid opcode: {}", opcode); Err(ProgramError::InvalidOpcode) @@ 
-156,7 +156,7 @@ fn fill_op_flag(op: Operation, row: &mut CpuColumnsView) { Operation::Iszero => &mut flags.iszero, Operation::Not => &mut flags.not, Operation::Byte => &mut flags.byte, - Operation::Syscall(_) => &mut flags.syscall, + Operation::Syscall(_, _, _) => &mut flags.syscall, Operation::Eq => &mut flags.eq, Operation::BinaryLogic(logic::Op::And) => &mut flags.and, Operation::BinaryLogic(logic::Op::Or) => &mut flags.or, @@ -205,7 +205,7 @@ fn perform_op( Operation::Byte => generate_byte(state, row)?, Operation::Shl => generate_shl(state, row)?, Operation::Shr => generate_shr(state, row)?, - Operation::Syscall(opcode) => generate_syscall(opcode, state, row)?, + Operation::Syscall(opcode, stack_values_read, stack_len_increased) => generate_syscall(opcode, stack_values_read, stack_len_increased, state, row)?, Operation::Eq => generate_eq(state, row)?, Operation::BinaryLogic(binary_logic_op) => { generate_binary_logic_op(binary_logic_op, state, row)? @@ -227,7 +227,7 @@ fn perform_op( }; state.registers.program_counter += match op { - Operation::Syscall(_) | Operation::ExitKernel => 0, + Operation::Syscall(_, _, _) | Operation::ExitKernel => 0, Operation::Push(n) => n as usize + 2, Operation::Jump | Operation::Jumpi => 0, _ => 1, @@ -238,7 +238,10 @@ fn perform_op( Ok(()) } -fn try_perform_instruction(state: &mut GenerationState) -> Result<(), ProgramError> { +/// Row that has the correct values for system registers and the code channel, but is otherwise +/// blank. It fulfills the constraints that are common to successful operations and the exception +/// operation. It also returns the opcode. 
+fn base_row(state: &mut GenerationState) -> (CpuColumnsView, u8) { let mut row: CpuColumnsView = CpuColumnsView::default(); row.is_cpu_cycle = F::ONE; row.clock = F::from_canonical_usize(state.traces.clock()); @@ -249,6 +252,11 @@ fn try_perform_instruction(state: &mut GenerationState) -> Result<( row.stack_len = F::from_canonical_usize(state.registers.stack_len); let opcode = read_code_memory(state, &mut row); + (row, opcode) +} + +fn try_perform_instruction(state: &mut GenerationState) -> Result<(), ProgramError> { + let (mut row, opcode) = base_row(state); let op = decode(state.registers, opcode)?; if state.registers.is_kernel { @@ -309,8 +317,25 @@ fn log_kernel_instruction(state: &mut GenerationState, op: Operatio assert!(pc < KERNEL.code.len(), "Kernel PC is out of range: {}", pc); } -fn handle_error(_state: &mut GenerationState) -> anyhow::Result<()> { - bail!("TODO: generation for exception handling is not implemented"); +fn handle_error(state: &mut GenerationState, err: ProgramError) -> anyhow::Result<()> { + let exc_code: u8 = match err { + ProgramError::OutOfGas => 0, + ProgramError::InvalidOpcode => 1, + ProgramError::StackUnderflow => 2, + ProgramError::InvalidJumpDestination => 3, + ProgramError::InvalidJumpiDestination => 4, + ProgramError::StackOverflow => 5, + _ => bail!("TODO: figure out what to do with this...") + }; + + let checkpoint = state.checkpoint(); + + let (row, _) = base_row(state); + generate_exception(exc_code, state, row).map_err(|_| anyhow::Error::msg("error handling errored..."))?; + + state.memory + .apply_ops(state.traces.mem_ops_since(checkpoint.traces)); + Ok(()) } pub(crate) fn transition(state: &mut GenerationState) -> anyhow::Result<()> { @@ -336,7 +361,7 @@ pub(crate) fn transition(state: &mut GenerationState) -> anyhow::Re ); } state.rollback(checkpoint); - handle_error(state) + handle_error(state, e) } } } From 448bc719d8144212edd4345cd5ef14ed73e8b0c6 Mon Sep 17 00:00:00 2001 From: Jacqueline Nabaglo Date: Sat, 3 Jun 
2023 18:46:24 -0700 Subject: [PATCH 2/6] Lints --- evm/src/cpu/jumps.rs | 8 +++++++- evm/src/cpu/kernel/constants/mod.rs | 2 +- evm/src/witness/operation.rs | 12 ++++++++---- evm/src/witness/transition.rs | 14 +++++++++----- 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/evm/src/cpu/jumps.rs b/evm/src/cpu/jumps.rs index 9ce5b3bc..6b73f57f 100644 --- a/evm/src/cpu/jumps.rs +++ b/evm/src/cpu/jumps.rs @@ -76,7 +76,13 @@ pub fn eval_ext_circuit_exit_kernel, const D: usize // See detailed comments in the packed implementation. { let stack_len_check_aux = lv.general.exit_kernel().stack_len_check_aux; - let lhs = builder.arithmetic_extension(F::ONE, -F::from_canonical_u32(1026), lv.stack_len, stack_len_check_aux, stack_len_check_aux); + let lhs = builder.arithmetic_extension( + F::ONE, + -F::from_canonical_u32(1026), + lv.stack_len, + stack_len_check_aux, + stack_len_check_aux, + ); let constr = builder.add_extension(lhs, input[1]); let constr = builder.mul_sub_extension(filter, constr, filter); yield_constr.constraint(builder, constr); diff --git a/evm/src/cpu/kernel/constants/mod.rs b/evm/src/cpu/kernel/constants/mod.rs index 3da4fdb0..08591261 100644 --- a/evm/src/cpu/kernel/constants/mod.rs +++ b/evm/src/cpu/kernel/constants/mod.rs @@ -11,11 +11,11 @@ use crate::cpu::kernel::constants::txn_fields::NormalizedTxnField; use crate::memory::segments::Segment; pub(crate) mod context_metadata; +mod exc_bitfields; pub(crate) mod global_metadata; pub(crate) mod journal_entry; pub(crate) mod trie_type; pub(crate) mod txn_fields; -mod exc_bitfields; /// Constants that are accessible to our kernel assembly code. 
pub fn evm_constants() -> HashMap { diff --git a/evm/src/witness/operation.rs b/evm/src/witness/operation.rs index 25cc3241..decc41cc 100644 --- a/evm/src/witness/operation.rs +++ b/evm/src/witness/operation.rs @@ -508,7 +508,10 @@ pub(crate) fn generate_syscall( if state.registers.stack_len < stack_values_read { return Err(ProgramError::StackUnderflow); } - if stack_len_increased && !state.registers.is_kernel && state.registers.stack_len >= MAX_USER_STACK_SIZE { + if stack_len_increased + && !state.registers.is_kernel + && state.registers.stack_len >= MAX_USER_STACK_SIZE + { return Err(ProgramError::StackOverflow); } @@ -593,7 +596,8 @@ pub(crate) fn generate_exit_kernel( if is_kernel_mode { row.general.exit_kernel_mut().stack_len_check_aux = F::ZERO; } else { - let diff = F::from_canonical_usize(state.registers.stack_len + 1) - F::from_canonical_u32(1026); + let diff = + F::from_canonical_usize(state.registers.stack_len + 1) - F::from_canonical_u32(1026); if let Some(inv) = diff.try_inverse() { row.general.exit_kernel_mut().stack_len_check_aux = inv; } else { @@ -713,8 +717,8 @@ pub(crate) fn generate_exception( let handler_addr = (handler_addr0 << 16) + (handler_addr1 << 8) + handler_addr2; let new_program_counter = handler_addr.as_usize(); - let exc_info = U256::from(state.registers.program_counter) - + (U256::from(state.registers.gas_used) << 192); + let exc_info = + U256::from(state.registers.program_counter) + (U256::from(state.registers.gas_used) << 192); let log_out = stack_push_log_and_fill(state, &mut row, exc_info)?; state.registers.program_counter = new_program_counter; diff --git a/evm/src/witness/transition.rs b/evm/src/witness/transition.rs index df3481db..df522374 100644 --- a/evm/src/witness/transition.rs +++ b/evm/src/witness/transition.rs @@ -207,7 +207,9 @@ fn perform_op( Operation::Not => generate_not(state, row)?, Operation::Shl => generate_shl(state, row)?, Operation::Shr => generate_shr(state, row)?, - Operation::Syscall(opcode, 
stack_values_read, stack_len_increased) => generate_syscall(opcode, stack_values_read, stack_len_increased, state, row)?, + Operation::Syscall(opcode, stack_values_read, stack_len_increased) => { + generate_syscall(opcode, stack_values_read, stack_len_increased, state, row)? + } Operation::Eq => generate_eq(state, row)?, Operation::BinaryLogic(binary_logic_op) => { generate_binary_logic_op(binary_logic_op, state, row)? @@ -327,16 +329,18 @@ fn handle_error(state: &mut GenerationState, err: ProgramError) -> ProgramError::InvalidJumpDestination => 3, ProgramError::InvalidJumpiDestination => 4, ProgramError::StackOverflow => 5, - _ => bail!("TODO: figure out what to do with this...") + _ => bail!("TODO: figure out what to do with this..."), }; let checkpoint = state.checkpoint(); let (row, _) = base_row(state); - generate_exception(exc_code, state, row).map_err(|_| anyhow::Error::msg("error handling errored..."))?; + generate_exception(exc_code, state, row) + .map_err(|_| anyhow::Error::msg("error handling errored..."))?; - state.memory - .apply_ops(state.traces.mem_ops_since(checkpoint.traces)); + state + .memory + .apply_ops(state.traces.mem_ops_since(checkpoint.traces)); Ok(()) } From 3ecf53095676cb55c132c84ab1efe2ad2ebea3d9 Mon Sep 17 00:00:00 2001 From: Jacqueline Nabaglo Date: Sun, 4 Jun 2023 10:05:28 -0700 Subject: [PATCH 3/6] Minor bugfixes --- evm/src/cpu/kernel/asm/core/exception.asm | 15 ++++++++++++--- evm/src/witness/gas.rs | 1 - evm/src/witness/transition.rs | 1 - 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/evm/src/cpu/kernel/asm/core/exception.asm b/evm/src/cpu/kernel/asm/core/exception.asm index 0fefe29c..7d5c7d9c 100644 --- a/evm/src/cpu/kernel/asm/core/exception.asm +++ b/evm/src/cpu/kernel/asm/core/exception.asm @@ -96,11 +96,20 @@ global exc_invalid_jumpi_destination: global invalid_jump_jumpi_destination_common: + // We have a jump destination on the stack. 
We want to `PANIC` if it is valid, and jump to + // `fault_exception` if it is not. An address is a valid jump destination if it points to a + // `JUMPDEST` instruction. In practice, since in this implementation memory addresses are + // limited to 32 bits, we check two things: + // 1. the address is no more than 32 bits long, and + // 2. it points to a `JUMPDEST` instruction. // stack: jump_dest + DUP1 + %shr_const(32) + %jumpi(fault_exception) // This keeps one copy of jump_dest on the stack, but that's fine. + // jump_dest is a valid address; check if it points to a `JUMP_DEST`. %mload_current(@SEGMENT_JUMPDEST_BITS) // stack: is_valid_jumpdest - // if valid, then the trap should never have been entered - %jumpi(panic) + %jumpi(panic) // Trap should never have been entered. %jump(fault_exception) @@ -164,7 +173,7 @@ min_stack_len_for_opcode: BYTES 2 // 0x17, OR BYTES 2 // 0x18, XOR BYTES 1 // 0x19, NOT - BYTES 2 // 0x1a, BYTES + BYTES 2 // 0x1a, BYTE BYTES 2 // 0x1b, SHL BYTES 2 // 0x1c, SHR BYTES 2 // 0x1d, SAR diff --git a/evm/src/witness/gas.rs b/evm/src/witness/gas.rs index e03ab8a1..0acb04e4 100644 --- a/evm/src/witness/gas.rs +++ b/evm/src/witness/gas.rs @@ -14,7 +14,6 @@ pub(crate) fn gas_to_charge(op: Operation) -> u64 { match op { Iszero => G_VERYLOW, Not => G_VERYLOW, - Byte => G_VERYLOW, Syscall(_, _, _) => KERNEL_ONLY_INSTR, Eq => G_VERYLOW, BinaryLogic(_) => G_VERYLOW, diff --git a/evm/src/witness/transition.rs b/evm/src/witness/transition.rs index df522374..44e6ebdd 100644 --- a/evm/src/witness/transition.rs +++ b/evm/src/witness/transition.rs @@ -157,7 +157,6 @@ fn fill_op_flag(op: Operation, row: &mut CpuColumnsView) { Operation::Swap(_) => &mut flags.swap, Operation::Iszero => &mut flags.iszero, Operation::Not => &mut flags.not, - Operation::Byte => &mut flags.byte, Operation::Syscall(_, _, _) => &mut flags.syscall, Operation::Eq => &mut flags.eq, Operation::BinaryLogic(logic::Op::And) => &mut flags.and, From 
c773476cb97b9e2d083fb152cd60404ab2d3e93c Mon Sep 17 00:00:00 2001 From: Jacqueline Nabaglo Date: Sun, 4 Jun 2023 12:40:34 -0700 Subject: [PATCH 4/6] Minor docs --- evm/src/witness/operation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evm/src/witness/operation.rs b/evm/src/witness/operation.rs index decc41cc..4cd725bb 100644 --- a/evm/src/witness/operation.rs +++ b/evm/src/witness/operation.rs @@ -28,7 +28,7 @@ pub(crate) enum Operation { Not, Shl, Shr, - Syscall(u8, usize, bool), + Syscall(u8, usize, bool), // (syscall number, minimum stack length, increases stack length) Eq, BinaryLogic(logic::Op), BinaryArithmetic(arithmetic::BinaryOperator), From d33871728d1cd08c84a95ead169ff31fca2f0627 Mon Sep 17 00:00:00 2001 From: Jacqueline Nabaglo Date: Sun, 4 Jun 2023 12:46:46 -0700 Subject: [PATCH 5/6] Commit missing file --- evm/src/cpu/exceptions.rs | 241 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 241 insertions(+) create mode 100644 evm/src/cpu/exceptions.rs diff --git a/evm/src/cpu/exceptions.rs b/evm/src/cpu/exceptions.rs new file mode 100644 index 00000000..485f6888 --- /dev/null +++ b/evm/src/cpu/exceptions.rs @@ -0,0 +1,241 @@ +//! Handle instructions that are implemented in terms of system calls. +//! +//! These are usually the ones that are too complicated to implement in one CPU table row. + +use plonky2::field::extension::Extendable; +use plonky2::field::packed::PackedField; +use plonky2::field::types::Field; +use plonky2::hash::hash_types::RichField; +use plonky2::iop::ext_target::ExtensionTarget; +use static_assertions::const_assert; + +use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; +use crate::cpu::columns::CpuColumnsView; +use crate::cpu::kernel::aggregator::KERNEL; +use crate::cpu::membus::NUM_GP_CHANNELS; +use crate::memory::segments::Segment; + +// Copy the constant but make it `usize`. 
+const BYTES_PER_OFFSET: usize = crate::cpu::kernel::assembler::BYTES_PER_OFFSET as usize; +const_assert!(BYTES_PER_OFFSET < NUM_GP_CHANNELS); // Reserve one channel for stack push + +pub fn eval_packed( + lv: &CpuColumnsView

, + nv: &CpuColumnsView

, + yield_constr: &mut ConstraintConsumer

, +) { + // TODO: There's heaps of overlap between this and syscalls. They could be merged. + + let filter = lv.op.exception; + + // Ensure we are not in kernel mode + yield_constr.constraint(filter * lv.is_kernel_mode); + + // Get the exception code as an value in {0, ..., 7}. + let exc_code_bits = lv.general.exception().exc_code_bits; + let exc_code: P = exc_code_bits + .into_iter() + .enumerate() + .map(|(i, bit)| bit * P::Scalar::from_canonical_u64(1 << i)) + .sum(); + // Ensure that all bits are either 0 or 1. + for bit in exc_code_bits { + yield_constr.constraint(filter * bit * (bit - P::ONES)); + } + + // Look up the handler in memory + let code_segment = P::Scalar::from_canonical_usize(Segment::Code as usize); + let exc_jumptable_start = + P::Scalar::from_canonical_usize(KERNEL.global_labels["exception_jumptable"]); + let exc_handler_addr_start = + exc_jumptable_start + exc_code * P::Scalar::from_canonical_usize(BYTES_PER_OFFSET); + for (i, channel) in lv.mem_channels[0..BYTES_PER_OFFSET].iter().enumerate() { + yield_constr.constraint(filter * (channel.used - P::ONES)); + yield_constr.constraint(filter * (channel.is_read - P::ONES)); + + // Set kernel context and code segment + yield_constr.constraint(filter * channel.addr_context); + yield_constr.constraint(filter * (channel.addr_segment - code_segment)); + + // Set address, using a separate channel for each of the `BYTES_PER_OFFSET` limbs. 
+ let limb_address = exc_handler_addr_start + P::Scalar::from_canonical_usize(i); + yield_constr.constraint(filter * (channel.addr_virtual - limb_address)); + } + + // Disable unused channels (the last channel is used to push to the stack) + for channel in &lv.mem_channels[BYTES_PER_OFFSET..NUM_GP_CHANNELS - 1] { + yield_constr.constraint(filter * channel.used); + } + + // Set program counter to the handler address + // The addresses are big-endian in memory + let target = lv.mem_channels[0..BYTES_PER_OFFSET] + .iter() + .map(|channel| channel.value[0]) + .fold(P::ZEROS, |cumul, limb| { + cumul * P::Scalar::from_canonical_u64(256) + limb + }); + yield_constr.constraint_transition(filter * (nv.program_counter - target)); + // Set kernel mode + yield_constr.constraint_transition(filter * (nv.is_kernel_mode - P::ONES)); + // Maintain current context + yield_constr.constraint_transition(filter * (nv.context - lv.context)); + // Reset gas counter to zero. + yield_constr.constraint_transition(filter * nv.gas); + + // This memory channel is constrained in `stack.rs`. + let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value; + // Push to stack: current PC (limb 0), gas counter (limbs 6 and 7). + yield_constr.constraint(filter * (output[0] - lv.program_counter)); + yield_constr.constraint(filter * (output[6] - lv.gas)); + // TODO: Range check `output[6]`. + yield_constr.constraint(filter * output[7]); // High limb of gas is zero. 
+ + // Zero the rest of that register + for &limb in &output[1..6] { + yield_constr.constraint(filter * limb); + } +} + +pub fn eval_ext_circuit, const D: usize>( + builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, + lv: &CpuColumnsView>, + nv: &CpuColumnsView>, + yield_constr: &mut RecursiveConstraintConsumer, +) { + let filter = lv.op.exception; + + // Ensure we are not in kernel mode + { + let constr = builder.mul_extension(filter, lv.is_kernel_mode); + yield_constr.constraint(builder, constr); + } + + // Get the exception code as an value in {0, ..., 7}. + let exc_code_bits = lv.general.exception().exc_code_bits; + let exc_code = + exc_code_bits + .into_iter() + .enumerate() + .fold(builder.zero_extension(), |cumul, (i, bit)| { + builder.mul_const_add_extension(F::from_canonical_u64(1 << i), bit, cumul) + }); + // Ensure that all bits are either 0 or 1. + for bit in exc_code_bits { + let constr = builder.mul_sub_extension(bit, bit, bit); + let constr = builder.mul_extension(filter, constr); + yield_constr.constraint(builder, constr); + } + + // Look up the handler in memory + let code_segment = F::from_canonical_usize(Segment::Code as usize); + let exc_jumptable_start = builder.constant_extension( + F::from_canonical_usize(KERNEL.global_labels["exception_jumptable"]).into(), + ); + let exc_handler_addr_start = builder.mul_const_add_extension( + F::from_canonical_usize(BYTES_PER_OFFSET), + exc_code, + exc_jumptable_start, + ); + for (i, channel) in lv.mem_channels[0..BYTES_PER_OFFSET].iter().enumerate() { + { + let constr = builder.mul_sub_extension(filter, channel.used, filter); + yield_constr.constraint(builder, constr); + } + { + let constr = builder.mul_sub_extension(filter, channel.is_read, filter); + yield_constr.constraint(builder, constr); + } + + // Set kernel context and code segment + { + let constr = builder.mul_extension(filter, channel.addr_context); + yield_constr.constraint(builder, constr); + } + { + let constr = 
builder.arithmetic_extension( + F::ONE, + -code_segment, + filter, + channel.addr_segment, + filter, + ); + yield_constr.constraint(builder, constr); + } + + // Set address, using a separate channel for each of the `BYTES_PER_OFFSET` limbs. + { + let diff = builder.sub_extension(channel.addr_virtual, exc_handler_addr_start); + let constr = builder.arithmetic_extension( + F::ONE, + -F::from_canonical_usize(i), + filter, + diff, + filter, + ); + yield_constr.constraint(builder, constr); + } + } + + // Disable unused channels (the last channel is used to push to the stack) + for channel in &lv.mem_channels[BYTES_PER_OFFSET..NUM_GP_CHANNELS - 1] { + let constr = builder.mul_extension(filter, channel.used); + yield_constr.constraint(builder, constr); + } + + // Set program counter to the handler address + // The addresses are big-endian in memory + { + let target = lv.mem_channels[0..BYTES_PER_OFFSET] + .iter() + .map(|channel| channel.value[0]) + .fold(builder.zero_extension(), |cumul, limb| { + builder.mul_const_add_extension(F::from_canonical_u64(256), cumul, limb) + }); + let diff = builder.sub_extension(nv.program_counter, target); + let constr = builder.mul_extension(filter, diff); + yield_constr.constraint_transition(builder, constr); + } + // Set kernel mode + { + let constr = builder.mul_sub_extension(filter, nv.is_kernel_mode, filter); + yield_constr.constraint_transition(builder, constr); + } + // Maintain current context + { + let diff = builder.sub_extension(nv.context, lv.context); + let constr = builder.mul_extension(filter, diff); + yield_constr.constraint_transition(builder, constr); + } + // Reset gas counter to zero. + { + let constr = builder.mul_extension(filter, nv.gas); + yield_constr.constraint_transition(builder, constr); + } + + // This memory channel is constrained in `stack.rs`. + let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value; + // Push to stack: current PC (limb 0), gas counter (limbs 6 and 7). 
+ { + let diff = builder.sub_extension(output[0], lv.program_counter); + let constr = builder.mul_extension(filter, diff); + yield_constr.constraint(builder, constr); + } + { + let diff = builder.sub_extension(output[6], lv.gas); + let constr = builder.mul_extension(filter, diff); + yield_constr.constraint(builder, constr); + } + // TODO: Range check `output[6]`. + { + // High limb of gas is zero. + let constr = builder.mul_extension(filter, output[7]); + yield_constr.constraint(builder, constr); + } + + // Zero the rest of that register + for &limb in &output[1..6] { + let constr = builder.mul_extension(filter, limb); + yield_constr.constraint(builder, constr); + } +} From ae290dbf1157b19dca790bfe9a166344c2e5261d Mon Sep 17 00:00:00 2001 From: Jacqueline Nabaglo Date: Wed, 7 Jun 2023 14:58:59 -0700 Subject: [PATCH 6/6] William PR comments --- evm/src/cpu/kernel/asm/core/exception.asm | 6 +++--- evm/src/witness/errors.rs | 1 + evm/src/witness/operation.rs | 22 ++++++++++++++++------ 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/evm/src/cpu/kernel/asm/core/exception.asm b/evm/src/cpu/kernel/asm/core/exception.asm index 7d5c7d9c..70e8550b 100644 --- a/evm/src/cpu/kernel/asm/core/exception.asm +++ b/evm/src/cpu/kernel/asm/core/exception.asm @@ -155,8 +155,8 @@ min_stack_len_for_opcode: BYTES 2 // 0x05, SDIV BYTES 2 // 0x06, MOD BYTES 2 // 0x07, SMOD - BYTES 2 // 0x08, ADDMOD - BYTES 2 // 0x09, MULMOD + BYTES 3 // 0x08, ADDMOD + BYTES 3 // 0x09, MULMOD BYTES 2 // 0x0a, EXP BYTES 2 // 0x0b, SIGNEXTEND %rep 4 // 0x0c-0x0f, invalid @@ -222,7 +222,7 @@ min_stack_len_for_opcode: BYTES 1 // 0x54, SLOAD BYTES 2 // 0x55, SSTORE BYTES 1 // 0x56, JUMP - BYTES 1 // 0x57, JUMPI + BYTES 2 // 0x57, JUMPI BYTES 0 // 0x58, PC BYTES 0 // 0x59, MSIZE BYTES 0 // 0x5a, GAS diff --git a/evm/src/witness/errors.rs b/evm/src/witness/errors.rs index e28e7d0f..81d5ff01 100644 --- a/evm/src/witness/errors.rs +++ b/evm/src/witness/errors.rs @@ -11,6 +11,7 @@ pub enum ProgramError 
{ StackOverflow, KernelPanic, MemoryError(MemoryError), + GasLimitError, } #[allow(clippy::enum_variant_names)] diff --git a/evm/src/witness/operation.rs b/evm/src/witness/operation.rs index 4cd725bb..94cf49ea 100644 --- a/evm/src/witness/operation.rs +++ b/evm/src/witness/operation.rs @@ -502,7 +502,7 @@ pub(crate) fn generate_syscall( mut row: CpuColumnsView, ) -> Result<(), ProgramError> { if TryInto::::try_into(state.registers.gas_used).is_err() { - panic!(); + return Err(ProgramError::GasLimitError); } if state.registers.stack_len < stack_values_read { @@ -544,13 +544,18 @@ pub(crate) fn generate_syscall( let syscall_info = U256::from(state.registers.program_counter + 1) + (U256::from(u64::from(state.registers.is_kernel)) << 32) + (U256::from(state.registers.gas_used) << 192); - let log_out = stack_push_log_and_fill(state, &mut row, syscall_info)?; + // Set registers before pushing to the stack; in particular, we need to set kernel mode so we + // can't incorrectly trigger a stack overflow. However, note that we have to do it _after_ we + // make `syscall_info`, which should contain the old values. 
state.registers.program_counter = new_program_counter; - log::debug!("Syscall to {}", KERNEL.offset_name(new_program_counter)); state.registers.is_kernel = true; state.registers.gas_used = 0; + let log_out = stack_push_log_and_fill(state, &mut row, syscall_info)?; + + log::debug!("Syscall to {}", KERNEL.offset_name(new_program_counter)); + state.traces.push_memory(log_in0); state.traces.push_memory(log_in1); state.traces.push_memory(log_in2); @@ -680,7 +685,7 @@ pub(crate) fn generate_exception( mut row: CpuColumnsView, ) -> Result<(), ProgramError> { if TryInto::::try_into(state.registers.gas_used).is_err() { - panic!(); + return Err(ProgramError::GasLimitError); } row.stack_len_bounds_aux = (row.stack_len + F::ONE).inverse(); @@ -719,13 +724,18 @@ pub(crate) fn generate_exception( let exc_info = U256::from(state.registers.program_counter) + (U256::from(state.registers.gas_used) << 192); - let log_out = stack_push_log_and_fill(state, &mut row, exc_info)?; + // Set registers before pushing to the stack; in particular, we need to set kernel mode so we + // can't incorrectly trigger a stack overflow. However, note that we have to do it _after_ we + // make `exc_info`, which should contain the old values. state.registers.program_counter = new_program_counter; - log::debug!("Exception to {}", KERNEL.offset_name(new_program_counter)); state.registers.is_kernel = true; state.registers.gas_used = 0; + let log_out = stack_push_log_and_fill(state, &mut row, exc_info)?; + + log::debug!("Exception to {}", KERNEL.offset_name(new_program_counter)); + state.traces.push_memory(log_in0); state.traces.push_memory(log_in1); state.traces.push_memory(log_in2);