Merge pull request #1071 from mir-protocol/jacqui/bad-opcode-witness-generation

Error handling
Jacqueline Nabaglo 2023-06-07 15:49:07 -07:00, committed by GitHub
commit 0117541909
20 changed files with 905 additions and 133 deletions


@@ -7,6 +7,8 @@ use std::mem::{size_of, transmute};
#[derive(Clone, Copy)]
pub(crate) union CpuGeneralColumnsView<T: Copy> {
arithmetic: CpuArithmeticView<T>,
exception: CpuExceptionView<T>,
exit_kernel: CpuExitKernelView<T>,
logic: CpuLogicView<T>,
jumps: CpuJumpsView<T>,
shift: CpuShiftView<T>,
@@ -23,6 +25,26 @@ impl<T: Copy> CpuGeneralColumnsView<T> {
unsafe { &mut self.arithmetic }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn exception(&self) -> &CpuExceptionView<T> {
unsafe { &self.exception }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn exception_mut(&mut self) -> &mut CpuExceptionView<T> {
unsafe { &mut self.exception }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn exit_kernel(&self) -> &CpuExitKernelView<T> {
unsafe { &self.exit_kernel }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn exit_kernel_mut(&mut self) -> &mut CpuExitKernelView<T> {
unsafe { &mut self.exit_kernel }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn logic(&self) -> &CpuLogicView<T> {
unsafe { &self.logic }
@@ -89,6 +111,17 @@ pub(crate) struct CpuArithmeticView<T: Copy> {
tmp: T, // temporary, to suppress errors
}
#[derive(Copy, Clone)]
pub(crate) struct CpuExceptionView<T: Copy> {
// Exception code as little-endian bits.
pub(crate) exc_code_bits: [T; 3],
}
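A quick illustrative sketch (not part of the diff) of the encoding `exc_code_bits` stores: an exception code in {0, ..., 7} is kept as three little-endian bits, so bit i carries weight 2^i. Witness generation (see `generate_exception` below) decomposes the code, and the constraints recompose it.

fn exc_code_to_bits(exc_code: u8) -> [bool; 3] {
    // Little-endian: index i holds bit i of the code.
    [exc_code & 1 != 0, exc_code & 2 != 0, exc_code & 4 != 0]
}

fn bits_to_exc_code(bits: [bool; 3]) -> u8 {
    bits.iter().enumerate().map(|(i, &bit)| (bit as u8) << i).sum()
}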
#[derive(Copy, Clone)]
pub(crate) struct CpuExitKernelView<T: Copy> {
pub(crate) stack_len_check_aux: T,
}
#[derive(Copy, Clone)]
pub(crate) struct CpuLogicView<T: Copy> {
// Pseudoinverse of `(input0 - input1)`. Used to prove that they are unequal. Assumes 32-bit limbs.


@@ -47,12 +47,13 @@ pub struct OpsColumnsView<T: Copy> {
// TODO: combine GET_CONTEXT and SET_CONTEXT into one flag
pub get_context: T,
pub set_context: T,
pub exit_kernel: T,
pub exit_kernel: T, // Note: This column must be 0 when is_cpu_cycle = 0.
// TODO: combine MLOAD_GENERAL and MSTORE_GENERAL into one flag
pub mload_general: T,
pub mstore_general: T,
pub syscall: T, // Note: This column must be 0 when is_cpu_cycle = 0.
pub exception: T, // Note: This column must be 0 when is_cpu_cycle = 0.
}
// `u8` is guaranteed to have a `size_of` of 1.


@@ -45,6 +45,7 @@ const NATIVE_INSTRUCTIONS: [usize; 31] = [
COL_MAP.op.mload_general,
COL_MAP.op.mstore_general,
// not SYSCALL (performs a jump)
// not exceptions (they also perform a jump)
];
pub(crate) fn get_halt_pcs<F: Field>() -> (F, F) {


@@ -13,8 +13,8 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer
use crate::cpu::columns::{CpuColumnsView, COL_MAP, NUM_CPU_COLUMNS};
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::cpu::{
bootstrap_kernel, contextops, control_flow, decode, dup_swap, gas, jumps, membus, memio,
modfp254, pc, shift, simple_logic, stack, stack_bounds, syscalls,
bootstrap_kernel, contextops, control_flow, decode, dup_swap, exceptions, gas, jumps, membus,
memio, modfp254, pc, shift, simple_logic, stack, stack_bounds, syscalls,
};
use crate::cross_table_lookup::{Column, TableWithColumns};
use crate::memory::segments::Segment;
@@ -191,6 +191,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for CpuStark<F, D
control_flow::eval_packed_generic(local_values, next_values, yield_constr);
decode::eval_packed_generic(local_values, yield_constr);
dup_swap::eval_packed(local_values, yield_constr);
exceptions::eval_packed(local_values, next_values, yield_constr);
gas::eval_packed(local_values, next_values, yield_constr);
jumps::eval_packed(local_values, next_values, yield_constr);
membus::eval_packed(local_values, yield_constr);
@@ -217,6 +218,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for CpuStark<F, D
control_flow::eval_ext_circuit(builder, local_values, next_values, yield_constr);
decode::eval_ext_circuit(builder, local_values, yield_constr);
dup_swap::eval_ext_circuit(builder, local_values, yield_constr);
exceptions::eval_ext_circuit(builder, local_values, next_values, yield_constr);
gas::eval_ext_circuit(builder, local_values, next_values, yield_constr);
jumps::eval_ext_circuit(builder, local_values, next_values, yield_constr);
membus::eval_ext_circuit(builder, local_values, yield_constr);


@@ -62,33 +62,6 @@ const OPCODES: [(u8, usize, bool, usize); 36] = [
(0xfc, 0, true, COL_MAP.op.mstore_general),
];
/// Bitfield of invalid opcodes, in little-endian order.
pub(crate) const fn invalid_opcodes_user() -> [u8; 32] {
let mut res = [u8::MAX; 32]; // Start with all opcodes marked invalid.
let mut i = 0;
while i < OPCODES.len() {
let (block_start, lb_block_len, kernel_only, _) = OPCODES[i];
i += 1;
if kernel_only {
continue;
}
let block_len = 1 << lb_block_len;
let block_start = block_start as usize;
let block_end = block_start + block_len;
let mut j = block_start;
while j < block_end {
let byte = j / u8::BITS as usize;
let bit = j % u8::BITS as usize;
res[byte] &= !(1 << bit); // Mark opcode as invalid by zeroing the bit.
j += 1;
}
}
res
}
pub fn generate<F: RichField>(lv: &mut CpuColumnsView<F>) {
let cycle_filter = lv.is_cpu_cycle;
if cycle_filter == F::ZERO {

evm/src/cpu/exceptions.rs (new file, 241 lines)

@@ -0,0 +1,241 @@
//! Handle exceptions raised while executing user code: out of gas, invalid opcodes, stack
//! underflow and overflow, and invalid jump/jumpi destinations.
//!
//! Like syscalls, these are handled by looking up a handler address in a kernel jumptable and
//! jumping to it.
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use static_assertions::const_assert;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::memory::segments::Segment;
// Copy the constant but make it `usize`.
const BYTES_PER_OFFSET: usize = crate::cpu::kernel::assembler::BYTES_PER_OFFSET as usize;
const_assert!(BYTES_PER_OFFSET < NUM_GP_CHANNELS); // Reserve one channel for stack push
pub fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
// TODO: There's heaps of overlap between this and syscalls. They could be merged.
let filter = lv.op.exception;
// Ensure we are not in kernel mode
yield_constr.constraint(filter * lv.is_kernel_mode);
// Get the exception code as a value in {0, ..., 7}.
let exc_code_bits = lv.general.exception().exc_code_bits;
let exc_code: P = exc_code_bits
.into_iter()
.enumerate()
.map(|(i, bit)| bit * P::Scalar::from_canonical_u64(1 << i))
.sum();
// Ensure that all bits are either 0 or 1.
for bit in exc_code_bits {
yield_constr.constraint(filter * bit * (bit - P::ONES));
}
// Look up the handler in memory
let code_segment = P::Scalar::from_canonical_usize(Segment::Code as usize);
let exc_jumptable_start =
P::Scalar::from_canonical_usize(KERNEL.global_labels["exception_jumptable"]);
let exc_handler_addr_start =
exc_jumptable_start + exc_code * P::Scalar::from_canonical_usize(BYTES_PER_OFFSET);
for (i, channel) in lv.mem_channels[0..BYTES_PER_OFFSET].iter().enumerate() {
yield_constr.constraint(filter * (channel.used - P::ONES));
yield_constr.constraint(filter * (channel.is_read - P::ONES));
// Set kernel context and code segment
yield_constr.constraint(filter * channel.addr_context);
yield_constr.constraint(filter * (channel.addr_segment - code_segment));
// Set address, using a separate channel for each of the `BYTES_PER_OFFSET` limbs.
let limb_address = exc_handler_addr_start + P::Scalar::from_canonical_usize(i);
yield_constr.constraint(filter * (channel.addr_virtual - limb_address));
}
// Disable unused channels (the last channel is used to push to the stack)
for channel in &lv.mem_channels[BYTES_PER_OFFSET..NUM_GP_CHANNELS - 1] {
yield_constr.constraint(filter * channel.used);
}
// Set program counter to the handler address
// The addresses are big-endian in memory
let target = lv.mem_channels[0..BYTES_PER_OFFSET]
.iter()
.map(|channel| channel.value[0])
.fold(P::ZEROS, |cumul, limb| {
cumul * P::Scalar::from_canonical_u64(256) + limb
});
yield_constr.constraint_transition(filter * (nv.program_counter - target));
// Set kernel mode
yield_constr.constraint_transition(filter * (nv.is_kernel_mode - P::ONES));
// Maintain current context
yield_constr.constraint_transition(filter * (nv.context - lv.context));
// Reset gas counter to zero.
yield_constr.constraint_transition(filter * nv.gas);
// This memory channel is constrained in `stack.rs`.
let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value;
// Push to stack: current PC (limb 0), gas counter (limbs 6 and 7).
yield_constr.constraint(filter * (output[0] - lv.program_counter));
yield_constr.constraint(filter * (output[6] - lv.gas));
// TODO: Range check `output[6]`.
yield_constr.constraint(filter * output[7]); // High limb of gas is zero.
// Zero the rest of that register
for &limb in &output[1..6] {
yield_constr.constraint(filter * limb);
}
}
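As a plain-Rust restatement of the Horner-style fold in `eval_packed` above (a sketch, not code from the diff): the handler address is stored big-endian, one byte per memory channel, and recombined by repeated multiply-by-256.

// recombine_be(&[0x01, 0x02, 0x03]) == 0x010203
fn recombine_be(limbs: &[u64]) -> u64 {
    limbs.iter().fold(0, |cumul, &limb| cumul * 256 + limb)
}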
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let filter = lv.op.exception;
// Ensure we are not in kernel mode
{
let constr = builder.mul_extension(filter, lv.is_kernel_mode);
yield_constr.constraint(builder, constr);
}
// Get the exception code as a value in {0, ..., 7}.
let exc_code_bits = lv.general.exception().exc_code_bits;
let exc_code =
exc_code_bits
.into_iter()
.enumerate()
.fold(builder.zero_extension(), |cumul, (i, bit)| {
builder.mul_const_add_extension(F::from_canonical_u64(1 << i), bit, cumul)
});
// Ensure that all bits are either 0 or 1.
for bit in exc_code_bits {
let constr = builder.mul_sub_extension(bit, bit, bit);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
// Look up the handler in memory
let code_segment = F::from_canonical_usize(Segment::Code as usize);
let exc_jumptable_start = builder.constant_extension(
F::from_canonical_usize(KERNEL.global_labels["exception_jumptable"]).into(),
);
let exc_handler_addr_start = builder.mul_const_add_extension(
F::from_canonical_usize(BYTES_PER_OFFSET),
exc_code,
exc_jumptable_start,
);
for (i, channel) in lv.mem_channels[0..BYTES_PER_OFFSET].iter().enumerate() {
{
let constr = builder.mul_sub_extension(filter, channel.used, filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, channel.is_read, filter);
yield_constr.constraint(builder, constr);
}
// Set kernel context and code segment
{
let constr = builder.mul_extension(filter, channel.addr_context);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.arithmetic_extension(
F::ONE,
-code_segment,
filter,
channel.addr_segment,
filter,
);
yield_constr.constraint(builder, constr);
}
// Set address, using a separate channel for each of the `BYTES_PER_OFFSET` limbs.
{
let diff = builder.sub_extension(channel.addr_virtual, exc_handler_addr_start);
let constr = builder.arithmetic_extension(
F::ONE,
-F::from_canonical_usize(i),
filter,
diff,
filter,
);
yield_constr.constraint(builder, constr);
}
}
// Disable unused channels (the last channel is used to push to the stack)
for channel in &lv.mem_channels[BYTES_PER_OFFSET..NUM_GP_CHANNELS - 1] {
let constr = builder.mul_extension(filter, channel.used);
yield_constr.constraint(builder, constr);
}
// Set program counter to the handler address
// The addresses are big-endian in memory
{
let target = lv.mem_channels[0..BYTES_PER_OFFSET]
.iter()
.map(|channel| channel.value[0])
.fold(builder.zero_extension(), |cumul, limb| {
builder.mul_const_add_extension(F::from_canonical_u64(256), cumul, limb)
});
let diff = builder.sub_extension(nv.program_counter, target);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint_transition(builder, constr);
}
// Set kernel mode
{
let constr = builder.mul_sub_extension(filter, nv.is_kernel_mode, filter);
yield_constr.constraint_transition(builder, constr);
}
// Maintain current context
{
let diff = builder.sub_extension(nv.context, lv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint_transition(builder, constr);
}
// Reset gas counter to zero.
{
let constr = builder.mul_extension(filter, nv.gas);
yield_constr.constraint_transition(builder, constr);
}
// This memory channel is constrained in `stack.rs`.
let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value;
// Push to stack: current PC (limb 0), gas counter (limbs 6 and 7).
{
let diff = builder.sub_extension(output[0], lv.program_counter);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(output[6], lv.gas);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
// TODO: Range check `output[6]`.
{
// High limb of gas is zero.
let constr = builder.mul_extension(filter, output[7]);
yield_constr.constraint(builder, constr);
}
// Zero the rest of that register
for &limb in &output[1..6] {
let constr = builder.mul_extension(filter, limb);
yield_constr.constraint(builder, constr);
}
}


@@ -56,6 +56,7 @@ const SIMPLE_OPCODES: OpsColumnsView<Option<u32>> = OpsColumnsView {
mload_general: KERNEL_ONLY_INSTR,
mstore_general: KERNEL_ONLY_INSTR,
syscall: None,
exception: None,
};
fn eval_packed_accumulate<P: PackedField>(


@@ -15,7 +15,7 @@ pub fn eval_packed_exit_kernel<P: PackedField>(
yield_constr: &mut ConstraintConsumer<P>,
) {
let input = lv.mem_channels[0].value;
let filter = lv.is_cpu_cycle * lv.op.exit_kernel;
let filter = lv.op.exit_kernel;
// If we are executing `EXIT_KERNEL` then we simply restore the program counter, kernel mode
// flag, and gas counter. The middle 4 (32-bit) limbs are ignored (this is not part of the spec,
@@ -25,6 +25,19 @@ pub fn eval_packed_exit_kernel<P: PackedField>(
yield_constr.constraint_transition(filter * (input[6] - nv.gas));
// High limb of gas must be 0 for convenient detection of overflow.
yield_constr.constraint(filter * input[7]);
// Check that the syscall did not cause a stack overflow.
// A standard EVM instruction increases the length of the user's stack by at most one. We assume
// that the stack length was valid when we entered the syscall (i.e. it was <= 1024). If the
// syscall caused the stack to overflow, then the stack length is 1025 upon exit into userspace;
// that corresponds to a stack length of 1026 before `EXIT_KERNEL` is executed.
// This constraint ensures that the current stack length is not 1026 if we're returning to user
// mode. If we're remaining in kernel mode upon return from this syscall, the RHS will be 0, so
// the constraint is trivially satisfied by setting `stack_len_check_aux = 0`.
let offset_stack_len = lv.stack_len - P::Scalar::from_canonical_u32(1026);
let lhs = offset_stack_len * lv.general.exit_kernel().stack_len_check_aux;
let rhs = P::ONES - input[1];
yield_constr.constraint(filter * (lhs - rhs));
}
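The check above uses the standard nonzero-via-pseudoinverse trick. Here is a minimal sketch over a toy prime field (p = 2^61 - 1 is an assumption for illustration, not the field the prover actually uses): when returning to user mode, the prover must supply aux = (stack_len - 1026)^(-1), which exists only if stack_len != 1026.

const P: u128 = (1 << 61) - 1;

// Fermat inversion: x^(p-2) mod p. Returns None for x == 0.
fn inv(x: u128) -> Option<u128> {
    if x % P == 0 {
        return None;
    }
    let (mut base, mut exp, mut acc) = (x % P, P - 2, 1u128);
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % P;
        }
        base = base * base % P;
        exp >>= 1;
    }
    Some(acc)
}

// Mirrors the constraint: (stack_len - 1026) * aux == 1 - is_kernel (mod p).
fn stack_len_check(stack_len: u128, is_kernel: u128) -> bool {
    let offset = (stack_len + P - 1026) % P;
    let aux = if is_kernel == 1 {
        0 // Trivially satisfiable when remaining in kernel mode.
    } else {
        inv(offset).expect("stack_len == 1026: overflow, no valid witness exists")
    };
    offset * aux % P == (1 + P - is_kernel) % P
}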
pub fn eval_ext_circuit_exit_kernel<F: RichField + Extendable<D>, const D: usize>(
@@ -34,7 +47,7 @@ pub fn eval_ext_circuit_exit_kernel<F: RichField + Extendable<D>, const D: usize
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let input = lv.mem_channels[0].value;
let filter = builder.mul_extension(lv.is_cpu_cycle, lv.op.exit_kernel);
let filter = lv.op.exit_kernel;
// If we are executing `EXIT_KERNEL` then we simply restore the program counter and kernel mode
// flag. The top 6 (32-bit) limbs are ignored (this is not part of the spec, but we trust the
@@ -58,6 +71,22 @@ pub fn eval_ext_circuit_exit_kernel<F: RichField + Extendable<D>, const D: usize
let constr = builder.mul_extension(filter, input[7]);
yield_constr.constraint(builder, constr);
}
// Check that the syscall did not cause a stack overflow.
// See detailed comments in the packed implementation.
{
let stack_len_check_aux = lv.general.exit_kernel().stack_len_check_aux;
let lhs = builder.arithmetic_extension(
F::ONE,
-F::from_canonical_u32(1026),
lv.stack_len,
stack_len_check_aux,
stack_len_check_aux,
);
let constr = builder.add_extension(lhs, input[1]);
let constr = builder.mul_sub_extension(filter, constr, filter);
yield_constr.constraint(builder, constr);
}
}
pub fn eval_packed_jump_jumpi<P: PackedField>(


@@ -28,9 +28,9 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/core/create.asm"),
include_str!("asm/core/create_addresses.asm"),
include_str!("asm/core/create_contract_account.asm"),
include_str!("asm/core/exception.asm"),
include_str!("asm/core/gas.asm"),
include_str!("asm/core/intrinsic_gas.asm"),
include_str!("asm/core/invalid.asm"),
include_str!("asm/core/jumpdest_analysis.asm"),
include_str!("asm/core/nonce.asm"),
include_str!("asm/core/process_txn.asm"),


@@ -0,0 +1,299 @@
// These exception codes are arbitrary and assigned by us.
global exception_jumptable:
// exception 0: out of gas
JUMPTABLE exc_out_of_gas
// exception 1: invalid opcode
JUMPTABLE exc_invalid_opcode
// exception 2: stack underflow
JUMPTABLE exc_stack_underflow
// exception 3: invalid jump destination
JUMPTABLE exc_invalid_jump_destination
// exception 4: invalid jumpi destination
JUMPTABLE exc_invalid_jumpi_destination
// exception 5: stack overflow
JUMPTABLE exc_stack_overflow
// exceptions 6 and 7: unused
JUMPTABLE panic
JUMPTABLE panic
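A sketch of the address arithmetic that both the constraints in exceptions.rs and witness generation use against this table: each JUMPTABLE entry occupies BYTES_PER_OFFSET (here 3) bytes of kernel code, so handler i starts 3 * i bytes past the label.

const BYTES_PER_OFFSET: usize = 3;

fn handler_addr_addr(jumptable_start: usize, exc_code: u8) -> usize {
    jumptable_start + (exc_code as usize) * BYTES_PER_OFFSET
}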
global exc_out_of_gas:
// TODO
%jump(fault_exception)
global exc_invalid_opcode:
// stack: trap_info
// check if the opcode that triggered this trap is _actually_ invalid
%opcode_from_exp_trap_info
PUSH @INVALID_OPCODES_USER
// stack: invalid_opcodes_user, opcode
SWAP1
// stack: opcode, invalid_opcodes_user
SHR
%mod_const(2)
// stack: opcode_is_invalid
// if the opcode is indeed invalid, then perform an exceptional exit
%jumpi(fault_exception)
// otherwise, panic because this trap should not have been entered
PANIC
global exc_stack_underflow:
// stack: trap_info
%opcode_from_exp_trap_info
// stack: opcode
%add_const(min_stack_len_for_opcode)
%mload_kernel_code
// stack: min_stack_length
%stack_length
// stack: user_stack_length + 1, min_stack_length
GT
// stack: user_stack_length >= min_stack_length
%jumpi(panic)
%jump(fault_exception)
// Debugging note: this will underflow if entered without at least one item on the stack (in
// addition to trap_info). This is expected; it means that the exc_stack_underflow handler should
// have been used instead.
global exc_invalid_jump_destination:
// stack: trap_info, jump_dest
// check that the triggering opcode is indeed JUMP
%opcode_from_exp_trap_info
// stack: opcode, jump_dest
%eq_const(0x56)
// if it's JUMP, then verify that we're actually jumping to an invalid address
%jumpi(invalid_jump_jumpi_destination_common)
// otherwise, panic
PANIC
// Debugging note: this will underflow if entered without at least two items on the stack (in
// addition to trap_info). This is expected; it means that the exc_stack_underflow handler should
// have been used instead.
global exc_invalid_jumpi_destination:
// stack: trap_info, jump_dest, condition
// check that the triggering opcode is indeed JUMPI
%opcode_from_exp_trap_info
// stack: opcode, jump_dest, condition
%sub_const(0x57)
// if it's not JUMPI, then panic
%jumpi(panic)
// otherwise, verify that the condition is nonzero
// stack: jump_dest, condition
SWAP1
// if it's nonzero, then verify that we're actually jumping to an invalid address
%jumpi(invalid_jump_jumpi_destination_common)
// otherwise, panic
PANIC
global invalid_jump_jumpi_destination_common:
// We have a jump destination on the stack. We want to `PANIC` if it is valid, and jump to
// `fault_exception` if it is not. An address is a valid jump destination if it points to a
// `JUMPDEST` instruction. Since memory addresses in this implementation are limited to 32
// bits, we check two things:
// 1. the address is no more than 32 bits long, and
// 2. it points to a `JUMPDEST` instruction.
// stack: jump_dest
DUP1
%shr_const(32)
%jumpi(fault_exception) // This keeps one copy of jump_dest on the stack, but that's fine.
// jump_dest is a valid address; check if it points to a `JUMPDEST`.
%mload_current(@SEGMENT_JUMPDEST_BITS)
// stack: is_valid_jumpdest
%jumpi(panic) // Trap should never have been entered.
%jump(fault_exception)
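A minimal restatement of the two-part validity test above (function and parameter names are illustrative): a target is a valid jump destination iff it fits in 32 bits and the per-context JUMPDEST-bits table marks it.

fn is_valid_jumpdest(jump_dest: u64, jumpdest_bits: &[bool]) -> bool {
    // Part 1: the address fits in 32 bits; part 2: it points at a JUMPDEST.
    jump_dest < (1 << 32) && jumpdest_bits.get(jump_dest as usize).copied().unwrap_or(false)
}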
global exc_stack_overflow:
// stack: trap_info
// check that the triggering opcode _can_ overflow (i.e., it increases the stack size by 1)
%opcode_from_exp_trap_info
PUSH @STACK_LENGTH_INCREASING_OPCODES_USER
// stack: stack_length_increasing_opcodes_user, opcode
SWAP1
// stack: opcode, stack_length_increasing_opcodes_user
SHR
%mod_const(2)
// stack: opcode_increases_stack_length
// if the opcode indeed increases the stack length, then check whether the stack size is at its
// maximum value
%jumpi(exc_stack_overflow_check_stack_length)
// otherwise, panic because this trap should not have been entered
PANIC
global exc_stack_overflow_check_stack_length:
// stack: (empty)
%stack_length
%eq_const(1024)
// if true, stack length is at its maximum allowed value, so the instruction would indeed cause
// an overflow.
%jumpi(fault_exception)
PANIC
// Given the exception trap info, load the opcode that caused the exception
%macro opcode_from_exp_trap_info
%mod_const(0x100000000) // get program counter from low 32 bits of trap_info
%mload_current_code
%endmacro
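In plain Rust, the macro computes the following (the `code` slice standing in for the current context's code is an assumption of this sketch): trap_info carries the faulting program counter in its low 32 bits, and the opcode is the code byte at that counter.

fn opcode_from_trap_info(trap_info: u64, code: &[u8]) -> u8 {
    let program_counter = (trap_info % (1 << 32)) as usize;
    code[program_counter]
}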
min_stack_len_for_opcode:
BYTES 0 // 0x00, STOP
BYTES 2 // 0x01, ADD
BYTES 2 // 0x02, MUL
BYTES 2 // 0x03, SUB
BYTES 2 // 0x04, DIV
BYTES 2 // 0x05, SDIV
BYTES 2 // 0x06, MOD
BYTES 2 // 0x07, SMOD
BYTES 3 // 0x08, ADDMOD
BYTES 3 // 0x09, MULMOD
BYTES 2 // 0x0a, EXP
BYTES 2 // 0x0b, SIGNEXTEND
%rep 4 // 0x0c-0x0f, invalid
BYTES 0
%endrep
BYTES 2 // 0x10, LT
BYTES 2 // 0x11, GT
BYTES 2 // 0x12, SLT
BYTES 2 // 0x13, SGT
BYTES 2 // 0x14, EQ
BYTES 1 // 0x15, ISZERO
BYTES 2 // 0x16, AND
BYTES 2 // 0x17, OR
BYTES 2 // 0x18, XOR
BYTES 1 // 0x19, NOT
BYTES 2 // 0x1a, BYTE
BYTES 2 // 0x1b, SHL
BYTES 2 // 0x1c, SHR
BYTES 2 // 0x1d, SAR
BYTES 0 // 0x1e, invalid
BYTES 0 // 0x1f, invalid
BYTES 2 // 0x20, KECCAK256
%rep 15 // 0x21-0x2f, invalid
BYTES 0
%endrep
BYTES 0 // 0x30, ADDRESS
BYTES 1 // 0x31, BALANCE
BYTES 0 // 0x32, ORIGIN
BYTES 0 // 0x33, CALLER
BYTES 0 // 0x34, CALLVALUE
BYTES 1 // 0x35, CALLDATALOAD
BYTES 0 // 0x36, CALLDATASIZE
BYTES 3 // 0x37, CALLDATACOPY
BYTES 0 // 0x38, CODESIZE
BYTES 3 // 0x39, CODECOPY
BYTES 0 // 0x3a, GASPRICE
BYTES 1 // 0x3b, EXTCODESIZE
BYTES 4 // 0x3c, EXTCODECOPY
BYTES 0 // 0x3d, RETURNDATASIZE
BYTES 3 // 0x3e, RETURNDATACOPY
BYTES 1 // 0x3f, EXTCODEHASH
BYTES 1 // 0x40, BLOCKHASH
BYTES 0 // 0x41, COINBASE
BYTES 0 // 0x42, TIMESTAMP
BYTES 0 // 0x43, NUMBER
BYTES 0 // 0x44, DIFFICULTY
BYTES 0 // 0x45, GASLIMIT
BYTES 0 // 0x46, CHAINID
BYTES 0 // 0x47, SELFBALANCE
BYTES 0 // 0x48, BASEFEE
%rep 7 // 0x49-0x4f, invalid
BYTES 0
%endrep
BYTES 1 // 0x50, POP
BYTES 1 // 0x51, MLOAD
BYTES 2 // 0x52, MSTORE
BYTES 2 // 0x53, MSTORE8
BYTES 1 // 0x54, SLOAD
BYTES 2 // 0x55, SSTORE
BYTES 1 // 0x56, JUMP
BYTES 2 // 0x57, JUMPI
BYTES 0 // 0x58, PC
BYTES 0 // 0x59, MSIZE
BYTES 0 // 0x5a, GAS
BYTES 0 // 0x5b, JUMPDEST
%rep 4 // 0x5c-0x5f, invalid
BYTES 0
%endrep
%rep 32 // 0x60-0x7f, PUSH1-PUSH32
BYTES 0
%endrep
BYTES 1 // 0x80, DUP1
BYTES 2 // 0x81, DUP2
BYTES 3 // 0x82, DUP3
BYTES 4 // 0x83, DUP4
BYTES 5 // 0x84, DUP5
BYTES 6 // 0x85, DUP6
BYTES 7 // 0x86, DUP7
BYTES 8 // 0x87, DUP8
BYTES 9 // 0x88, DUP9
BYTES 10 // 0x89, DUP10
BYTES 11 // 0x8a, DUP11
BYTES 12 // 0x8b, DUP12
BYTES 13 // 0x8c, DUP13
BYTES 14 // 0x8d, DUP14
BYTES 15 // 0x8e, DUP15
BYTES 16 // 0x8f, DUP16
BYTES 2 // 0x90, SWAP1
BYTES 3 // 0x91, SWAP2
BYTES 4 // 0x92, SWAP3
BYTES 5 // 0x93, SWAP4
BYTES 6 // 0x94, SWAP5
BYTES 7 // 0x95, SWAP6
BYTES 8 // 0x96, SWAP7
BYTES 9 // 0x97, SWAP8
BYTES 10 // 0x98, SWAP9
BYTES 11 // 0x99, SWAP10
BYTES 12 // 0x9a, SWAP11
BYTES 13 // 0x9b, SWAP12
BYTES 14 // 0x9c, SWAP13
BYTES 15 // 0x9d, SWAP14
BYTES 16 // 0x9e, SWAP15
BYTES 17 // 0x9f, SWAP16
BYTES 2 // 0xa0, LOG0
BYTES 3 // 0xa1, LOG1
BYTES 4 // 0xa2, LOG2
BYTES 5 // 0xa3, LOG3
BYTES 6 // 0xa4, LOG4
%rep 11 // 0xa5-0xaf, invalid
BYTES 0
%endrep
%rep 64 // 0xb0-0xef, invalid
BYTES 0
%endrep
BYTES 3 // 0xf0, CREATE
BYTES 7 // 0xf1, CALL
BYTES 7 // 0xf2, CALLCODE
BYTES 2 // 0xf3, RETURN
BYTES 6 // 0xf4, DELEGATECALL
BYTES 4 // 0xf5, CREATE2
%rep 4 // 0xf6-0xf9, invalid
BYTES 0
%endrep
BYTES 6 // 0xfa, STATICCALL
BYTES 0 // 0xfb, invalid
BYTES 0 // 0xfc, invalid
BYTES 2 // 0xfd, REVERT
BYTES 0 // 0xfe, invalid
BYTES 1 // 0xff, SELFDESTRUCT
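A sketch of how exc_stack_underflow consumes this table (identifiers assumed): entries are one byte per opcode and indexed directly, so the minimum stack length for an opcode lives at the table label plus the opcode, mirroring `%add_const(min_stack_len_for_opcode)` followed by `%mload_kernel_code`.

fn min_stack_len_for(kernel_code: &[u8], table_label: usize, opcode: u8) -> u8 {
    kernel_code[table_label + opcode as usize]
}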


@@ -1,26 +0,0 @@
global handle_invalid:
// stack: trap_info
// if the kernel is trying to execute an invalid instruction, then we've already screwed up and
// there's no chance of getting a useful proof, so we just panic
DUP1
// stack: trap_info, trap_info
%shr_const(32)
// stack: is_kernel, trap_info
%jumpi(panic)
// check if the opcode that triggered this trap is _actually_ invalid
// stack: program_counter (is_kernel == 0, so trap_info == program_counter)
%mload_current_code
// stack: opcode
PUSH @INVALID_OPCODES_USER
// stack: invalid_opcodes_user, opcode
SWAP1
// stack: opcode, invalid_opcodes_user
SHR
%and_const(1)
// stack: opcode_is_invalid
// if the opcode is indeed invalid, then perform an exceptional exit
%jumpi(fault_exception)
// otherwise, panic because this trap should not have been entered
PANIC


@@ -69,3 +69,21 @@
SWAP1 %is_empty
OR
%endmacro
// Gets the size of the stack _before_ the macro is run.
// WARNING: this macro is side-effecting. It writes the current stack length to offset
// `CTX_METADATA_STACK_SIZE`, segment `SEGMENT_CONTEXT_METADATA` in the current context. This
// should be harmless unless a caller depends on that slot holding a stale value.
%macro stack_length
// stack: (empty)
GET_CONTEXT
// stack: current_ctx
// It seems odd to switch to the context that we are already in. We do this because SET_CONTEXT
// saves the stack length of the context we are leaving in its metadata segment.
SET_CONTEXT
// stack: (empty)
// We can now read this stack length from memory.
PUSH @CTX_METADATA_STACK_SIZE
%mload_current(@SEGMENT_CONTEXT_METADATA)
// stack: stack_length
%endmacro


@@ -0,0 +1,53 @@
use std::ops::RangeInclusive;
use ethereum_types::U256;
/// Create a U256, where the bits at indices inside the specified ranges are set to 1, and all other
/// bits are set to 0.
const fn u256_from_set_index_ranges<const N: usize>(ranges: [RangeInclusive<u8>; N]) -> U256 {
let mut j = 0;
let mut res_limbs = [0u64; 4];
    // `const fn` cannot use `for` loops or iterators, hence the manual `while` loops.
    while j < ranges.len() {
        let range = &ranges[j];
        j += 1;
        let mut i = *range.start();
        if i > *range.end() {
            // Empty range: nothing to set. Note that `j` was already advanced above;
            // a bare `continue` before incrementing `j` would loop forever.
            continue;
        }
        loop {
            let i_lo = i & 0x3f;
            let i_hi = i >> 6;
            res_limbs[i_hi as usize] |= 1 << i_lo;
            // `i` may be `u8::MAX`, so test before incrementing to avoid overflow.
            if i >= *range.end() {
                break;
            }
            i += 1;
        }
    }
U256(res_limbs)
}
pub const STACK_LENGTH_INCREASING_OPCODES_USER: U256 = u256_from_set_index_ranges([
0x30..=0x30, // ADDRESS
0x32..=0x34, // ORIGIN, CALLER, CALLVALUE
0x36..=0x36, // CALLDATASIZE
0x38..=0x38, // CODESIZE
0x3a..=0x3a, // GASPRICE
0x3d..=0x3d, // RETURNDATASIZE
0x41..=0x48, // COINBASE, TIMESTAMP, NUMBER, DIFFICULTY, GASLIMIT, CHAINID, SELFBALANCE, BASEFEE
0x58..=0x5a, // PC, MSIZE, GAS
0x60..=0x8f, // PUSH*, DUP*
]);
pub const INVALID_OPCODES_USER: U256 = u256_from_set_index_ranges([
0x0c..=0x0f,
0x1e..=0x1f,
0x21..=0x2f,
0x49..=0x4f,
0x5c..=0x5f,
0xa5..=0xef,
0xf6..=0xf9,
0xfb..=0xfc,
0xfe..=0xfe,
]);
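How the kernel consumes these constants, as a sketch mirroring the `SHR` / `%mod_const(2)` sequence in exception.asm: shift the bitfield right by the opcode and test the low bit.

use ethereum_types::U256;

fn opcode_in_bitfield(bitfield: &U256, opcode: u8) -> bool {
    // Equivalent to (bitfield >> opcode) & 1 == 1.
    bitfield.bit(opcode as usize)
}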


@@ -3,7 +3,6 @@ use std::collections::HashMap;
use ethereum_types::U256;
use hex_literal::hex;
use crate::cpu::decode::invalid_opcodes_user;
use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::constants::journal_entry::JournalEntry;
@@ -12,6 +11,7 @@ use crate::cpu::kernel::constants::txn_fields::NormalizedTxnField;
use crate::memory::segments::Segment;
pub(crate) mod context_metadata;
mod exc_bitfields;
pub(crate) mod global_metadata;
pub(crate) mod journal_entry;
pub(crate) mod trie_type;
@@ -77,7 +77,11 @@ pub fn evm_constants() -> HashMap<String, U256> {
}
c.insert(
"INVALID_OPCODES_USER".into(),
U256::from_little_endian(&invalid_opcodes_user()),
exc_bitfields::INVALID_OPCODES_USER,
);
c.insert(
"STACK_LENGTH_INCREASING_OPCODES_USER".into(),
exc_bitfields::STACK_LENGTH_INCREASING_OPCODES_USER,
);
c
}


@@ -5,6 +5,7 @@ pub(crate) mod control_flow;
pub mod cpu_stark;
pub(crate) mod decode;
mod dup_swap;
mod exceptions;
mod gas;
mod jumps;
pub mod kernel;


@@ -122,6 +122,11 @@ const STACK_BEHAVIORS: OpsColumnsView<Option<StackBehavior>> = OpsColumnsView {
pushes: true,
disable_other_channels: false,
}),
exception: Some(StackBehavior {
num_pops: 0,
pushes: true,
disable_other_channels: false,
}),
};
fn eval_packed_one<P: PackedField>(


@@ -11,6 +11,7 @@ pub enum ProgramError {
StackOverflow,
KernelPanic,
MemoryError(MemoryError),
GasLimitError,
}
#[allow(clippy::enum_variant_names)]


@@ -14,7 +14,7 @@ pub(crate) fn gas_to_charge(op: Operation) -> u64 {
match op {
Iszero => G_VERYLOW,
Not => G_VERYLOW,
Syscall(_) => KERNEL_ONLY_INSTR,
Syscall(_, _, _) => KERNEL_ONLY_INSTR,
Eq => G_VERYLOW,
BinaryLogic(_) => G_VERYLOW,
BinaryArithmetic(Add) => G_VERYLOW,


@@ -9,12 +9,13 @@ use crate::cpu::kernel::assembler::BYTES_PER_OFFSET;
use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::cpu::simple_logic::eq_iszero::generate_pinv_diff;
use crate::cpu::stack_bounds::MAX_USER_STACK_SIZE;
use crate::generation::state::GenerationState;
use crate::memory::segments::Segment;
use crate::witness::errors::MemoryError::{ContextTooLarge, SegmentTooLarge, VirtTooLarge};
use crate::witness::errors::ProgramError;
use crate::witness::errors::ProgramError::MemoryError;
use crate::witness::memory::{MemoryAddress, MemoryOp};
use crate::witness::memory::{MemoryAddress, MemoryChannel, MemoryOp, MemoryOpKind};
use crate::witness::util::{
keccak_sponge_log, mem_read_gp_with_log_and_fill, mem_write_gp_log_and_fill,
stack_pop_with_log_and_fill, stack_push_log_and_fill,
@@ -27,7 +28,7 @@ pub(crate) enum Operation {
Not,
Shl,
Shr,
Syscall(u8),
Syscall(u8, usize, bool), // (syscall number, minimum stack length, increases stack length)
Eq,
BinaryLogic(logic::Op),
BinaryArithmetic(arithmetic::BinaryOperator),
@@ -308,7 +309,18 @@ pub(crate) fn generate_set_context<F: Field>(
let new_sp_addr = MemoryAddress::new(new_ctx, Segment::ContextMetadata, sp_field);
let log_write_old_sp = mem_write_gp_log_and_fill(1, old_sp_addr, state, &mut row, sp_to_save);
let (new_sp, log_read_new_sp) = mem_read_gp_with_log_and_fill(2, new_sp_addr, state, &mut row);
    let (new_sp, log_read_new_sp) = if old_ctx == new_ctx {
        // Special case: when the "new" context equals the old one, the write of the old stack
        // length (channel 1, above) and this read target the same address in the same cycle, so
        // the read must observe the value just written rather than stale memory.
        let op = MemoryOp::new(
MemoryChannel::GeneralPurpose(2),
state.traces.clock(),
new_sp_addr,
MemoryOpKind::Read,
sp_to_save,
);
(sp_to_save, op)
} else {
mem_read_gp_with_log_and_fill(2, new_sp_addr, state, &mut row)
};
state.registers.context = new_ctx;
state.registers.stack_len = new_sp.as_usize();
@@ -494,11 +506,23 @@ pub(crate) fn generate_shr<F: Field>(
pub(crate) fn generate_syscall<F: Field>(
opcode: u8,
stack_values_read: usize,
stack_len_increased: bool,
state: &mut GenerationState<F>,
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
if TryInto::<u32>::try_into(state.registers.gas_used).is_err() {
panic!();
return Err(ProgramError::GasLimitError);
}
if state.registers.stack_len < stack_values_read {
return Err(ProgramError::StackUnderflow);
}
if stack_len_increased
&& !state.registers.is_kernel
&& state.registers.stack_len >= MAX_USER_STACK_SIZE
{
return Err(ProgramError::StackOverflow);
}
let handler_jumptable_addr = KERNEL.global_labels["syscall_jumptable"];
@@ -530,13 +554,18 @@
let syscall_info = U256::from(state.registers.program_counter + 1)
+ (U256::from(u64::from(state.registers.is_kernel)) << 32)
+ (U256::from(state.registers.gas_used) << 192);
let log_out = stack_push_log_and_fill(state, &mut row, syscall_info)?;
// Set registers before pushing to the stack; in particular, we need to set kernel mode so we
// can't incorrectly trigger a stack overflow. However, note that we have to do it _after_ we
// make `syscall_info`, which should contain the old values.
state.registers.program_counter = new_program_counter;
log::debug!("Syscall to {}", KERNEL.offset_name(new_program_counter));
state.registers.is_kernel = true;
state.registers.gas_used = 0;
let log_out = stack_push_log_and_fill(state, &mut row, syscall_info)?;
log::debug!("Syscall to {}", KERNEL.offset_name(new_program_counter));
state.traces.push_memory(log_in0);
state.traces.push_memory(log_in1);
state.traces.push_memory(log_in2);
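For reference, a standalone sketch of the `syscall_info` word built above: eight 32-bit limbs, with the return PC (the instruction after the syscall) in limb 0, the old kernel-mode flag in bit 32, and the old gas counter in limbs 6 and 7, exactly the positions `EXIT_KERNEL` restores from.

use ethereum_types::U256;

fn pack_syscall_info(return_pc: u64, was_kernel: bool, gas_used: u64) -> U256 {
    U256::from(return_pc)                           // limb 0: PC to return to
        + (U256::from(u64::from(was_kernel)) << 32) // limb 1, bit 0: old kernel flag
        + (U256::from(gas_used) << 192)             // limbs 6-7: old gas counter
}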
@@ -579,6 +608,18 @@ pub(crate) fn generate_exit_kernel<F: Field>(
panic!();
}
if is_kernel_mode {
row.general.exit_kernel_mut().stack_len_check_aux = F::ZERO;
} else {
let diff =
F::from_canonical_usize(state.registers.stack_len + 1) - F::from_canonical_u32(1026);
if let Some(inv) = diff.try_inverse() {
row.general.exit_kernel_mut().stack_len_check_aux = inv;
} else {
panic!("stack overflow; should have been caught before entering the syscall")
}
}
state.registers.program_counter = program_counter;
state.registers.is_kernel = is_kernel_mode;
state.registers.gas_used = gas_used_val;
@@ -647,3 +688,69 @@ pub(crate) fn generate_mstore_general<F: Field>(
state.traces.push_cpu(row);
Ok(())
}
pub(crate) fn generate_exception<F: Field>(
exc_code: u8,
state: &mut GenerationState<F>,
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
if TryInto::<u32>::try_into(state.registers.gas_used).is_err() {
return Err(ProgramError::GasLimitError);
}
row.stack_len_bounds_aux = (row.stack_len + F::ONE).inverse();
row.general.exception_mut().exc_code_bits = [
F::from_bool(exc_code & 1 != 0),
F::from_bool(exc_code & 2 != 0),
F::from_bool(exc_code & 4 != 0),
];
let handler_jumptable_addr = KERNEL.global_labels["exception_jumptable"];
let handler_addr_addr =
handler_jumptable_addr + (exc_code as usize) * (BYTES_PER_OFFSET as usize);
assert_eq!(BYTES_PER_OFFSET, 3, "Code below assumes 3 bytes per offset");
let (handler_addr0, log_in0) = mem_read_gp_with_log_and_fill(
0,
MemoryAddress::new(0, Segment::Code, handler_addr_addr),
state,
&mut row,
);
let (handler_addr1, log_in1) = mem_read_gp_with_log_and_fill(
1,
MemoryAddress::new(0, Segment::Code, handler_addr_addr + 1),
state,
&mut row,
);
let (handler_addr2, log_in2) = mem_read_gp_with_log_and_fill(
2,
MemoryAddress::new(0, Segment::Code, handler_addr_addr + 2),
state,
&mut row,
);
let handler_addr = (handler_addr0 << 16) + (handler_addr1 << 8) + handler_addr2;
let new_program_counter = handler_addr.as_usize();
let exc_info =
U256::from(state.registers.program_counter) + (U256::from(state.registers.gas_used) << 192);
// Set registers before pushing to the stack; in particular, we need to set kernel mode so we
// can't incorrectly trigger a stack overflow. However, note that we have to do it _after_ we
// make `exc_info`, which should contain the old values.
state.registers.program_counter = new_program_counter;
state.registers.is_kernel = true;
state.registers.gas_used = 0;
let log_out = stack_push_log_and_fill(state, &mut row, exc_info)?;
log::debug!("Exception to {}", KERNEL.offset_name(new_program_counter));
state.traces.push_memory(log_in0);
state.traces.push_memory(log_in1);
state.traces.push_memory(log_in2);
state.traces.push_memory(log_out);
state.traces.push_cpu(row);
Ok(())
}
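Companion sketch to the syscall case: exceptions pack the same fields, except that the saved PC is the faulting instruction itself (not PC + 1), and no kernel-mode bit is stored, since the constraint `filter * lv.is_kernel_mode` in exceptions.rs forbids exceptions in kernel mode.

use ethereum_types::U256;

fn pack_exc_info(faulting_pc: u64, gas_used: u64) -> U256 {
    U256::from(faulting_pc) + (U256::from(gas_used) << 192)
}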


@@ -29,22 +29,22 @@ fn read_code_memory<F: Field>(state: &mut GenerationState<F>, row: &mut CpuColum
fn decode(registers: RegistersState, opcode: u8) -> Result<Operation, ProgramError> {
match (opcode, registers.is_kernel) {
(0x00, _) => Ok(Operation::Syscall(opcode)),
(0x00, _) => Ok(Operation::Syscall(opcode, 0, false)), // STOP
(0x01, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Add)),
(0x02, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Mul)),
(0x03, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Sub)),
(0x04, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Div)),
(0x05, _) => Ok(Operation::Syscall(opcode)),
(0x05, _) => Ok(Operation::Syscall(opcode, 2, false)), // SDIV
(0x06, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Mod)),
(0x07, _) => Ok(Operation::Syscall(opcode)),
(0x07, _) => Ok(Operation::Syscall(opcode, 2, false)), // SMOD
(0x08, _) => Ok(Operation::TernaryArithmetic(
arithmetic::TernaryOperator::AddMod,
)),
(0x09, _) => Ok(Operation::TernaryArithmetic(
arithmetic::TernaryOperator::MulMod,
)),
(0x0a, _) => Ok(Operation::Syscall(opcode)),
(0x0b, _) => Ok(Operation::Syscall(opcode)),
(0x0a, _) => Ok(Operation::Syscall(opcode, 2, false)), // EXP
(0x0b, _) => Ok(Operation::Syscall(opcode, 2, false)), // SIGNEXTEND
(0x0c, true) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::AddFp254,
)),
@@ -59,8 +59,8 @@ fn decode(registers: RegistersState, opcode: u8) -> Result<Operation, ProgramErr
)),
(0x10, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Lt)),
(0x11, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Gt)),
(0x12, _) => Ok(Operation::Syscall(opcode)),
(0x13, _) => Ok(Operation::Syscall(opcode)),
(0x12, _) => Ok(Operation::Syscall(opcode, 2, false)), // SLT
(0x13, _) => Ok(Operation::Syscall(opcode, 2, false)), // SGT
(0x14, _) => Ok(Operation::Eq),
(0x15, _) => Ok(Operation::Iszero),
(0x16, _) => Ok(Operation::BinaryLogic(logic::Op::And)),
@@ -72,76 +72,76 @@ fn decode(registers: RegistersState, opcode: u8) -> Result<Operation, ProgramErr
)),
(0x1b, _) => Ok(Operation::Shl),
(0x1c, _) => Ok(Operation::Shr),
(0x1d, _) => Ok(Operation::Syscall(opcode)),
(0x20, _) => Ok(Operation::Syscall(opcode)),
(0x1d, _) => Ok(Operation::Syscall(opcode, 2, false)), // SAR
(0x20, _) => Ok(Operation::Syscall(opcode, 2, false)), // KECCAK256
(0x21, true) => Ok(Operation::KeccakGeneral),
(0x30, _) => Ok(Operation::Syscall(opcode)),
(0x31, _) => Ok(Operation::Syscall(opcode)),
(0x32, _) => Ok(Operation::Syscall(opcode)),
(0x33, _) => Ok(Operation::Syscall(opcode)),
(0x34, _) => Ok(Operation::Syscall(opcode)),
(0x35, _) => Ok(Operation::Syscall(opcode)),
(0x36, _) => Ok(Operation::Syscall(opcode)),
(0x37, _) => Ok(Operation::Syscall(opcode)),
(0x38, _) => Ok(Operation::Syscall(opcode)),
(0x39, _) => Ok(Operation::Syscall(opcode)),
(0x3a, _) => Ok(Operation::Syscall(opcode)),
(0x3b, _) => Ok(Operation::Syscall(opcode)),
(0x3c, _) => Ok(Operation::Syscall(opcode)),
(0x3d, _) => Ok(Operation::Syscall(opcode)),
(0x3e, _) => Ok(Operation::Syscall(opcode)),
(0x3f, _) => Ok(Operation::Syscall(opcode)),
(0x40, _) => Ok(Operation::Syscall(opcode)),
(0x41, _) => Ok(Operation::Syscall(opcode)),
(0x42, _) => Ok(Operation::Syscall(opcode)),
(0x43, _) => Ok(Operation::Syscall(opcode)),
(0x44, _) => Ok(Operation::Syscall(opcode)),
(0x45, _) => Ok(Operation::Syscall(opcode)),
(0x46, _) => Ok(Operation::Syscall(opcode)),
(0x47, _) => Ok(Operation::Syscall(opcode)),
(0x48, _) => Ok(Operation::Syscall(opcode)),
(0x49, _) => Ok(Operation::ProverInput),
(0x30, _) => Ok(Operation::Syscall(opcode, 0, true)), // ADDRESS
(0x31, _) => Ok(Operation::Syscall(opcode, 1, false)), // BALANCE
(0x32, _) => Ok(Operation::Syscall(opcode, 0, true)), // ORIGIN
(0x33, _) => Ok(Operation::Syscall(opcode, 0, true)), // CALLER
(0x34, _) => Ok(Operation::Syscall(opcode, 0, true)), // CALLVALUE
(0x35, _) => Ok(Operation::Syscall(opcode, 1, false)), // CALLDATALOAD
(0x36, _) => Ok(Operation::Syscall(opcode, 0, true)), // CALLDATASIZE
(0x37, _) => Ok(Operation::Syscall(opcode, 3, false)), // CALLDATACOPY
(0x38, _) => Ok(Operation::Syscall(opcode, 0, true)), // CODESIZE
(0x39, _) => Ok(Operation::Syscall(opcode, 3, false)), // CODECOPY
(0x3a, _) => Ok(Operation::Syscall(opcode, 0, true)), // GASPRICE
(0x3b, _) => Ok(Operation::Syscall(opcode, 1, false)), // EXTCODESIZE
(0x3c, _) => Ok(Operation::Syscall(opcode, 4, false)), // EXTCODECOPY
(0x3d, _) => Ok(Operation::Syscall(opcode, 0, true)), // RETURNDATASIZE
(0x3e, _) => Ok(Operation::Syscall(opcode, 3, false)), // RETURNDATACOPY
(0x3f, _) => Ok(Operation::Syscall(opcode, 1, false)), // EXTCODEHASH
(0x40, _) => Ok(Operation::Syscall(opcode, 1, false)), // BLOCKHASH
(0x41, _) => Ok(Operation::Syscall(opcode, 0, true)), // COINBASE
(0x42, _) => Ok(Operation::Syscall(opcode, 0, true)), // TIMESTAMP
(0x43, _) => Ok(Operation::Syscall(opcode, 0, true)), // NUMBER
(0x44, _) => Ok(Operation::Syscall(opcode, 0, true)), // DIFFICULTY
(0x45, _) => Ok(Operation::Syscall(opcode, 0, true)), // GASLIMIT
(0x46, _) => Ok(Operation::Syscall(opcode, 0, true)), // CHAINID
(0x47, _) => Ok(Operation::Syscall(opcode, 0, true)), // SELFBALANCE
(0x48, _) => Ok(Operation::Syscall(opcode, 0, true)), // BASEFEE
(0x49, true) => Ok(Operation::ProverInput),
(0x50, _) => Ok(Operation::Pop),
(0x51, _) => Ok(Operation::Syscall(opcode)),
(0x52, _) => Ok(Operation::Syscall(opcode)),
(0x53, _) => Ok(Operation::Syscall(opcode)),
(0x54, _) => Ok(Operation::Syscall(opcode)),
(0x55, _) => Ok(Operation::Syscall(opcode)),
(0x51, _) => Ok(Operation::Syscall(opcode, 1, false)), // MLOAD
(0x52, _) => Ok(Operation::Syscall(opcode, 2, false)), // MSTORE
(0x53, _) => Ok(Operation::Syscall(opcode, 2, false)), // MSTORE8
(0x54, _) => Ok(Operation::Syscall(opcode, 1, false)), // SLOAD
(0x55, _) => Ok(Operation::Syscall(opcode, 2, false)), // SSTORE
(0x56, _) => Ok(Operation::Jump),
(0x57, _) => Ok(Operation::Jumpi),
(0x58, _) => Ok(Operation::Pc),
(0x59, _) => Ok(Operation::Syscall(opcode)),
(0x5a, _) => Ok(Operation::Syscall(opcode)),
(0x59, _) => Ok(Operation::Syscall(opcode, 0, true)), // MSIZE
(0x5a, _) => Ok(Operation::Syscall(opcode, 0, true)), // GAS
(0x5b, _) => Ok(Operation::Jumpdest),
(0x60..=0x7f, _) => Ok(Operation::Push(opcode & 0x1f)),
(0x80..=0x8f, _) => Ok(Operation::Dup(opcode & 0xf)),
(0x90..=0x9f, _) => Ok(Operation::Swap(opcode & 0xf)),
(0xa0, _) => Ok(Operation::Syscall(opcode)),
(0xa1, _) => Ok(Operation::Syscall(opcode)),
(0xa2, _) => Ok(Operation::Syscall(opcode)),
(0xa3, _) => Ok(Operation::Syscall(opcode)),
(0xa4, _) => Ok(Operation::Syscall(opcode)),
(0xa5, _) => {
(0xa0, _) => Ok(Operation::Syscall(opcode, 2, false)), // LOG0
(0xa1, _) => Ok(Operation::Syscall(opcode, 3, false)), // LOG1
(0xa2, _) => Ok(Operation::Syscall(opcode, 4, false)), // LOG2
(0xa3, _) => Ok(Operation::Syscall(opcode, 5, false)), // LOG3
(0xa4, _) => Ok(Operation::Syscall(opcode, 6, false)), // LOG4
(0xa5, true) => {
log::warn!(
"Kernel panic at {}",
KERNEL.offset_name(registers.program_counter),
);
Err(ProgramError::KernelPanic)
}
(0xf0, _) => Ok(Operation::Syscall(opcode)),
(0xf1, _) => Ok(Operation::Syscall(opcode)),
(0xf2, _) => Ok(Operation::Syscall(opcode)),
(0xf3, _) => Ok(Operation::Syscall(opcode)),
(0xf4, _) => Ok(Operation::Syscall(opcode)),
(0xf5, _) => Ok(Operation::Syscall(opcode)),
(0xf0, _) => Ok(Operation::Syscall(opcode, 3, false)), // CREATE
(0xf1, _) => Ok(Operation::Syscall(opcode, 7, false)), // CALL
(0xf2, _) => Ok(Operation::Syscall(opcode, 7, false)), // CALLCODE
(0xf3, _) => Ok(Operation::Syscall(opcode, 2, false)), // RETURN
(0xf4, _) => Ok(Operation::Syscall(opcode, 6, false)), // DELEGATECALL
(0xf5, _) => Ok(Operation::Syscall(opcode, 4, false)), // CREATE2
(0xf6, true) => Ok(Operation::GetContext),
(0xf7, true) => Ok(Operation::SetContext),
(0xf9, true) => Ok(Operation::ExitKernel),
(0xfa, _) => Ok(Operation::Syscall(opcode)),
(0xfa, _) => Ok(Operation::Syscall(opcode, 6, false)), // STATICCALL
(0xfb, true) => Ok(Operation::MloadGeneral),
(0xfc, true) => Ok(Operation::MstoreGeneral),
(0xfd, _) => Ok(Operation::Syscall(opcode)),
(0xff, _) => Ok(Operation::Syscall(opcode)),
(0xfd, _) => Ok(Operation::Syscall(opcode, 2, false)), // REVERT
(0xff, _) => Ok(Operation::Syscall(opcode, 1, false)), // SELFDESTRUCT
_ => {
log::warn!("Invalid opcode: {}", opcode);
Err(ProgramError::InvalidOpcode)
@@ -157,7 +157,7 @@ fn fill_op_flag<F: Field>(op: Operation, row: &mut CpuColumnsView<F>) {
Operation::Swap(_) => &mut flags.swap,
Operation::Iszero => &mut flags.iszero,
Operation::Not => &mut flags.not,
Operation::Syscall(_) => &mut flags.syscall,
Operation::Syscall(_, _, _) => &mut flags.syscall,
Operation::Eq => &mut flags.eq,
Operation::BinaryLogic(logic::Op::And) => &mut flags.and,
Operation::BinaryLogic(logic::Op::Or) => &mut flags.or,
@@ -206,7 +206,9 @@ fn perform_op<F: Field>(
Operation::Not => generate_not(state, row)?,
Operation::Shl => generate_shl(state, row)?,
Operation::Shr => generate_shr(state, row)?,
Operation::Syscall(opcode) => generate_syscall(opcode, state, row)?,
Operation::Syscall(opcode, stack_values_read, stack_len_increased) => {
generate_syscall(opcode, stack_values_read, stack_len_increased, state, row)?
}
Operation::Eq => generate_eq(state, row)?,
Operation::BinaryLogic(binary_logic_op) => {
generate_binary_logic_op(binary_logic_op, state, row)?
@@ -228,7 +230,7 @@
};
state.registers.program_counter += match op {
Operation::Syscall(_) | Operation::ExitKernel => 0,
Operation::Syscall(_, _, _) | Operation::ExitKernel => 0,
Operation::Push(n) => n as usize + 2,
Operation::Jump | Operation::Jumpi => 0,
_ => 1,
@@ -239,7 +241,10 @@
Ok(())
}
fn try_perform_instruction<F: Field>(state: &mut GenerationState<F>) -> Result<(), ProgramError> {
/// Build a row with the correct values for system registers and the code channel, but which is
/// otherwise blank. The row fulfills the constraints that are common to successful operations and
/// to the exception operation. Also returns the opcode that was read.
fn base_row<F: Field>(state: &mut GenerationState<F>) -> (CpuColumnsView<F>, u8) {
let mut row: CpuColumnsView<F> = CpuColumnsView::default();
row.is_cpu_cycle = F::ONE;
row.clock = F::from_canonical_usize(state.traces.clock());
@@ -250,6 +255,11 @@ fn try_perform_instruction<F: Field>(state: &mut GenerationState<F>) -> Result<(
row.stack_len = F::from_canonical_usize(state.registers.stack_len);
let opcode = read_code_memory(state, &mut row);
(row, opcode)
}
fn try_perform_instruction<F: Field>(state: &mut GenerationState<F>) -> Result<(), ProgramError> {
let (mut row, opcode) = base_row(state);
let op = decode(state.registers, opcode)?;
if state.registers.is_kernel {
@@ -310,8 +320,27 @@ fn log_kernel_instruction<F: Field>(state: &mut GenerationState<F>, op: Operatio
assert!(pc < KERNEL.code.len(), "Kernel PC is out of range: {}", pc);
}
fn handle_error<F: Field>(_state: &mut GenerationState<F>) -> anyhow::Result<()> {
bail!("TODO: generation for exception handling is not implemented");
fn handle_error<F: Field>(state: &mut GenerationState<F>, err: ProgramError) -> anyhow::Result<()> {
let exc_code: u8 = match err {
ProgramError::OutOfGas => 0,
ProgramError::InvalidOpcode => 1,
ProgramError::StackUnderflow => 2,
ProgramError::InvalidJumpDestination => 3,
ProgramError::InvalidJumpiDestination => 4,
ProgramError::StackOverflow => 5,
_ => bail!("TODO: figure out what to do with this..."),
};
let checkpoint = state.checkpoint();
let (row, _) = base_row(state);
generate_exception(exc_code, state, row)
.map_err(|_| anyhow::Error::msg("error handling errored..."))?;
state
.memory
.apply_ops(state.traces.mem_ops_since(checkpoint.traces));
Ok(())
}
pub(crate) fn transition<F: Field>(state: &mut GenerationState<F>) -> anyhow::Result<()> {
@@ -337,7 +366,7 @@ pub(crate) fn transition<F: Field>(state: &mut GenerationState<F>) -> anyhow::Re
);
}
state.rollback(checkpoint);
handle_error(state)
handle_error(state, e)
}
}
}