From e978425b26ea21b357f405f1b42f2b8bd813ef7d Mon Sep 17 00:00:00 2001 From: Jacqueline Nabaglo Date: Wed, 28 Sep 2022 15:18:56 -0700 Subject: [PATCH] Connect stack to memory (#735) * Connect stack to memory * Daniel PR comment --- evm/src/cpu/columns/mod.rs | 107 +-------- evm/src/cpu/columns/ops.rs | 153 +++++++++++++ evm/src/cpu/control_flow.rs | 50 ++--- evm/src/cpu/cpu_stark.rs | 9 +- evm/src/cpu/decode.rs | 198 ++++++++--------- evm/src/cpu/jumps.rs | 18 +- evm/src/cpu/mod.rs | 1 + evm/src/cpu/simple_logic/eq_iszero.rs | 12 +- evm/src/cpu/simple_logic/not.rs | 6 +- evm/src/cpu/stack.rs | 307 ++++++++++++++++++++++++++ evm/src/cpu/stack_bounds.rs | 4 +- evm/src/cpu/syscalls.rs | 6 +- 12 files changed, 622 insertions(+), 249 deletions(-) create mode 100644 evm/src/cpu/columns/ops.rs create mode 100644 evm/src/cpu/stack.rs diff --git a/evm/src/cpu/columns/mod.rs b/evm/src/cpu/columns/mod.rs index 5204122e..d0ef3f28 100644 --- a/evm/src/cpu/columns/mod.rs +++ b/evm/src/cpu/columns/mod.rs @@ -7,11 +7,15 @@ use std::mem::{size_of, transmute}; use std::ops::{Index, IndexMut}; use crate::cpu::columns::general::CpuGeneralColumnsView; +use crate::cpu::columns::ops::OpsColumnsView; use crate::cpu::membus::NUM_GP_CHANNELS; use crate::memory; use crate::util::{indices_arr, transmute_no_compile_time_size_checks}; mod general; +pub(crate) mod ops; + +pub type MemValue = [T; memory::VALUE_LIMBS]; #[repr(C)] #[derive(Clone, Copy, Debug, Eq, PartialEq)] @@ -23,7 +27,7 @@ pub struct MemoryChannelView { pub addr_context: T, pub addr_segment: T, pub addr_virtual: T, - pub value: [T; memory::VALUE_LIMBS], + pub value: MemValue, } #[repr(C)] @@ -56,104 +60,9 @@ pub struct CpuColumnsView { /// If CPU cycle: We're in kernel (privileged) mode. pub is_kernel_mode: T, - // If CPU cycle: flags for EVM instructions. PUSHn, DUPn, and SWAPn only get one flag each. - // Invalid opcodes are split between a number of flags for practical reasons. Exactly one of - // these flags must be 1. - pub is_stop: T, - pub is_add: T, - pub is_mul: T, - pub is_sub: T, - pub is_div: T, - pub is_sdiv: T, - pub is_mod: T, - pub is_smod: T, - pub is_addmod: T, - pub is_mulmod: T, - pub is_exp: T, - pub is_signextend: T, - pub is_lt: T, - pub is_gt: T, - pub is_slt: T, - pub is_sgt: T, - pub is_eq: T, // Note: This column must be 0 when is_cpu_cycle = 0. - pub is_iszero: T, // Note: This column must be 0 when is_cpu_cycle = 0. - pub is_and: T, - pub is_or: T, - pub is_xor: T, - pub is_not: T, - pub is_byte: T, - pub is_shl: T, - pub is_shr: T, - pub is_sar: T, - pub is_keccak256: T, - pub is_address: T, - pub is_balance: T, - pub is_origin: T, - pub is_caller: T, - pub is_callvalue: T, - pub is_calldataload: T, - pub is_calldatasize: T, - pub is_calldatacopy: T, - pub is_codesize: T, - pub is_codecopy: T, - pub is_gasprice: T, - pub is_extcodesize: T, - pub is_extcodecopy: T, - pub is_returndatasize: T, - pub is_returndatacopy: T, - pub is_extcodehash: T, - pub is_blockhash: T, - pub is_coinbase: T, - pub is_timestamp: T, - pub is_number: T, - pub is_difficulty: T, - pub is_gaslimit: T, - pub is_chainid: T, - pub is_selfbalance: T, - pub is_basefee: T, - pub is_prover_input: T, - pub is_pop: T, - pub is_mload: T, - pub is_mstore: T, - pub is_mstore8: T, - pub is_sload: T, - pub is_sstore: T, - pub is_jump: T, // Note: This column must be 0 when is_cpu_cycle = 0. - pub is_jumpi: T, // Note: This column must be 0 when is_cpu_cycle = 0. 
- pub is_pc: T, - pub is_msize: T, - pub is_gas: T, - pub is_jumpdest: T, - pub is_get_state_root: T, - pub is_set_state_root: T, - pub is_get_receipt_root: T, - pub is_set_receipt_root: T, - pub is_push: T, - pub is_dup: T, - pub is_swap: T, - pub is_log0: T, - pub is_log1: T, - pub is_log2: T, - pub is_log3: T, - pub is_log4: T, - // PANIC does not get a flag; it fails at the decode stage. - pub is_create: T, - pub is_call: T, - pub is_callcode: T, - pub is_return: T, - pub is_delegatecall: T, - pub is_create2: T, - pub is_get_context: T, - pub is_set_context: T, - pub is_consume_gas: T, - pub is_exit_kernel: T, - pub is_staticcall: T, - pub is_mload_general: T, - pub is_mstore_general: T, - pub is_revert: T, - pub is_selfdestruct: T, - - pub is_invalid: T, + /// If CPU cycle: flags for EVM instructions (a few cannot be shared; see the comments in + /// `OpsColumnsView`). + pub op: OpsColumnsView, /// If CPU cycle: the opcode, broken up into bits in little-endian order. pub opcode_bits: [T; 8], diff --git a/evm/src/cpu/columns/ops.rs b/evm/src/cpu/columns/ops.rs new file mode 100644 index 00000000..28087b9e --- /dev/null +++ b/evm/src/cpu/columns/ops.rs @@ -0,0 +1,153 @@ +use std::borrow::{Borrow, BorrowMut}; +use std::mem::{size_of, transmute}; +use std::ops::{Deref, DerefMut}; + +use crate::util::{indices_arr, transmute_no_compile_time_size_checks}; + +#[repr(C)] +#[derive(Eq, PartialEq, Debug)] +pub struct OpsColumnsView { + pub stop: T, + pub add: T, + pub mul: T, + pub sub: T, + pub div: T, + pub sdiv: T, + pub mod_: T, + pub smod: T, + pub addmod: T, + pub mulmod: T, + pub exp: T, + pub signextend: T, + pub lt: T, + pub gt: T, + pub slt: T, + pub sgt: T, + pub eq: T, // Note: This column must be 0 when is_cpu_cycle = 0. + pub iszero: T, // Note: This column must be 0 when is_cpu_cycle = 0. + pub and: T, + pub or: T, + pub xor: T, + pub not: T, + pub byte: T, + pub shl: T, + pub shr: T, + pub sar: T, + pub keccak256: T, + pub address: T, + pub balance: T, + pub origin: T, + pub caller: T, + pub callvalue: T, + pub calldataload: T, + pub calldatasize: T, + pub calldatacopy: T, + pub codesize: T, + pub codecopy: T, + pub gasprice: T, + pub extcodesize: T, + pub extcodecopy: T, + pub returndatasize: T, + pub returndatacopy: T, + pub extcodehash: T, + pub blockhash: T, + pub coinbase: T, + pub timestamp: T, + pub number: T, + pub difficulty: T, + pub gaslimit: T, + pub chainid: T, + pub selfbalance: T, + pub basefee: T, + pub prover_input: T, + pub pop: T, + pub mload: T, + pub mstore: T, + pub mstore8: T, + pub sload: T, + pub sstore: T, + pub jump: T, // Note: This column must be 0 when is_cpu_cycle = 0. + pub jumpi: T, // Note: This column must be 0 when is_cpu_cycle = 0. + pub pc: T, + pub msize: T, + pub gas: T, + pub jumpdest: T, + pub get_state_root: T, + pub set_state_root: T, + pub get_receipt_root: T, + pub set_receipt_root: T, + pub push: T, + pub dup: T, + pub swap: T, + pub log0: T, + pub log1: T, + pub log2: T, + pub log3: T, + pub log4: T, + // PANIC does not get a flag; it fails at the decode stage. + pub create: T, + pub call: T, + pub callcode: T, + pub return_: T, + pub delegatecall: T, + pub create2: T, + pub get_context: T, + pub set_context: T, + pub consume_gas: T, + pub exit_kernel: T, + pub staticcall: T, + pub mload_general: T, + pub mstore_general: T, + pub revert: T, + pub selfdestruct: T, + + // TODO: this doesn't actually need its own flag. We can just do `1 - sum(all other flags)`. + pub invalid: T, +} + +// `u8` is guaranteed to have a `size_of` of 1. 
+pub const NUM_OPS_COLUMNS: usize = size_of::>(); + +impl From<[T; NUM_OPS_COLUMNS]> for OpsColumnsView { + fn from(value: [T; NUM_OPS_COLUMNS]) -> Self { + unsafe { transmute_no_compile_time_size_checks(value) } + } +} + +impl From> for [T; NUM_OPS_COLUMNS] { + fn from(value: OpsColumnsView) -> Self { + unsafe { transmute_no_compile_time_size_checks(value) } + } +} + +impl Borrow> for [T; NUM_OPS_COLUMNS] { + fn borrow(&self) -> &OpsColumnsView { + unsafe { transmute(self) } + } +} + +impl BorrowMut> for [T; NUM_OPS_COLUMNS] { + fn borrow_mut(&mut self) -> &mut OpsColumnsView { + unsafe { transmute(self) } + } +} + +impl Deref for OpsColumnsView { + type Target = [T; NUM_OPS_COLUMNS]; + fn deref(&self) -> &Self::Target { + unsafe { transmute(self) } + } +} + +impl DerefMut for OpsColumnsView { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { transmute(self) } + } +} + +const fn make_col_map() -> OpsColumnsView { + let indices_arr = indices_arr::(); + unsafe { transmute::<[usize; NUM_OPS_COLUMNS], OpsColumnsView>(indices_arr) } +} + +pub const COL_MAP: OpsColumnsView = make_col_map(); diff --git a/evm/src/cpu/control_flow.rs b/evm/src/cpu/control_flow.rs index 5a43f7cf..4ab9b91a 100644 --- a/evm/src/cpu/control_flow.rs +++ b/evm/src/cpu/control_flow.rs @@ -10,31 +10,31 @@ use crate::cpu::kernel::aggregator::KERNEL; // TODO: This list is incomplete. const NATIVE_INSTRUCTIONS: [usize; 25] = [ - COL_MAP.is_add, - COL_MAP.is_mul, - COL_MAP.is_sub, - COL_MAP.is_div, - COL_MAP.is_sdiv, - COL_MAP.is_mod, - COL_MAP.is_smod, - COL_MAP.is_addmod, - COL_MAP.is_mulmod, - COL_MAP.is_signextend, - COL_MAP.is_lt, - COL_MAP.is_gt, - COL_MAP.is_slt, - COL_MAP.is_sgt, - COL_MAP.is_eq, - COL_MAP.is_iszero, - COL_MAP.is_and, - COL_MAP.is_or, - COL_MAP.is_xor, - COL_MAP.is_not, - COL_MAP.is_byte, - COL_MAP.is_shl, - COL_MAP.is_shr, - COL_MAP.is_sar, - COL_MAP.is_pop, + COL_MAP.op.add, + COL_MAP.op.mul, + COL_MAP.op.sub, + COL_MAP.op.div, + COL_MAP.op.sdiv, + COL_MAP.op.mod_, + COL_MAP.op.smod, + COL_MAP.op.addmod, + COL_MAP.op.mulmod, + COL_MAP.op.signextend, + COL_MAP.op.lt, + COL_MAP.op.gt, + COL_MAP.op.slt, + COL_MAP.op.sgt, + COL_MAP.op.eq, + COL_MAP.op.iszero, + COL_MAP.op.and, + COL_MAP.op.or, + COL_MAP.op.xor, + COL_MAP.op.not, + COL_MAP.op.byte, + COL_MAP.op.shl, + COL_MAP.op.shr, + COL_MAP.op.sar, + COL_MAP.op.pop, ]; fn get_halt_pcs() -> (F, F) { diff --git a/evm/src/cpu/cpu_stark.rs b/evm/src/cpu/cpu_stark.rs index 5f702317..7ee204ca 100644 --- a/evm/src/cpu/cpu_stark.rs +++ b/evm/src/cpu/cpu_stark.rs @@ -11,7 +11,8 @@ use plonky2::hash::hash_types::RichField; use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; use crate::cpu::columns::{CpuColumnsView, COL_MAP, NUM_CPU_COLUMNS}; use crate::cpu::{ - bootstrap_kernel, control_flow, decode, jumps, membus, simple_logic, stack_bounds, syscalls, + bootstrap_kernel, control_flow, decode, jumps, membus, simple_logic, stack, stack_bounds, + syscalls, }; use crate::cross_table_lookup::Column; use crate::memory::segments::Segment; @@ -50,7 +51,7 @@ pub fn ctl_filter_keccak_memory() -> Column { } pub fn ctl_data_logic() -> Vec> { - let mut res = Column::singles([COL_MAP.is_and, COL_MAP.is_or, COL_MAP.is_xor]).collect_vec(); + let mut res = Column::singles([COL_MAP.op.and, COL_MAP.op.or, COL_MAP.op.xor]).collect_vec(); res.extend(Column::singles(COL_MAP.mem_channels[0].value)); res.extend(Column::singles(COL_MAP.mem_channels[1].value)); res.extend(Column::singles(COL_MAP.mem_channels[2].value)); @@ -58,7 +59,7 @@ pub fn 
ctl_data_logic() -> Vec> { } pub fn ctl_filter_logic() -> Column { - Column::sum([COL_MAP.is_and, COL_MAP.is_or, COL_MAP.is_xor]) + Column::sum([COL_MAP.op.and, COL_MAP.op.or, COL_MAP.op.xor]) } pub const MEM_CODE_CHANNEL_IDX: usize = 0; @@ -149,6 +150,7 @@ impl, const D: usize> Stark for CpuStark, const D: usize> Stark for CpuStark(lv: &mut CpuColumnsView) { let cycle_filter = lv.is_cpu_cycle; if cycle_filter == F::ZERO { // These columns cannot be shared. - lv.is_eq = F::ZERO; - lv.is_iszero = F::ZERO; + lv.op.eq = F::ZERO; + lv.op.iszero = F::ZERO; return; } // This assert is not _strictly_ necessary, but I include it as a sanity check. @@ -196,7 +196,7 @@ pub fn generate(lv: &mut CpuColumnsView) { any_flag_set = any_flag_set || flag; } // is_invalid is a catch-all for opcodes we can't decode. - lv.is_invalid = F::from_bool(!any_flag_set); + lv.op.invalid = F::from_bool(!any_flag_set); } /// Break up an opcode (which is 8 bits long) into its eight bits. @@ -234,13 +234,13 @@ pub fn eval_packed_generic( let flag = lv[flag_col]; yield_constr.constraint(cycle_filter * flag * (flag - P::ONES)); } - yield_constr.constraint(cycle_filter * lv.is_invalid * (lv.is_invalid - P::ONES)); + yield_constr.constraint(cycle_filter * lv.op.invalid * (lv.op.invalid - P::ONES)); // Now check that exactly one is 1. let flag_sum: P = OPCODES .into_iter() .map(|(_, _, _, flag_col)| lv[flag_col]) .sum::
<P>
() - + lv.is_invalid; + + lv.op.invalid; yield_constr.constraint(cycle_filter * (P::ONES - flag_sum)); // Finally, classify all opcodes, together with the kernel flag, into blocks @@ -305,7 +305,7 @@ pub fn eval_ext_circuit, const D: usize>( yield_constr.constraint(builder, constr); } { - let constr = builder.mul_sub_extension(lv.is_invalid, lv.is_invalid, lv.is_invalid); + let constr = builder.mul_sub_extension(lv.op.invalid, lv.op.invalid, lv.op.invalid); let constr = builder.mul_extension(cycle_filter, constr); yield_constr.constraint(builder, constr); } @@ -316,7 +316,7 @@ pub fn eval_ext_circuit, const D: usize>( let flag = lv[flag_col]; constr = builder.sub_extension(constr, flag); } - constr = builder.sub_extension(constr, lv.is_invalid); + constr = builder.sub_extension(constr, lv.op.invalid); constr = builder.mul_extension(cycle_filter, constr); yield_constr.constraint(builder, constr); } diff --git a/evm/src/cpu/jumps.rs b/evm/src/cpu/jumps.rs index 219b39dd..fb13f83b 100644 --- a/evm/src/cpu/jumps.rs +++ b/evm/src/cpu/jumps.rs @@ -23,10 +23,10 @@ pub fn eval_packed_exit_kernel( // flag. The top 6 (32-bit) limbs are ignored (this is not part of the spec, but we trust the // kernel to set them to zero). yield_constr.constraint_transition( - lv.is_cpu_cycle * lv.is_exit_kernel * (input[0] - nv.program_counter), + lv.is_cpu_cycle * lv.op.exit_kernel * (input[0] - nv.program_counter), ); yield_constr.constraint_transition( - lv.is_cpu_cycle * lv.is_exit_kernel * (input[1] - nv.is_kernel_mode), + lv.is_cpu_cycle * lv.op.exit_kernel * (input[1] - nv.is_kernel_mode), ); } @@ -37,7 +37,7 @@ pub fn eval_ext_circuit_exit_kernel, const D: usize yield_constr: &mut RecursiveConstraintConsumer, ) { let input = lv.mem_channels[0].value; - let filter = builder.mul_extension(lv.is_cpu_cycle, lv.is_exit_kernel); + let filter = builder.mul_extension(lv.is_cpu_cycle, lv.op.exit_kernel); // If we are executing `EXIT_KERNEL` then we simply restore the program counter and kernel mode // flag. The top 6 (32-bit) limbs are ignored (this is not part of the spec, but we trust the @@ -60,16 +60,16 @@ pub fn eval_packed_jump_jumpi( let jumps_lv = lv.general.jumps(); let input0 = lv.mem_channels[0].value; let input1 = lv.mem_channels[1].value; - let filter = lv.is_jump + lv.is_jumpi; // `JUMP` or `JUMPI` + let filter = lv.op.jump + lv.op.jumpi; // `JUMP` or `JUMPI` // If `JUMP`, re-use the `JUMPI` logic, but setting the second input (the predicate) to be 1. // In other words, we implement `JUMP(addr)` as `JUMPI(addr, cond=1)`. - yield_constr.constraint(lv.is_jump * (input1[0] - P::ONES)); + yield_constr.constraint(lv.op.jump * (input1[0] - P::ONES)); for &limb in &input1[1..] { // Set all limbs (other than the least-significant limb) to 0. // NB: Technically, they don't have to be 0, as long as the sum // `input1[0] + ... + input1[7]` cannot overflow. - yield_constr.constraint(lv.is_jump * limb); + yield_constr.constraint(lv.op.jump * limb); } // Check `input0_upper_zero` @@ -162,19 +162,19 @@ pub fn eval_ext_circuit_jump_jumpi, const D: usize> let jumps_lv = lv.general.jumps(); let input0 = lv.mem_channels[0].value; let input1 = lv.mem_channels[1].value; - let filter = builder.add_extension(lv.is_jump, lv.is_jumpi); // `JUMP` or `JUMPI` + let filter = builder.add_extension(lv.op.jump, lv.op.jumpi); // `JUMP` or `JUMPI` // If `JUMP`, re-use the `JUMPI` logic, but setting the second input (the predicate) to be 1. // In other words, we implement `JUMP(addr)` as `JUMPI(addr, cond=1)`. 
{ - let constr = builder.mul_sub_extension(lv.is_jump, input1[0], lv.is_jump); + let constr = builder.mul_sub_extension(lv.op.jump, input1[0], lv.op.jump); yield_constr.constraint(builder, constr); } for &limb in &input1[1..] { // Set all limbs (other than the least-significant limb) to 0. // NB: Technically, they don't have to be 0, as long as the sum // `input1[0] + ... + input1[7]` cannot overflow. - let constr = builder.mul_extension(lv.is_jump, limb); + let constr = builder.mul_extension(lv.op.jump, limb); yield_constr.constraint(builder, constr); } diff --git a/evm/src/cpu/mod.rs b/evm/src/cpu/mod.rs index 7b1e4756..c5b7dd32 100644 --- a/evm/src/cpu/mod.rs +++ b/evm/src/cpu/mod.rs @@ -7,5 +7,6 @@ mod jumps; pub mod kernel; pub(crate) mod membus; mod simple_logic; +mod stack; mod stack_bounds; mod syscalls; diff --git a/evm/src/cpu/simple_logic/eq_iszero.rs b/evm/src/cpu/simple_logic/eq_iszero.rs index 6b7294a8..37e06248 100644 --- a/evm/src/cpu/simple_logic/eq_iszero.rs +++ b/evm/src/cpu/simple_logic/eq_iszero.rs @@ -10,8 +10,8 @@ use crate::cpu::columns::CpuColumnsView; pub fn generate(lv: &mut CpuColumnsView) { let input0 = lv.mem_channels[0].value; - let eq_filter = lv.is_eq.to_canonical_u64(); - let iszero_filter = lv.is_iszero.to_canonical_u64(); + let eq_filter = lv.op.eq.to_canonical_u64(); + let iszero_filter = lv.op.iszero.to_canonical_u64(); assert!(eq_filter <= 1); assert!(iszero_filter <= 1); assert!(eq_filter + iszero_filter <= 1); @@ -62,8 +62,8 @@ pub fn eval_packed( let input1 = lv.mem_channels[1].value; let output = lv.mem_channels[2].value; - let eq_filter = lv.is_eq; - let iszero_filter = lv.is_iszero; + let eq_filter = lv.op.eq; + let iszero_filter = lv.op.iszero; let eq_or_iszero_filter = eq_filter + iszero_filter; let equal = output[0]; @@ -110,8 +110,8 @@ pub fn eval_ext_circuit, const D: usize>( let input1 = lv.mem_channels[1].value; let output = lv.mem_channels[2].value; - let eq_filter = lv.is_eq; - let iszero_filter = lv.is_iszero; + let eq_filter = lv.op.eq; + let iszero_filter = lv.op.iszero; let eq_or_iszero_filter = builder.add_extension(eq_filter, iszero_filter); let equal = output[0]; diff --git a/evm/src/cpu/simple_logic/not.rs b/evm/src/cpu/simple_logic/not.rs index 83d43276..3b8a888f 100644 --- a/evm/src/cpu/simple_logic/not.rs +++ b/evm/src/cpu/simple_logic/not.rs @@ -11,7 +11,7 @@ const LIMB_SIZE: usize = 32; const ALL_1_LIMB: u64 = (1 << LIMB_SIZE) - 1; pub fn generate(lv: &mut CpuColumnsView) { - let is_not_filter = lv.is_not.to_canonical_u64(); + let is_not_filter = lv.op.not.to_canonical_u64(); if is_not_filter == 0 { return; } @@ -35,7 +35,7 @@ pub fn eval_packed( let input = lv.mem_channels[0].value; let output = lv.mem_channels[1].value; let cycle_filter = lv.is_cpu_cycle; - let is_not_filter = lv.is_not; + let is_not_filter = lv.op.not; let filter = cycle_filter * is_not_filter; for (input_limb, output_limb) in input.into_iter().zip(output) { yield_constr.constraint( @@ -52,7 +52,7 @@ pub fn eval_ext_circuit, const D: usize>( let input = lv.mem_channels[0].value; let output = lv.mem_channels[1].value; let cycle_filter = lv.is_cpu_cycle; - let is_not_filter = lv.is_not; + let is_not_filter = lv.op.not; let filter = builder.mul_extension(cycle_filter, is_not_filter); for (input_limb, output_limb) in input.into_iter().zip(output) { let constr = builder.add_extension(output_limb, input_limb); diff --git a/evm/src/cpu/stack.rs b/evm/src/cpu/stack.rs new file mode 100644 index 00000000..d478571a --- /dev/null +++ b/evm/src/cpu/stack.rs @@ -0,0 
+1,307 @@ +use itertools::izip; +use plonky2::field::extension::Extendable; +use plonky2::field::packed::PackedField; +use plonky2::field::types::Field; +use plonky2::hash::hash_types::RichField; +use plonky2::iop::ext_target::ExtensionTarget; + +use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; +use crate::cpu::columns::ops::OpsColumnsView; +use crate::cpu::columns::CpuColumnsView; +use crate::cpu::membus::NUM_GP_CHANNELS; +use crate::memory::segments::Segment; + +#[derive(Clone, Copy)] +struct StackBehavior { + num_pops: usize, + pushes: bool, + disable_other_channels: bool, +} + +const BASIC_UNARY_OP: Option = Some(StackBehavior { + num_pops: 1, + pushes: true, + disable_other_channels: true, +}); +const BASIC_BINARY_OP: Option = Some(StackBehavior { + num_pops: 2, + pushes: true, + disable_other_channels: true, +}); +const BASIC_TERNARY_OP: Option = Some(StackBehavior { + num_pops: 2, + pushes: true, + disable_other_channels: true, +}); + +const STACK_BEHAVIORS: OpsColumnsView> = OpsColumnsView { + stop: None, // TODO + add: BASIC_BINARY_OP, + mul: BASIC_BINARY_OP, + sub: BASIC_BINARY_OP, + div: BASIC_BINARY_OP, + sdiv: BASIC_BINARY_OP, + mod_: BASIC_BINARY_OP, + smod: BASIC_BINARY_OP, + addmod: BASIC_TERNARY_OP, + mulmod: BASIC_TERNARY_OP, + exp: None, // TODO + signextend: BASIC_BINARY_OP, + lt: BASIC_BINARY_OP, + gt: BASIC_BINARY_OP, + slt: BASIC_BINARY_OP, + sgt: BASIC_BINARY_OP, + eq: BASIC_BINARY_OP, + iszero: BASIC_UNARY_OP, + and: BASIC_BINARY_OP, + or: BASIC_BINARY_OP, + xor: BASIC_BINARY_OP, + not: BASIC_TERNARY_OP, + byte: BASIC_BINARY_OP, + shl: BASIC_BINARY_OP, + shr: BASIC_BINARY_OP, + sar: BASIC_BINARY_OP, + keccak256: None, // TODO + address: None, // TODO + balance: None, // TODO + origin: None, // TODO + caller: None, // TODO + callvalue: None, // TODO + calldataload: None, // TODO + calldatasize: None, // TODO + calldatacopy: None, // TODO + codesize: None, // TODO + codecopy: None, // TODO + gasprice: None, // TODO + extcodesize: None, // TODO + extcodecopy: None, // TODO + returndatasize: None, // TODO + returndatacopy: None, // TODO + extcodehash: None, // TODO + blockhash: None, // TODO + coinbase: None, // TODO + timestamp: None, // TODO + number: None, // TODO + difficulty: None, // TODO + gaslimit: None, // TODO + chainid: None, // TODO + selfbalance: None, // TODO + basefee: None, // TODO + prover_input: None, // TODO + pop: None, // TODO + mload: None, // TODO + mstore: None, // TODO + mstore8: None, // TODO + sload: None, // TODO + sstore: None, // TODO + jump: None, // TODO + jumpi: None, // TODO + pc: None, // TODO + msize: None, // TODO + gas: None, // TODO + jumpdest: None, // TODO + get_state_root: None, // TODO + set_state_root: None, // TODO + get_receipt_root: None, // TODO + set_receipt_root: None, // TODO + push: None, // TODO + dup: None, // TODO + swap: None, // TODO + log0: None, // TODO + log1: None, // TODO + log2: None, // TODO + log3: None, // TODO + log4: None, // TODO + create: None, // TODO + call: None, // TODO + callcode: None, // TODO + return_: None, // TODO + delegatecall: None, // TODO + create2: None, // TODO + get_context: None, // TODO + set_context: None, // TODO + consume_gas: None, // TODO + exit_kernel: None, // TODO + staticcall: None, // TODO + mload_general: None, // TODO + mstore_general: None, // TODO + revert: None, // TODO + selfdestruct: None, // TODO + invalid: None, // TODO +}; + +fn eval_packed_one( + lv: &CpuColumnsView

<P>,
+    filter: P,
+    stack_behavior: StackBehavior,
+    yield_constr: &mut ConstraintConsumer<P>
, +) { + let num_operands = stack_behavior.num_pops + (stack_behavior.pushes as usize); + assert!(num_operands <= NUM_GP_CHANNELS); + + // Pops + for i in 0..stack_behavior.num_pops { + let channel = lv.mem_channels[i]; + + yield_constr.constraint(filter * (channel.used - P::ONES)); + yield_constr.constraint(filter * (channel.is_read - P::ONES)); + + yield_constr.constraint(filter * (channel.addr_context - lv.context)); + yield_constr.constraint( + filter * (channel.addr_segment - P::Scalar::from_canonical_u64(Segment::Stack as u64)), + ); + // E.g. if `stack_len == 1` and `i == 0`, we want `add_virtual == 0`. + let addr_virtual = lv.stack_len - P::Scalar::from_canonical_usize(i + 1); + yield_constr.constraint(filter * (channel.addr_virtual - addr_virtual)); + } + + // Pushes + if stack_behavior.pushes { + let channel = lv.mem_channels[NUM_GP_CHANNELS - 1]; + + yield_constr.constraint(filter * (channel.used - P::ONES)); + yield_constr.constraint(filter * channel.is_read); + + yield_constr.constraint(filter * (channel.addr_context - lv.context)); + yield_constr.constraint( + filter * (channel.addr_segment - P::Scalar::from_canonical_u64(Segment::Stack as u64)), + ); + let addr_virtual = lv.stack_len - P::Scalar::from_canonical_usize(stack_behavior.num_pops); + yield_constr.constraint(filter * (channel.addr_virtual - addr_virtual)); + } + + // Unused channels + if stack_behavior.disable_other_channels { + for i in stack_behavior.num_pops..NUM_GP_CHANNELS - (stack_behavior.pushes as usize) { + let channel = lv.mem_channels[i]; + yield_constr.constraint(filter * channel.used); + } + } +} + +pub fn eval_packed( + lv: &CpuColumnsView

<P>,
+    yield_constr: &mut ConstraintConsumer<P>
, +) { + for (op, stack_behavior) in izip!(lv.op.into_iter(), STACK_BEHAVIORS.into_iter()) { + if let Some(stack_behavior) = stack_behavior { + let filter = lv.is_cpu_cycle * op; + eval_packed_one(lv, filter, stack_behavior, yield_constr); + } + } +} + +fn eval_ext_circuit_one, const D: usize>( + builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, + lv: &CpuColumnsView>, + filter: ExtensionTarget, + stack_behavior: StackBehavior, + yield_constr: &mut RecursiveConstraintConsumer, +) { + let num_operands = stack_behavior.num_pops + (stack_behavior.pushes as usize); + assert!(num_operands <= NUM_GP_CHANNELS); + + // Pops + for i in 0..stack_behavior.num_pops { + let channel = lv.mem_channels[i]; + + { + let constr = builder.mul_sub_extension(filter, channel.used, filter); + yield_constr.constraint(builder, constr); + } + { + let constr = builder.mul_sub_extension(filter, channel.is_read, filter); + yield_constr.constraint(builder, constr); + } + + { + let diff = builder.sub_extension(channel.addr_context, lv.context); + let constr = builder.mul_extension(filter, diff); + yield_constr.constraint(builder, constr); + } + { + let constr = builder.arithmetic_extension( + F::ONE, + -F::from_canonical_u64(Segment::Stack as u64), + filter, + channel.addr_segment, + filter, + ); + yield_constr.constraint(builder, constr); + } + { + let diff = builder.sub_extension(channel.addr_virtual, lv.stack_len); + let constr = builder.arithmetic_extension( + F::ONE, + F::from_canonical_usize(i + 1), + filter, + diff, + filter, + ); + yield_constr.constraint(builder, constr); + } + } + + // Pushes + if stack_behavior.pushes { + let channel = lv.mem_channels[NUM_GP_CHANNELS - 1]; + + { + let constr = builder.mul_sub_extension(filter, channel.used, filter); + yield_constr.constraint(builder, constr); + } + { + let constr = builder.mul_extension(filter, channel.is_read); + yield_constr.constraint(builder, constr); + } + + { + let diff = builder.sub_extension(channel.addr_context, lv.context); + let constr = builder.mul_extension(filter, diff); + yield_constr.constraint(builder, constr); + } + { + let constr = builder.arithmetic_extension( + F::ONE, + -F::from_canonical_u64(Segment::Stack as u64), + filter, + channel.addr_segment, + filter, + ); + yield_constr.constraint(builder, constr); + } + { + let diff = builder.sub_extension(channel.addr_virtual, lv.stack_len); + let constr = builder.arithmetic_extension( + F::ONE, + F::from_canonical_usize(stack_behavior.num_pops), + filter, + diff, + filter, + ); + yield_constr.constraint(builder, constr); + } + } + + // Unused channels + if stack_behavior.disable_other_channels { + for i in stack_behavior.num_pops..NUM_GP_CHANNELS - (stack_behavior.pushes as usize) { + let channel = lv.mem_channels[i]; + let constr = builder.mul_extension(filter, channel.used); + yield_constr.constraint(builder, constr); + } + } +} + +pub fn eval_ext_circuit, const D: usize>( + builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, + lv: &CpuColumnsView>, + yield_constr: &mut RecursiveConstraintConsumer, +) { + for (op, stack_behavior) in izip!(lv.op.into_iter(), STACK_BEHAVIORS.into_iter()) { + if let Some(stack_behavior) = stack_behavior { + let filter = builder.mul_extension(lv.is_cpu_cycle, op); + eval_ext_circuit_one(builder, lv, filter, stack_behavior, yield_constr); + } + } +} diff --git a/evm/src/cpu/stack_bounds.rs b/evm/src/cpu/stack_bounds.rs index 2c9c46eb..99734433 100644 --- a/evm/src/cpu/stack_bounds.rs +++ b/evm/src/cpu/stack_bounds.rs @@ -26,13 +26,13 @@ 
 const MAX_USER_STACK_SIZE: u64 = 1024;
 // Other operations that have a minimum stack size (e.g. `MULMOD`, which has three inputs) read
 // all their inputs from memory. On underflow, the cross-table lookup fails, as -1, ..., -17 are
 // invalid memory addresses.
-const DECREMENTING_FLAGS: [usize; 1] = [COL_MAP.is_pop];
+const DECREMENTING_FLAGS: [usize; 1] = [COL_MAP.op.pop];
 // Operations that increase the stack length by 1, but excluding:
 // - privileged (kernel-only) operations (superfluous; doesn't affect correctness),
 // - operations that transfer control from userspace to the kernel (required for correctness).
 // TODO: This list is incomplete.
-const INCREMENTING_FLAGS: [usize; 2] = [COL_MAP.is_pc, COL_MAP.is_dup];
+const INCREMENTING_FLAGS: [usize; 2] = [COL_MAP.op.pc, COL_MAP.op.dup];
 /// Calculates `lv.stack_len_bounds_aux`. Note that this must be run after decode.
 pub fn generate(lv: &mut CpuColumnsView) {
diff --git a/evm/src/cpu/syscalls.rs b/evm/src/cpu/syscalls.rs
index b0b63be8..0ac31ef6 100644
--- a/evm/src/cpu/syscalls.rs
+++ b/evm/src/cpu/syscalls.rs
@@ -18,9 +18,9 @@ const NUM_SYSCALLS: usize = 3;
 fn make_syscall_list() -> [(usize, usize); NUM_SYSCALLS] {
     let kernel = Lazy::force(&KERNEL);
     [
-        (COL_MAP.is_stop, "sys_stop"),
-        (COL_MAP.is_exp, "sys_exp"),
-        (COL_MAP.is_invalid, "handle_invalid"),
+        (COL_MAP.op.stop, "sys_stop"),
+        (COL_MAP.op.exp, "sys_exp"),
+        (COL_MAP.op.invalid, "handle_invalid"),
     ]
     .map(|(col_index, handler_name)| (col_index, kernel.global_labels[handler_name]))
 }
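
The `OpsColumnsView` added in `evm/src/cpu/columns/ops.rs` leans on a layout trick: a `#[repr(C)]` struct whose fields all have type `T` is layout-compatible with `[T; N]`, and `size_of::<OpsColumnsView<u8>>()` counts the fields because `u8` has size 1. Below is a minimal, self-contained Rust sketch of that pattern; `MiniOps` and `NUM_MINI_COLUMNS` are hypothetical names used only for illustration and are not part of this patch.

use std::borrow::Borrow;
use std::mem::{size_of, transmute};

// Hypothetical miniature of the OpsColumnsView layout: a #[repr(C)] struct in
// which every field has the same type T, so it is layout-compatible with [T; N].
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct MiniOps<T> {
    add: T,
    mul: T,
    invalid: T,
}

// `u8` is guaranteed to have a `size_of` of 1, so this counts the fields.
const NUM_MINI_COLUMNS: usize = size_of::<MiniOps<u8>>();

impl<T: Copy> Borrow<MiniOps<T>> for [T; NUM_MINI_COLUMNS] {
    fn borrow(&self) -> &MiniOps<T> {
        // Sound because MiniOps<T> is #[repr(C)] and consists of exactly
        // NUM_MINI_COLUMNS fields of type T.
        unsafe { transmute(self) }
    }
}

fn main() {
    assert_eq!(NUM_MINI_COLUMNS, 3);
    // A row of column values can be read through the named view.
    let row = [1u64, 0, 0];
    let ops: &MiniOps<u64> = row.borrow();
    assert_eq!(ops.add, 1);
    assert_eq!(ops.mul, 0);
    assert_eq!(ops.invalid, 0);
}

The same idea underlies `NUM_OPS_COLUMNS`, the `Borrow`/`Deref` impls, and `COL_MAP`, which is built by transmuting an index array into the view so that each flag column can be referred to by name (e.g. `COL_MAP.op.pop` in `stack_bounds.rs`).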
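The new `evm/src/cpu/stack.rs` is what actually connects the stack to memory: for every operation with a `StackBehavior`, each pop and the optional push are pinned to a specific virtual address in the `Segment::Stack` segment of the current context. The helper below is a hypothetical, plain-Rust restatement of the address arithmetic enforced by `eval_packed_one`; it is not part of the patch, and its `StackBehavior` is a simplified copy without `disable_other_channels`. It shows that the i-th pop reads address `stack_len - 1 - i`, and the push, when present, writes to `stack_len - num_pops`, the top of the stack after the pops.

/// Hypothetical helper (not part of this patch): the stack addresses that
/// `eval_packed_one` constrains each memory channel to use.
struct StackBehavior {
    num_pops: usize,
    pushes: bool,
}

/// Returns the virtual addresses read by the pops and, if the operation
/// pushes, the address written by the push, given the stack length at the
/// start of the cycle. All addresses are in the Stack segment of the
/// current context.
fn expected_stack_addresses(stack_len: usize, b: &StackBehavior) -> (Vec<usize>, Option<usize>) {
    // The i-th pop reads the i-th element from the top, i.e. virtual address
    // `stack_len - 1 - i` (e.g. with stack_len == 1 and i == 0, address 0).
    let pops = (0..b.num_pops).map(|i| stack_len - 1 - i).collect();
    // After removing `num_pops` elements, the pushed result (if any) goes to
    // the new top of the stack: `stack_len - num_pops`.
    let push = b.pushes.then(|| stack_len - b.num_pops);
    (pops, push)
}

fn main() {
    // ADD follows BASIC_BINARY_OP: two pops, one push.
    let add = StackBehavior { num_pops: 2, pushes: true };
    let (pops, push) = expected_stack_addresses(5, &add);
    assert_eq!(pops, vec![4, 3]); // reads the two topmost stack slots
    assert_eq!(push, Some(3)); // the result lands at the new top
}

Tying operand addresses to `stack_len` is also what makes underflow detectable: with too few elements on the stack, a pop targets an address below zero, and the memory cross-table lookup fails, as noted in the `stack_bounds.rs` comment above.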