diff --git a/evm/src/cpu/columns/ops.rs b/evm/src/cpu/columns/ops.rs index b8a4d8a6..d4d753f7 100644 --- a/evm/src/cpu/columns/ops.rs +++ b/evm/src/cpu/columns/ops.rs @@ -28,9 +28,7 @@ pub struct OpsColumnsView { pub mstore_32bytes: T, pub mload_32bytes: T, pub exit_kernel: T, - // TODO: combine MLOAD_GENERAL and MSTORE_GENERAL into one flag - pub mload_general: T, - pub mstore_general: T, + pub m_op_general: T, pub syscall: T, pub exception: T, diff --git a/evm/src/cpu/control_flow.rs b/evm/src/cpu/control_flow.rs index 8d0ee264..eeabf6f0 100644 --- a/evm/src/cpu/control_flow.rs +++ b/evm/src/cpu/control_flow.rs @@ -8,7 +8,7 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer use crate::cpu::columns::{CpuColumnsView, COL_MAP}; use crate::cpu::kernel::aggregator::KERNEL; -const NATIVE_INSTRUCTIONS: [usize; 18] = [ +const NATIVE_INSTRUCTIONS: [usize; 17] = [ COL_MAP.op.binary_op, COL_MAP.op.ternary_op, COL_MAP.op.fp254_op, @@ -29,8 +29,7 @@ const NATIVE_INSTRUCTIONS: [usize; 18] = [ COL_MAP.op.swap, COL_MAP.op.context_op, // not EXIT_KERNEL (performs a jump) - COL_MAP.op.mload_general, - COL_MAP.op.mstore_general, + COL_MAP.op.m_op_general, // not SYSCALL (performs a jump) // not exceptions (also jump) ]; diff --git a/evm/src/cpu/cpu_stark.rs b/evm/src/cpu/cpu_stark.rs index 1579b3a1..2d2e9072 100644 --- a/evm/src/cpu/cpu_stark.rs +++ b/evm/src/cpu/cpu_stark.rs @@ -246,7 +246,7 @@ impl, const D: usize> Stark for CpuStark, const D: usize> Stark for CpuStark(lv: &mut CpuColumnsView) { @@ -99,6 +100,10 @@ pub fn generate(lv: &mut CpuColumnsView) { let flag = available && opcode_match; lv[col] = F::from_bool(flag); } + + if opcode == 0xfb || opcode == 0xfc { + lv.op.m_op_general = F::from_bool(kernel); + } } /// Break up an opcode (which is 8 bits long) into its eight bits. @@ -173,6 +178,20 @@ pub fn eval_packed_generic( // correct mode. 
yield_constr.constraint(lv[col] * (unavailable + opcode_mismatch)); } + + // Manually check lv.op.m_op_general + let opcode: P = lv + .opcode_bits + .into_iter() + .enumerate() + .map(|(i, bit)| bit * P::Scalar::from_canonical_u64(1 << i)) + .sum(); + yield_constr.constraint((P::ONES - kernel_mode) * lv.op.m_op_general); + + let m_op_constr = (opcode - P::Scalar::from_canonical_usize(0xfb_usize)) + * (opcode - P::Scalar::from_canonical_usize(0xfc_usize)) + * lv.op.m_op_general; + yield_constr.constraint(m_op_constr); } pub fn eval_ext_circuit, const D: usize>( @@ -251,4 +270,28 @@ pub fn eval_ext_circuit, const D: usize>( let constr = builder.mul_extension(lv[col], constr); yield_constr.constraint(builder, constr); } + + // Manually check lv.op.m_op_general + let opcode = lv + .opcode_bits + .into_iter() + .rev() + .fold(builder.zero_extension(), |cumul, bit| { + builder.mul_const_add_extension(F::TWO, cumul, bit) + }); + + let mload_opcode = builder.constant_extension(F::Extension::from_canonical_usize(0xfb_usize)); + let mstore_opcode = builder.constant_extension(F::Extension::from_canonical_usize(0xfc_usize)); + + let one_extension = builder.constant_extension(F::Extension::ONE); + let is_not_kernel_mode = builder.sub_extension(one_extension, kernel_mode); + let constr = builder.mul_extension(is_not_kernel_mode, lv.op.m_op_general); + yield_constr.constraint(builder, constr); + + let mload_constr = builder.sub_extension(opcode, mload_opcode); + let mstore_constr = builder.sub_extension(opcode, mstore_opcode); + let mut m_op_constr = builder.mul_extension(mload_constr, mstore_constr); + m_op_constr = builder.mul_extension(m_op_constr, lv.op.m_op_general); + + yield_constr.constraint(builder, m_op_constr); } diff --git a/evm/src/cpu/gas.rs b/evm/src/cpu/gas.rs index a4a499ad..51f375c0 100644 --- a/evm/src/cpu/gas.rs +++ b/evm/src/cpu/gas.rs @@ -40,8 +40,7 @@ const SIMPLE_OPCODES: OpsColumnsView> = OpsColumnsView { mstore_32bytes: KERNEL_ONLY_INSTR, mload_32bytes:
KERNEL_ONLY_INSTR, exit_kernel: None, - mload_general: KERNEL_ONLY_INSTR, - mstore_general: KERNEL_ONLY_INSTR, + m_op_general: KERNEL_ONLY_INSTR, syscall: None, exception: None, }; diff --git a/evm/src/cpu/memio.rs b/evm/src/cpu/memio.rs index 09490e87..aa3749ca 100644 --- a/evm/src/cpu/memio.rs +++ b/evm/src/cpu/memio.rs @@ -7,6 +7,7 @@ use plonky2::iop::ext_target::ExtensionTarget; use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; use crate::cpu::columns::CpuColumnsView; use crate::cpu::membus::NUM_GP_CHANNELS; +use crate::cpu::stack; fn get_addr(lv: &CpuColumnsView) -> (T, T, T) { let addr_context = lv.mem_channels[0].value[0]; @@ -17,9 +18,11 @@ fn get_addr(lv: &CpuColumnsView) -> (T, T, T) { fn eval_packed_load( lv: &CpuColumnsView

, + nv: &CpuColumnsView

, yield_constr: &mut ConstraintConsumer

, ) { - let filter = lv.op.mload_general; + // The opcode for MLOAD_GENERAL is 0xfb. If the operation is MLOAD_GENERAL, lv.opcode_bits[0] = 1 + let filter = lv.op.m_op_general * lv.opcode_bits[0]; let (addr_context, addr_segment, addr_virtual) = get_addr(lv); @@ -38,14 +41,25 @@ fn eval_packed_load( for &channel in &lv.mem_channels[4..NUM_GP_CHANNELS - 1] { yield_constr.constraint(filter * channel.used); } + + // Stack constraints + stack::eval_packed_one( + lv, + nv, + filter, + stack::MLOAD_GENERAL_OP.unwrap(), + yield_constr, + ); } fn eval_ext_circuit_load, const D: usize>( builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, lv: &CpuColumnsView>, + nv: &CpuColumnsView>, yield_constr: &mut RecursiveConstraintConsumer, ) { - let filter = lv.op.mload_general; + let mut filter = lv.op.m_op_general; + filter = builder.mul_extension(filter, lv.opcode_bits[0]); let (addr_context, addr_segment, addr_virtual) = get_addr(lv); @@ -82,13 +96,24 @@ fn eval_ext_circuit_load, const D: usize>( let constr = builder.mul_extension(filter, channel.used); yield_constr.constraint(builder, constr); } + + // Stack constraints + stack::eval_ext_circuit_one( + builder, + lv, + nv, + filter, + stack::MLOAD_GENERAL_OP.unwrap(), + yield_constr, + ); } fn eval_packed_store( lv: &CpuColumnsView

, + nv: &CpuColumnsView

, yield_constr: &mut ConstraintConsumer

, ) { - let filter = lv.op.mstore_general; + let filter = lv.op.m_op_general * (P::ONES - lv.opcode_bits[0]); let (addr_context, addr_segment, addr_virtual) = get_addr(lv); @@ -107,14 +132,27 @@ fn eval_packed_store( for &channel in &lv.mem_channels[5..] { yield_constr.constraint(filter * channel.used); } + + // Stack constraints + stack::eval_packed_one( + lv, + nv, + filter, + stack::MSTORE_GENERAL_OP.unwrap(), + yield_constr, + ); } fn eval_ext_circuit_store, const D: usize>( builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, lv: &CpuColumnsView>, + nv: &CpuColumnsView>, yield_constr: &mut RecursiveConstraintConsumer, ) { - let filter = lv.op.mstore_general; + let mut filter = lv.op.m_op_general; + let one = builder.one_extension(); + let minus = builder.sub_extension(one, lv.opcode_bits[0]); + filter = builder.mul_extension(filter, minus); let (addr_context, addr_segment, addr_virtual) = get_addr(lv); @@ -151,21 +189,33 @@ fn eval_ext_circuit_store, const D: usize>( let constr = builder.mul_extension(filter, channel.used); yield_constr.constraint(builder, constr); } + + // Stack constraints + stack::eval_ext_circuit_one( + builder, + lv, + nv, + filter, + stack::MSTORE_GENERAL_OP.unwrap(), + yield_constr, + ); } pub fn eval_packed( lv: &CpuColumnsView

, + nv: &CpuColumnsView

, yield_constr: &mut ConstraintConsumer

, ) { - eval_packed_load(lv, yield_constr); - eval_packed_store(lv, yield_constr); + eval_packed_load(lv, nv, yield_constr); + eval_packed_store(lv, nv, yield_constr); } pub fn eval_ext_circuit, const D: usize>( builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, lv: &CpuColumnsView>, + nv: &CpuColumnsView>, yield_constr: &mut RecursiveConstraintConsumer, ) { - eval_ext_circuit_load(builder, lv, yield_constr); - eval_ext_circuit_store(builder, lv, yield_constr); + eval_ext_circuit_load(builder, lv, nv, yield_constr); + eval_ext_circuit_store(builder, lv, nv, yield_constr); } diff --git a/evm/src/cpu/stack.rs b/evm/src/cpu/stack.rs index 765a7a07..28abf077 100644 --- a/evm/src/cpu/stack.rs +++ b/evm/src/cpu/stack.rs @@ -44,6 +44,18 @@ pub(crate) const JUMPI_OP: Option = Some(StackBehavior { disable_other_channels: false, }); +pub(crate) const MLOAD_GENERAL_OP: Option = Some(StackBehavior { + num_pops: 3, + pushes: true, + disable_other_channels: false, +}); + +pub(crate) const MSTORE_GENERAL_OP: Option = Some(StackBehavior { + num_pops: 4, + pushes: false, + disable_other_channels: false, +}); + // AUDITORS: If the value below is `None`, then the operation must be manually checked to ensure // that every general-purpose memory channel is either disabled or has its read flag and address // propertly constrained. 
The same applies when `disable_other_channels` is set to `false`, @@ -107,16 +119,7 @@ const STACK_BEHAVIORS: OpsColumnsView> = OpsColumnsView { pushes: false, disable_other_channels: true, }), - mload_general: Some(StackBehavior { - num_pops: 3, - pushes: true, - disable_other_channels: false, - }), - mstore_general: Some(StackBehavior { - num_pops: 4, - pushes: false, - disable_other_channels: false, - }), + m_op_general: None, syscall: Some(StackBehavior { num_pops: 0, pushes: true, diff --git a/evm/src/witness/transition.rs b/evm/src/witness/transition.rs index 9532aa33..e3e7b584 100644 --- a/evm/src/witness/transition.rs +++ b/evm/src/witness/transition.rs @@ -179,8 +179,7 @@ fn fill_op_flag(op: Operation, row: &mut CpuColumnsView) { Operation::Mload32Bytes => &mut flags.mload_32bytes, Operation::Mstore32Bytes => &mut flags.mstore_32bytes, Operation::ExitKernel => &mut flags.exit_kernel, - Operation::MloadGeneral => &mut flags.mload_general, - Operation::MstoreGeneral => &mut flags.mstore_general, + Operation::MloadGeneral | Operation::MstoreGeneral => &mut flags.m_op_general, } = F::ONE; }