From 815a02ab7563efc583b694fc984c3cb5e7ab1b63 Mon Sep 17 00:00:00 2001 From: Hamy Ratoanina Date: Fri, 11 Aug 2023 12:07:17 -0400 Subject: [PATCH] Remove is_cpu_cycle --- evm/src/cpu/columns/mod.rs | 4 ---- evm/src/cpu/columns/ops.rs | 6 +++--- evm/src/cpu/control_flow.rs | 29 ++++++++++++++--------------- evm/src/cpu/cpu_stark.rs | 2 +- evm/src/cpu/decode.rs | 12 ++++-------- evm/src/cpu/dup_swap.rs | 8 ++++---- evm/src/cpu/gas.rs | 28 ++++++++++++++-------------- evm/src/cpu/membus.rs | 12 +++++++----- evm/src/cpu/modfp254.rs | 7 ++----- evm/src/cpu/simple_logic/not.rs | 8 ++------ evm/src/cpu/stack.rs | 6 ++---- evm/src/cpu/stack_bounds.rs | 5 +++-- evm/src/witness/transition.rs | 1 - 13 files changed, 56 insertions(+), 72 deletions(-) diff --git a/evm/src/cpu/columns/mod.rs b/evm/src/cpu/columns/mod.rs index f073c78e..fecc8df9 100644 --- a/evm/src/cpu/columns/mod.rs +++ b/evm/src/cpu/columns/mod.rs @@ -38,10 +38,6 @@ pub struct CpuColumnsView { /// Filter. 1 if the row is part of bootstrapping the kernel code, 0 otherwise. pub is_bootstrap_kernel: T, - /// Filter. 1 if the row corresponds to a cycle of execution and 0 otherwise. - /// Lets us re-use columns in non-cycle rows. - pub is_cpu_cycle: T, - /// If CPU cycle: Current context. // TODO: this is currently unconstrained pub context: T, diff --git a/evm/src/cpu/columns/ops.rs b/evm/src/cpu/columns/ops.rs index f1637786..bab27ea8 100644 --- a/evm/src/cpu/columns/ops.rs +++ b/evm/src/cpu/columns/ops.rs @@ -43,13 +43,13 @@ pub struct OpsColumnsView { // TODO: combine GET_CONTEXT and SET_CONTEXT into one flag pub get_context: T, pub set_context: T, - pub exit_kernel: T, // Note: This column must be 0 when is_cpu_cycle = 0. + pub exit_kernel: T, // TODO: combine MLOAD_GENERAL and MSTORE_GENERAL into one flag pub mload_general: T, pub mstore_general: T, - pub syscall: T, // Note: This column must be 0 when is_cpu_cycle = 0. - pub exception: T, // Note: This column must be 0 when is_cpu_cycle = 0. + pub syscall: T, + pub exception: T, } // `u8` is guaranteed to have a `size_of` of 1. diff --git a/evm/src/cpu/control_flow.rs b/evm/src/cpu/control_flow.rs index e27b0710..fd0688b3 100644 --- a/evm/src/cpu/control_flow.rs +++ b/evm/src/cpu/control_flow.rs @@ -67,34 +67,33 @@ pub fn eval_packed_generic( nv: &CpuColumnsView
<P>, yield_constr: &mut ConstraintConsumer<P>
, ) { + let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum(); + let is_cpu_cycle_next: P = COL_MAP.op.iter().map(|&col_i| nv[col_i]).sum(); // Once we start executing instructions, then we continue until the end of the table. - yield_constr.constraint_transition(lv.is_cpu_cycle * (nv.is_cpu_cycle - P::ONES)); + yield_constr.constraint_transition(is_cpu_cycle * (is_cpu_cycle_next - P::ONES)); // If a row is a CPU cycle and executing a native instruction (implemented as a table row; not // microcoded) then the program counter is incremented by 1 to obtain the next row's program // counter. Also, the next row has the same kernel flag. let is_native_instruction: P = NATIVE_INSTRUCTIONS.iter().map(|&col_i| lv[col_i]).sum(); yield_constr.constraint_transition( - lv.is_cpu_cycle - * is_native_instruction - * (lv.program_counter - nv.program_counter + P::ONES), - ); - yield_constr.constraint_transition( - lv.is_cpu_cycle * is_native_instruction * (lv.is_kernel_mode - nv.is_kernel_mode), + is_native_instruction * (lv.program_counter - nv.program_counter + P::ONES), ); + yield_constr + .constraint_transition(is_native_instruction * (lv.is_kernel_mode - nv.is_kernel_mode)); // If a non-CPU cycle row is followed by a CPU cycle row, then: // - the `program_counter` of the CPU cycle row is `main` (the entry point of our kernel), // - execution is in kernel mode, and // - the stack is empty. - let is_last_noncpu_cycle = (lv.is_cpu_cycle - P::ONES) * nv.is_cpu_cycle; + let is_last_noncpu_cycle = (is_cpu_cycle - P::ONES) * is_cpu_cycle_next; let pc_diff = nv.program_counter - get_start_pc::(); yield_constr.constraint_transition(is_last_noncpu_cycle * pc_diff); yield_constr.constraint_transition(is_last_noncpu_cycle * (nv.is_kernel_mode - P::ONES)); yield_constr.constraint_transition(is_last_noncpu_cycle * nv.stack_len); // The last row must be a CPU cycle row. - yield_constr.constraint_last_row(lv.is_cpu_cycle - P::ONES); + yield_constr.constraint_last_row(is_cpu_cycle - P::ONES); // Also, the last row's `program_counter` must be inside the `halt` infinite loop. Note that // that loop consists of two instructions, so we must check for `halt` and `halt_inner` labels. let (halt_pc0, halt_pc1) = get_halt_pcs::(); @@ -110,9 +109,11 @@ pub fn eval_ext_circuit, const D: usize>( nv: &CpuColumnsView>, yield_constr: &mut RecursiveConstraintConsumer, ) { + let is_cpu_cycle = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i])); + let is_cpu_cycle_next = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| nv[col_i])); // Once we start executing instructions, then we continue until the end of the table. { - let constr = builder.mul_sub_extension(lv.is_cpu_cycle, nv.is_cpu_cycle, lv.is_cpu_cycle); + let constr = builder.mul_sub_extension(is_cpu_cycle, is_cpu_cycle_next, is_cpu_cycle); yield_constr.constraint_transition(builder, constr); } @@ -120,9 +121,7 @@ pub fn eval_ext_circuit, const D: usize>( // microcoded) then the program counter is incremented by 1 to obtain the next row's program // counter. Also, the next row has the same kernel flag. 
{ - let is_native_instruction = - builder.add_many_extension(NATIVE_INSTRUCTIONS.iter().map(|&col_i| lv[col_i])); - let filter = builder.mul_extension(lv.is_cpu_cycle, is_native_instruction); + let filter = builder.add_many_extension(NATIVE_INSTRUCTIONS.iter().map(|&col_i| lv[col_i])); let pc_diff = builder.sub_extension(lv.program_counter, nv.program_counter); let pc_constr = builder.mul_add_extension(filter, pc_diff, filter); yield_constr.constraint_transition(builder, pc_constr); @@ -137,7 +136,7 @@ pub fn eval_ext_circuit, const D: usize>( // - the stack is empty. { let is_last_noncpu_cycle = - builder.mul_sub_extension(lv.is_cpu_cycle, nv.is_cpu_cycle, nv.is_cpu_cycle); + builder.mul_sub_extension(is_cpu_cycle, is_cpu_cycle_next, is_cpu_cycle_next); // Start at `main`. let main = builder.constant_extension(get_start_pc::().into()); @@ -161,7 +160,7 @@ pub fn eval_ext_circuit, const D: usize>( // The last row must be a CPU cycle row. { let one = builder.one_extension(); - let constr = builder.sub_extension(lv.is_cpu_cycle, one); + let constr = builder.sub_extension(is_cpu_cycle, one); yield_constr.constraint_last_row(builder, constr); } // Also, the last row's `program_counter` must be inside the `halt` infinite loop. Note that diff --git a/evm/src/cpu/cpu_stark.rs b/evm/src/cpu/cpu_stark.rs index 25a3c5e7..7fd0c76f 100644 --- a/evm/src/cpu/cpu_stark.rs +++ b/evm/src/cpu/cpu_stark.rs @@ -200,7 +200,7 @@ pub fn ctl_data_gp_memory(channel: usize) -> Vec> { } pub fn ctl_filter_code_memory() -> Column { - Column::single(COL_MAP.is_cpu_cycle) + Column::sum(COL_MAP.op.iter()) } pub fn ctl_filter_gp_memory(channel: usize) -> Column { diff --git a/evm/src/cpu/decode.rs b/evm/src/cpu/decode.rs index 6969a1db..a2918d4c 100644 --- a/evm/src/cpu/decode.rs +++ b/evm/src/cpu/decode.rs @@ -60,12 +60,8 @@ const OPCODES: [(u8, usize, bool, usize); 32] = [ ]; pub fn generate(lv: &mut CpuColumnsView) { - let cycle_filter = lv.is_cpu_cycle; - if cycle_filter == F::ZERO { - // This column cannot be shared. - lv.op.eq_iszero = F::ZERO; - return; - } + let cycle_filter: F = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum(); + // This assert is not _strictly_ necessary, but I include it as a sanity check. assert_eq!(cycle_filter, F::ONE, "cycle_filter should be 0 or 1"); @@ -122,7 +118,7 @@ pub fn eval_packed_generic( lv: &CpuColumnsView
<P>, yield_constr: &mut ConstraintConsumer<P>
, ) { - let cycle_filter = lv.is_cpu_cycle; + let cycle_filter: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum(); // Ensure that the kernel flag is valid (either 0 or 1). let kernel_mode = lv.is_kernel_mode; @@ -192,7 +188,7 @@ pub fn eval_ext_circuit, const D: usize>( ) { let one = builder.one_extension(); - let cycle_filter = lv.is_cpu_cycle; + let cycle_filter = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i])); // Ensure that the kernel flag is valid (either 0 or 1). let kernel_mode = lv.is_kernel_mode; diff --git a/evm/src/cpu/dup_swap.rs b/evm/src/cpu/dup_swap.rs index 215d0638..8bc0d5a5 100644 --- a/evm/src/cpu/dup_swap.rs +++ b/evm/src/cpu/dup_swap.rs @@ -114,7 +114,7 @@ fn eval_packed_dup( lv: &CpuColumnsView
<P>, yield_constr: &mut ConstraintConsumer<P>
, ) { - let filter = lv.is_cpu_cycle * lv.op.dup; + let filter = lv.op.dup; let in_channel = &lv.mem_channels[0]; let out_channel = &lv.mem_channels[NUM_GP_CHANNELS - 1]; @@ -140,7 +140,7 @@ fn eval_ext_circuit_dup, const D: usize>( ) { let neg_one = builder.constant_extension(F::NEG_ONE.into()); - let filter = builder.mul_extension(lv.is_cpu_cycle, lv.op.dup); + let filter = lv.op.dup; let in_channel = &lv.mem_channels[0]; let out_channel = &lv.mem_channels[NUM_GP_CHANNELS - 1]; @@ -166,7 +166,7 @@ fn eval_packed_swap( ) { let n_plus_one = n + P::ONES; - let filter = lv.is_cpu_cycle * lv.op.swap; + let filter = lv.op.swap; let in1_channel = &lv.mem_channels[0]; let in2_channel = &lv.mem_channels[1]; @@ -192,7 +192,7 @@ fn eval_ext_circuit_swap, const D: usize>( let one = builder.one_extension(); let n_plus_one = builder.add_extension(n, one); - let filter = builder.mul_extension(lv.is_cpu_cycle, lv.op.swap); + let filter = lv.op.swap; let in1_channel = &lv.mem_channels[0]; let in2_channel = &lv.mem_channels[1]; diff --git a/evm/src/cpu/gas.rs b/evm/src/cpu/gas.rs index e42e1d17..75ece520 100644 --- a/evm/src/cpu/gas.rs +++ b/evm/src/cpu/gas.rs @@ -5,6 +5,7 @@ use plonky2::field::types::Field; use plonky2::hash::hash_types::RichField; use plonky2::iop::ext_target::ExtensionTarget; +use super::columns::COL_MAP; use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; use crate::cpu::columns::ops::OpsColumnsView; use crate::cpu::columns::CpuColumnsView; @@ -63,7 +64,7 @@ fn eval_packed_accumulate( ) { // Is it an instruction that we constrain here? // I.e., does it always cost a constant amount of gas? - let is_simple_instr: P = SIMPLE_OPCODES + let filter: P = SIMPLE_OPCODES .into_iter() .enumerate() .filter_map(|(i, maybe_cost)| { @@ -71,7 +72,6 @@ fn eval_packed_accumulate( maybe_cost.map(|_| lv.op[i]) }) .sum(); - let filter = lv.is_cpu_cycle * is_simple_instr; // How much gas did we use? let gas_used: P = SIMPLE_OPCODES @@ -88,16 +88,14 @@ fn eval_packed_accumulate( for (maybe_cost, op_flag) in izip!(SIMPLE_OPCODES.into_iter(), lv.op.into_iter()) { if let Some(cost) = maybe_cost { let cost = P::Scalar::from_canonical_u32(cost); - yield_constr - .constraint_transition(lv.is_cpu_cycle * op_flag * (nv.gas - lv.gas - cost)); + yield_constr.constraint_transition(op_flag * (nv.gas - lv.gas - cost)); } } // For jumps. let jump_gas_cost = P::Scalar::from_canonical_u32(G_MID.unwrap()) + lv.opcode_bits[0] * P::Scalar::from_canonical_u32(G_HIGH.unwrap() - G_MID.unwrap()); - yield_constr - .constraint_transition(lv.is_cpu_cycle * lv.op.jumps * (nv.gas - lv.gas - jump_gas_cost)); + yield_constr.constraint_transition(lv.op.jumps * (nv.gas - lv.gas - jump_gas_cost)); } fn eval_packed_init( @@ -105,8 +103,10 @@ fn eval_packed_init( nv: &CpuColumnsView
<P>, yield_constr: &mut ConstraintConsumer<P>
, ) { + let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum(); + let is_cpu_cycle_next: P = COL_MAP.op.iter().map(|&col_i| nv[col_i]).sum(); // `nv` is the first row that executes an instruction. - let filter = (lv.is_cpu_cycle - P::ONES) * nv.is_cpu_cycle; + let filter = (is_cpu_cycle - P::ONES) * is_cpu_cycle_next; // Set initial gas to zero. yield_constr.constraint_transition(filter * nv.gas); } @@ -128,7 +128,7 @@ fn eval_ext_circuit_accumulate, const D: usize>( ) { // Is it an instruction that we constrain here? // I.e., does it always cost a constant amount of gas? - let is_simple_instr = SIMPLE_OPCODES.into_iter().enumerate().fold( + let filter = SIMPLE_OPCODES.into_iter().enumerate().fold( builder.zero_extension(), |cumul, (i, maybe_cost)| { // Add flag `lv.op[i]` to the sum if `SIMPLE_OPCODES[i]` is `Some`. @@ -138,7 +138,6 @@ fn eval_ext_circuit_accumulate, const D: usize>( } }, ); - let filter = builder.mul_extension(lv.is_cpu_cycle, is_simple_instr); // How much gas did we use? let gas_used = SIMPLE_OPCODES.into_iter().enumerate().fold( @@ -161,21 +160,20 @@ fn eval_ext_circuit_accumulate, const D: usize>( for (maybe_cost, op_flag) in izip!(SIMPLE_OPCODES.into_iter(), lv.op.into_iter()) { if let Some(cost) = maybe_cost { - let filter = builder.mul_extension(lv.is_cpu_cycle, op_flag); let nv_lv_diff = builder.sub_extension(nv.gas, lv.gas); let constr = builder.arithmetic_extension( F::ONE, -F::from_canonical_u32(cost), - filter, + op_flag, nv_lv_diff, - filter, + op_flag, ); yield_constr.constraint_transition(builder, constr); } } // For jumps. - let filter = builder.mul_extension(lv.is_cpu_cycle, lv.op.jumps); + let filter = lv.op.jumps; let jump_gas_cost = builder.mul_const_extension( F::from_canonical_u32(G_HIGH.unwrap() - G_MID.unwrap()), lv.opcode_bits[0], @@ -196,7 +194,9 @@ fn eval_ext_circuit_init, const D: usize>( yield_constr: &mut RecursiveConstraintConsumer, ) { // `nv` is the first row that executes an instruction. - let filter = builder.mul_sub_extension(lv.is_cpu_cycle, nv.is_cpu_cycle, nv.is_cpu_cycle); + let is_cpu_cycle = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i])); + let is_cpu_cycle_next = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| nv[col_i])); + let filter = builder.mul_sub_extension(is_cpu_cycle, is_cpu_cycle_next, is_cpu_cycle_next); // Set initial gas to zero. let constr = builder.mul_extension(filter, nv.gas); yield_constr.constraint_transition(builder, constr); diff --git a/evm/src/cpu/membus.rs b/evm/src/cpu/membus.rs index 06835a49..8a703f0d 100644 --- a/evm/src/cpu/membus.rs +++ b/evm/src/cpu/membus.rs @@ -4,6 +4,7 @@ use plonky2::field::types::PrimeField64; use plonky2::hash::hash_types::RichField; use plonky2::iop::ext_target::ExtensionTarget; +use super::columns::COL_MAP; use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; use crate::cpu::columns::CpuColumnsView; @@ -34,7 +35,7 @@ pub const NUM_CHANNELS: usize = channel_indices::GP.end; /// Calculates `lv.stack_len_bounds_aux`. Note that this must be run after decode. pub fn generate(lv: &mut CpuColumnsView) { - let cycle_filter = lv.is_cpu_cycle; + let cycle_filter: F = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum(); if cycle_filter != F::ZERO { assert!(lv.is_kernel_mode.to_canonical_u64() <= 1); } @@ -48,11 +49,11 @@ pub fn eval_packed( lv: &CpuColumnsView
<P>, yield_constr: &mut ConstraintConsumer<P>
, ) { + let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum(); // Validate `lv.code_context`. It should be 0 if in kernel mode and `lv.context` if in user // mode. - yield_constr.constraint( - lv.is_cpu_cycle * (lv.code_context - (P::ONES - lv.is_kernel_mode) * lv.context), - ); + yield_constr + .constraint(is_cpu_cycle * (lv.code_context - (P::ONES - lv.is_kernel_mode) * lv.context)); // Validate `channel.used`. It should be binary. for channel in lv.mem_channels { @@ -69,7 +70,8 @@ pub fn eval_ext_circuit, const D: usize>( // mode. let diff = builder.sub_extension(lv.context, lv.code_context); let constr = builder.mul_sub_extension(lv.is_kernel_mode, lv.context, diff); - let filtered_constr = builder.mul_extension(lv.is_cpu_cycle, constr); + let is_cpu_cycle = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i])); + let filtered_constr = builder.mul_extension(is_cpu_cycle, constr); yield_constr.constraint(builder, filtered_constr); // Validate `channel.used`. It should be binary. diff --git a/evm/src/cpu/modfp254.rs b/evm/src/cpu/modfp254.rs index defbf862..e6a2815d 100644 --- a/evm/src/cpu/modfp254.rs +++ b/evm/src/cpu/modfp254.rs @@ -19,7 +19,7 @@ pub fn eval_packed( lv: &CpuColumnsView
<P>, yield_constr: &mut ConstraintConsumer<P>
, ) { - let filter = lv.is_cpu_cycle * (lv.op.addfp254 + lv.op.mulfp254 + lv.op.subfp254); + let filter = lv.op.addfp254 + lv.op.mulfp254 + lv.op.subfp254; // We want to use all the same logic as the usual mod operations, but without needing to read // the modulus from the stack. We simply constrain `mem_channels[2]` to be our prime (that's @@ -36,10 +36,7 @@ pub fn eval_ext_circuit, const D: usize>( lv: &CpuColumnsView>, yield_constr: &mut RecursiveConstraintConsumer, ) { - let filter = { - let flag_sum = builder.add_many_extension([lv.op.addfp254, lv.op.mulfp254, lv.op.subfp254]); - builder.mul_extension(lv.is_cpu_cycle, flag_sum) - }; + let filter = builder.add_many_extension([lv.op.addfp254, lv.op.mulfp254, lv.op.subfp254]); // We want to use all the same logic as the usual mod operations, but without needing to read // the modulus from the stack. We simply constrain `mem_channels[2]` to be our prime (that's diff --git a/evm/src/cpu/simple_logic/not.rs b/evm/src/cpu/simple_logic/not.rs index 16572e9c..0bfaa0b7 100644 --- a/evm/src/cpu/simple_logic/not.rs +++ b/evm/src/cpu/simple_logic/not.rs @@ -18,9 +18,7 @@ pub fn eval_packed( // This is simple: just do output = 0xffffffff - input. let input = lv.mem_channels[0].value; let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value; - let cycle_filter = lv.is_cpu_cycle; - let is_not_filter = lv.op.not; - let filter = cycle_filter * is_not_filter; + let filter = lv.op.not; for (input_limb, output_limb) in input.into_iter().zip(output) { yield_constr.constraint( filter * (output_limb + input_limb - P::Scalar::from_canonical_u64(ALL_1_LIMB)), @@ -35,9 +33,7 @@ pub fn eval_ext_circuit, const D: usize>( ) { let input = lv.mem_channels[0].value; let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value; - let cycle_filter = lv.is_cpu_cycle; - let is_not_filter = lv.op.not; - let filter = builder.mul_extension(cycle_filter, is_not_filter); + let filter = lv.op.not; for (input_limb, output_limb) in input.into_iter().zip(output) { let constr = builder.add_extension(output_limb, input_limb); let constr = builder.arithmetic_extension( diff --git a/evm/src/cpu/stack.rs b/evm/src/cpu/stack.rs index 8b33a458..760f4041 100644 --- a/evm/src/cpu/stack.rs +++ b/evm/src/cpu/stack.rs @@ -198,8 +198,7 @@ pub fn eval_packed( ) { for (op, stack_behavior) in izip!(lv.op.into_iter(), STACK_BEHAVIORS.into_iter()) { if let Some(stack_behavior) = stack_behavior { - let filter = lv.is_cpu_cycle * op; - eval_packed_one(lv, filter, stack_behavior, yield_constr); + eval_packed_one(lv, op, stack_behavior, yield_constr); } } } @@ -313,8 +312,7 @@ pub fn eval_ext_circuit, const D: usize>( ) { for (op, stack_behavior) in izip!(lv.op.into_iter(), STACK_BEHAVIORS.into_iter()) { if let Some(stack_behavior) = stack_behavior { - let filter = builder.mul_extension(lv.is_cpu_cycle, op); - eval_ext_circuit_one(builder, lv, filter, stack_behavior, yield_constr); + eval_ext_circuit_one(builder, lv, op, stack_behavior, yield_constr); } } } diff --git a/evm/src/cpu/stack_bounds.rs b/evm/src/cpu/stack_bounds.rs index 63a00b28..e66e6686 100644 --- a/evm/src/cpu/stack_bounds.rs +++ b/evm/src/cpu/stack_bounds.rs @@ -14,6 +14,7 @@ use plonky2::field::types::Field; use plonky2::hash::hash_types::RichField; use plonky2::iop::ext_target::ExtensionTarget; +use super::columns::COL_MAP; use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; use crate::cpu::columns::CpuColumnsView; @@ -27,7 +28,7 @@ pub fn eval_packed( // 1024 is valid. 
1025 means we've gone one over, which is necessary for overflow, as an EVM // opcode increases the stack length by at most one. - let filter = lv.is_cpu_cycle; + let filter: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum(); let diff = lv.stack_len - P::Scalar::from_canonical_usize(MAX_USER_STACK_SIZE + 1); let lhs = diff * lv.stack_len_bounds_aux; let rhs = P::ONES - lv.is_kernel_mode; @@ -44,7 +45,7 @@ pub fn eval_ext_circuit, const D: usize>( // 1024 is valid. 1025 means we've gone one over, which is necessary for overflow, as an EVM // opcode increases the stack length by at most one. - let filter = lv.is_cpu_cycle; + let filter = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i])); let lhs = builder.arithmetic_extension( F::ONE, diff --git a/evm/src/witness/transition.rs b/evm/src/witness/transition.rs index a838cbfd..a8425a66 100644 --- a/evm/src/witness/transition.rs +++ b/evm/src/witness/transition.rs @@ -243,7 +243,6 @@ fn perform_op( /// operation. It also returns the opcode. fn base_row(state: &mut GenerationState) -> (CpuColumnsView, u8) { let mut row: CpuColumnsView = CpuColumnsView::default(); - row.is_cpu_cycle = F::ONE; row.clock = F::from_canonical_usize(state.traces.clock()); row.context = F::from_canonical_usize(state.registers.context); row.program_counter = F::from_canonical_usize(state.registers.program_counter);
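
Note (illustration only, not part of the patch): the change appears to rely on the `op.*` flags being one-hot on execution rows and all zero elsewhere, so their sum reproduces the removed `is_cpu_cycle` filter. A minimal sketch of that derivation, reusing the crate's `COL_MAP` and `CpuColumnsView` as in the hunks above; the helper name `cpu_cycle_filter` is hypothetical:

    use plonky2::field::packed::PackedField;

    use crate::cpu::columns::{CpuColumnsView, COL_MAP};

    /// Hypothetical helper: rebuild the old `is_cpu_cycle` filter from the
    /// one-hot operation flags of the current row.
    fn cpu_cycle_filter<P: PackedField>(lv: &CpuColumnsView<P>) -> P {
        // The op flags are mutually exclusive 0/1 values, so their sum is
        // itself 0 or 1: exactly the value the dedicated column used to hold.
        COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum()
    }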