diff --git a/evm/src/all_stark.rs b/evm/src/all_stark.rs
index e2a11ba2..1131d529 100644
--- a/evm/src/all_stark.rs
+++ b/evm/src/all_stark.rs
@@ -75,9 +75,7 @@ impl Table {
 
 #[allow(unused)] // TODO: Should be used soon.
 pub(crate) fn all_cross_table_lookups<F: Field>() -> Vec<CrossTableLookup<F>> {
-    let mut cross_table_lookups = vec![ctl_keccak(), ctl_logic()];
-    cross_table_lookups.extend((0..NUM_CHANNELS).map(ctl_memory));
-    cross_table_lookups
+    vec![ctl_keccak(), ctl_logic(), ctl_memory()]
 }
 
 fn ctl_keccak<F: Field>() -> CrossTableLookup<F> {
@@ -108,17 +106,21 @@ fn ctl_logic<F: Field>() -> CrossTableLookup<F> {
     )
 }
 
-fn ctl_memory<F: Field>(channel: usize) -> CrossTableLookup<F> {
+fn ctl_memory<F: Field>() -> CrossTableLookup<F> {
     CrossTableLookup::new(
-        vec![TableWithColumns::new(
-            Table::Cpu,
-            cpu_stark::ctl_data_memory(channel),
-            Some(cpu_stark::ctl_filter_memory(channel)),
-        )],
+        (0..NUM_CHANNELS)
+            .map(|channel| {
+                TableWithColumns::new(
+                    Table::Cpu,
+                    cpu_stark::ctl_data_memory(channel),
+                    Some(cpu_stark::ctl_filter_memory(channel)),
+                )
+            })
+            .collect(),
         TableWithColumns::new(
             Table::Memory,
             memory_stark::ctl_data(),
-            Some(memory_stark::ctl_filter(channel)),
+            Some(memory_stark::ctl_filter()),
         ),
         None,
     )
@@ -298,11 +300,11 @@ mod tests {
             let clock = mem_timestamp / NUM_CHANNELS;
             let channel = mem_timestamp % NUM_CHANNELS;
 
-            let is_padding_row = (0..NUM_CHANNELS)
-                .map(|c| memory_trace[memory::columns::is_channel(c)].values[i])
-                .all(|x| x == F::ZERO);
+            let filter = memory_trace[memory::columns::FILTER].values[i];
+            assert!(filter.is_one() || filter.is_zero());
+            let is_actual_op = filter.is_one();
 
-            if !is_padding_row {
+            if is_actual_op {
                 let row: &mut cpu::columns::CpuColumnsView<F> =
                     cpu_trace_rows[clock].borrow_mut();
                 row.mem_channel_used[channel] = F::ONE;
diff --git a/evm/src/generation/state.rs b/evm/src/generation/state.rs
index c7f1003e..04ab4016 100644
--- a/evm/src/generation/state.rs
+++ b/evm/src/generation/state.rs
@@ -59,11 +59,12 @@ impl<F: Field> GenerationState<F> {
         segment: Segment,
         virt: usize,
     ) -> U256 {
+        self.current_cpu_row.mem_channel_used[channel_index] = F::ONE;
         let timestamp = self.cpu_rows.len();
         let context = self.current_context;
         let value = self.memory.contexts[context].segments[segment as usize].get(virt);
         self.memory.log.push(MemoryOp {
-            channel_index: Some(channel_index),
+            filter: true,
             timestamp,
             is_read: true,
             context,
@@ -82,10 +83,11 @@ impl<F: Field> GenerationState<F> {
         virt: usize,
         value: U256,
     ) {
+        self.current_cpu_row.mem_channel_used[channel_index] = F::ONE;
         let timestamp = self.cpu_rows.len();
         let context = self.current_context;
         self.memory.log.push(MemoryOp {
-            channel_index: Some(channel_index),
+            filter: true,
             timestamp,
             is_read: false,
             context,
diff --git a/evm/src/memory/columns.rs b/evm/src/memory/columns.rs
index 7229a834..91cc8754 100644
--- a/evm/src/memory/columns.rs
+++ b/evm/src/memory/columns.rs
@@ -3,7 +3,9 @@
 use crate::memory::{NUM_CHANNELS, VALUE_LIMBS};
 
 // Columns for memory operations, ordered by (addr, timestamp).
-pub(crate) const TIMESTAMP: usize = 0;
+/// 1 if this is an actual memory operation, or 0 if it's a padding row.
+pub(crate) const FILTER: usize = 0;
+pub(crate) const TIMESTAMP: usize = FILTER + 1;
 pub(crate) const IS_READ: usize = TIMESTAMP + 1;
 pub(crate) const ADDR_CONTEXT: usize = IS_READ + 1;
 pub(crate) const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1;
@@ -25,15 +27,8 @@ pub(crate) const CONTEXT_FIRST_CHANGE: usize = VALUE_START + VALUE_LIMBS;
 pub(crate) const SEGMENT_FIRST_CHANGE: usize = CONTEXT_FIRST_CHANGE + 1;
 pub(crate) const VIRTUAL_FIRST_CHANGE: usize = SEGMENT_FIRST_CHANGE + 1;
 
-// Flags to indicate if this operation came from the `i`th channel of the memory bus.
-const IS_CHANNEL_START: usize = VIRTUAL_FIRST_CHANGE + 1;
-pub(crate) const fn is_channel(channel: usize) -> usize {
-    debug_assert!(channel < NUM_CHANNELS);
-    IS_CHANNEL_START + channel
-}
-
 // We use a range check to enforce the ordering.
-pub(crate) const RANGE_CHECK: usize = IS_CHANNEL_START + NUM_CHANNELS;
+pub(crate) const RANGE_CHECK: usize = VIRTUAL_FIRST_CHANGE + 1;
 // The counter column (used for the range check) starts from 0 and increments.
 pub(crate) const COUNTER: usize = RANGE_CHECK + 1;
 // Helper columns for the permutation argument used to enforce the range check.
diff --git a/evm/src/memory/memory_stark.rs b/evm/src/memory/memory_stark.rs
index 5a17ed20..8ed52ebb 100644
--- a/evm/src/memory/memory_stark.rs
+++ b/evm/src/memory/memory_stark.rs
@@ -16,12 +16,12 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
 use crate::cross_table_lookup::Column;
 use crate::lookup::{eval_lookups, eval_lookups_circuit, permuted_cols};
 use crate::memory::columns::{
-    is_channel, value_limb, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL, CONTEXT_FIRST_CHANGE,
-    COUNTER, COUNTER_PERMUTED, IS_READ, NUM_COLUMNS, RANGE_CHECK, RANGE_CHECK_PERMUTED,
+    value_limb, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL, CONTEXT_FIRST_CHANGE, COUNTER,
+    COUNTER_PERMUTED, FILTER, IS_READ, NUM_COLUMNS, RANGE_CHECK, RANGE_CHECK_PERMUTED,
     SEGMENT_FIRST_CHANGE, TIMESTAMP, VIRTUAL_FIRST_CHANGE,
 };
 use crate::memory::segments::Segment;
-use crate::memory::{NUM_CHANNELS, VALUE_LIMBS};
+use crate::memory::VALUE_LIMBS;
 use crate::permutation::PermutationPair;
 use crate::stark::Stark;
 use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
@@ -36,8 +36,8 @@ pub fn ctl_data<F: Field>() -> Vec<Column<F>> {
     res
 }
 
-pub fn ctl_filter<F: Field>(channel: usize) -> Column<F> {
-    Column::single(is_channel(channel))
+pub fn ctl_filter<F: Field>() -> Column<F> {
+    Column::single(FILTER)
 }
 
 #[derive(Copy, Clone, Default)]
 pub struct MemoryStark<F, const D: usize> {
@@ -47,8 +47,8 @@ pub struct MemoryStark<F, const D: usize> {
 
 #[derive(Clone, Debug)]
 pub(crate) struct MemoryOp {
-    /// The channel this operation came from, or `None` if it's a dummy operation for padding.
-    pub channel_index: Option<usize>,
+    /// true if this is an actual memory operation, or false if it's a padding row.
+    pub filter: bool,
     pub timestamp: usize,
     pub is_read: bool,
     pub context: usize,
@@ -64,9 +64,7 @@ impl MemoryOp {
     /// trace has been transposed into column-major form.
     fn to_row<F: Field>(&self) -> [F; NUM_COLUMNS] {
         let mut row = [F::ZERO; NUM_COLUMNS];
-        if let Some(channel) = self.channel_index {
-            row[is_channel(channel)] = F::ONE;
-        }
+        row[FILTER] = F::from_bool(self.filter);
         row[TIMESTAMP] = F::from_canonical_usize(self.timestamp);
         row[IS_READ] = F::from_bool(self.is_read);
         row[ADDR_CONTEXT] = F::from_canonical_usize(self.context);
@@ -178,12 +176,12 @@ impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
 
         // We essentially repeat the last operation until our operation list has the desired size,
         // with a few changes:
-        // - We change its channel to `None` to indicate that this is a dummy operation.
+        // - We change its filter to 0 to indicate that this is a dummy operation.
         // - We increment its timestamp in order to pass the ordering check.
         // - We make sure it's a read, since dummy operations must be reads.
         for i in 0..to_pad {
             memory_ops.push(MemoryOp {
-                channel_index: None,
+                filter: false,
                 timestamp: last_op.timestamp + i + 1,
                 is_read: true,
                 ..last_op
@@ -245,21 +243,13 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for MemoryStark<F, D>
         let next_values: Vec<_> = (0..8).map(|i| vars.next_values[value_limb(i)]).collect();
 
-        // Each `is_channel` value must be 0 or 1.
-        for c in 0..NUM_CHANNELS {
-            let is_channel = vars.local_values[is_channel(c)];
-            yield_constr.constraint(is_channel * (is_channel - P::ONES));
-        }
+        // The filter must be 0 or 1.
+        let filter = vars.local_values[FILTER];
+        yield_constr.constraint(filter * (filter - P::ONES));
 
-        // The sum of `is_channel` flags, `has_channel`, must also be 0 or 1.
-        let has_channel: P = (0..NUM_CHANNELS)
-            .map(|c| vars.local_values[is_channel(c)])
-            .sum();
-        yield_constr.constraint(has_channel * (has_channel - P::ONES));
-
-        // If this is a dummy row (with no channel), it must be a read. This means the prover can
+        // If this is a dummy row (filter is off), it must be a read. This means the prover can
         // insert reads which never appear in the CPU trace (which are harmless), but not writes.
-        let is_dummy = P::ONES - has_channel;
+        let is_dummy = P::ONES - filter;
         let is_write = P::ONES - vars.local_values[IS_READ];
         yield_constr.constraint(is_dummy * is_write);
@@ -330,22 +320,14 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for MemoryStark<F, D>
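
Review note (not part of the patch): the two row constraints this change introduces, `filter * (filter - P::ONES)` and `is_dummy * is_write`, can be sanity-checked outside the STARK. Below is a minimal standalone sketch, with plain i64 values standing in for field elements; `check_row` is a hypothetical helper written for this note, not code from the repo.

    // Mirrors the two constraints added in eval_packed_generic, over the
    // integers rather than a field.
    fn check_row(filter: i64, is_read: i64) -> bool {
        // The filter must be 0 or 1: filter * (filter - 1) == 0.
        let filter_is_boolean = filter * (filter - 1) == 0;
        // A dummy row (filter == 0) must be a read, so padding can never
        // inject writes: (1 - filter) * (1 - is_read) == 0.
        let dummy_rows_are_reads = (1 - filter) * (1 - is_read) == 0;
        filter_is_boolean && dummy_rows_are_reads
    }

    fn main() {
        assert!(check_row(1, 1)); // real read
        assert!(check_row(1, 0)); // real write
        assert!(check_row(0, 1)); // padding read: harmless, allowed
        assert!(!check_row(0, 0)); // padding write: rejected
        assert!(!check_row(2, 1)); // non-boolean filter: rejected
    }

Together these replace the old per-channel checks: instead of NUM_CHANNELS boolean columns plus a booleanity check on their sum, a single boolean FILTER column distinguishes real operations from padding rows.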