Simpler CPU <-> memory CTL

Daniel Lubarov 2022-08-23 17:24:35 -07:00
parent 8e220ac623
commit c38a98f9e4
5 changed files with 47 additions and 65 deletions
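
In short, this commit collapses the memory table's per-channel one-hot `is_channel` flags into a single boolean `FILTER` column, moving channel bookkeeping to the CPU table's `mem_channel_used` columns. Below is a minimal standalone model of that row-shape change (all types illustrative, not the real trace representation); the channel of a real operation is instead implied by the shared timestamp, as the tests further down rely on.

// Minimal standalone model of the row-shape change; all types illustrative.
const NUM_CHANNELS: usize = 4;

// Before: a one-hot flag per memory-bus channel; all zeros meant padding.
struct OldMemoryRow {
    is_channel: [bool; NUM_CHANNELS],
    timestamp: usize,
}

// After: a single filter bit distinguishes real operations from padding.
struct NewMemoryRow {
    filter: bool,
    timestamp: usize,
}

fn main() {
    let old = OldMemoryRow { is_channel: [false, true, false, false], timestamp: 9 };
    let new = NewMemoryRow { filter: true, timestamp: 9 };
    assert_eq!(old.timestamp, new.timestamp);
    // The channel is no longer stored per row; with timestamps laid out as
    // clock * NUM_CHANNELS + channel, it is implied by the timestamp.
    assert!(new.filter);
    assert!(old.is_channel[new.timestamp % NUM_CHANNELS]);
}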

View File

@@ -75,9 +75,7 @@ impl Table {
 #[allow(unused)] // TODO: Should be used soon.
 pub(crate) fn all_cross_table_lookups<F: Field>() -> Vec<CrossTableLookup<F>> {
-    let mut cross_table_lookups = vec![ctl_keccak(), ctl_logic()];
-    cross_table_lookups.extend((0..NUM_CHANNELS).map(ctl_memory));
-    cross_table_lookups
+    vec![ctl_keccak(), ctl_logic(), ctl_memory()]
 }
 fn ctl_keccak<F: Field>() -> CrossTableLookup<F> {
@@ -108,17 +106,21 @@ fn ctl_logic<F: Field>() -> CrossTableLookup<F> {
     )
 }
-fn ctl_memory<F: Field>(channel: usize) -> CrossTableLookup<F> {
+fn ctl_memory<F: Field>() -> CrossTableLookup<F> {
     CrossTableLookup::new(
-        vec![TableWithColumns::new(
-            Table::Cpu,
-            cpu_stark::ctl_data_memory(channel),
-            Some(cpu_stark::ctl_filter_memory(channel)),
-        )],
+        (0..NUM_CHANNELS)
+            .map(|channel| {
+                TableWithColumns::new(
+                    Table::Cpu,
+                    cpu_stark::ctl_data_memory(channel),
+                    Some(cpu_stark::ctl_filter_memory(channel)),
+                )
+            })
+            .collect(),
         TableWithColumns::new(
             Table::Memory,
             memory_stark::ctl_data(),
-            Some(memory_stark::ctl_filter(channel)),
+            Some(memory_stark::ctl_filter()),
         ),
         None,
     )
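
The effect of the hunk above: instead of one cross-table lookup per channel, a single lookup now carries NUM_CHANNELS looking (CPU-side) column sets against one looked (memory-side) set gated by the new FILTER column. A rough standalone model of that shape, with stand-in types rather than the real `CrossTableLookup` API:

// Stand-in types modeling the consolidated memory CTL; not the real API.
const NUM_CHANNELS: usize = 4;

#[derive(Debug)]
struct TableWithColumns {
    table: &'static str,
    columns: String, // labels standing in for the real column expressions
    filter: String,
}

#[derive(Debug)]
struct CrossTableLookup {
    looking_tables: Vec<TableWithColumns>,
    looked_table: TableWithColumns,
}

fn ctl_memory_sketch() -> CrossTableLookup {
    CrossTableLookup {
        // One looking entry per CPU memory channel, all feeding one lookup.
        looking_tables: (0..NUM_CHANNELS)
            .map(|channel| TableWithColumns {
                table: "Cpu",
                columns: format!("ctl_data_memory({channel})"),
                filter: format!("ctl_filter_memory({channel})"),
            })
            .collect(),
        // A single looked entry on the memory side, gated by FILTER.
        looked_table: TableWithColumns {
            table: "Memory",
            columns: "ctl_data()".to_owned(),
            filter: "FILTER".to_owned(),
        },
    }
}

fn main() {
    let ctl = ctl_memory_sketch();
    assert_eq!(ctl.looking_tables.len(), NUM_CHANNELS);
    println!("{ctl:#?}");
}
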
@@ -298,11 +300,11 @@ mod tests {
         let clock = mem_timestamp / NUM_CHANNELS;
         let channel = mem_timestamp % NUM_CHANNELS;
-        let is_padding_row = (0..NUM_CHANNELS)
-            .map(|c| memory_trace[memory::columns::is_channel(c)].values[i])
-            .all(|x| x == F::ZERO);
+        let filter = memory_trace[memory::columns::FILTER].values[i];
+        assert!(filter.is_one() || filter.is_zero());
+        let is_actual_op = filter.is_one();
-        if !is_padding_row {
+        if is_actual_op {
             let row: &mut cpu::columns::CpuColumnsView<F> = cpu_trace_rows[clock].borrow_mut();
             row.mem_channel_used[channel] = F::ONE;
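
The test recovers (clock, channel) from the memory timestamp, which works because each CPU cycle owns NUM_CHANNELS consecutive timestamps, one per channel. A quick standalone check of that round-trip:

// Each cycle owns NUM_CHANNELS consecutive timestamps, one per channel.
const NUM_CHANNELS: usize = 4;

fn main() {
    for clock in 0..3 {
        for channel in 0..NUM_CHANNELS {
            let timestamp = clock * NUM_CHANNELS + channel;
            // The decoding used in the test above inverts the encoding.
            assert_eq!(timestamp / NUM_CHANNELS, clock);
            assert_eq!(timestamp % NUM_CHANNELS, channel);
        }
    }
}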

View File

@@ -59,11 +59,12 @@ impl<F: Field> GenerationState<F> {
         segment: Segment,
         virt: usize,
     ) -> U256 {
+        self.current_cpu_row.mem_channel_used[channel_index] = F::ONE;
         let timestamp = self.cpu_rows.len();
         let context = self.current_context;
         let value = self.memory.contexts[context].segments[segment as usize].get(virt);
         self.memory.log.push(MemoryOp {
-            channel_index: Some(channel_index),
+            filter: true,
             timestamp,
             is_read: true,
             context,
@@ -82,10 +83,11 @@ impl<F: Field> GenerationState<F> {
         virt: usize,
         value: U256,
     ) {
+        self.current_cpu_row.mem_channel_used[channel_index] = F::ONE;
         let timestamp = self.cpu_rows.len();
         let context = self.current_context;
         self.memory.log.push(MemoryOp {
-            channel_index: Some(channel_index),
+            filter: true,
             timestamp,
             is_read: false,
             context,
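
In both hunks above, the generation-time bookkeeping splits in two: channel usage is recorded on the current CPU row, while the memory log entry only carries a boolean filter. A self-contained model of that split, with field elements modeled as u64 and all names illustrative rather than the real crate's API:

// Standalone model of the generation-time bookkeeping; names illustrative.
const NUM_CHANNELS: usize = 4;

#[derive(Default)]
struct CpuRow {
    mem_channel_used: [u64; NUM_CHANNELS],
}

struct MemoryOp {
    filter: bool, // true for real operations, false for padding rows
    timestamp: usize,
    is_read: bool,
}

struct State {
    current_cpu_row: CpuRow,
    clock: usize,
    log: Vec<MemoryOp>,
}

impl State {
    fn mem_read(&mut self, channel_index: usize) {
        // The channel flag now lives on the CPU side, not in the memory table.
        self.current_cpu_row.mem_channel_used[channel_index] = 1;
        self.log.push(MemoryOp {
            filter: true, // real operation, so it participates in the CTL
            timestamp: self.clock,
            is_read: true,
        });
    }
}

fn main() {
    let mut state = State {
        current_cpu_row: CpuRow::default(),
        clock: 0,
        log: vec![],
    };
    state.mem_read(2);
    assert_eq!(state.current_cpu_row.mem_channel_used, [0, 0, 1, 0]);
    assert!(state.log[0].filter && state.log[0].is_read);
    assert_eq!(state.log[0].timestamp, 0);
}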

View File

@@ -3,7 +3,9 @@
 use crate::memory::{NUM_CHANNELS, VALUE_LIMBS};
 // Columns for memory operations, ordered by (addr, timestamp).
-pub(crate) const TIMESTAMP: usize = 0;
+/// 1 if this is an actual memory operation, or 0 if it's a padding row.
+pub(crate) const FILTER: usize = 0;
+pub(crate) const TIMESTAMP: usize = FILTER + 1;
 pub(crate) const IS_READ: usize = TIMESTAMP + 1;
 pub(crate) const ADDR_CONTEXT: usize = IS_READ + 1;
 pub(crate) const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1;
@@ -25,15 +27,8 @@ pub(crate) const CONTEXT_FIRST_CHANGE: usize = VALUE_START + VALUE_LIMBS;
 pub(crate) const SEGMENT_FIRST_CHANGE: usize = CONTEXT_FIRST_CHANGE + 1;
 pub(crate) const VIRTUAL_FIRST_CHANGE: usize = SEGMENT_FIRST_CHANGE + 1;
-// Flags to indicate if this operation came from the `i`th channel of the memory bus.
-const IS_CHANNEL_START: usize = VIRTUAL_FIRST_CHANGE + 1;
-pub(crate) const fn is_channel(channel: usize) -> usize {
-    debug_assert!(channel < NUM_CHANNELS);
-    IS_CHANNEL_START + channel
-}
 // We use a range check to enforce the ordering.
-pub(crate) const RANGE_CHECK: usize = IS_CHANNEL_START + NUM_CHANNELS;
+pub(crate) const RANGE_CHECK: usize = VIRTUAL_FIRST_CHANGE + NUM_CHANNELS;
 // The counter column (used for the range check) starts from 0 and increments.
 pub(crate) const COUNTER: usize = RANGE_CHECK + 1;
 // Helper columns for the permutation argument used to enforce the range check.
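
The layout above uses the chained-const idiom: each column index is defined relative to its predecessor, so inserting FILTER at index 0 shifts everything after it automatically. A standalone mirror of the idiom (the VALUE_START definition is an assumption for illustration):

// Mirror of the chained-const column-layout idiom; VALUE_START's definition
// is assumed here for illustration.
const VALUE_LIMBS: usize = 8;

const FILTER: usize = 0;
const TIMESTAMP: usize = FILTER + 1;
const IS_READ: usize = TIMESTAMP + 1;
const ADDR_CONTEXT: usize = IS_READ + 1;
const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1;
const ADDR_VIRTUAL: usize = ADDR_SEGMENT + 1;
const VALUE_START: usize = ADDR_VIRTUAL + 1; // assumed
const CONTEXT_FIRST_CHANGE: usize = VALUE_START + VALUE_LIMBS;

fn main() {
    // Chaining yields consecutive, collision-free indices.
    assert_eq!(
        [FILTER, TIMESTAMP, IS_READ, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL],
        [0, 1, 2, 3, 4, 5]
    );
    assert_eq!(CONTEXT_FIRST_CHANGE, 14);
}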

View File

@@ -16,12 +16,12 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer
 use crate::cross_table_lookup::Column;
 use crate::lookup::{eval_lookups, eval_lookups_circuit, permuted_cols};
 use crate::memory::columns::{
-    is_channel, value_limb, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL, CONTEXT_FIRST_CHANGE,
-    COUNTER, COUNTER_PERMUTED, IS_READ, NUM_COLUMNS, RANGE_CHECK, RANGE_CHECK_PERMUTED,
+    value_limb, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL, CONTEXT_FIRST_CHANGE, COUNTER,
+    COUNTER_PERMUTED, FILTER, IS_READ, NUM_COLUMNS, RANGE_CHECK, RANGE_CHECK_PERMUTED,
     SEGMENT_FIRST_CHANGE, TIMESTAMP, VIRTUAL_FIRST_CHANGE,
 };
 use crate::memory::segments::Segment;
-use crate::memory::{NUM_CHANNELS, VALUE_LIMBS};
+use crate::memory::VALUE_LIMBS;
 use crate::permutation::PermutationPair;
 use crate::stark::Stark;
 use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
@@ -36,8 +36,8 @@ pub fn ctl_data<F: Field>() -> Vec<Column<F>> {
     res
 }
-pub fn ctl_filter<F: Field>(channel: usize) -> Column<F> {
-    Column::single(is_channel(channel))
+pub fn ctl_filter<F: Field>() -> Column<F> {
+    Column::single(FILTER)
 }
 #[derive(Copy, Clone, Default)]
@@ -47,8 +47,8 @@ pub struct MemoryStark<F, const D: usize> {
 #[derive(Clone, Debug)]
 pub(crate) struct MemoryOp {
-    /// The channel this operation came from, or `None` if it's a dummy operation for padding.
-    pub channel_index: Option<usize>,
+    /// true if this is an actual memory operation, or false if it's a padding row.
+    pub filter: bool,
     pub timestamp: usize,
     pub is_read: bool,
     pub context: usize,
@@ -64,9 +64,7 @@ impl MemoryOp {
     /// trace has been transposed into column-major form.
     fn to_row<F: Field>(&self) -> [F; NUM_COLUMNS] {
         let mut row = [F::ZERO; NUM_COLUMNS];
-        if let Some(channel) = self.channel_index {
-            row[is_channel(channel)] = F::ONE;
-        }
+        row[FILTER] = F::from_bool(self.filter);
         row[TIMESTAMP] = F::from_canonical_usize(self.timestamp);
         row[IS_READ] = F::from_bool(self.is_read);
         row[ADDR_CONTEXT] = F::from_canonical_usize(self.context);
@@ -178,12 +176,12 @@ impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
         // We essentially repeat the last operation until our operation list has the desired size,
         // with a few changes:
-        // - We change its channel to `None` to indicate that this is a dummy operation.
+        // - We change its filter to 0 to indicate that this is a dummy operation.
         // - We increment its timestamp in order to pass the ordering check.
         // - We make sure it's a read, since dummy operations must be reads.
         for i in 0..to_pad {
             memory_ops.push(MemoryOp {
-                channel_index: None,
+                filter: false,
                 timestamp: last_op.timestamp + i + 1,
                 is_read: true,
                 ..last_op
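
The padding rule above as a self-contained sketch: replay the last real operation with the filter off, a strictly increasing timestamp, and forced to be a read. The MemoryOp here is illustrative, with a stand-in value field for everything carried over unchanged.

// Self-contained sketch of the padding rule, with an illustrative MemoryOp.
#[derive(Clone)]
struct MemoryOp {
    filter: bool,
    timestamp: usize,
    is_read: bool,
    value: u64, // stand-in for the fields carried over unchanged
}

fn pad(ops: &mut Vec<MemoryOp>, target_len: usize) {
    let last = ops.last().expect("at least one real op").clone();
    let to_pad = target_len.saturating_sub(ops.len());
    for i in 0..to_pad {
        ops.push(MemoryOp {
            filter: false,                     // dummy row: excluded from the CTL
            timestamp: last.timestamp + i + 1, // strictly increasing, passes ordering
            is_read: true,                     // dummy operations must be reads
            ..last.clone()
        });
    }
}

fn main() {
    let mut ops = vec![MemoryOp { filter: true, timestamp: 7, is_read: false, value: 42 }];
    pad(&mut ops, 4);
    assert_eq!(ops.len(), 4);
    assert!(ops[1..].iter().all(|op| !op.filter && op.is_read && op.value == 42));
    assert_eq!(ops.last().unwrap().timestamp, 10);
}
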
@@ -245,21 +243,13 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for MemoryStark<F
         let next_addr_virtual = vars.next_values[ADDR_VIRTUAL];
         let next_values: Vec<_> = (0..8).map(|i| vars.next_values[value_limb(i)]).collect();
-        // Each `is_channel` value must be 0 or 1.
-        for c in 0..NUM_CHANNELS {
-            let is_channel = vars.local_values[is_channel(c)];
-            yield_constr.constraint(is_channel * (is_channel - P::ONES));
-        }
+        // The filter must be 0 or 1.
+        let filter = vars.local_values[FILTER];
+        yield_constr.constraint(filter * (filter - P::ONES));
-        // The sum of `is_channel` flags, `has_channel`, must also be 0 or 1.
-        let has_channel: P = (0..NUM_CHANNELS)
-            .map(|c| vars.local_values[is_channel(c)])
-            .sum();
-        yield_constr.constraint(has_channel * (has_channel - P::ONES));
-        // If this is a dummy row (with no channel), it must be a read. This means the prover can
+        // If this is a dummy row (filter is off), it must be a read. This means the prover can
         // insert reads which never appear in the CPU trace (which are harmless), but not writes.
-        let is_dummy = P::ONES - has_channel;
+        let is_dummy = P::ONES - filter;
         let is_write = P::ONES - vars.local_values[IS_READ];
         yield_constr.constraint(is_dummy * is_write);
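
In plain algebra, the two new constraints are filter · (filter − 1) = 0 and (1 − filter) · (1 − is_read) = 0. A toy evaluation over plain integers (standing in for field elements) showing which rows satisfy them:

// Toy evaluation of the two filter constraints over plain integers.
fn constraints(filter: i64, is_read: i64) -> [i64; 2] {
    [
        filter * (filter - 1),        // booleanness of FILTER
        (1 - filter) * (1 - is_read), // dummy rows must be reads
    ]
}

fn main() {
    // Real read, real write, dummy read: all satisfy both constraints.
    assert_eq!(constraints(1, 1), [0, 0]);
    assert_eq!(constraints(1, 0), [0, 0]);
    assert_eq!(constraints(0, 1), [0, 0]);
    // A dummy write violates the second constraint.
    assert_ne!(constraints(0, 0)[1], 0);
}
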
@@ -330,22 +320,14 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for MemoryStark<F
         let next_is_read = vars.next_values[IS_READ];
         let next_timestamp = vars.next_values[TIMESTAMP];
-        // Each `is_channel` value must be 0 or 1.
-        for c in 0..NUM_CHANNELS {
-            let is_channel = vars.local_values[is_channel(c)];
-            let constraint = builder.mul_sub_extension(is_channel, is_channel, is_channel);
-            yield_constr.constraint(builder, constraint);
-        }
+        // The filter must be 0 or 1.
+        let filter = vars.local_values[FILTER];
+        let constraint = builder.mul_sub_extension(filter, filter, filter);
+        yield_constr.constraint(builder, constraint);
-        // The sum of `is_channel` flags, `has_channel`, must also be 0 or 1.
-        let has_channel =
-            builder.add_many_extension((0..NUM_CHANNELS).map(|c| vars.local_values[is_channel(c)]));
-        let has_channel_bool = builder.mul_sub_extension(has_channel, has_channel, has_channel);
-        yield_constr.constraint(builder, has_channel_bool);
-        // If this is a dummy row (with no channel), it must be a read. This means the prover can
+        // If this is a dummy row (filter is off), it must be a read. This means the prover can
         // insert reads which never appear in the CPU trace (which are harmless), but not writes.
-        let is_dummy = builder.sub_extension(one, has_channel);
+        let is_dummy = builder.sub_extension(one, filter);
         let is_write = builder.sub_extension(one, vars.local_values[IS_READ]);
         let is_dummy_write = builder.mul_extension(is_dummy, is_write);
         yield_constr.constraint(builder, is_dummy_write);
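
The recursive version expresses the same booleanness check with circuit gadgets; assuming `mul_sub_extension(a, b, c)` computes a · b − c (as its name suggests), the emitted constraint is filter · filter − filter. A standalone check of that identity:

// Stand-in for the circuit gadget: assumes mul_sub(a, b, c) = a * b - c.
fn mul_sub(a: i64, b: i64, c: i64) -> i64 {
    a * b - c
}

fn main() {
    // x * x - x vanishes exactly on boolean values...
    for filter in [0, 1] {
        assert_eq!(mul_sub(filter, filter, filter), 0);
    }
    // ...and is nonzero otherwise.
    assert_ne!(mul_sub(2, 2, 2), 0);
}
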
@@ -532,7 +514,7 @@ pub(crate) mod tests {
         let timestamp = clock * NUM_CHANNELS + channel_index;
         memory_ops.push(MemoryOp {
-            channel_index: Some(channel_index),
+            filter: true,
             timestamp,
             is_read,
             context,

View File

@@ -2,5 +2,6 @@ pub mod columns;
 pub mod memory_stark;
 pub mod segments;
+// TODO: Move to CPU module, now that channels have been removed from the memory table.
 pub(crate) const NUM_CHANNELS: usize = 4;
 pub(crate) const VALUE_LIMBS: usize = 8;