Merge pull request #571 from mir-protocol/evm_memory

EVM memory stark
This commit is contained in:
Nicholas Ward 2022-06-23 14:50:30 -07:00 committed by GitHub
commit af0e32506a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 997 additions and 17 deletions

View File

@ -6,6 +6,7 @@ use crate::cpu::cpu_stark::CpuStark;
use crate::cross_table_lookup::CrossTableLookup;
use crate::keccak::keccak_stark::KeccakStark;
use crate::logic::LogicStark;
use crate::memory::memory_stark::MemoryStark;
use crate::stark::Stark;
#[derive(Clone)]
@ -13,6 +14,7 @@ pub struct AllStark<F: RichField + Extendable<D>, const D: usize> {
pub cpu_stark: CpuStark<F, D>,
pub keccak_stark: KeccakStark<F, D>,
pub logic_stark: LogicStark<F, D>,
pub memory_stark: MemoryStark<F, D>,
pub cross_table_lookups: Vec<CrossTableLookup<F>>,
}
@ -22,6 +24,7 @@ impl<F: RichField + Extendable<D>, const D: usize> AllStark<F, D> {
self.cpu_stark.num_permutation_batches(config),
self.keccak_stark.num_permutation_batches(config),
self.logic_stark.num_permutation_batches(config),
self.memory_stark.num_permutation_batches(config),
];
debug_assert_eq!(ans.len(), Table::num_tables());
ans
@ -32,6 +35,7 @@ impl<F: RichField + Extendable<D>, const D: usize> AllStark<F, D> {
self.cpu_stark.permutation_batch_size(),
self.keccak_stark.permutation_batch_size(),
self.logic_stark.permutation_batch_size(),
self.memory_stark.permutation_batch_size(),
];
debug_assert_eq!(ans.len(), Table::num_tables());
ans
@ -43,11 +47,12 @@ pub enum Table {
Cpu = 0,
Keccak = 1,
Logic = 2,
Memory = 3,
}
impl Table {
pub(crate) fn num_tables() -> usize {
Table::Logic as usize + 1
Table::Memory as usize + 1
}
}
@ -66,13 +71,16 @@ mod tests {
use crate::all_stark::{AllStark, Table};
use crate::config::StarkConfig;
use crate::cpu::columns::{KECCAK_INPUT_LIMBS, KECCAK_OUTPUT_LIMBS};
use crate::cpu::columns::{KECCAK_INPUT_LIMBS, KECCAK_OUTPUT_LIMBS, NUM_MEMORY_OPS};
use crate::cpu::cpu_stark::{self as cpu_stark_mod, CpuStark};
use crate::cross_table_lookup::{CrossTableLookup, TableWithColumns};
use crate::keccak::keccak_stark::{
self as keccak_stark_mod, KeccakStark, NUM_INPUTS, NUM_ROUNDS,
};
use crate::logic::{self, LogicStark};
use crate::memory::memory_stark::{
self as memory_stark_mod, generate_random_memory_ops, MemoryStark,
};
use crate::proof::AllProof;
use crate::prover::prove;
use crate::recursive_verifier::{
@ -81,7 +89,7 @@ mod tests {
use crate::stark::Stark;
use crate::util::trace_rows_to_poly_values;
use crate::verifier::verify_proof;
use crate::{cpu, keccak};
use crate::{cpu, keccak, memory};
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
@ -131,12 +139,23 @@ mod tests {
trace_rows_to_poly_values(trace_rows)
}
fn make_memory_trace<R: Rng>(
    num_memory_ops: usize,
    memory_stark: &MemoryStark<F, D>,
    rng: &mut R,
) -> Vec<PolynomialValues<F>> {
    // Draw a random-but-consistent op log and convert it straight to trace polynomials.
    memory_stark.generate_trace(generate_random_memory_ops(num_memory_ops, rng))
}
fn make_cpu_trace(
num_keccak_perms: usize,
num_logic_rows: usize,
num_memory_ops: usize,
cpu_stark: &CpuStark<F, D>,
keccak_trace: &[PolynomialValues<F>],
logic_trace: &[PolynomialValues<F>],
memory_trace: &mut [PolynomialValues<F>],
) -> Vec<PolynomialValues<F>> {
let keccak_input_limbs: Vec<[F; 2 * NUM_INPUTS]> = (0..num_keccak_perms)
.map(|i| {
@ -199,6 +218,35 @@ mod tests {
cpu_stark.generate(&mut row);
cpu_trace_rows.push(row);
}
let mut current_cpu_index = 0;
let mut last_timestamp = memory_trace[memory::registers::TIMESTAMP].values[0];
for i in 0..num_memory_ops {
let mem_timestamp = memory_trace[memory::registers::TIMESTAMP].values[i];
let clock = mem_timestamp;
let op = (0..4)
.filter(|&o| memory_trace[memory::registers::is_memop(o)].values[i] == F::ONE)
.collect_vec()[0];
if mem_timestamp != last_timestamp {
current_cpu_index += 1;
last_timestamp = mem_timestamp;
}
cpu_trace_rows[current_cpu_index][cpu::columns::uses_memop(op)] = F::ONE;
cpu_trace_rows[current_cpu_index][cpu::columns::CLOCK] = clock;
cpu_trace_rows[current_cpu_index][cpu::columns::memop_is_read(op)] =
memory_trace[memory::registers::IS_READ].values[i];
cpu_trace_rows[current_cpu_index][cpu::columns::memop_addr_context(op)] =
memory_trace[memory::registers::ADDR_CONTEXT].values[i];
cpu_trace_rows[current_cpu_index][cpu::columns::memop_addr_segment(op)] =
memory_trace[memory::registers::ADDR_SEGMENT].values[i];
cpu_trace_rows[current_cpu_index][cpu::columns::memop_addr_virtual(op)] =
memory_trace[memory::registers::ADDR_VIRTUAL].values[i];
for j in 0..8 {
cpu_trace_rows[current_cpu_index][cpu::columns::memop_value(op, j)] =
memory_trace[memory::registers::value_limb(j)].values[i];
}
}
trace_rows_to_poly_values(cpu_trace_rows)
}
@ -216,20 +264,28 @@ mod tests {
};
let num_logic_rows = 62;
let memory_stark = MemoryStark::<F, D> {
f: Default::default(),
};
let num_memory_ops = 1 << 5;
let mut rng = thread_rng();
let num_keccak_perms = 2;
let keccak_trace = make_keccak_trace(num_keccak_perms, &keccak_stark, &mut rng);
let logic_trace = make_logic_trace(num_logic_rows, &logic_stark, &mut rng);
let mut memory_trace = make_memory_trace(num_memory_ops, &memory_stark, &mut rng);
let cpu_trace = make_cpu_trace(
num_keccak_perms,
num_logic_rows,
num_memory_ops,
&cpu_stark,
&keccak_trace,
&logic_trace,
&mut memory_trace,
);
let cross_table_lookups = vec![
let mut cross_table_lookups = vec![
CrossTableLookup::new(
vec![TableWithColumns::new(
Table::Cpu,
@ -253,19 +309,35 @@ mod tests {
None,
),
];
cross_table_lookups.extend((0..NUM_MEMORY_OPS).map(|op| {
CrossTableLookup::new(
vec![TableWithColumns::new(
Table::Cpu,
cpu_stark_mod::ctl_data_memory(op),
Some(cpu_stark_mod::ctl_filter_memory(op)),
)],
TableWithColumns::new(
Table::Memory,
memory_stark_mod::ctl_data(),
Some(memory_stark_mod::ctl_filter(op)),
),
None,
)
}));
let all_stark = AllStark {
cpu_stark,
keccak_stark,
logic_stark,
memory_stark,
cross_table_lookups,
};
let proof = prove::<F, C, D>(
&all_stark,
config,
vec![cpu_trace, keccak_trace, logic_trace],
vec![vec![]; 3],
vec![cpu_trace, keccak_trace, logic_trace, memory_trace],
vec![vec![]; 4],
&mut TimingTree::default(),
)?;

View File

@ -1,3 +1,6 @@
// TODO: remove when possible.
#![allow(dead_code)]
use std::ops::Range;
/// Filter. 1 if the row is part of bootstrapping the kernel code, 0 otherwise.
@ -13,9 +16,9 @@ pub const IS_CPU_CYCLE: usize = IS_BOOTSTRAP_CONTRACT + 1;
/// If CPU cycle: The opcode being decoded, in {0, ..., 255}.
pub const OPCODE: usize = IS_CPU_CYCLE + 1;
/// If CPU cycle: flags for EVM instructions. PUSHn, DUPn, and SWAPn only get one flag each. Invalid
/// opcodes are split between a number of flags for practical reasons. Exactly one of these flags
/// must be 1.
// If CPU cycle: flags for EVM instructions. PUSHn, DUPn, and SWAPn only get one flag each. Invalid
// opcodes are split between a number of flags for practical reasons. Exactly one of these flags
// must be 1.
pub const IS_STOP: usize = OPCODE + 1;
pub const IS_ADD: usize = IS_STOP + 1;
pub const IS_MUL: usize = IS_ADD + 1;
@ -144,7 +147,6 @@ pub const OPCODE_BITS: [usize; 8] = [
pub const IS_KECCAK: usize = OPCODE_BITS[OPCODE_BITS.len() - 1] + 1;
pub const START_KECCAK_INPUT: usize = IS_KECCAK + 1;
#[allow(dead_code)] // TODO: Remove when used
pub const KECCAK_INPUT_LIMBS: Range<usize> = START_KECCAK_INPUT..START_KECCAK_INPUT + 50;
pub const START_KECCAK_OUTPUT: usize = KECCAK_INPUT_LIMBS.end;
@ -159,4 +161,47 @@ pub const LOGIC_OUTPUT: Range<usize> = LOGIC_INPUT1.end..LOGIC_INPUT1.end + 16;
pub const SIMPLE_LOGIC_DIFF: usize = LOGIC_OUTPUT.end;
pub const SIMPLE_LOGIC_DIFF_INV: usize = SIMPLE_LOGIC_DIFF + 1;
pub const NUM_CPU_COLUMNS: usize = SIMPLE_LOGIC_DIFF_INV + 1;
// Number of memory-operation "channels" a single CPU row can use.
pub(crate) const NUM_MEMORY_OPS: usize = 4;
// Each memory value is stored as eight 32-bit limbs.
pub(crate) const NUM_MEMORY_VALUE_LIMBS: usize = 8;
// The CPU clock; the memory CTL matches it against the memory table's timestamp.
pub(crate) const CLOCK: usize = SIMPLE_LOGIC_DIFF_INV + 1;
// uses_memop(i) is `F::ONE` iff this row includes a memory operation in its `i`th channel.
const USES_MEMOP_START: usize = CLOCK + 1;
pub const fn uses_memop(op: usize) -> usize {
debug_assert!(op < NUM_MEMORY_OPS);
USES_MEMOP_START + op
}
// memop_is_read(i): 1 for a read, 0 for a write, in channel `i`.
const MEMOP_ISREAD_START: usize = USES_MEMOP_START + NUM_MEMORY_OPS;
pub const fn memop_is_read(op: usize) -> usize {
debug_assert!(op < NUM_MEMORY_OPS);
MEMOP_ISREAD_START + op
}
// memop_addr_context(i): context component of channel `i`'s address.
const MEMOP_ADDR_CONTEXT_START: usize = MEMOP_ISREAD_START + NUM_MEMORY_OPS;
pub const fn memop_addr_context(op: usize) -> usize {
debug_assert!(op < NUM_MEMORY_OPS);
MEMOP_ADDR_CONTEXT_START + op
}
// memop_addr_segment(i): segment component of channel `i`'s address.
const MEMOP_ADDR_SEGMENT_START: usize = MEMOP_ADDR_CONTEXT_START + NUM_MEMORY_OPS;
pub const fn memop_addr_segment(op: usize) -> usize {
debug_assert!(op < NUM_MEMORY_OPS);
MEMOP_ADDR_SEGMENT_START + op
}
// memop_addr_virtual(i): virtual-address component of channel `i`'s address.
const MEMOP_ADDR_VIRTUAL_START: usize = MEMOP_ADDR_SEGMENT_START + NUM_MEMORY_OPS;
pub const fn memop_addr_virtual(op: usize) -> usize {
debug_assert!(op < NUM_MEMORY_OPS);
MEMOP_ADDR_VIRTUAL_START + op
}
// memop_value(i, j): limb `j` of the value read/written by channel `i`.
// NOTE(review): "ADDR" in this constant's name looks like a copy-paste from the
// address constants above — this block holds values, not addresses.
const MEMOP_ADDR_VALUE_START: usize = MEMOP_ADDR_VIRTUAL_START + NUM_MEMORY_OPS;
pub const fn memop_value(op: usize, limb: usize) -> usize {
debug_assert!(op < NUM_MEMORY_OPS);
debug_assert!(limb < NUM_MEMORY_VALUE_LIMBS);
MEMOP_ADDR_VALUE_START + op * NUM_MEMORY_VALUE_LIMBS + limb
}
pub const NUM_CPU_COLUMNS: usize = MEMOP_ADDR_VALUE_START + NUM_MEMORY_OPS * NUM_MEMORY_VALUE_LIMBS;

View File

@ -1,5 +1,6 @@
use std::marker::PhantomData;
use itertools::Itertools;
use plonky2::field::extension_field::{Extendable, FieldExtension};
use plonky2::field::field_types::Field;
use plonky2::field::packed_field::PackedField;
@ -23,11 +24,7 @@ pub fn ctl_filter_keccak<F: Field>() -> Column<F> {
}
pub fn ctl_data_logic<F: Field>() -> Vec<Column<F>> {
let mut res = vec![
Column::single(columns::IS_AND),
Column::single(columns::IS_OR),
Column::single(columns::IS_XOR),
];
let mut res = Column::singles([columns::IS_AND, columns::IS_OR, columns::IS_XOR]).collect_vec();
res.extend(columns::LOGIC_INPUT0.map(Column::single));
res.extend(columns::LOGIC_INPUT1.map(Column::single));
res.extend(columns::LOGIC_OUTPUT.map(Column::single));
@ -38,6 +35,23 @@ pub fn ctl_filter_logic<F: Field>() -> Column<F> {
Column::sum([columns::IS_AND, columns::IS_OR, columns::IS_XOR])
}
/// Columns the CPU table exposes to the memory cross-table lookup for memory
/// channel `op`: clock, read flag, the three address components, then the
/// eight value limbs. The order must line up with `memory_stark::ctl_data`.
pub fn ctl_data_memory<F: Field>(op: usize) -> Vec<Column<F>> {
    let metadata = [
        columns::CLOCK,
        columns::memop_is_read(op),
        columns::memop_addr_context(op),
        columns::memop_addr_segment(op),
        columns::memop_addr_virtual(op),
    ];
    let limbs = (0..8).map(|limb| columns::memop_value(op, limb));
    Column::singles(metadata)
        .chain(Column::singles(limbs))
        .collect_vec()
}
/// CTL filter on the CPU side: 1 iff the row performs a memory operation in
/// channel `op`.
pub fn ctl_filter_memory<F: Field>(op: usize) -> Column<F> {
Column::single(columns::uses_memop(op))
}
#[derive(Copy, Clone)]
pub struct CpuStark<F, const D: usize> {
pub f: PhantomData<F>,

View File

@ -12,6 +12,8 @@ pub mod cross_table_lookup;
mod get_challenges;
pub mod keccak;
pub mod logic;
pub mod lookup;
pub mod memory;
pub mod permutation;
pub mod proof;
pub mod prover;

137
evm/src/lookup.rs Normal file
View File

@ -0,0 +1,137 @@
use std::cmp::Ordering;
use itertools::Itertools;
use plonky2::field::extension_field::Extendable;
use plonky2::field::field_types::{Field, PrimeField64};
use plonky2::field::packed_field::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
/// Enforce the lookup argument between a pair of permuted columns (see
/// `permuted_cols`): each value in the permuted-input column must either
/// repeat the value on the previous row or equal the permuted-table value on
/// its own row.
pub(crate) fn eval_lookups<
F: Field,
P: PackedField<Scalar = F>,
const COLS: usize,
const PUB_INPUTS: usize,
>(
vars: StarkEvaluationVars<F, P, COLS, PUB_INPUTS>,
yield_constr: &mut ConstraintConsumer<P>,
col_permuted_input: usize,
col_permuted_table: usize,
) {
let local_perm_input = vars.local_values[col_permuted_input];
let next_perm_table = vars.next_values[col_permuted_table];
let next_perm_input = vars.next_values[col_permuted_input];
// A "vertical" diff between the local and next permuted inputs.
let diff_input_prev = next_perm_input - local_perm_input;
// A "horizontal" diff between the next permuted input and permuted table value.
let diff_input_table = next_perm_input - next_perm_table;
// At least one of the two diffs must vanish on every transition.
yield_constr.constraint(diff_input_prev * diff_input_table);
// This is actually constraining the first row, as per the spec, since `diff_input_table`
// is a diff of the next row's values. In the context of `constraint_last_row`, the next
// row is the first row.
yield_constr.constraint_last_row(diff_input_table);
}
/// Recursive-circuit counterpart of `eval_lookups`: builds the same two
/// constraints with circuit gates instead of packed field arithmetic.
pub(crate) fn eval_lookups_circuit<
F: RichField + Extendable<D>,
const D: usize,
const COLS: usize,
const PUB_INPUTS: usize,
>(
builder: &mut CircuitBuilder<F, D>,
vars: StarkEvaluationTargets<D, COLS, PUB_INPUTS>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
col_permuted_input: usize,
col_permuted_table: usize,
) {
let local_perm_input = vars.local_values[col_permuted_input];
let next_perm_table = vars.next_values[col_permuted_table];
let next_perm_input = vars.next_values[col_permuted_input];
// A "vertical" diff between the local and next permuted inputs.
let diff_input_prev = builder.sub_extension(next_perm_input, local_perm_input);
// A "horizontal" diff between the next permuted input and permuted table value.
let diff_input_table = builder.sub_extension(next_perm_input, next_perm_table);
// At least one of the two diffs must vanish on every transition.
let diff_product = builder.mul_extension(diff_input_prev, diff_input_table);
yield_constr.constraint(builder, diff_product);
// This is actually constraining the first row, as per the spec, since `diff_input_table`
// is a diff of the next row's values. In the context of `constraint_last_row`, the next
// row is the first row.
yield_constr.constraint_last_row(builder, diff_input_table);
}
/// Given an input column and a table column, generate the permuted input and permuted table columns
/// used in the Halo2 permutation argument.
///
/// The permuted input column is simply the input column sorted; the permuted table column is
/// rearranged so that, wherever the sorted input repeats a value, the slot holds an otherwise
/// unused table element. Both slices are assumed to have the same length `n` (only the first `n`
/// table elements are considered).
pub fn permuted_cols<F: PrimeField64>(inputs: &[F], table: &[F]) -> (Vec<F>, Vec<F>) {
let n = inputs.len();
// The permuted inputs do not have to be ordered, but we found that sorting was faster than
// hash-based grouping. We also sort the table, as this helps us identify "unused" table
// elements efficiently.
// To compare elements, e.g. for sorting, we first need them in canonical form. It would be
// wasteful to canonicalize in each comparison, as a single element may be involved in many
// comparisons. So we will canonicalize once upfront, then use `to_noncanonical_u64` when
// comparing elements.
let sorted_inputs = inputs
.iter()
.map(|x| x.to_canonical())
.sorted_unstable_by_key(|x| x.to_noncanonical_u64())
.collect_vec();
let sorted_table = table
.iter()
.map(|x| x.to_canonical())
.sorted_unstable_by_key(|x| x.to_noncanonical_u64())
.collect_vec();
// Slots of `permuted_table` that still need a value, and table values not yet placed.
let mut unused_table_inds = Vec::with_capacity(n);
let mut unused_table_vals = Vec::with_capacity(n);
let mut permuted_table = vec![F::ZERO; n];
// Merge the two sorted sequences: `i` walks the sorted inputs, `j` the sorted table.
let mut i = 0;
let mut j = 0;
while (j < n) && (i < n) {
let input_val = sorted_inputs[i].to_noncanonical_u64();
let table_val = sorted_table[j].to_noncanonical_u64();
match input_val.cmp(&table_val) {
Ordering::Greater => {
// Table value not needed at this input position; bank it for reuse.
unused_table_vals.push(sorted_table[j]);
j += 1;
}
Ordering::Less => {
// Input value with no matching table value here (a repeat of an earlier
// input, for a satisfiable lookup). Fill its slot with a banked unused
// table value, or record the slot to fill once one becomes available.
if let Some(x) = unused_table_vals.pop() {
permuted_table[i] = x;
} else {
unused_table_inds.push(i);
}
i += 1;
}
Ordering::Equal => {
// Direct match: place the table value alongside the equal input.
permuted_table[i] = sorted_table[j];
i += 1;
j += 1;
}
}
}
// Any table values / slots left over after the merge pair up one-to-one.
#[allow(clippy::needless_range_loop)] // indexing is just more natural here
for jj in j..n {
unused_table_vals.push(sorted_table[jj]);
}
for ii in i..n {
unused_table_inds.push(ii);
}
for (ind, val) in unused_table_inds.into_iter().zip_eq(unused_table_vals) {
permuted_table[ind] = val;
}
(sorted_inputs, permuted_table)
}

View File

@ -0,0 +1,598 @@
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
use itertools::{izip, multiunzip, Itertools};
use plonky2::field::extension_field::{Extendable, FieldExtension};
use plonky2::field::field_types::Field;
use plonky2::field::packed_field::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::hash::hash_types::RichField;
use plonky2::timed;
use plonky2::util::timing::TimingTree;
use rand::Rng;
use super::registers::is_memop;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::Column;
use crate::lookup::{eval_lookups, eval_lookups_circuit, permuted_cols};
use crate::memory::registers::{
sorted_value_limb, value_limb, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL, CONTEXT_FIRST_CHANGE,
COUNTER, COUNTER_PERMUTED, IS_READ, NUM_REGISTERS, RANGE_CHECK, RANGE_CHECK_PERMUTED,
SEGMENT_FIRST_CHANGE, SORTED_ADDR_CONTEXT, SORTED_ADDR_SEGMENT, SORTED_ADDR_VIRTUAL,
SORTED_IS_READ, SORTED_TIMESTAMP, TIMESTAMP, VIRTUAL_FIRST_CHANGE,
};
use crate::permutation::PermutationPair;
use crate::stark::Stark;
use crate::util::trace_rows_to_poly_values;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
pub(crate) const NUM_PUBLIC_INPUTS: usize = 0;
/// Columns the memory table exposes to the cross-table lookup: timestamp, read
/// flag, and the three address components, followed by the eight value limbs.
/// The order must line up with `cpu_stark::ctl_data_memory`.
pub fn ctl_data<F: Field>() -> Vec<Column<F>> {
    let metadata = [TIMESTAMP, IS_READ, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL];
    Column::singles(metadata)
        .chain(Column::singles((0..8).map(value_limb)))
        .collect_vec()
}
/// CTL filter on the memory side: 1 iff the row's operation came in through
/// memory channel `op` (see `registers::is_memop`).
pub fn ctl_filter<F: Field>(op: usize) -> Column<F> {
Column::single(is_memop(op))
}
/// STARK checking consistency of the memory log: operations are re-sorted by
/// address then timestamp, adjacent-row diffs are range-checked via a lookup,
/// and reads at an unchanged address must return the previously held value.
#[derive(Copy, Clone)]
pub struct MemoryStark<F, const D: usize> {
pub(crate) f: PhantomData<F>,
}
/// A single memory operation, as recorded in the (unsorted) memory log.
pub struct MemoryOp<F> {
// Which CPU memory channel (0..4) carried this operation.
channel_index: usize,
// When the operation happened; matched against the CPU clock via the CTL.
timestamp: F,
// 1 for a read, 0 for a write.
is_read: F,
// Address components: (context, segment, virtual address).
context: F,
segment: F,
virt: F,
// The value read or written, as eight 32-bit limbs.
value: [F; 8],
}
/// Generate a random but internally consistent memory log for testing.
///
/// Ops are produced two per "cycle" (sharing one timestamp), each on a
/// distinct channel index. Reads only target addresses written in *earlier*
/// cycles and return the value stored there, so the log satisfies the memory
/// STARK's read-consistency constraint. At most one of the two ops per cycle
/// is a read, and cycle 0 contains only writes (nothing to read yet).
pub fn generate_random_memory_ops<F: RichField, R: Rng>(
num_ops: usize,
rng: &mut R,
) -> Vec<MemoryOp<F>> {
let mut memory_ops = Vec::new();
// Latest value written to each (context, segment, virtual) address,
// as of the end of the previous cycle.
let mut current_memory_values: HashMap<(F, F, F), [F; 8]> = HashMap::new();
let num_cycles = num_ops / 2;
for i in 0..num_cycles {
let timestamp = F::from_canonical_usize(i);
let mut used_indices = HashSet::new();
let mut new_writes_this_cycle = HashMap::new();
let mut has_read = false;
for _ in 0..2 {
// Rejection-sample a channel index not already used this cycle.
let mut channel_index = rng.gen_range(0..4);
while used_indices.contains(&channel_index) {
channel_index = rng.gen_range(0..4);
}
used_indices.insert(channel_index);
// First cycle must be all writes; later cycles allow at most one read.
let is_read = if i == 0 {
false
} else {
!has_read && rng.gen()
};
has_read = has_read || is_read;
let is_read_field = F::from_bool(is_read);
let (context, segment, virt, vals) = if is_read {
// Read back a previously written address, with its stored value.
let written: Vec<_> = current_memory_values.keys().collect();
let &(context, segment, virt) = written[rng.gen_range(0..written.len())];
let &vals = current_memory_values
.get(&(context, segment, virt))
.unwrap();
(context, segment, virt, vals)
} else {
// TODO: with taller memory table or more padding (to enable range-checking bigger diffs),
// test larger address values.
// Rejection-sample an address not already written this cycle.
let mut context = F::from_canonical_usize(rng.gen_range(0..40));
let mut segment = F::from_canonical_usize(rng.gen_range(0..8));
let mut virt = F::from_canonical_usize(rng.gen_range(0..20));
while new_writes_this_cycle.contains_key(&(context, segment, virt)) {
context = F::from_canonical_usize(rng.gen_range(0..40));
segment = F::from_canonical_usize(rng.gen_range(0..8));
virt = F::from_canonical_usize(rng.gen_range(0..20));
}
let val: [u32; 8] = rng.gen();
let vals: [F; 8] = val.map(F::from_canonical_u32);
new_writes_this_cycle.insert((context, segment, virt), vals);
(context, segment, virt, vals)
};
memory_ops.push(MemoryOp {
channel_index,
timestamp,
is_read: is_read_field,
context,
segment,
virt,
value: vals,
});
}
// Writes only become visible to reads starting from the next cycle.
for (k, v) in new_writes_this_cycle {
current_memory_values.insert(k, v);
}
}
memory_ops
}
/// Sort the six parallel columns of the memory log jointly by address
/// (context, segment, virtual) and then by timestamp, returning the sorted
/// columns in the same order as the arguments.
pub fn sort_memory_ops<F: RichField>(
    timestamp: &[F],
    is_read: &[F],
    context: &[F],
    segment: &[F],
    virtuals: &[F],
    values: &[[F; 8]],
) -> (Vec<F>, Vec<F>, Vec<F>, Vec<F>, Vec<F>, Vec<[F; 8]>) {
    // Zip the columns into one row per op so a single sort keeps them aligned.
    let mut rows: Vec<(F, F, F, F, F, [F; 8])> = izip!(
        timestamp.iter().copied(),
        is_read.iter().copied(),
        context.iter().copied(),
        segment.iter().copied(),
        virtuals.iter().copied(),
        values.iter().copied(),
    )
    .collect();
    // Compare via canonical u64 representatives of each field element.
    rows.sort_unstable_by_key(|&(t, _, c, s, v, _)| {
        (
            c.to_noncanonical_u64(),
            s.to_noncanonical_u64(),
            v.to_noncanonical_u64(),
            t.to_noncanonical_u64(),
        )
    });
    // Split the sorted rows back into columns.
    multiunzip(rows)
}
/// For each row of the address-sorted memory log, compute boolean flags marking
/// which address component is the *first* (highest-priority) one to change on
/// the transition to the next row: context beats segment beats virtual, so at
/// most one flag is set per row. The final row, which has no successor, gets
/// all-zero flags.
///
/// Returns `(context_first_change, segment_first_change, virtual_first_change)`,
/// each the same length as the inputs.
pub fn generate_first_change_flags<F: RichField>(
    context: &[F],
    segment: &[F],
    virtuals: &[F],
) -> (Vec<F>, Vec<F>, Vec<F>) {
    let num_ops = context.len();
    // Guard the empty log: `num_ops - 1` below would underflow and panic.
    if num_ops == 0 {
        return (Vec::new(), Vec::new(), Vec::new());
    }
    let mut context_first_change = Vec::with_capacity(num_ops);
    let mut segment_first_change = Vec::with_capacity(num_ops);
    let mut virtual_first_change = Vec::with_capacity(num_ops);
    for idx in 0..num_ops - 1 {
        // Priority order: a lower-priority flag is only set when every
        // higher-priority component is unchanged.
        let this_context_first_change = context[idx] != context[idx + 1];
        let this_segment_first_change =
            segment[idx] != segment[idx + 1] && !this_context_first_change;
        let this_virtual_first_change = virtuals[idx] != virtuals[idx + 1]
            && !this_segment_first_change
            && !this_context_first_change;
        context_first_change.push(F::from_bool(this_context_first_change));
        segment_first_change.push(F::from_bool(this_segment_first_change));
        virtual_first_change.push(F::from_bool(this_virtual_first_change));
    }
    // Last row: no successor, so no change is observed.
    context_first_change.push(F::ZERO);
    segment_first_change.push(F::ZERO);
    virtual_first_change.push(F::ZERO);
    (
        context_first_change,
        segment_first_change,
        virtual_first_change,
    )
}
/// For each transition of the address-sorted memory log, compute the value to
/// be range-checked: the (decremented) difference of whichever column the
/// first-change flags say must strictly increase — context, segment, or
/// virtual address on an address change, or the timestamp when the address is
/// unchanged. The final row gets a padding zero.
///
/// The flag slices must be those produced by `generate_first_change_flags` for
/// the same columns: exactly one of the four selectors (the three flags plus
/// "address unchanged") is 1 per transition, so the sum picks a single diff.
pub fn generate_range_check_value<F: RichField>(
    context: &[F],
    segment: &[F],
    virtuals: &[F],
    timestamp: &[F],
    context_first_change: &[F],
    segment_first_change: &[F],
    virtual_first_change: &[F],
) -> Vec<F> {
    let num_ops = context.len();
    // Guard the empty log: `num_ops - 1` below would underflow and panic.
    if num_ops == 0 {
        return Vec::new();
    }
    // One entry per row (the last row holds the padding zero).
    let mut range_check = Vec::with_capacity(num_ops);
    for idx in 0..num_ops - 1 {
        let this_address_unchanged = F::ONE
            - context_first_change[idx]
            - segment_first_change[idx]
            - virtual_first_change[idx];
        range_check.push(
            context_first_change[idx] * (context[idx + 1] - context[idx] - F::ONE)
                + segment_first_change[idx] * (segment[idx + 1] - segment[idx] - F::ONE)
                + virtual_first_change[idx] * (virtuals[idx + 1] - virtuals[idx] - F::ONE)
                + this_address_unchanged * (timestamp[idx + 1] - timestamp[idx] - F::ONE),
        );
    }
    range_check.push(F::ZERO);
    range_check
}
impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
/// Build the memory trace as rows: one row per op, with the channel filter,
/// timestamp, read flag, address and value registers filled from the op log,
/// and the sorted/derived registers filled by `generate_memory`.
/// NOTE(review): the row count is not padded to a power of two here — assumed
/// to be handled downstream; confirm.
pub(crate) fn generate_trace_rows(
&self,
memory_ops: Vec<MemoryOp<F>>,
) -> Vec<[F; NUM_REGISTERS]> {
let num_ops = memory_ops.len();
// Build column-major first; `generate_memory` works on whole columns.
let mut trace_cols = [(); NUM_REGISTERS].map(|_| vec![F::ZERO; num_ops]);
for i in 0..num_ops {
let MemoryOp {
channel_index,
timestamp,
is_read,
context,
segment,
virt,
value,
} = memory_ops[i];
trace_cols[is_memop(channel_index)][i] = F::ONE;
trace_cols[TIMESTAMP][i] = timestamp;
trace_cols[IS_READ][i] = is_read;
trace_cols[ADDR_CONTEXT][i] = context;
trace_cols[ADDR_SEGMENT][i] = segment;
trace_cols[ADDR_VIRTUAL][i] = virt;
for j in 0..8 {
trace_cols[value_limb(j)][i] = value[j];
}
}
// Fill in the sorted columns, first-change flags, range-check and lookup columns.
self.generate_memory(&mut trace_cols);
// Transpose column-major data into row-major form.
let mut trace_rows = vec![[F::ZERO; NUM_REGISTERS]; num_ops];
for (i, col) in trace_cols.iter().enumerate() {
for (j, &val) in col.iter().enumerate() {
trace_rows[j][i] = val;
}
}
trace_rows
}
/// Populate the derived columns of the trace: the address-then-timestamp
/// sorted copy of the op log, the first-change flags, the range-check column,
/// the row counter, and the permuted columns for the range-check lookup.
fn generate_memory(&self, trace_cols: &mut [Vec<F>]) {
let num_trace_rows = trace_cols[0].len();
let timestamp = &trace_cols[TIMESTAMP];
let is_read = &trace_cols[IS_READ];
let context = &trace_cols[ADDR_CONTEXT];
let segment = &trace_cols[ADDR_SEGMENT];
let virtuals = &trace_cols[ADDR_VIRTUAL];
// Gather the eight value-limb columns into one [F; 8] per row.
let values: Vec<[F; 8]> = (0..num_trace_rows)
.map(|i| {
let arr: [F; 8] = (0..8)
.map(|j| &trace_cols[value_limb(j)][i])
.cloned()
.collect_vec()
.try_into()
.unwrap();
arr
})
.collect();
// Sort by (context, segment, virtual, timestamp); constraints operate on
// this sorted view of the log.
let (
sorted_timestamp,
sorted_is_read,
sorted_context,
sorted_segment,
sorted_virtual,
sorted_values,
) = sort_memory_ops(timestamp, is_read, context, segment, virtuals, &values);
let (context_first_change, segment_first_change, virtual_first_change) =
generate_first_change_flags(&sorted_context, &sorted_segment, &sorted_virtual);
let range_check_value = generate_range_check_value(
&sorted_context,
&sorted_segment,
&sorted_virtual,
&sorted_timestamp,
&context_first_change,
&segment_first_change,
&virtual_first_change,
);
trace_cols[SORTED_TIMESTAMP] = sorted_timestamp;
trace_cols[SORTED_IS_READ] = sorted_is_read;
trace_cols[SORTED_ADDR_CONTEXT] = sorted_context;
trace_cols[SORTED_ADDR_SEGMENT] = sorted_segment;
trace_cols[SORTED_ADDR_VIRTUAL] = sorted_virtual;
for i in 0..num_trace_rows {
for j in 0..8 {
trace_cols[sorted_value_limb(j)][i] = sorted_values[i][j];
}
}
trace_cols[CONTEXT_FIRST_CHANGE] = context_first_change;
trace_cols[SEGMENT_FIRST_CHANGE] = segment_first_change;
trace_cols[VIRTUAL_FIRST_CHANGE] = virtual_first_change;
trace_cols[RANGE_CHECK] = range_check_value;
// COUNTER is 0..n; the range check asserts RANGE_CHECK values lie in it.
trace_cols[COUNTER] = (0..num_trace_rows)
.map(|i| F::from_canonical_usize(i))
.collect();
// Permuted columns for the Halo2-style lookup (see `crate::lookup`).
let (permuted_inputs, permuted_table) =
permuted_cols(&trace_cols[RANGE_CHECK], &trace_cols[COUNTER]);
trace_cols[RANGE_CHECK_PERMUTED] = permuted_inputs;
trace_cols[COUNTER_PERMUTED] = permuted_table;
}
/// Generate the memory trace from an op log, timing the two phases (row
/// generation and conversion to polynomial values) at debug log level.
pub fn generate_trace(&self, memory_ops: Vec<MemoryOp<F>>) -> Vec<PolynomialValues<F>> {
    let mut timing = TimingTree::new("generate trace", log::Level::Debug);
    let rows = timed!(
        &mut timing,
        "generate trace rows",
        self.generate_trace_rows(memory_ops)
    );
    let polys = timed!(
        &mut timing,
        "convert to PolynomialValues",
        trace_rows_to_poly_values(rows)
    );
    timing.print();
    polys
}
}
impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for MemoryStark<F, D> {
const COLUMNS: usize = NUM_REGISTERS;
const PUBLIC_INPUTS: usize = NUM_PUBLIC_INPUTS;
// Evaluate the memory constraints over the sorted columns: flag booleanity,
// ordering of the address components, range-checking of the increasing
// column's diff, read consistency, and the range-check lookup.
fn eval_packed_generic<FE, P, const D2: usize>(
&self,
vars: StarkEvaluationVars<FE, P, { Self::COLUMNS }, { Self::PUBLIC_INPUTS }>,
yield_constr: &mut ConstraintConsumer<P>,
) where
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>,
{
let one = P::from(FE::ONE);
// Current-row values of the sorted columns.
let timestamp = vars.local_values[SORTED_TIMESTAMP];
let addr_context = vars.local_values[SORTED_ADDR_CONTEXT];
let addr_segment = vars.local_values[SORTED_ADDR_SEGMENT];
let addr_virtual = vars.local_values[SORTED_ADDR_VIRTUAL];
let values: Vec<_> = (0..8)
.map(|i| vars.local_values[sorted_value_limb(i)])
.collect();
// Next-row values of the sorted columns.
let next_timestamp = vars.next_values[SORTED_TIMESTAMP];
let next_is_read = vars.next_values[SORTED_IS_READ];
let next_addr_context = vars.next_values[SORTED_ADDR_CONTEXT];
let next_addr_segment = vars.next_values[SORTED_ADDR_SEGMENT];
let next_addr_virtual = vars.next_values[SORTED_ADDR_VIRTUAL];
let next_values: Vec<_> = (0..8)
.map(|i| vars.next_values[sorted_value_limb(i)])
.collect();
let context_first_change = vars.local_values[CONTEXT_FIRST_CHANGE];
let segment_first_change = vars.local_values[SEGMENT_FIRST_CHANGE];
let virtual_first_change = vars.local_values[VIRTUAL_FIRST_CHANGE];
// Derived fourth selector: 1 iff no address component changes.
let address_unchanged =
one - context_first_change - segment_first_change - virtual_first_change;
let range_check = vars.local_values[RANGE_CHECK];
let not_context_first_change = one - context_first_change;
let not_segment_first_change = one - segment_first_change;
let not_virtual_first_change = one - virtual_first_change;
let not_address_unchanged = one - address_unchanged;
// First set of ordering constraint: first_change flags are boolean.
yield_constr.constraint(context_first_change * not_context_first_change);
yield_constr.constraint(segment_first_change * not_segment_first_change);
yield_constr.constraint(virtual_first_change * not_virtual_first_change);
yield_constr.constraint(address_unchanged * not_address_unchanged);
// Second set of ordering constraints: no change before the column corresponding to the nonzero first_change flag.
yield_constr
.constraint_transition(segment_first_change * (next_addr_context - addr_context));
yield_constr
.constraint_transition(virtual_first_change * (next_addr_context - addr_context));
yield_constr
.constraint_transition(virtual_first_change * (next_addr_segment - addr_segment));
yield_constr.constraint_transition(address_unchanged * (next_addr_context - addr_context));
yield_constr.constraint_transition(address_unchanged * (next_addr_segment - addr_segment));
yield_constr.constraint_transition(address_unchanged * (next_addr_virtual - addr_virtual));
// Third set of ordering constraints: range-check difference in the column that should be increasing.
let computed_range_check = context_first_change * (next_addr_context - addr_context - one)
+ segment_first_change * (next_addr_segment - addr_segment - one)
+ virtual_first_change * (next_addr_virtual - addr_virtual - one)
+ address_unchanged * (next_timestamp - timestamp - one);
yield_constr.constraint_transition(range_check - computed_range_check);
// Enumerate purportedly-ordered log.
// Read consistency: a read at an unchanged address must see the same value.
for i in 0..8 {
yield_constr
.constraint(next_is_read * address_unchanged * (next_values[i] - values[i]));
}
// Lookup argument tying RANGE_CHECK values to the 0..n COUNTER column.
eval_lookups(vars, yield_constr, RANGE_CHECK_PERMUTED, COUNTER_PERMUTED)
}
fn eval_ext_circuit(
&self,
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
vars: StarkEvaluationTargets<D, { Self::COLUMNS }, { Self::PUBLIC_INPUTS }>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let one = builder.one_extension();
let addr_context = vars.local_values[SORTED_ADDR_CONTEXT];
let addr_segment = vars.local_values[SORTED_ADDR_SEGMENT];
let addr_virtual = vars.local_values[SORTED_ADDR_VIRTUAL];
let values: Vec<_> = (0..8)
.map(|i| vars.local_values[sorted_value_limb(i)])
.collect();
let timestamp = vars.local_values[SORTED_TIMESTAMP];
let next_addr_context = vars.next_values[SORTED_ADDR_CONTEXT];
let next_addr_segment = vars.next_values[SORTED_ADDR_SEGMENT];
let next_addr_virtual = vars.next_values[SORTED_ADDR_VIRTUAL];
let next_values: Vec<_> = (0..8)
.map(|i| vars.next_values[sorted_value_limb(i)])
.collect();
let next_is_read = vars.next_values[SORTED_IS_READ];
let next_timestamp = vars.next_values[SORTED_TIMESTAMP];
let context_first_change = vars.local_values[CONTEXT_FIRST_CHANGE];
let segment_first_change = vars.local_values[SEGMENT_FIRST_CHANGE];
let virtual_first_change = vars.local_values[VIRTUAL_FIRST_CHANGE];
let address_unchanged = {
let mut cur = builder.sub_extension(one, context_first_change);
cur = builder.sub_extension(cur, segment_first_change);
builder.sub_extension(cur, virtual_first_change)
};
let range_check = vars.local_values[RANGE_CHECK];
let not_context_first_change = builder.sub_extension(one, context_first_change);
let not_segment_first_change = builder.sub_extension(one, segment_first_change);
let not_virtual_first_change = builder.sub_extension(one, virtual_first_change);
let not_address_unchanged = builder.sub_extension(one, address_unchanged);
let addr_context_diff = builder.sub_extension(next_addr_context, addr_context);
let addr_segment_diff = builder.sub_extension(next_addr_segment, addr_segment);
let addr_virtual_diff = builder.sub_extension(next_addr_virtual, addr_virtual);
// First set of ordering constraint: traces are boolean.
let context_first_change_bool =
builder.mul_extension(context_first_change, not_context_first_change);
yield_constr.constraint(builder, context_first_change_bool);
let segment_first_change_bool =
builder.mul_extension(segment_first_change, not_segment_first_change);
yield_constr.constraint(builder, segment_first_change_bool);
let virtual_first_change_bool =
builder.mul_extension(virtual_first_change, not_virtual_first_change);
yield_constr.constraint(builder, virtual_first_change_bool);
let address_unchanged_bool =
builder.mul_extension(address_unchanged, not_address_unchanged);
yield_constr.constraint(builder, address_unchanged_bool);
// Second set of ordering constraints: no change before the column corresponding to the nonzero first_change flag.
let segment_first_change_check =
builder.mul_extension(segment_first_change, addr_context_diff);
yield_constr.constraint_transition(builder, segment_first_change_check);
let virtual_first_change_check_1 =
builder.mul_extension(virtual_first_change, addr_context_diff);
yield_constr.constraint_transition(builder, virtual_first_change_check_1);
let virtual_first_change_check_2 =
builder.mul_extension(virtual_first_change, addr_segment_diff);
yield_constr.constraint_transition(builder, virtual_first_change_check_2);
let address_unchanged_check_1 = builder.mul_extension(address_unchanged, addr_context_diff);
yield_constr.constraint_transition(builder, address_unchanged_check_1);
let address_unchanged_check_2 = builder.mul_extension(address_unchanged, addr_segment_diff);
yield_constr.constraint_transition(builder, address_unchanged_check_2);
let address_unchanged_check_3 = builder.mul_extension(address_unchanged, addr_virtual_diff);
yield_constr.constraint_transition(builder, address_unchanged_check_3);
// Third set of ordering constraints: range-check difference in the column that should be increasing.
let context_diff = {
let diff = builder.sub_extension(next_addr_context, addr_context);
builder.sub_extension(diff, one)
};
let context_range_check = builder.mul_extension(context_first_change, context_diff);
let segment_diff = {
let diff = builder.sub_extension(next_addr_segment, addr_segment);
builder.sub_extension(diff, one)
};
let segment_range_check = builder.mul_extension(segment_first_change, segment_diff);
let virtual_diff = {
let diff = builder.sub_extension(next_addr_virtual, addr_virtual);
builder.sub_extension(diff, one)
};
let virtual_range_check = builder.mul_extension(virtual_first_change, virtual_diff);
let timestamp_diff = {
let diff = builder.sub_extension(next_timestamp, timestamp);
builder.sub_extension(diff, one)
};
let timestamp_range_check = builder.mul_extension(address_unchanged, timestamp_diff);
let computed_range_check = {
let mut sum = builder.add_extension(context_range_check, segment_range_check);
sum = builder.add_extension(sum, virtual_range_check);
builder.add_extension(sum, timestamp_range_check)
};
let range_check_diff = builder.sub_extension(range_check, computed_range_check);
yield_constr.constraint_transition(builder, range_check_diff);
// Enumerate purportedly-ordered log.
for i in 0..8 {
let value_diff = builder.sub_extension(next_values[i], values[i]);
let zero_if_read = builder.mul_extension(address_unchanged, value_diff);
let read_constraint = builder.mul_extension(next_is_read, zero_if_read);
yield_constr.constraint(builder, read_constraint);
}
eval_lookups_circuit(
builder,
vars,
yield_constr,
RANGE_CHECK_PERMUTED,
COUNTER_PERMUTED,
)
}
    fn constraint_degree(&self) -> usize {
        // Maximum multiplicative degree of the constraints defined in this
        // STARK; e.g. the ordering checks multiply a binary flag by a column
        // difference that is itself a product, reaching degree 3.
        3
    }
fn permutation_pairs(&self) -> Vec<PermutationPair> {
vec![
PermutationPair::singletons(RANGE_CHECK, RANGE_CHECK_PERMUTED),
PermutationPair::singletons(COUNTER, COUNTER_PERMUTED),
]
}
}
#[cfg(test)]
mod tests {
    use anyhow::Result;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};

    use crate::memory::memory_stark::MemoryStark;
    use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};

    /// Checks that the constraint polynomials stay within the declared degree bound.
    #[test]
    fn test_stark_degree() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = MemoryStark<F, D>;

        test_stark_low_degree(S {
            f: Default::default(),
        })
    }

    /// Checks that the recursive (circuit) constraint evaluation is consistent
    /// with the native one.
    #[test]
    fn test_stark_circuit() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = MemoryStark<F, D>;

        test_stark_circuit_constraints::<F, C, S, D>(S {
            f: Default::default(),
        })
    }
}

2
evm/src/memory/mod.rs Normal file
View File

@ -0,0 +1,2 @@
pub mod memory_stark;
pub mod registers;

View File

@ -0,0 +1,57 @@
//! Memory unit.
// Maximum number of memory operations per CPU row; one `is_memop` flag column
// is allocated for each.
const NUM_MEMORY_OPS: usize = 4;
// Values are split into this many limbs; eight limbs hold up to a 256-bit word.
const NUM_MEMORY_VALUE_LIMBS: usize = 8;

// Columns for the memory operations, in execution order.
pub(crate) const TIMESTAMP: usize = 0;
// Flag which is 1 when the operation is a read; the read constraint in
// `memory_stark` forces reads not to change the stored value.
pub(crate) const IS_READ: usize = TIMESTAMP + 1;
// Address components: (context, segment, virtual address).
pub(crate) const ADDR_CONTEXT: usize = IS_READ + 1;
pub(crate) const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1;
pub(crate) const ADDR_VIRTUAL: usize = ADDR_SEGMENT + 1;
// Eight limbs to hold up to a 256-bit value.
const VALUE_START: usize = ADDR_VIRTUAL + 1;
/// Column holding the `i`th limb of the value, in execution order.
pub(crate) const fn value_limb(i: usize) -> usize {
    // Only `NUM_MEMORY_VALUE_LIMBS` limb columns exist; checked in debug builds.
    debug_assert!(i < NUM_MEMORY_VALUE_LIMBS);
    i + VALUE_START
}
// A second copy of the same memory operations, sorted by address
// (context, segment, virtual) and then by timestamp. The ordering constraints
// in `memory_stark` act on these sorted columns.
pub(crate) const SORTED_TIMESTAMP: usize = VALUE_START + NUM_MEMORY_VALUE_LIMBS;
pub(crate) const SORTED_IS_READ: usize = SORTED_TIMESTAMP + 1;
pub(crate) const SORTED_ADDR_CONTEXT: usize = SORTED_IS_READ + 1;
pub(crate) const SORTED_ADDR_SEGMENT: usize = SORTED_ADDR_CONTEXT + 1;
pub(crate) const SORTED_ADDR_VIRTUAL: usize = SORTED_ADDR_SEGMENT + 1;
const SORTED_VALUE_START: usize = SORTED_ADDR_VIRTUAL + 1;
/// Column holding the `i`th limb of the value, in the sorted columns.
pub(crate) const fn sorted_value_limb(i: usize) -> usize {
    // Only `NUM_MEMORY_VALUE_LIMBS` limb columns exist; checked in debug builds.
    debug_assert!(i < NUM_MEMORY_VALUE_LIMBS);
    i + SORTED_VALUE_START
}
// Flags to indicate whether this part of the address differs from the next row (in the sorted
// columns), and the previous parts do not differ.
// That is, e.g., `SEGMENT_FIRST_CHANGE` is `F::ONE` iff `SORTED_ADDR_CONTEXT` is the same in this
// row and the next, but `SORTED_ADDR_SEGMENT` is not.
pub(crate) const CONTEXT_FIRST_CHANGE: usize = SORTED_VALUE_START + NUM_MEMORY_VALUE_LIMBS;
pub(crate) const SEGMENT_FIRST_CHANGE: usize = CONTEXT_FIRST_CHANGE + 1;
pub(crate) const VIRTUAL_FIRST_CHANGE: usize = SEGMENT_FIRST_CHANGE + 1;
// We use a range check to ensure sorting. This column holds `diff - 1`, where
// `diff` is the increase in the first changed address component (or in the
// timestamp when the address is unchanged between consecutive sorted rows).
pub(crate) const RANGE_CHECK: usize = VIRTUAL_FIRST_CHANGE + 1;
// The counter column (used for the range check) starts from 0 and increments.
pub(crate) const COUNTER: usize = RANGE_CHECK + 1;
// Helper columns for the permutation argument used to enforce the range check.
pub(crate) const RANGE_CHECK_PERMUTED: usize = COUNTER + 1;
pub(crate) const COUNTER_PERMUTED: usize = RANGE_CHECK_PERMUTED + 1;
// Flags to indicate if this operation corresponds to the `i`th memory op in a certain row of the
// CPU table.
const IS_MEMOP_START: usize = COUNTER_PERMUTED + 1;
/// Column of the flag marking this row as the `i`th memory op of a CPU row.
#[allow(dead_code)] // Not referenced yet; presumably reserved for the CPU<->memory lookup.
pub(crate) const fn is_memop(i: usize) -> usize {
    // Only `NUM_MEMORY_OPS` flag columns exist; checked in debug builds.
    debug_assert!(i < NUM_MEMORY_OPS);
    i + IS_MEMOP_START
}
pub(crate) const NUM_REGISTERS: usize = IS_MEMOP_START + NUM_MEMORY_OPS;

View File

@ -22,6 +22,7 @@ use crate::cpu::cpu_stark::CpuStark;
use crate::cross_table_lookup::{cross_table_lookup_data, CtlCheckVars, CtlData};
use crate::keccak::keccak_stark::KeccakStark;
use crate::logic::LogicStark;
use crate::memory::memory_stark::MemoryStark;
use crate::permutation::PermutationCheckVars;
use crate::permutation::{
compute_permutation_z_polys, get_n_grand_product_challenge_sets, GrandProductChallengeSet,
@ -49,6 +50,8 @@ where
[(); KeccakStark::<F, D>::PUBLIC_INPUTS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::PUBLIC_INPUTS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::PUBLIC_INPUTS]:,
{
let num_starks = Table::num_tables();
debug_assert_eq!(num_starks, trace_poly_values.len());
@ -132,8 +135,21 @@ where
&mut challenger,
timing,
)?;
let memory_proof = prove_single_table(
&all_stark.memory_stark,
config,
&trace_poly_values[Table::Memory as usize],
&trace_commitments[Table::Memory as usize],
&ctl_data_per_table[Table::Memory as usize],
public_inputs[Table::Memory as usize]
.clone()
.try_into()
.unwrap(),
&mut challenger,
timing,
)?;
let stark_proofs = vec![cpu_proof, keccak_proof, logic_proof];
let stark_proofs = vec![cpu_proof, keccak_proof, logic_proof, memory_proof];
debug_assert_eq!(stark_proofs.len(), num_starks);
Ok(AllProof { stark_proofs })

View File

@ -18,6 +18,7 @@ use crate::cpu::cpu_stark::CpuStark;
use crate::cross_table_lookup::{verify_cross_table_lookups_circuit, CtlCheckVarsTarget};
use crate::keccak::keccak_stark::KeccakStark;
use crate::logic::LogicStark;
use crate::memory::memory_stark::MemoryStark;
use crate::permutation::PermutationCheckDataTarget;
use crate::proof::{
AllProof, AllProofChallengesTarget, AllProofTarget, StarkOpeningSetTarget, StarkProof,
@ -44,6 +45,8 @@ pub fn verify_proof_circuit<
[(); KeccakStark::<F, D>::PUBLIC_INPUTS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::PUBLIC_INPUTS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::PUBLIC_INPUTS]:,
C::Hasher: AlgebraicHasher<F>,
{
let AllProofChallengesTarget {
@ -57,6 +60,7 @@ pub fn verify_proof_circuit<
cpu_stark,
keccak_stark,
logic_stark,
memory_stark,
cross_table_lookups,
} = all_stark;
@ -91,6 +95,14 @@ pub fn verify_proof_circuit<
&ctl_vars_per_table[Table::Logic as usize],
inner_config,
);
verify_stark_proof_with_challenges_circuit::<F, C, _, D>(
builder,
memory_stark,
&all_proof.stark_proofs[Table::Memory as usize],
&stark_challenges[Table::Memory as usize],
&ctl_vars_per_table[Table::Memory as usize],
inner_config,
);
verify_cross_table_lookups_circuit::<F, C, D>(
builder,
@ -291,6 +303,20 @@ pub fn add_virtual_all_proof<F: RichField + Extendable<D>, const D: usize>(
public_inputs,
}
},
{
let proof = add_virtual_stark_proof(
builder,
all_stark.memory_stark,
config,
degree_bits[Table::Memory as usize],
nums_ctl_zs[Table::Memory as usize],
);
            let public_inputs = builder.add_virtual_targets(MemoryStark::<F, D>::PUBLIC_INPUTS);
StarkProofWithPublicInputsTarget {
proof,
public_inputs,
}
},
];
assert_eq!(stark_proofs.len(), Table::num_tables());

View File

@ -13,6 +13,7 @@ use crate::cpu::cpu_stark::CpuStark;
use crate::cross_table_lookup::{verify_cross_table_lookups, CtlCheckVars};
use crate::keccak::keccak_stark::KeccakStark;
use crate::logic::LogicStark;
use crate::memory::memory_stark::MemoryStark;
use crate::permutation::PermutationCheckVars;
use crate::proof::{
AllProof, AllProofChallenges, StarkOpeningSet, StarkProofChallenges, StarkProofWithPublicInputs,
@ -33,6 +34,8 @@ where
[(); KeccakStark::<F, D>::PUBLIC_INPUTS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::PUBLIC_INPUTS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::PUBLIC_INPUTS]:,
[(); C::Hasher::HASH_SIZE]:,
{
let AllProofChallenges {
@ -46,6 +49,7 @@ where
cpu_stark,
keccak_stark,
logic_stark,
memory_stark,
cross_table_lookups,
} = all_stark;
@ -70,6 +74,13 @@ where
&ctl_vars_per_table[Table::Keccak as usize],
config,
)?;
verify_stark_proof_with_challenges(
memory_stark,
&all_proof.stark_proofs[Table::Memory as usize],
&stark_challenges[Table::Memory as usize],
&ctl_vars_per_table[Table::Memory as usize],
config,
)?;
verify_stark_proof_with_challenges(
logic_stark,
&all_proof.stark_proofs[Table::Logic as usize],