//! This crate provides support for cross-table lookups.
//!
//! If a STARK S_1 calls an operation that is carried out by another STARK S_2,
//! S_1 provides the inputs to S_2 and reads the output from S_2. To ensure that
//! the operation was correctly carried out, we must check that the provided inputs
//! and outputs are correctly read. Cross-table lookups carry out that check.
//!
//! To achieve this, smaller CTL tables are created on both sides: looking and looked tables.
//! In our example, we create a table S_1' comprised of columns -- or linear combinations
//! of columns -- of S_1, and rows that call operations carried out in S_2. We also create a
//! table S_2' comprised of columns -- or linear combinations of columns -- of S_2 and rows
//! that carry out the operations needed by other STARKs. Then, S_1' is a looking table for
//! the looked S_2', since we want to check that the operation outputs in S_1' are indeed in S_2'.
//! Furthermore, the concatenation of all tables looking into S_2' must be equal to S_2'.
//!
//! To achieve this, we construct, for each table, a permutation polynomial Z(x).
//! Z(x) is computed as the product of all its column combinations.
//! To check it was correctly constructed, we check:
//! - Z(gw) = Z(w) * combine(w) where combine(w) is the column combination at point w.
//! - Z(g^(n-1)) = combine(1).
//! - The verifier also checks that the product of looking table Z polynomials is equal
//! to the associated looked table Z polynomial.
//! Note that the first two checks are written that way because Z polynomials are computed
//! upside down for convenience.
//!
//! Additionally, we support cross-table lookups over two rows. The permutation principle
//! is similar, but we provide not only `local_values` but also `next_values` -- corresponding to
//! the current and next row values -- when computing the linear combinations.
use std::borrow::Borrow; use std::fmt::Debug; use std::iter::repeat; use anyhow::{ensure, Result}; use itertools::Itertools; use plonky2::field::extension::{Extendable, FieldExtension}; use plonky2::field::packed::PackedField; use plonky2::field::polynomial::PolynomialValues; use plonky2::field::types::Field; use plonky2::hash::hash_types::RichField; use plonky2::iop::challenger::{Challenger, RecursiveChallenger}; use plonky2::iop::ext_target::ExtensionTarget; use plonky2::iop::target::Target; use plonky2::plonk::circuit_builder::CircuitBuilder; use plonky2::plonk::config::{AlgebraicHasher, GenericConfig, Hasher}; use plonky2::plonk::plonk_common::{ reduce_with_powers, reduce_with_powers_circuit, reduce_with_powers_ext_circuit, }; use plonky2::util::serialization::{Buffer, IoResult, Read, Write}; use crate::all_stark::{Table, NUM_TABLES}; use crate::config::StarkConfig; use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; use crate::evaluation_frame::StarkEvaluationFrame; use crate::proof::{StarkProofTarget, StarkProofWithMetadata}; use crate::stark::Stark; /// Represent two linear combination of columns, corresponding to the current and next row values. /// Each linear combination is represented as: /// - a vector of `(usize, F)` corresponding to the column number and the associated multiplicand /// - the constant of the linear combination. #[derive(Clone, Debug)] pub(crate) struct Column { linear_combination: Vec<(usize, F)>, next_row_linear_combination: Vec<(usize, F)>, constant: F, } impl Column { /// Returns the representation of a single column in the current row. pub(crate) fn single(c: usize) -> Self { Self { linear_combination: vec![(c, F::ONE)], next_row_linear_combination: vec![], constant: F::ZERO, } } /// Returns multiple single columns in the current row. 
pub(crate) fn singles>>( cs: I, ) -> impl Iterator { cs.into_iter().map(|c| Self::single(*c.borrow())) } /// Returns the representation of a single column in the next row. pub(crate) fn single_next_row(c: usize) -> Self { Self { linear_combination: vec![], next_row_linear_combination: vec![(c, F::ONE)], constant: F::ZERO, } } /// Returns multiple single columns for the next row. pub(crate) fn singles_next_row>>( cs: I, ) -> impl Iterator { cs.into_iter().map(|c| Self::single_next_row(*c.borrow())) } /// Returns a linear combination corresponding to a constant. pub(crate) fn constant(constant: F) -> Self { Self { linear_combination: vec![], next_row_linear_combination: vec![], constant, } } /// Returns a linear combination corresponding to 0. pub(crate) fn zero() -> Self { Self::constant(F::ZERO) } /// Returns a linear combination corresponding to 1. pub(crate) fn one() -> Self { Self::constant(F::ONE) } /// Given an iterator of `(usize, F)` and a constant, returns the association linear combination of columns for the current row. pub(crate) fn linear_combination_with_constant>( iter: I, constant: F, ) -> Self { let v = iter.into_iter().collect::>(); assert!(!v.is_empty()); debug_assert_eq!( v.iter().map(|(c, _)| c).unique().count(), v.len(), "Duplicate columns." ); Self { linear_combination: v, next_row_linear_combination: vec![], constant, } } /// Given an iterator of `(usize, F)` and a constant, returns the associated linear combination of columns for the current and the next rows. pub(crate) fn linear_combination_and_next_row_with_constant< I: IntoIterator, >( iter: I, next_row_iter: I, constant: F, ) -> Self { let v = iter.into_iter().collect::>(); let next_row_v = next_row_iter.into_iter().collect::>(); assert!(!v.is_empty() || !next_row_v.is_empty()); debug_assert_eq!( v.iter().map(|(c, _)| c).unique().count(), v.len(), "Duplicate columns." ); debug_assert_eq!( next_row_v.iter().map(|(c, _)| c).unique().count(), next_row_v.len(), "Duplicate columns." 
); Self { linear_combination: v, next_row_linear_combination: next_row_v, constant, } } /// Returns a linear combination of columns, with no additional constant. pub(crate) fn linear_combination>(iter: I) -> Self { Self::linear_combination_with_constant(iter, F::ZERO) } /// Given an iterator of columns (c_0, ..., c_n) containing bits in little endian order: /// returns the representation of c_0 + 2 * c_1 + ... + 2^n * c_n. pub(crate) fn le_bits>>(cs: I) -> Self { Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(F::TWO.powers())) } /// Given an iterator of columns (c_0, ..., c_n) containing bits in little endian order: /// returns the representation of c_0 + 2 * c_1 + ... + 2^n * c_n + k where `k` is an /// additional constant. pub(crate) fn le_bits_with_constant>>( cs: I, constant: F, ) -> Self { Self::linear_combination_with_constant( cs.into_iter().map(|c| *c.borrow()).zip(F::TWO.powers()), constant, ) } /// Given an iterator of columns (c_0, ..., c_n) containing bytes in little endian order: /// returns the representation of c_0 + 256 * c_1 + ... + 256^n * c_n. pub(crate) fn le_bytes>>(cs: I) -> Self { Self::linear_combination( cs.into_iter() .map(|c| *c.borrow()) .zip(F::from_canonical_u16(256).powers()), ) } /// Given an iterator of columns, returns the representation of their sum. pub(crate) fn sum>>(cs: I) -> Self { Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(repeat(F::ONE))) } /// Given the column values for the current row, returns the evaluation of the linear combination. pub(crate) fn eval(&self, v: &[P]) -> P where FE: FieldExtension, P: PackedField, { self.linear_combination .iter() .map(|&(c, f)| v[c] * FE::from_basefield(f)) .sum::

() + FE::from_basefield(self.constant) } /// Given the column values for the current and next rows, evaluates the current and next linear combinations and returns their sum. pub(crate) fn eval_with_next(&self, v: &[P], next_v: &[P]) -> P where FE: FieldExtension, P: PackedField, { self.linear_combination .iter() .map(|&(c, f)| v[c] * FE::from_basefield(f)) .sum::

() + self .next_row_linear_combination .iter() .map(|&(c, f)| next_v[c] * FE::from_basefield(f)) .sum::

() + FE::from_basefield(self.constant) } /// Evaluate on a row of a table given in column-major form. pub(crate) fn eval_table(&self, table: &[PolynomialValues], row: usize) -> F { let mut res = self .linear_combination .iter() .map(|&(c, f)| table[c].values[row] * f) .sum::() + self.constant; // If we access the next row at the last row, for sanity, we consider the next row's values to be 0. // If CTLs are correctly written, the filter should be 0 in that case anyway. if !self.next_row_linear_combination.is_empty() && row < table[0].values.len() - 1 { res += self .next_row_linear_combination .iter() .map(|&(c, f)| table[c].values[row + 1] * f) .sum::(); } res } /// Evaluates the column on all rows. pub(crate) fn eval_all_rows(&self, table: &[PolynomialValues]) -> Vec { let length = table[0].len(); (0..length) .map(|row| self.eval_table(table, row)) .collect::>() } /// Circuit version of `eval`: Given a row's targets, returns their linear combination. pub(crate) fn eval_circuit( &self, builder: &mut CircuitBuilder, v: &[ExtensionTarget], ) -> ExtensionTarget where F: RichField + Extendable, { let pairs = self .linear_combination .iter() .map(|&(c, f)| { ( v[c], builder.constant_extension(F::Extension::from_basefield(f)), ) }) .collect::>(); let constant = builder.constant_extension(F::Extension::from_basefield(self.constant)); builder.inner_product_extension(F::ONE, constant, pairs) } /// Circuit version of `eval_with_next`: /// Given the targets of the current and next row, returns the sum of their linear combinations. 
pub(crate) fn eval_with_next_circuit( &self, builder: &mut CircuitBuilder, v: &[ExtensionTarget], next_v: &[ExtensionTarget], ) -> ExtensionTarget where F: RichField + Extendable, { let mut pairs = self .linear_combination .iter() .map(|&(c, f)| { ( v[c], builder.constant_extension(F::Extension::from_basefield(f)), ) }) .collect::>(); let next_row_pairs = self.next_row_linear_combination.iter().map(|&(c, f)| { ( next_v[c], builder.constant_extension(F::Extension::from_basefield(f)), ) }); pairs.extend(next_row_pairs); let constant = builder.constant_extension(F::Extension::from_basefield(self.constant)); builder.inner_product_extension(F::ONE, constant, pairs) } } /// Represents a CTL filter, which evaluates to 1 if the row must be considered for the CTL and 0 otherwise. /// It's an arbitrary degree 2 combination of columns: `products` are the degree 2 terms, and `constants` are /// the degree 1 terms. #[derive(Clone, Debug)] pub(crate) struct Filter { products: Vec<(Column, Column)>, constants: Vec>, } impl Filter { pub(crate) fn new(products: Vec<(Column, Column)>, constants: Vec>) -> Self { Self { products, constants, } } /// Returns a filter made of a single column. pub(crate) fn new_simple(col: Column) -> Self { Self { products: vec![], constants: vec![col], } } /// Given the column values for the current and next rows, evaluates the filter. pub(crate) fn eval_filter(&self, v: &[P], next_v: &[P]) -> P where FE: FieldExtension, P: PackedField, { self.products .iter() .map(|(col1, col2)| col1.eval_with_next(v, next_v) * col2.eval_with_next(v, next_v)) .sum::

() + self .constants .iter() .map(|col| col.eval_with_next(v, next_v)) .sum::

() } /// Circuit version of `eval_filter`: /// Given the column values for the current and next rows, evaluates the filter. pub(crate) fn eval_filter_circuit( &self, builder: &mut CircuitBuilder, v: &[ExtensionTarget], next_v: &[ExtensionTarget], ) -> ExtensionTarget where F: RichField + Extendable, { let prods = self .products .iter() .map(|(col1, col2)| { let col1_eval = col1.eval_with_next_circuit(builder, v, next_v); let col2_eval = col2.eval_with_next_circuit(builder, v, next_v); builder.mul_extension(col1_eval, col2_eval) }) .collect::>(); let consts = self .constants .iter() .map(|col| col.eval_with_next_circuit(builder, v, next_v)) .collect::>(); let prods = builder.add_many_extension(prods); let consts = builder.add_many_extension(consts); builder.add_extension(prods, consts) } /// Evaluate on a row of a table given in column-major form. pub(crate) fn eval_table(&self, table: &[PolynomialValues], row: usize) -> F { self.products .iter() .map(|(col1, col2)| col1.eval_table(table, row) * col2.eval_table(table, row)) .sum::() + self .constants .iter() .map(|col| col.eval_table(table, row)) .sum() } pub(crate) fn eval_all_rows(&self, table: &[PolynomialValues]) -> Vec { let length = table[0].len(); (0..length) .map(|row| self.eval_table(table, row)) .collect::>() } } /// A `Table` with a linear combination of columns and a filter. /// `filter` is used to determine the rows to select in `Table`. /// `columns` represents linear combinations of the columns of `Table`. #[derive(Clone, Debug)] pub(crate) struct TableWithColumns { table: Table, columns: Vec>, pub(crate) filter: Option>, } impl TableWithColumns { /// Generates a new `TableWithColumns` given a `Table`, a linear combination of columns `columns` and a `filter`. 
pub(crate) fn new(table: Table, columns: Vec>, filter: Option>) -> Self { Self { table, columns, filter, } } } /// Cross-table lookup data consisting in the lookup table (`looked_table`) and all the tables that look into `looked_table` (`looking_tables`). /// Each `looking_table` corresponds to a STARK's table whose rows have been filtered out and whose columns have been through a linear combination (see `eval_table`). The concatenation of those smaller tables should result in the `looked_table`. #[derive(Clone)] pub(crate) struct CrossTableLookup { /// Column linear combinations for all tables that are looking into the current table. pub(crate) looking_tables: Vec>, /// Column linear combination for the current table. pub(crate) looked_table: TableWithColumns, } impl CrossTableLookup { /// Creates a new `CrossTableLookup` given some looking tables and a looked table. /// All tables should have the same width. pub(crate) fn new( looking_tables: Vec>, looked_table: TableWithColumns, ) -> Self { assert!(looking_tables .iter() .all(|twc| twc.columns.len() == looked_table.columns.len())); Self { looking_tables, looked_table, } } /// Given a `Table` t and the number of challenges, returns the number of Cross-table lookup polynomials associated to t, /// i.e. the number of looking and looked tables among all CTLs whose columns are taken from t. pub(crate) fn num_ctl_zs(ctls: &[Self], table: Table, num_challenges: usize) -> usize { let mut num_ctls = 0; for ctl in ctls { let all_tables = std::iter::once(&ctl.looked_table).chain(&ctl.looking_tables); num_ctls += all_tables.filter(|twc| twc.table == table).count(); } num_ctls * num_challenges } } /// Cross-table lookup data for one table. #[derive(Clone, Default)] pub(crate) struct CtlData { /// Data associated with all Z(x) polynomials for one table. pub(crate) zs_columns: Vec>, } /// Cross-table lookup data associated with one Z(x) polynomial. #[derive(Clone)] pub(crate) struct CtlZData { /// Z polynomial values. 
pub(crate) z: PolynomialValues, /// Cross-table lookup challenge. pub(crate) challenge: GrandProductChallenge, /// Column linear combination for the current table. pub(crate) columns: Vec>, /// Filter column for the current table. It evaluates to either 1 or 0. pub(crate) filter: Option>, } impl CtlData { /// Returns the number of cross-table lookup polynomials. pub(crate) fn len(&self) -> usize { self.zs_columns.len() } /// Returns whether there are no cross-table lookups. pub(crate) fn is_empty(&self) -> bool { self.zs_columns.is_empty() } /// Returns all the cross-table lookup polynomials. pub(crate) fn z_polys(&self) -> Vec> { self.zs_columns .iter() .map(|zs_columns| zs_columns.z.clone()) .collect() } } /// Randomness for a single instance of a permutation check protocol. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub(crate) struct GrandProductChallenge { /// Randomness used to combine multiple columns into one. pub(crate) beta: T, /// Random offset that's added to the beta-reduced column values. 
pub(crate) gamma: T, } impl GrandProductChallenge { pub(crate) fn combine<'a, FE, P, T: IntoIterator, const D2: usize>( &self, terms: T, ) -> P where FE: FieldExtension, P: PackedField, T::IntoIter: DoubleEndedIterator, { reduce_with_powers(terms, FE::from_basefield(self.beta)) + FE::from_basefield(self.gamma) } } impl GrandProductChallenge { pub(crate) fn combine_circuit, const D: usize>( &self, builder: &mut CircuitBuilder, terms: &[ExtensionTarget], ) -> ExtensionTarget { let reduced = reduce_with_powers_ext_circuit(builder, terms, self.beta); let gamma = builder.convert_to_ext(self.gamma); builder.add_extension(reduced, gamma) } } impl GrandProductChallenge { pub(crate) fn combine_base_circuit, const D: usize>( &self, builder: &mut CircuitBuilder, terms: &[Target], ) -> Target { let reduced = reduce_with_powers_circuit(builder, terms, self.beta); builder.add(reduced, self.gamma) } } /// Like `PermutationChallenge`, but with `num_challenges` copies to boost soundness. #[derive(Clone, Eq, PartialEq, Debug)] pub struct GrandProductChallengeSet { pub(crate) challenges: Vec>, } impl GrandProductChallengeSet { pub(crate) fn to_buffer(&self, buffer: &mut Vec) -> IoResult<()> { buffer.write_usize(self.challenges.len())?; for challenge in &self.challenges { buffer.write_target(challenge.beta)?; buffer.write_target(challenge.gamma)?; } Ok(()) } pub(crate) fn from_buffer(buffer: &mut Buffer) -> IoResult { let length = buffer.read_usize()?; let mut challenges = Vec::with_capacity(length); for _ in 0..length { challenges.push(GrandProductChallenge { beta: buffer.read_target()?, gamma: buffer.read_target()?, }); } Ok(GrandProductChallengeSet { challenges }) } } fn get_grand_product_challenge>( challenger: &mut Challenger, ) -> GrandProductChallenge { let beta = challenger.get_challenge(); let gamma = challenger.get_challenge(); GrandProductChallenge { beta, gamma } } pub(crate) fn get_grand_product_challenge_set>( challenger: &mut Challenger, num_challenges: usize, ) -> 
GrandProductChallengeSet { let challenges = (0..num_challenges) .map(|_| get_grand_product_challenge(challenger)) .collect(); GrandProductChallengeSet { challenges } } fn get_grand_product_challenge_target< F: RichField + Extendable, H: AlgebraicHasher, const D: usize, >( builder: &mut CircuitBuilder, challenger: &mut RecursiveChallenger, ) -> GrandProductChallenge { let beta = challenger.get_challenge(builder); let gamma = challenger.get_challenge(builder); GrandProductChallenge { beta, gamma } } pub(crate) fn get_grand_product_challenge_set_target< F: RichField + Extendable, H: AlgebraicHasher, const D: usize, >( builder: &mut CircuitBuilder, challenger: &mut RecursiveChallenger, num_challenges: usize, ) -> GrandProductChallengeSet { let challenges = (0..num_challenges) .map(|_| get_grand_product_challenge_target(builder, challenger)) .collect(); GrandProductChallengeSet { challenges } } /// Generates all the cross-table lookup data, for all tables. /// - `trace_poly_values` corresponds to the trace values for all tables. /// - `cross_table_lookups` corresponds to all the cross-table lookups, i.e. the looked and looking tables, as described in `CrossTableLookup`. /// - `ctl_challenges` corresponds to the challenges used for CTLs. /// For each `CrossTableLookup`, and each looking/looked table, the partial products for the CTL are computed, and added to the said table's `CtlZData`. 
pub(crate) fn cross_table_lookup_data( trace_poly_values: &[Vec>; NUM_TABLES], cross_table_lookups: &[CrossTableLookup], ctl_challenges: &GrandProductChallengeSet, ) -> [CtlData; NUM_TABLES] { let mut ctl_data_per_table = [0; NUM_TABLES].map(|_| CtlData::default()); for CrossTableLookup { looking_tables, looked_table, } in cross_table_lookups { log::debug!("Processing CTL for {:?}", looked_table.table); for &challenge in &ctl_challenges.challenges { let zs_looking = looking_tables.iter().map(|table| { partial_sums( &trace_poly_values[table.table as usize], &table.columns, &table.filter, challenge, ) }); let z_looked = partial_sums( &trace_poly_values[looked_table.table as usize], &looked_table.columns, &looked_table.filter, challenge, ); for (table, z) in looking_tables.iter().zip(zs_looking) { ctl_data_per_table[table.table as usize] .zs_columns .push(CtlZData { z, challenge, columns: table.columns.clone(), filter: table.filter.clone(), }); } ctl_data_per_table[looked_table.table as usize] .zs_columns .push(CtlZData { z: z_looked, challenge, columns: looked_table.columns.clone(), filter: looked_table.filter.clone(), }); } } ctl_data_per_table } /// Computes the cross-table lookup partial sums for one table and given column linear combinations. /// `trace` represents the trace values for the given table. /// `columns` are all the column linear combinations to evaluate. /// `filter_column` is a column linear combination used to determine whether a row should be selected. /// `challenge` is a cross-table lookup challenge. /// The initial sum `s` is 0. /// For each row, if the `filter_column` evaluates to 1, then the rows is selected. All the column linear combinations are evaluated at said row. All those evaluations are combined using the challenge to get a value `v`. /// The sum is updated: `s += 1/v`, and is pushed to the vector of partial sums. 
fn partial_sums<F: Field>(
    trace: &[PolynomialValues<F>],
    columns: &[Column<F>],
    filter: &Option<Filter<F>>,
    challenge: GrandProductChallenge<F>,
) -> PolynomialValues<F> {
    let degree = trace[0].len();
    let mut filters = Vec::with_capacity(degree);
    let mut res = Vec::with_capacity(degree);

    // The Z polynomial is computed upside down: we walk the rows from last to
    // first, so that after the final `reverse()` the complete sum sits on row 0.
    for i in (0..degree).rev() {
        // An absent filter means the CTL applies to every row, i.e. the filter
        // is identically 1. (This matches `eval_table`/`process_table` elsewhere
        // in this file, which use F::ONE when the filter is `None`.)
        let filter_val = if let Some(filter) = filter {
            filter.eval_table(trace, i)
        } else {
            F::ONE
        };
        if filter_val.is_one() {
            filters.push(true);
        } else {
            assert_eq!(filter_val, F::ZERO, "Non-binary filter?");
            filters.push(false);
        }

        let combined = if *filters.last().unwrap() {
            // Evaluate all column linear combinations on this row and fold them
            // with the challenge: v = gamma + sum_i beta^i * eval_i.
            let evals = columns
                .iter()
                .map(|c| c.eval_table(trace, i))
                .collect::<Vec<F>>();
            challenge.combine(evals.iter())
        } else {
            // Dummy value. Cannot be zero since it will be batch-inverted.
            F::ONE
        };
        res.push(combined);
    }

    // Invert all combined values in one batch; unselected rows hold the dummy ONE.
    res = F::batch_multiplicative_inverse(&res);

    // Accumulate the inverses into partial sums, skipping unselected rows.
    if !filters[0] {
        res[0] = F::ZERO;
    }
    for i in 1..degree {
        let mut cur_value = res[i - 1];
        if filters[i] {
            cur_value += res[i];
        }
        res[i] = cur_value;
    }

    res.reverse();
    res.into()
}

/// Data necessary to check the cross-table lookups of a given table.
#[derive(Clone)]
pub(crate) struct CtlCheckVars<'a, F, FE, P, const D2: usize>
where
    F: Field,
    FE: FieldExtension<D2, BaseField = F>,
    P: PackedField<Scalar = FE>,
{
    /// Evaluation of the trace polynomials at point `zeta`.
    pub(crate) local_z: P,
    /// Evaluation of the trace polynomials at point `g * zeta`.
    pub(crate) next_z: P,
    /// Cross-table lookup challenges.
    pub(crate) challenges: GrandProductChallenge<F>,
    /// Column linear combinations of the `CrossTableLookup`s.
    pub(crate) columns: &'a [Column<F>],
    /// Filter that evaluates to either 1 or 0.
    pub(crate) filter: &'a Option<Filter<F>>,
}

impl<'a, F: RichField + Extendable<D>, const D: usize>
    CtlCheckVars<'a, F, F::Extension, F::Extension, D>
{
    /// Extracts the `CtlCheckVars` for each STARK.
pub(crate) fn from_proofs>( proofs: &[StarkProofWithMetadata; NUM_TABLES], cross_table_lookups: &'a [CrossTableLookup], ctl_challenges: &'a GrandProductChallengeSet, num_lookup_columns: &[usize; NUM_TABLES], ) -> [Vec; NUM_TABLES] { // Get all cross-table lookup polynomial openings for each STARK proof. let mut ctl_zs = proofs .iter() .zip(num_lookup_columns) .map(|(p, &num_lookup)| { let openings = &p.proof.openings; let ctl_zs = openings.auxiliary_polys.iter().skip(num_lookup); let ctl_zs_next = openings.auxiliary_polys_next.iter().skip(num_lookup); ctl_zs.zip(ctl_zs_next) }) .collect::>(); // Put each cross-table lookup polynomial into the correct table data: if a CTL polynomial is extracted from looking/looked table t, then we add it to the `CtlCheckVars` of table t. let mut ctl_vars_per_table = [0; NUM_TABLES].map(|_| vec![]); for CrossTableLookup { looking_tables, looked_table, } in cross_table_lookups { for &challenges in &ctl_challenges.challenges { for table in looking_tables { let (looking_z, looking_z_next) = ctl_zs[table.table as usize].next().unwrap(); ctl_vars_per_table[table.table as usize].push(Self { local_z: *looking_z, next_z: *looking_z_next, challenges, columns: &table.columns, filter: &table.filter, }); } let (looked_z, looked_z_next) = ctl_zs[looked_table.table as usize].next().unwrap(); ctl_vars_per_table[looked_table.table as usize].push(Self { local_z: *looked_z, next_z: *looked_z_next, challenges, columns: &looked_table.columns, filter: &looked_table.filter, }); } } ctl_vars_per_table } } /// Checks the cross-table lookup Z polynomials for each table: /// - Checks that the CTL `Z` partial sums are correctly updated. /// - Checks that the final value of the CTL sum is the combination of all STARKs' CTL polynomials. /// CTL `Z` partial sums are upside down: the complete sum is on the first row, and /// the first term is on the last row. 
This allows the transition constraint to be: /// `combine(w) * (Z(w) - Z(gw)) = filter` where combine is called on the local row /// and not the next. This enables CTLs across two rows. pub(crate) fn eval_cross_table_lookup_checks( vars: &S::EvaluationFrame, ctl_vars: &[CtlCheckVars], consumer: &mut ConstraintConsumer

, ) where F: RichField + Extendable, FE: FieldExtension, P: PackedField, S: Stark, { let local_values = vars.get_local_values(); let next_values = vars.get_next_values(); for lookup_vars in ctl_vars { let CtlCheckVars { local_z, next_z, challenges, columns, filter, } = lookup_vars; // Compute all linear combinations on the current table, and combine them using the challenge. let evals = columns .iter() .map(|c| c.eval_with_next(local_values, next_values)) .collect::>(); let combined = challenges.combine(evals.iter()); let local_filter = if let Some(combin) = filter { combin.eval_filter(local_values, next_values) } else { P::ONES }; // Check value of `Z(g^(n-1))` consumer.constraint_last_row(*local_z * combined - local_filter); // Check `Z(w) = Z(gw) * (filter / combination)` consumer.constraint_transition((*local_z - *next_z) * combined - local_filter); } } /// Circuit version of `CtlCheckVars`. Data necessary to check the cross-table lookups of a given table. #[derive(Clone)] pub(crate) struct CtlCheckVarsTarget<'a, F: Field, const D: usize> { /// Evaluation of the trace polynomials at point `zeta`. pub(crate) local_z: ExtensionTarget, /// Evaluation of the trace polynomials at point `g * zeta`. pub(crate) next_z: ExtensionTarget, /// Cross-table lookup challenges. pub(crate) challenges: GrandProductChallenge, /// Column linear combinations of the `CrossTableLookup`s. pub(crate) columns: &'a [Column], /// Filter that evaluates to either 1 or 0. pub(crate) filter: &'a Option>, } impl<'a, F: Field, const D: usize> CtlCheckVarsTarget<'a, F, D> { /// Circuit version of `from_proofs`. Extracts the `CtlCheckVarsTarget` for each STARK. pub(crate) fn from_proof( table: Table, proof: &StarkProofTarget, cross_table_lookups: &'a [CrossTableLookup], ctl_challenges: &'a GrandProductChallengeSet, num_lookup_columns: usize, ) -> Vec { // Get all cross-table lookup polynomial openings for each STARK proof. 
let mut ctl_zs = { let openings = &proof.openings; let ctl_zs = openings.auxiliary_polys.iter().skip(num_lookup_columns); let ctl_zs_next = openings .auxiliary_polys_next .iter() .skip(num_lookup_columns); ctl_zs.zip(ctl_zs_next) }; // Put each cross-table lookup polynomial into the correct table data: if a CTL polynomial is extracted from looking/looked table t, then we add it to the `CtlCheckVars` of table t. let mut ctl_vars = vec![]; for CrossTableLookup { looking_tables, looked_table, } in cross_table_lookups { for &challenges in &ctl_challenges.challenges { for looking_table in looking_tables { if looking_table.table == table { let (looking_z, looking_z_next) = ctl_zs.next().unwrap(); ctl_vars.push(Self { local_z: *looking_z, next_z: *looking_z_next, challenges, columns: &looking_table.columns, filter: &looking_table.filter, }); } } if looked_table.table == table { let (looked_z, looked_z_next) = ctl_zs.next().unwrap(); ctl_vars.push(Self { local_z: *looked_z, next_z: *looked_z_next, challenges, columns: &looked_table.columns, filter: &looked_table.filter, }); } } } assert!(ctl_zs.next().is_none()); ctl_vars } } /// Circuit version of `eval_cross_table_lookup_checks`. Checks the cross-table lookup Z polynomials for each table: /// - Checks that the CTL `Z` partial sums are correctly updated. /// - Checks that the final value of the CTL sum is the combination of all STARKs' CTL polynomials. /// CTL `Z` partial sums are upside down: the complete sum is on the first row, and /// the first term is on the last row. This allows the transition constraint to be: /// `combine(w) * (Z(w) - Z(gw)) = filter` where combine is called on the local row /// and not the next. This enables CTLs across two rows. 
pub(crate) fn eval_cross_table_lookup_checks_circuit< S: Stark, F: RichField + Extendable, const D: usize, >( builder: &mut CircuitBuilder, vars: &S::EvaluationFrameTarget, ctl_vars: &[CtlCheckVarsTarget], consumer: &mut RecursiveConstraintConsumer, ) { let local_values = vars.get_local_values(); let next_values = vars.get_next_values(); for lookup_vars in ctl_vars { let CtlCheckVarsTarget { local_z, next_z, challenges, columns, filter, } = lookup_vars; let one = builder.one_extension(); let local_filter = if let Some(combin) = filter { combin.eval_filter_circuit(builder, local_values, next_values) } else { one }; // Compute all linear combinations on the current table, and combine them using the challenge. let evals = columns .iter() .map(|c| c.eval_with_next_circuit(builder, local_values, next_values)) .collect::>(); let combined = challenges.combine_circuit(builder, &evals); // Check value of `Z(g^(n-1))` let last_row = builder.mul_sub_extension(*local_z, combined, local_filter); consumer.constraint_last_row(builder, last_row); // Check `Z(w) = Z(gw) * (filter / combination)` let z_diff = builder.sub_extension(*local_z, *next_z); let lhs = builder.mul_extension(combined, z_diff); let transition = builder.sub_extension(lhs, local_filter); consumer.constraint_transition(builder, transition); } } /// Verifies all cross-table lookups. pub(crate) fn verify_cross_table_lookups, const D: usize>( cross_table_lookups: &[CrossTableLookup], ctl_zs_first: [Vec; NUM_TABLES], ctl_extra_looking_sums: Vec>, config: &StarkConfig, ) -> Result<()> { let mut ctl_zs_openings = ctl_zs_first.iter().map(|v| v.iter()).collect::>(); for ( index, CrossTableLookup { looking_tables, looked_table, }, ) in cross_table_lookups.iter().enumerate() { // Get elements looking into `looked_table` that are not associated to any STARK. 
let extra_sum_vec = &ctl_extra_looking_sums[looked_table.table as usize]; for c in 0..config.num_challenges { // Compute the combination of all looking table CTL polynomial openings. let looking_zs_sum = looking_tables .iter() .map(|table| *ctl_zs_openings[table.table as usize].next().unwrap()) .sum::() + extra_sum_vec[c]; // Get the looked table CTL polynomial opening. let looked_z = *ctl_zs_openings[looked_table.table as usize].next().unwrap(); // Ensure that the combination of looking table openings is equal to the looked table opening. ensure!( looking_zs_sum == looked_z, "Cross-table lookup {:?} verification failed.", index ); } } debug_assert!(ctl_zs_openings.iter_mut().all(|iter| iter.next().is_none())); Ok(()) } /// Circuit version of `verify_cross_table_lookups`. Verifies all cross-table lookups. pub(crate) fn verify_cross_table_lookups_circuit, const D: usize>( builder: &mut CircuitBuilder, cross_table_lookups: Vec>, ctl_zs_first: [Vec; NUM_TABLES], ctl_extra_looking_sums: Vec>, inner_config: &StarkConfig, ) { let mut ctl_zs_openings = ctl_zs_first.iter().map(|v| v.iter()).collect::>(); for CrossTableLookup { looking_tables, looked_table, } in cross_table_lookups.into_iter() { // Get elements looking into `looked_table` that are not associated to any STARK. let extra_sum_vec = &ctl_extra_looking_sums[looked_table.table as usize]; for c in 0..inner_config.num_challenges { // Compute the combination of all looking table CTL polynomial openings. let mut looking_zs_sum = builder.add_many( looking_tables .iter() .map(|table| *ctl_zs_openings[table.table as usize].next().unwrap()), ); looking_zs_sum = builder.add(looking_zs_sum, extra_sum_vec[c]); // Get the looked table CTL polynomial opening. let looked_z = *ctl_zs_openings[looked_table.table as usize].next().unwrap(); // Verify that the combination of looking table openings is equal to the looked table opening. 
builder.connect(looked_z, looking_zs_sum); } } debug_assert!(ctl_zs_openings.iter_mut().all(|iter| iter.next().is_none())); } #[cfg(test)] pub(crate) mod testutils { use std::collections::HashMap; use plonky2::field::polynomial::PolynomialValues; use plonky2::field::types::Field; use crate::all_stark::Table; use crate::cross_table_lookup::{CrossTableLookup, TableWithColumns}; type MultiSet = HashMap, Vec<(Table, usize)>>; /// Check that the provided traces and cross-table lookups are consistent. pub(crate) fn check_ctls( trace_poly_values: &[Vec>], cross_table_lookups: &[CrossTableLookup], extra_memory_looking_values: &[Vec], ) { for (i, ctl) in cross_table_lookups.iter().enumerate() { check_ctl(trace_poly_values, ctl, i, extra_memory_looking_values); } } fn check_ctl( trace_poly_values: &[Vec>], ctl: &CrossTableLookup, ctl_index: usize, extra_memory_looking_values: &[Vec], ) { let CrossTableLookup { looking_tables, looked_table, } = ctl; // Maps `m` with `(table, i) in m[row]` iff the `i`-th row of `table` is equal to `row` and // the filter is 1. Without default values, the CTL check holds iff `looking_multiset == looked_multiset`. let mut looking_multiset = MultiSet::::new(); let mut looked_multiset = MultiSet::::new(); for table in looking_tables { process_table(trace_poly_values, table, &mut looking_multiset); } process_table(trace_poly_values, looked_table, &mut looked_multiset); // Extra looking values for memory if ctl_index == Table::Memory as usize { for row in extra_memory_looking_values.iter() { // The table and the row index don't matter here, as we just want to enforce // that the special extra values do appear when looking against the Memory table. looking_multiset .entry(row.to_vec()) .or_default() .push((Table::Cpu, 0)); } } let empty = &vec![]; // Check that every row in the looking tables appears in the looked table the same number of times. 
for (row, looking_locations) in &looking_multiset { let looked_locations = looked_multiset.get(row).unwrap_or(empty); check_locations(looking_locations, looked_locations, ctl_index, row); } // Check that every row in the looked tables appears in the looked table the same number of times. for (row, looked_locations) in &looked_multiset { let looking_locations = looking_multiset.get(row).unwrap_or(empty); check_locations(looking_locations, looked_locations, ctl_index, row); } } fn process_table( trace_poly_values: &[Vec>], table: &TableWithColumns, multiset: &mut MultiSet, ) { let trace = &trace_poly_values[table.table as usize]; for i in 0..trace[0].len() { let filter = if let Some(combin) = &table.filter { combin.eval_table(trace, i) } else { F::ONE }; if filter.is_one() { let row = table .columns .iter() .map(|c| c.eval_table(trace, i)) .collect::>(); multiset.entry(row).or_default().push((table.table, i)); } else { assert_eq!(filter, F::ZERO, "Non-binary filter?") } } } fn check_locations( looking_locations: &[(Table, usize)], looked_locations: &[(Table, usize)], ctl_index: usize, row: &[F], ) { if looking_locations.len() != looked_locations.len() { panic!( "CTL #{ctl_index}:\n\ Row {row:?} is present {l0} times in the looking tables, but {l1} times in the looked table.\n\ Looking locations (Table, Row index): {looking_locations:?}.\n\ Looked locations (Table, Row index): {looked_locations:?}.", l0 = looking_locations.len(), l1 = looked_locations.len(), ); } } }