Mirror of https://github.com/logos-storage/plonky2.git (synced 2026-01-07 08:13:11 +00:00)

Reorganize lookup / ctl modules (#1495)

* Reorganize lookup / ctl modules
* Apply review

This commit is contained in:
parent e502a0dfb1
commit 6357963654
@@ -17,9 +17,9 @@ use crate::all_stark::Table;
use crate::arithmetic::columns::{NUM_SHARED_COLS, RANGE_COUNTER, RC_FREQUENCIES, SHARED_COLS};
use crate::arithmetic::{addcy, byte, columns, divmod, modular, mul, Operation};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, Filter, TableWithColumns};
use crate::cross_table_lookup::TableWithColumns;
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::lookup::Lookup;
use crate::lookup::{Column, Filter, Lookup};
use crate::stark::Stark;

/// Creates a vector of `Columns` to link the 16-bit columns of the arithmetic table,

@@ -45,9 +45,8 @@ use crate::byte_packing::columns::{
    NUM_COLUMNS, RANGE_COUNTER, RC_FREQUENCIES, TIMESTAMP,
};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::lookup::Lookup;
use crate::lookup::{Column, Filter, Lookup};
use crate::stark::Stark;
use crate::witness::memory::MemoryAddress;

@@ -20,8 +20,9 @@ use crate::cpu::{
    byte_unpacking, clock, contextops, control_flow, decode, dup_swap, gas, jumps, membus, memio,
    modfp254, pc, push0, shift, simple_logic, stack, syscalls_exceptions,
};
use crate::cross_table_lookup::{Column, Filter, TableWithColumns};
use crate::cross_table_lookup::TableWithColumns;
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::lookup::{Column, Filter, Lookup};
use crate::memory::segments::Segment;
use crate::memory::{NUM_CHANNELS, VALUE_LIMBS};
use crate::stark::Stark;

@@ -56,373 +56,13 @@ use crate::all_stark::Table;
use crate::config::StarkConfig;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::lookup::{
    eval_helper_columns, eval_helper_columns_circuit, get_helper_cols, Column, ColumnFilter,
    Filter, GrandProductChallenge,
};
use crate::proof::{StarkProofTarget, StarkProofWithMetadata};
use crate::stark::Stark;
/// Represents two linear combinations of columns, corresponding to the current and next row values.
/// Each linear combination is represented as:
/// - a vector of `(usize, F)` corresponding to the column number and the associated multiplicand
/// - the constant of the linear combination.
#[derive(Clone, Debug)]
pub(crate) struct Column<F: Field> {
    linear_combination: Vec<(usize, F)>,
    next_row_linear_combination: Vec<(usize, F)>,
    constant: F,
}

impl<F: Field> Column<F> {
    /// Returns the representation of a single column in the current row.
    pub(crate) fn single(c: usize) -> Self {
        Self {
            linear_combination: vec![(c, F::ONE)],
            next_row_linear_combination: vec![],
            constant: F::ZERO,
        }
    }

    /// Returns multiple single columns in the current row.
    pub(crate) fn singles<I: IntoIterator<Item = impl Borrow<usize>>>(
        cs: I,
    ) -> impl Iterator<Item = Self> {
        cs.into_iter().map(|c| Self::single(*c.borrow()))
    }

    /// Returns the representation of a single column in the next row.
    pub(crate) fn single_next_row(c: usize) -> Self {
        Self {
            linear_combination: vec![],
            next_row_linear_combination: vec![(c, F::ONE)],
            constant: F::ZERO,
        }
    }

    /// Returns multiple single columns for the next row.
    pub(crate) fn singles_next_row<I: IntoIterator<Item = impl Borrow<usize>>>(
        cs: I,
    ) -> impl Iterator<Item = Self> {
        cs.into_iter().map(|c| Self::single_next_row(*c.borrow()))
    }

    /// Returns a linear combination corresponding to a constant.
    pub(crate) fn constant(constant: F) -> Self {
        Self {
            linear_combination: vec![],
            next_row_linear_combination: vec![],
            constant,
        }
    }

    /// Returns a linear combination corresponding to 0.
    pub(crate) fn zero() -> Self {
        Self::constant(F::ZERO)
    }

    /// Returns a linear combination corresponding to 1.
    pub(crate) fn one() -> Self {
        Self::constant(F::ONE)
    }

    /// Given an iterator of `(usize, F)` and a constant, returns the associated linear combination of columns for the current row.
    pub(crate) fn linear_combination_with_constant<I: IntoIterator<Item = (usize, F)>>(
        iter: I,
        constant: F,
    ) -> Self {
        let v = iter.into_iter().collect::<Vec<_>>();
        assert!(!v.is_empty());
        debug_assert_eq!(
            v.iter().map(|(c, _)| c).unique().count(),
            v.len(),
            "Duplicate columns."
        );
        Self {
            linear_combination: v,
            next_row_linear_combination: vec![],
            constant,
        }
    }

    /// Given an iterator of `(usize, F)` and a constant, returns the associated linear combination of columns for the current and the next rows.
    pub(crate) fn linear_combination_and_next_row_with_constant<
        I: IntoIterator<Item = (usize, F)>,
    >(
        iter: I,
        next_row_iter: I,
        constant: F,
    ) -> Self {
        let v = iter.into_iter().collect::<Vec<_>>();
        let next_row_v = next_row_iter.into_iter().collect::<Vec<_>>();

        assert!(!v.is_empty() || !next_row_v.is_empty());
        debug_assert_eq!(
            v.iter().map(|(c, _)| c).unique().count(),
            v.len(),
            "Duplicate columns."
        );
        debug_assert_eq!(
            next_row_v.iter().map(|(c, _)| c).unique().count(),
            next_row_v.len(),
            "Duplicate columns."
        );

        Self {
            linear_combination: v,
            next_row_linear_combination: next_row_v,
            constant,
        }
    }

    /// Returns a linear combination of columns, with no additional constant.
    pub(crate) fn linear_combination<I: IntoIterator<Item = (usize, F)>>(iter: I) -> Self {
        Self::linear_combination_with_constant(iter, F::ZERO)
    }

    /// Given an iterator of columns (c_0, ..., c_n) containing bits in little endian order:
    /// returns the representation of c_0 + 2 * c_1 + ... + 2^n * c_n.
    pub(crate) fn le_bits<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(F::TWO.powers()))
    }

    /// Given an iterator of columns (c_0, ..., c_n) containing bits in little endian order:
    /// returns the representation of c_0 + 2 * c_1 + ... + 2^n * c_n + k where `k` is an
    /// additional constant.
    pub(crate) fn le_bits_with_constant<I: IntoIterator<Item = impl Borrow<usize>>>(
        cs: I,
        constant: F,
    ) -> Self {
        Self::linear_combination_with_constant(
            cs.into_iter().map(|c| *c.borrow()).zip(F::TWO.powers()),
            constant,
        )
    }

    /// Given an iterator of columns (c_0, ..., c_n) containing bytes in little endian order:
    /// returns the representation of c_0 + 256 * c_1 + ... + 256^n * c_n.
    pub(crate) fn le_bytes<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(
            cs.into_iter()
                .map(|c| *c.borrow())
                .zip(F::from_canonical_u16(256).powers()),
        )
    }

    /// Given an iterator of columns, returns the representation of their sum.
    pub(crate) fn sum<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(repeat(F::ONE)))
    }

    /// Given the column values for the current row, returns the evaluation of the linear combination.
    pub(crate) fn eval<FE, P, const D: usize>(&self, v: &[P]) -> P
    where
        FE: FieldExtension<D, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        self.linear_combination
            .iter()
            .map(|&(c, f)| v[c] * FE::from_basefield(f))
            .sum::<P>()
            + FE::from_basefield(self.constant)
    }

    /// Given the column values for the current and next rows, evaluates the current and next linear combinations and returns their sum.
    pub(crate) fn eval_with_next<FE, P, const D: usize>(&self, v: &[P], next_v: &[P]) -> P
    where
        FE: FieldExtension<D, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        self.linear_combination
            .iter()
            .map(|&(c, f)| v[c] * FE::from_basefield(f))
            .sum::<P>()
            + self
                .next_row_linear_combination
                .iter()
                .map(|&(c, f)| next_v[c] * FE::from_basefield(f))
                .sum::<P>()
            + FE::from_basefield(self.constant)
    }

    /// Evaluate on a row of a table given in column-major form.
    pub(crate) fn eval_table(&self, table: &[PolynomialValues<F>], row: usize) -> F {
        let mut res = self
            .linear_combination
            .iter()
            .map(|&(c, f)| table[c].values[row] * f)
            .sum::<F>()
            + self.constant;

        // If we access the next row at the last row, for sanity, we consider the next row's values to be 0.
        // If CTLs are correctly written, the filter should be 0 in that case anyway.
        if !self.next_row_linear_combination.is_empty() && row < table[0].values.len() - 1 {
            res += self
                .next_row_linear_combination
                .iter()
                .map(|&(c, f)| table[c].values[row + 1] * f)
                .sum::<F>();
        }

        res
    }

    /// Evaluates the column on all rows.
    pub(crate) fn eval_all_rows(&self, table: &[PolynomialValues<F>]) -> Vec<F> {
        let length = table[0].len();
        (0..length)
            .map(|row| self.eval_table(table, row))
            .collect::<Vec<F>>()
    }

    /// Circuit version of `eval`: Given a row's targets, returns their linear combination.
    pub(crate) fn eval_circuit<const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        v: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D>
    where
        F: RichField + Extendable<D>,
    {
        let pairs = self
            .linear_combination
            .iter()
            .map(|&(c, f)| {
                (
                    v[c],
                    builder.constant_extension(F::Extension::from_basefield(f)),
                )
            })
            .collect::<Vec<_>>();
        let constant = builder.constant_extension(F::Extension::from_basefield(self.constant));
        builder.inner_product_extension(F::ONE, constant, pairs)
    }

    /// Circuit version of `eval_with_next`:
    /// Given the targets of the current and next row, returns the sum of their linear combinations.
    pub(crate) fn eval_with_next_circuit<const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        v: &[ExtensionTarget<D>],
        next_v: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D>
    where
        F: RichField + Extendable<D>,
    {
        let mut pairs = self
            .linear_combination
            .iter()
            .map(|&(c, f)| {
                (
                    v[c],
                    builder.constant_extension(F::Extension::from_basefield(f)),
                )
            })
            .collect::<Vec<_>>();
        let next_row_pairs = self.next_row_linear_combination.iter().map(|&(c, f)| {
            (
                next_v[c],
                builder.constant_extension(F::Extension::from_basefield(f)),
            )
        });
        pairs.extend(next_row_pairs);
        let constant = builder.constant_extension(F::Extension::from_basefield(self.constant));
        builder.inner_product_extension(F::ONE, constant, pairs)
    }
}
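For intuition, here is a minimal self-contained sketch of the arithmetic these `Column` helpers encode, using plain u64 values rather than the crate's `Field` and `PackedField` types; every name below is a hypothetical stand-in, not part of the crate.

// Toy model of a `Column`: a linear combination of row values plus a constant.
struct ToyColumn {
    linear_combination: Vec<(usize, u64)>, // (column index, coefficient)
    constant: u64,
}

impl ToyColumn {
    // Mirrors `Column::eval`: dot product of selected row values with coefficients.
    fn eval(&self, row: &[u64]) -> u64 {
        self.linear_combination
            .iter()
            .map(|&(c, f)| row[c] * f)
            .sum::<u64>()
            + self.constant
    }

    // Mirrors the value computed by `le_bits(cs)`: c_0 + 2*c_1 + ... + 2^n*c_n.
    fn le_bits(cs: &[usize]) -> Self {
        Self {
            linear_combination: cs.iter().enumerate().map(|(i, &c)| (c, 1u64 << i)).collect(),
            constant: 0,
        }
    }
}

fn main() {
    let row = [1u64, 0, 1, 1, 42];
    // Columns 0..4 hold bits in little-endian order: 1 + 0*2 + 1*4 + 1*8 = 13.
    assert_eq!(ToyColumn::le_bits(&[0, 1, 2, 3]).eval(&row), 13);
    // 2*row[4] + 5 = 89, the kind of combination `linear_combination_with_constant` builds.
    let col = ToyColumn {
        linear_combination: vec![(4, 2)],
        constant: 5,
    };
    assert_eq!(col.eval(&row), 89);
}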
/// Represents a CTL filter, which evaluates to 1 if the row must be considered for the CTL and 0 otherwise.
/// It's an arbitrary degree 2 combination of columns: `products` are the degree 2 terms, and `constants` are
/// the degree 1 terms.
#[derive(Clone, Debug)]
pub(crate) struct Filter<F: Field> {
    products: Vec<(Column<F>, Column<F>)>,
    constants: Vec<Column<F>>,
}

impl<F: Field> Filter<F> {
    pub(crate) fn new(products: Vec<(Column<F>, Column<F>)>, constants: Vec<Column<F>>) -> Self {
        Self {
            products,
            constants,
        }
    }

    /// Returns a filter made of a single column.
    pub(crate) fn new_simple(col: Column<F>) -> Self {
        Self {
            products: vec![],
            constants: vec![col],
        }
    }

    /// Given the column values for the current and next rows, evaluates the filter.
    pub(crate) fn eval_filter<FE, P, const D: usize>(&self, v: &[P], next_v: &[P]) -> P
    where
        FE: FieldExtension<D, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        self.products
            .iter()
            .map(|(col1, col2)| col1.eval_with_next(v, next_v) * col2.eval_with_next(v, next_v))
            .sum::<P>()
            + self
                .constants
                .iter()
                .map(|col| col.eval_with_next(v, next_v))
                .sum::<P>()
    }

    /// Circuit version of `eval_filter`:
    /// Given the column values for the current and next rows, evaluates the filter.
    pub(crate) fn eval_filter_circuit<const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        v: &[ExtensionTarget<D>],
        next_v: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D>
    where
        F: RichField + Extendable<D>,
    {
        let prods = self
            .products
            .iter()
            .map(|(col1, col2)| {
                let col1_eval = col1.eval_with_next_circuit(builder, v, next_v);
                let col2_eval = col2.eval_with_next_circuit(builder, v, next_v);
                builder.mul_extension(col1_eval, col2_eval)
            })
            .collect::<Vec<_>>();

        let consts = self
            .constants
            .iter()
            .map(|col| col.eval_with_next_circuit(builder, v, next_v))
            .collect::<Vec<_>>();

        let prods = builder.add_many_extension(prods);
        let consts = builder.add_many_extension(consts);
        builder.add_extension(prods, consts)
    }

    /// Evaluate on a row of a table given in column-major form.
    pub(crate) fn eval_table(&self, table: &[PolynomialValues<F>], row: usize) -> F {
        self.products
            .iter()
            .map(|(col1, col2)| col1.eval_table(table, row) * col2.eval_table(table, row))
            .sum::<F>()
            + self
                .constants
                .iter()
                .map(|col| col.eval_table(table, row))
                .sum()
    }

    pub(crate) fn eval_all_rows(&self, table: &[PolynomialValues<F>]) -> Vec<F> {
        let length = table[0].len();

        (0..length)
            .map(|row| self.eval_table(table, row))
            .collect::<Vec<F>>()
    }
}
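A small stand-alone sketch of the filter evaluation above (degree-2 products plus degree-1 single terms), over toy u64 values that stand in for already-evaluated `Column`s; not the crate's types or API.

// Toy model of `Filter::eval_filter`: sum of pairwise products plus sum of constants.
fn eval_filter(products: &[(u64, u64)], constants: &[u64]) -> u64 {
    products.iter().map(|&(a, b)| a * b).sum::<u64>() + constants.iter().sum::<u64>()
}

fn main() {
    // `new_simple(col)` corresponds to products = [] and constants = [col]:
    assert_eq!(eval_filter(&[], &[1]), 1);
    assert_eq!(eval_filter(&[], &[0]), 0);
    // A row is selected only when both binary flags are set:
    assert_eq!(eval_filter(&[(1, 1)], &[]), 1);
    assert_eq!(eval_filter(&[(1, 0)], &[]), 0);
}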
/// An alias for `usize`, to represent the index of a STARK table in a multi-STARK setting.
pub(crate) type TableIdx = usize;

@@ -578,52 +218,6 @@ impl<'a, F: Field> CtlData<'a, F> {
    }
}

/// Randomness for a single instance of a permutation check protocol.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub(crate) struct GrandProductChallenge<T: Copy + Eq + PartialEq + Debug> {
    /// Randomness used to combine multiple columns into one.
    pub(crate) beta: T,
    /// Random offset that's added to the beta-reduced column values.
    pub(crate) gamma: T,
}

impl<F: Field> GrandProductChallenge<F> {
    pub(crate) fn combine<'a, FE, P, T: IntoIterator<Item = &'a P>, const D2: usize>(
        &self,
        terms: T,
    ) -> P
    where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>,
        T::IntoIter: DoubleEndedIterator,
    {
        reduce_with_powers(terms, FE::from_basefield(self.beta)) + FE::from_basefield(self.gamma)
    }
}
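A sketch of the random linear combination that `combine` performs: the terms are reduced with powers of `beta` and offset by `gamma`. The toy code below works modulo the Goldilocks prime that plonky2 uses by default; it is not the crate's implementation, and the exact power ordering inside `reduce_with_powers` is not material here.

// Toy version of combine(terms) = sum_i terms[i] * beta^i + gamma (mod P).
const P: u128 = 0xFFFF_FFFF_0000_0001; // Goldilocks prime 2^64 - 2^32 + 1

fn combine(terms: &[u128], beta: u128, gamma: u128) -> u128 {
    let mut acc = 0u128;
    let mut pow = 1u128;
    for &t in terms {
        acc = (acc + (t % P) * pow) % P; // add t_i * beta^i
        pow = pow * beta % P;
    }
    (acc + gamma) % P
}

fn main() {
    let (beta, gamma) = (7u128, 11u128);
    // combine([a, b]) = a + beta*b + gamma
    assert_eq!(combine(&[3, 5], beta, gamma), (3 + 7 * 5 + 11) % P);
}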
impl GrandProductChallenge<Target> {
    pub(crate) fn combine_circuit<F: RichField + Extendable<D>, const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        terms: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D> {
        let reduced = reduce_with_powers_ext_circuit(builder, terms, self.beta);
        let gamma = builder.convert_to_ext(self.gamma);
        builder.add_extension(reduced, gamma)
    }
}

impl GrandProductChallenge<Target> {
    pub(crate) fn combine_base_circuit<F: RichField + Extendable<D>, const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        terms: &[Target],
    ) -> Target {
        let reduced = reduce_with_powers_circuit(builder, terms, self.beta);
        builder.add(reduced, self.gamma)
    }
}

/// Like `PermutationChallenge`, but with `num_challenges` copies to boost soundness.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct GrandProductChallengeSet<T: Copy + Eq + PartialEq + Debug> {

@@ -805,101 +399,6 @@ pub(crate) fn cross_table_lookup_data<'a, F: RichField, const D: usize, const N:
    ctl_data_per_table
}

type ColumnFilter<'a, F> = (&'a [Column<F>], &'a Option<Filter<F>>);

/// Given a STARK's trace, and the data associated to one lookup (either CTL or range check),
/// returns the associated helper polynomials.
pub(crate) fn get_helper_cols<F: Field>(
    trace: &[PolynomialValues<F>],
    degree: usize,
    columns_filters: &[ColumnFilter<F>],
    challenge: GrandProductChallenge<F>,
    constraint_degree: usize,
) -> Vec<PolynomialValues<F>> {
    let num_helper_columns = ceil_div_usize(columns_filters.len(), constraint_degree - 1);

    let mut helper_columns = Vec::with_capacity(num_helper_columns);

    let mut filter_index = 0;
    for mut cols_filts in &columns_filters.iter().chunks(constraint_degree - 1) {
        let (first_col, first_filter) = cols_filts.next().unwrap();

        let mut filter_col = Vec::with_capacity(degree);
        let first_combined = (0..degree)
            .map(|d| {
                let f = if let Some(filter) = first_filter {
                    let f = filter.eval_table(trace, d);
                    filter_col.push(f);
                    f
                } else {
                    filter_col.push(F::ONE);
                    F::ONE
                };
                if f.is_one() {
                    let evals = first_col
                        .iter()
                        .map(|c| c.eval_table(trace, d))
                        .collect::<Vec<F>>();
                    challenge.combine(evals.iter())
                } else {
                    assert_eq!(f, F::ZERO, "Non-binary filter?");
                    // Dummy value. Cannot be zero since it will be batch-inverted.
                    F::ONE
                }
            })
            .collect::<Vec<F>>();

        let mut acc = F::batch_multiplicative_inverse(&first_combined);
        for d in 0..degree {
            if filter_col[d].is_zero() {
                acc[d] = F::ZERO;
            }
        }

        for (col, filt) in cols_filts {
            let mut filter_col = Vec::with_capacity(degree);
            let mut combined = (0..degree)
                .map(|d| {
                    let f = if let Some(filter) = filt {
                        let f = filter.eval_table(trace, d);
                        filter_col.push(f);
                        f
                    } else {
                        filter_col.push(F::ONE);
                        F::ONE
                    };
                    if f.is_one() {
                        let evals = col
                            .iter()
                            .map(|c| c.eval_table(trace, d))
                            .collect::<Vec<F>>();
                        challenge.combine(evals.iter())
                    } else {
                        assert_eq!(f, F::ZERO, "Non-binary filter?");
                        // Dummy value. Cannot be zero since it will be batch-inverted.
                        F::ONE
                    }
                })
                .collect::<Vec<F>>();

            combined = F::batch_multiplicative_inverse(&combined);

            for d in 0..degree {
                if filter_col[d].is_zero() {
                    combined[d] = F::ZERO;
                }
            }

            batch_add_inplace(&mut acc, &combined);
        }

        helper_columns.push(acc.into());
    }
    assert_eq!(helper_columns.len(), num_helper_columns);

    helper_columns
}
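What one entry of a helper column holds per row can be checked on toy numbers: for each (columns, filter) pair in a chunk it adds filter(row) / combine(columns(row)), where division is a field inverse. The sketch below uses Goldilocks arithmetic with a Fermat-inverse instead of the batch inversion the real code performs; all names are hypothetical.

const P: u128 = 0xFFFF_FFFF_0000_0001;

fn pow_mod(mut b: u128, mut e: u128) -> u128 {
    let mut acc = 1u128;
    b %= P;
    while e > 0 {
        if e & 1 == 1 { acc = acc * b % P; }
        b = b * b % P;
        e >>= 1;
    }
    acc
}

fn inv(x: u128) -> u128 { pow_mod(x, P - 2) } // Fermat's little theorem

// One helper-column entry: sum over the chunk of filter / combined value.
fn helper_entry(combined: &[u128], filters: &[u128]) -> u128 {
    combined.iter().zip(filters).map(|(&c, &f)| f * inv(c) % P).sum::<u128>() % P
}

fn main() {
    // Two looking column-sets in one chunk, both selected (filter = 1):
    let combined = [5u128, 9];
    let filters = [1u128, 1];
    let h = helper_entry(&combined, &filters);
    // h = 1/5 + 1/9, so h * 5 * 9 == 9 + 5 (mod P).
    assert_eq!(h * 5 % P * 9 % P, 14 % P);
}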
/// Computes helper columns and Z polynomials for all looking tables
/// of one cross-table lookup (i.e. for one looked table).
fn ctl_helper_zs_cols<F: Field, const N: usize>(

@@ -1115,61 +614,6 @@ impl<'a, F: RichField + Extendable<D>, const D: usize>
    }
}

/// Given data associated to a lookup (either a CTL or a range-check), check the associated helper polynomials.
pub(crate) fn eval_helper_columns<F, FE, P, const D: usize, const D2: usize>(
    filter: &[Option<Filter<F>>],
    columns: &[Vec<P>],
    local_values: &[P],
    next_values: &[P],
    helper_columns: &[P],
    constraint_degree: usize,
    challenges: &GrandProductChallenge<F>,
    consumer: &mut ConstraintConsumer<P>,
) where
    F: RichField + Extendable<D>,
    FE: FieldExtension<D2, BaseField = F>,
    P: PackedField<Scalar = FE>,
{
    if !helper_columns.is_empty() {
        for (j, chunk) in columns.chunks(constraint_degree - 1).enumerate() {
            let fs =
                &filter[(constraint_degree - 1) * j..(constraint_degree - 1) * j + chunk.len()];
            let h = helper_columns[j];

            match chunk.len() {
                2 => {
                    let combin0 = challenges.combine(&chunk[0]);
                    let combin1 = challenges.combine(chunk[1].iter());

                    let f0 = if let Some(filter0) = &fs[0] {
                        filter0.eval_filter(local_values, next_values)
                    } else {
                        P::ONES
                    };
                    let f1 = if let Some(filter1) = &fs[1] {
                        filter1.eval_filter(local_values, next_values)
                    } else {
                        P::ONES
                    };

                    consumer.constraint(combin1 * combin0 * h - f0 * combin1 - f1 * combin0);
                }
                1 => {
                    let combin = challenges.combine(&chunk[0]);
                    let f0 = if let Some(filter1) = &fs[0] {
                        filter1.eval_filter(local_values, next_values)
                    } else {
                        P::ONES
                    };
                    consumer.constraint(combin * h - f0);
                }

                _ => todo!("Allow other constraint degrees"),
            }
        }
    }
}
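The degree-3 identity enforced above for a chunk of two column-sets can be verified numerically: if h = f0/combin0 + f1/combin1, then combin1 * combin0 * h - f0 * combin1 - f1 * combin0 vanishes. A toy check over the Goldilocks field, with `inv` a Fermat inverse as in the previous sketch (not the crate's code):

const P: u128 = 0xFFFF_FFFF_0000_0001;

fn pow_mod(mut b: u128, mut e: u128) -> u128 {
    let mut acc = 1u128;
    b %= P;
    while e > 0 {
        if e & 1 == 1 { acc = acc * b % P; }
        b = b * b % P;
        e >>= 1;
    }
    acc
}

fn inv(x: u128) -> u128 { pow_mod(x, P - 2) }

fn main() {
    let (combin0, combin1) = (123u128, 456u128);
    let (f0, f1) = (1u128, 0u128); // binary filters
    let h = (f0 * inv(combin0) + f1 * inv(combin1)) % P;
    let lhs = combin1 * combin0 % P * h % P;
    let rhs = (f0 * combin1 + f1 * combin0) % P;
    assert_eq!(lhs, rhs); // the consumer constrains lhs - rhs to be 0
}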
/// Checks the cross-table lookup Z polynomials for each table:
/// - Checks that the CTL `Z` partial sums are correctly updated.
/// - Checks that the final value of the CTL sum is the combination of all STARKs' CTL polynomials.

@@ -1378,66 +822,6 @@ impl<'a, F: Field, const D: usize> CtlCheckVarsTarget<F, D> {
    }
}

/// Circuit version of `eval_helper_columns`.
/// Given data associated to a lookup (either a CTL or a range-check), check the associated helper polynomials.
pub(crate) fn eval_helper_columns_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    filter: &[Option<Filter<F>>],
    columns: &[Vec<ExtensionTarget<D>>],
    local_values: &[ExtensionTarget<D>],
    next_values: &[ExtensionTarget<D>],
    helper_columns: &[ExtensionTarget<D>],
    constraint_degree: usize,
    challenges: &GrandProductChallenge<Target>,
    consumer: &mut RecursiveConstraintConsumer<F, D>,
) {
    if !helper_columns.is_empty() {
        for (j, chunk) in columns.chunks(constraint_degree - 1).enumerate() {
            let fs =
                &filter[(constraint_degree - 1) * j..(constraint_degree - 1) * j + chunk.len()];
            let h = helper_columns[j];

            let one = builder.one_extension();
            match chunk.len() {
                2 => {
                    let combin0 = challenges.combine_circuit(builder, &chunk[0]);
                    let combin1 = challenges.combine_circuit(builder, &chunk[1]);

                    let f0 = if let Some(filter0) = &fs[0] {
                        filter0.eval_filter_circuit(builder, local_values, next_values)
                    } else {
                        one
                    };
                    let f1 = if let Some(filter1) = &fs[1] {
                        filter1.eval_filter_circuit(builder, local_values, next_values)
                    } else {
                        one
                    };

                    let constr = builder.mul_sub_extension(combin0, h, f0);
                    let constr = builder.mul_extension(constr, combin1);
                    let f1_constr = builder.mul_extension(f1, combin0);
                    let constr = builder.sub_extension(constr, f1_constr);

                    consumer.constraint(builder, constr);
                }
                1 => {
                    let combin = challenges.combine_circuit(builder, &chunk[0]);
                    let f0 = if let Some(filter1) = &fs[0] {
                        filter1.eval_filter_circuit(builder, local_values, next_values)
                    } else {
                        one
                    };
                    let constr = builder.mul_sub_extension(combin, h, f0);
                    consumer.constraint(builder, constr);
                }

                _ => todo!("Allow other constraint degrees"),
            }
        }
    }
}

/// Circuit version of `eval_cross_table_lookup_checks`. Checks the cross-table lookup Z polynomials for each table:
/// - Checks that the CTL `Z` partial sums are correctly updated.
/// - Checks that the final value of the CTL sum is the combination of all STARKs' CTL polynomials.

@@ -1,7 +1,7 @@
use plonky2::field::types::Field;

use crate::cross_table_lookup::Column;
use crate::keccak::keccak_stark::{NUM_INPUTS, NUM_ROUNDS};
use crate::lookup::Column;

/// A register which is set to 1 if we are in the `i`th round, otherwise 0.
pub(crate) const fn reg_step(i: usize) -> usize {

@@ -13,7 +13,6 @@ use plonky2::util::timing::TimingTree;

use super::columns::reg_input_limb;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::keccak::columns::{
    reg_a, reg_a_prime, reg_a_prime_prime, reg_a_prime_prime_0_0_bit, reg_a_prime_prime_prime,
@@ -24,6 +23,7 @@ use crate::keccak::logic::{
    andn, andn_gen, andn_gen_circuit, xor, xor3_gen, xor3_gen_circuit, xor_gen, xor_gen_circuit,
};
use crate::keccak::round_flags::{eval_round_flags, eval_round_flags_recursively};
use crate::lookup::{Column, Filter};
use crate::stark::Stark;
use crate::util::trace_rows_to_poly_values;

@@ -630,9 +630,8 @@ mod tests {

    use super::*;
    use crate::config::StarkConfig;
    use crate::cross_table_lookup::{
        CtlData, CtlZData, GrandProductChallenge, GrandProductChallengeSet,
    };
    use crate::cross_table_lookup::{CtlData, CtlZData, GrandProductChallengeSet};
    use crate::lookup::GrandProductChallenge;
    use crate::prover::prove_single_table;
    use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};

@@ -17,10 +17,9 @@ use plonky2_util::ceil_div_usize;

use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::kernel::keccak_util::keccakf_u32s;
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::keccak_sponge::columns::*;
use crate::lookup::Lookup;
use crate::lookup::{Column, Filter, Lookup};
use crate::stark::Stark;
use crate::witness::memory::MemoryAddress;

@@ -13,9 +13,9 @@ use plonky2::util::timing::TimingTree;
use plonky2_util::ceil_div_usize;

use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::logic::columns::NUM_COLUMNS;
use crate::lookup::{Column, Filter};
use crate::stark::Stark;
use crate::util::{limb_from_bits_le, limb_from_bits_le_recursive, trace_rows_to_poly_values};

@@ -1,3 +1,7 @@
use core::borrow::Borrow;
use core::fmt::Debug;
use core::iter::repeat;

use itertools::Itertools;
use num_bigint::BigUint;
use plonky2::field::batch_util::{batch_add_inplace, batch_multiply_inplace};
@@ -9,16 +13,381 @@ use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::plonk_common::{
    reduce_with_powers, reduce_with_powers_circuit, reduce_with_powers_ext_circuit,
};
use plonky2_util::ceil_div_usize;

use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{
    eval_helper_columns, eval_helper_columns_circuit, get_helper_cols, Column, Filter,
    GrandProductChallenge,
};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::stark::Stark;
/// Represents a filter, which evaluates to 1 if the row must be considered and 0 if it should be ignored.
/// It's an arbitrary degree 2 combination of columns: `products` are the degree 2 terms, and `constants` are
/// the degree 1 terms.
#[derive(Clone, Debug)]
pub(crate) struct Filter<F: Field> {
    products: Vec<(Column<F>, Column<F>)>,
    constants: Vec<Column<F>>,
}

impl<F: Field> Filter<F> {
    pub(crate) fn new(products: Vec<(Column<F>, Column<F>)>, constants: Vec<Column<F>>) -> Self {
        Self {
            products,
            constants,
        }
    }

    /// Returns a filter made of a single column.
    pub(crate) fn new_simple(col: Column<F>) -> Self {
        Self {
            products: vec![],
            constants: vec![col],
        }
    }

    /// Given the column values for the current and next rows, evaluates the filter.
    pub(crate) fn eval_filter<FE, P, const D: usize>(&self, v: &[P], next_v: &[P]) -> P
    where
        FE: FieldExtension<D, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        self.products
            .iter()
            .map(|(col1, col2)| col1.eval_with_next(v, next_v) * col2.eval_with_next(v, next_v))
            .sum::<P>()
            + self
                .constants
                .iter()
                .map(|col| col.eval_with_next(v, next_v))
                .sum::<P>()
    }

    /// Circuit version of `eval_filter`:
    /// Given the column values for the current and next rows, evaluates the filter.
    pub(crate) fn eval_filter_circuit<const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        v: &[ExtensionTarget<D>],
        next_v: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D>
    where
        F: RichField + Extendable<D>,
    {
        let prods = self
            .products
            .iter()
            .map(|(col1, col2)| {
                let col1_eval = col1.eval_with_next_circuit(builder, v, next_v);
                let col2_eval = col2.eval_with_next_circuit(builder, v, next_v);
                builder.mul_extension(col1_eval, col2_eval)
            })
            .collect::<Vec<_>>();

        let consts = self
            .constants
            .iter()
            .map(|col| col.eval_with_next_circuit(builder, v, next_v))
            .collect::<Vec<_>>();

        let prods = builder.add_many_extension(prods);
        let consts = builder.add_many_extension(consts);
        builder.add_extension(prods, consts)
    }

    /// Evaluate on a row of a table given in column-major form.
    pub(crate) fn eval_table(&self, table: &[PolynomialValues<F>], row: usize) -> F {
        self.products
            .iter()
            .map(|(col1, col2)| col1.eval_table(table, row) * col2.eval_table(table, row))
            .sum::<F>()
            + self
                .constants
                .iter()
                .map(|col| col.eval_table(table, row))
                .sum()
    }

    pub(crate) fn eval_all_rows(&self, table: &[PolynomialValues<F>]) -> Vec<F> {
        let length = table[0].len();

        (0..length)
            .map(|row| self.eval_table(table, row))
            .collect::<Vec<F>>()
    }
}
/// Represents two linear combinations of columns, corresponding to the current and next row values.
/// Each linear combination is represented as:
/// - a vector of `(usize, F)` corresponding to the column number and the associated multiplicand
/// - the constant of the linear combination.
#[derive(Clone, Debug)]
pub(crate) struct Column<F: Field> {
    linear_combination: Vec<(usize, F)>,
    next_row_linear_combination: Vec<(usize, F)>,
    constant: F,
}

impl<F: Field> Column<F> {
    /// Returns the representation of a single column in the current row.
    pub(crate) fn single(c: usize) -> Self {
        Self {
            linear_combination: vec![(c, F::ONE)],
            next_row_linear_combination: vec![],
            constant: F::ZERO,
        }
    }

    /// Returns multiple single columns in the current row.
    pub(crate) fn singles<I: IntoIterator<Item = impl Borrow<usize>>>(
        cs: I,
    ) -> impl Iterator<Item = Self> {
        cs.into_iter().map(|c| Self::single(*c.borrow()))
    }

    /// Returns the representation of a single column in the next row.
    pub(crate) fn single_next_row(c: usize) -> Self {
        Self {
            linear_combination: vec![],
            next_row_linear_combination: vec![(c, F::ONE)],
            constant: F::ZERO,
        }
    }

    /// Returns multiple single columns for the next row.
    pub(crate) fn singles_next_row<I: IntoIterator<Item = impl Borrow<usize>>>(
        cs: I,
    ) -> impl Iterator<Item = Self> {
        cs.into_iter().map(|c| Self::single_next_row(*c.borrow()))
    }

    /// Returns a linear combination corresponding to a constant.
    pub(crate) fn constant(constant: F) -> Self {
        Self {
            linear_combination: vec![],
            next_row_linear_combination: vec![],
            constant,
        }
    }

    /// Returns a linear combination corresponding to 0.
    pub(crate) fn zero() -> Self {
        Self::constant(F::ZERO)
    }

    /// Returns a linear combination corresponding to 1.
    pub(crate) fn one() -> Self {
        Self::constant(F::ONE)
    }

    /// Given an iterator of `(usize, F)` and a constant, returns the associated linear combination of columns for the current row.
    pub(crate) fn linear_combination_with_constant<I: IntoIterator<Item = (usize, F)>>(
        iter: I,
        constant: F,
    ) -> Self {
        let v = iter.into_iter().collect::<Vec<_>>();
        assert!(!v.is_empty());
        debug_assert_eq!(
            v.iter().map(|(c, _)| c).unique().count(),
            v.len(),
            "Duplicate columns."
        );
        Self {
            linear_combination: v,
            next_row_linear_combination: vec![],
            constant,
        }
    }

    /// Given an iterator of `(usize, F)` and a constant, returns the associated linear combination of columns for the current and the next rows.
    pub(crate) fn linear_combination_and_next_row_with_constant<
        I: IntoIterator<Item = (usize, F)>,
    >(
        iter: I,
        next_row_iter: I,
        constant: F,
    ) -> Self {
        let v = iter.into_iter().collect::<Vec<_>>();
        let next_row_v = next_row_iter.into_iter().collect::<Vec<_>>();

        assert!(!v.is_empty() || !next_row_v.is_empty());
        debug_assert_eq!(
            v.iter().map(|(c, _)| c).unique().count(),
            v.len(),
            "Duplicate columns."
        );
        debug_assert_eq!(
            next_row_v.iter().map(|(c, _)| c).unique().count(),
            next_row_v.len(),
            "Duplicate columns."
        );

        Self {
            linear_combination: v,
            next_row_linear_combination: next_row_v,
            constant,
        }
    }

    /// Returns a linear combination of columns, with no additional constant.
    pub(crate) fn linear_combination<I: IntoIterator<Item = (usize, F)>>(iter: I) -> Self {
        Self::linear_combination_with_constant(iter, F::ZERO)
    }

    /// Given an iterator of columns (c_0, ..., c_n) containing bits in little endian order:
    /// returns the representation of c_0 + 2 * c_1 + ... + 2^n * c_n.
    pub(crate) fn le_bits<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(F::TWO.powers()))
    }

    /// Given an iterator of columns (c_0, ..., c_n) containing bits in little endian order:
    /// returns the representation of c_0 + 2 * c_1 + ... + 2^n * c_n + k where `k` is an
    /// additional constant.
    pub(crate) fn le_bits_with_constant<I: IntoIterator<Item = impl Borrow<usize>>>(
        cs: I,
        constant: F,
    ) -> Self {
        Self::linear_combination_with_constant(
            cs.into_iter().map(|c| *c.borrow()).zip(F::TWO.powers()),
            constant,
        )
    }

    /// Given an iterator of columns (c_0, ..., c_n) containing bytes in little endian order:
    /// returns the representation of c_0 + 256 * c_1 + ... + 256^n * c_n.
    pub(crate) fn le_bytes<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(
            cs.into_iter()
                .map(|c| *c.borrow())
                .zip(F::from_canonical_u16(256).powers()),
        )
    }

    /// Given an iterator of columns, returns the representation of their sum.
    pub(crate) fn sum<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(repeat(F::ONE)))
    }

    /// Given the column values for the current row, returns the evaluation of the linear combination.
    pub(crate) fn eval<FE, P, const D: usize>(&self, v: &[P]) -> P
    where
        FE: FieldExtension<D, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        self.linear_combination
            .iter()
            .map(|&(c, f)| v[c] * FE::from_basefield(f))
            .sum::<P>()
            + FE::from_basefield(self.constant)
    }

    /// Given the column values for the current and next rows, evaluates the current and next linear combinations and returns their sum.
    pub(crate) fn eval_with_next<FE, P, const D: usize>(&self, v: &[P], next_v: &[P]) -> P
    where
        FE: FieldExtension<D, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        self.linear_combination
            .iter()
            .map(|&(c, f)| v[c] * FE::from_basefield(f))
            .sum::<P>()
            + self
                .next_row_linear_combination
                .iter()
                .map(|&(c, f)| next_v[c] * FE::from_basefield(f))
                .sum::<P>()
            + FE::from_basefield(self.constant)
    }

    /// Evaluate on a row of a table given in column-major form.
    pub(crate) fn eval_table(&self, table: &[PolynomialValues<F>], row: usize) -> F {
        let mut res = self
            .linear_combination
            .iter()
            .map(|&(c, f)| table[c].values[row] * f)
            .sum::<F>()
            + self.constant;

        // If we access the next row at the last row, for sanity, we consider the next row's values to be 0.
        // If the lookups are correctly written, the filter should be 0 in that case anyway.
        if !self.next_row_linear_combination.is_empty() && row < table[0].values.len() - 1 {
            res += self
                .next_row_linear_combination
                .iter()
                .map(|&(c, f)| table[c].values[row + 1] * f)
                .sum::<F>();
        }

        res
    }

    /// Evaluates the column on all rows.
    pub(crate) fn eval_all_rows(&self, table: &[PolynomialValues<F>]) -> Vec<F> {
        let length = table[0].len();
        (0..length)
            .map(|row| self.eval_table(table, row))
            .collect::<Vec<F>>()
    }

    /// Circuit version of `eval`: Given a row's targets, returns their linear combination.
    pub(crate) fn eval_circuit<const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        v: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D>
    where
        F: RichField + Extendable<D>,
    {
        let pairs = self
            .linear_combination
            .iter()
            .map(|&(c, f)| {
                (
                    v[c],
                    builder.constant_extension(F::Extension::from_basefield(f)),
                )
            })
            .collect::<Vec<_>>();
        let constant = builder.constant_extension(F::Extension::from_basefield(self.constant));
        builder.inner_product_extension(F::ONE, constant, pairs)
    }

    /// Circuit version of `eval_with_next`:
    /// Given the targets of the current and next row, returns the sum of their linear combinations.
    pub(crate) fn eval_with_next_circuit<const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        v: &[ExtensionTarget<D>],
        next_v: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D>
    where
        F: RichField + Extendable<D>,
    {
        let mut pairs = self
            .linear_combination
            .iter()
            .map(|&(c, f)| {
                (
                    v[c],
                    builder.constant_extension(F::Extension::from_basefield(f)),
                )
            })
            .collect::<Vec<_>>();
        let next_row_pairs = self.next_row_linear_combination.iter().map(|&(c, f)| {
            (
                next_v[c],
                builder.constant_extension(F::Extension::from_basefield(f)),
            )
        });
        pairs.extend(next_row_pairs);
        let constant = builder.constant_extension(F::Extension::from_basefield(self.constant));
        builder.inner_product_extension(F::ONE, constant, pairs)
    }
}
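The next-row handling in `eval_table` above (skip next-row terms on the last row) can be illustrated on a toy column-major table of plain integers; the real code works on `PolynomialValues` columns of field elements, and all names below are hypothetical.

// Toy `eval_table`: table[c][row] is the value of column c at the given row.
fn eval_table(
    linear_combination: &[(usize, u64)],
    next_row_linear_combination: &[(usize, u64)],
    constant: u64,
    table: &[Vec<u64>],
    row: usize,
) -> u64 {
    let mut res: u64 =
        linear_combination.iter().map(|&(c, f)| table[c][row] * f).sum::<u64>() + constant;
    // Next-row terms are dropped on the last row; a correct filter is 0 there anyway.
    if !next_row_linear_combination.is_empty() && row < table[0].len() - 1 {
        res += next_row_linear_combination
            .iter()
            .map(|&(c, f)| table[c][row + 1] * f)
            .sum::<u64>();
    }
    res
}

fn main() {
    let table = vec![vec![1u64, 2, 3], vec![10, 20, 30]]; // two columns, three rows
    // current-row col 0 plus next-row col 1, on row 1: 2 + 30 = 32
    assert_eq!(eval_table(&[(0, 1)], &[(1, 1)], 0, &table, 1), 32);
    // on the last row the next-row part is dropped: just 3
    assert_eq!(eval_table(&[(0, 1)], &[(1, 1)], 0, &table, 2), 3);
}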
pub(crate) type ColumnFilter<'a, F> = (&'a [Column<F>], &'a Option<Filter<F>>);

pub struct Lookup<F: Field> {
    /// Columns whose values should be contained in the lookup table.
    /// These are the f_i(x) polynomials in the logUp paper.
@@ -43,6 +412,52 @@ impl<F: Field> Lookup<F> {
    }
}
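The rest of the `Lookup` struct is cut off by the hunk above; roughly, it ties looking columns (the f_i) to a table column and a frequencies column counting how often each table entry is looked up. A toy sketch of that frequencies data over plain integers (not the crate's column types; names hypothetical):

use std::collections::HashMap;

fn frequencies(looking: &[u64], table: &[u64]) -> Vec<u64> {
    let mut counts: HashMap<u64, u64> = HashMap::new();
    for &v in looking {
        *counts.entry(v).or_insert(0) += 1;
    }
    // Every looked-up value must appear in the table for a valid trace.
    assert!(looking.iter().all(|v| table.contains(v)));
    table.iter().map(|t| counts.get(t).copied().unwrap_or(0)).collect()
}

fn main() {
    let table = [0u64, 1, 2, 3]; // e.g. a tiny range-check table
    let looking = [1u64, 3, 3, 0];
    assert_eq!(frequencies(&looking, &table), vec![1, 1, 0, 2]);
}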
/// Randomness for a single instance of a permutation check protocol.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub(crate) struct GrandProductChallenge<T: Copy + Eq + PartialEq + Debug> {
    /// Randomness used to combine multiple columns into one.
    pub(crate) beta: T,
    /// Random offset that's added to the beta-reduced column values.
    pub(crate) gamma: T,
}

impl<F: Field> GrandProductChallenge<F> {
    pub(crate) fn combine<'a, FE, P, T: IntoIterator<Item = &'a P>, const D2: usize>(
        &self,
        terms: T,
    ) -> P
    where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>,
        T::IntoIter: DoubleEndedIterator,
    {
        reduce_with_powers(terms, FE::from_basefield(self.beta)) + FE::from_basefield(self.gamma)
    }
}

impl GrandProductChallenge<Target> {
    pub(crate) fn combine_circuit<F: RichField + Extendable<D>, const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        terms: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D> {
        let reduced = reduce_with_powers_ext_circuit(builder, terms, self.beta);
        let gamma = builder.convert_to_ext(self.gamma);
        builder.add_extension(reduced, gamma)
    }
}

impl GrandProductChallenge<Target> {
    pub(crate) fn combine_base_circuit<F: RichField + Extendable<D>, const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        terms: &[Target],
    ) -> Target {
        let reduced = reduce_with_powers_circuit(builder, terms, self.beta);
        builder.add(reduced, self.gamma)
    }
}
/// logUp protocol from <https://ia.cr/2022/1530>
/// Compute the helper columns for the lookup argument.
/// Given columns `f0,...,fk` and a column `t`, such that `∪fi ⊆ t`, and challenges `x`,
@@ -129,6 +544,214 @@ pub(crate) fn lookup_helper_columns<F: Field>(
    helper_columns
}
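The helper and running-sum columns built here encode the logUp identity from the paper referenced above: with multiplicities m_j of table entries t_j among the looked-up values f_i, sum_i 1/(x + f_i) equals sum_j m_j / (x + t_j) at the random challenge x. A self-contained numerical check over the Goldilocks field, with a Fermat inverse as in the earlier sketches (toy code, not the crate's implementation):

const P: u128 = 0xFFFF_FFFF_0000_0001;

fn pow_mod(mut b: u128, mut e: u128) -> u128 {
    let mut acc = 1u128;
    b %= P;
    while e > 0 {
        if e & 1 == 1 { acc = acc * b % P; }
        b = b * b % P;
        e >>= 1;
    }
    acc
}

fn inv(x: u128) -> u128 { pow_mod(x, P - 2) }

fn main() {
    let table = [0u128, 1, 2, 3];
    let looking = [1u128, 3, 3, 0];
    let mult = [1u128, 1, 0, 2]; // multiplicity of each table entry in `looking`
    let x = 987_654_321u128;     // stand-in for the verifier's random challenge

    let lhs: u128 = looking.iter().map(|&f| inv((x + f) % P)).sum::<u128>() % P;
    let rhs: u128 = table
        .iter()
        .zip(&mult)
        .map(|(&t, &m)| m * inv((x + t) % P) % P)
        .sum::<u128>()
        % P;
    assert_eq!(lhs, rhs);
}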
/// Given data associated to a lookup, check the associated helper polynomials.
pub(crate) fn eval_helper_columns<F, FE, P, const D: usize, const D2: usize>(
    filter: &[Option<Filter<F>>],
    columns: &[Vec<P>],
    local_values: &[P],
    next_values: &[P],
    helper_columns: &[P],
    constraint_degree: usize,
    challenges: &GrandProductChallenge<F>,
    consumer: &mut ConstraintConsumer<P>,
) where
    F: RichField + Extendable<D>,
    FE: FieldExtension<D2, BaseField = F>,
    P: PackedField<Scalar = FE>,
{
    if !helper_columns.is_empty() {
        for (j, chunk) in columns.chunks(constraint_degree - 1).enumerate() {
            let fs =
                &filter[(constraint_degree - 1) * j..(constraint_degree - 1) * j + chunk.len()];
            let h = helper_columns[j];

            match chunk.len() {
                2 => {
                    let combin0 = challenges.combine(&chunk[0]);
                    let combin1 = challenges.combine(chunk[1].iter());

                    let f0 = if let Some(filter0) = &fs[0] {
                        filter0.eval_filter(local_values, next_values)
                    } else {
                        P::ONES
                    };
                    let f1 = if let Some(filter1) = &fs[1] {
                        filter1.eval_filter(local_values, next_values)
                    } else {
                        P::ONES
                    };

                    consumer.constraint(combin1 * combin0 * h - f0 * combin1 - f1 * combin0);
                }
                1 => {
                    let combin = challenges.combine(&chunk[0]);
                    let f0 = if let Some(filter1) = &fs[0] {
                        filter1.eval_filter(local_values, next_values)
                    } else {
                        P::ONES
                    };
                    consumer.constraint(combin * h - f0);
                }

                _ => todo!("Allow other constraint degrees"),
            }
        }
    }
}
/// Circuit version of `eval_helper_columns`.
/// Given data associated to a lookup (either a CTL or a range-check), check the associated helper polynomials.
pub(crate) fn eval_helper_columns_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    filter: &[Option<Filter<F>>],
    columns: &[Vec<ExtensionTarget<D>>],
    local_values: &[ExtensionTarget<D>],
    next_values: &[ExtensionTarget<D>],
    helper_columns: &[ExtensionTarget<D>],
    constraint_degree: usize,
    challenges: &GrandProductChallenge<Target>,
    consumer: &mut RecursiveConstraintConsumer<F, D>,
) {
    if !helper_columns.is_empty() {
        for (j, chunk) in columns.chunks(constraint_degree - 1).enumerate() {
            let fs =
                &filter[(constraint_degree - 1) * j..(constraint_degree - 1) * j + chunk.len()];
            let h = helper_columns[j];

            let one = builder.one_extension();
            match chunk.len() {
                2 => {
                    let combin0 = challenges.combine_circuit(builder, &chunk[0]);
                    let combin1 = challenges.combine_circuit(builder, &chunk[1]);

                    let f0 = if let Some(filter0) = &fs[0] {
                        filter0.eval_filter_circuit(builder, local_values, next_values)
                    } else {
                        one
                    };
                    let f1 = if let Some(filter1) = &fs[1] {
                        filter1.eval_filter_circuit(builder, local_values, next_values)
                    } else {
                        one
                    };

                    let constr = builder.mul_sub_extension(combin0, h, f0);
                    let constr = builder.mul_extension(constr, combin1);
                    let f1_constr = builder.mul_extension(f1, combin0);
                    let constr = builder.sub_extension(constr, f1_constr);

                    consumer.constraint(builder, constr);
                }
                1 => {
                    let combin = challenges.combine_circuit(builder, &chunk[0]);
                    let f0 = if let Some(filter1) = &fs[0] {
                        filter1.eval_filter_circuit(builder, local_values, next_values)
                    } else {
                        one
                    };
                    let constr = builder.mul_sub_extension(combin, h, f0);
                    consumer.constraint(builder, constr);
                }

                _ => todo!("Allow other constraint degrees"),
            }
        }
    }
}
/// Given a STARK's trace, and the data associated to one lookup (either CTL or range check),
/// returns the associated helper polynomials.
pub(crate) fn get_helper_cols<F: Field>(
    trace: &[PolynomialValues<F>],
    degree: usize,
    columns_filters: &[ColumnFilter<F>],
    challenge: GrandProductChallenge<F>,
    constraint_degree: usize,
) -> Vec<PolynomialValues<F>> {
    let num_helper_columns = ceil_div_usize(columns_filters.len(), constraint_degree - 1);

    let mut helper_columns = Vec::with_capacity(num_helper_columns);

    let mut filter_index = 0;
    for mut cols_filts in &columns_filters.iter().chunks(constraint_degree - 1) {
        let (first_col, first_filter) = cols_filts.next().unwrap();

        let mut filter_col = Vec::with_capacity(degree);
        let first_combined = (0..degree)
            .map(|d| {
                let f = if let Some(filter) = first_filter {
                    let f = filter.eval_table(trace, d);
                    filter_col.push(f);
                    f
                } else {
                    filter_col.push(F::ONE);
                    F::ONE
                };
                if f.is_one() {
                    let evals = first_col
                        .iter()
                        .map(|c| c.eval_table(trace, d))
                        .collect::<Vec<F>>();
                    challenge.combine(evals.iter())
                } else {
                    assert_eq!(f, F::ZERO, "Non-binary filter?");
                    // Dummy value. Cannot be zero since it will be batch-inverted.
                    F::ONE
                }
            })
            .collect::<Vec<F>>();

        let mut acc = F::batch_multiplicative_inverse(&first_combined);
        for d in 0..degree {
            if filter_col[d].is_zero() {
                acc[d] = F::ZERO;
            }
        }

        for (col, filt) in cols_filts {
            let mut filter_col = Vec::with_capacity(degree);
            let mut combined = (0..degree)
                .map(|d| {
                    let f = if let Some(filter) = filt {
                        let f = filter.eval_table(trace, d);
                        filter_col.push(f);
                        f
                    } else {
                        filter_col.push(F::ONE);
                        F::ONE
                    };
                    if f.is_one() {
                        let evals = col
                            .iter()
                            .map(|c| c.eval_table(trace, d))
                            .collect::<Vec<F>>();
                        challenge.combine(evals.iter())
                    } else {
                        assert_eq!(f, F::ZERO, "Non-binary filter?");
                        // Dummy value. Cannot be zero since it will be batch-inverted.
                        F::ONE
                    }
                })
                .collect::<Vec<F>>();

            combined = F::batch_multiplicative_inverse(&combined);

            for d in 0..degree {
                if filter_col[d].is_zero() {
                    combined[d] = F::ZERO;
                }
            }

            batch_add_inplace(&mut acc, &combined);
        }

        helper_columns.push(acc.into());
    }
    assert_eq!(helper_columns.len(), num_helper_columns);

    helper_columns
}
pub(crate) struct LookupCheckVars<F, FE, P, const D2: usize>
where
    F: Field,

@@ -15,9 +15,8 @@ use plonky2_maybe_rayon::*;

use super::segments::Segment;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::lookup::Lookup;
use crate::lookup::{Column, Filter, Lookup};
use crate::memory::columns::{
    value_limb, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL, CONTEXT_FIRST_CHANGE, COUNTER, FILTER,
    FREQUENCIES, INITIALIZE_AUX, IS_READ, NUM_COLUMNS, RANGE_CHECK, SEGMENT_FIRST_CHANGE,

@@ -31,11 +31,9 @@ use crate::config::StarkConfig;
use crate::constraint_consumer::RecursiveConstraintConsumer;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cross_table_lookup::{
    CrossTableLookup, CtlCheckVarsTarget, GrandProductChallenge, GrandProductChallengeSet,
};
use crate::cross_table_lookup::{CrossTableLookup, CtlCheckVarsTarget, GrandProductChallengeSet};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::lookup::LookupCheckVarsTarget;
use crate::lookup::{GrandProductChallenge, LookupCheckVarsTarget};
use crate::memory::segments::Segment;
use crate::memory::VALUE_LIMBS;
use crate::proof::{

@@ -17,10 +17,10 @@ use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cross_table_lookup::{
    num_ctl_helper_columns_by_table, verify_cross_table_lookups, CtlCheckVars,
    GrandProductChallenge, GrandProductChallengeSet,
    GrandProductChallengeSet,
};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::lookup::LookupCheckVars;
use crate::lookup::{GrandProductChallenge, LookupCheckVars};
use crate::memory::segments::Segment;
use crate::memory::VALUE_LIMBS;
use crate::proof::{