2022-08-29 18:10:51 -07:00
|
|
|
use std::borrow::Borrow;
|
2022-08-23 12:22:54 -07:00
|
|
|
use std::iter::repeat;
|
|
|
|
|
|
2022-05-11 16:09:12 +02:00
|
|
|
use anyhow::{ensure, Result};
|
2022-06-13 18:54:12 +02:00
|
|
|
use itertools::Itertools;
|
2022-06-27 07:18:21 -07:00
|
|
|
use plonky2::field::extension::{Extendable, FieldExtension};
|
2022-06-27 15:07:52 -07:00
|
|
|
use plonky2::field::packed::PackedField;
|
2022-05-05 22:21:09 +02:00
|
|
|
use plonky2::field::polynomial::PolynomialValues;
|
2022-06-27 12:24:09 -07:00
|
|
|
use plonky2::field::types::Field;
|
2022-05-05 22:21:09 +02:00
|
|
|
use plonky2::hash::hash_types::RichField;
|
2022-11-22 08:48:48 -05:00
|
|
|
use plonky2::hash::hashing::HashConfig;
|
2022-05-20 11:21:13 +02:00
|
|
|
use plonky2::iop::ext_target::ExtensionTarget;
|
2022-05-24 16:24:52 +02:00
|
|
|
use plonky2::iop::target::Target;
|
|
|
|
|
use plonky2::plonk::circuit_builder::CircuitBuilder;
|
2022-05-05 22:21:09 +02:00
|
|
|
use plonky2::plonk::config::GenericConfig;
|
|
|
|
|
|
2022-08-26 10:12:45 +02:00
|
|
|
use crate::all_stark::{Table, NUM_TABLES};
|
2022-05-05 22:21:09 +02:00
|
|
|
use crate::config::StarkConfig;
|
2022-05-24 16:24:52 +02:00
|
|
|
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
use crate::permutation::{GrandProductChallenge, GrandProductChallengeSet};
|
|
|
|
|
use crate::proof::{StarkProofTarget, StarkProofWithMetadata};
|
2022-05-06 14:55:54 +02:00
|
|
|
use crate::stark::Stark;
|
2022-05-24 16:24:52 +02:00
|
|
|
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
|
2022-05-05 22:21:09 +02:00
|
|
|
|
2022-06-14 19:09:03 +02:00
|
|
|
/// Represent a linear combination of columns.
#[derive(Clone, Debug)]
pub struct Column<F: Field> {
    /// `(column index, coefficient)` pairs; each index refers to a column of the
    /// trace the combination is evaluated against.
    linear_combination: Vec<(usize, F)>,
    /// Constant term added on top of the linear combination.
    constant: F,
}
|
|
|
|
|
|
2022-06-14 00:53:31 +02:00
|
|
|
impl<F: Field> Column<F> {
    /// A combination that selects a single column `c` with coefficient 1 and no constant.
    pub fn single(c: usize) -> Self {
        Self {
            linear_combination: vec![(c, F::ONE)],
            constant: F::ZERO,
        }
    }

    /// One [`Column::single`] per provided column index.
    pub fn singles<I: IntoIterator<Item = impl Borrow<usize>>>(
        cs: I,
    ) -> impl Iterator<Item = Self> {
        cs.into_iter().map(|c| Self::single(*c.borrow()))
    }

    /// A combination that ignores the trace and always evaluates to `constant`.
    pub fn constant(constant: F) -> Self {
        Self {
            linear_combination: vec![],
            constant,
        }
    }

    /// The constant combination 0.
    pub fn zero() -> Self {
        Self::constant(F::ZERO)
    }

    /// The constant combination 1.
    pub fn one() -> Self {
        Self::constant(F::ONE)
    }

    /// Build a combination from `(column, coefficient)` pairs plus a constant term.
    ///
    /// Panics (via `assert!`) if `iter` is empty — use [`Column::constant`] for a
    /// purely constant combination — and, in debug builds, if a column index
    /// appears more than once.
    pub fn linear_combination_with_constant<I: IntoIterator<Item = (usize, F)>>(
        iter: I,
        constant: F,
    ) -> Self {
        let v = iter.into_iter().collect::<Vec<_>>();
        assert!(!v.is_empty());
        debug_assert_eq!(
            v.iter().map(|(c, _)| c).unique().count(),
            v.len(),
            "Duplicate columns."
        );
        Self {
            linear_combination: v,
            constant,
        }
    }

    /// Build a combination from `(column, coefficient)` pairs with a zero constant term.
    pub fn linear_combination<I: IntoIterator<Item = (usize, F)>>(iter: I) -> Self {
        Self::linear_combination_with_constant(iter, F::ZERO)
    }

    /// Interpret the given columns as the little-endian bits of a value:
    /// coefficients are successive powers of two.
    pub fn le_bits<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(F::TWO.powers()))
    }

    /// Interpret the given columns as the little-endian bytes of a value:
    /// coefficients are successive powers of 256.
    pub fn le_bytes<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(
            cs.into_iter()
                .map(|c| *c.borrow())
                .zip(F::from_canonical_u16(256).powers()),
        )
    }

    /// The plain sum of the given columns (all coefficients 1).
    pub fn sum<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(repeat(F::ONE)))
    }

    /// Evaluate the combination on a row `v` of (packed) extension-field values,
    /// indexed by column.
    pub fn eval<FE, P, const D: usize>(&self, v: &[P]) -> P
    where
        FE: FieldExtension<D, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        self.linear_combination
            .iter()
            .map(|&(c, f)| v[c] * FE::from_basefield(f))
            .sum::<P>()
            + FE::from_basefield(self.constant)
    }

    /// Evaluate on a row of a table given in column-major form.
    pub fn eval_table(&self, table: &[PolynomialValues<F>], row: usize) -> F {
        self.linear_combination
            .iter()
            .map(|&(c, f)| table[c].values[row] * f)
            .sum::<F>()
            + self.constant
    }

    /// Recursive-circuit analogue of [`Column::eval`]: builds gates computing the
    /// inner product of the row targets with the (constant) coefficients, plus the
    /// constant term.
    pub fn eval_circuit<const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        v: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D>
    where
        F: RichField + Extendable<D>,
    {
        // Pair each selected row target with its coefficient baked in as a circuit constant.
        let pairs = self
            .linear_combination
            .iter()
            .map(|&(c, f)| {
                (
                    v[c],
                    builder.constant_extension(F::Extension::from_basefield(f)),
                )
            })
            .collect::<Vec<_>>();
        let constant = builder.constant_extension(F::Extension::from_basefield(self.constant));
        builder.inner_product_extension(F::ONE, constant, pairs)
    }
}
|
|
|
|
|
|
2022-08-14 16:36:07 -07:00
|
|
|
/// A table together with the columns (and optional row filter) it contributes to
/// a cross-table lookup.
#[derive(Clone, Debug)]
pub struct TableWithColumns<F: Field> {
    /// Which STARK table this refers to.
    table: Table,
    /// The column combinations participating in the lookup.
    columns: Vec<Column<F>>,
    /// Optional filter; rows where it evaluates to 1 participate, rows where it
    /// evaluates to 0 do not (see `partial_products`, which asserts it is binary).
    pub(crate) filter_column: Option<Column<F>>,
}
|
|
|
|
|
|
|
|
|
|
impl<F: Field> TableWithColumns<F> {
|
2022-06-14 19:09:03 +02:00
|
|
|
pub fn new(table: Table, columns: Vec<Column<F>>, filter_column: Option<Column<F>>) -> Self {
|
2022-06-06 20:51:14 +02:00
|
|
|
Self {
|
|
|
|
|
table,
|
|
|
|
|
columns,
|
2022-06-14 00:53:31 +02:00
|
|
|
filter_column,
|
2022-06-06 20:51:14 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-08-23 12:22:54 -07:00
|
|
|
/// A cross-table lookup: the rows selected from each looking table must, as a
/// multiset, match the rows selected from the looked table.
#[derive(Clone)]
pub struct CrossTableLookup<F: Field> {
    /// Tables whose selected rows must appear in the looked table.
    pub(crate) looking_tables: Vec<TableWithColumns<F>>,
    /// The table being looked into.
    pub(crate) looked_table: TableWithColumns<F>,
}
|
|
|
|
|
|
2022-05-17 09:24:22 +02:00
|
|
|
impl<F: Field> CrossTableLookup<F> {
|
2022-05-06 17:22:30 +02:00
|
|
|
pub fn new(
|
2022-06-14 00:53:31 +02:00
|
|
|
looking_tables: Vec<TableWithColumns<F>>,
|
|
|
|
|
looked_table: TableWithColumns<F>,
|
2022-05-06 17:22:30 +02:00
|
|
|
) -> Self {
|
2022-06-06 20:51:14 +02:00
|
|
|
assert!(looking_tables
|
2022-06-01 18:53:19 +02:00
|
|
|
.iter()
|
2022-06-06 20:51:14 +02:00
|
|
|
.all(|twc| twc.columns.len() == looked_table.columns.len()));
|
2022-05-06 17:22:30 +02:00
|
|
|
Self {
|
2022-06-01 18:53:19 +02:00
|
|
|
looking_tables,
|
2022-05-06 17:22:30 +02:00
|
|
|
looked_table,
|
|
|
|
|
}
|
|
|
|
|
}
|
2022-09-26 15:47:35 +02:00
|
|
|
|
|
|
|
|
pub(crate) fn num_ctl_zs(ctls: &[Self], table: Table, num_challenges: usize) -> usize {
|
2022-09-29 16:32:41 +02:00
|
|
|
let mut num_ctls = 0;
|
2022-09-26 15:47:35 +02:00
|
|
|
for ctl in ctls {
|
2022-09-29 16:32:41 +02:00
|
|
|
let all_tables = std::iter::once(&ctl.looked_table).chain(&ctl.looking_tables);
|
|
|
|
|
num_ctls += all_tables.filter(|twc| twc.table == table).count();
|
2022-09-26 15:47:35 +02:00
|
|
|
}
|
2022-09-29 16:32:41 +02:00
|
|
|
num_ctls * num_challenges
|
2022-09-26 15:47:35 +02:00
|
|
|
}
|
2022-05-06 17:22:30 +02:00
|
|
|
}
|
|
|
|
|
|
2022-05-16 20:45:30 +02:00
|
|
|
/// Cross-table lookup data for one table.
#[derive(Clone, Default)]
pub struct CtlData<F: Field> {
    /// One entry per Z(x) polynomial this table carries, in the order they were
    /// generated (see `cross_table_lookup_data`).
    pub(crate) zs_columns: Vec<CtlZData<F>>,
}
|
|
|
|
|
|
2022-08-23 23:20:20 -07:00
|
|
|
/// Cross-table lookup data associated with one Z(x) polynomial.
#[derive(Clone)]
pub(crate) struct CtlZData<F: Field> {
    /// The running-product polynomial itself, in value form.
    pub(crate) z: PolynomialValues<F>,
    /// The random challenge used to combine the columns for this Z.
    pub(crate) challenge: GrandProductChallenge<F>,
    /// The column combinations folded into this Z.
    pub(crate) columns: Vec<Column<F>>,
    /// Optional binary row filter (1 = row participates).
    pub(crate) filter_column: Option<Column<F>>,
}
|
2022-05-05 22:21:09 +02:00
|
|
|
|
2022-08-23 23:20:20 -07:00
|
|
|
impl<F: Field> CtlData<F> {
|
2022-05-10 15:08:08 +02:00
|
|
|
pub fn len(&self) -> usize {
|
2022-05-11 14:35:33 +02:00
|
|
|
self.zs_columns.len()
|
2022-05-10 15:08:08 +02:00
|
|
|
}
|
|
|
|
|
|
2022-05-05 22:21:09 +02:00
|
|
|
pub fn is_empty(&self) -> bool {
|
2022-05-11 14:35:33 +02:00
|
|
|
self.zs_columns.is_empty()
|
2022-05-05 22:21:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pub fn z_polys(&self) -> Vec<PolynomialValues<F>> {
|
2022-08-23 23:20:20 -07:00
|
|
|
self.zs_columns
|
|
|
|
|
.iter()
|
|
|
|
|
.map(|zs_columns| zs_columns.z.clone())
|
|
|
|
|
.collect()
|
2022-05-05 22:21:09 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-10 21:47:51 -08:00
|
|
|
/// Generate, for every table, the Z(x) running-product polynomials required by
/// the cross-table lookup argument.
///
/// For each CTL and each challenge, one Z is produced per looking table plus one
/// for the looked table. The per-table push order (all looking tables of a CTL,
/// then its looked table, iterating challenges innermost) must match the order
/// in which the verifier consumes openings — do not reorder.
pub(crate) fn cross_table_lookup_data<F: RichField, const D: usize>(
    trace_poly_values: &[Vec<PolynomialValues<F>>; NUM_TABLES],
    cross_table_lookups: &[CrossTableLookup<F>],
    ctl_challenges: &GrandProductChallengeSet<F>,
) -> [CtlData<F>; NUM_TABLES] {
    // `CtlData` isn't `Copy`, so build the array by mapping a dummy array.
    let mut ctl_data_per_table = [0; NUM_TABLES].map(|_| CtlData::default());
    for CrossTableLookup {
        looking_tables,
        looked_table,
    } in cross_table_lookups
    {
        log::debug!("Processing CTL for {:?}", looked_table.table);
        for &challenge in &ctl_challenges.challenges {
            // Lazy: each looking-table Z is computed when the iterator is consumed.
            let zs_looking = looking_tables.iter().map(|table| {
                partial_products(
                    &trace_poly_values[table.table as usize],
                    &table.columns,
                    &table.filter_column,
                    challenge,
                )
            });
            let z_looked = partial_products(
                &trace_poly_values[looked_table.table as usize],
                &looked_table.columns,
                &looked_table.filter_column,
                challenge,
            );

            // Sanity check (debug builds only; the `.clone()` recomputes the
            // looking-side products): the product of the final looking values
            // must equal the final looked value.
            debug_assert_eq!(
                zs_looking
                    .clone()
                    .map(|z| *z.values.last().unwrap())
                    .product::<F>(),
                *z_looked.values.last().unwrap()
            );

            // Record one CtlZData per looking table…
            for (table, z) in looking_tables.iter().zip(zs_looking) {
                ctl_data_per_table[table.table as usize]
                    .zs_columns
                    .push(CtlZData {
                        z,
                        challenge,
                        columns: table.columns.clone(),
                        filter_column: table.filter_column.clone(),
                    });
            }
            // …then one for the looked table.
            ctl_data_per_table[looked_table.table as usize]
                .zs_columns
                .push(CtlZData {
                    z: z_looked,
                    challenge,
                    columns: looked_table.columns.clone(),
                    filter_column: looked_table.filter_column.clone(),
                });
        }
    }
    ctl_data_per_table
}
|
|
|
|
|
|
|
|
|
|
fn partial_products<F: Field>(
|
|
|
|
|
trace: &[PolynomialValues<F>],
|
2022-06-14 00:53:31 +02:00
|
|
|
columns: &[Column<F>],
|
2022-06-14 19:09:03 +02:00
|
|
|
filter_column: &Option<Column<F>>,
|
2022-05-18 09:22:58 +02:00
|
|
|
challenge: GrandProductChallenge<F>,
|
2022-05-05 22:21:09 +02:00
|
|
|
) -> PolynomialValues<F> {
|
|
|
|
|
let mut partial_prod = F::ONE;
|
2022-05-12 20:38:11 +02:00
|
|
|
let degree = trace[0].len();
|
|
|
|
|
let mut res = Vec::with_capacity(degree);
|
2022-08-23 12:22:54 -07:00
|
|
|
for i in 0..degree {
|
2022-06-14 19:09:03 +02:00
|
|
|
let filter = if let Some(column) = filter_column {
|
2022-08-23 12:22:54 -07:00
|
|
|
column.eval_table(trace, i)
|
2022-06-07 18:08:12 +02:00
|
|
|
} else {
|
2022-06-14 19:09:03 +02:00
|
|
|
F::ONE
|
2022-06-07 18:08:12 +02:00
|
|
|
};
|
2022-06-13 18:54:12 +02:00
|
|
|
if filter.is_one() {
|
2022-06-14 00:53:31 +02:00
|
|
|
let evals = columns
|
|
|
|
|
.iter()
|
2022-08-23 12:22:54 -07:00
|
|
|
.map(|c| c.eval_table(trace, i))
|
2022-06-14 00:53:31 +02:00
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
partial_prod *= challenge.combine(evals.iter());
|
2022-06-07 23:09:09 +02:00
|
|
|
} else {
|
2022-06-13 18:54:12 +02:00
|
|
|
assert_eq!(filter, F::ZERO, "Non-binary filter?")
|
2022-06-07 18:08:12 +02:00
|
|
|
};
|
2022-05-05 22:21:09 +02:00
|
|
|
res.push(partial_prod);
|
|
|
|
|
}
|
|
|
|
|
res.into()
|
|
|
|
|
}
|
|
|
|
|
|
2022-05-12 13:47:55 +02:00
|
|
|
/// Openings and metadata needed to check one cross-table lookup Z polynomial
/// during native constraint evaluation (packed-field version).
#[derive(Clone)]
pub struct CtlCheckVars<'a, F, FE, P, const D2: usize>
where
    F: Field,
    FE: FieldExtension<D2, BaseField = F>,
    P: PackedField<Scalar = FE>,
{
    /// Opening of Z at the current point.
    pub(crate) local_z: P,
    /// Opening of Z at the next point.
    pub(crate) next_z: P,
    /// Challenge used to combine the looked-up columns.
    pub(crate) challenges: GrandProductChallenge<F>,
    /// Columns participating in this lookup.
    pub(crate) columns: &'a [Column<F>],
    /// Optional binary row filter.
    pub(crate) filter_column: &'a Option<Column<F>>,
}
|
|
|
|
|
|
|
|
|
|
impl<'a, F: RichField + Extendable<D>, const D: usize>
    CtlCheckVars<'a, F, F::Extension, F::Extension, D>
{
    /// Extract, from each table's proof openings, the CTL check variables for
    /// every (CTL, challenge, table-appearance) triple.
    ///
    /// The iteration order (CTLs outermost, then challenges, then looking tables
    /// before the looked table) mirrors how `cross_table_lookup_data` pushed the
    /// Z polynomials, so each `next()` pulls the matching opening — do not reorder.
    pub(crate) fn from_proofs<C: GenericConfig<D, F = F>>(
        proofs: &[StarkProofWithMetadata<F, C, D>; NUM_TABLES],
        cross_table_lookups: &'a [CrossTableLookup<F>],
        ctl_challenges: &'a GrandProductChallengeSet<F>,
        num_permutation_zs: &[usize; NUM_TABLES],
    ) -> [Vec<Self>; NUM_TABLES]
    where
        [(); C::HCO::WIDTH]:,
    {
        // Per-table iterators over (Z(w), Z(gw)) opening pairs; the CTL openings
        // come after the permutation-argument Zs, hence the `skip`.
        let mut ctl_zs = proofs
            .iter()
            .zip(num_permutation_zs)
            .map(|(p, &num_perms)| {
                let openings = &p.proof.openings;
                let ctl_zs = openings.permutation_ctl_zs.iter().skip(num_perms);
                let ctl_zs_next = openings.permutation_ctl_zs_next.iter().skip(num_perms);
                ctl_zs.zip(ctl_zs_next)
            })
            .collect::<Vec<_>>();

        // `Vec` isn't `Copy`, so build the array by mapping a dummy array.
        let mut ctl_vars_per_table = [0; NUM_TABLES].map(|_| vec![]);
        for CrossTableLookup {
            looking_tables,
            looked_table,
        } in cross_table_lookups
        {
            for &challenges in &ctl_challenges.challenges {
                for table in looking_tables {
                    // Consume the next opening pair for this looking table.
                    let (looking_z, looking_z_next) = ctl_zs[table.table as usize].next().unwrap();
                    ctl_vars_per_table[table.table as usize].push(Self {
                        local_z: *looking_z,
                        next_z: *looking_z_next,
                        challenges,
                        columns: &table.columns,
                        filter_column: &table.filter_column,
                    });
                }

                // Then the opening pair for the looked table.
                let (looked_z, looked_z_next) = ctl_zs[looked_table.table as usize].next().unwrap();
                ctl_vars_per_table[looked_table.table as usize].push(Self {
                    local_z: *looked_z,
                    next_z: *looked_z_next,
                    challenges,
                    columns: &looked_table.columns,
                    filter_column: &looked_table.filter_column,
                });
            }
        }
        ctl_vars_per_table
    }
}
|
|
|
|
|
|
2023-02-10 21:47:51 -08:00
|
|
|
pub(crate) fn eval_cross_table_lookup_checks<F, FE, P, S, const D: usize, const D2: usize>(
|
2022-08-25 12:24:22 -07:00
|
|
|
vars: StarkEvaluationVars<FE, P, { S::COLUMNS }>,
|
2022-05-16 20:45:30 +02:00
|
|
|
ctl_vars: &[CtlCheckVars<F, FE, P, D2>],
|
2022-05-05 22:21:09 +02:00
|
|
|
consumer: &mut ConstraintConsumer<P>,
|
|
|
|
|
) where
|
|
|
|
|
F: RichField + Extendable<D>,
|
|
|
|
|
FE: FieldExtension<D2, BaseField = F>,
|
|
|
|
|
P: PackedField<Scalar = FE>,
|
|
|
|
|
S: Stark<F, D>,
|
|
|
|
|
{
|
2022-05-13 11:20:29 +02:00
|
|
|
for lookup_vars in ctl_vars {
|
2022-05-16 20:45:30 +02:00
|
|
|
let CtlCheckVars {
|
2022-05-06 14:55:54 +02:00
|
|
|
local_z,
|
|
|
|
|
next_z,
|
|
|
|
|
challenges,
|
|
|
|
|
columns,
|
2022-06-14 00:53:31 +02:00
|
|
|
filter_column,
|
2022-05-13 11:20:29 +02:00
|
|
|
} = lookup_vars;
|
2022-08-23 12:22:54 -07:00
|
|
|
let combine = |v: &[P]| -> P {
|
|
|
|
|
let evals = columns.iter().map(|c| c.eval(v)).collect::<Vec<_>>();
|
|
|
|
|
challenges.combine(evals.iter())
|
|
|
|
|
};
|
|
|
|
|
let filter = |v: &[P]| -> P {
|
|
|
|
|
if let Some(column) = filter_column {
|
|
|
|
|
column.eval(v)
|
|
|
|
|
} else {
|
|
|
|
|
P::ONES
|
|
|
|
|
}
|
2022-06-07 23:09:09 +02:00
|
|
|
};
|
2022-08-23 12:22:54 -07:00
|
|
|
let local_filter = filter(vars.local_values);
|
|
|
|
|
let next_filter = filter(vars.next_values);
|
|
|
|
|
let select = |filter, x| filter * x + P::ONES - filter;
|
2022-05-05 22:21:09 +02:00
|
|
|
|
2022-05-06 14:55:54 +02:00
|
|
|
// Check value of `Z(1)`
|
2022-08-23 12:22:54 -07:00
|
|
|
consumer.constraint_first_row(*local_z - select(local_filter, combine(vars.local_values)));
|
2022-05-06 14:55:54 +02:00
|
|
|
// Check `Z(gw) = combination * Z(w)`
|
2022-08-23 12:22:54 -07:00
|
|
|
consumer.constraint_transition(
|
|
|
|
|
*next_z - *local_z * select(next_filter, combine(vars.next_values)),
|
|
|
|
|
);
|
2022-05-05 22:21:09 +02:00
|
|
|
}
|
|
|
|
|
}
|
2022-05-11 16:09:12 +02:00
|
|
|
|
2022-05-23 17:49:04 +02:00
|
|
|
/// Recursive-circuit counterpart of `CtlCheckVars`: target openings and metadata
/// needed to check one cross-table lookup Z polynomial inside a circuit.
#[derive(Clone)]
pub struct CtlCheckVarsTarget<'a, F: Field, const D: usize> {
    /// Target for the opening of Z at the current point.
    pub(crate) local_z: ExtensionTarget<D>,
    /// Target for the opening of Z at the next point.
    pub(crate) next_z: ExtensionTarget<D>,
    /// Challenge targets used to combine the looked-up columns.
    pub(crate) challenges: GrandProductChallenge<Target>,
    /// Columns participating in this lookup.
    pub(crate) columns: &'a [Column<F>],
    /// Optional binary row filter.
    pub(crate) filter_column: &'a Option<Column<F>>,
}
|
|
|
|
|
|
2022-06-14 00:53:31 +02:00
|
|
|
impl<'a, F: Field, const D: usize> CtlCheckVarsTarget<'a, F, D> {
    /// Extract the CTL check variables for a single `table` from its proof's
    /// opening targets.
    ///
    /// Iteration order (CTLs, then challenges, then looking tables before the
    /// looked table) mirrors `cross_table_lookup_data`, so each `next()` pulls
    /// the matching opening pair — do not reorder. Panics if the number of
    /// openings doesn't match the number of appearances of `table` in the CTLs.
    pub(crate) fn from_proof(
        table: Table,
        proof: &StarkProofTarget<D>,
        cross_table_lookups: &'a [CrossTableLookup<F>],
        ctl_challenges: &'a GrandProductChallengeSet<Target>,
        num_permutation_zs: usize,
    ) -> Vec<Self> {
        // (Z(w), Z(gw)) opening-target pairs; CTL openings follow the
        // permutation-argument Zs, hence the `skip`.
        let mut ctl_zs = {
            let openings = &proof.openings;
            let ctl_zs = openings.permutation_ctl_zs.iter().skip(num_permutation_zs);
            let ctl_zs_next = openings
                .permutation_ctl_zs_next
                .iter()
                .skip(num_permutation_zs);
            ctl_zs.zip(ctl_zs_next)
        };

        let mut ctl_vars = vec![];
        for CrossTableLookup {
            looking_tables,
            looked_table,
        } in cross_table_lookups
        {
            for &challenges in &ctl_challenges.challenges {
                for looking_table in looking_tables {
                    // Only appearances of our table consume an opening pair.
                    if looking_table.table == table {
                        let (looking_z, looking_z_next) = ctl_zs.next().unwrap();
                        ctl_vars.push(Self {
                            local_z: *looking_z,
                            next_z: *looking_z_next,
                            challenges,
                            columns: &looking_table.columns,
                            filter_column: &looking_table.filter_column,
                        });
                    }
                }

                if looked_table.table == table {
                    let (looked_z, looked_z_next) = ctl_zs.next().unwrap();
                    ctl_vars.push(Self {
                        local_z: *looked_z,
                        next_z: *looked_z_next,
                        challenges,
                        columns: &looked_table.columns,
                        filter_column: &looked_table.filter_column,
                    });
                }
            }
        }
        // Every CTL opening must have been consumed.
        assert!(ctl_zs.next().is_none());
        ctl_vars
    }
}
|
|
|
|
|
|
2022-05-24 16:24:52 +02:00
|
|
|
/// Recursive-circuit counterpart of `eval_cross_table_lookup_checks`: builds
/// gates enforcing `Z(1) = selected(first row)` and
/// `Z(gw) = Z(w) * selected(next row)` for each CTL Z polynomial.
pub(crate) fn eval_cross_table_lookup_checks_circuit<
    S: Stark<F, D>,
    F: RichField + Extendable<D>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    vars: StarkEvaluationTargets<D, { S::COLUMNS }>,
    ctl_vars: &[CtlCheckVarsTarget<F, D>],
    consumer: &mut RecursiveConstraintConsumer<F, D>,
) {
    for lookup_vars in ctl_vars {
        let CtlCheckVarsTarget {
            local_z,
            next_z,
            challenges,
            columns,
            filter_column,
        } = lookup_vars;

        let one = builder.one_extension();
        // A missing filter column means every row participates (filter = 1).
        let local_filter = if let Some(column) = filter_column {
            column.eval_circuit(builder, vars.local_values)
        } else {
            one
        };
        let next_filter = if let Some(column) = filter_column {
            column.eval_circuit(builder, vars.next_values)
        } else {
            one
        };
        // Circuit version of `filter * x + 1 - filter`: x when filter = 1, else 1.
        fn select<F: RichField + Extendable<D>, const D: usize>(
            builder: &mut CircuitBuilder<F, D>,
            filter: ExtensionTarget<D>,
            x: ExtensionTarget<D>,
        ) -> ExtensionTarget<D> {
            let one = builder.one_extension();
            let tmp = builder.sub_extension(one, filter);
            builder.mul_add_extension(filter, x, tmp) // filter * x + 1 - filter
        }

        // Check value of `Z(1)`
        let local_columns_eval = columns
            .iter()
            .map(|c| c.eval_circuit(builder, vars.local_values))
            .collect::<Vec<_>>();
        let combined_local = challenges.combine_circuit(builder, &local_columns_eval);
        let selected_local = select(builder, local_filter, combined_local);
        let first_row = builder.sub_extension(*local_z, selected_local);
        consumer.constraint_first_row(builder, first_row);
        // Check `Z(gw) = combination * Z(w)`
        let next_columns_eval = columns
            .iter()
            .map(|c| c.eval_circuit(builder, vars.next_values))
            .collect::<Vec<_>>();
        let combined_next = challenges.combine_circuit(builder, &next_columns_eval);
        let selected_next = select(builder, next_filter, combined_next);
        let mut transition = builder.mul_extension(*local_z, selected_next);
        transition = builder.sub_extension(*next_z, transition);
        consumer.constraint_transition(builder, transition);
    }
}
|
|
|
|
|
|
2023-02-10 21:47:51 -08:00
|
|
|
pub(crate) fn verify_cross_table_lookups<F: RichField + Extendable<D>, const D: usize>(
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
cross_table_lookups: &[CrossTableLookup<F>],
|
2022-09-23 15:50:57 +02:00
|
|
|
ctl_zs_lasts: [Vec<F>; NUM_TABLES],
|
2022-05-11 16:09:12 +02:00
|
|
|
config: &StarkConfig,
|
|
|
|
|
) -> Result<()> {
|
2022-09-23 15:50:57 +02:00
|
|
|
let mut ctl_zs_openings = ctl_zs_lasts.iter().map(|v| v.iter()).collect::<Vec<_>>();
|
2023-01-03 11:36:42 -08:00
|
|
|
for CrossTableLookup {
|
|
|
|
|
looking_tables,
|
|
|
|
|
looked_table,
|
2023-01-03 12:29:14 -08:00
|
|
|
} in cross_table_lookups.iter()
|
2022-05-11 16:09:12 +02:00
|
|
|
{
|
2022-06-16 02:06:15 +02:00
|
|
|
for _ in 0..config.num_challenges {
|
|
|
|
|
let looking_zs_prod = looking_tables
|
|
|
|
|
.iter()
|
|
|
|
|
.map(|table| *ctl_zs_openings[table.table as usize].next().unwrap())
|
|
|
|
|
.product::<F>();
|
|
|
|
|
let looked_z = *ctl_zs_openings[looked_table.table as usize].next().unwrap();
|
2023-01-03 11:36:42 -08:00
|
|
|
|
|
|
|
|
ensure!(
|
|
|
|
|
looking_zs_prod == looked_z,
|
|
|
|
|
"Cross-table lookup verification failed."
|
|
|
|
|
);
|
2022-06-16 02:06:15 +02:00
|
|
|
}
|
2022-05-11 16:09:12 +02:00
|
|
|
}
|
2022-06-16 02:15:40 +02:00
|
|
|
debug_assert!(ctl_zs_openings.iter_mut().all(|iter| iter.next().is_none()));
|
2022-05-11 16:09:12 +02:00
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
|
}
|
2022-05-24 16:24:52 +02:00
|
|
|
|
2023-02-10 21:47:51 -08:00
|
|
|
pub(crate) fn verify_cross_table_lookups_circuit<F: RichField + Extendable<D>, const D: usize>(
|
2022-05-24 16:24:52 +02:00
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
|
|
|
|
cross_table_lookups: Vec<CrossTableLookup<F>>,
|
2022-09-23 16:25:02 +02:00
|
|
|
ctl_zs_lasts: [Vec<Target>; NUM_TABLES],
|
2022-05-24 16:24:52 +02:00
|
|
|
inner_config: &StarkConfig,
|
|
|
|
|
) {
|
2022-09-23 16:25:02 +02:00
|
|
|
let mut ctl_zs_openings = ctl_zs_lasts.iter().map(|v| v.iter()).collect::<Vec<_>>();
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
for CrossTableLookup {
|
|
|
|
|
looking_tables,
|
|
|
|
|
looked_table,
|
|
|
|
|
} in cross_table_lookups.into_iter()
|
2022-05-24 16:24:52 +02:00
|
|
|
{
|
2022-06-16 02:06:15 +02:00
|
|
|
for _ in 0..inner_config.num_challenges {
|
|
|
|
|
let looking_zs_prod = builder.mul_many(
|
|
|
|
|
looking_tables
|
|
|
|
|
.iter()
|
|
|
|
|
.map(|table| *ctl_zs_openings[table.table as usize].next().unwrap()),
|
|
|
|
|
);
|
|
|
|
|
let looked_z = *ctl_zs_openings[looked_table.table as usize].next().unwrap();
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
builder.connect(looking_zs_prod, looked_z);
|
2022-06-07 22:19:36 +02:00
|
|
|
}
|
2022-05-24 16:24:52 +02:00
|
|
|
}
|
2022-06-16 02:15:40 +02:00
|
|
|
debug_assert!(ctl_zs_openings.iter_mut().all(|iter| iter.next().is_none()));
|
2022-05-24 16:24:52 +02:00
|
|
|
}
|
2022-07-11 11:07:16 +02:00
|
|
|
|
|
|
|
|
#[cfg(test)]
pub(crate) mod testutils {
    use std::collections::HashMap;

    use plonky2::field::polynomial::PolynomialValues;
    use plonky2::field::types::Field;

    use crate::all_stark::Table;
    use crate::cross_table_lookup::{CrossTableLookup, TableWithColumns};

    /// Maps a row of evaluated CTL columns to every `(table, row_index)`
    /// location where that row occurs with an active (== 1) filter.
    type MultiSet<F> = HashMap<Vec<F>, Vec<(Table, usize)>>;

    /// Check that the provided traces and cross-table lookups are consistent.
    #[allow(unused)] // TODO: used later?
    pub(crate) fn check_ctls<F: Field>(
        trace_poly_values: &[Vec<PolynomialValues<F>>],
        cross_table_lookups: &[CrossTableLookup<F>],
    ) {
        for (i, ctl) in cross_table_lookups.iter().enumerate() {
            check_ctl(trace_poly_values, ctl, i);
        }
    }

    /// Check a single CTL by comparing the multiset of filtered rows on the
    /// looking side against the multiset on the looked side; panics (with
    /// diagnostic locations) on the first mismatching row.
    fn check_ctl<F: Field>(
        trace_poly_values: &[Vec<PolynomialValues<F>>],
        ctl: &CrossTableLookup<F>,
        ctl_index: usize,
    ) {
        let CrossTableLookup {
            looking_tables,
            looked_table,
        } = ctl;

        // Maps `m` with `(table, i) in m[row]` iff the `i`-th row of `table` is equal to `row` and
        // the filter is 1. Without default values, the CTL check holds iff `looking_multiset == looked_multiset`.
        let mut looking_multiset = MultiSet::<F>::new();
        let mut looked_multiset = MultiSet::<F>::new();

        for table in looking_tables {
            process_table(trace_poly_values, table, &mut looking_multiset);
        }
        process_table(trace_poly_values, looked_table, &mut looked_multiset);

        // Shared empty location list for rows absent from one side.
        // (`Vec::new` allocates nothing; `vec![]` here was flagged by clippy's `useless_vec`.)
        let empty = &Vec::new();
        // Check that every row in the looking tables appears in the looked table the same number of times.
        for (row, looking_locations) in &looking_multiset {
            let looked_locations = looked_multiset.get(row).unwrap_or(empty);
            check_locations(looking_locations, looked_locations, ctl_index, row);
        }
        // Check that every row in the looked table appears in the looking tables the same number of times.
        for (row, looked_locations) in &looked_multiset {
            let looking_locations = looking_multiset.get(row).unwrap_or(empty);
            check_locations(looking_locations, looked_locations, ctl_index, row);
        }
    }

    /// Evaluate `table`'s CTL columns on every trace row whose filter is 1 and
    /// record each resulting row's location in `multiset`.
    ///
    /// Panics if the filter column evaluates to anything other than 0 or 1.
    fn process_table<F: Field>(
        trace_poly_values: &[Vec<PolynomialValues<F>>],
        table: &TableWithColumns<F>,
        multiset: &mut MultiSet<F>,
    ) {
        let trace = &trace_poly_values[table.table as usize];
        for i in 0..trace[0].len() {
            // Absent filter column means "always include this row".
            let filter = if let Some(column) = &table.filter_column {
                column.eval_table(trace, i)
            } else {
                F::ONE
            };
            if filter.is_one() {
                let row = table
                    .columns
                    .iter()
                    .map(|c| c.eval_table(trace, i))
                    .collect::<Vec<_>>();
                multiset.entry(row).or_default().push((table.table, i));
            } else {
                assert_eq!(filter, F::ZERO, "Non-binary filter?")
            }
        }
    }

    /// Panic with a descriptive message if a row occurs a different number of
    /// times on the looking side than on the looked side.
    fn check_locations<F: Field>(
        looking_locations: &[(Table, usize)],
        looked_locations: &[(Table, usize)],
        ctl_index: usize,
        row: &[F],
    ) {
        if looking_locations.len() != looked_locations.len() {
            panic!(
                "CTL #{ctl_index}:\n\
                 Row {row:?} is present {l0} times in the looking tables, but {l1} times in the looked table.\n\
                 Looking locations (Table, Row index): {looking_locations:?}.\n\
                 Looked locations (Table, Row index): {looked_locations:?}.",
                l0 = looking_locations.len(),
                l1 = looked_locations.len(),
            );
        }
    }
}
|