Mirror of https://github.com/logos-storage/plonky2.git, synced 2026-02-07 23:43:08 +00:00
Use CTL challenges for logUP + change comments + add assert
This commit is contained in: parent f65ad58a08, commit c9c0f8b7e5
@@ -126,6 +126,12 @@ impl<F: RichField, const D: usize> ArithmeticStark<F, D> {
        for col in SHARED_COLS {
            for i in 0..n_rows {
                let x = cols[col][i].to_canonical_u64() as usize;
+                assert!(
+                    x < RANGE_MAX,
+                    "column value {} exceeds the max range value {}",
+                    x,
+                    RANGE_MAX
+                );
                cols[RC_FREQUENCIES][x] += F::ONE;
            }
        }
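
As an aside (not part of the diff): the assert guarantees every shared-column value fits below `RANGE_MAX`, and `RC_FREQUENCIES` counts how often each value occurs — the multiplicities the range-check lookup consumes. A minimal Python sketch with made-up values:

RANGE_MAX = 8                                # stand-in for the real bound
shared_cols = [[3, 5, 3, 7], [0, 5, 2, 3]]   # made-up trace columns

freq = [0] * RANGE_MAX
for col in shared_cols:
    for x in col:
        assert x < RANGE_MAX, f"column value {x} exceeds the max range value {RANGE_MAX}"
        freq[x] += 1                         # one multiplicity per occurrence

# Every looked-up value is accounted for exactly once in the frequency table.
assert sum(freq) == sum(len(col) for col in shared_cols)
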
@@ -1,97 +0,0 @@
// Inspired by https://github.com/AztecProtocol/weierstrudel/blob/master/huff_modules/endomorphism.huff
// See also Sage code in evm/src/cpu/kernel/tests/ecc/bn_glv_test_data
// Given scalar `k ∈ Bn254::ScalarField`, return `u, k1, k2` with `k1,k2 < 2^127` and such that
// `k = k1 - s*k2` if `u==0` otherwise `k = k1 + s*k2`, where `s` is the scalar value representing the endomorphism.
// In the comments below, N means @BN_SCALAR
//
// Z3 proof that the resulting `k1, k2` satisfy `k1>0`, `k1 < 2^127` and `|k2| < 2^127`.
// ```python
// from z3 import Solver, Int, Or, unsat
// q = 0x30644E72E131A029B85045B68181585D2833E84879B9709143E1F593F0000001
// glv_s = 0xB3C4D79D41A917585BFC41088D8DAAA78B17EA66B99C90DD
//
// b2 = 0x89D3256894D213E3
// b1 = -0x6F4D8248EEB859FC8211BBEB7D4F1128
//
// g1 = 0x24CCEF014A773D2CF7A7BD9D4391EB18D
// g2 = 0x2D91D232EC7E0B3D7
// k = Int("k")
// c1 = Int("c1")
// c2 = Int("c2")
// s = Solver()
//
// c2p = -c2
// s.add(k < q)
// s.add(0 < k)
// s.add(c1 * (2**256) <= g2 * k)
// s.add((c1 + 1) * (2**256) > g2 * k)
// s.add(c2p * (2**256) <= g1 * k)
// s.add((c2p + 1) * (2**256) > g1 * k)
//
// q1 = c1 * b1
// q2 = c2 * b2
//
// k2 = q2 - q1
// k2L = (glv_s * k2) % q
// k1 = k - k2L
// k2 = -k2
//
// s.add(Or((k2 >= 2**127), (-k2 >= 2**127), (k1 >= 2**127), (k1 < 0)))
//
// assert s.check() == unsat
// ```
global bn_glv_decompose:
    // stack: k, retdest
    PUSH @BN_SCALAR DUP1 DUP1
    // Compute c2 which is the top 256 bits of k*g1. Use asm from https://medium.com/wicketh/mathemagic-full-multiply-27650fec525d.
    PUSH @U256_MAX
    // stack: -1, N, N, N, k, retdest
    PUSH @BN_GLV_MINUS_G1 DUP6
    // stack: k, g1, -1, N, N, N, k, retdest
    MULMOD
    // stack: (k * g1 % -1), N, N, N, k, retdest
    PUSH @BN_GLV_MINUS_G1 DUP6
    // stack: k, g1, (k * g1 % -1), N, N, N, k, retdest
    MUL
    // stack: bottom = (k * g1), (k * g1 % -1), N, N, N, k, retdest
    DUP1 DUP3
    // stack: (k * g1 % -1), bottom, bottom, (k * g1 % -1), N, N, N, k, retdest
    LT SWAP2 SUB SUB
    // stack: c2, N, N, N, k, retdest
    PUSH @BN_GLV_B2 MULMOD
    // stack: q2=c2*b2, N, N, k, retdest

    // Use the same trick to compute c1 = top 256 bits of g2*k.
    PUSH @BN_SCALAR PUSH @U256_MAX
    PUSH @BN_GLV_G2 DUP7 MULMOD
    PUSH @BN_GLV_G2 DUP7 MUL
    DUP1 DUP3 LT
    SWAP2 SUB SUB
    // stack: c1, N, q2, N, N, k, retdest
    PUSH @BN_GLV_B1 MULMOD
    // stack: q1, q2, N, N, k, retdest

    // We compute k2 = q1 + q2 - N, but we check for underflow and return N-q1-q2 instead if there is one,
    // along with a flag `underflow` set to 1 if there is an underflow, 0 otherwise.
    ADD %sub_check_underflow
    // stack: k2, underflow, N, k, retdest
    SWAP3 PUSH @BN_SCALAR DUP5 PUSH @BN_GLV_S
    // stack: s, k2, N, k, underflow, N, k2, retdest
    MULMOD
    // stack: s*k2, k, underflow, N, k2, retdest
    // Need to return `k + s*k2` if no underflow occurs, otherwise return `k - s*k2`, which is done in the `underflowed` fn.
    SWAP2 DUP1 %jumpi(underflowed)
    %stack (underflow, k, x, N, k2) -> (k, x, N, k2, underflow)
    ADDMOD
    %stack (k1, k2, underflow, retdest) -> (retdest, underflow, k1, k2)
    JUMP

underflowed:
    // stack: underflow, k, s*k2, N, k2
    // Compute (k-s*k2)%N. TODO: Use SUBMOD here when ready
    %stack (u, k, x, N, k2) -> (N, x, k, N, k2, u)
    SUB ADDMOD
    %stack (k1, k2, underflow, retdest) -> (retdest, underflow, k1, k2)
    JUMP
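
As an aside (not from the repository): a plain-Python sketch of what `bn_glv_decompose` computes, following the Z3 model in the comments rather than the stack manipulation. The `high_256` helper reproduces the MUL / MULMOD-by-`@U256_MAX` / `LT SWAP2 SUB SUB` trick used to read off the top 256 bits of a product; the flag returned here simply records the sign of `k2` and may not match the asm's `underflow` flag bit for bit.

import random

# Constants copied from the Z3 model in the comments above.
Q     = 0x30644E72E131A029B85045B68181585D2833E84879B9709143E1F593F0000001  # @BN_SCALAR (N)
GLV_S = 0xB3C4D79D41A917585BFC41088D8DAAA78B17EA66B99C90DD                  # the endomorphism scalar s
B1    = -0x6F4D8248EEB859FC8211BBEB7D4F1128
B2    = 0x89D3256894D213E3
G1    = 0x24CCEF014A773D2CF7A7BD9D4391EB18D
G2    = 0x2D91D232EC7E0B3D7

U256_MAX = (1 << 256) - 1

def high_256(a, b):
    # Top 256 bits of a*b, recovered the way the asm does it:
    lo = (a * b) & U256_MAX            # MUL
    m = (a * b) % U256_MAX             # MULMOD with @U256_MAX as the modulus
    return (m - lo - (1 if m < lo else 0)) & U256_MAX   # LT SWAP2 SUB SUB

def bn_glv_decompose(k):
    assert 0 < k < Q
    c1 = high_256(G2, k)               # == (G2 * k) >> 256
    c2 = -high_256(G1, k)              # c2p = -c2 in the Z3 model
    k2 = c2 * B2 - c1 * B1             # k2 = q2 - q1
    k1 = k - (GLV_S * k2) % Q          # so k == k1 + s*k2 (mod Q) by construction
    u = 1 if k2 >= 0 else 0            # 1: k == k1 + s*|k2|, 0: k == k1 - s*|k2|
    return u, k1, abs(k2)

for _ in range(100):
    k = random.randrange(1, Q)
    u, k1, k2 = bn_glv_decompose(k)
    assert (k1 + GLV_S * k2 if u else k1 - GLV_S * k2) % Q == k
    assert 0 <= k1 < 2**127 and k2 < 2**127   # the bounds the Z3 model proves
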
@@ -1,73 +0,0 @@
// Computes the multiplication `a*G` using a standard MSM with the GLV decomposition of `a`.
// See there for a detailed description.
global bn_msm:
    // stack: retdest
    PUSH 0 PUSH 0 PUSH 0
global bn_msm_loop:
    // stack: accx, accy, i, retdest
    DUP3 %bn_mload_wnaf_a
    // stack: w, accx, accy, i, retdest
    DUP1 %jumpi(bn_msm_loop_add_a_nonzero)
    POP
msm_loop_add_b:
    // stack: accx, accy, i, retdest
    DUP3 %bn_mload_wnaf_b
    // stack: w, accx, accy, i, retdest
    DUP1 %jumpi(bn_msm_loop_add_b_nonzero)
    POP
msm_loop_contd:
    %stack (accx, accy, i, retdest) -> (i, i, accx, accy, retdest)
    // TODO: the GLV scalars for the BN curve are 127-bit, so could use 127 here. But this would require modifying `wnaf.asm`. Not sure it's worth it...
    %eq_const(129) %jumpi(msm_end)
    %increment
    // stack: i+1, accx, accy, retdest
    %stack (i, accx, accy, retdest) -> (accx, accy, bn_msm_loop, i, retdest)
    %jump(bn_double)

msm_end:
    %stack (i, accx, accy, retdest) -> (retdest, accx, accy)
    JUMP

bn_msm_loop_add_a_nonzero:
    %stack (w, accx, accy, i, retdest) -> (w, accx, accy, msm_loop_add_b, i, retdest)
    %bn_mload_point_a
    // stack: px, py, accx, accy, msm_loop_add_b, i, retdest
    %jump(bn_add_valid_points)

bn_msm_loop_add_b_nonzero:
    %stack (w, accx, accy, i, retdest) -> (w, accx, accy, msm_loop_contd, i, retdest)
    %bn_mload_point_b
    // stack: px, py, accx, accy, msm_loop_contd, i, retdest
    %jump(bn_add_valid_points)

%macro bn_mload_wnaf_a
    // stack: i
    %mload_kernel(@SEGMENT_KERNEL_BN_WNAF_A)
%endmacro

%macro bn_mload_wnaf_b
    // stack: i
    %mload_kernel(@SEGMENT_KERNEL_BN_WNAF_B)
%endmacro

%macro bn_mload_point_a
    // stack: w
    DUP1
    %mload_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
    // stack: Gy, w
    SWAP1 %decrement %mload_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
    // stack: Gx, Gy
%endmacro

%macro bn_mload_point_b
    // stack: w
    DUP1
    %mload_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
    PUSH @BN_BNEG_LOC %mload_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
    %stack (bneg, Gy, w) -> (@BN_BASE, Gy, bneg, bneg, Gy, w)
    SUB SWAP1 ISZERO MUL SWAP2 MUL ADD
    SWAP1 %decrement %mload_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
    // stack: Gx, Gy
    PUSH @BN_GLV_BETA
    MULFP254
%endmacro
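
As an aside (not from the repository): the loop above is a shared double-and-add over two digit streams, one per GLV half. The toy below replaces curve points with integers mod a prime and uses plain binary digits in place of the real wNAF encoding, just to show the control flow; `bits=130` mirrors the asm's `0..=129` index range.

P = (1 << 61) - 1        # toy group, standing in for BN254 points

def msm(k1, k2, g_a, g_b, bits=130):
    wa = [(k1 >> (bits - 1 - i)) & 1 for i in range(bits)]   # MSB-first digits ("wnaf_a")
    wb = [(k2 >> (bits - 1 - i)) & 1 for i in range(bits)]   # MSB-first digits ("wnaf_b")
    acc = 0
    for i in range(bits):
        if wa[i]:
            acc = (acc + g_a) % P        # bn_msm_loop_add_a_nonzero: add a table-A point
        if wb[i]:
            acc = (acc + g_b) % P        # bn_msm_loop_add_b_nonzero: add a table-B point
        if i != bits - 1:
            acc = (acc * 2) % P          # bn_double between digit positions
    return acc

k1, k2, g_a, g_b = 0x1234, 0xABCD, 3, 7
assert msm(k1, k2, g_a, g_b) == (k1 * g_a + k2 * g_b) % P
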
@@ -1,35 +0,0 @@
// Precompute a table of multiples of the BN254 point `Q = (Qx, Qy)`.
// Let `(Qxi, Qyi) = i * Q`, then store in the `SEGMENT_KERNEL_BN_TABLE_Q` segment of memory the values
// `i-1 => Qxi`, `i => Qyi if i < 16 else -Qy(32-i)` for `i in range(1, 32, 2)`.
global bn_precompute_table:
    // stack: Qx, Qy, retdest
    PUSH precompute_table_contd DUP3 DUP3
    %jump(bn_double)
precompute_table_contd:
    // stack: Qx2, Qy2, Qx, Qy, retdest
    PUSH 1
bn_precompute_table_loop:
    // stack: i, Qx2, Qy2, Qx, Qy, retdest
    PUSH 1 DUP2 SUB
    %stack (im, i, Qx2, Qy2, Qx, Qy, retdest) -> (i, Qy, im, Qx, i, Qx2, Qy2, Qx, Qy, retdest)
    %mstore_kernel(@SEGMENT_KERNEL_BN_TABLE_Q) %mstore_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
    // stack: i, Qx2, Qy2, Qx, Qy, retdest
    DUP1 PUSH 32 SUB PUSH 1 DUP2 SUB
    // stack: 31-i, 32-i, i, Qx2, Qy2, Qx, Qy, retdest
    DUP7 PUSH @BN_BASE SUB
    // TODO: Could maybe avoid storing Qx a second time here, not sure if it would be more efficient.
    %stack (Qyy, iii, ii, i, Qx2, Qy2, Qx, Qy, retdest) -> (iii, Qx, ii, Qyy, i, Qx2, Qy2, Qx, Qy, retdest)
    %mstore_kernel(@SEGMENT_KERNEL_BN_TABLE_Q) %mstore_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
    // stack: i, Qx2, Qy2, Qx, Qy, retdest
    PUSH 2 ADD
    // stack: i+2, Qx2, Qy2, Qx, Qy, retdest
    DUP1 PUSH 16 LT %jumpi(precompute_table_end)
    %stack (i, Qx2, Qy2, Qx, Qy, retdest) -> (Qx, Qy, Qx2, Qy2, precompute_table_loop_contd, i, Qx2, Qy2, retdest)
    %jump(bn_add_valid_points)
precompute_table_loop_contd:
    %stack (Qx, Qy, i, Qx2, Qy2, retdest) -> (i, Qx2, Qy2, Qx, Qy, retdest)
    %jump(bn_precompute_table_loop)

precompute_table_end:
    // stack: i, Qx2, Qy2, Qx, Qy, retdest
    %pop5 JUMP
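
As an aside (not from the repository): a sketch of the slot layout the comment above describes, assuming the wNAF digits are encoded as the table indices 1..31 that `%bn_mload_point_a`/`_b` read directly. Slot `i-1` holds an x-coordinate (shared by `P` and `-P`), slot `i` the matching y, negated for `i >= 16`.

def table_slots():
    # slot index -> symbolic content, per the comment above
    slots = {}
    for i in range(1, 32, 2):
        j = i if i < 16 else 32 - i                  # the underlying multiple of Q
        slots[i - 1] = f"Qx{j}"                      # x(j*Q) == x(-(j*Q))
        slots[i] = f"Qy{j}" if i < 16 else f"-Qy{j}"
    return slots

s = table_slots()
assert s[0] == "Qx1" and s[1] == "Qy1"        # index 1  ->  1*Q
assert s[14] == "Qx15" and s[15] == "Qy15"    # index 15 -> 15*Q
assert s[30] == "Qx1" and s[31] == "-Qy1"     # index 31 -> -1*Q
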
@@ -205,11 +205,7 @@ pub(crate) fn observe_public_values_target<

impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> AllProof<F, C, D> {
    /// Computes all Fiat-Shamir challenges used in the STARK proof.
-    pub(crate) fn get_challenges(
-        &self,
-        all_stark: &AllStark<F, D>,
-        config: &StarkConfig,
-    ) -> AllProofChallenges<F, D> {
+    pub(crate) fn get_challenges(&self, config: &StarkConfig) -> AllProofChallenges<F, D> {
        let mut challenger = Challenger::<F, C::Hasher>::new();

        for proof in &self.stark_proofs {

@@ -221,14 +217,12 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> A
        let ctl_challenges =
            get_grand_product_challenge_set(&mut challenger, config.num_challenges);

-        let lookups = all_stark.num_lookups_helper_columns(config);
-
        AllProofChallenges {
            stark_challenges: core::array::from_fn(|i| {
                challenger.compact();
                self.stark_proofs[i]
                    .proof
-                    .get_challenges(&mut challenger, lookups[i] > 0, config)
+                    .get_challenges(&mut challenger, config)
            }),
            ctl_challenges,
        }

@@ -257,7 +251,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> A
        for i in 0..NUM_TABLES {
            self.stark_proofs[i]
                .proof
-                .get_challenges(&mut challenger, lookups[i] > 0, config);
+                .get_challenges(&mut challenger, config);
            challenger_states.push(challenger.compact());
        }

@@ -277,7 +271,6 @@ where
    pub(crate) fn get_challenges(
        &self,
        challenger: &mut Challenger<F, C::Hasher>,
-        stark_use_lookup: bool,
        config: &StarkConfig,
    ) -> StarkProofChallenges<F, D> {
        let degree_bits = self.recover_degree_bits(config);

@@ -298,9 +291,6 @@ where

        let num_challenges = config.num_challenges;

-        let lookup_challenges =
-            stark_use_lookup.then(|| challenger.get_n_challenges(config.num_challenges));
-
        challenger.observe_cap(auxiliary_polys_cap);

        let stark_alphas = challenger.get_n_challenges(num_challenges);

@@ -311,7 +301,6 @@ where
        challenger.observe_openings(&openings.to_fri_openings());

        StarkProofChallenges {
-            lookup_challenges,
            stark_alphas,
            stark_zeta,
            fri_challenges: challenger.fri_challenges::<C, D>(

@@ -330,7 +319,6 @@ impl<const D: usize> StarkProofTarget<D> {
        &self,
        builder: &mut CircuitBuilder<F, D>,
        challenger: &mut RecursiveChallenger<F, C::Hasher, D>,
-        stark_use_lookup: bool,
        config: &StarkConfig,
    ) -> StarkProofChallengesTarget<D>
    where

@@ -352,9 +340,6 @@ impl<const D: usize> StarkProofTarget<D> {

        let num_challenges = config.num_challenges;

-        let lookup_challenges =
-            stark_use_lookup.then(|| challenger.get_n_challenges(builder, num_challenges));
-
        challenger.observe_cap(auxiliary_polys);

        let stark_alphas = challenger.get_n_challenges(builder, num_challenges);

@@ -365,7 +350,6 @@ impl<const D: usize> StarkProofTarget<D> {
        challenger.observe_openings(&openings.to_fri_openings(builder.zero()));

        StarkProofChallengesTarget {
-            lookup_challenges,
            stark_alphas,
            stark_zeta,
            fri_challenges: challenger.fri_challenges(
@@ -650,7 +650,9 @@ mod tests {
    use tiny_keccak::keccakf;

    use crate::config::StarkConfig;
-    use crate::cross_table_lookup::{CtlData, CtlZData, GrandProductChallenge};
+    use crate::cross_table_lookup::{
+        CtlData, CtlZData, GrandProductChallenge, GrandProductChallengeSet,
+    };
    use crate::keccak::columns::reg_output_limb;
    use crate::keccak::keccak_stark::{KeccakStark, NUM_INPUTS, NUM_ROUNDS};
    use crate::prover::prove_single_table;

@@ -766,7 +768,7 @@ mod tests {
            filter_column: None,
        };
        let ctl_data = CtlData {
-            zs_columns: vec![ctl_z_data; config.num_challenges],
+            zs_columns: vec![ctl_z_data.clone(); config.num_challenges],
        };

        prove_single_table(

@@ -775,6 +777,9 @@ mod tests {
            &trace_poly_values,
            &trace_commitments,
            &ctl_data,
+            GrandProductChallengeSet {
+                challenges: vec![ctl_z_data.challenge; config.num_challenges],
+            },
            &mut Challenger::new(),
            &mut timing,
        )?;
@@ -34,7 +34,7 @@ impl Lookup {
    }
}

-/// logUp protocol from https://ia.cr/2022/1530 (TODO link to newer version?)
+/// logUp protocol from https://ia.cr/2022/1530
/// Compute the helper columns for the lookup argument.
/// Given columns `f0,...,fk` and a column `t`, such that `∪fi ⊆ t`, and challenges `x`,
/// this computes the helper columns `h_i = 1/(x+f_2i) + 1/(x+f_2i+1)`, `g = 1/(x+t)`,

@@ -55,10 +55,10 @@ pub(crate) fn lookup_helper_columns<F: Field>(
    // For each batch of `constraint_degree-1` columns `fi`, compute `sum 1/(f_i+challenge)` and
    // add it to the helper columns.
    // TODO: This does one batch inversion per column. It would also be possible to do one batch inversion
-    // for every column, but that would require building a big vector of all the columns concatenated.
+    // for every group of columns, but that would require building a big vector of all the columns concatenated.
    // Not sure which approach is better.
    // Note: these are the h_k(x) polynomials in the paper, with a few differences:
    // * Here, the first ratio m_0(x)/phi_0(x) is not included with the columns batched up to create the
    //   h_k polynomials; instead there's a separate helper column for it (see below).
    // * Here, we use 1 instead of -1 as the numerator (and subtract later).
    // * Here, for now, the batch size (l) is always constraint_degree - 1 = 2.
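
As an aside (not from the repository): a toy-field Python sketch of the helper columns described above, with made-up column values. The real code keeps the multiplicity in its own column and uses `1` as the numerator, subtracting later (see the notes above); here the multiplicity is folded into the table-side term just to exhibit the identity the helper columns support.

P = 2**31 - 1                       # toy prime field
def inv(a):
    return pow(a, P - 2, P)

t = [1, 2, 3, 4]                    # table column
f = [[1, 1, 2, 3], [3, 3, 4, 4]]    # looked-up columns; every value appears in t
x = 987654321                       # challenge (now taken from the CTL challenges)

# Multiplicity of each table row's value across all looked-up columns.
m = [sum(col.count(v) for col in f) for v in t]

# One helper column per pair of f-columns (batch size = constraint_degree - 1 = 2),
# plus the table-side terms.
h = [(inv(x + f[0][r]) + inv(x + f[1][r])) % P for r in range(len(t))]
g = [m[r] * inv(x + t[r]) % P for r in range(len(t))]

# Summed over all rows, the looking side and the table side agree.
assert sum(h) % P == sum(g) % P
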
@@ -137,7 +137,7 @@ impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
        trace_rows
    }

-    /// Generates the `COUNTER`, `RANGE_CHECK_PERMUTED` and `COUNTER_PERMUTED` columns, given a
+    /// Generates the `COUNTER`, `RANGE_CHECK` and `FREQUENCIES` columns, given a
    /// trace in column-major form.
    fn generate_trace_col_major(trace_col_vecs: &mut [Vec<F>]) {
        let height = trace_col_vecs[0].len();
@@ -674,9 +674,6 @@ impl<const D: usize> StarkProofTarget<D> {
}

pub(crate) struct StarkProofChallenges<F: RichField + Extendable<D>, const D: usize> {
-    /// Randomness used in lookup arguments.
-    pub lookup_challenges: Option<Vec<F>>,
-
    /// Random values used to combine STARK constraints.
    pub stark_alphas: Vec<F>,

@@ -687,7 +684,6 @@ pub(crate) struct StarkProofChallenges<F: RichField + Extendable<D>, const D: us
}

pub(crate) struct StarkProofChallengesTarget<const D: usize> {
-    pub lookup_challenges: Option<Vec<Target>>,
    pub stark_alphas: Vec<Target>,
    pub stark_zeta: ExtensionTarget<D>,
    pub fri_challenges: FriChallengesTarget<D>,
@@ -28,6 +28,7 @@ use crate::cpu::cpu_stark::CpuStark;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cross_table_lookup::{
    cross_table_lookup_data, get_grand_product_challenge_set, CtlCheckVars, CtlData,
+    GrandProductChallengeSet,
};
use crate::generation::outputs::GenerationOutputs;
use crate::generation::{generate_traces, GenerationInputs};

@@ -172,6 +173,7 @@ where
            trace_commitments,
            ctl_data_per_table,
            &mut challenger,
+            ctl_challenges.clone(),
            timing
        )?
    );

@@ -190,6 +192,7 @@ fn prove_with_commitments<F, C, const D: usize>(
    trace_commitments: Vec<PolynomialBatch<F, C, D>>,
    ctl_data_per_table: [CtlData<F>; NUM_TABLES],
    challenger: &mut Challenger<F, C::Hasher>,
+    ctl_challenges: GrandProductChallengeSet<F>,
    timing: &mut TimingTree,
) -> Result<[StarkProofWithMetadata<F, C, D>; NUM_TABLES]>
where

@@ -212,6 +215,7 @@ where
            &trace_poly_values[Table::Arithmetic as usize],
            &trace_commitments[Table::Arithmetic as usize],
            &ctl_data_per_table[Table::Arithmetic as usize],
+            ctl_challenges.clone(),
            challenger,
            timing,
        )?

@@ -238,6 +242,7 @@ where
            &trace_poly_values[Table::Cpu as usize],
            &trace_commitments[Table::Cpu as usize],
            &ctl_data_per_table[Table::Cpu as usize],
+            ctl_challenges.clone(),
            challenger,
            timing,
        )?

@@ -251,6 +256,7 @@ where
            &trace_poly_values[Table::Keccak as usize],
            &trace_commitments[Table::Keccak as usize],
            &ctl_data_per_table[Table::Keccak as usize],
+            ctl_challenges.clone(),
            challenger,
            timing,
        )?

@@ -264,6 +270,7 @@ where
            &trace_poly_values[Table::KeccakSponge as usize],
            &trace_commitments[Table::KeccakSponge as usize],
            &ctl_data_per_table[Table::KeccakSponge as usize],
+            ctl_challenges.clone(),
            challenger,
            timing,
        )?

@@ -277,6 +284,7 @@ where
            &trace_poly_values[Table::Logic as usize],
            &trace_commitments[Table::Logic as usize],
            &ctl_data_per_table[Table::Logic as usize],
+            ctl_challenges.clone(),
            challenger,
            timing,
        )?

@@ -290,6 +298,7 @@ where
            &trace_poly_values[Table::Memory as usize],
            &trace_commitments[Table::Memory as usize],
            &ctl_data_per_table[Table::Memory as usize],
+            ctl_challenges,
            challenger,
            timing,
        )?

@@ -313,6 +322,7 @@ pub(crate) fn prove_single_table<F, C, S, const D: usize>(
    trace_poly_values: &[PolynomialValues<F>],
    trace_commitment: &PolynomialBatch<F, C, D>,
    ctl_data: &CtlData<F>,
+    ctl_challenges: GrandProductChallengeSet<F>,
    challenger: &mut Challenger<F, C::Hasher>,
    timing: &mut TimingTree,
) -> Result<StarkProofWithMetadata<F, C, D>>

@@ -335,9 +345,13 @@ where
    let init_challenger_state = challenger.compact();

    let constraint_degree = stark.constraint_degree();
-    let lookup_challenges = stark
-        .uses_lookups()
-        .then(|| challenger.get_n_challenges(config.num_challenges));
+    let lookup_challenges = stark.uses_lookups().then(|| {
+        ctl_challenges
+            .challenges
+            .iter()
+            .map(|ch| ch.beta)
+            .collect::<Vec<_>>()
+    });
    let lookups = stark.lookups();
    let lookup_helper_columns = timed!(
        timing,
@@ -339,12 +339,8 @@ where
    }));
    let mut challenger =
        RecursiveChallenger::<F, C::Hasher, D>::from_state(init_challenger_state_target);
-    let challenges = proof_target.get_challenges::<F, C>(
-        &mut builder,
-        &mut challenger,
-        num_lookup_columns > 0,
-        inner_config,
-    );
+    let challenges =
+        proof_target.get_challenges::<F, C>(&mut builder, &mut challenger, inner_config);
    let challenger_state = challenger.compact(&mut builder);
    builder.register_public_inputs(challenger_state.as_ref());

@@ -438,10 +434,17 @@ fn verify_stark_proof_with_challenges_circuit<
    );

    let num_lookup_columns = stark.num_lookup_helper_columns(inner_config);
+    let lookup_challenges = (num_lookup_columns > 0).then(|| {
+        ctl_vars
+            .iter()
+            .map(|ch| ch.challenges.beta)
+            .collect::<Vec<_>>()
+    });
+
    let lookup_vars = stark.uses_lookups().then(|| LookupCheckVarsTarget {
        local_values: auxiliary_polys[..num_lookup_columns].to_vec(),
        next_values: auxiliary_polys_next[..num_lookup_columns].to_vec(),
-        challenges: challenges.lookup_challenges.clone().unwrap(),
+        challenges: lookup_challenges.unwrap(),
    });

    with_context!(
@@ -50,7 +50,7 @@ where
    let AllProofChallenges {
        stark_challenges,
        ctl_challenges,
-    } = all_proof.get_challenges(all_stark, config);
+    } = all_proof.get_challenges(config);

    let num_lookup_columns = all_stark.num_lookups_helper_columns(config);

@@ -331,10 +331,17 @@ where
        l_last,
    );
    let num_lookup_columns = stark.num_lookup_helper_columns(config);
+    let lookup_challenges = (num_lookup_columns > 0).then(|| {
+        ctl_vars
+            .iter()
+            .map(|ch| ch.challenges.beta)
+            .collect::<Vec<_>>()
+    });
+
    let lookup_vars = stark.uses_lookups().then(|| LookupCheckVars {
        local_values: auxiliary_polys[..num_lookup_columns].to_vec(),
        next_values: auxiliary_polys_next[..num_lookup_columns].to_vec(),
-        challenges: challenges.lookup_challenges.clone().unwrap(),
+        challenges: lookup_challenges.unwrap(),
    });
    let lookups = stark.lookups();
    eval_vanishing_poly::<F, F::Extension, F::Extension, S, D, D>(