use ethereum_types::{Address, H256, U256};
use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::fri::oracle::PolynomialBatch;
use plonky2::fri::proof::{FriChallenges, FriChallengesTarget, FriProof, FriProofTarget};
use plonky2::fri::structure::{
    FriOpeningBatch, FriOpeningBatchTarget, FriOpenings, FriOpeningsTarget,
};
use plonky2::hash::hash_types::{MerkleCapTarget, RichField};
use plonky2::hash::hashing::SPONGE_WIDTH;
use plonky2::hash::merkle_tree::MerkleCap;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
use plonky2::plonk::config::GenericConfig;
use plonky2_maybe_rayon::*;
use serde::{Deserialize, Serialize};

use crate::all_stark::NUM_TABLES;
use crate::config::StarkConfig;
use crate::permutation::GrandProductChallengeSet;

/// A STARK proof for each table, plus some metadata used to create recursive wrapper proofs.
#[derive(Debug, Clone)]
pub struct AllProof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> {
    pub stark_proofs: [StarkProofWithMetadata<F, C, D>; NUM_TABLES],
    pub(crate) ctl_challenges: GrandProductChallengeSet<F>,
    pub public_values: PublicValues,
}

impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> AllProof<F, C, D> {
    pub fn degree_bits(&self, config: &StarkConfig) -> [usize; NUM_TABLES] {
        core::array::from_fn(|i| self.stark_proofs[i].proof.recover_degree_bits(config))
    }
}

pub(crate) struct AllProofChallenges<F: RichField + Extendable<D>, const D: usize> {
    pub stark_challenges: [StarkProofChallenges<F, D>; NUM_TABLES],
    pub ctl_challenges: GrandProductChallengeSet<F>,
}

#[allow(unused)] // TODO: should be used soon
pub(crate) struct AllChallengerState<F: RichField + Extendable<D>, const D: usize> {
    /// Sponge state of the challenger before starting each proof,
    /// along with the final state after all proofs are done. This final state isn't strictly needed.
    pub states: [[F; SPONGE_WIDTH]; NUM_TABLES + 1],
    pub ctl_challenges: GrandProductChallengeSet<F>,
}

/// Memory values which are public.
#[derive(Debug, Clone, Default)]
pub struct PublicValues {
    pub trie_roots_before: TrieRoots,
    pub trie_roots_after: TrieRoots,
    pub block_metadata: BlockMetadata,
}

#[derive(Debug, Clone, Default)]
pub struct TrieRoots {
    pub state_root: H256,
    pub transactions_root: H256,
    pub receipts_root: H256,
}

#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct BlockMetadata {
    pub block_beneficiary: Address,
    pub block_timestamp: U256,
    pub block_number: U256,
    pub block_difficulty: U256,
    pub block_gaslimit: U256,
    pub block_chain_id: U256,
    pub block_base_fee: U256,
}

/// Circuit version of `PublicValues`: memory values which are public.
/// Note: All the larger integers are encoded with 32-bit limbs in little-endian order.
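/// For example, each 256-bit `H256` trie root is represented as eight 32-bit limbs
/// (`[Target; 8]`), and the 160-bit beneficiary address as five limbs (`[Target; 5]`).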
pub struct PublicValuesTarget {
    pub trie_roots_before: TrieRootsTarget,
    pub trie_roots_after: TrieRootsTarget,
    pub block_metadata: BlockMetadataTarget,
}

pub struct TrieRootsTarget {
    pub state_root: [Target; 8],
    pub transactions_root: [Target; 8],
    pub receipts_root: [Target; 8],
}

pub struct BlockMetadataTarget {
    pub block_beneficiary: [Target; 5],
    pub block_timestamp: Target,
    pub block_number: Target,
    pub block_difficulty: Target,
    pub block_gaslimit: Target,
    pub block_chain_id: Target,
    pub block_base_fee: Target,
}

#[derive(Debug, Clone)]
pub struct StarkProof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> {
    /// Merkle cap of LDEs of trace values.
    pub trace_cap: MerkleCap<F, C::Hasher>,
    /// Merkle cap of LDEs of permutation and cross-table lookup `Z` values.
    pub permutation_ctl_zs_cap: MerkleCap<F, C::Hasher>,
    /// Merkle cap of LDEs of quotient polynomial values.
    pub quotient_polys_cap: MerkleCap<F, C::Hasher>,
    /// Purported values of each polynomial at the challenge point.
    pub openings: StarkOpeningSet<F, D>,
    /// A batch FRI argument for all openings.
    pub opening_proof: FriProof<F, C::Hasher, D>,
}

/// A `StarkProof` along with some metadata about the initial Fiat-Shamir state, which is used when
/// creating a recursive wrapper proof around a STARK proof.
#[derive(Debug, Clone)]
pub struct StarkProofWithMetadata<F, C, const D: usize>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
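    // Challenger (Fiat-Shamir sponge) state just before this table's proof was generated.
    // A recursive wrapper proof can seed its challenger from this state to reproduce the
    // transcript shared across tables.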
    pub(crate) init_challenger_state: [F; SPONGE_WIDTH],
    pub(crate) proof: StarkProof<F, C, D>,
}

impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> StarkProof<F, C, D> {
    /// Recover the log2 of the trace length (`degree_bits`) from a STARK proof and a STARK config.
    pub fn recover_degree_bits(&self, config: &StarkConfig) -> usize {
        let initial_merkle_proof = &self.opening_proof.query_round_proofs[0]
            .initial_trees_proof
            .evals_proofs[0]
            .1;
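        // The length of this Merkle path plus the cap height gives the log2 size of the LDE
        // domain; subtracting `rate_bits` recovers `degree_bits`. E.g. with `cap_height = 4`
        // and a 16-sibling path, the LDE has 2^20 leaves, so with `rate_bits = 1` the trace
        // has 2^19 rows.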
        let lde_bits = config.fri_config.cap_height + initial_merkle_proof.siblings.len();
        lde_bits - config.fri_config.rate_bits
    }

    pub fn num_ctl_zs(&self) -> usize {
        self.openings.ctl_zs_last.len()
    }
}

pub struct StarkProofTarget<const D: usize> {
    pub trace_cap: MerkleCapTarget,
    pub permutation_ctl_zs_cap: MerkleCapTarget,
    pub quotient_polys_cap: MerkleCapTarget,
    pub openings: StarkOpeningSetTarget<D>,
    pub opening_proof: FriProofTarget<D>,
}

impl<const D: usize> StarkProofTarget<D> {
    /// Recover the log2 of the trace length (`degree_bits`) from a STARK proof and a STARK config.
    pub fn recover_degree_bits(&self, config: &StarkConfig) -> usize {
        let initial_merkle_proof = &self.opening_proof.query_round_proofs[0]
            .initial_trees_proof
            .evals_proofs[0]
            .1;
        let lde_bits = config.fri_config.cap_height + initial_merkle_proof.siblings.len();
        lde_bits - config.fri_config.rate_bits
    }
}

pub(crate) struct StarkProofChallenges<F: RichField + Extendable<D>, const D: usize> {
    /// Randomness used in any permutation arguments.
    pub permutation_challenge_sets: Option<Vec<GrandProductChallengeSet<F>>>,

    /// Random values used to combine STARK constraints.
    pub stark_alphas: Vec<F>,

    /// Point at which the STARK polynomials are opened.
    pub stark_zeta: F::Extension,

    pub fri_challenges: FriChallenges<F, D>,
}

pub(crate) struct StarkProofChallengesTarget<const D: usize> {
    pub permutation_challenge_sets: Option<Vec<GrandProductChallengeSet<Target>>>,
    pub stark_alphas: Vec<Target>,
    pub stark_zeta: ExtensionTarget<D>,
    pub fri_challenges: FriChallengesTarget<D>,
}

/// Purported values of each polynomial at the challenge point.
#[derive(Debug, Clone)]
pub struct StarkOpeningSet<F: RichField + Extendable<D>, const D: usize> {
    /// Openings of trace polynomials at `zeta`.
    pub local_values: Vec<F::Extension>,
    /// Openings of trace polynomials at `g * zeta`.
    pub next_values: Vec<F::Extension>,
    /// Openings of permutation and cross-table lookup `Z` polynomials at `zeta`.
    pub permutation_ctl_zs: Vec<F::Extension>,
    /// Openings of permutation and cross-table lookup `Z` polynomials at `g * zeta`.
    pub permutation_ctl_zs_next: Vec<F::Extension>,
    /// Openings of cross-table lookup `Z` polynomials at `g^-1`.
    pub ctl_zs_last: Vec<F>,
    /// Openings of quotient polynomials at `zeta`.
    pub quotient_polys: Vec<F::Extension>,
}

impl<F: RichField + Extendable<D>, const D: usize> StarkOpeningSet<F, D> {
    pub fn new<C: GenericConfig<D, F = F>>(
        zeta: F::Extension,
        g: F,
        trace_commitment: &PolynomialBatch<F, C, D>,
        permutation_ctl_zs_commitment: &PolynomialBatch<F, C, D>,
        quotient_commitment: &PolynomialBatch<F, C, D>,
        degree_bits: usize,
        num_permutation_zs: usize,
    ) -> Self {
        let eval_commitment = |z: F::Extension, c: &PolynomialBatch<F, C, D>| {
            c.polynomials
                .par_iter()
                .map(|p| p.to_extension().eval(z))
                .collect::<Vec<_>>()
        };
        let eval_commitment_base = |z: F, c: &PolynomialBatch<F, C, D>| {
            c.polynomials
                .par_iter()
                .map(|p| p.eval(z))
                .collect::<Vec<_>>()
        };
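        // `g` generates the trace subgroup, so evaluating at `g * zeta` yields the "next row"
        // openings used by transition constraints.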
        let zeta_next = zeta.scalar_mul(g);
        Self {
            local_values: eval_commitment(zeta, trace_commitment),
            next_values: eval_commitment(zeta_next, trace_commitment),
            permutation_ctl_zs: eval_commitment(zeta, permutation_ctl_zs_commitment),
            permutation_ctl_zs_next: eval_commitment(zeta_next, permutation_ctl_zs_commitment),
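            // The CTL `Z` polynomials are also opened at the last row of the trace. The trace
            // domain is the subgroup generated by `g`, whose last element is `g^(n-1) = g^-1`.
            // The first `num_permutation_zs` polynomials in this batch are permutation `Z`s,
            // so they are skipped here.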
            ctl_zs_last: eval_commitment_base(
                F::primitive_root_of_unity(degree_bits).inverse(),
                permutation_ctl_zs_commitment,
            )[num_permutation_zs..]
                .to_vec(),
            quotient_polys: eval_commitment(zeta, quotient_commitment),
        }
    }

    pub(crate) fn to_fri_openings(&self) -> FriOpenings<F, D> {
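        // One FRI opening batch per opening point: `zeta`, `g * zeta`, and `g^-1` (the last
        // trace row). The batch order here is expected to line up with the corresponding FRI
        // instance on the verifier side.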
        let zeta_batch = FriOpeningBatch {
            values: self
                .local_values
                .iter()
                .chain(&self.permutation_ctl_zs)
                .chain(&self.quotient_polys)
                .copied()
                .collect_vec(),
        };
        let zeta_next_batch = FriOpeningBatch {
            values: self
                .next_values
                .iter()
                .chain(&self.permutation_ctl_zs_next)
                .copied()
                .collect_vec(),
        };
        debug_assert!(!self.ctl_zs_last.is_empty());
        let ctl_last_batch = FriOpeningBatch {
            values: self
                .ctl_zs_last
                .iter()
                .copied()
                .map(F::Extension::from_basefield)
                .collect(),
        };

        FriOpenings {
            batches: vec![zeta_batch, zeta_next_batch, ctl_last_batch],
        }
    }
}

pub struct StarkOpeningSetTarget<const D: usize> {
    pub local_values: Vec<ExtensionTarget<D>>,
    pub next_values: Vec<ExtensionTarget<D>>,
    pub permutation_ctl_zs: Vec<ExtensionTarget<D>>,
    pub permutation_ctl_zs_next: Vec<ExtensionTarget<D>>,
    pub ctl_zs_last: Vec<Target>,
    pub quotient_polys: Vec<ExtensionTarget<D>>,
}

impl<const D: usize> StarkOpeningSetTarget<D> {
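    /// Circuit analogue of `StarkOpeningSet::to_fri_openings`. `zero` is the circuit's zero
    /// target, used to pad the base-field `ctl_zs_last` openings into extension targets.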
    pub(crate) fn to_fri_openings(&self, zero: Target) -> FriOpeningsTarget<D> {
        let zeta_batch = FriOpeningBatchTarget {
            values: self
                .local_values
                .iter()
                .chain(&self.permutation_ctl_zs)
                .chain(&self.quotient_polys)
                .copied()
                .collect_vec(),
        };
        let zeta_next_batch = FriOpeningBatchTarget {
            values: self
                .next_values
                .iter()
                .chain(&self.permutation_ctl_zs_next)
                .copied()
                .collect_vec(),
        };
        debug_assert!(!self.ctl_zs_last.is_empty());
        let ctl_last_batch = FriOpeningBatchTarget {
            values: self
                .ctl_zs_last
                .iter()
                .copied()
                .map(|t| t.to_ext_target(zero))
                .collect(),
        };

        FriOpeningsTarget {
            batches: vec![zeta_batch, zeta_next_batch, ctl_last_batch],
        }
    }
}