Shrink STARK proofs to a constant degree

The goal here is to end up with a single "root" circuit representing any EVM proof. That is, it must verify each STARK, yet be general enough to work with any combination of STARK sizes (within some range of sizes that we choose to support). This root circuit can then be plugged into our aggregation circuit.

In particular, for each STARK and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.

The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there, and I think we can remove it afterwards. The main difference is that instead of having one verification key per STARK, we now have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit and have the prover indicate the index of the VK they're actually using.

This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require more work; e.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic and can't be hardcoded. If there are no concerns, I'll fully remove the feature afterwards.
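To make the verification-key bookkeeping above concrete, here is a minimal, self-contained sketch. It is not code from this module: `THRESHOLD_DEGREE_BITS`, `VerifierKey`, `baked_in_keys`, and `select_key` are hypothetical stand-ins, and the sketch models the key selection natively, whereas the real root circuit has to perform the same selection in-circuit against baked-in constants.

// Illustrative sketch only: one verifier key per supported initial degree_bits,
// with the prover supplying the index of the key its shrunk proof matches.
const THRESHOLD_DEGREE_BITS: usize = 13; // assumed constant size that every chain shrinks to

#[derive(Clone, Debug, PartialEq)]
struct VerifierKey(String); // placeholder for a real wrapper-circuit verifier key

/// One baked-in key per supported initial degree; index `i` corresponds to an
/// initial size of `min_degree_bits + i`.
fn baked_in_keys(min_degree_bits: usize, max_degree_bits: usize) -> Vec<VerifierKey> {
    (min_degree_bits..=max_degree_bits)
        .map(|db| VerifierKey(format!("vk: {db} -> {THRESHOLD_DEGREE_BITS} degree bits")))
        .collect()
}

/// The prover indicates which key it used; out-of-range indices are rejected.
fn select_key(keys: &[VerifierKey], claimed_index: usize) -> Option<&VerifierKey> {
    keys.get(claimed_index)
}

fn main() {
    let keys = baked_in_keys(16, 20);
    assert_eq!(keys.len(), 5);
    assert!(select_key(&keys, 2).is_some()); // a proof that started at degree_bits = 18
    assert!(select_key(&keys, 7).is_none()); // unsupported size, rejected
}

The module below provides the per-table wrapper circuits that the shrinking chains and the root circuit are built from.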

use std::fmt::Debug;

use anyhow::{ensure, Result};
use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
use plonky2::fri::witness_util::set_fri_proof_target;
use plonky2::gates::exponentiation::ExponentiationGate;
use plonky2::gates::gate::GateRef;
use plonky2::gates::noop::NoopGate;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::HashConfig;
use plonky2::iop::challenger::{Challenger, RecursiveChallenger};
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
use plonky2::iop::witness::{PartialWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::{CircuitConfig, CircuitData, VerifierCircuitData};
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig};
use plonky2::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
use plonky2::util::reducing::ReducingFactorTarget;
use plonky2::util::serialization::{
    Buffer, GateSerializer, IoResult, Read, WitnessGeneratorSerializer, Write,
};
use plonky2::with_context;
use plonky2_util::log2_ceil;

use crate::all_stark::{Table, NUM_TABLES};
use crate::config::StarkConfig;
use crate::constraint_consumer::RecursiveConstraintConsumer;
use crate::cross_table_lookup::{verify_cross_table_lookups, CrossTableLookup, CtlCheckVarsTarget};
use crate::permutation::{
    get_grand_product_challenge_set, GrandProductChallenge, GrandProductChallengeSet,
    PermutationCheckDataTarget,
};
use crate::proof::{
    BlockMetadata, BlockMetadataTarget, PublicValues, PublicValuesTarget, StarkOpeningSetTarget,
    StarkProof, StarkProofChallengesTarget, StarkProofTarget, StarkProofWithMetadata, TrieRoots,
    TrieRootsTarget,
};
use crate::stark::Stark;
use crate::util::{h160_limbs, h256_limbs};
use crate::vanishing_poly::eval_vanishing_poly_circuit;
use crate::vars::StarkEvaluationTargets;

/// Table-wise recursive proofs of an `AllProof`.
pub struct RecursiveAllProof<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
> {
    pub recursive_proofs: [ProofWithPublicInputs<F, C, D>; NUM_TABLES],
}

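/// The public inputs carried by each per-table recursive proof: the trace cap, the CTL
/// challenges, the challenger state before and after the table's STARK, and the CTL
/// Z-polynomial openings at the last row.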
pub(crate) struct PublicInputs<T: Copy + Eq + PartialEq + Debug, HC: HashConfig>
where
    [(); HC::WIDTH]:,
{
    pub(crate) trace_cap: Vec<Vec<T>>,
    pub(crate) ctl_zs_last: Vec<T>,
    pub(crate) ctl_challenges: GrandProductChallengeSet<T>,
    pub(crate) challenger_state_before: [T; HC::WIDTH],
    pub(crate) challenger_state_after: [T; HC::WIDTH],
}

/// Similar to the unstable `Iterator::next_chunk`. Could be replaced with that when it's stable.
fn next_chunk<T: Debug, const N: usize>(iter: &mut impl Iterator<Item = T>) -> [T; N] {
    (0..N)
        .flat_map(|_| iter.next())
        .collect_vec()
        .try_into()
        .expect("Not enough elements")
}
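// Illustrative usage (an assumed example, not from the original file): `next_chunk`
// pulls a fixed-size prefix out of any iterator, which `from_vec` below relies on for
// the trace cap hashes and the two challenger states.
#[test]
fn next_chunk_takes_a_fixed_prefix() {
    let mut it = 1..=10;
    let first: [i32; 4] = next_chunk(&mut it);
    assert_eq!(first, [1, 2, 3, 4]);
    // The iterator resumes right after the chunk that was taken.
    assert_eq!(it.next(), Some(5));
}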

impl<T: Copy + Eq + PartialEq + Debug, HC: HashConfig> PublicInputs<T, HC>
where
    [(); HC::WIDTH]:,
{
    pub(crate) fn from_vec(v: &[T], config: &StarkConfig) -> Self {
        let mut iter = v.iter().copied();
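        // The reads below mirror the order in which `recursive_stark_circuit` registers its
        // public inputs: trace cap, CTL challenges, challenger state before and after, then
        // the CTL Z-polynomial openings at the last row.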
        let trace_cap = (0..config.fri_config.num_cap_elements())
            .map(|_| next_chunk::<_, 4>(&mut iter).to_vec())
            .collect();
        let ctl_challenges = GrandProductChallengeSet {
            challenges: (0..config.num_challenges)
                .map(|_| GrandProductChallenge {
                    beta: iter.next().unwrap(),
                    gamma: iter.next().unwrap(),
                })
                .collect(),
        };
        let challenger_state_before = next_chunk(&mut iter);
        let challenger_state_after = next_chunk(&mut iter);
        let ctl_zs_last: Vec<_> = iter.collect();

        Self {
            trace_cap,
            ctl_zs_last,
            ctl_challenges,
            challenger_state_before,
            challenger_state_after,
        }
    }
}

impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
    RecursiveAllProof<F, C, D>
{
    /// Verify every recursive proof.
    pub fn verify(
        self,
        verifier_data: &[VerifierCircuitData<F, C, D>; NUM_TABLES],
        cross_table_lookups: Vec<CrossTableLookup<F>>,
        inner_config: &StarkConfig,
    ) -> Result<()>
    where
        [(); C::HCO::WIDTH]:,
        [(); C::HCI::WIDTH]:,
    {
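        // Reconstruct the shared public inputs of each table's recursive proof, then replay
        // the challenger over the trace caps to re-derive the CTL challenges.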
        let pis: [_; NUM_TABLES] = core::array::from_fn(|i| {
            PublicInputs::<F, C::HCO>::from_vec(
                &self.recursive_proofs[i].public_inputs,
                inner_config,
            )
        });

        let mut challenger = Challenger::<F, C::HCO, C::Hasher>::new();
        for pi in &pis {
            for h in &pi.trace_cap {
                challenger.observe_elements(h);
            }
        }
        let ctl_challenges =
            get_grand_product_challenge_set(&mut challenger, inner_config.num_challenges);
        // Check that the correct CTL challenges are used in every proof.
        for pi in &pis {
            ensure!(ctl_challenges == pi.ctl_challenges);
        }

        let state = challenger.compact();
        ensure!(state == pis[0].challenger_state_before);
        // Check that the challenger state is consistent between proofs.
        for i in 1..NUM_TABLES {
            ensure!(pis[i].challenger_state_before == pis[i - 1].challenger_state_after);
        }

        // Verify the CTL checks.
        verify_cross_table_lookups::<F, D>(
            &cross_table_lookups,
            pis.map(|p| p.ctl_zs_last),
            inner_config,
        )?;

        // Verify the proofs.
        for (proof, verifier_data) in self.recursive_proofs.into_iter().zip(verifier_data) {
            verifier_data.verify(proof)?;
        }
        Ok(())
    }
}

/// Represents a circuit which recursively verifies a STARK proof.
#[derive(Eq, PartialEq, Debug)]
pub(crate) struct StarkWrapperCircuit<F, C, const D: usize>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    [(); C::HCO::WIDTH]:,
{
    pub(crate) circuit: CircuitData<F, C, D>,
    pub(crate) stark_proof_target: StarkProofTarget<D>,
    pub(crate) ctl_challenges_target: GrandProductChallengeSet<Target>,
    pub(crate) init_challenger_state_target: [Target; C::HCO::WIDTH],
    pub(crate) zero_target: Target,
}

impl<F, C, const D: usize> StarkWrapperCircuit<F, C, D>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    C::Hasher: AlgebraicHasher<F, C::HCO>,
    [(); C::HCO::WIDTH]:,
    [(); C::HCI::WIDTH]:,
{
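    // `to_buffer` and `from_buffer` must write and read the fields in the same order:
    // circuit data, challenger-state targets, zero target, STARK proof target, CTL
    // challenge targets.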
    pub fn to_buffer(
        &self,
        buffer: &mut Vec<u8>,
        gate_serializer: &dyn GateSerializer<F, D>,
        generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
    ) -> IoResult<()> {
        buffer.write_circuit_data(&self.circuit, gate_serializer, generator_serializer)?;
        buffer.write_target_vec(&self.init_challenger_state_target)?;
        buffer.write_target(self.zero_target)?;
        self.stark_proof_target.to_buffer(buffer)?;
        self.ctl_challenges_target.to_buffer(buffer)?;
        Ok(())
    }

    pub fn from_buffer(
        buffer: &mut Buffer,
        gate_serializer: &dyn GateSerializer<F, D>,
        generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
    ) -> IoResult<Self> {
        let circuit = buffer.read_circuit_data(gate_serializer, generator_serializer)?;
        let init_challenger_state_target = buffer.read_target_vec()?;
        let zero_target = buffer.read_target()?;
        let stark_proof_target = StarkProofTarget::from_buffer(buffer)?;
        let ctl_challenges_target = GrandProductChallengeSet::from_buffer(buffer)?;
        Ok(Self {
            circuit,
            stark_proof_target,
            ctl_challenges_target,
            init_challenger_state_target: init_challenger_state_target.try_into().unwrap(),
            zero_target,
        })
    }

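    /// Set the STARK proof, the CTL challenges, and the initial challenger state into a
    /// partial witness, then prove this wrapper circuit.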
    pub(crate) fn prove(
        &self,
        proof_with_metadata: &StarkProofWithMetadata<F, C, D>,
        ctl_challenges: &GrandProductChallengeSet<F>,
    ) -> Result<ProofWithPublicInputs<F, C, D>> {
        let mut inputs = PartialWitness::new();

        set_stark_proof_target(
            &mut inputs,
            &self.stark_proof_target,
            &proof_with_metadata.proof,
            self.zero_target,
        );

        for (challenge_target, challenge) in self
            .ctl_challenges_target
            .challenges
            .iter()
            .zip(&ctl_challenges.challenges)
        {
            inputs.set_target(challenge_target.beta, challenge.beta);
            inputs.set_target(challenge_target.gamma, challenge.gamma);
        }

        inputs.set_target_arr(
            self.init_challenger_state_target,
            proof_with_metadata.init_challenger_state,
        );

        self.circuit.prove(inputs)
    }
}

/// Represents a circuit which recursively verifies a PLONK proof.
#[derive(Eq, PartialEq, Debug)]
pub(crate) struct PlonkWrapperCircuit<F, C, const D: usize>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    pub(crate) circuit: CircuitData<F, C, D>,
    pub(crate) proof_with_pis_target: ProofWithPublicInputsTarget<D>,
}

impl<F, C, const D: usize> PlonkWrapperCircuit<F, C, D>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    C::Hasher: AlgebraicHasher<F, C::HCO>,
    [(); C::HCO::WIDTH]:,
    [(); C::HCI::WIDTH]:,
{
    pub(crate) fn prove(
        &self,
        proof: &ProofWithPublicInputs<F, C, D>,
    ) -> Result<ProofWithPublicInputs<F, C, D>> {
        let mut inputs = PartialWitness::new();
        inputs.set_proof_with_pis_target(&self.proof_with_pis_target, proof);
        self.circuit.prove(inputs)
    }
}

/// Returns the recursive Stark circuit.
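/// The circuit verifies a single STARK proof of the given `degree_bits` and exposes the
/// trace cap, the CTL challenges, the challenger state before and after, and the CTL
/// Z-polynomial openings at the last row as public inputs.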
|
|
|
|
|
pub(crate) fn recursive_stark_circuit<
|
2022-09-26 15:47:35 +02:00
|
|
|
F: RichField + Extendable<D>,
|
2023-04-01 09:34:13 -04:00
|
|
|
C: GenericConfig<D, F = F>,
|
2022-09-26 15:47:35 +02:00
|
|
|
S: Stark<F, D>,
|
|
|
|
|
const D: usize,
|
|
|
|
|
>(
|
|
|
|
|
table: Table,
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
stark: &S,
|
2022-09-26 15:47:35 +02:00
|
|
|
degree_bits: usize,
|
|
|
|
|
cross_table_lookups: &[CrossTableLookup<F>],
|
|
|
|
|
inner_config: &StarkConfig,
|
|
|
|
|
circuit_config: &CircuitConfig,
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
min_degree_bits: usize,
|
2023-04-01 09:34:13 -04:00
|
|
|
) -> StarkWrapperCircuit<F, C, D>
|
2022-09-26 15:47:35 +02:00
|
|
|
where
|
|
|
|
|
[(); S::COLUMNS]:,
|
2023-04-01 09:34:13 -04:00
|
|
|
C::Hasher: AlgebraicHasher<F, C::HCO>,
|
|
|
|
|
[(); C::HCO::WIDTH]:,
|
|
|
|
|
[(); C::HCI::WIDTH]:,
|
2022-09-26 15:47:35 +02:00
|
|
|
{
|
|
|
|
|
    let mut builder = CircuitBuilder::<F, D>::new(circuit_config.clone());

    let zero_target = builder.zero();

    let num_permutation_zs = stark.num_permutation_batches(inner_config);
    let num_permutation_batch_size = stark.permutation_batch_size();
    let num_ctl_zs =
        CrossTableLookup::num_ctl_zs(cross_table_lookups, table, inner_config.num_challenges);
    let proof_target =
        add_virtual_stark_proof(&mut builder, stark, inner_config, degree_bits, num_ctl_zs);
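
    // The wrapper exposes parts of the inner proof as public inputs: the trace cap here, then
    // the CTL challenges, the challenger's initial and final states, and the `ctl_zs_last`
    // openings below, so that a circuit one level up can relate proofs across tables
    // (explanatory note; the consuming logic lives outside this function).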
    builder.register_public_inputs(
        &proof_target
            .trace_cap
            .0
            .iter()
            .flat_map(|h| h.elements)
            .collect::<Vec<_>>(),
    );

    let ctl_challenges_target = GrandProductChallengeSet {
        challenges: (0..inner_config.num_challenges)
            .map(|_| GrandProductChallenge {
                beta: builder.add_virtual_public_input(),
                gamma: builder.add_virtual_public_input(),
            })
            .collect(),
    };

    let ctl_vars = CtlCheckVarsTarget::from_proof(
        table,
        &proof_target,
        cross_table_lookups,
        &ctl_challenges_target,
        num_permutation_zs,
    );

    let init_challenger_state_target =
        core::array::from_fn(|_| builder.add_virtual_public_input());
    let mut challenger =
        RecursiveChallenger::<F, C::HCO, C::Hasher, D>::from_state(init_challenger_state_target);
    let challenges = proof_target.get_challenges::<F, C>(
        &mut builder,
        &mut challenger,
        num_permutation_zs > 0,
        num_permutation_batch_size,
        inner_config,
    );
    let challenger_state = challenger.compact(&mut builder);
    builder.register_public_inputs(&challenger_state);

    builder.register_public_inputs(&proof_target.openings.ctl_zs_last);
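
    // Verify the inner STARK proof against the challenges derived above.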
    verify_stark_proof_with_challenges_circuit::<F, C, _, D>(
        &mut builder,
        stark,
        &proof_target,
        &challenges,
        &ctl_vars,
        inner_config,
    );

    add_common_recursion_gates(&mut builder);
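
    // Padding below keeps this circuit from ending up smaller than `min_degree_bits`,
    // presumably so that wrappers built for differently-sized STARKs line up at a predictable
    // degree for the next recursion layer (note added).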
    // Pad to the minimum degree.
    while log2_ceil(builder.num_gates()) < min_degree_bits {
        builder.add_gate(NoopGate, vec![]);
    }

    let circuit = builder.build::<C>();
    StarkWrapperCircuit {
        circuit,
        stark_proof_target: proof_target,
        ctl_challenges_target,
        init_challenger_state_target,
        zero_target,
    }
}

/// Add gates that are sometimes used by recursive circuits, even if they're not actually used by
/// this particular recursive circuit. This is done for uniformity. We sometimes want all recursion
/// circuits to have the same gate set, so that we can do 1-of-n conditional recursion efficiently.
pub(crate) fn add_common_recursion_gates<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
) {
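    // Whether the recursive verifier actually instantiates an `ExponentiationGate` depends on
    // the inner proof's shape; registering it unconditionally keeps the gate set identical
    // across wrappers (note added; see the doc comment above).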
    builder.add_gate_to_gate_set(GateRef::new(ExponentiationGate::new_from_config(
        &builder.config,
    )));
}

/// Recursively verifies an inner proof.
fn verify_stark_proof_with_challenges_circuit<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    stark: &S,
    proof: &StarkProofTarget<D>,
    challenges: &StarkProofChallengesTarget<D>,
    ctl_vars: &[CtlCheckVarsTarget<F, D>],
    inner_config: &StarkConfig,
) where
    C::Hasher: AlgebraicHasher<F, C::HCO>,
    [(); S::COLUMNS]:,
    [(); C::HCO::WIDTH]:,
{
    let zero = builder.zero();
    let one = builder.one_extension();

    let StarkOpeningSetTarget {
        local_values,
        next_values,
        permutation_ctl_zs,
        permutation_ctl_zs_next,
        ctl_zs_last,
        quotient_polys,
    } = &proof.openings;
    let vars = StarkEvaluationTargets {
        local_values: &local_values.to_vec().try_into().unwrap(),
        next_values: &next_values.to_vec().try_into().unwrap(),
    };
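
    // Evaluate, at the out-of-domain point `zeta`: the trace domain's vanishing polynomial
    // `Z_H(zeta) = zeta^n - 1`, the Lagrange values `L_0(zeta)` and `L_(n-1)(zeta)` used for
    // boundary constraints, and `zeta - g^(n-1)` used to exclude the last row from transition
    // constraints (explanatory note).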
    let degree_bits = proof.recover_degree_bits(inner_config);
    let zeta_pow_deg = builder.exp_power_of_2_extension(challenges.stark_zeta, degree_bits);
    let z_h_zeta = builder.sub_extension(zeta_pow_deg, one);
    let (l_0, l_last) =
        eval_l_0_and_l_last_circuit(builder, degree_bits, challenges.stark_zeta, z_h_zeta);
    let last =
        builder.constant_extension(F::Extension::primitive_root_of_unity(degree_bits).inverse());
    let z_last = builder.sub_extension(challenges.stark_zeta, last);

    let mut consumer = RecursiveConstraintConsumer::<F, D>::new(
        builder.zero_extension(),
        challenges.stark_alphas.clone(),
        z_last,
        l_0,
        l_last,
    );

    let num_permutation_zs = stark.num_permutation_batches(inner_config);
    let permutation_data = stark
        .uses_permutation_args()
        .then(|| PermutationCheckDataTarget {
            local_zs: permutation_ctl_zs[..num_permutation_zs].to_vec(),
            next_zs: permutation_ctl_zs_next[..num_permutation_zs].to_vec(),
            permutation_challenge_sets: challenges.permutation_challenge_sets.clone().unwrap(),
        });

    with_context!(
        builder,
        "evaluate vanishing polynomial",
        eval_vanishing_poly_circuit::<F, S, D>(
            builder,
            stark,
            inner_config,
            vars,
            permutation_data,
            ctl_vars,
            &mut consumer,
        )
    );
    let vanishing_polys_zeta = consumer.accumulators();
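
    // Each chunk below is the opening of one (per-challenge) quotient polynomial, committed as
    // `quotient_degree_factor` pieces of degree less than `n`; `scale.reduce` recombines a
    // chunk as `sum_i chunk[i] * (zeta^n)^i` before it is compared against the recursively
    // evaluated `vanishing(zeta)` (explanatory note).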
    // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta.
    let mut scale = ReducingFactorTarget::new(zeta_pow_deg);
    for (i, chunk) in quotient_polys
        .chunks(stark.quotient_degree_factor())
        .enumerate()
    {
        let recombined_quotient = scale.reduce(chunk, builder);
        let computed_vanishing_poly = builder.mul_extension(z_h_zeta, recombined_quotient);
        builder.connect_extension(vanishing_polys_zeta[i], computed_vanishing_poly);
    }
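
    // The three polynomial oracles committed in a STARK proof: trace, permutation/CTL Z
    // polynomials, and quotient polynomials.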
    let merkle_caps = vec![
        proof.trace_cap.clone(),
        proof.permutation_ctl_zs_cap.clone(),
        proof.quotient_polys_cap.clone(),
    ];

    let fri_instance = stark.fri_instance_target(
        builder,
        challenges.stark_zeta,
        F::primitive_root_of_unity(degree_bits),
        degree_bits,
        ctl_zs_last.len(),
        inner_config,
    );
    builder.verify_fri_proof::<C>(
        &fri_instance,
        &proof.openings.to_fri_openings(zero),
        &challenges.fri_challenges,
        &merkle_caps,
        &proof.opening_proof,
        &inner_config.fri_params(degree_bits),
    );
}
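
/// Evaluates `L_0(x) = (x^n - 1) / (n * (x - 1))` and `L_(n-1)(x) = (x^n - 1) / (n * (g * x - 1))`
/// at `x`, where `n = 1 << log_n`, `g` is a primitive `n`-th root of unity, and `z_x` is the
/// precomputed value of `x^n - 1`.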
fn eval_l_0_and_l_last_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    log_n: usize,
    x: ExtensionTarget<D>,
    z_x: ExtensionTarget<D>,
) -> (ExtensionTarget<D>, ExtensionTarget<D>) {
    let n = builder.constant_extension(F::Extension::from_canonical_usize(1 << log_n));
    let g = builder.constant_extension(F::Extension::primitive_root_of_unity(log_n));
    let one = builder.one_extension();
    let l_0_deno = builder.mul_sub_extension(n, x, n);
    let l_last_deno = builder.mul_sub_extension(g, x, one);
    let l_last_deno = builder.mul_extension(n, l_last_deno);

    (
        builder.div_extension(z_x, l_0_deno),
        builder.div_extension(z_x, l_last_deno),
    )
}

#[allow(unused)] // TODO: used later?
pub(crate) fn add_virtual_public_values<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
) -> PublicValuesTarget {
    let trie_roots_before = add_virtual_trie_roots(builder);
    let trie_roots_after = add_virtual_trie_roots(builder);
    let block_metadata = add_virtual_block_metadata(builder);
    PublicValuesTarget {
        trie_roots_before,
        trie_roots_after,
        block_metadata,
    }
}

pub(crate) fn add_virtual_trie_roots<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
) -> TrieRootsTarget {
    let state_root = builder.add_virtual_target_arr();
    let transactions_root = builder.add_virtual_target_arr();
    let receipts_root = builder.add_virtual_target_arr();
    TrieRootsTarget {
        state_root,
        transactions_root,
        receipts_root,
    }
}

pub(crate) fn add_virtual_block_metadata<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
) -> BlockMetadataTarget {
    let block_beneficiary = builder.add_virtual_target_arr();
    let block_timestamp = builder.add_virtual_target();
    let block_number = builder.add_virtual_target();
    let block_difficulty = builder.add_virtual_target();
    let block_gaslimit = builder.add_virtual_target();
    let block_chain_id = builder.add_virtual_target();
    let block_base_fee = builder.add_virtual_target();
    BlockMetadataTarget {
        block_beneficiary,
        block_timestamp,
        block_number,
        block_difficulty,
        block_gaslimit,
        block_chain_id,
        block_base_fee,
    }
}
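
/// Allocates virtual targets for every part of a STARK proof of the given size; a concrete
/// proof can later be written into them with `set_stark_proof_target` below.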
pub(crate) fn add_virtual_stark_proof<
    F: RichField + Extendable<D>,
    S: Stark<F, D>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    stark: &S,
    config: &StarkConfig,
    degree_bits: usize,
    num_ctl_zs: usize,
) -> StarkProofTarget<D> {
    let fri_params = config.fri_params(degree_bits);
    let cap_height = fri_params.config.cap_height;

    let num_leaves_per_oracle = vec![
        S::COLUMNS,
        stark.num_permutation_batches(config) + num_ctl_zs,
        stark.quotient_degree_factor() * config.num_challenges,
    ];

    let permutation_zs_cap = builder.add_virtual_cap(cap_height);

    StarkProofTarget {
        trace_cap: builder.add_virtual_cap(cap_height),
        permutation_ctl_zs_cap: permutation_zs_cap,
        quotient_polys_cap: builder.add_virtual_cap(cap_height),
        openings: add_virtual_stark_opening_set::<F, S, D>(builder, stark, num_ctl_zs, config),
        opening_proof: builder.add_virtual_fri_proof(&num_leaves_per_oracle, &fri_params),
    }
}

fn add_virtual_stark_opening_set<F: RichField + Extendable<D>, S: Stark<F, D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    stark: &S,
    num_ctl_zs: usize,
    config: &StarkConfig,
) -> StarkOpeningSetTarget<D> {
    let num_challenges = config.num_challenges;
    StarkOpeningSetTarget {
        local_values: builder.add_virtual_extension_targets(S::COLUMNS),
        next_values: builder.add_virtual_extension_targets(S::COLUMNS),
        permutation_ctl_zs: builder
            .add_virtual_extension_targets(stark.num_permutation_batches(config) + num_ctl_zs),
        permutation_ctl_zs_next: builder
            .add_virtual_extension_targets(stark.num_permutation_batches(config) + num_ctl_zs),
        ctl_zs_last: builder.add_virtual_targets(num_ctl_zs),
        quotient_polys: builder
            .add_virtual_extension_targets(stark.quotient_degree_factor() * num_challenges),
    }
}
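
/// Writes a concrete `StarkProof` into the targets allocated by `add_virtual_stark_proof`.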
pub(crate) fn set_stark_proof_target<F, C: GenericConfig<D, F = F>, W, const D: usize>(
    witness: &mut W,
    proof_target: &StarkProofTarget<D>,
    proof: &StarkProof<F, C, D>,
    zero: Target,
) where
    F: RichField + Extendable<D>,
    C::Hasher: AlgebraicHasher<F, C::HCO>,
    W: Witness<F>,
{
    witness.set_cap_target(&proof_target.trace_cap, &proof.trace_cap);
    witness.set_cap_target(&proof_target.quotient_polys_cap, &proof.quotient_polys_cap);

    witness.set_fri_openings(
        &proof_target.openings.to_fri_openings(zero),
        &proof.openings.to_fri_openings(),
    );

    witness.set_cap_target(
        &proof_target.permutation_ctl_zs_cap,
        &proof.permutation_ctl_zs_cap,
    );

    set_fri_proof_target(witness, &proof_target.opening_proof, &proof.opening_proof);
}

#[allow(unused)] // TODO: used later?
pub(crate) fn set_public_value_targets<F, W, const D: usize>(
    witness: &mut W,
    public_values_target: &PublicValuesTarget,
    public_values: &PublicValues,
) where
    F: RichField + Extendable<D>,
    W: Witness<F>,
{
    set_trie_roots_target(
        witness,
        &public_values_target.trie_roots_before,
        &public_values.trie_roots_before,
    );
    set_trie_roots_target(
        witness,
        &public_values_target.trie_roots_after,
        &public_values.trie_roots_after,
    );
    set_block_metadata_target(
        witness,
        &public_values_target.block_metadata,
        &public_values.block_metadata,
    );
}
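
/// Sets the trie-root targets from concrete roots, with each 256-bit root decomposed into
/// field-element limbs by `h256_limbs`.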
pub(crate) fn set_trie_roots_target<F, W, const D: usize>(
    witness: &mut W,
    trie_roots_target: &TrieRootsTarget,
    trie_roots: &TrieRoots,
) where
    F: RichField + Extendable<D>,
    W: Witness<F>,
{
    witness.set_target_arr(
        trie_roots_target.state_root,
        h256_limbs(trie_roots.state_root),
    );
    witness.set_target_arr(
        trie_roots_target.transactions_root,
        h256_limbs(trie_roots.transactions_root),
    );
    witness.set_target_arr(
        trie_roots_target.receipts_root,
        h256_limbs(trie_roots.receipts_root),
    );
}

pub(crate) fn set_block_metadata_target<F, W, const D: usize>(
    witness: &mut W,
    block_metadata_target: &BlockMetadataTarget,
    block_metadata: &BlockMetadata,
) where
    F: RichField + Extendable<D>,
    W: Witness<F>,
{
    witness.set_target_arr(
        block_metadata_target.block_beneficiary,
        h160_limbs(block_metadata.block_beneficiary),
    );
    witness.set_target(
        block_metadata_target.block_timestamp,
        F::from_canonical_u64(block_metadata.block_timestamp.as_u64()),
    );
    witness.set_target(
        block_metadata_target.block_number,
        F::from_canonical_u64(block_metadata.block_number.as_u64()),
    );
    witness.set_target(
        block_metadata_target.block_difficulty,
        F::from_canonical_u64(block_metadata.block_difficulty.as_u64()),
    );
    witness.set_target(
        block_metadata_target.block_gaslimit,
        F::from_canonical_u64(block_metadata.block_gaslimit.as_u64()),
    );
    witness.set_target(
        block_metadata_target.block_chain_id,
        F::from_canonical_u64(block_metadata.block_chain_id.as_u64()),
    );
    witness.set_target(
        block_metadata_target.block_base_fee,
        F::from_canonical_u64(block_metadata.block_base_fee.as_u64()),
    );
}