2024-02-01 07:16:28 -05:00
|
|
|
use core::array::from_fn;
|
|
|
|
|
use core::fmt::Debug;
|
2022-09-23 16:25:02 +02:00
|
|
|
|
2023-10-09 09:07:01 -04:00
|
|
|
use anyhow::Result;
|
2024-02-03 12:21:38 -05:00
|
|
|
use ethereum_types::{BigEndianHash, U256};
|
2022-06-27 07:18:21 -07:00
|
|
|
use plonky2::field::extension::Extendable;
|
2022-06-27 12:24:09 -07:00
|
|
|
use plonky2::field::types::Field;
|
2022-05-04 20:57:07 +02:00
|
|
|
use plonky2::fri::witness_util::set_fri_proof_target;
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
use plonky2::gates::exponentiation::ExponentiationGate;
|
|
|
|
|
use plonky2::gates::gate::GateRef;
|
|
|
|
|
use plonky2::gates::noop::NoopGate;
|
2023-01-03 11:48:25 -08:00
|
|
|
use plonky2::hash::hash_types::RichField;
|
2023-05-11 02:59:02 +10:00
|
|
|
use plonky2::hash::hashing::PlonkyPermutation;
|
2023-10-09 09:07:01 -04:00
|
|
|
use plonky2::iop::challenger::RecursiveChallenger;
|
2022-05-04 20:57:07 +02:00
|
|
|
use plonky2::iop::ext_target::ExtensionTarget;
|
2022-05-24 16:24:52 +02:00
|
|
|
use plonky2::iop::target::Target;
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
use plonky2::iop::witness::{PartialWitness, Witness, WitnessWrite};
|
2022-05-04 20:57:07 +02:00
|
|
|
use plonky2::plonk::circuit_builder::CircuitBuilder;
|
2023-10-09 09:07:01 -04:00
|
|
|
use plonky2::plonk::circuit_data::{CircuitConfig, CircuitData};
|
|
|
|
|
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig};
|
2022-10-04 09:56:12 +02:00
|
|
|
use plonky2::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
|
2022-05-04 20:57:07 +02:00
|
|
|
use plonky2::util::reducing::ReducingFactorTarget;
|
2023-04-04 15:23:19 -04:00
|
|
|
use plonky2::util::serialization::{
|
|
|
|
|
Buffer, GateSerializer, IoResult, Read, WitnessGeneratorSerializer, Write,
|
|
|
|
|
};
|
2022-05-04 20:57:07 +02:00
|
|
|
use plonky2::with_context;
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
use plonky2_util::log2_ceil;
|
2022-05-04 20:57:07 +02:00
|
|
|
|
2023-10-09 09:07:01 -04:00
|
|
|
use crate::all_stark::Table;
|
2022-05-04 20:57:07 +02:00
|
|
|
use crate::config::StarkConfig;
|
|
|
|
|
use crate::constraint_consumer::RecursiveConstraintConsumer;
|
2023-11-30 10:04:08 -05:00
|
|
|
use crate::cpu::kernel::aggregator::KERNEL;
|
2023-08-19 10:46:01 -04:00
|
|
|
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
|
2024-02-01 18:19:49 -05:00
|
|
|
use crate::cross_table_lookup::{CrossTableLookup, CtlCheckVarsTarget, GrandProductChallengeSet};
|
2023-09-22 09:19:13 -04:00
|
|
|
use crate::evaluation_frame::StarkEvaluationFrame;
|
2024-02-01 18:19:49 -05:00
|
|
|
use crate::lookup::{GrandProductChallenge, LookupCheckVarsTarget};
|
2023-08-19 10:46:01 -04:00
|
|
|
use crate::memory::segments::Segment;
|
|
|
|
|
use crate::memory::VALUE_LIMBS;
|
2022-05-04 20:57:07 +02:00
|
|
|
use crate::proof::{
|
2023-08-21 23:32:53 +01:00
|
|
|
BlockHashes, BlockHashesTarget, BlockMetadata, BlockMetadataTarget, ExtraBlockData,
|
|
|
|
|
ExtraBlockDataTarget, PublicValues, PublicValuesTarget, StarkOpeningSetTarget, StarkProof,
|
2024-02-08 08:43:04 -05:00
|
|
|
StarkProofChallengesTarget, StarkProofTarget, StarkProofWithMetadata, TrieRoots,
|
|
|
|
|
TrieRootsTarget,
|
2022-05-04 20:57:07 +02:00
|
|
|
};
|
|
|
|
|
use crate::stark::Stark;
|
2024-02-03 12:21:38 -05:00
|
|
|
use crate::util::{h256_limbs, u256_limbs, u256_to_u32, u256_to_u64};
|
2022-05-18 09:22:58 +02:00
|
|
|
use crate::vanishing_poly::eval_vanishing_poly_circuit;
|
2023-09-12 19:23:16 -04:00
|
|
|
use crate::witness::errors::ProgramError;
|
2022-05-04 20:57:07 +02:00
|
|
|
|
2023-05-11 02:59:02 +10:00
|
|
|
/// Structured view of the public inputs exposed by a recursively wrapped
/// STARK proof.
///
/// `T` is the underlying element type (`Target` in-circuit, a field element
/// natively); `P` is the permutation type used for the challenger sponge
/// state over `T`.
pub(crate) struct PublicInputs<T: Copy + Default + Eq + PartialEq + Debug, P: PlonkyPermutation<T>>
{
    /// Merkle cap of the trace commitment; one inner `Vec<T>` per cap
    /// element (see `from_vec` for the serialized layout).
    pub(crate) trace_cap: Vec<Vec<T>>,
    /// Cross-table-lookup Z-polynomial openings at the first row.
    pub(crate) ctl_zs_first: Vec<T>,
    /// The (beta, gamma) challenge pairs used by the cross-table lookup
    /// argument.
    pub(crate) ctl_challenges: GrandProductChallengeSet<T>,
    /// Challenger sponge state before this proof was absorbed.
    pub(crate) challenger_state_before: P,
    /// Challenger sponge state after this proof was absorbed.
    pub(crate) challenger_state_after: P,
}
|
|
|
|
|
|
2023-05-11 02:59:02 +10:00
|
|
|
impl<T: Copy + Debug + Default + Eq + PartialEq, P: PlonkyPermutation<T>> PublicInputs<T, P> {
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
pub(crate) fn from_vec(v: &[T], config: &StarkConfig) -> Self {
|
2023-05-11 02:59:02 +10:00
|
|
|
// TODO: Document magic number 4; probably comes from
|
|
|
|
|
// Ethereum 256 bits = 4 * Goldilocks 64 bits
|
|
|
|
|
let nelts = config.fri_config.num_cap_elements();
|
|
|
|
|
let mut trace_cap = Vec::with_capacity(nelts);
|
|
|
|
|
for i in 0..nelts {
|
|
|
|
|
trace_cap.push(v[4 * i..4 * (i + 1)].to_vec());
|
|
|
|
|
}
|
|
|
|
|
let mut iter = v.iter().copied().skip(4 * nelts);
|
2022-09-23 13:41:14 +02:00
|
|
|
let ctl_challenges = GrandProductChallengeSet {
|
|
|
|
|
challenges: (0..config.num_challenges)
|
2022-10-03 11:44:52 +02:00
|
|
|
.map(|_| GrandProductChallenge {
|
|
|
|
|
beta: iter.next().unwrap(),
|
|
|
|
|
gamma: iter.next().unwrap(),
|
2022-09-23 13:41:14 +02:00
|
|
|
})
|
|
|
|
|
.collect(),
|
|
|
|
|
};
|
2023-05-11 02:59:02 +10:00
|
|
|
let challenger_state_before = P::new(&mut iter);
|
|
|
|
|
let challenger_state_after = P::new(&mut iter);
|
2023-09-11 14:11:13 -04:00
|
|
|
let ctl_zs_first: Vec<_> = iter.collect();
|
2022-09-23 13:41:14 +02:00
|
|
|
|
|
|
|
|
Self {
|
|
|
|
|
trace_cap,
|
2023-09-11 14:11:13 -04:00
|
|
|
ctl_zs_first,
|
2022-09-23 13:41:14 +02:00
|
|
|
ctl_challenges,
|
|
|
|
|
challenger_state_before,
|
|
|
|
|
challenger_state_after,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
/// Represents a circuit which recursively verifies a STARK proof.
#[derive(Eq, PartialEq, Debug)]
pub(crate) struct StarkWrapperCircuit<F, C, const D: usize>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    C::Hasher: AlgebraicHasher<F>,
{
    /// The plonky2 circuit data for this wrapper circuit.
    pub(crate) circuit: CircuitData<F, C, D>,
    /// Targets for the STARK proof being verified inside `circuit`.
    pub(crate) stark_proof_target: StarkProofTarget<D>,
    /// Targets for the cross-table-lookup challenges.
    pub(crate) ctl_challenges_target: GrandProductChallengeSet<Target>,
    /// Targets for the challenger state before this proof is absorbed.
    pub(crate) init_challenger_state_target:
        <C::Hasher as AlgebraicHasher<F>>::AlgebraicPermutation,
    /// A zero target, passed through when setting the proof witness (see
    /// `prove`).
    pub(crate) zero_target: Target,
}
|
|
|
|
|
|
2023-04-01 09:34:13 -04:00
|
|
|
impl<F, C, const D: usize> StarkWrapperCircuit<F, C, D>
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
where
|
|
|
|
|
F: RichField + Extendable<D>,
|
2023-04-01 09:34:13 -04:00
|
|
|
C: GenericConfig<D, F = F>,
|
2023-05-11 02:59:02 +10:00
|
|
|
C::Hasher: AlgebraicHasher<F>,
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
{
|
2023-11-13 09:26:56 -05:00
|
|
|
/// Serializes this wrapper circuit into `buffer`.
///
/// The write order — circuit data, challenger-state targets, zero target,
/// proof targets, CTL challenge targets — must mirror `from_buffer` exactly.
pub(crate) fn to_buffer(
    &self,
    buffer: &mut Vec<u8>,
    gate_serializer: &dyn GateSerializer<F, D>,
    generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
) -> IoResult<()> {
    buffer.write_circuit_data(&self.circuit, gate_serializer, generator_serializer)?;
    buffer.write_target_vec(self.init_challenger_state_target.as_ref())?;
    buffer.write_target(self.zero_target)?;
    self.stark_proof_target.to_buffer(buffer)?;
    self.ctl_challenges_target.to_buffer(buffer)?;
    Ok(())
}
|
|
|
|
|
|
2023-11-13 09:26:56 -05:00
|
|
|
/// Deserializes a wrapper circuit from `buffer`.
///
/// The read order must mirror `to_buffer` exactly; do not reorder these
/// statements.
pub(crate) fn from_buffer(
    buffer: &mut Buffer,
    gate_serializer: &dyn GateSerializer<F, D>,
    generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
) -> IoResult<Self> {
    let circuit = buffer.read_circuit_data(gate_serializer, generator_serializer)?;
    // The challenger state was written as a flat target vector; rebuild the
    // permutation state from it.
    let target_vec = buffer.read_target_vec()?;
    let init_challenger_state_target =
        <C::Hasher as AlgebraicHasher<F>>::AlgebraicPermutation::new(target_vec);
    let zero_target = buffer.read_target()?;
    let stark_proof_target = StarkProofTarget::from_buffer(buffer)?;
    let ctl_challenges_target = GrandProductChallengeSet::from_buffer(buffer)?;
    Ok(Self {
        circuit,
        stark_proof_target,
        ctl_challenges_target,
        init_challenger_state_target,
        zero_target,
    })
}
|
|
|
|
|
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
pub(crate) fn prove(
|
|
|
|
|
&self,
|
2024-02-08 08:43:04 -05:00
|
|
|
proof_with_metadata: &StarkProofWithMetadata<F, C, D>,
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
ctl_challenges: &GrandProductChallengeSet<F>,
|
2023-04-01 09:34:13 -04:00
|
|
|
) -> Result<ProofWithPublicInputs<F, C, D>> {
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
let mut inputs = PartialWitness::new();
|
|
|
|
|
|
|
|
|
|
set_stark_proof_target(
|
|
|
|
|
&mut inputs,
|
|
|
|
|
&self.stark_proof_target,
|
2024-02-08 08:43:04 -05:00
|
|
|
&proof_with_metadata.proof,
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
self.zero_target,
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
for (challenge_target, challenge) in self
|
|
|
|
|
.ctl_challenges_target
|
|
|
|
|
.challenges
|
|
|
|
|
.iter()
|
|
|
|
|
.zip(&ctl_challenges.challenges)
|
|
|
|
|
{
|
|
|
|
|
inputs.set_target(challenge_target.beta, challenge.beta);
|
|
|
|
|
inputs.set_target(challenge_target.gamma, challenge.gamma);
|
|
|
|
|
}
|
|
|
|
|
|
2024-02-08 08:43:04 -05:00
|
|
|
inputs.set_target_arr(
|
|
|
|
|
self.init_challenger_state_target.as_ref(),
|
|
|
|
|
proof_with_metadata.init_challenger_state.as_ref(),
|
|
|
|
|
);
|
|
|
|
|
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
self.circuit.prove(inputs)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Represents a circuit which recursively verifies a PLONK proof.
|
2023-02-21 21:12:03 -05:00
|
|
|
#[derive(Eq, PartialEq, Debug)]
|
2023-04-01 09:34:13 -04:00
|
|
|
pub(crate) struct PlonkWrapperCircuit<F, C, const D: usize>
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
where
|
|
|
|
|
F: RichField + Extendable<D>,
|
2023-04-01 09:34:13 -04:00
|
|
|
C: GenericConfig<D, F = F>,
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
{
|
2023-04-01 09:34:13 -04:00
|
|
|
pub(crate) circuit: CircuitData<F, C, D>,
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
pub(crate) proof_with_pis_target: ProofWithPublicInputsTarget<D>,
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-01 09:34:13 -04:00
|
|
|
impl<F, C, const D: usize> PlonkWrapperCircuit<F, C, D>
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
where
|
|
|
|
|
F: RichField + Extendable<D>,
|
2023-04-01 09:34:13 -04:00
|
|
|
C: GenericConfig<D, F = F>,
|
2023-05-11 02:59:02 +10:00
|
|
|
C::Hasher: AlgebraicHasher<F>,
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
{
|
|
|
|
|
pub(crate) fn prove(
|
|
|
|
|
&self,
|
2023-04-01 09:34:13 -04:00
|
|
|
proof: &ProofWithPublicInputs<F, C, D>,
|
|
|
|
|
) -> Result<ProofWithPublicInputs<F, C, D>> {
|
Shrink STARK proofs to a constant degree
The goal here is to end up with a single "root" circuit representing any EVM proof. I.e. it must verify each STARK, but be general enough to work with any combination of STARK sizes (within some range of sizes that we chose to support). This root circuit can then be plugged into our aggregation circuit.
In particular, for each STARK, and for each initial `degree_bits` (within a range that we choose to support), this adds a "shrinking chain" of circuits. Such a chain shrinks a STARK proof from that initial `degree_bits` down to a constant, `THRESHOLD_DEGREE_BITS`.
The root circuit then combines these shrunk-to-constant proofs for each table. It's similar to `RecursiveAllProof::verify_circuit`; I adapted the code from there and I think we can remove it after. The main difference is that now instead of having one verification key per STARK, we have several possible VKs, one per initial `degree_bits`. We bake the list of possible VKs into the root circuit, and have the prover indicate the index of the VK they're actually using.
This also partially removes the default feature of CTLs. So far we've used filters instead of defaults. Until now it was easy to keep supporting defaults just in case, but here maintaining support would require some more work. E.g. we couldn't use `exp_u64` any more, since the size delta is now dynamic, it can't be hardcoded. If there are no concerns, I'll fully remove the feature after.
2022-12-27 18:15:18 -08:00
|
|
|
let mut inputs = PartialWitness::new();
|
|
|
|
|
inputs.set_proof_with_pis_target(&self.proof_with_pis_target, proof);
|
|
|
|
|
self.circuit.prove(inputs)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Returns the recursive Stark circuit.
///
/// Builds a plonky2 circuit verifying a single STARK proof of size
/// `2^degree_bits` for `table`. Public inputs, in registration order: the
/// proof's trace Merkle cap, the CTL challenges (beta/gamma pairs), the
/// challenger's initial state, the challenger's final (compacted) state,
/// and `ctl_zs_first`. The circuit is padded with no-op gates until it has
/// at least `2^min_degree_bits` gates before being built.
pub(crate) fn recursive_stark_circuit<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
    const D: usize,
>(
    table: Table,
    stark: &S,
    degree_bits: usize,
    cross_table_lookups: &[CrossTableLookup<F>],
    inner_config: &StarkConfig,
    circuit_config: &CircuitConfig,
    min_degree_bits: usize,
) -> StarkWrapperCircuit<F, C, D>
where
    C::Hasher: AlgebraicHasher<F>,
{
    let mut builder = CircuitBuilder::<F, D>::new(circuit_config.clone());
    let zero_target = builder.zero();

    // Number of auxiliary columns used by this STARK's lookup arguments.
    let num_lookup_columns = stark.num_lookup_helper_columns(inner_config);
    // Helper-column and Z-polynomial counts for the CTLs involving `table`.
    let (total_num_helpers, num_ctl_zs, num_helpers_by_ctl) =
        CrossTableLookup::num_ctl_helpers_zs_all(
            cross_table_lookups,
            *table,
            inner_config.num_challenges,
            stark.constraint_degree(),
        );
    let num_ctl_helper_zs = num_ctl_zs + total_num_helpers;

    // Allocate virtual targets for every element of the inner STARK proof.
    let proof_target = add_virtual_stark_proof(
        &mut builder,
        stark,
        inner_config,
        degree_bits,
        num_ctl_helper_zs,
        num_ctl_zs,
    );

    // Expose the trace Merkle cap as public inputs.
    builder.register_public_inputs(
        &proof_target
            .trace_cap
            .0
            .iter()
            .flat_map(|h| h.elements)
            .collect::<Vec<_>>(),
    );

    // CTL challenges are supplied by the prover at proving time, so they are
    // modeled as virtual public inputs rather than derived in-circuit.
    let ctl_challenges_target = GrandProductChallengeSet {
        challenges: (0..inner_config.num_challenges)
            .map(|_| GrandProductChallenge {
                beta: builder.add_virtual_public_input(),
                gamma: builder.add_virtual_public_input(),
            })
            .collect(),
    };

    let ctl_vars = CtlCheckVarsTarget::from_proof(
        *table,
        &proof_target,
        cross_table_lookups,
        &ctl_challenges_target,
        num_lookup_columns,
        total_num_helpers,
        &num_helpers_by_ctl,
    );

    // The challenger's starting state is a public input as well; `new` pulls
    // from the (infinite) iterator as many targets as the permutation needs.
    let init_challenger_state_target =
        <C::Hasher as AlgebraicHasher<F>>::AlgebraicPermutation::new(std::iter::from_fn(|| {
            Some(builder.add_virtual_public_input())
        }));
    let mut challenger =
        RecursiveChallenger::<F, C::Hasher, D>::from_state(init_challenger_state_target);
    let challenges =
        proof_target.get_challenges::<F, C>(&mut builder, &mut challenger, inner_config);
    // Expose the challenger's post-observation state as public inputs.
    let challenger_state = challenger.compact(&mut builder);
    builder.register_public_inputs(challenger_state.as_ref());

    builder.register_public_inputs(&proof_target.openings.ctl_zs_first);

    verify_stark_proof_with_challenges_circuit::<F, C, _, D>(
        &mut builder,
        stark,
        &proof_target,
        &challenges,
        &ctl_vars,
        &ctl_challenges_target,
        inner_config,
    );

    // Keep the gate set uniform across all recursion circuits.
    add_common_recursion_gates(&mut builder);

    // Pad to the minimum degree.
    while log2_ceil(builder.num_gates()) < min_degree_bits {
        builder.add_gate(NoopGate, vec![]);
    }

    let circuit = builder.build::<C>();
    StarkWrapperCircuit {
        circuit,
        stark_proof_target: proof_target,
        ctl_challenges_target,
        init_challenger_state_target,
        zero_target,
    }
}
|
|
|
|
|
|
|
|
|
|
/// Add gates that are sometimes used by recursive circuits, even if it's not actually used by this
|
|
|
|
|
/// particular recursive circuit. This is done for uniformity. We sometimes want all recursion
|
|
|
|
|
/// circuits to have the same gate set, so that we can do 1-of-n conditional recursion efficiently.
|
|
|
|
|
pub(crate) fn add_common_recursion_gates<F: RichField + Extendable<D>, const D: usize>(
|
|
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
|
|
|
|
) {
|
|
|
|
|
builder.add_gate_to_gate_set(GateRef::new(ExponentiationGate::new_from_config(
|
|
|
|
|
&builder.config,
|
|
|
|
|
)));
|
2022-09-26 15:47:35 +02:00
|
|
|
}
|
|
|
|
|
|
2022-05-04 20:57:07 +02:00
|
|
|
/// Recursively verifies an inner proof.
|
2022-05-18 09:22:58 +02:00
|
|
|
fn verify_stark_proof_with_challenges_circuit<
|
2022-05-04 20:57:07 +02:00
|
|
|
F: RichField + Extendable<D>,
|
2023-04-01 09:34:13 -04:00
|
|
|
C: GenericConfig<D, F = F>,
|
2022-05-04 20:57:07 +02:00
|
|
|
S: Stark<F, D>,
|
|
|
|
|
const D: usize,
|
|
|
|
|
>(
|
|
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
2022-09-22 11:01:27 +02:00
|
|
|
stark: &S,
|
2022-08-25 12:24:22 -07:00
|
|
|
proof: &StarkProofTarget<D>,
|
2022-05-24 16:24:52 +02:00
|
|
|
challenges: &StarkProofChallengesTarget<D>,
|
2022-06-14 00:53:31 +02:00
|
|
|
ctl_vars: &[CtlCheckVarsTarget<F, D>],
|
2023-09-14 22:57:52 +01:00
|
|
|
ctl_challenges: &GrandProductChallengeSet<Target>,
|
2022-05-04 20:57:07 +02:00
|
|
|
inner_config: &StarkConfig,
|
|
|
|
|
) where
|
2023-05-11 02:59:02 +10:00
|
|
|
C::Hasher: AlgebraicHasher<F>,
|
2022-05-04 20:57:07 +02:00
|
|
|
{
|
2022-05-24 16:24:52 +02:00
|
|
|
let zero = builder.zero();
|
2022-05-04 20:57:07 +02:00
|
|
|
let one = builder.one_extension();
|
|
|
|
|
|
2024-01-10 08:54:13 +01:00
|
|
|
let num_ctl_polys = ctl_vars
|
|
|
|
|
.iter()
|
|
|
|
|
.map(|ctl| ctl.helper_columns.len())
|
|
|
|
|
.sum::<usize>();
|
|
|
|
|
|
2022-05-04 20:57:07 +02:00
|
|
|
let StarkOpeningSetTarget {
|
|
|
|
|
local_values,
|
|
|
|
|
next_values,
|
2023-02-13 15:58:26 +01:00
|
|
|
auxiliary_polys,
|
|
|
|
|
auxiliary_polys_next,
|
2023-09-11 14:11:13 -04:00
|
|
|
ctl_zs_first,
|
2022-05-04 20:57:07 +02:00
|
|
|
quotient_polys,
|
|
|
|
|
} = &proof.openings;
|
2023-09-22 09:19:13 -04:00
|
|
|
let vars = S::EvaluationFrameTarget::from_values(local_values, next_values);
|
2022-05-04 20:57:07 +02:00
|
|
|
|
2022-05-24 16:24:52 +02:00
|
|
|
let degree_bits = proof.recover_degree_bits(inner_config);
|
2022-05-04 20:57:07 +02:00
|
|
|
let zeta_pow_deg = builder.exp_power_of_2_extension(challenges.stark_zeta, degree_bits);
|
|
|
|
|
let z_h_zeta = builder.sub_extension(zeta_pow_deg, one);
|
2022-09-12 08:09:17 +02:00
|
|
|
let (l_0, l_last) =
|
|
|
|
|
eval_l_0_and_l_last_circuit(builder, degree_bits, challenges.stark_zeta, z_h_zeta);
|
2022-05-04 20:57:07 +02:00
|
|
|
let last =
|
|
|
|
|
builder.constant_extension(F::Extension::primitive_root_of_unity(degree_bits).inverse());
|
|
|
|
|
let z_last = builder.sub_extension(challenges.stark_zeta, last);
|
|
|
|
|
|
|
|
|
|
let mut consumer = RecursiveConstraintConsumer::<F, D>::new(
|
|
|
|
|
builder.zero_extension(),
|
2022-05-24 16:24:52 +02:00
|
|
|
challenges.stark_alphas.clone(),
|
2022-05-04 20:57:07 +02:00
|
|
|
z_last,
|
2022-09-12 08:09:17 +02:00
|
|
|
l_0,
|
2022-05-04 20:57:07 +02:00
|
|
|
l_last,
|
|
|
|
|
);
|
|
|
|
|
|
2023-02-13 15:58:26 +01:00
|
|
|
let num_lookup_columns = stark.num_lookup_helper_columns(inner_config);
|
2023-09-14 10:57:33 +01:00
|
|
|
let lookup_challenges = (num_lookup_columns > 0).then(|| {
|
2023-09-14 22:57:52 +01:00
|
|
|
ctl_challenges
|
|
|
|
|
.challenges
|
2023-09-14 10:57:33 +01:00
|
|
|
.iter()
|
2023-09-14 22:57:52 +01:00
|
|
|
.map(|ch| ch.beta)
|
2023-09-14 10:57:33 +01:00
|
|
|
.collect::<Vec<_>>()
|
|
|
|
|
});
|
|
|
|
|
|
2023-02-13 15:58:26 +01:00
|
|
|
let lookup_vars = stark.uses_lookups().then(|| LookupCheckVarsTarget {
|
|
|
|
|
local_values: auxiliary_polys[..num_lookup_columns].to_vec(),
|
|
|
|
|
next_values: auxiliary_polys_next[..num_lookup_columns].to_vec(),
|
2023-09-14 10:57:33 +01:00
|
|
|
challenges: lookup_challenges.unwrap(),
|
2023-02-13 15:58:26 +01:00
|
|
|
});
|
2022-05-04 20:57:07 +02:00
|
|
|
|
|
|
|
|
with_context!(
|
|
|
|
|
builder,
|
|
|
|
|
"evaluate vanishing polynomial",
|
2023-02-10 21:47:51 -08:00
|
|
|
eval_vanishing_poly_circuit::<F, S, D>(
|
2022-05-04 20:57:07 +02:00
|
|
|
builder,
|
2022-09-22 11:01:27 +02:00
|
|
|
stark,
|
2023-09-22 09:19:13 -04:00
|
|
|
&vars,
|
2023-02-13 15:58:26 +01:00
|
|
|
lookup_vars,
|
2022-05-24 16:24:52 +02:00
|
|
|
ctl_vars,
|
2022-05-04 20:57:07 +02:00
|
|
|
&mut consumer,
|
|
|
|
|
)
|
|
|
|
|
);
|
|
|
|
|
let vanishing_polys_zeta = consumer.accumulators();
|
|
|
|
|
|
|
|
|
|
// Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta.
|
|
|
|
|
let mut scale = ReducingFactorTarget::new(zeta_pow_deg);
|
|
|
|
|
for (i, chunk) in quotient_polys
|
|
|
|
|
.chunks(stark.quotient_degree_factor())
|
|
|
|
|
.enumerate()
|
|
|
|
|
{
|
|
|
|
|
let recombined_quotient = scale.reduce(chunk, builder);
|
|
|
|
|
let computed_vanishing_poly = builder.mul_extension(z_h_zeta, recombined_quotient);
|
|
|
|
|
builder.connect_extension(vanishing_polys_zeta[i], computed_vanishing_poly);
|
|
|
|
|
}
|
|
|
|
|
|
2022-05-24 16:24:52 +02:00
|
|
|
let merkle_caps = vec![
|
|
|
|
|
proof.trace_cap.clone(),
|
2023-02-13 15:58:26 +01:00
|
|
|
proof.auxiliary_polys_cap.clone(),
|
2022-05-24 16:24:52 +02:00
|
|
|
proof.quotient_polys_cap.clone(),
|
|
|
|
|
];
|
2022-05-04 20:57:07 +02:00
|
|
|
|
|
|
|
|
let fri_instance = stark.fri_instance_target(
|
|
|
|
|
builder,
|
|
|
|
|
challenges.stark_zeta,
|
|
|
|
|
F::primitive_root_of_unity(degree_bits),
|
2024-01-10 08:54:13 +01:00
|
|
|
num_ctl_polys,
|
2023-09-11 14:11:13 -04:00
|
|
|
ctl_zs_first.len(),
|
2022-05-04 20:57:07 +02:00
|
|
|
inner_config,
|
|
|
|
|
);
|
2023-04-01 09:34:13 -04:00
|
|
|
builder.verify_fri_proof::<C>(
|
2022-05-04 20:57:07 +02:00
|
|
|
&fri_instance,
|
2022-05-24 16:24:52 +02:00
|
|
|
&proof.openings.to_fri_openings(zero),
|
2022-05-04 20:57:07 +02:00
|
|
|
&challenges.fri_challenges,
|
|
|
|
|
&merkle_caps,
|
|
|
|
|
&proof.opening_proof,
|
|
|
|
|
&inner_config.fri_params(degree_bits),
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
2023-12-04 16:26:10 -05:00
|
|
|
/// Recursive version of `get_memory_extra_looking_sum`.
|
|
|
|
|
pub(crate) fn get_memory_extra_looking_sum_circuit<F: RichField + Extendable<D>, const D: usize>(
|
2023-08-19 10:46:01 -04:00
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
|
|
|
|
public_values: &PublicValuesTarget,
|
|
|
|
|
challenge: GrandProductChallenge<Target>,
|
|
|
|
|
) -> Target {
|
2023-12-04 16:26:10 -05:00
|
|
|
let mut sum = builder.zero();
|
2023-08-19 10:46:01 -04:00
|
|
|
|
|
|
|
|
// Add metadata writes.
|
2023-09-07 12:15:17 +01:00
|
|
|
let block_fields_scalars = [
|
2023-08-19 10:46:01 -04:00
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockTimestamp,
|
2023-08-19 10:46:01 -04:00
|
|
|
public_values.block_metadata.block_timestamp,
|
|
|
|
|
),
|
|
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockNumber,
|
2023-08-19 10:46:01 -04:00
|
|
|
public_values.block_metadata.block_number,
|
|
|
|
|
),
|
|
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockDifficulty,
|
2023-08-19 10:46:01 -04:00
|
|
|
public_values.block_metadata.block_difficulty,
|
|
|
|
|
),
|
2023-11-17 10:01:26 -05:00
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockGasLimit,
|
2023-11-17 10:01:26 -05:00
|
|
|
public_values.block_metadata.block_gaslimit,
|
|
|
|
|
),
|
2023-08-19 10:46:01 -04:00
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockChainId,
|
2023-08-19 10:46:01 -04:00
|
|
|
public_values.block_metadata.block_chain_id,
|
|
|
|
|
),
|
2023-11-17 10:01:26 -05:00
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockGasUsed,
|
2023-11-17 10:01:26 -05:00
|
|
|
public_values.block_metadata.block_gas_used,
|
|
|
|
|
),
|
|
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockGasUsedBefore,
|
2023-11-17 10:01:26 -05:00
|
|
|
public_values.extra_block_data.gas_used_before,
|
|
|
|
|
),
|
|
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockGasUsedAfter,
|
2023-11-17 10:01:26 -05:00
|
|
|
public_values.extra_block_data.gas_used_after,
|
|
|
|
|
),
|
2023-08-23 23:29:58 +01:00
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::TxnNumberBefore,
|
2023-08-23 23:29:58 +01:00
|
|
|
public_values.extra_block_data.txn_number_before,
|
|
|
|
|
),
|
|
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::TxnNumberAfter,
|
2023-08-23 23:29:58 +01:00
|
|
|
public_values.extra_block_data.txn_number_after,
|
|
|
|
|
),
|
2023-08-19 10:46:01 -04:00
|
|
|
];
|
|
|
|
|
|
2024-01-08 11:46:26 +01:00
|
|
|
let beneficiary_random_base_fee_cur_hash_fields: [(GlobalMetadata, &[Target]); 4] = [
|
2023-08-23 23:29:58 +01:00
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockBeneficiary,
|
2023-08-23 23:29:58 +01:00
|
|
|
&public_values.block_metadata.block_beneficiary,
|
|
|
|
|
),
|
2023-09-25 18:20:22 +02:00
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockRandom,
|
2023-09-25 18:20:22 +02:00
|
|
|
&public_values.block_metadata.block_random,
|
|
|
|
|
),
|
2023-08-23 23:29:58 +01:00
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockBaseFee,
|
2023-08-23 23:29:58 +01:00
|
|
|
&public_values.block_metadata.block_base_fee,
|
|
|
|
|
),
|
2023-09-07 12:15:17 +01:00
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::BlockCurrentHash,
|
2023-09-07 12:15:17 +01:00
|
|
|
&public_values.block_hashes.cur_hash,
|
|
|
|
|
),
|
2023-08-23 23:29:58 +01:00
|
|
|
];
|
2023-08-19 10:46:01 -04:00
|
|
|
|
2024-01-08 11:46:26 +01:00
|
|
|
let metadata_segment =
|
|
|
|
|
builder.constant(F::from_canonical_usize(Segment::GlobalMetadata.unscale()));
|
2023-09-07 12:15:17 +01:00
|
|
|
block_fields_scalars.map(|(field, target)| {
|
2023-08-19 10:46:01 -04:00
|
|
|
// Each of those fields fit in 32 bits, hence in a single Target.
|
2024-01-08 11:46:26 +01:00
|
|
|
sum = add_data_write(
|
|
|
|
|
builder,
|
|
|
|
|
challenge,
|
|
|
|
|
sum,
|
|
|
|
|
metadata_segment,
|
|
|
|
|
field.unscale(),
|
|
|
|
|
&[target],
|
|
|
|
|
);
|
2023-08-19 10:46:01 -04:00
|
|
|
});
|
|
|
|
|
|
2023-11-17 10:01:26 -05:00
|
|
|
beneficiary_random_base_fee_cur_hash_fields.map(|(field, targets)| {
|
2024-01-08 11:46:26 +01:00
|
|
|
sum = add_data_write(
|
|
|
|
|
builder,
|
|
|
|
|
challenge,
|
|
|
|
|
sum,
|
|
|
|
|
metadata_segment,
|
|
|
|
|
field.unscale(),
|
|
|
|
|
targets,
|
|
|
|
|
);
|
2023-08-23 23:29:58 +01:00
|
|
|
});
|
|
|
|
|
|
2023-08-21 23:32:53 +01:00
|
|
|
// Add block hashes writes.
|
2024-01-08 11:46:26 +01:00
|
|
|
let block_hashes_segment =
|
|
|
|
|
builder.constant(F::from_canonical_usize(Segment::BlockHashes.unscale()));
|
2023-08-21 23:32:53 +01:00
|
|
|
for i in 0..256 {
|
2023-12-04 16:26:10 -05:00
|
|
|
sum = add_data_write(
|
2023-08-23 23:29:58 +01:00
|
|
|
builder,
|
|
|
|
|
challenge,
|
2023-12-04 16:26:10 -05:00
|
|
|
sum,
|
2023-08-21 23:32:53 +01:00
|
|
|
block_hashes_segment,
|
2023-08-23 23:29:58 +01:00
|
|
|
i,
|
2023-08-21 23:32:53 +01:00
|
|
|
&public_values.block_hashes.prev_hashes[8 * i..8 * (i + 1)],
|
2023-08-23 23:29:58 +01:00
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
2023-08-21 23:32:53 +01:00
|
|
|
// Add block bloom filters writes.
|
2024-01-08 11:46:26 +01:00
|
|
|
let bloom_segment =
|
|
|
|
|
builder.constant(F::from_canonical_usize(Segment::GlobalBlockBloom.unscale()));
|
2023-08-23 23:29:58 +01:00
|
|
|
for i in 0..8 {
|
2023-12-04 16:26:10 -05:00
|
|
|
sum = add_data_write(
|
2023-08-23 23:29:58 +01:00
|
|
|
builder,
|
|
|
|
|
challenge,
|
2023-12-04 16:26:10 -05:00
|
|
|
sum,
|
2023-08-23 23:29:58 +01:00
|
|
|
bloom_segment,
|
2023-08-21 23:32:53 +01:00
|
|
|
i,
|
|
|
|
|
&public_values.block_metadata.block_bloom[i * 8..(i + 1) * 8],
|
2023-08-23 23:29:58 +01:00
|
|
|
);
|
2023-09-07 12:15:17 +01:00
|
|
|
}
|
2023-08-19 10:46:01 -04:00
|
|
|
|
|
|
|
|
// Add trie roots writes.
|
|
|
|
|
let trie_fields = [
|
|
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::StateTrieRootDigestBefore,
|
2023-08-19 10:46:01 -04:00
|
|
|
public_values.trie_roots_before.state_root,
|
|
|
|
|
),
|
|
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::TransactionTrieRootDigestBefore,
|
2023-08-19 10:46:01 -04:00
|
|
|
public_values.trie_roots_before.transactions_root,
|
|
|
|
|
),
|
|
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::ReceiptTrieRootDigestBefore,
|
2023-08-19 10:46:01 -04:00
|
|
|
public_values.trie_roots_before.receipts_root,
|
|
|
|
|
),
|
|
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::StateTrieRootDigestAfter,
|
2023-08-19 10:46:01 -04:00
|
|
|
public_values.trie_roots_after.state_root,
|
|
|
|
|
),
|
|
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::TransactionTrieRootDigestAfter,
|
2023-08-19 10:46:01 -04:00
|
|
|
public_values.trie_roots_after.transactions_root,
|
|
|
|
|
),
|
|
|
|
|
(
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::ReceiptTrieRootDigestAfter,
|
2023-08-19 10:46:01 -04:00
|
|
|
public_values.trie_roots_after.receipts_root,
|
|
|
|
|
),
|
|
|
|
|
];
|
|
|
|
|
|
|
|
|
|
trie_fields.map(|(field, targets)| {
|
2024-01-08 11:46:26 +01:00
|
|
|
sum = add_data_write(
|
|
|
|
|
builder,
|
|
|
|
|
challenge,
|
|
|
|
|
sum,
|
|
|
|
|
metadata_segment,
|
|
|
|
|
field.unscale(),
|
|
|
|
|
&targets,
|
|
|
|
|
);
|
2023-08-19 10:46:01 -04:00
|
|
|
});
|
|
|
|
|
|
2023-11-30 10:04:08 -05:00
|
|
|
// Add kernel hash and kernel length.
|
|
|
|
|
let kernel_hash_limbs = h256_limbs::<F>(KERNEL.code_hash);
|
|
|
|
|
let kernel_hash_targets: [Target; 8] = from_fn(|i| builder.constant(kernel_hash_limbs[i]));
|
2023-12-04 16:26:10 -05:00
|
|
|
sum = add_data_write(
|
2023-11-30 10:04:08 -05:00
|
|
|
builder,
|
|
|
|
|
challenge,
|
2023-12-04 16:26:10 -05:00
|
|
|
sum,
|
2023-11-30 10:04:08 -05:00
|
|
|
metadata_segment,
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::KernelHash.unscale(),
|
2023-11-30 10:04:08 -05:00
|
|
|
&kernel_hash_targets,
|
|
|
|
|
);
|
|
|
|
|
let kernel_len_target = builder.constant(F::from_canonical_usize(KERNEL.code.len()));
|
2023-12-04 16:26:10 -05:00
|
|
|
sum = add_data_write(
|
2023-11-30 10:04:08 -05:00
|
|
|
builder,
|
|
|
|
|
challenge,
|
2023-12-04 16:26:10 -05:00
|
|
|
sum,
|
2023-11-30 10:04:08 -05:00
|
|
|
metadata_segment,
|
2024-01-08 11:46:26 +01:00
|
|
|
GlobalMetadata::KernelLen.unscale(),
|
2023-11-30 10:04:08 -05:00
|
|
|
&[kernel_len_target],
|
|
|
|
|
);
|
|
|
|
|
|
2023-12-04 16:26:10 -05:00
|
|
|
sum
|
2023-08-19 10:46:01 -04:00
|
|
|
}
|
|
|
|
|
|
2023-08-23 23:29:58 +01:00
|
|
|
fn add_data_write<F: RichField + Extendable<D>, const D: usize>(
|
2023-08-19 10:46:01 -04:00
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
|
|
|
|
challenge: GrandProductChallenge<Target>,
|
2023-12-04 16:26:10 -05:00
|
|
|
running_sum: Target,
|
2023-08-23 23:29:58 +01:00
|
|
|
segment: Target,
|
|
|
|
|
idx: usize,
|
|
|
|
|
val: &[Target],
|
2023-08-19 10:46:01 -04:00
|
|
|
) -> Target {
|
2023-08-23 23:29:58 +01:00
|
|
|
debug_assert!(val.len() <= VALUE_LIMBS);
|
|
|
|
|
let len = core::cmp::min(val.len(), VALUE_LIMBS);
|
2023-08-19 10:46:01 -04:00
|
|
|
|
|
|
|
|
let row = builder.add_virtual_targets(13);
|
2023-10-16 08:53:59 -04:00
|
|
|
// is_read = false
|
|
|
|
|
builder.assert_zero(row[0]);
|
|
|
|
|
// context = 0
|
|
|
|
|
builder.assert_zero(row[1]);
|
2023-08-19 10:46:01 -04:00
|
|
|
// segment
|
|
|
|
|
builder.connect(row[2], segment);
|
|
|
|
|
// virtual
|
2023-08-23 23:29:58 +01:00
|
|
|
let field_target = builder.constant(F::from_canonical_usize(idx));
|
2023-08-19 10:46:01 -04:00
|
|
|
builder.connect(row[3], field_target);
|
|
|
|
|
|
|
|
|
|
// values
|
|
|
|
|
for j in 0..len {
|
2023-10-16 08:53:59 -04:00
|
|
|
// connect the actual value limbs
|
2023-08-23 23:29:58 +01:00
|
|
|
builder.connect(row[4 + j], val[j]);
|
2023-08-19 10:46:01 -04:00
|
|
|
}
|
|
|
|
|
for j in len..VALUE_LIMBS {
|
2023-10-16 08:53:59 -04:00
|
|
|
// assert that the remaining limbs are 0
|
|
|
|
|
builder.assert_zero(row[4 + j]);
|
2023-08-19 10:46:01 -04:00
|
|
|
}
|
|
|
|
|
|
2023-10-16 08:53:59 -04:00
|
|
|
// timestamp = 1
|
|
|
|
|
builder.assert_one(row[12]);
|
2023-08-19 10:46:01 -04:00
|
|
|
|
|
|
|
|
let combined = challenge.combine_base_circuit(builder, &row);
|
2023-12-04 16:26:10 -05:00
|
|
|
let inverse = builder.inverse(combined);
|
|
|
|
|
builder.add(running_sum, inverse)
|
2023-08-19 10:46:01 -04:00
|
|
|
}
|
|
|
|
|
|
2022-09-12 08:09:17 +02:00
|
|
|
fn eval_l_0_and_l_last_circuit<F: RichField + Extendable<D>, const D: usize>(
|
2022-05-04 20:57:07 +02:00
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
|
|
|
|
log_n: usize,
|
|
|
|
|
x: ExtensionTarget<D>,
|
|
|
|
|
z_x: ExtensionTarget<D>,
|
|
|
|
|
) -> (ExtensionTarget<D>, ExtensionTarget<D>) {
|
|
|
|
|
let n = builder.constant_extension(F::Extension::from_canonical_usize(1 << log_n));
|
|
|
|
|
let g = builder.constant_extension(F::Extension::primitive_root_of_unity(log_n));
|
|
|
|
|
let one = builder.one_extension();
|
2022-09-12 08:09:17 +02:00
|
|
|
let l_0_deno = builder.mul_sub_extension(n, x, n);
|
2022-05-04 20:57:07 +02:00
|
|
|
let l_last_deno = builder.mul_sub_extension(g, x, one);
|
|
|
|
|
let l_last_deno = builder.mul_extension(n, l_last_deno);
|
|
|
|
|
|
|
|
|
|
(
|
2022-09-12 08:09:17 +02:00
|
|
|
builder.div_extension(z_x, l_0_deno),
|
2022-05-04 20:57:07 +02:00
|
|
|
builder.div_extension(z_x, l_last_deno),
|
|
|
|
|
)
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-03 11:48:25 -08:00
|
|
|
pub(crate) fn add_virtual_public_values<F: RichField + Extendable<D>, const D: usize>(
|
2022-05-04 20:57:07 +02:00
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
2022-08-25 12:24:22 -07:00
|
|
|
) -> PublicValuesTarget {
|
|
|
|
|
let trie_roots_before = add_virtual_trie_roots(builder);
|
|
|
|
|
let trie_roots_after = add_virtual_trie_roots(builder);
|
|
|
|
|
let block_metadata = add_virtual_block_metadata(builder);
|
2023-08-21 23:32:53 +01:00
|
|
|
let block_hashes = add_virtual_block_hashes(builder);
|
2023-08-23 23:29:58 +01:00
|
|
|
let extra_block_data = add_virtual_extra_block_data(builder);
|
2022-08-25 12:24:22 -07:00
|
|
|
PublicValuesTarget {
|
|
|
|
|
trie_roots_before,
|
|
|
|
|
trie_roots_after,
|
|
|
|
|
block_metadata,
|
2023-08-21 23:32:53 +01:00
|
|
|
block_hashes,
|
2023-08-23 23:29:58 +01:00
|
|
|
extra_block_data,
|
2022-08-25 12:24:22 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-03 11:48:25 -08:00
|
|
|
pub(crate) fn add_virtual_trie_roots<F: RichField + Extendable<D>, const D: usize>(
|
2022-08-25 12:24:22 -07:00
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
|
|
|
|
) -> TrieRootsTarget {
|
2023-06-21 20:05:39 +02:00
|
|
|
let state_root = builder.add_virtual_public_input_arr();
|
|
|
|
|
let transactions_root = builder.add_virtual_public_input_arr();
|
|
|
|
|
let receipts_root = builder.add_virtual_public_input_arr();
|
2022-08-25 12:24:22 -07:00
|
|
|
TrieRootsTarget {
|
|
|
|
|
state_root,
|
|
|
|
|
transactions_root,
|
|
|
|
|
receipts_root,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-03 11:48:25 -08:00
|
|
|
pub(crate) fn add_virtual_block_metadata<F: RichField + Extendable<D>, const D: usize>(
|
2022-08-25 12:24:22 -07:00
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
|
|
|
|
) -> BlockMetadataTarget {
|
2023-06-21 20:05:39 +02:00
|
|
|
let block_beneficiary = builder.add_virtual_public_input_arr();
|
|
|
|
|
let block_timestamp = builder.add_virtual_public_input();
|
|
|
|
|
let block_number = builder.add_virtual_public_input();
|
|
|
|
|
let block_difficulty = builder.add_virtual_public_input();
|
2023-09-25 18:20:22 +02:00
|
|
|
let block_random = builder.add_virtual_public_input_arr();
|
2023-11-17 10:01:26 -05:00
|
|
|
let block_gaslimit = builder.add_virtual_public_input();
|
2023-06-21 20:05:39 +02:00
|
|
|
let block_chain_id = builder.add_virtual_public_input();
|
2023-08-19 10:23:24 -04:00
|
|
|
let block_base_fee = builder.add_virtual_public_input_arr();
|
2023-11-17 10:01:26 -05:00
|
|
|
let block_gas_used = builder.add_virtual_public_input();
|
2023-05-04 09:57:02 +02:00
|
|
|
let block_bloom = builder.add_virtual_public_input_arr();
|
2022-08-25 12:24:22 -07:00
|
|
|
BlockMetadataTarget {
|
2022-08-25 23:35:38 -07:00
|
|
|
block_beneficiary,
|
2022-08-25 12:24:22 -07:00
|
|
|
block_timestamp,
|
|
|
|
|
block_number,
|
|
|
|
|
block_difficulty,
|
2023-09-25 18:20:22 +02:00
|
|
|
block_random,
|
2022-08-25 12:24:22 -07:00
|
|
|
block_gaslimit,
|
|
|
|
|
block_chain_id,
|
2022-08-25 23:35:38 -07:00
|
|
|
block_base_fee,
|
2023-08-23 23:29:58 +01:00
|
|
|
block_gas_used,
|
2023-05-04 09:57:02 +02:00
|
|
|
block_bloom,
|
2022-05-04 20:57:07 +02:00
|
|
|
}
|
|
|
|
|
}
|
2023-08-21 23:32:53 +01:00
|
|
|
|
|
|
|
|
pub(crate) fn add_virtual_block_hashes<F: RichField + Extendable<D>, const D: usize>(
|
|
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
|
|
|
|
) -> BlockHashesTarget {
|
|
|
|
|
let prev_hashes = builder.add_virtual_public_input_arr();
|
|
|
|
|
let cur_hash = builder.add_virtual_public_input_arr();
|
|
|
|
|
BlockHashesTarget {
|
|
|
|
|
prev_hashes,
|
|
|
|
|
cur_hash,
|
|
|
|
|
}
|
|
|
|
|
}
|
2023-08-23 23:29:58 +01:00
|
|
|
pub(crate) fn add_virtual_extra_block_data<F: RichField + Extendable<D>, const D: usize>(
|
|
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
|
|
|
|
) -> ExtraBlockDataTarget {
|
2023-12-09 06:26:55 +01:00
|
|
|
let checkpoint_state_trie_root = builder.add_virtual_public_input_arr();
|
2023-08-23 23:29:58 +01:00
|
|
|
let txn_number_before = builder.add_virtual_public_input();
|
|
|
|
|
let txn_number_after = builder.add_virtual_public_input();
|
2023-11-17 10:01:26 -05:00
|
|
|
let gas_used_before = builder.add_virtual_public_input();
|
|
|
|
|
let gas_used_after = builder.add_virtual_public_input();
|
2023-08-23 23:29:58 +01:00
|
|
|
ExtraBlockDataTarget {
|
2023-12-09 06:26:55 +01:00
|
|
|
checkpoint_state_trie_root,
|
2023-08-23 23:29:58 +01:00
|
|
|
txn_number_before,
|
|
|
|
|
txn_number_after,
|
|
|
|
|
gas_used_before,
|
|
|
|
|
gas_used_after,
|
|
|
|
|
}
|
|
|
|
|
}
|
2022-05-04 20:57:07 +02:00
|
|
|
|
2023-01-03 11:48:25 -08:00
|
|
|
pub(crate) fn add_virtual_stark_proof<
|
|
|
|
|
F: RichField + Extendable<D>,
|
|
|
|
|
S: Stark<F, D>,
|
|
|
|
|
const D: usize,
|
|
|
|
|
>(
|
2022-05-04 20:57:07 +02:00
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
2022-09-22 11:01:27 +02:00
|
|
|
stark: &S,
|
2022-05-04 20:57:07 +02:00
|
|
|
config: &StarkConfig,
|
|
|
|
|
degree_bits: usize,
|
2024-01-10 08:54:13 +01:00
|
|
|
num_ctl_helper_zs: usize,
|
2022-05-25 08:00:41 +02:00
|
|
|
num_ctl_zs: usize,
|
2022-05-04 20:57:07 +02:00
|
|
|
) -> StarkProofTarget<D> {
|
|
|
|
|
let fri_params = config.fri_params(degree_bits);
|
|
|
|
|
let cap_height = fri_params.config.cap_height;
|
|
|
|
|
|
2022-05-26 16:27:15 +02:00
|
|
|
let num_leaves_per_oracle = vec![
|
|
|
|
|
S::COLUMNS,
|
2024-01-10 08:54:13 +01:00
|
|
|
stark.num_lookup_helper_columns(config) + num_ctl_helper_zs,
|
2022-05-26 16:27:15 +02:00
|
|
|
stark.quotient_degree_factor() * config.num_challenges,
|
|
|
|
|
];
|
2022-05-04 20:57:07 +02:00
|
|
|
|
2023-02-13 15:58:26 +01:00
|
|
|
let auxiliary_polys_cap = builder.add_virtual_cap(cap_height);
|
2022-05-04 20:57:07 +02:00
|
|
|
|
|
|
|
|
StarkProofTarget {
|
|
|
|
|
trace_cap: builder.add_virtual_cap(cap_height),
|
2023-02-13 15:58:26 +01:00
|
|
|
auxiliary_polys_cap,
|
2022-05-04 20:57:07 +02:00
|
|
|
quotient_polys_cap: builder.add_virtual_cap(cap_height),
|
2024-01-10 08:54:13 +01:00
|
|
|
openings: add_virtual_stark_opening_set::<F, S, D>(
|
|
|
|
|
builder,
|
|
|
|
|
stark,
|
|
|
|
|
num_ctl_helper_zs,
|
|
|
|
|
num_ctl_zs,
|
|
|
|
|
config,
|
|
|
|
|
),
|
2022-05-04 20:57:07 +02:00
|
|
|
opening_proof: builder.add_virtual_fri_proof(&num_leaves_per_oracle, &fri_params),
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-10-06 16:32:35 +02:00
|
|
|
fn add_virtual_stark_opening_set<F: RichField + Extendable<D>, S: Stark<F, D>, const D: usize>(
|
2022-05-04 20:57:07 +02:00
|
|
|
builder: &mut CircuitBuilder<F, D>,
|
2022-09-22 11:01:27 +02:00
|
|
|
stark: &S,
|
2024-01-10 08:54:13 +01:00
|
|
|
num_ctl_helper_zs: usize,
|
2022-05-25 08:00:41 +02:00
|
|
|
num_ctl_zs: usize,
|
2022-05-04 20:57:07 +02:00
|
|
|
config: &StarkConfig,
|
|
|
|
|
) -> StarkOpeningSetTarget<D> {
|
|
|
|
|
let num_challenges = config.num_challenges;
|
|
|
|
|
StarkOpeningSetTarget {
|
|
|
|
|
local_values: builder.add_virtual_extension_targets(S::COLUMNS),
|
|
|
|
|
next_values: builder.add_virtual_extension_targets(S::COLUMNS),
|
2024-01-10 08:54:13 +01:00
|
|
|
auxiliary_polys: builder.add_virtual_extension_targets(
|
|
|
|
|
stark.num_lookup_helper_columns(config) + num_ctl_helper_zs,
|
|
|
|
|
),
|
|
|
|
|
auxiliary_polys_next: builder.add_virtual_extension_targets(
|
|
|
|
|
stark.num_lookup_helper_columns(config) + num_ctl_helper_zs,
|
|
|
|
|
),
|
2023-09-11 14:11:13 -04:00
|
|
|
ctl_zs_first: builder.add_virtual_targets(num_ctl_zs),
|
2022-05-04 20:57:07 +02:00
|
|
|
quotient_polys: builder
|
|
|
|
|
.add_virtual_extension_targets(stark.quotient_degree_factor() * num_challenges),
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-01 09:34:13 -04:00
|
|
|
pub(crate) fn set_stark_proof_target<F, C: GenericConfig<D, F = F>, W, const D: usize>(
|
2022-05-04 20:57:07 +02:00
|
|
|
witness: &mut W,
|
|
|
|
|
proof_target: &StarkProofTarget<D>,
|
2023-04-01 09:34:13 -04:00
|
|
|
proof: &StarkProof<F, C, D>,
|
2022-05-24 16:24:52 +02:00
|
|
|
zero: Target,
|
2022-05-04 20:57:07 +02:00
|
|
|
) where
|
|
|
|
|
F: RichField + Extendable<D>,
|
2023-05-11 02:59:02 +10:00
|
|
|
C::Hasher: AlgebraicHasher<F>,
|
2022-05-04 20:57:07 +02:00
|
|
|
W: Witness<F>,
|
|
|
|
|
{
|
|
|
|
|
witness.set_cap_target(&proof_target.trace_cap, &proof.trace_cap);
|
|
|
|
|
witness.set_cap_target(&proof_target.quotient_polys_cap, &proof.quotient_polys_cap);
|
|
|
|
|
|
|
|
|
|
witness.set_fri_openings(
|
2022-05-24 16:24:52 +02:00
|
|
|
&proof_target.openings.to_fri_openings(zero),
|
2022-05-04 20:57:07 +02:00
|
|
|
&proof.openings.to_fri_openings(),
|
|
|
|
|
);
|
|
|
|
|
|
2022-05-24 16:24:52 +02:00
|
|
|
witness.set_cap_target(
|
2023-02-13 15:58:26 +01:00
|
|
|
&proof_target.auxiliary_polys_cap,
|
|
|
|
|
&proof.auxiliary_polys_cap,
|
2022-05-24 16:24:52 +02:00
|
|
|
);
|
2022-05-04 20:57:07 +02:00
|
|
|
|
|
|
|
|
set_fri_proof_target(witness, &proof_target.opening_proof, &proof.opening_proof);
|
|
|
|
|
}
|
2022-08-25 12:24:22 -07:00
|
|
|
|
2023-12-22 17:23:22 +01:00
|
|
|
pub fn set_public_value_targets<F, W, const D: usize>(
|
2022-08-25 12:24:22 -07:00
|
|
|
witness: &mut W,
|
|
|
|
|
public_values_target: &PublicValuesTarget,
|
|
|
|
|
public_values: &PublicValues,
|
2023-09-12 19:23:16 -04:00
|
|
|
) -> Result<(), ProgramError>
|
|
|
|
|
where
|
2022-08-25 12:24:22 -07:00
|
|
|
F: RichField + Extendable<D>,
|
|
|
|
|
W: Witness<F>,
|
|
|
|
|
{
|
|
|
|
|
set_trie_roots_target(
|
|
|
|
|
witness,
|
|
|
|
|
&public_values_target.trie_roots_before,
|
|
|
|
|
&public_values.trie_roots_before,
|
|
|
|
|
);
|
|
|
|
|
set_trie_roots_target(
|
|
|
|
|
witness,
|
|
|
|
|
&public_values_target.trie_roots_after,
|
|
|
|
|
&public_values.trie_roots_after,
|
|
|
|
|
);
|
|
|
|
|
set_block_metadata_target(
|
|
|
|
|
witness,
|
|
|
|
|
&public_values_target.block_metadata,
|
|
|
|
|
&public_values.block_metadata,
|
2023-09-12 19:23:16 -04:00
|
|
|
)?;
|
2023-08-21 23:32:53 +01:00
|
|
|
set_block_hashes_target(
|
|
|
|
|
witness,
|
|
|
|
|
&public_values_target.block_hashes,
|
|
|
|
|
&public_values.block_hashes,
|
|
|
|
|
);
|
2023-08-23 23:29:58 +01:00
|
|
|
set_extra_public_values_target(
|
|
|
|
|
witness,
|
|
|
|
|
&public_values_target.extra_block_data,
|
|
|
|
|
&public_values.extra_block_data,
|
2023-09-26 11:13:57 -04:00
|
|
|
)?;
|
2023-09-12 19:23:16 -04:00
|
|
|
|
|
|
|
|
Ok(())
|
2022-08-25 12:24:22 -07:00
|
|
|
}
|
|
|
|
|
|
2023-01-03 11:48:25 -08:00
|
|
|
pub(crate) fn set_trie_roots_target<F, W, const D: usize>(
|
2022-08-25 12:24:22 -07:00
|
|
|
witness: &mut W,
|
|
|
|
|
trie_roots_target: &TrieRootsTarget,
|
|
|
|
|
trie_roots: &TrieRoots,
|
|
|
|
|
) where
|
|
|
|
|
F: RichField + Extendable<D>,
|
|
|
|
|
W: Witness<F>,
|
|
|
|
|
{
|
2023-06-21 20:05:39 +02:00
|
|
|
for (i, limb) in trie_roots.state_root.into_uint().0.into_iter().enumerate() {
|
|
|
|
|
witness.set_target(
|
|
|
|
|
trie_roots_target.state_root[2 * i],
|
|
|
|
|
F::from_canonical_u32(limb as u32),
|
|
|
|
|
);
|
|
|
|
|
witness.set_target(
|
|
|
|
|
trie_roots_target.state_root[2 * i + 1],
|
|
|
|
|
F::from_canonical_u32((limb >> 32) as u32),
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (i, limb) in trie_roots
|
|
|
|
|
.transactions_root
|
|
|
|
|
.into_uint()
|
|
|
|
|
.0
|
|
|
|
|
.into_iter()
|
|
|
|
|
.enumerate()
|
|
|
|
|
{
|
|
|
|
|
witness.set_target(
|
|
|
|
|
trie_roots_target.transactions_root[2 * i],
|
|
|
|
|
F::from_canonical_u32(limb as u32),
|
|
|
|
|
);
|
|
|
|
|
witness.set_target(
|
|
|
|
|
trie_roots_target.transactions_root[2 * i + 1],
|
|
|
|
|
F::from_canonical_u32((limb >> 32) as u32),
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (i, limb) in trie_roots
|
|
|
|
|
.receipts_root
|
|
|
|
|
.into_uint()
|
|
|
|
|
.0
|
|
|
|
|
.into_iter()
|
|
|
|
|
.enumerate()
|
|
|
|
|
{
|
|
|
|
|
witness.set_target(
|
|
|
|
|
trie_roots_target.receipts_root[2 * i],
|
|
|
|
|
F::from_canonical_u32(limb as u32),
|
|
|
|
|
);
|
|
|
|
|
witness.set_target(
|
|
|
|
|
trie_roots_target.receipts_root[2 * i + 1],
|
|
|
|
|
F::from_canonical_u32((limb >> 32) as u32),
|
|
|
|
|
);
|
|
|
|
|
}
|
2022-08-25 12:24:22 -07:00
|
|
|
}
|
|
|
|
|
|
2023-01-03 11:48:25 -08:00
|
|
|
pub(crate) fn set_block_metadata_target<F, W, const D: usize>(
|
2022-08-25 12:24:22 -07:00
|
|
|
witness: &mut W,
|
|
|
|
|
block_metadata_target: &BlockMetadataTarget,
|
|
|
|
|
block_metadata: &BlockMetadata,
|
2023-09-12 19:23:16 -04:00
|
|
|
) -> Result<(), ProgramError>
|
|
|
|
|
where
|
2022-08-25 12:24:22 -07:00
|
|
|
F: RichField + Extendable<D>,
|
|
|
|
|
W: Witness<F>,
|
|
|
|
|
{
|
2023-08-01 16:44:22 -04:00
|
|
|
let beneficiary_limbs: [F; 5] =
|
|
|
|
|
u256_limbs::<F>(U256::from_big_endian(&block_metadata.block_beneficiary.0))[..5]
|
|
|
|
|
.try_into()
|
|
|
|
|
.unwrap();
|
|
|
|
|
witness.set_target_arr(&block_metadata_target.block_beneficiary, &beneficiary_limbs);
|
2022-08-25 12:24:22 -07:00
|
|
|
witness.set_target(
|
|
|
|
|
block_metadata_target.block_timestamp,
|
2023-09-18 14:29:11 -04:00
|
|
|
u256_to_u32(block_metadata.block_timestamp)?,
|
2022-08-25 12:24:22 -07:00
|
|
|
);
|
|
|
|
|
witness.set_target(
|
|
|
|
|
block_metadata_target.block_number,
|
2023-09-18 14:29:11 -04:00
|
|
|
u256_to_u32(block_metadata.block_number)?,
|
2022-08-25 12:24:22 -07:00
|
|
|
);
|
|
|
|
|
witness.set_target(
|
|
|
|
|
block_metadata_target.block_difficulty,
|
2023-09-18 14:29:11 -04:00
|
|
|
u256_to_u32(block_metadata.block_difficulty)?,
|
2022-08-25 12:24:22 -07:00
|
|
|
);
|
2023-09-25 18:20:22 +02:00
|
|
|
witness.set_target_arr(
|
|
|
|
|
&block_metadata_target.block_random,
|
|
|
|
|
&h256_limbs(block_metadata.block_random),
|
|
|
|
|
);
|
2023-11-17 10:01:26 -05:00
|
|
|
witness.set_target(
|
|
|
|
|
block_metadata_target.block_gaslimit,
|
|
|
|
|
u256_to_u32(block_metadata.block_gaslimit)?,
|
|
|
|
|
);
|
2022-08-25 12:24:22 -07:00
|
|
|
witness.set_target(
|
|
|
|
|
block_metadata_target.block_chain_id,
|
2023-09-18 14:29:11 -04:00
|
|
|
u256_to_u32(block_metadata.block_chain_id)?,
|
2022-08-25 12:24:22 -07:00
|
|
|
);
|
2023-08-19 10:23:24 -04:00
|
|
|
// Basefee fits in 2 limbs
|
2023-09-18 14:29:11 -04:00
|
|
|
let basefee = u256_to_u64(block_metadata.block_base_fee)?;
|
2023-09-12 19:23:16 -04:00
|
|
|
witness.set_target(block_metadata_target.block_base_fee[0], basefee.0);
|
|
|
|
|
witness.set_target(block_metadata_target.block_base_fee[1], basefee.1);
|
2023-11-17 10:01:26 -05:00
|
|
|
witness.set_target(
|
|
|
|
|
block_metadata_target.block_gas_used,
|
|
|
|
|
u256_to_u32(block_metadata.block_gas_used)?,
|
|
|
|
|
);
|
2023-05-04 09:57:02 +02:00
|
|
|
let mut block_bloom_limbs = [F::ZERO; 64];
|
|
|
|
|
for (i, limbs) in block_bloom_limbs.chunks_exact_mut(8).enumerate() {
|
|
|
|
|
limbs.copy_from_slice(&u256_limbs(block_metadata.block_bloom[i]));
|
|
|
|
|
}
|
|
|
|
|
witness.set_target_arr(&block_metadata_target.block_bloom, &block_bloom_limbs);
|
2023-09-12 19:23:16 -04:00
|
|
|
|
|
|
|
|
Ok(())
|
2022-08-25 12:24:22 -07:00
|
|
|
}
|
2023-08-23 23:29:58 +01:00
|
|
|
|
2023-08-21 23:32:53 +01:00
|
|
|
pub(crate) fn set_block_hashes_target<F, W, const D: usize>(
|
|
|
|
|
witness: &mut W,
|
|
|
|
|
block_hashes_target: &BlockHashesTarget,
|
|
|
|
|
block_hashes: &BlockHashes,
|
|
|
|
|
) where
|
|
|
|
|
F: RichField + Extendable<D>,
|
|
|
|
|
W: Witness<F>,
|
|
|
|
|
{
|
|
|
|
|
for i in 0..256 {
|
2023-09-05 15:48:30 +01:00
|
|
|
let block_hash_limbs: [F; 8] = h256_limbs::<F>(block_hashes.prev_hashes[i]);
|
2023-08-21 23:32:53 +01:00
|
|
|
witness.set_target_arr(
|
|
|
|
|
&block_hashes_target.prev_hashes[8 * i..8 * (i + 1)],
|
|
|
|
|
&block_hash_limbs,
|
|
|
|
|
);
|
|
|
|
|
}
|
2023-09-05 15:48:30 +01:00
|
|
|
let cur_block_hash_limbs: [F; 8] = h256_limbs::<F>(block_hashes.cur_hash);
|
2023-08-21 23:32:53 +01:00
|
|
|
witness.set_target_arr(&block_hashes_target.cur_hash, &cur_block_hash_limbs);
|
|
|
|
|
}
|
|
|
|
|
|
2023-08-23 23:29:58 +01:00
|
|
|
pub(crate) fn set_extra_public_values_target<F, W, const D: usize>(
|
|
|
|
|
witness: &mut W,
|
|
|
|
|
ed_target: &ExtraBlockDataTarget,
|
|
|
|
|
ed: &ExtraBlockData,
|
2023-09-26 11:13:57 -04:00
|
|
|
) -> Result<(), ProgramError>
|
|
|
|
|
where
|
2023-08-23 23:29:58 +01:00
|
|
|
F: RichField + Extendable<D>,
|
|
|
|
|
W: Witness<F>,
|
|
|
|
|
{
|
2023-09-11 15:47:33 +01:00
|
|
|
witness.set_target_arr(
|
2023-12-09 06:26:55 +01:00
|
|
|
&ed_target.checkpoint_state_trie_root,
|
|
|
|
|
&h256_limbs::<F>(ed.checkpoint_state_trie_root),
|
2023-09-11 15:47:33 +01:00
|
|
|
);
|
2023-08-23 23:29:58 +01:00
|
|
|
witness.set_target(
|
|
|
|
|
ed_target.txn_number_before,
|
2023-09-26 11:13:57 -04:00
|
|
|
u256_to_u32(ed.txn_number_before)?,
|
2023-08-23 23:29:58 +01:00
|
|
|
);
|
|
|
|
|
witness.set_target(
|
|
|
|
|
ed_target.txn_number_after,
|
2023-09-26 11:13:57 -04:00
|
|
|
u256_to_u32(ed.txn_number_after)?,
|
2023-08-23 23:29:58 +01:00
|
|
|
);
|
2023-11-17 10:01:26 -05:00
|
|
|
witness.set_target(ed_target.gas_used_before, u256_to_u32(ed.gas_used_before)?);
|
|
|
|
|
witness.set_target(ed_target.gas_used_after, u256_to_u32(ed.gas_used_after)?);
|
2023-08-23 23:29:58 +01:00
|
|
|
|
2023-09-26 11:13:57 -04:00
|
|
|
Ok(())
|
2023-08-23 23:29:58 +01:00
|
|
|
}
|