use std::any::type_name;

use anyhow::{ensure, Result};
use ethereum_types::{BigEndianHash, U256};
use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::types::Field;
use plonky2::fri::verifier::verify_fri_proof;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::config::GenericConfig;
use plonky2::plonk::plonk_common::reduce_with_powers;

use crate::all_stark::{AllStark, Table, NUM_TABLES};
use crate::config::StarkConfig;
use crate::constraint_consumer::ConstraintConsumer;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cross_table_lookup::{
    verify_cross_table_lookups, CtlCheckVars, GrandProductChallenge, GrandProductChallengeSet,
};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::lookup::LookupCheckVars;
use crate::memory::segments::Segment;
use crate::memory::VALUE_LIMBS;
use crate::proof::{
    AllProof, AllProofChallenges, PublicValues, StarkOpeningSet, StarkProof, StarkProofChallenges,
};
use crate::stark::Stark;
use crate::util::h2u;
use crate::vanishing_poly::eval_vanishing_poly;
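
/// Verifies all the STARK proofs contained in an `AllProof` against their sampled
/// challenges, then checks the cross-table lookup argument tying the tables together.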
pub fn verify_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
    all_stark: &AllStark<F, D>,
    all_proof: AllProof<F, C, D>,
    config: &StarkConfig,
) -> Result<()> {
    let AllProofChallenges {
        stark_challenges,
        ctl_challenges,
    } = all_proof
        .get_challenges(config)
        .map_err(|_| anyhow::Error::msg("Invalid sampling of proof challenges."))?;

    let num_lookup_columns = all_stark.num_lookups_helper_columns(config);

    let AllStark {
        arithmetic_stark,
        byte_packing_stark,
        cpu_stark,
        keccak_stark,
        keccak_sponge_stark,
        logic_stark,
        memory_stark,
        cross_table_lookups,
    } = all_stark;

    let ctl_vars_per_table = CtlCheckVars::from_proofs(
        &all_proof.stark_proofs,
        cross_table_lookups,
        &ctl_challenges,
        &num_lookup_columns,
    );
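
    // Verify each table's STARK proof in turn, using that table's challenges and CTL data.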
    verify_stark_proof_with_challenges(
        arithmetic_stark,
        &all_proof.stark_proofs[Table::Arithmetic as usize].proof,
        &stark_challenges[Table::Arithmetic as usize],
        &ctl_vars_per_table[Table::Arithmetic as usize],
        &ctl_challenges,
        config,
    )?;
    verify_stark_proof_with_challenges(
        byte_packing_stark,
        &all_proof.stark_proofs[Table::BytePacking as usize].proof,
        &stark_challenges[Table::BytePacking as usize],
        &ctl_vars_per_table[Table::BytePacking as usize],
        &ctl_challenges,
        config,
    )?;
    verify_stark_proof_with_challenges(
        cpu_stark,
        &all_proof.stark_proofs[Table::Cpu as usize].proof,
        &stark_challenges[Table::Cpu as usize],
        &ctl_vars_per_table[Table::Cpu as usize],
        &ctl_challenges,
        config,
    )?;
    verify_stark_proof_with_challenges(
        keccak_stark,
        &all_proof.stark_proofs[Table::Keccak as usize].proof,
        &stark_challenges[Table::Keccak as usize],
        &ctl_vars_per_table[Table::Keccak as usize],
        &ctl_challenges,
        config,
    )?;
    verify_stark_proof_with_challenges(
        keccak_sponge_stark,
        &all_proof.stark_proofs[Table::KeccakSponge as usize].proof,
        &stark_challenges[Table::KeccakSponge as usize],
        &ctl_vars_per_table[Table::KeccakSponge as usize],
        &ctl_challenges,
        config,
    )?;
    verify_stark_proof_with_challenges(
        logic_stark,
        &all_proof.stark_proofs[Table::Logic as usize].proof,
        &stark_challenges[Table::Logic as usize],
        &ctl_vars_per_table[Table::Logic as usize],
        &ctl_challenges,
        config,
    )?;
    verify_stark_proof_with_challenges(
        memory_stark,
        &all_proof.stark_proofs[Table::Memory as usize].proof,
        &stark_challenges[Table::Memory as usize],
        &ctl_vars_per_table[Table::Memory as usize],
        &ctl_challenges,
        config,
    )?;

    let public_values = all_proof.public_values;

    // Extra sums to add to the looked last value.
    // Only necessary for the Memory values.
    let mut extra_looking_sums = vec![vec![F::ZERO; config.num_challenges]; NUM_TABLES];

    // Memory
    extra_looking_sums[Table::Memory as usize] = (0..config.num_challenges)
        .map(|i| get_memory_extra_looking_sum(&public_values, ctl_challenges.challenges[i]))
        .collect_vec();
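
    // Verify the cross-table lookups, taking into account the extra Memory sums computed above.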
    verify_cross_table_lookups::<F, D>(
        cross_table_lookups,
        all_proof
            .stark_proofs
            .map(|p| p.proof.openings.ctl_zs_first),
        extra_looking_sums,
        config,
    )
}

/// Computes the extra sum to add to the looked last value. It contains memory operations
/// not in the CPU trace:
/// - block metadata writes,
/// - trie roots writes,
/// - block bloom writes,
/// - block hashes writes.
pub(crate) fn get_memory_extra_looking_sum<F, const D: usize>(
    public_values: &PublicValues,
    challenge: GrandProductChallenge<F>,
) -> F
where
    F: RichField + Extendable<D>,
{
    let mut sum = F::ZERO;

    // Add metadata and tries writes.
    let fields = [
        (
            GlobalMetadata::BlockBeneficiary,
            U256::from_big_endian(&public_values.block_metadata.block_beneficiary.0),
        ),
        (
            GlobalMetadata::BlockTimestamp,
            public_values.block_metadata.block_timestamp,
        ),
        (
            GlobalMetadata::BlockNumber,
            public_values.block_metadata.block_number,
        ),
        (
            GlobalMetadata::BlockRandom,
            public_values.block_metadata.block_random.into_uint(),
        ),
        (
            GlobalMetadata::BlockDifficulty,
            public_values.block_metadata.block_difficulty,
        ),
        (
            GlobalMetadata::BlockGasLimit,
            public_values.block_metadata.block_gaslimit,
        ),
        (
            GlobalMetadata::BlockChainId,
            public_values.block_metadata.block_chain_id,
        ),
        (
            GlobalMetadata::BlockBaseFee,
            public_values.block_metadata.block_base_fee,
        ),
        (
            GlobalMetadata::BlockCurrentHash,
            h2u(public_values.block_hashes.cur_hash),
        ),
        (
            GlobalMetadata::BlockGasUsed,
            public_values.block_metadata.block_gas_used,
        ),
        (
            GlobalMetadata::TxnNumberBefore,
            public_values.extra_block_data.txn_number_before,
        ),
        (
            GlobalMetadata::TxnNumberAfter,
            public_values.extra_block_data.txn_number_after,
        ),
        (
            GlobalMetadata::BlockGasUsedBefore,
            public_values.extra_block_data.gas_used_before,
        ),
        (
            GlobalMetadata::BlockGasUsedAfter,
            public_values.extra_block_data.gas_used_after,
        ),
        (
            GlobalMetadata::StateTrieRootDigestBefore,
            h2u(public_values.trie_roots_before.state_root),
        ),
        (
            GlobalMetadata::TransactionTrieRootDigestBefore,
            h2u(public_values.trie_roots_before.transactions_root),
        ),
        (
            GlobalMetadata::ReceiptTrieRootDigestBefore,
            h2u(public_values.trie_roots_before.receipts_root),
        ),
        (
            GlobalMetadata::StateTrieRootDigestAfter,
            h2u(public_values.trie_roots_after.state_root),
        ),
        (
            GlobalMetadata::TransactionTrieRootDigestAfter,
            h2u(public_values.trie_roots_after.transactions_root),
        ),
        (
            GlobalMetadata::ReceiptTrieRootDigestAfter,
            h2u(public_values.trie_roots_after.receipts_root),
        ),
        (GlobalMetadata::KernelHash, h2u(KERNEL.code_hash)),
        (GlobalMetadata::KernelLen, KERNEL.code.len().into()),
    ];

    let segment = F::from_canonical_u32(Segment::GlobalMetadata as u32);

    fields.map(|(field, val)| sum = add_data_write(challenge, segment, sum, field as usize, val));

    // Add block bloom writes.
    let bloom_segment = F::from_canonical_u32(Segment::GlobalBlockBloom as u32);
    for index in 0..8 {
        let val = public_values.block_metadata.block_bloom[index];
        sum = add_data_write(challenge, bloom_segment, sum, index, val);
    }

    // Add block hashes writes.
    let block_hashes_segment = F::from_canonical_u32(Segment::BlockHashes as u32);
    for index in 0..256 {
        let val = h2u(public_values.block_hashes.prev_hashes[index]);
        sum = add_data_write(challenge, block_hashes_segment, sum, index, val);
    }

    sum
}
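
/// Adds the contribution of one extra memory write to `running_sum`: the 13-column memory
/// row is combined into a single field element using the CTL challenge, and the inverse of
/// that combination is accumulated, matching how the Memory table's looked values are
/// accumulated in the cross-table lookup argument.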
fn add_data_write<F, const D: usize>(
    challenge: GrandProductChallenge<F>,
    segment: F,
    running_sum: F,
    index: usize,
    val: U256,
) -> F
where
    F: RichField + Extendable<D>,
{
    let mut row = [F::ZERO; 13];
    row[0] = F::ZERO; // is_read
    row[1] = F::ZERO; // context
    row[2] = segment;
    row[3] = F::from_canonical_usize(index);

    for j in 0..VALUE_LIMBS {
        row[j + 4] = F::from_canonical_u32((val >> (j * 32)).low_u32());
    }
    row[12] = F::ONE; // timestamp
    running_sum + challenge.combine(row.iter()).inverse()
}
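
/// Verifies a single STARK proof against challenges that have already been sampled:
/// validates the proof shape, recomputes the vanishing polynomial at `zeta` and checks it
/// against the opened quotient polynomial, then verifies the FRI opening proof.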
pub(crate) fn verify_stark_proof_with_challenges<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
    const D: usize,
>(
    stark: &S,
    proof: &StarkProof<F, C, D>,
    challenges: &StarkProofChallenges<F, D>,
    ctl_vars: &[CtlCheckVars<F, F::Extension, F::Extension, D>],
    ctl_challenges: &GrandProductChallengeSet<F>,
    config: &StarkConfig,
) -> Result<()> {
    log::debug!("Checking proof: {}", type_name::<S>());
    validate_proof_shape(stark, proof, config, ctl_vars.len())?;
    let StarkOpeningSet {
        local_values,
        next_values,
        auxiliary_polys,
        auxiliary_polys_next,
        ctl_zs_first,
        quotient_polys,
    } = &proof.openings;
    let vars = S::EvaluationFrame::from_values(local_values, next_values);

    let degree_bits = proof.recover_degree_bits(config);
    let (l_0, l_last) = eval_l_0_and_l_last(degree_bits, challenges.stark_zeta);
    let last = F::primitive_root_of_unity(degree_bits).inverse();
    let z_last = challenges.stark_zeta - last.into();
    let mut consumer = ConstraintConsumer::<F::Extension>::new(
        challenges
            .stark_alphas
            .iter()
            .map(|&alpha| F::Extension::from_basefield(alpha))
            .collect::<Vec<_>>(),
        z_last,
        l_0,
        l_last,
    );

    let num_lookup_columns = stark.num_lookup_helper_columns(config);
    let lookup_challenges = (num_lookup_columns > 0).then(|| {
        ctl_challenges
            .challenges
            .iter()
            .map(|ch| ch.beta)
            .collect::<Vec<_>>()
    });

    let lookup_vars = stark.uses_lookups().then(|| LookupCheckVars {
        local_values: auxiliary_polys[..num_lookup_columns].to_vec(),
        next_values: auxiliary_polys_next[..num_lookup_columns].to_vec(),
        challenges: lookup_challenges.unwrap(),
    });
    let lookups = stark.lookups();
    eval_vanishing_poly::<F, F::Extension, F::Extension, S, D, D>(
        stark,
        &vars,
        &lookups,
        lookup_vars,
        ctl_vars,
        &mut consumer,
    );
    let vanishing_polys_zeta = consumer.accumulators();

    // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta.
    let zeta_pow_deg = challenges.stark_zeta.exp_power_of_2(degree_bits);
    let z_h_zeta = zeta_pow_deg - F::Extension::ONE;
    // `quotient_polys` holds `num_challenges * quotient_degree_factor` evaluations at `zeta`.
    // Each chunk of `quotient_degree_factor` holds the evaluations of `t_0(zeta),...,t_{quotient_degree_factor-1}(zeta)`
    // where the "real" quotient polynomial is `t(X) = t_0(X) + t_1(X)*X^n + t_2(X)*X^{2n} + ...`.
    // So to reconstruct `t(zeta)` we can compute `reduce_with_powers(chunk, zeta^n)` for each
    // `quotient_degree_factor`-sized chunk of the original evaluations.
    for (i, chunk) in quotient_polys
        .chunks(stark.quotient_degree_factor())
        .enumerate()
    {
        ensure!(
            vanishing_polys_zeta[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg),
            "Mismatch between evaluation and opening of quotient polynomial"
        );
    }

    let merkle_caps = vec![
        proof.trace_cap.clone(),
        proof.auxiliary_polys_cap.clone(),
        proof.quotient_polys_cap.clone(),
    ];
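
    // Finally, verify the FRI proof for the openings of the trace, auxiliary, and quotient
    // polynomial commitments.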
    verify_fri_proof::<F, C, D>(
        &stark.fri_instance(
            challenges.stark_zeta,
            F::primitive_root_of_unity(degree_bits),
            ctl_zs_first.len(),
            config,
        ),
        &proof.openings.to_fri_openings(),
        &challenges.fri_challenges,
        &merkle_caps,
        &proof.opening_proof,
        &config.fri_params(degree_bits),
    )?;

    Ok(())
}
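
/// Checks that the Merkle caps and opened values in `proof` have the lengths dictated by
/// the STARK, the config, and the number of cross-table lookup columns.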
fn validate_proof_shape<F, C, S, const D: usize>(
    stark: &S,
    proof: &StarkProof<F, C, D>,
    config: &StarkConfig,
    num_ctl_zs: usize,
) -> anyhow::Result<()>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
{
    let StarkProof {
        trace_cap,
        auxiliary_polys_cap,
        quotient_polys_cap,
        openings,
        // The shape of the opening proof will be checked in the FRI verifier (see
        // validate_fri_proof_shape), so we ignore it here.
        opening_proof: _,
    } = proof;

    let StarkOpeningSet {
        local_values,
        next_values,
        auxiliary_polys,
        auxiliary_polys_next,
        ctl_zs_first,
        quotient_polys,
    } = openings;

    let degree_bits = proof.recover_degree_bits(config);
    let fri_params = config.fri_params(degree_bits);
    let cap_height = fri_params.config.cap_height;
    let num_auxiliary = num_ctl_zs + stark.num_lookup_helper_columns(config);

    ensure!(trace_cap.height() == cap_height);
    ensure!(auxiliary_polys_cap.height() == cap_height);
    ensure!(quotient_polys_cap.height() == cap_height);

    ensure!(local_values.len() == S::COLUMNS);
    ensure!(next_values.len() == S::COLUMNS);
    ensure!(auxiliary_polys.len() == num_auxiliary);
    ensure!(auxiliary_polys_next.len() == num_auxiliary);
    ensure!(ctl_zs_first.len() == num_ctl_zs);
    ensure!(quotient_polys.len() == stark.num_quotient_polys(config));

    Ok(())
}

/// Evaluate the Lagrange polynomials `L_0` and `L_(n-1)` at a point `x`.
/// `L_0(x) = (x^n - 1)/(n * (x - 1))`
/// `L_(n-1)(x) = (x^n - 1)/(n * (g * x - 1))`, with `g` the first element of the subgroup.
fn eval_l_0_and_l_last<F: Field>(log_n: usize, x: F) -> (F, F) {
    let n = F::from_canonical_usize(1 << log_n);
    let g = F::primitive_root_of_unity(log_n);
    let z_x = x.exp_power_of_2(log_n) - F::ONE;
    let invs = F::batch_multiplicative_inverse(&[n * (x - F::ONE), n * (g * x - F::ONE)]);

    (z_x * invs[0], z_x * invs[1])
}

#[cfg(test)]
pub(crate) mod testutils {
    use super::*;

    /// Output all the extra memory rows that don't appear in the CPU trace but are
    /// necessary to correctly check the MemoryStark CTL.
    pub(crate) fn get_memory_extra_looking_values<F, const D: usize>(
        public_values: &PublicValues,
    ) -> Vec<Vec<F>>
    where
        F: RichField + Extendable<D>,
    {
        // Add metadata and tries writes.
        let fields = [
            (
                GlobalMetadata::BlockBeneficiary,
                U256::from_big_endian(&public_values.block_metadata.block_beneficiary.0),
            ),
            (
                GlobalMetadata::BlockTimestamp,
                public_values.block_metadata.block_timestamp,
            ),
            (
                GlobalMetadata::BlockNumber,
                public_values.block_metadata.block_number,
            ),
            (
                GlobalMetadata::BlockRandom,
                public_values.block_metadata.block_random.into_uint(),
            ),
            (
                GlobalMetadata::BlockDifficulty,
                public_values.block_metadata.block_difficulty,
            ),
            (
                GlobalMetadata::BlockGasLimit,
                public_values.block_metadata.block_gaslimit,
            ),
            (
                GlobalMetadata::BlockChainId,
                public_values.block_metadata.block_chain_id,
            ),
            (
                GlobalMetadata::BlockBaseFee,
                public_values.block_metadata.block_base_fee,
            ),
            (
                GlobalMetadata::BlockCurrentHash,
                h2u(public_values.block_hashes.cur_hash),
            ),
            (
                GlobalMetadata::BlockGasUsed,
                public_values.block_metadata.block_gas_used,
            ),
            (
                GlobalMetadata::TxnNumberBefore,
                public_values.extra_block_data.txn_number_before,
            ),
            (
                GlobalMetadata::TxnNumberAfter,
                public_values.extra_block_data.txn_number_after,
            ),
            (
                GlobalMetadata::BlockGasUsedBefore,
                public_values.extra_block_data.gas_used_before,
            ),
            (
                GlobalMetadata::BlockGasUsedAfter,
                public_values.extra_block_data.gas_used_after,
            ),
            (
                GlobalMetadata::StateTrieRootDigestBefore,
                h2u(public_values.trie_roots_before.state_root),
            ),
            (
                GlobalMetadata::TransactionTrieRootDigestBefore,
                h2u(public_values.trie_roots_before.transactions_root),
            ),
            (
                GlobalMetadata::ReceiptTrieRootDigestBefore,
                h2u(public_values.trie_roots_before.receipts_root),
            ),
            (
                GlobalMetadata::StateTrieRootDigestAfter,
                h2u(public_values.trie_roots_after.state_root),
            ),
            (
                GlobalMetadata::TransactionTrieRootDigestAfter,
                h2u(public_values.trie_roots_after.transactions_root),
            ),
            (
                GlobalMetadata::ReceiptTrieRootDigestAfter,
                h2u(public_values.trie_roots_after.receipts_root),
            ),
            (GlobalMetadata::KernelHash, h2u(KERNEL.code_hash)),
            (GlobalMetadata::KernelLen, KERNEL.code.len().into()),
        ];

        let segment = F::from_canonical_u32(Segment::GlobalMetadata as u32);
        let mut extra_looking_rows = Vec::new();

        fields.map(|(field, val)| {
            extra_looking_rows.push(add_extra_looking_row(segment, field as usize, val))
        });

        // Add block bloom writes.
        let bloom_segment = F::from_canonical_u32(Segment::GlobalBlockBloom as u32);
        for index in 0..8 {
            let val = public_values.block_metadata.block_bloom[index];
            extra_looking_rows.push(add_extra_looking_row(bloom_segment, index, val));
        }

        // Add block hashes writes.
        let block_hashes_segment = F::from_canonical_u32(Segment::BlockHashes as u32);
        for index in 0..256 {
            let val = h2u(public_values.block_hashes.prev_hashes[index]);
            extra_looking_rows.push(add_extra_looking_row(block_hashes_segment, index, val));
        }

        extra_looking_rows
    }
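
    /// Builds the 13-column memory row for one extra write (metadata, block bloom or
    /// block hash), using the same layout as `add_data_write` above.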
    fn add_extra_looking_row<F, const D: usize>(segment: F, index: usize, val: U256) -> Vec<F>
    where
        F: RichField + Extendable<D>,
    {
        let mut row = vec![F::ZERO; 13];
        row[0] = F::ZERO; // is_read
        row[1] = F::ZERO; // context
        row[2] = segment;
        row[3] = F::from_canonical_usize(index);

        for j in 0..VALUE_LIMBS {
            row[j + 4] = F::from_canonical_u32((val >> (j * 32)).low_u32());
        }
        row[12] = F::ONE; // timestamp
        row
    }
}

#[cfg(test)]
mod tests {
    use plonky2::field::goldilocks_field::GoldilocksField;
    use plonky2::field::polynomial::PolynomialValues;
    use plonky2::field::types::Sample;

    use crate::verifier::eval_l_0_and_l_last;
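
    // Compare the closed-form evaluation of `L_0` and `L_(n-1)` against a naive evaluation
    // obtained by interpolating the corresponding selector polynomials.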
    #[test]
    fn test_eval_l_0_and_l_last() {
        type F = GoldilocksField;
        let log_n = 5;
        let n = 1 << log_n;

        let x = F::rand(); // challenge point
        let expected_l_first_x = PolynomialValues::selector(n, 0).ifft().eval(x);
        let expected_l_last_x = PolynomialValues::selector(n, n - 1).ifft().eval(x);

        let (l_first_x, l_last_x) = eval_l_0_and_l_last(log_n, x);
        assert_eq!(l_first_x, expected_l_first_x);
        assert_eq!(l_last_x, expected_l_last_x);
    }
}