diff --git a/src/bin/bench_recursion.rs b/src/bin/bench_recursion.rs index 19f7716c..59b65e51 100644 --- a/src/bin/bench_recursion.rs +++ b/src/bin/bench_recursion.rs @@ -8,7 +8,6 @@ use plonky2::fri::FriConfig; use plonky2::gates::constant::ConstantGate; use plonky2::gates::gmimc::GMiMCGate; use plonky2::hash::GMIMC_ROUNDS; -use plonky2::prover::PLONK_BLINDING; use plonky2::witness::PartialWitness; fn main() { @@ -19,12 +18,6 @@ fn main() { env_logger::Builder::from_env(Env::default().default_filter_or("debug")).init(); bench_prove::(); - - // bench_field_mul::(); - - // bench_fft(); - println!(); - // bench_gmimc::(); } fn bench_prove, const D: usize>() { @@ -32,7 +25,7 @@ fn bench_prove, const D: usize>() { let config = CircuitConfig { num_wires: 134, - num_routed_wires: 12, + num_routed_wires: 27, security_bits: 128, rate_bits: 3, num_challenges: 3, @@ -41,23 +34,22 @@ fn bench_prove, const D: usize>() { rate_bits: 3, reduction_arity_bits: vec![1], num_query_rounds: 1, - blinding: PLONK_BLINDING.to_vec(), }, }; let mut builder = CircuitBuilder::::new(config); + let zero = builder.zero(); + let zero_ext = builder.zero_extension(); + + let mut state = [zero; 12]; for _ in 0..10000 { - builder.add_gate_no_constants(gmimc_gate.clone()); + state = builder.permute(state); } - builder.add_gate(ConstantGate::get(), vec![F::NEG_ONE]); - - // for _ in 0..(40 * 5) { - // builder.add_gate( - // FriConsistencyGate::new(2, 3, 13), - // vec![F::primitive_root_of_unity(13)]); - // } + // Random other gates. + builder.add(zero, zero); + builder.add_extension(zero_ext, zero_ext); let prover = builder.build_prover(); let inputs = PartialWitness::new(); diff --git a/src/circuit_builder.rs b/src/circuit_builder.rs index 4d414906..9863de47 100644 --- a/src/circuit_builder.rs +++ b/src/circuit_builder.rs @@ -8,17 +8,20 @@ use crate::circuit_data::{ VerifierCircuitData, VerifierOnlyCircuitData, }; use crate::field::cosets::get_unique_coset_shifts; +use crate::field::extension_field::target::ExtensionTarget; use crate::field::extension_field::Extendable; use crate::gates::constant::ConstantGate; -use crate::gates::gate::{GateInstance, GateRef}; +use crate::gates::gate::{GateInstance, GateRef, PrefixedGate}; +use crate::gates::gate_tree::Tree; use crate::gates::noop::NoopGate; use crate::generator::{CopyGenerator, RandomValueGenerator, WitnessGenerator}; use crate::hash::hash_n_to_hash; use crate::permutation_argument::TargetPartitions; +use crate::plonk_common::PlonkPolynomials; use crate::polynomial::commitment::ListPolynomialCommitment; use crate::polynomial::polynomial::PolynomialValues; use crate::target::Target; -use crate::util::{log2_ceil, log2_strict, transpose}; +use crate::util::{log2_ceil, log2_strict, transpose, transpose_poly_values}; use crate::wire::Wire; pub struct CircuitBuilder, const D: usize> { @@ -129,6 +132,12 @@ impl, const D: usize> CircuitBuilder { self.assert_equal(src, dst); } + pub fn route_extension(&mut self, src: ExtensionTarget, dst: ExtensionTarget) { + for i in 0..D { + self.route(src.0[i], dst.0[i]); + } + } + /// Adds a generator which will copy `src` to `dst`. 
pub fn generate_copy(&mut self, src: Target, dst: Target) { self.add_generator(CopyGenerator { src, dst }); @@ -148,6 +157,17 @@ impl, const D: usize> CircuitBuilder { self.copy_constraints.push((x, y)); } + pub fn assert_zero(&mut self, x: Target) { + let zero = self.zero(); + self.assert_equal(x, zero); + } + + pub fn assert_equal_extension(&mut self, x: ExtensionTarget, y: ExtensionTarget) { + for i in 0..D { + self.assert_equal(x.0[i], y.0[i]); + } + } + pub fn add_generators(&mut self, generators: Vec>>) { self.generators.extend(generators); } @@ -299,22 +319,26 @@ impl, const D: usize> CircuitBuilder { } } - fn constant_polys(&self) -> Vec> { - let num_constants = self - .gate_instances + fn constant_polys(&self, gates: &[PrefixedGate]) -> Vec> { + let num_constants = gates .iter() - .map(|gate_inst| gate_inst.constants.len()) + .map(|gate| gate.gate.0.num_constants() + gate.prefix.len()) .max() .unwrap(); let constants_per_gate = self .gate_instances .iter() - .map(|gate_inst| { - let mut padded_constants = gate_inst.constants.clone(); - for _ in padded_constants.len()..num_constants { - padded_constants.push(F::ZERO); - } - padded_constants + .map(|gate| { + let prefix = &gates + .iter() + .find(|g| g.gate.0.id() == gate.gate_type.0.id()) + .unwrap() + .prefix; + let mut prefixed_constants = Vec::with_capacity(num_constants); + prefixed_constants.extend(prefix.iter().map(|&b| if b { F::ONE } else { F::ZERO })); + prefixed_constants.extend_from_slice(&gate.constants); + prefixed_constants.resize(num_constants, F::ZERO); + prefixed_constants }) .collect::>(); @@ -324,7 +348,7 @@ impl, const D: usize> CircuitBuilder { .collect() } - fn sigma_vecs(&self, k_is: &[F]) -> Vec> { + fn sigma_vecs(&self, k_is: &[F], subgroup: &[F]) -> Vec> { let degree = self.gate_instances.len(); let degree_log = log2_strict(degree); let mut target_partitions = TargetPartitions::new(); @@ -344,7 +368,7 @@ impl, const D: usize> CircuitBuilder { } let wire_partitions = target_partitions.to_wire_partitions(); - wire_partitions.get_sigma_polys(degree_log, k_is) + wire_partitions.get_sigma_polys(degree_log, k_is, subgroup) } /// Builds a "full circuit", with both prover and verifier data. 
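(As an illustrative aside, not part of the patch: the prefix packing that `constant_polys` performs above can be sketched standalone in plain Rust, with `u64` standing in for field elements and hypothetical names.)

fn prefixed_row(prefix: &[bool], constants: &[u64], num_constants: usize) -> Vec<u64> {
    // The gate's tree prefix is encoded as 0/1 elements, followed by the gate's own
    // constants, then zero-padded so every row has the same width `num_constants`.
    let mut row = Vec::with_capacity(num_constants);
    row.extend(prefix.iter().map(|&b| if b { 1 } else { 0 }));
    row.extend_from_slice(constants);
    row.resize(num_constants, 0);
    row
}

fn main() {
    // A gate with prefix [true, false] and a single constant 7, padded to width 5.
    assert_eq!(prefixed_row(&[true, false], &[7], 5), vec![1, 0, 7, 0, 0]);
}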
@@ -358,33 +382,38 @@ impl, const D: usize> CircuitBuilder { let degree = self.gate_instances.len(); info!("degree after blinding & padding: {}", degree); - let constant_vecs = self.constant_polys(); - let constants_commitment = ListPolynomialCommitment::new( - constant_vecs.into_iter().map(|v| v.ifft()).collect(), - self.config.fri_config.rate_bits, - false, - ); + let gates = self.gates.iter().cloned().collect(); + let gate_tree = Tree::from_gates(gates); + let prefixed_gates = PrefixedGate::from_tree(gate_tree); + + let degree_bits = log2_strict(degree); + let subgroup = F::two_adic_subgroup(degree_bits); + + let constant_vecs = self.constant_polys(&prefixed_gates); + let num_constants = constant_vecs.len(); let k_is = get_unique_coset_shifts(degree, self.config.num_routed_wires); - let sigma_vecs = self.sigma_vecs(&k_is); - let sigmas_commitment = ListPolynomialCommitment::new( - sigma_vecs.into_iter().map(|v| v.ifft()).collect(), + let sigma_vecs = self.sigma_vecs(&k_is, &subgroup); + + let constants_sigmas_vecs = [constant_vecs, sigma_vecs.clone()].concat(); + let constants_sigmas_commitment = ListPolynomialCommitment::new( + constants_sigmas_vecs, self.config.fri_config.rate_bits, - false, + PlonkPolynomials::CONSTANTS_SIGMAS.blinding, ); - let constants_root = constants_commitment.merkle_tree.root; - let sigmas_root = sigmas_commitment.merkle_tree.root; + let constants_sigmas_root = constants_sigmas_commitment.merkle_tree.root; let verifier_only = VerifierOnlyCircuitData { - constants_root, - sigmas_root, + constants_sigmas_root, }; - let generators = self.generators; let prover_only = ProverOnlyCircuitData { - generators, - constants_commitment, - sigmas_commitment, + generators: self.generators, + constants_sigmas_commitment, + sigmas: transpose_poly_values(sigma_vecs), + subgroup, + copy_constraints: self.copy_constraints, + gate_instances: self.gate_instances, }; // The HashSet of gates will have a non-deterministic order. When converting to a Vec, we @@ -398,17 +427,20 @@ impl, const D: usize> CircuitBuilder { .max() .expect("No gates?"); - let degree_bits = log2_strict(degree); - // TODO: This should also include an encoding of gate constraints. - let circuit_digest_parts = [constants_root.elements, sigmas_root.elements]; + let circuit_digest_parts = [ + constants_sigmas_root.elements.to_vec(), + vec![/* Add other circuit data here */], + ]; let circuit_digest = hash_n_to_hash(circuit_digest_parts.concat(), false); let common = CommonCircuitData { config: self.config, degree_bits, - gates, + gates: prefixed_gates, + max_filtered_constraint_degree_bits: 3, // TODO: compute this correctly once filters land. 
num_gate_constraints, + num_constants, k_is, circuit_digest, }; diff --git a/src/circuit_data.rs b/src/circuit_data.rs index 4d9a7110..afb37628 100644 --- a/src/circuit_data.rs +++ b/src/circuit_data.rs @@ -1,13 +1,16 @@ +use std::ops::Range; + use anyhow::Result; use crate::field::extension_field::Extendable; use crate::field::field::Field; use crate::fri::FriConfig; -use crate::gates::gate::GateRef; +use crate::gates::gate::{GateInstance, PrefixedGate}; use crate::generator::WitnessGenerator; use crate::polynomial::commitment::ListPolynomialCommitment; use crate::proof::{Hash, HashTarget, Proof}; use crate::prover::prove; +use crate::target::Target; use crate::verifier::verify; use crate::witness::PartialWitness; @@ -38,7 +41,6 @@ impl Default for CircuitConfig { rate_bits: 1, reduction_arity_bits: vec![1], num_query_rounds: 1, - blinding: vec![true], }, } } @@ -48,11 +50,27 @@ impl CircuitConfig { pub fn num_advice_wires(&self) -> usize { self.num_wires - self.num_routed_wires } + + pub(crate) fn large_config() -> Self { + Self { + num_wires: 134, + num_routed_wires: 12, + security_bits: 128, + rate_bits: 3, + num_challenges: 3, + fri_config: FriConfig { + proof_of_work_bits: 1, + rate_bits: 3, + reduction_arity_bits: vec![1], + num_query_rounds: 1, + }, + } + } } /// Circuit data required by the prover or the verifier. pub struct CircuitData, const D: usize> { - pub(crate) prover_only: ProverOnlyCircuitData, + pub(crate) prover_only: ProverOnlyCircuitData, pub(crate) verifier_only: VerifierOnlyCircuitData, pub(crate) common: CommonCircuitData, } @@ -75,7 +93,7 @@ impl, const D: usize> CircuitData { /// required, like LDEs of preprocessed polynomials. If more succinctness was desired, we could /// construct a more minimal prover structure and convert back and forth. pub struct ProverCircuitData, const D: usize> { - pub(crate) prover_only: ProverOnlyCircuitData, + pub(crate) prover_only: ProverOnlyCircuitData, pub(crate) common: CommonCircuitData, } @@ -98,35 +116,44 @@ impl, const D: usize> VerifierCircuitData { } /// Circuit data required by the prover, but not the verifier. -pub(crate) struct ProverOnlyCircuitData { +pub(crate) struct ProverOnlyCircuitData, const D: usize> { pub generators: Vec>>, - /// Commitments to the constants polynomial. - pub constants_commitment: ListPolynomialCommitment, - /// Commitments to the sigma polynomial. - pub sigmas_commitment: ListPolynomialCommitment, + /// Commitments to the constants polynomials and sigma polynomials. + pub constants_sigmas_commitment: ListPolynomialCommitment, + /// The transpose of the list of sigma polynomials. + pub sigmas: Vec>, + /// Subgroup of order `degree`. + pub subgroup: Vec, + /// The circuit's copy constraints. + pub copy_constraints: Vec<(Target, Target)>, + /// The concrete placement of each gate in the circuit. + pub gate_instances: Vec>, } /// Circuit data required by the verifier, but not the prover. pub(crate) struct VerifierOnlyCircuitData { - /// A commitment to each constant polynomial. - pub(crate) constants_root: Hash, - - /// A commitment to each permutation polynomial. - pub(crate) sigmas_root: Hash, + /// A commitment to each constant polynomial and each permutation polynomial. + pub(crate) constants_sigmas_root: Hash, } /// Circuit data required by both the prover and the verifier. -pub(crate) struct CommonCircuitData, const D: usize> { +pub struct CommonCircuitData, const D: usize> { pub(crate) config: CircuitConfig, pub(crate) degree_bits: usize, - /// The types of gates used in this circuit. 
- pub(crate) gates: Vec>, + /// The types of gates used in this circuit, along with their prefixes. + pub(crate) gates: Vec>, + + /// The maximum degree of a filter times a constraint by any gate. + pub(crate) max_filtered_constraint_degree_bits: usize, /// The largest number of constraints imposed by any gate. pub(crate) num_gate_constraints: usize, + /// The number of constant wires. + pub(crate) num_constants: usize, + /// The `{k_i}` valued used in `S_ID_i` in Plonk's permutation argument. pub(crate) k_is: Vec, @@ -151,19 +178,29 @@ impl, const D: usize> CommonCircuitData { pub fn constraint_degree(&self) -> usize { self.gates .iter() - .map(|g| g.0.degree()) + .map(|g| g.gate.0.degree()) .max() .expect("No gates?") } pub fn quotient_degree(&self) -> usize { - self.constraint_degree() - 1 + ((1 << self.max_filtered_constraint_degree_bits) - 1) * self.degree() } pub fn total_constraints(&self) -> usize { // 2 constraints for each Z check. self.config.num_challenges * 2 + self.num_gate_constraints } + + /// Range of the constants polynomials in the `constants_sigmas_commitment`. + pub fn constants_range(&self) -> Range { + 0..self.num_constants + } + + /// Range of the sigma polynomials in the `constants_sigmas_commitment`. + pub fn sigmas_range(&self) -> Range { + self.num_constants..self.num_constants + self.config.num_routed_wires + } } /// The `Target` version of `VerifierCircuitData`, for use inside recursive circuits. Note that this diff --git a/src/field/crandall_field.rs b/src/field/crandall_field.rs index 0cee8860..dbd29cb2 100644 --- a/src/field/crandall_field.rs +++ b/src/field/crandall_field.rs @@ -8,7 +8,7 @@ use num::Integer; use crate::field::extension_field::quadratic::QuadraticCrandallField; use crate::field::extension_field::quartic::QuarticCrandallField; -use crate::field::extension_field::Extendable; +use crate::field::extension_field::{Extendable, Frobenius}; use crate::field::field::Field; /// EPSILON = 9 * 2**28 - 1 @@ -444,6 +444,8 @@ fn split(x: u128) -> (u64, u64) { (x as u64, (x >> 64) as u64) } +impl Frobenius<1> for CrandallField {} + #[cfg(test)] mod tests { use crate::test_arithmetic; diff --git a/src/field/extension_field/algebra.rs b/src/field/extension_field/algebra.rs index 996053bf..fcd60185 100644 --- a/src/field/extension_field/algebra.rs +++ b/src/field/extension_field/algebra.rs @@ -220,7 +220,6 @@ mod tests { let y = ExtensionAlgebra::from_basefield_array(arr1); let z = x * y; - dbg!(z.0, mul_mle(ts.clone())); assert_eq!(z.0, mul_mle(ts)); } diff --git a/src/field/extension_field/mod.rs b/src/field/extension_field/mod.rs index 60d2b2e1..2a176fe9 100644 --- a/src/field/extension_field/mod.rs +++ b/src/field/extension_field/mod.rs @@ -1,3 +1,5 @@ +use std::convert::TryInto; + use crate::field::field::Field; pub mod algebra; @@ -12,32 +14,42 @@ pub mod target; pub trait OEF: FieldExtension { // Element W of BaseField, such that `X^d - W` is irreducible over BaseField. const W: Self::BaseField; - - /// Frobenius automorphisms: x -> x^p, where p is the order of BaseField. 
- fn frobenius(&self) -> Self { - let arr = self.to_basefield_array(); - let k = (Self::BaseField::ORDER - 1) / (D as u64); - let z0 = Self::W.exp(k); - let mut z = Self::BaseField::ONE; - let mut res = [Self::BaseField::ZERO; D]; - for i in 0..D { - res[i] = arr[i] * z; - z *= z0; - } - - Self::from_basefield_array(res) - } } impl OEF<1> for F { const W: Self::BaseField = F::ZERO; } -pub trait Extendable: Field + Sized { - type Extension: Field + OEF + From; +pub trait Frobenius: OEF { + /// FrobeniusField automorphisms: x -> x^p, where p is the order of BaseField. + fn frobenius(&self) -> Self { + self.repeated_frobenius(1) + } + + /// Repeated Frobenius automorphisms: x -> x^(p^k). + fn repeated_frobenius(&self, count: usize) -> Self { + if count == 0 { + return *self; + } else if count >= D { + return self.repeated_frobenius(count % D); + } + let arr = self.to_basefield_array(); + let k = (Self::BaseField::ORDER - 1) / (D as u64); + let z0 = Self::W.exp(k * count as u64); + let mut res = [Self::BaseField::ZERO; D]; + for (i, z) in z0.powers().take(D).enumerate() { + res[i] = arr[i] * z; + } + + Self::from_basefield_array(res) + } } -impl Extendable<1> for F { +pub trait Extendable: Field + Sized { + type Extension: Field + OEF + Frobenius + From; +} + +impl + FieldExtension<1, BaseField = F>> Extendable<1> for F { type Extension = F; } @@ -88,10 +100,6 @@ where { debug_assert_eq!(l.len() % D, 0); l.chunks_exact(D) - .map(|c| { - let mut arr = [F::ZERO; D]; - arr.copy_from_slice(c); - F::Extension::from_basefield_array(arr) - }) + .map(|c| F::Extension::from_basefield_array(c.to_vec().try_into().unwrap())) .collect() } diff --git a/src/field/extension_field/quadratic.rs b/src/field/extension_field/quadratic.rs index b61ccbaa..af21ad60 100644 --- a/src/field/extension_field/quadratic.rs +++ b/src/field/extension_field/quadratic.rs @@ -6,7 +6,7 @@ use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssi use rand::Rng; use crate::field::crandall_field::CrandallField; -use crate::field::extension_field::{FieldExtension, OEF}; +use crate::field::extension_field::{FieldExtension, Frobenius, OEF}; use crate::field::field::Field; #[derive(Copy, Clone, Eq, PartialEq, Hash)] @@ -18,6 +18,8 @@ impl OEF<2> for QuadraticCrandallField { const W: CrandallField = CrandallField(3); } +impl Frobenius<2> for QuadraticCrandallField {} + impl FieldExtension<2> for QuadraticCrandallField { type BaseField = CrandallField; @@ -65,7 +67,7 @@ impl Field for QuadraticCrandallField { return None; } - let a_pow_r_minus_1 = OEF::<2>::frobenius(self); + let a_pow_r_minus_1 = self.frobenius(); let a_pow_r = a_pow_r_minus_1 * *self; debug_assert!(FieldExtension::<2>::is_in_basefield(&a_pow_r)); @@ -192,7 +194,7 @@ impl DivAssign for QuadraticCrandallField { #[cfg(test)] mod tests { use crate::field::extension_field::quadratic::QuadraticCrandallField; - use crate::field::extension_field::{FieldExtension, OEF}; + use crate::field::extension_field::{FieldExtension, Frobenius}; use crate::field::field::Field; #[test] @@ -233,7 +235,7 @@ mod tests { let x = F::rand(); assert_eq!( x.exp(>::BaseField::ORDER), - OEF::<2>::frobenius(&x) + x.frobenius() ); } diff --git a/src/field/extension_field/quartic.rs b/src/field/extension_field/quartic.rs index 2acac183..b93cbb56 100644 --- a/src/field/extension_field/quartic.rs +++ b/src/field/extension_field/quartic.rs @@ -6,7 +6,7 @@ use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssi use rand::Rng; use 
crate::field::crandall_field::CrandallField; -use crate::field::extension_field::{FieldExtension, OEF}; +use crate::field::extension_field::{FieldExtension, Frobenius, OEF}; use crate::field::field::Field; /// A quartic extension of `CrandallField`. @@ -20,6 +20,8 @@ impl OEF<4> for QuarticCrandallField { const W: CrandallField = CrandallField(3); } +impl Frobenius<4> for QuarticCrandallField {} + impl FieldExtension<4> for QuarticCrandallField { type BaseField = CrandallField; @@ -93,9 +95,9 @@ impl Field for QuarticCrandallField { return None; } - let a_pow_p = OEF::<4>::frobenius(self); + let a_pow_p = self.frobenius(); let a_pow_p_plus_1 = a_pow_p * *self; - let a_pow_p3_plus_p2 = OEF::<4>::frobenius(&OEF::<4>::frobenius(&a_pow_p_plus_1)); + let a_pow_p3_plus_p2 = a_pow_p_plus_1.repeated_frobenius(2); let a_pow_r_minus_1 = a_pow_p3_plus_p2 * a_pow_p; let a_pow_r = a_pow_r_minus_1 * *self; debug_assert!(FieldExtension::<4>::is_in_basefield(&a_pow_r)); @@ -241,7 +243,7 @@ impl DivAssign for QuarticCrandallField { #[cfg(test)] mod tests { use crate::field::extension_field::quartic::QuarticCrandallField; - use crate::field::extension_field::{FieldExtension, OEF}; + use crate::field::extension_field::{FieldExtension, Frobenius, OEF}; use crate::field::field::Field; fn exp_naive(x: F, power: u128) -> F { @@ -292,11 +294,18 @@ mod tests { #[test] fn test_frobenius() { type F = QuarticCrandallField; + const D: usize = 4; let x = F::rand(); assert_eq!( - exp_naive(x, >::BaseField::ORDER as u128), - OEF::<4>::frobenius(&x) + exp_naive(x, >::BaseField::ORDER as u128), + x.frobenius() ); + for count in 2..D { + assert_eq!( + x.repeated_frobenius(count), + (0..count).fold(x, |acc, _| acc.frobenius()) + ); + } } #[test] diff --git a/src/field/extension_field/target.rs b/src/field/extension_field/target.rs index 71e04360..0316b04f 100644 --- a/src/field/extension_field/target.rs +++ b/src/field/extension_field/target.rs @@ -1,7 +1,11 @@ +use std::convert::{TryFrom, TryInto}; +use std::ops::Range; + use crate::circuit_builder::CircuitBuilder; use crate::field::extension_field::algebra::ExtensionAlgebra; use crate::field::extension_field::{Extendable, FieldExtension, OEF}; use crate::field::field::Field; +use crate::gates::mul_extension::MulExtensionGate; use crate::target::Target; /// `Target`s representing an element of an extension field. @@ -12,6 +16,50 @@ impl ExtensionTarget { pub fn to_target_array(&self) -> [Target; D] { self.0 } + + pub fn frobenius>(&self, builder: &mut CircuitBuilder) -> Self { + self.repeated_frobenius(1, builder) + } + + pub fn repeated_frobenius>( + &self, + count: usize, + builder: &mut CircuitBuilder, + ) -> Self { + if count == 0 { + return *self; + } else if count >= D { + return self.repeated_frobenius(count % D, builder); + } + let arr = self.to_target_array(); + let k = (F::ORDER - 1) / (D as u64); + let z0 = F::W.exp(k * count as u64); + let zs = z0 + .powers() + .take(D) + .map(|z| builder.constant(z)) + .collect::>(); + + let mut res = Vec::with_capacity(D); + for (z, a) in zs.into_iter().zip(arr) { + res.push(builder.mul(z, a)); + } + + res.try_into().unwrap() + } + + pub fn from_range(gate: usize, range: Range) -> Self { + debug_assert_eq!(range.end - range.start, D); + Target::wires_from_range(gate, range).try_into().unwrap() + } +} + +impl TryFrom> for ExtensionTarget { + type Error = Vec; + + fn try_from(value: Vec) -> Result { + Ok(Self(value.try_into()?)) + } } /// `Target`s representing an element of an extension of an extension field. 
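(A quick standalone sanity check, not part of the patch, of the coefficient-scaling form of the Frobenius map used above: in F_p[X]/(X^D - W), raising to the p-th power multiplies the i-th coefficient by W^(i*(p-1)/D). The toy parameters p = 7, D = 2, W = 3 below are illustrative only.)

fn main() {
    const P: u64 = 7;
    const W: u64 = 3; // X^2 - 3 is irreducible over F_7.

    // Exponentiation in F_7 by square-and-multiply.
    let pow = |mut b: u64, mut e: u64| -> u64 {
        let mut acc = 1;
        while e > 0 {
            if e & 1 == 1 {
                acc = acc * b % P;
            }
            b = b * b % P;
            e >>= 1;
        }
        acc
    };

    // Multiplication in F_7[X]/(X^2 - W): (a0 + a1*X) * (b0 + b1*X).
    let mul = |a: [u64; 2], b: [u64; 2]| -> [u64; 2] {
        [(a[0] * b[0] + W * a[1] * b[1]) % P, (a[0] * b[1] + a[1] * b[0]) % P]
    };

    let x = [2u64, 5u64];

    // x^p computed naively by repeated multiplication.
    let mut x_pow_p = [1, 0];
    for _ in 0..P {
        x_pow_p = mul(x_pow_p, x);
    }

    // x^p computed by scaling the i-th coefficient by z0^i, with z0 = W^((p-1)/D).
    let z0 = pow(W, (P - 1) / 2);
    let frobenius_x = [x[0], x[1] * z0 % P];

    assert_eq!(x_pow_p, frobenius_x);
}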
@@ -92,6 +140,7 @@ impl, const D: usize> CircuitBuilder { sum } + /// TODO: Change this to using an `arithmetic_extension` function once `MulExtensionGate` supports addend. pub fn sub_extension( &mut self, mut a: ExtensionTarget, @@ -114,23 +163,31 @@ impl, const D: usize> CircuitBuilder { a } + pub fn mul_extension_with_const( + &mut self, + const_0: F, + multiplicand_0: ExtensionTarget, + multiplicand_1: ExtensionTarget, + ) -> ExtensionTarget { + let gate = self.add_gate(MulExtensionGate::new(), vec![const_0]); + + let wire_multiplicand_0 = + ExtensionTarget::from_range(gate, MulExtensionGate::::wires_multiplicand_0()); + let wire_multiplicand_1 = + ExtensionTarget::from_range(gate, MulExtensionGate::::wires_multiplicand_1()); + let wire_output = ExtensionTarget::from_range(gate, MulExtensionGate::::wires_output()); + + self.route_extension(multiplicand_0, wire_multiplicand_0); + self.route_extension(multiplicand_1, wire_multiplicand_1); + wire_output + } + pub fn mul_extension( &mut self, - a: ExtensionTarget, - b: ExtensionTarget, + multiplicand_0: ExtensionTarget, + multiplicand_1: ExtensionTarget, ) -> ExtensionTarget { - let mut res = [self.zero(); D]; - for i in 0..D { - for j in 0..D { - res[(i + j) % D] = if i + j < D { - self.mul_add(a.0[i], b.0[j], res[(i + j) % D]) - } else { - // W * a[i] * b[i] + res[(i + j) % D] - self.arithmetic(F::Extension::W, a.0[i], b.0[i], F::ONE, res[(i + j) % D]) - } - } - } - ExtensionTarget(res) + self.mul_extension_with_const(F::ONE, multiplicand_0, multiplicand_1) } pub fn mul_ext_algebra( @@ -164,6 +221,7 @@ impl, const D: usize> CircuitBuilder { /// Like `mul_add`, but for `ExtensionTarget`s. Note that, unlike `mul_add`, this has no /// performance benefit over separate muls and adds. + /// TODO: Change this to using an `arithmetic_extension` function once `MulExtensionGate` supports addend. pub fn mul_add_extension( &mut self, a: ExtensionTarget, @@ -174,12 +232,23 @@ impl, const D: usize> CircuitBuilder { self.add_extension(product, c) } + /// Like `mul_sub`, but for `ExtensionTarget`s. Note that, unlike `mul_sub`, this has no + /// performance benefit over separate muls and subs. + /// TODO: Change this to using an `arithmetic_extension` function once `MulExtensionGate` supports addend. + pub fn scalar_mul_sub_extension( + &mut self, + a: Target, + b: ExtensionTarget, + c: ExtensionTarget, + ) -> ExtensionTarget { + let product = self.scalar_mul_ext(a, b); + self.sub_extension(product, c) + } + /// Returns `a * b`, where `b` is in the extension field and `a` is in the base field. - pub fn scalar_mul_ext(&mut self, a: Target, mut b: ExtensionTarget) -> ExtensionTarget { - for i in 0..D { - b.0[i] = self.mul(a, b.0[i]); - } - b + pub fn scalar_mul_ext(&mut self, a: Target, b: ExtensionTarget) -> ExtensionTarget { + let a_ext = self.convert_to_ext(a); + self.mul_extension(a_ext, b) } /// Returns `a * b`, where `b` is in the extension of the extension field, and `a` is in the @@ -194,4 +263,26 @@ impl, const D: usize> CircuitBuilder { } b } + + pub fn convert_to_ext(&mut self, t: Target) -> ExtensionTarget { + let zero = self.zero(); + let mut arr = [zero; D]; + arr[0] = t; + ExtensionTarget(arr) + } +} + +/// Flatten the slice by sending every extension target to its D-sized canonical representation. +pub fn flatten_target(l: &[ExtensionTarget]) -> Vec { + l.iter() + .flat_map(|x| x.to_target_array().to_vec()) + .collect() +} + +/// Batch every D-sized chunks into extension targets. 
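+/// This is the inverse of `flatten_target`: flattening a list of extension targets and then unflattening the result returns the original list.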
+pub fn unflatten_target, const D: usize>(l: &[Target]) -> Vec> { + debug_assert_eq!(l.len() % D, 0); + l.chunks_exact(D) + .map(|c| c.to_vec().try_into().unwrap()) + .collect() } diff --git a/src/field/fft.rs b/src/field/fft.rs index 8bcde967..56764b47 100644 --- a/src/field/fft.rs +++ b/src/field/fft.rs @@ -44,12 +44,14 @@ pub(crate) fn fft_precompute(degree: usize) -> FftPrecomputation { let degree_log = log2_ceil(degree); let mut subgroups_rev = Vec::new(); - for i in 0..=degree_log { - let g_i = F::primitive_root_of_unity(i); - let subgroup = F::cyclic_subgroup_known_order(g_i, 1 << i); + let mut subgroup = F::two_adic_subgroup(degree_log); + for _i in 0..=degree_log { + let subsubgroup = subgroup.iter().step_by(2).copied().collect(); let subgroup_rev = reverse_index_bits(subgroup); subgroups_rev.push(subgroup_rev); + subgroup = subsubgroup; } + subgroups_rev.reverse(); FftPrecomputation { subgroups_rev } } @@ -126,32 +128,11 @@ pub(crate) fn fft_with_precomputation_power_of_2( PolynomialValues { values } } -pub(crate) fn coset_fft(poly: PolynomialCoeffs, shift: F) -> PolynomialValues { - let mut points = fft(poly); - let mut shift_exp_i = F::ONE; - for p in points.values.iter_mut() { - *p *= shift_exp_i; - shift_exp_i *= shift; - } - points -} - pub(crate) fn ifft(poly: PolynomialValues) -> PolynomialCoeffs { let precomputation = fft_precompute(poly.len()); ifft_with_precomputation_power_of_2(poly, &precomputation) } -pub(crate) fn coset_ifft(poly: PolynomialValues, shift: F) -> PolynomialCoeffs { - let shift_inv = shift.inverse(); - let mut shift_inv_exp_i = F::ONE; - let mut coeffs = ifft(poly); - for c in coeffs.coeffs.iter_mut() { - *c *= shift_inv_exp_i; - shift_inv_exp_i *= shift_inv; - } - coeffs -} - #[cfg(test)] mod tests { use crate::field::crandall_field::CrandallField; @@ -200,10 +181,9 @@ mod tests { let degree = coefficients.len(); let degree_log = log2_strict(degree); - let g = F::primitive_root_of_unity(degree_log); - let powers_of_g = F::cyclic_subgroup_known_order(g, degree); + let subgroup = F::two_adic_subgroup(degree_log); - let values = powers_of_g + let values = subgroup .into_iter() .map(|x| evaluate_at_naive(&coefficients, x)) .collect(); diff --git a/src/field/field.rs b/src/field/field.rs index 3c6f0e47..516012d2 100644 --- a/src/field/field.rs +++ b/src/field/field.rs @@ -7,6 +7,7 @@ use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssi use num::Integer; use rand::Rng; +use crate::field::extension_field::Frobenius; use crate::util::bits_u64; /// A finite field with prime order less than 2^64. @@ -103,21 +104,18 @@ pub trait Field: fn primitive_root_of_unity(n_log: usize) -> Self { assert!(n_log <= Self::TWO_ADICITY); let mut base = Self::POWER_OF_TWO_GENERATOR; - for _ in n_log..Self::TWO_ADICITY { - base = base.square(); - } - base + base.exp_power_of_2(Self::TWO_ADICITY - n_log) } /// Computes a multiplicative subgroup whose order is known in advance. fn cyclic_subgroup_known_order(generator: Self, order: usize) -> Vec { - let mut subgroup = Vec::with_capacity(order); - let mut current = Self::ONE; - for _i in 0..order { - subgroup.push(current); - current *= generator; - } - subgroup + generator.powers().take(order).collect() + } + + /// Computes the subgroup generated by the root of unity of a given order generated by `Self::primitive_root_of_unity`. 
+ fn two_adic_subgroup(n_log: usize) -> Vec { + let generator = Self::primitive_root_of_unity(n_log); + generator.powers().take(1 << n_log).collect() } fn cyclic_subgroup_unknown_order(generator: Self) -> Vec { @@ -157,6 +155,14 @@ pub trait Field: bits_u64(self.to_canonical_u64()) } + fn exp_power_of_2(&self, power_log: usize) -> Self { + let mut res = *self; + for _ in 0..power_log { + res = res.square(); + } + res + } + fn exp(&self, power: u64) -> Self { let mut current = *self; let mut product = Self::ONE; @@ -265,6 +271,11 @@ pub trait Field: fn rand_vec(n: usize) -> Vec { (0..n).map(|_| Self::rand()).collect() } + + /// Representative `g` of the coset used in FRI, so that LDEs in FRI are done over `gH`. + fn coset_shift() -> Self { + Self::MULTIPLICATIVE_GROUP_GENERATOR + } } /// An iterator over the powers of a certain base element `b`: `b^0, b^1, b^2, ...`. @@ -283,3 +294,17 @@ impl Iterator for Powers { Some(result) } } + +impl Powers { + /// Apply the Frobenius automorphism `k` times. + pub fn repeated_frobenius(self, k: usize) -> Self + where + F: Frobenius, + { + let Self { base, current } = self; + Self { + base: base.repeated_frobenius(k), + current: current.repeated_frobenius(k), + } + } +} diff --git a/src/field/lagrange.rs b/src/field/interpolation.rs similarity index 78% rename from src/field/lagrange.rs rename to src/field/interpolation.rs index 8911feb0..3d5e609c 100644 --- a/src/field/lagrange.rs +++ b/src/field/interpolation.rs @@ -10,10 +10,8 @@ use crate::util::log2_ceil; pub(crate) fn interpolant(points: &[(F, F)]) -> PolynomialCoeffs { let n = points.len(); let n_log = log2_ceil(n); - let n_padded = 1 << n_log; - let g = F::primitive_root_of_unity(n_log); - let subgroup = F::cyclic_subgroup_known_order(g, n_padded); + let subgroup = F::two_adic_subgroup(n_log); let barycentric_weights = barycentric_weights(points); let subgroup_evals = subgroup .into_iter() @@ -65,11 +63,23 @@ pub fn barycentric_weights(points: &[(F, F)]) -> Vec { ) } +/// Interpolate the linear polynomial passing through `points` on `x`. 
+pub fn interpolate2(points: [(F, F); 2], x: F) -> F { + // a0 -> a1 + // b0 -> b1 + // x -> a1 + (x-a0)*(b1-a1)/(b0-a0) + let (a0, a1) = points[0]; + let (b0, b1) = points[1]; + assert_ne!(a0, b0); + a1 + (x - a0) * (b1 - a1) / (b0 - a0) +} + #[cfg(test)] mod tests { + use super::*; use crate::field::crandall_field::CrandallField; + use crate::field::extension_field::quartic::QuarticCrandallField; use crate::field::field::Field; - use crate::field::lagrange::interpolant; use crate::polynomial::polynomial::PolynomialCoeffs; #[test] @@ -92,8 +102,7 @@ mod tests { for deg_log in 0..4 { let deg = 1 << deg_log; - let g = F::primitive_root_of_unity(deg_log); - let domain = F::cyclic_subgroup_known_order(g, deg); + let domain = F::two_adic_subgroup(deg_log); let coeffs = F::rand_vec(deg); let coeffs = PolynomialCoeffs { coeffs }; @@ -120,4 +129,18 @@ mod tests { fn eval_naive(coeffs: &PolynomialCoeffs, domain: &[F]) -> Vec<(F, F)> { domain.iter().map(|&x| (x, coeffs.eval(x))).collect() } + + #[test] + fn test_interpolate2() { + type F = QuarticCrandallField; + let points = [(F::rand(), F::rand()), (F::rand(), F::rand())]; + let x = F::rand(); + + let ev0 = interpolant(&points).eval(x); + let ev1 = interpolate(&points, x, &barycentric_weights(&points)); + let ev2 = interpolate2(points, x); + + assert_eq!(ev0, ev1); + assert_eq!(ev0, ev2); + } } diff --git a/src/field/mod.rs b/src/field/mod.rs index 179fb10d..15efe280 100644 --- a/src/field/mod.rs +++ b/src/field/mod.rs @@ -3,7 +3,7 @@ pub mod crandall_field; pub mod extension_field; pub mod fft; pub mod field; -pub(crate) mod lagrange; +pub(crate) mod interpolation; #[cfg(test)] mod field_testing; diff --git a/src/fri/mod.rs b/src/fri/mod.rs index c147e8c6..87fe3db5 100644 --- a/src/fri/mod.rs +++ b/src/fri/mod.rs @@ -1,6 +1,5 @@ -use crate::polynomial::commitment::SALT_SIZE; - pub mod prover; +mod recursive_verifier; pub mod verifier; /// Somewhat arbitrary. Smaller values will increase delta, but with diminishing returns, @@ -21,20 +20,6 @@ pub struct FriConfig { /// Number of query rounds to perform. pub num_query_rounds: usize, - - /// Vector of the same length as the number of initial Merkle trees. - /// `blinding[i]==true` iff the i-th tree is salted. 
- pub blinding: Vec, -} - -impl FriConfig { - pub(crate) fn salt_size(&self, i: usize) -> usize { - if self.blinding[i] { - SALT_SIZE - } else { - 0 - } - } } fn fri_delta(rate_log: usize, conjecture: bool) -> f64 { diff --git a/src/fri/prover.rs b/src/fri/prover.rs index d1eeadcf..f937ee55 100644 --- a/src/fri/prover.rs +++ b/src/fri/prover.rs @@ -107,7 +107,7 @@ fn fri_proof_of_work(current_hash: Hash, config: &FriConfig) -> F { ) .to_canonical_u64() .leading_zeros() - >= config.proof_of_work_bits + >= config.proof_of_work_bits + F::ORDER.leading_zeros() }) .map(F::from_canonical_u64) .expect("Proof of work failed.") diff --git a/src/fri/recursive_verifier.rs b/src/fri/recursive_verifier.rs new file mode 100644 index 00000000..fc320f31 --- /dev/null +++ b/src/fri/recursive_verifier.rs @@ -0,0 +1,358 @@ +use itertools::izip; + +use crate::circuit_builder::CircuitBuilder; +use crate::field::extension_field::target::{flatten_target, ExtensionTarget}; +use crate::field::extension_field::Extendable; +use crate::field::field::Field; +use crate::fri::FriConfig; +use crate::plonk_challenger::RecursiveChallenger; +use crate::plonk_common::PlonkPolynomials; +use crate::proof::{ + FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget, HashTarget, OpeningSetTarget, +}; +use crate::target::Target; +use crate::util::{log2_strict, reverse_index_bits_in_place}; + +impl, const D: usize> CircuitBuilder { + /// Computes P'(x^arity) from {P(x*g^i)}_(i=0..arity), where g is a `arity`-th root of unity + /// and P' is the FRI reduced polynomial. + fn compute_evaluation( + &mut self, + x: Target, + old_x_index: Target, + arity_bits: usize, + last_evals: &[ExtensionTarget], + beta: ExtensionTarget, + ) -> ExtensionTarget { + debug_assert_eq!(last_evals.len(), 1 << arity_bits); + + let g = F::primitive_root_of_unity(arity_bits); + + // The evaluation vector needs to be reordered first. + let mut evals = last_evals.to_vec(); + reverse_index_bits_in_place(&mut evals); + let mut old_x_index_bits = self.split_le(old_x_index, arity_bits); + old_x_index_bits.reverse(); + let evals = self.rotate_left_from_bits(&old_x_index_bits, &evals); + + // The answer is gotten by interpolating {(x*g^i, P(x*g^i))} and evaluating at beta. + let points = g + .powers() + .map(|y| { + let yt = self.constant(y); + self.mul(x, yt) + }) + .zip(evals) + .collect::>(); + + self.interpolate(&points, beta) + } + + fn fri_verify_proof_of_work( + &mut self, + proof: &FriProofTarget, + challenger: &mut RecursiveChallenger, + config: &FriConfig, + ) { + let mut inputs = challenger.get_hash(self).elements.to_vec(); + inputs.push(proof.pow_witness); + + let hash = self.hash_n_to_m(inputs, 1, false)[0]; + self.assert_leading_zeros(hash, config.proof_of_work_bits + F::ORDER.leading_zeros()); + } + + pub fn verify_fri_proof( + &mut self, + purported_degree_log: usize, + // Openings of the PLONK polynomials. + os: &OpeningSetTarget, + // Point at which the PLONK polynomials are opened. + zeta: ExtensionTarget, + // Scaling factor to combine polynomials. + alpha: ExtensionTarget, + initial_merkle_roots: &[HashTarget], + proof: &FriProofTarget, + challenger: &mut RecursiveChallenger, + config: &FriConfig, + ) { + let total_arities = config.reduction_arity_bits.iter().sum::(); + debug_assert_eq!( + purported_degree_log, + log2_strict(proof.final_poly.len()) + total_arities - config.rate_bits, + "Final polynomial has wrong degree." + ); + + // Size of the LDE domain. 
+ let n = proof.final_poly.len() << total_arities; + + // Recover the random betas used in the FRI reductions. + let betas = proof + .commit_phase_merkle_roots + .iter() + .map(|root| { + challenger.observe_hash(root); + challenger.get_extension_challenge(self) + }) + .collect::>(); + challenger.observe_extension_elements(&proof.final_poly.0); + + // Check PoW. + self.fri_verify_proof_of_work(proof, challenger, config); + + // Check that parameters are coherent. + debug_assert_eq!( + config.num_query_rounds, + proof.query_round_proofs.len(), + "Number of query rounds does not match config." + ); + debug_assert!( + !config.reduction_arity_bits.is_empty(), + "Number of reductions should be non-zero." + ); + + for round_proof in &proof.query_round_proofs { + self.fri_verifier_query_round( + os, + zeta, + alpha, + initial_merkle_roots, + &proof, + challenger, + n, + &betas, + round_proof, + config, + ); + } + } + + fn fri_verify_initial_proof( + &mut self, + x_index: Target, + proof: &FriInitialTreeProofTarget, + initial_merkle_roots: &[HashTarget], + ) { + for ((evals, merkle_proof), &root) in proof.evals_proofs.iter().zip(initial_merkle_roots) { + self.verify_merkle_proof(evals.clone(), x_index, root, merkle_proof); + } + } + + fn fri_combine_initial( + &mut self, + proof: &FriInitialTreeProofTarget, + alpha: ExtensionTarget, + os: &OpeningSetTarget, + zeta: ExtensionTarget, + subgroup_x: Target, + ) -> ExtensionTarget { + assert!(D > 1, "Not implemented for D=1."); + let config = &self.config.fri_config.clone(); + let degree_log = proof.evals_proofs[0].1.siblings.len() - config.rate_bits; + let subgroup_x = self.convert_to_ext(subgroup_x); + let mut alpha_powers = self.powers(alpha); + let mut sum = self.zero_extension(); + + // We will add three terms to `sum`: + // - one for polynomials opened at `x` only + // - one for polynomials opened at `x` and `g x` + // - one for polynomials opened at `x` and `x.frobenius()` + + // Polynomials opened at `x`, i.e., the constants, sigmas and quotient polynomials. + let single_evals = [ + PlonkPolynomials::CONSTANTS_SIGMAS, + PlonkPolynomials::QUOTIENT, + ] + .iter() + .flat_map(|&p| proof.unsalted_evals(p)) + .map(|&e| self.convert_to_ext(e)) + .collect::>(); + let single_openings = os + .constants + .iter() + .chain(&os.plonk_sigmas) + .chain(&os.quotient_polys); + let mut single_numerator = self.zero_extension(); + for (e, &o) in izip!(single_evals, single_openings) { + let a = alpha_powers.next(self); + let diff = self.sub_extension(e, o); + single_numerator = self.mul_add_extension(a, diff, single_numerator); + } + let single_denominator = self.sub_extension(subgroup_x, zeta); + let quotient = self.div_unsafe_extension(single_numerator, single_denominator); + sum = self.add_extension(sum, quotient); + + // Polynomials opened at `x` and `g x`, i.e., the Zs polynomials. 
+ let zs_evals = proof + .unsalted_evals(PlonkPolynomials::ZS) + .iter() + .map(|&e| self.convert_to_ext(e)) + .collect::>(); + // TODO: Would probably be more efficient using `CircuitBuilder::reduce_with_powers_recursive` + let mut zs_composition_eval = self.zero_extension(); + let mut alpha_powers_cloned = alpha_powers.clone(); + for &e in &zs_evals { + let a = alpha_powers_cloned.next(self); + zs_composition_eval = self.mul_add_extension(a, e, zs_composition_eval); + } + + let g = self.constant_extension(F::Extension::primitive_root_of_unity(degree_log)); + let zeta_right = self.mul_extension(g, zeta); + let mut zs_ev_zeta = self.zero_extension(); + let mut alpha_powers_cloned = alpha_powers.clone(); + for &t in &os.plonk_zs { + let a = alpha_powers_cloned.next(self); + zs_ev_zeta = self.mul_add_extension(a, t, zs_ev_zeta); + } + let mut zs_ev_zeta_right = self.zero_extension(); + for &t in &os.plonk_zs_right { + let a = alpha_powers.next(self); + zs_ev_zeta_right = self.mul_add_extension(a, t, zs_ev_zeta_right); + } + let interpol_val = self.interpolate2( + [(zeta, zs_ev_zeta), (zeta_right, zs_ev_zeta_right)], + subgroup_x, + ); + let zs_numerator = self.sub_extension(zs_composition_eval, interpol_val); + let vanish_zeta = self.sub_extension(subgroup_x, zeta); + let vanish_zeta_right = self.sub_extension(subgroup_x, zeta_right); + let zs_denominator = self.mul_extension(vanish_zeta, vanish_zeta_right); + let zs_quotient = self.div_unsafe_extension(zs_numerator, zs_denominator); + sum = self.add_extension(sum, zs_quotient); + + // Polynomials opened at `x` and `x.frobenius()`, i.e., the wires polynomials. + let wire_evals = proof + .unsalted_evals(PlonkPolynomials::WIRES) + .iter() + .map(|&e| self.convert_to_ext(e)) + .collect::>(); + let mut wire_composition_eval = self.zero_extension(); + let mut alpha_powers_cloned = alpha_powers.clone(); + for &e in &wire_evals { + let a = alpha_powers_cloned.next(self); + wire_composition_eval = self.mul_add_extension(a, e, wire_composition_eval); + } + let mut alpha_powers_cloned = alpha_powers.clone(); + let wire_eval = os.wires.iter().fold(self.zero_extension(), |acc, &w| { + let a = alpha_powers_cloned.next(self); + self.mul_add_extension(a, w, acc) + }); + let mut alpha_powers_frob = alpha_powers.repeated_frobenius(D - 1, self); + let wire_eval_frob = os + .wires + .iter() + .fold(self.zero_extension(), |acc, &w| { + let a = alpha_powers_frob.next(self); + self.mul_add_extension(a, w, acc) + }) + .frobenius(self); + let zeta_frob = zeta.frobenius(self); + let wire_interpol_val = + self.interpolate2([(zeta, wire_eval), (zeta_frob, wire_eval_frob)], subgroup_x); + let wire_numerator = self.sub_extension(wire_composition_eval, wire_interpol_val); + let vanish_zeta_frob = self.sub_extension(subgroup_x, zeta_frob); + let wire_denominator = self.mul_extension(vanish_zeta, vanish_zeta_frob); + let wire_quotient = self.div_unsafe_extension(wire_numerator, wire_denominator); + sum = self.add_extension(sum, wire_quotient); + + sum + } + + fn fri_verifier_query_round( + &mut self, + os: &OpeningSetTarget, + zeta: ExtensionTarget, + alpha: ExtensionTarget, + initial_merkle_roots: &[HashTarget], + proof: &FriProofTarget, + challenger: &mut RecursiveChallenger, + n: usize, + betas: &[ExtensionTarget], + round_proof: &FriQueryRoundTarget, + config: &FriConfig, + ) { + let n_log = log2_strict(n); + let mut evaluations: Vec>> = Vec::new(); + // TODO: Do we need to range check `x_index` to a target smaller than `p`?
+ let mut x_index = challenger.get_challenge(self); + x_index = self.split_low_high(x_index, n_log, 64).0; + let mut x_index_num_bits = n_log; + let mut domain_size = n; + self.fri_verify_initial_proof( + x_index, + &round_proof.initial_trees_proof, + initial_merkle_roots, + ); + let mut old_x_index = self.zero(); + // `subgroup_x` is `subgroup[x_index]`, i.e., the actual field element in the domain. + let g = self.constant(F::MULTIPLICATIVE_GROUP_GENERATOR); + let phi = self.constant(F::primitive_root_of_unity(n_log)); + + let reversed_x = self.reverse_limbs::<2>(x_index, n_log); + let phi = self.exp(phi, reversed_x, n_log); + let mut subgroup_x = self.mul(g, phi); + + for (i, &arity_bits) in config.reduction_arity_bits.iter().enumerate() { + let next_domain_size = domain_size >> arity_bits; + let e_x = if i == 0 { + self.fri_combine_initial( + &round_proof.initial_trees_proof, + alpha, + os, + zeta, + subgroup_x, + ) + } else { + let last_evals = &evaluations[i - 1]; + // Infer P(y) from {P(x)}_{x^arity=y}. + self.compute_evaluation( + subgroup_x, + old_x_index, + config.reduction_arity_bits[i - 1], + last_evals, + betas[i - 1], + ) + }; + let mut evals = round_proof.steps[i].evals.clone(); + // Insert P(y) into the evaluation vector, since it wasn't included by the prover. + let (low_x_index, high_x_index) = + self.split_low_high(x_index, arity_bits, x_index_num_bits); + evals = self.insert(low_x_index, e_x, evals); + evaluations.push(evals); + self.verify_merkle_proof( + flatten_target(&evaluations[i]), + high_x_index, + proof.commit_phase_merkle_roots[i], + &round_proof.steps[i].merkle_proof, + ); + + if i > 0 { + // Update the point x to x^arity. + for _ in 0..config.reduction_arity_bits[i - 1] { + subgroup_x = self.square(subgroup_x); + } + } + domain_size = next_domain_size; + old_x_index = low_x_index; + x_index = high_x_index; + x_index_num_bits -= arity_bits; + } + + let last_evals = evaluations.last().unwrap(); + let final_arity_bits = *config.reduction_arity_bits.last().unwrap(); + let purported_eval = self.compute_evaluation( + subgroup_x, + old_x_index, + final_arity_bits, + last_evals, + *betas.last().unwrap(), + ); + for _ in 0..final_arity_bits { + subgroup_x = self.square(subgroup_x); + } + + // Final check of FRI. After all the reductions, we check that the final polynomial is equal + // to the one sent by the prover. 
+ let eval = proof.final_poly.eval_scalar(self, subgroup_x); + self.assert_equal_extension(eval, purported_eval); + } +} diff --git a/src/fri/verifier.rs b/src/fri/verifier.rs index 68f4ee15..3db4f6f5 100644 --- a/src/fri/verifier.rs +++ b/src/fri/verifier.rs @@ -1,14 +1,15 @@ use anyhow::{ensure, Result}; -use crate::field::extension_field::{flatten, Extendable, FieldExtension, OEF}; +use crate::field::extension_field::{flatten, Extendable, FieldExtension, Frobenius}; use crate::field::field::Field; -use crate::field::lagrange::{barycentric_weights, interpolant, interpolate}; +use crate::field::interpolation::{barycentric_weights, interpolate, interpolate2}; use crate::fri::FriConfig; use crate::hash::hash_n_to_1; use crate::merkle_proofs::verify_merkle_proof; use crate::plonk_challenger::Challenger; -use crate::plonk_common::reduce_with_iter; +use crate::plonk_common::PlonkPolynomials; use crate::proof::{FriInitialTreeProof, FriProof, FriQueryRound, Hash, OpeningSet}; +use crate::util::scaling::ReducingFactor; use crate::util::{log2_strict, reverse_bits, reverse_index_bits_in_place}; /// Computes P'(x^arity) from {P(x*g^i)}_(i=0..arity), where g is a `arity`-th root of unity @@ -151,59 +152,76 @@ fn fri_combine_initial, const D: usize>( assert!(D > 1, "Not implemented for D=1."); let degree_log = proof.evals_proofs[0].1.siblings.len() - config.rate_bits; let subgroup_x = F::Extension::from_basefield(subgroup_x); - let mut alpha_powers = alpha.powers(); + let mut alpha = ReducingFactor::new(alpha); let mut sum = F::Extension::ZERO; // We will add three terms to `sum`: // - one for various polynomials which are opened at a single point `x` // - one for Zs, which are opened at `x` and `g x` - // - one for wire polynomials, which are opened at `x` and its conjugate + // - one for wire polynomials, which are opened at `x` and `x.frobenius()` - let single_evals = [0, 1, 4] - .iter() - .flat_map(|&i| proof.unsalted_evals(i, config)) - .map(|&e| F::Extension::from_basefield(e)); + // Polynomials opened at `x`, i.e., the constants, sigmas and quotient polynomials. + let single_evals = [ + PlonkPolynomials::CONSTANTS_SIGMAS, + PlonkPolynomials::QUOTIENT, + ] + .iter() + .flat_map(|&p| proof.unsalted_evals(p)) + .map(|&e| F::Extension::from_basefield(e)); let single_openings = os .constants .iter() .chain(&os.plonk_s_sigmas) .chain(&os.quotient_polys); - let single_diffs = single_evals.zip(single_openings).map(|(e, &o)| e - o); - let single_numerator = reduce_with_iter(single_diffs, &mut alpha_powers); + let single_diffs = single_evals + .into_iter() + .zip(single_openings) + .map(|(e, &o)| e - o) + .collect::>(); + let single_numerator = alpha.reduce(single_diffs.iter()); let single_denominator = subgroup_x - zeta; sum += single_numerator / single_denominator; + alpha.reset(); + // Polynomials opened at `x` and `g x`, i.e., the Zs polynomials. 
let zs_evals = proof - .unsalted_evals(3, config) + .unsalted_evals(PlonkPolynomials::ZS) .iter() .map(|&e| F::Extension::from_basefield(e)); - let zs_composition_eval = reduce_with_iter(zs_evals, alpha_powers.clone()); + let zs_composition_eval = alpha.clone().reduce(zs_evals); let zeta_right = F::Extension::primitive_root_of_unity(degree_log) * zeta; - let zs_interpol = interpolant(&[ - (zeta, reduce_with_iter(&os.plonk_zs, alpha_powers.clone())), - ( - zeta_right, - reduce_with_iter(&os.plonk_zs_right, &mut alpha_powers), - ), - ]); - let zs_numerator = zs_composition_eval - zs_interpol.eval(subgroup_x); + let zs_interpol = interpolate2( + [ + (zeta, alpha.clone().reduce(os.plonk_zs.iter())), + (zeta_right, alpha.reduce(os.plonk_zs_right.iter())), + ], + subgroup_x, + ); + let zs_numerator = zs_composition_eval - zs_interpol; let zs_denominator = (subgroup_x - zeta) * (subgroup_x - zeta_right); + sum = alpha.shift(sum); sum += zs_numerator / zs_denominator; + // Polynomials opened at `x` and `x.frobenius()`, i.e., the wires polynomials. let wire_evals = proof - .unsalted_evals(2, config) + .unsalted_evals(PlonkPolynomials::WIRES) .iter() .map(|&e| F::Extension::from_basefield(e)); - let wire_composition_eval = reduce_with_iter(wire_evals, alpha_powers.clone()); + let wire_composition_eval = alpha.clone().reduce(wire_evals); let zeta_frob = zeta.frobenius(); - let wire_evals_frob = os.wires.iter().map(|e| e.frobenius()); - let wires_interpol = interpolant(&[ - (zeta, reduce_with_iter(&os.wires, alpha_powers.clone())), - (zeta_frob, reduce_with_iter(wire_evals_frob, alpha_powers)), - ]); - let wires_numerator = wire_composition_eval - wires_interpol.eval(subgroup_x); - let wires_denominator = (subgroup_x - zeta) * (subgroup_x - zeta_frob); - sum += wires_numerator / wires_denominator; + let mut alpha_frob = alpha.repeated_frobenius(D - 1); + let wire_eval = alpha.reduce(os.wires.iter()); + // We want to compute `sum a^i*phi(w_i)`, where `phi` denotes the Frobenius automorphism. + // Since `phi^D=id` and `phi` is a field automorphism, we have the following equalities: + // `sum a^i*phi(w_i) = sum phi(phi^(D-1)(a^i)*w_i) = phi(sum phi^(D-1)(a)^i*w_i)` + // So we can compute the original sum using only one call to the `D-1`-repeated Frobenius of alpha, + // and one call at the end of the sum. 
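+ // For example with `D = 4`: `phi(phi^3(a)) = phi^4(a) = a`, so term by term `phi(phi^3(a)^i * w_i) = a^i * phi(w_i)`.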
+ let wire_eval_frob = alpha_frob.reduce(os.wires.iter()).frobenius(); + let wire_interpol = interpolate2([(zeta, wire_eval), (zeta_frob, wire_eval_frob)], subgroup_x); + let wire_numerator = wire_composition_eval - wire_interpol; + let wire_denominator = (subgroup_x - zeta) * (subgroup_x - zeta_frob); + sum = alpha.shift(sum); + sum += wire_numerator / wire_denominator; sum } @@ -276,7 +294,7 @@ fn fri_verifier_query_round, const D: usize>( } } domain_size = next_domain_size; - old_x_index = x_index; + old_x_index = x_index & (arity - 1); x_index >>= arity_bits; } diff --git a/src/gadgets/arithmetic.rs b/src/gadgets/arithmetic.rs index a214df3f..2f8b1559 100644 --- a/src/gadgets/arithmetic.rs +++ b/src/gadgets/arithmetic.rs @@ -1,7 +1,11 @@ +use std::convert::TryInto; + use crate::circuit_builder::CircuitBuilder; -use crate::field::extension_field::Extendable; +use crate::field::extension_field::target::ExtensionTarget; +use crate::field::extension_field::{Extendable, FieldExtension}; use crate::field::field::Field; use crate::gates::arithmetic::ArithmeticGate; +use crate::gates::mul_extension::MulExtensionGate; use crate::generator::SimpleGenerator; use crate::target::Target; use crate::wire::Wire; @@ -169,6 +173,22 @@ impl, const D: usize> CircuitBuilder { product } + // TODO: Optimize this, maybe with a new gate. + /// Exponentiate `base` to the power of `exponent`, where `exponent < 2^num_bits`. + pub fn exp(&mut self, base: Target, exponent: Target, num_bits: usize) -> Target { + let mut current = base; + let one = self.one(); + let mut product = one; + let exponent_bits = self.split_le(exponent, num_bits); + + for bit in exponent_bits.into_iter() { + // Multiply `product` by `current` when `bit` is 1 and by 1 when `bit` is 0: + // `bit * (current - 1) + 1` equals `current` for `bit == 1` and 1 otherwise. + let bit_times_current_minus_one = self.arithmetic(F::ONE, bit, current, F::NEG_ONE, bit); + let multiplicand = self.add(bit_times_current_minus_one, one); + product = self.mul(product, multiplicand); + current = self.mul(current, current); + } + + product + } + /// Computes `q = x / y` by witnessing `q` and requiring that `q * y = x`. This can be unsafe in /// some cases, as it allows `0 / 0 = `. pub fn div_unsafe(&mut self, x: Target, y: Target) -> Target { @@ -224,6 +244,38 @@ impl, const D: usize> CircuitBuilder { q } + + /// Computes `q = x / y` by witnessing `q` and requiring that `q * y = x`. This can be unsafe in + /// some cases, as it allows `0 / 0 = `. + pub fn div_unsafe_extension( + &mut self, + x: ExtensionTarget, + y: ExtensionTarget, + ) -> ExtensionTarget { + // Add a `MulExtensionGate` to compute `q * y`.
+ let gate = self.add_gate(MulExtensionGate::new(), vec![F::ONE]); + + let multiplicand_0 = + Target::wires_from_range(gate, MulExtensionGate::::wires_multiplicand_0()); + let multiplicand_0 = ExtensionTarget(multiplicand_0.try_into().unwrap()); + let multiplicand_1 = + Target::wires_from_range(gate, MulExtensionGate::::wires_multiplicand_1()); + let multiplicand_1 = ExtensionTarget(multiplicand_1.try_into().unwrap()); + let output = Target::wires_from_range(gate, MulExtensionGate::::wires_output()); + let output = ExtensionTarget(output.try_into().unwrap()); + + self.add_generator(QuotientGeneratorExtension { + numerator: x, + denominator: y, + quotient: multiplicand_0, + }); + + self.route_extension(y, multiplicand_1); + + self.assert_equal_extension(output, x); + + multiplicand_0 + } } struct QuotientGenerator { @@ -243,3 +295,136 @@ impl SimpleGenerator for QuotientGenerator { PartialWitness::singleton_target(self.quotient, num / den) } } + +struct QuotientGeneratorExtension { + numerator: ExtensionTarget, + denominator: ExtensionTarget, + quotient: ExtensionTarget, +} + +impl, const D: usize> SimpleGenerator for QuotientGeneratorExtension { + fn dependencies(&self) -> Vec { + let mut deps = self.numerator.to_target_array().to_vec(); + deps.extend(&self.denominator.to_target_array()); + deps + } + + fn run_once(&self, witness: &PartialWitness) -> PartialWitness { + let num = witness.get_extension_target(self.numerator); + let dem = witness.get_extension_target(self.denominator); + let quotient = num / dem; + let mut pw = PartialWitness::new(); + for i in 0..D { + pw.set_target( + self.quotient.to_target_array()[i], + quotient.to_basefield_array()[i], + ); + } + pw + } +} + +/// An iterator over the powers of a certain base element `b`: `b^0, b^1, b^2, ...`. +#[derive(Clone)] +pub struct PowersTarget { + base: ExtensionTarget, + current: ExtensionTarget, +} + +impl PowersTarget { + pub fn next>( + &mut self, + builder: &mut CircuitBuilder, + ) -> ExtensionTarget { + let result = self.current; + self.current = builder.mul_extension(self.base, self.current); + result + } + + pub fn repeated_frobenius>( + self, + k: usize, + builder: &mut CircuitBuilder, + ) -> Self { + let Self { base, current } = self; + Self { + base: base.repeated_frobenius(k, builder), + current: current.repeated_frobenius(k, builder), + } + } +} + +impl, const D: usize> CircuitBuilder { + pub fn powers(&mut self, base: ExtensionTarget) -> PowersTarget { + PowersTarget { + base, + current: self.one_extension(), + } + } +} + +#[cfg(test)] +mod tests { + use crate::circuit_builder::CircuitBuilder; + use crate::circuit_data::CircuitConfig; + use crate::field::crandall_field::CrandallField; + use crate::field::extension_field::quartic::QuarticCrandallField; + use crate::field::field::Field; + use crate::fri::FriConfig; + use crate::gates::arithmetic::ArithmeticGate; + use crate::target::Target; + use crate::witness::PartialWitness; + + #[test] + fn test_div() { + type F = CrandallField; + type FF = QuarticCrandallField; + const D: usize = 4; + + let config = CircuitConfig::large_config(); + + let mut builder = CircuitBuilder::::new(config); + + let x = F::rand(); + let y = F::rand(); + let mut pw = PartialWitness::new(); + /// Computes x*x + 0*y = x^2. 
+ let square_gate = builder.add_gate(ArithmeticGate::new(), vec![F::ONE, F::ZERO]); + pw.set_target(Target::wire(square_gate, 0), x); + pw.set_target(Target::wire(square_gate, 1), x); + let x2t = Target::wire(square_gate, ArithmeticGate::WIRE_OUTPUT); + let yt = Target::wire(square_gate, ArithmeticGate::WIRE_ADDEND); + pw.set_target(yt, y); + // Constant for x*x/y. + let zt = builder.constant(x * x / y); + // Computed division for x*x/y using the division gadget. + let comp_zt = builder.div_unsafe(x2t, yt); + builder.assert_equal(zt, comp_zt); + + let data = builder.build(); + let proof = data.prove(pw); + } + + #[test] + fn test_div_extension() { + type F = CrandallField; + type FF = QuarticCrandallField; + const D: usize = 4; + + let config = CircuitConfig::large_config(); + + let mut builder = CircuitBuilder::::new(config); + + let x = FF::rand(); + let y = FF::rand(); + let z = x / y; + let xt = builder.constant_extension(x); + let yt = builder.constant_extension(y); + let zt = builder.constant_extension(z); + let comp_zt = builder.div_unsafe_extension(xt, yt); + builder.assert_equal_extension(zt, comp_zt); + + let data = builder.build(); + let proof = data.prove(PartialWitness::new()); + } +} diff --git a/src/gadgets/insert.rs b/src/gadgets/insert.rs new file mode 100644 index 00000000..64cf7299 --- /dev/null +++ b/src/gadgets/insert.rs @@ -0,0 +1,71 @@ +use crate::circuit_builder::CircuitBuilder; +use crate::field::extension_field::target::ExtensionTarget; +use crate::field::extension_field::Extendable; +use crate::target::Target; + +impl, const D: usize> CircuitBuilder { + /// Inserts a `Target` in a vector at a non-deterministic index. This is done by rotating to the + /// left, inserting at 0 and then rotating to the right. + /// Note: `index` is not range-checked. 
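+ /// On plain vectors the same identity reads: rotating left by `index`, inserting at position 0, then rotating right by `index` is equivalent to inserting at `index`; e.g. inserting `e` at index 2 of `[a, b, c, d]` goes `[c, d, a, b]` -> `[e, c, d, a, b]` -> `[a, b, e, c, d]`.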
+ pub fn insert( + &mut self, + index: Target, + element: ExtensionTarget, + v: Vec>, + ) -> Vec> { + let mut v = self.rotate_left(index, &v); + v.insert(0, element); + self.rotate_right(index, &v) + } +} +#[cfg(test)] +mod tests { + use super::*; + use crate::circuit_data::CircuitConfig; + use crate::field::crandall_field::CrandallField; + use crate::field::extension_field::quartic::QuarticCrandallField; + use crate::field::field::Field; + use crate::witness::PartialWitness; + + fn real_insert( + index: usize, + element: ExtensionTarget, + v: &[ExtensionTarget], + ) -> Vec> { + let mut res = v.to_vec(); + res.insert(index, element); + res + } + + fn test_insert_given_len(len_log: usize) { + type F = CrandallField; + type FF = QuarticCrandallField; + let len = 1 << len_log; + let config = CircuitConfig::large_config(); + let mut builder = CircuitBuilder::::new(config); + let v = (0..len - 1) + .map(|_| builder.constant_extension(FF::rand())) + .collect::>(); + + for i in 0..len { + let it = builder.constant(F::from_canonical_usize(i)); + let elem = builder.constant_extension(FF::rand()); + let inserted = real_insert(i, elem, &v); + let purported_inserted = builder.insert(it, elem, v.clone()); + + for (x, y) in inserted.into_iter().zip(purported_inserted) { + builder.route_extension(x, y); + } + } + + let data = builder.build(); + let proof = data.prove(PartialWitness::new()); + } + + #[test] + fn test_insert() { + for len_log in 1..3 { + test_insert_given_len(len_log); + } + } +} diff --git a/src/gadgets/interpolation.rs b/src/gadgets/interpolation.rs new file mode 100644 index 00000000..40b91c1e --- /dev/null +++ b/src/gadgets/interpolation.rs @@ -0,0 +1,142 @@ +use std::marker::PhantomData; + +use crate::circuit_builder::CircuitBuilder; +use crate::field::extension_field::target::ExtensionTarget; +use crate::field::extension_field::Extendable; +use crate::gates::interpolation::InterpolationGate; +use crate::target::Target; + +impl, const D: usize> CircuitBuilder { + /// Interpolate two points. No need for an `InterpolationGate` since the coefficients + /// of the linear interpolation polynomial can be easily computed with arithmetic operations. + pub fn interpolate2( + &mut self, + interpolation_points: [(ExtensionTarget, ExtensionTarget); 2], + evaluation_point: ExtensionTarget, + ) -> ExtensionTarget { + // a0 -> a1 + // b0 -> b1 + // x -> a1 + (x-a0)*(b1-a1)/(b0-a0) + + let x_m_a0 = self.sub_extension(evaluation_point, interpolation_points[0].0); + let b1_m_a1 = self.sub_extension(interpolation_points[1].1, interpolation_points[0].1); + let b0_m_a0 = self.sub_extension(interpolation_points[1].0, interpolation_points[0].0); + let quotient = self.div_unsafe_extension(b1_m_a1, b0_m_a0); + + self.mul_add_extension(x_m_a0, quotient, interpolation_points[0].1) + } + + /// Interpolate a list of point/evaluation pairs at a given point. + /// Returns the evaluation of the interpolated polynomial at `evaluation_point`. 
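A quick numeric check of the two-point formula used by `interpolate2` above, namely a1 + (x - a0)*(b1 - a1)/(b0 - a0), run over floats rather than field elements (illustration only):

fn main() {
    // Interpolation points (a0, a1) and (b0, b1), and an evaluation point x.
    let (a0, a1) = (2.0_f64, 5.0_f64);
    let (b0, b1) = (6.0_f64, 13.0_f64);
    let x = 3.0_f64;

    // Formula used by `interpolate2`.
    let y = a1 + (x - a0) * (b1 - a1) / (b0 - a0);

    // The two points lie on the line y = 2t + 1, so the value at 3 is 7.
    assert_eq!(y, 7.0);
}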
+ pub fn interpolate( + &mut self, + interpolation_points: &[(Target, ExtensionTarget)], + evaluation_point: ExtensionTarget, + ) -> ExtensionTarget { + let gate = InterpolationGate:: { + num_points: interpolation_points.len(), + _phantom: PhantomData, + }; + let gate_index = + self.add_gate_no_constants(InterpolationGate::new(interpolation_points.len())); + for (i, &(p, v)) in interpolation_points.iter().enumerate() { + self.route(p, Target::wire(gate_index, gate.wire_point(i))); + self.route_extension( + v, + ExtensionTarget::from_range(gate_index, gate.wires_value(i)), + ); + } + self.route_extension( + evaluation_point, + ExtensionTarget::from_range(gate_index, gate.wires_evaluation_point()), + ); + + ExtensionTarget::from_range(gate_index, gate.wires_evaluation_value()) + } +} + +#[cfg(test)] +mod tests { + use std::convert::TryInto; + + use super::*; + use crate::circuit_data::CircuitConfig; + use crate::field::crandall_field::CrandallField; + use crate::field::extension_field::quartic::QuarticCrandallField; + use crate::field::extension_field::FieldExtension; + use crate::field::field::Field; + use crate::field::interpolation::{interpolant, interpolate}; + use crate::witness::PartialWitness; + + #[test] + fn test_interpolate() { + type F = CrandallField; + type FF = QuarticCrandallField; + let config = CircuitConfig { + num_routed_wires: 18, + ..CircuitConfig::large_config() + }; + let mut builder = CircuitBuilder::::new(config); + + let len = 2; + let points = (0..len) + .map(|_| (F::rand(), FF::rand())) + .collect::>(); + + let homogeneous_points = points + .iter() + .map(|&(a, b)| (>::from_basefield(a), b)) + .collect::>(); + + let true_interpolant = interpolant(&homogeneous_points); + + let z = FF::rand(); + let true_eval = true_interpolant.eval(z); + + let points_target = points + .iter() + .map(|&(p, v)| (builder.constant(p), builder.constant_extension(v))) + .collect::>(); + + let zt = builder.constant_extension(z); + + let eval = builder.interpolate(&points_target, zt); + let true_eval_target = builder.constant_extension(true_eval); + builder.assert_equal_extension(eval, true_eval_target); + + let data = builder.build(); + let proof = data.prove(PartialWitness::new()); + } + + #[test] + fn test_interpolate2() { + type F = CrandallField; + type FF = QuarticCrandallField; + let config = CircuitConfig::large_config(); + let mut builder = CircuitBuilder::::new(config); + + let len = 2; + let points = (0..len) + .map(|_| (FF::rand(), FF::rand())) + .collect::>(); + + let true_interpolant = interpolant(&points); + + let z = FF::rand(); + let true_eval = true_interpolant.eval(z); + + let points_target = points + .iter() + .map(|&(p, v)| (builder.constant_extension(p), builder.constant_extension(v))) + .collect::>(); + + let zt = builder.constant_extension(z); + + let eval = builder.interpolate2(points_target.try_into().unwrap(), zt); + let true_eval_target = builder.constant_extension(true_eval); + builder.assert_equal_extension(eval, true_eval_target); + + let data = builder.build(); + let proof = data.prove(PartialWitness::new()); + } +} diff --git a/src/gadgets/mod.rs b/src/gadgets/mod.rs index 9a6a728e..a1e041fc 100644 --- a/src/gadgets/mod.rs +++ b/src/gadgets/mod.rs @@ -1,4 +1,9 @@ pub mod arithmetic; pub mod hash; +pub mod insert; +pub mod interpolation; pub mod polynomial; +pub mod range_check; +pub mod rotate; +pub mod split_base; pub(crate) mod split_join; diff --git a/src/gadgets/polynomial.rs b/src/gadgets/polynomial.rs index 543be834..07bc1952 100644 --- 
a/src/gadgets/polynomial.rs +++ b/src/gadgets/polynomial.rs @@ -6,6 +6,10 @@ use crate::target::Target; pub struct PolynomialCoeffsExtTarget(pub Vec>); impl PolynomialCoeffsExtTarget { + pub fn len(&self) -> usize { + self.0.len() + } + pub fn eval_scalar>( &self, builder: &mut CircuitBuilder, diff --git a/src/gadgets/range_check.rs b/src/gadgets/range_check.rs new file mode 100644 index 00000000..7fd35efc --- /dev/null +++ b/src/gadgets/range_check.rs @@ -0,0 +1,63 @@ +use crate::circuit_builder::CircuitBuilder; +use crate::field::extension_field::Extendable; +use crate::field::field::Field; +use crate::gates::base_sum::BaseSumGate; +use crate::generator::SimpleGenerator; +use crate::target::Target; +use crate::witness::PartialWitness; + +impl, const D: usize> CircuitBuilder { + /// Checks that `x < 2^n_log` using a `BaseSumGate`. + pub fn range_check(&mut self, x: Target, n_log: usize) { + let gate = self.add_gate(BaseSumGate::<2>::new(n_log), vec![]); + let sum = Target::wire(gate, BaseSumGate::<2>::WIRE_SUM); + self.route(x, sum); + } + + /// Returns `(a,b)` such that `x = a + 2^n_log * b` with `a < 2^n_log`. + /// `x` is assumed to be range-checked for having `num_bits` bits. + pub fn split_low_high(&mut self, x: Target, n_log: usize, num_bits: usize) -> (Target, Target) { + let low_gate = self.add_gate(BaseSumGate::<2>::new(n_log), vec![]); + let high_gate = self.add_gate(BaseSumGate::<2>::new(num_bits - n_log), vec![]); + let low = Target::wire(low_gate, BaseSumGate::<2>::WIRE_SUM); + let high = Target::wire(high_gate, BaseSumGate::<2>::WIRE_SUM); + self.add_generator(LowHighGenerator { + integer: x, + n_log, + low, + high, + }); + + let pow2 = self.constant(F::from_canonical_u64(1 << n_log)); + let comp_x = self.mul_add(high, pow2, low); + self.assert_equal(x, comp_x); + + (low, high) + } +} + +#[derive(Debug)] +struct LowHighGenerator { + integer: Target, + n_log: usize, + low: Target, + high: Target, +} + +impl SimpleGenerator for LowHighGenerator { + fn dependencies(&self) -> Vec { + vec![self.integer] + } + + fn run_once(&self, witness: &PartialWitness) -> PartialWitness { + let integer_value = witness.get_target(self.integer).to_canonical_u64(); + let low = integer_value & ((1 << self.n_log) - 1); + let high = integer_value >> self.n_log; + + let mut result = PartialWitness::new(); + result.set_target(self.low, F::from_canonical_u64(low)); + result.set_target(self.high, F::from_canonical_u64(high)); + + result + } +} diff --git a/src/gadgets/rotate.rs b/src/gadgets/rotate.rs new file mode 100644 index 00000000..bd39a36b --- /dev/null +++ b/src/gadgets/rotate.rs @@ -0,0 +1,161 @@ +use crate::circuit_builder::CircuitBuilder; +use crate::field::extension_field::target::ExtensionTarget; +use crate::field::extension_field::Extendable; +use crate::target::Target; +use crate::util::log2_ceil; + +impl, const D: usize> CircuitBuilder { + /// Selects `x` or `y` based on `b`, which is assumed to be binary. + /// In particular, this returns `if b { x } else { y }`. + /// Note: This does not range-check `b`. + // TODO: This uses 10 gates per call. If addends are added to `MulExtensionGate`, this will be + // reduced to 2 gates. We could also use a new degree 2 `SelectGate` for this. + // If `num_routed_wire` is larger than 26, we could batch two `select` in one gate. 
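`select` below is built from two `scalar_mul_sub_extension` calls: it computes b*x - (b*y - y) = b*(x - y) + y, which is x when b = 1 and y when b = 0. A one-function integer model of that identity (illustration only):

// b*x - (b*y - y) = b*(x - y) + y.
fn select(b: i64, x: i64, y: i64) -> i64 {
    let b_y_minus_y = b * y - y;
    b * x - b_y_minus_y
}

fn main() {
    assert_eq!(select(1, 7, 9), 7); // b = 1 picks x
    assert_eq!(select(0, 7, 9), 9); // b = 0 picks y
}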
+ pub fn select( + &mut self, + b: Target, + x: ExtensionTarget, + y: ExtensionTarget, + ) -> ExtensionTarget { + let b_y_minus_y = self.scalar_mul_sub_extension(b, y, y); + self.scalar_mul_sub_extension(b, x, b_y_minus_y) + } + + /// Left-rotates an array `k` times if `b=1` else return the same array. + pub fn rotate_left_fixed( + &mut self, + b: Target, + k: usize, + v: &[ExtensionTarget], + ) -> Vec> { + let len = v.len(); + debug_assert!(k < len, "Trying to rotate by more than the vector length."); + let mut res = Vec::new(); + + for i in 0..len { + res.push(self.select(b, v[(i + k) % len], v[i])); + } + + res + } + + /// Left-rotates an array `k` times if `b=1` else return the same array. + pub fn rotate_right_fixed( + &mut self, + b: Target, + k: usize, + v: &[ExtensionTarget], + ) -> Vec> { + let len = v.len(); + debug_assert!(k < len, "Trying to rotate by more than the vector length."); + let mut res = Vec::new(); + + for i in 0..len { + res.push(self.select(b, v[(len + i - k) % len], v[i])); + } + + res + } + + /// Left-rotates an vector by the `Target` having bits given in little-endian by `num_rotation_bits`. + pub fn rotate_left_from_bits( + &mut self, + num_rotation_bits: &[Target], + v: &[ExtensionTarget], + ) -> Vec> { + let mut v = v.to_vec(); + + for i in 0..num_rotation_bits.len() { + v = self.rotate_left_fixed(num_rotation_bits[i], 1 << i, &v); + } + + v + } + + pub fn rotate_right_from_bits( + &mut self, + num_rotation_bits: &[Target], + v: &[ExtensionTarget], + ) -> Vec> { + let mut v = v.to_vec(); + + for i in 0..num_rotation_bits.len() { + v = self.rotate_right_fixed(num_rotation_bits[i], 1 << i, &v); + } + + v + } + + /// Left-rotates an array by `num_rotation`. Assumes that `num_rotation` is range-checked to be + /// less than `2^len_bits`. 
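The `*_from_bits` helpers above apply a conditional fixed rotation by 2^i for each bit of the rotation amount, so rotating by an arbitrary `num_rotation` costs one `select` pass per bit. A plain-Rust model of that strategy (illustration only; the `bool` bits stand in for range-checked binary targets):

fn rotate_left_from_bits(bits: &[bool], v: &[u32]) -> Vec<u32> {
    let mut v = v.to_vec();
    for (i, &bit) in bits.iter().enumerate() {
        if bit {
            // Conditional fixed rotation by 2^i, mirroring `rotate_left_fixed`.
            v.rotate_left((1usize << i) % v.len());
        }
    }
    v
}

fn main() {
    let v: Vec<u32> = (0..8).collect();
    let bits = [true, false, true]; // little-endian bits of 5
    let mut expected = v.clone();
    expected.rotate_left(5);
    assert_eq!(rotate_left_from_bits(&bits, &v), expected);
}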
+ pub fn rotate_left( + &mut self, + num_rotation: Target, + v: &[ExtensionTarget], + ) -> Vec> { + let len_bits = log2_ceil(v.len()); + let bits = self.split_le(num_rotation, len_bits); + + self.rotate_left_from_bits(&bits, v) + } + + pub fn rotate_right( + &mut self, + num_rotation: Target, + v: &[ExtensionTarget], + ) -> Vec> { + let len_bits = log2_ceil(v.len()); + let bits = self.split_le(num_rotation, len_bits); + + self.rotate_right_from_bits(&bits, v) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::circuit_data::CircuitConfig; + use crate::field::crandall_field::CrandallField; + use crate::field::extension_field::quartic::QuarticCrandallField; + use crate::field::field::Field; + use crate::witness::PartialWitness; + + fn real_rotate( + num_rotation: usize, + v: &[ExtensionTarget], + ) -> Vec> { + let mut res = v.to_vec(); + res.rotate_left(num_rotation); + res + } + + fn test_rotate_given_len(len: usize) { + type F = CrandallField; + type FF = QuarticCrandallField; + let config = CircuitConfig::large_config(); + let mut builder = CircuitBuilder::::new(config); + let v = (0..len) + .map(|_| builder.constant_extension(FF::rand())) + .collect::>(); + + for i in 0..len { + let it = builder.constant(F::from_canonical_usize(i)); + let rotated = real_rotate(i, &v); + let purported_rotated = builder.rotate_left(it, &v); + + for (x, y) in rotated.into_iter().zip(purported_rotated) { + builder.assert_equal_extension(x, y); + } + } + + let data = builder.build(); + let proof = data.prove(PartialWitness::new()); + } + + #[test] + fn test_rotate() { + for len in 1..5 { + test_rotate_given_len(len); + } + } +} diff --git a/src/gadgets/split_base.rs b/src/gadgets/split_base.rs new file mode 100644 index 00000000..37f88409 --- /dev/null +++ b/src/gadgets/split_base.rs @@ -0,0 +1,71 @@ +use crate::circuit_builder::CircuitBuilder; +use crate::field::extension_field::Extendable; +use crate::gates::base_sum::BaseSumGate; +use crate::target::Target; + +impl, const D: usize> CircuitBuilder { + /// Split the given element into a list of targets, where each one represents a + /// base-B limb of the element, with little-endian ordering. + pub(crate) fn split_le_base( + &mut self, + x: Target, + num_limbs: usize, + ) -> Vec { + let gate = self.add_gate(BaseSumGate::::new(num_limbs), vec![]); + let sum = Target::wire(gate, BaseSumGate::::WIRE_SUM); + self.route(x, sum); + + Target::wires_from_range( + gate, + BaseSumGate::::START_LIMBS..BaseSumGate::::START_LIMBS + num_limbs, + ) + } + + /// Asserts that `x`'s big-endian bit representation has at least `leading_zeros` leading zeros. + pub(crate) fn assert_leading_zeros(&mut self, x: Target, leading_zeros: u32) { + self.range_check(x, (64 - leading_zeros) as usize); + } + + pub(crate) fn reverse_limbs(&mut self, x: Target, num_limbs: usize) -> Target { + let gate = self.add_gate(BaseSumGate::::new(num_limbs), vec![]); + let sum = Target::wire(gate, BaseSumGate::::WIRE_SUM); + self.route(x, sum); + + Target::wire(gate, BaseSumGate::::WIRE_REVERSED_SUM) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::circuit_data::CircuitConfig; + use crate::field::crandall_field::CrandallField; + use crate::field::field::Field; + use crate::witness::PartialWitness; + + #[test] + fn test_split_base() { + type F = CrandallField; + let config = CircuitConfig::large_config(); + let mut builder = CircuitBuilder::::new(config); + let x = F::from_canonical_usize(0b110100000); // 416 = 1532 in base 6. 
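Spelling out the constants in this test (arithmetic only, no circuit code): 416 = 2 + 3*6 + 5*36 + 1*216, so its little-endian base-6 limbs are 2, 3, 5, 1, and reversing its 9-bit string 110100000 gives 000001011 = 11.

fn main() {
    let x: u64 = 0b110100000; // 416
    // Little-endian base-6 limbs.
    let limbs = [2u64, 3, 5, 1];
    let sum = limbs.iter().rev().fold(0u64, |acc, &limb| acc * 6 + limb);
    assert_eq!(sum, x);

    // What `reverse_limbs::<2>(xt, 9)` checks: the bit-reversed 9-bit value.
    let bits: Vec<u64> = (0..9).map(|i| (x >> i) & 1).collect();
    let reversed = bits.iter().fold(0u64, |acc, &bit| acc * 2 + bit);
    assert_eq!(reversed, 11);
}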
+ let xt = builder.constant(x); + let limbs = builder.split_le_base::<6>(xt, 24); + let one = builder.one(); + let two = builder.two(); + let three = builder.constant(F::from_canonical_u64(3)); + let five = builder.constant(F::from_canonical_u64(5)); + builder.route(limbs[0], two); + builder.route(limbs[1], three); + builder.route(limbs[2], five); + builder.route(limbs[3], one); + let rev = builder.constant(F::from_canonical_u64(11)); + let revt = builder.reverse_limbs::<2>(xt, 9); + builder.route(revt, rev); + + builder.assert_leading_zeros(xt, 64 - 9); + let data = builder.build(); + + let proof = data.prove(PartialWitness::new()); + } +} diff --git a/src/gadgets/split_join.rs b/src/gadgets/split_join.rs index e65198ec..5eb60148 100644 --- a/src/gadgets/split_join.rs +++ b/src/gadgets/split_join.rs @@ -1,8 +1,10 @@ use crate::circuit_builder::CircuitBuilder; use crate::field::extension_field::Extendable; use crate::field::field::Field; +use crate::gates::base_sum::BaseSumGate; use crate::generator::{SimpleGenerator, WitnessGenerator}; use crate::target::Target; +use crate::util::ceil_div_usize; use crate::wire::Wire; use crate::witness::PartialWitness; @@ -21,6 +23,53 @@ impl, const D: usize> CircuitBuilder { }); bit_targets } + + /// Split the given integer into a list of wires, where each one represents a + /// bit of the integer, with little-endian ordering. + /// Verifies that the decomposition is correct by using `k` `BaseSum<2>` gates + /// with `k` such that `k*num_routed_wires>=num_bits`. + pub(crate) fn split_le(&mut self, integer: Target, num_bits: usize) -> Vec { + if num_bits == 0 { + return Vec::new(); + } + let bits_per_gate = self.config.num_routed_wires - BaseSumGate::<2>::START_LIMBS; + let k = ceil_div_usize(num_bits, bits_per_gate); + let gates = (0..k) + .map(|_| self.add_gate_no_constants(BaseSumGate::<2>::new(bits_per_gate))) + .collect::>(); + + let mut bits = Vec::with_capacity(num_bits); + for &gate in &gates { + bits.extend(Target::wires_from_range( + gate, + BaseSumGate::<2>::START_LIMBS..BaseSumGate::<2>::START_LIMBS + bits_per_gate, + )); + } + bits.drain(num_bits..); + + let zero = self.zero(); + let one = self.one(); + let mut acc = zero; + for &gate in gates.iter().rev() { + let sum = Target::wire(gate, BaseSumGate::<2>::WIRE_SUM); + acc = self.arithmetic( + F::from_canonical_usize(1 << bits_per_gate), + acc, + one, + F::ONE, + sum, + ); + } + self.assert_equal(acc, integer); + + self.add_generator(WireSplitGenerator { + integer, + gates, + num_limbs: bits_per_gate, + }); + + bits + } } /// Generator for a little-endian split. 
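`split_le` above spreads the bits of `integer` over `k` `BaseSum<2>` gates and re-assembles it from the per-gate sums, most significant gate first, via `acc = acc * 2^bits_per_gate + sum`. A standalone model of that recombination (illustration only):

fn main() {
    let integer: u64 = 0b1011_0110_1101; // 2925, 12 bits
    let bits_per_gate = 5;
    let num_gates = 3; // ceil(12 / 5)

    // Per-gate partial sums: little-endian chunks of `bits_per_gate` bits each.
    let gate_sums: Vec<u64> = (0..num_gates)
        .map(|i| (integer >> (i * bits_per_gate)) & ((1u64 << bits_per_gate) - 1))
        .collect();

    // Circuit-side recombination, top gate first.
    let acc = gate_sums
        .iter()
        .rev()
        .fold(0u64, |acc, &sum| (acc << bits_per_gate) + sum);
    assert_eq!(acc, integer);
}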
@@ -79,3 +128,39 @@ impl SimpleGenerator for SplitGenerator { result } } + +#[derive(Debug)] +struct WireSplitGenerator { + integer: Target, + gates: Vec, + num_limbs: usize, +} + +impl SimpleGenerator for WireSplitGenerator { + fn dependencies(&self) -> Vec { + vec![self.integer] + } + + fn run_once(&self, witness: &PartialWitness) -> PartialWitness { + let mut integer_value = witness.get_target(self.integer).to_canonical_u64(); + + let mut result = PartialWitness::new(); + for &gate in &self.gates { + let sum = Target::wire(gate, BaseSumGate::<2>::WIRE_SUM); + result.set_target( + sum, + F::from_canonical_u64(integer_value & ((1 << self.num_limbs) - 1)), + ); + integer_value >>= self.num_limbs; + } + + debug_assert_eq!( + integer_value, + 0, + "Integer too large to fit in {} many `BaseSumGate`s", + self.gates.len() + ); + + result + } +} diff --git a/src/gates/base_sum.rs b/src/gates/base_sum.rs new file mode 100644 index 00000000..e34aa6c1 --- /dev/null +++ b/src/gates/base_sum.rs @@ -0,0 +1,182 @@ +use std::ops::Range; + +use crate::circuit_builder::CircuitBuilder; +use crate::field::extension_field::target::ExtensionTarget; +use crate::field::extension_field::Extendable; +use crate::field::field::Field; +use crate::gates::gate::{Gate, GateRef}; +use crate::generator::{SimpleGenerator, WitnessGenerator}; +use crate::plonk_common::{reduce_with_powers, reduce_with_powers_recursive}; +use crate::target::Target; +use crate::vars::{EvaluationTargets, EvaluationVars}; +use crate::witness::PartialWitness; + +/// A gate which can decompose a number into base B little-endian limbs, +/// and compute the limb-reversed (i.e. big-endian) sum. +#[derive(Debug)] +pub struct BaseSumGate { + num_limbs: usize, +} + +impl BaseSumGate { + pub fn new, const D: usize>(num_limbs: usize) -> GateRef { + GateRef::new(BaseSumGate:: { num_limbs }) + } + + pub const WIRE_SUM: usize = 0; + pub const WIRE_REVERSED_SUM: usize = 1; + pub const START_LIMBS: usize = 2; + + /// Returns the index of the `i`th limb wire. 
+ pub fn limbs(&self) -> Range { + Self::START_LIMBS..Self::START_LIMBS + self.num_limbs + } +} + +impl, const D: usize, const B: usize> Gate for BaseSumGate { + fn id(&self) -> String { + format!("{:?} + Base: {}", self, B) + } + + fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { + let sum = vars.local_wires[Self::WIRE_SUM]; + let reversed_sum = vars.local_wires[Self::WIRE_REVERSED_SUM]; + let mut limbs = vars.local_wires[self.limbs()].to_vec(); + let computed_sum = reduce_with_powers(&limbs, F::Extension::from_canonical_usize(B)); + limbs.reverse(); + let computed_reversed_sum = + reduce_with_powers(&limbs, F::Extension::from_canonical_usize(B)); + let mut constraints = vec![computed_sum - sum, computed_reversed_sum - reversed_sum]; + for limb in limbs { + constraints.push( + (0..B) + .map(|i| limb - F::Extension::from_canonical_usize(i)) + .product(), + ); + } + constraints + } + + fn eval_unfiltered_recursively( + &self, + builder: &mut CircuitBuilder, + vars: EvaluationTargets, + ) -> Vec> { + let base = builder.constant(F::from_canonical_usize(B)); + let sum = vars.local_wires[Self::WIRE_SUM]; + let reversed_sum = vars.local_wires[Self::WIRE_REVERSED_SUM]; + let mut limbs = vars.local_wires[self.limbs()].to_vec(); + let computed_sum = reduce_with_powers_recursive(builder, &limbs, base); + limbs.reverse(); + let computed_reversed_sum = reduce_with_powers_recursive(builder, &limbs, base); + let mut constraints = vec![ + builder.sub_extension(computed_sum, sum), + builder.sub_extension(computed_reversed_sum, reversed_sum), + ]; + for limb in limbs { + constraints.push({ + let mut acc = builder.one_extension(); + (0..B).for_each(|i| { + let it = builder.constant_extension(F::from_canonical_usize(i).into()); + let diff = builder.sub_extension(limb, it); + acc = builder.mul_extension(acc, diff); + }); + acc + }); + } + constraints + } + + fn generators( + &self, + gate_index: usize, + _local_constants: &[F], + ) -> Vec>> { + let gen = BaseSplitGenerator:: { + gate_index, + num_limbs: self.num_limbs, + }; + vec![Box::new(gen)] + } + + // 2 for the sum and reversed sum, then `num_limbs` for the limbs. + fn num_wires(&self) -> usize { + self.num_limbs + 2 + } + + fn num_constants(&self) -> usize { + 0 + } + + // Bounded by the range-check (x-0)*(x-1)*...*(x-B+1). + fn degree(&self) -> usize { + B + } + + // 2 for checking the sum and reversed sum, then `num_limbs` for range-checking the limbs. 
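The limb constraint built in `eval_unfiltered` above is the product (limb - 0)(limb - 1)...(limb - (B-1)), which vanishes exactly when the limb is a valid base-B digit; this product is also what pins the gate's degree at B. A tiny integer model (illustration only):

// Vanishes iff 0 <= limb < base; the gate enforces the same identity over the field.
fn range_check_product(limb: i64, base: i64) -> i64 {
    (0..base).map(|i| limb - i).product()
}

fn main() {
    let base = 6;
    assert!((0..base).all(|limb| range_check_product(limb, base) == 0));
    assert_ne!(range_check_product(6, base), 0);
    assert_ne!(range_check_product(-1, base), 0);
}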
+ fn num_constraints(&self) -> usize { + 2 + self.num_limbs + } +} + +#[derive(Debug)] +pub struct BaseSplitGenerator { + gate_index: usize, + num_limbs: usize, +} + +impl SimpleGenerator for BaseSplitGenerator { + fn dependencies(&self) -> Vec { + vec![Target::wire(self.gate_index, BaseSumGate::::WIRE_SUM)] + } + + fn run_once(&self, witness: &PartialWitness) -> PartialWitness { + let sum_value = witness + .get_target(Target::wire(self.gate_index, BaseSumGate::::WIRE_SUM)) + .to_canonical_u64() as usize; + debug_assert_eq!( + (0..self.num_limbs).fold(sum_value, |acc, _| acc / B), + 0, + "Integer too large to fit in given number of limbs" + ); + + let limbs = (BaseSumGate::::START_LIMBS..BaseSumGate::::START_LIMBS + self.num_limbs) + .map(|i| Target::wire(self.gate_index, i)); + let limbs_value = (0..self.num_limbs) + .scan(sum_value, |acc, _| { + let tmp = *acc % B; + *acc /= B; + Some(F::from_canonical_usize(tmp)) + }) + .collect::>(); + + let b_field = F::from_canonical_usize(B); + let reversed_sum = limbs_value + .iter() + .fold(F::ZERO, |acc, &x| acc * b_field + x); + + let mut result = PartialWitness::new(); + result.set_target( + Target::wire(self.gate_index, BaseSumGate::::WIRE_REVERSED_SUM), + reversed_sum, + ); + for (b, b_value) in limbs.zip(limbs_value) { + result.set_target(b, b_value); + } + + result + } +} + +#[cfg(test)] +mod tests { + use crate::circuit_data::CircuitConfig; + use crate::field::crandall_field::CrandallField; + use crate::gates::base_sum::BaseSumGate; + use crate::gates::gate_testing::test_low_degree; + + #[test] + fn low_degree() { + test_low_degree(BaseSumGate::<6>::new::(11)) + } +} diff --git a/src/gates/gate.rs b/src/gates/gate.rs index 1765191e..4b37892c 100644 --- a/src/gates/gate.rs +++ b/src/gates/gate.rs @@ -1,9 +1,12 @@ +use std::fmt::{Debug, Error, Formatter}; use std::hash::{Hash, Hasher}; use std::sync::Arc; use crate::circuit_builder::CircuitBuilder; use crate::field::extension_field::target::ExtensionTarget; use crate::field::extension_field::{Extendable, FieldExtension}; +use crate::field::field::Field; +use crate::gates::gate_tree::Tree; use crate::generator::WitnessGenerator; use crate::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase}; @@ -51,15 +54,23 @@ pub trait Gate, const D: usize>: 'static + Send + Sync { vars: EvaluationTargets, ) -> Vec>; - fn eval_filtered(&self, vars: EvaluationVars) -> Vec { - // TODO: Filter + fn eval_filtered(&self, mut vars: EvaluationVars, prefix: &[bool]) -> Vec { + let filter = compute_filter(prefix, vars.local_constants); + vars.remove_prefix(prefix); self.eval_unfiltered(vars) + .into_iter() + .map(|c| filter * c) + .collect() } /// Like `eval_filtered`, but specialized for points in the base field. - fn eval_filtered_base(&self, vars: EvaluationVarsBase) -> Vec { - // TODO: Filter + fn eval_filtered_base(&self, mut vars: EvaluationVarsBase, prefix: &[bool]) -> Vec { + let filter = compute_filter(prefix, vars.local_constants); + vars.remove_prefix(prefix); self.eval_unfiltered_base(vars) + .into_iter() + .map(|c| c * filter) + .collect() } fn eval_filtered_recursively( @@ -113,8 +124,46 @@ impl, const D: usize> Hash for GateRef { impl, const D: usize> Eq for GateRef {} +impl, const D: usize> Debug for GateRef { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { + write!(f, "{}", self.0.id()) + } +} + /// A gate along with any constants used to configure it. 
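`eval_filtered` and `eval_filtered_base` above scale every constraint by a selector derived from the gate's prefix and the leading constant wires; `compute_filter`, added below, returns 1 when the constants start with the prefix and 0 otherwise (assuming the prefix constants are boolean). A minimal integer model of that filter (illustration only):

// Integer model of the prefix filter; matches `compute_filter` for 0/1 constants.
fn filter(prefix: &[bool], constants: &[i64]) -> i64 {
    prefix
        .iter()
        .zip(constants)
        .map(|(&b, &c)| if b { c } else { 1 - c })
        .product()
}

fn main() {
    // A gate with prefix [true, false]: rows whose leading constants are (1, 0) keep their
    // constraints; every other row has them zeroed out.
    assert_eq!(filter(&[true, false], &[1, 0, 42]), 1);
    assert_eq!(filter(&[true, false], &[0, 1, 42]), 0);
    assert_eq!(filter(&[true, false], &[1, 1, 42]), 0);
}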
pub struct GateInstance, const D: usize> { pub gate_type: GateRef, pub constants: Vec, } + +/// Map each gate to a boolean prefix used to construct the gate's selector polynomial. +#[derive(Debug, Clone)] +pub struct PrefixedGate, const D: usize> { + pub gate: GateRef, + pub prefix: Vec, +} + +impl, const D: usize> PrefixedGate { + pub fn from_tree(tree: Tree>) -> Vec { + tree.traversal() + .into_iter() + .map(|(gate, prefix)| PrefixedGate { gate, prefix }) + .collect() + } +} + +/// A gate's filter is computed as `prod b_i*c_i + (1-b_i)*(1-c_i)`, with `(b_i)` the prefix and +/// `(c_i)` the local constants, which is one if the prefix of `constants` matches `prefix`. +fn compute_filter(prefix: &[bool], constants: &[K]) -> K { + prefix + .iter() + .enumerate() + .map(|(i, &b)| { + if b { + constants[i] + } else { + K::ONE - constants[i] + } + }) + .product() +} diff --git a/src/gates/gate_tree.rs b/src/gates/gate_tree.rs new file mode 100644 index 00000000..1423de76 --- /dev/null +++ b/src/gates/gate_tree.rs @@ -0,0 +1,257 @@ +use log::info; + +use crate::field::extension_field::Extendable; +use crate::gates::gate::GateRef; + +/// A binary tree where leaves hold some type `T` and other nodes are empty. +#[derive(Debug, Clone)] +pub enum Tree { + Leaf(T), + Bifurcation(Option>>, Option>>), +} + +impl Default for Tree { + fn default() -> Self { + Self::Bifurcation(None, None) + } +} + +impl Tree { + /// Traverse a tree using a depth-first traversal and collect data and position for each leaf. + /// A leaf's position is represented by its left/right path, where `false` means left and `true` means right. + pub fn traversal(&self) -> Vec<(T, Vec)> { + let mut res = Vec::new(); + let prefix = []; + self.traverse(&prefix, &mut res); + res + } + + /// Utility function to traverse the tree. + fn traverse(&self, prefix: &[bool], current: &mut Vec<(T, Vec)>) { + match &self { + // If node is a leaf, collect the data and position. + Tree::Leaf(t) => { + current.push((t.clone(), prefix.to_vec())); + } + // Otherwise, traverse the left subtree and then the right subtree. + Tree::Bifurcation(left, right) => { + if let Some(l) = left { + let mut left_prefix = prefix.to_vec(); + left_prefix.push(false); + l.traverse(&left_prefix, current); + } + if let Some(r) = right { + let mut right_prefix = prefix.to_vec(); + right_prefix.push(true); + r.traverse(&right_prefix, current); + } + } + } + } +} + +impl, const D: usize> Tree> { + /// The binary gate tree influences the degree `D` of the constraint polynomial and the number `C` + /// of constant wires in the circuit. We want to construct a tree minimizing both values. To do so + /// we iterate over possible values of `(D, C)` and try to construct a tree with these values. + /// For this construction, we use the greedy algorithm in `Self::find_tree`. + /// This latter function greedily adds gates at the depth where + /// `filtered_deg(gate)=D, constant_wires(gate)=C` to ensure no space is wasted. + /// We return the first tree found in this manner. + pub fn from_gates(mut gates: Vec>) -> Self { + let timer = std::time::Instant::now(); + gates.sort_unstable_by_key(|g| (-(g.0.degree() as isize), -(g.0.num_constants() as isize))); + + for max_degree_bits in 1..10 { + // The constraint polynomials are padded to the next power in `compute_vanishig_polys`. + // So we can restrict our search space by setting `max_degree` to a power of 2. 
+ let max_degree = 1 << max_degree_bits; + for max_constants in 1..100 { + if let Some(mut tree) = Self::find_tree(&gates, max_degree, max_constants) { + tree.shorten(); + info!( + "Found tree with max degree {} in {}s.", + max_degree, + timer.elapsed().as_secs_f32() + ); + return tree; + } + } + } + + panic!("Can't find a tree.") + } + + /// Greedily add gates wherever possible. Returns `None` if this fails. + fn find_tree(gates: &[GateRef], max_degree: usize, max_constants: usize) -> Option { + let mut tree = Tree::default(); + + for g in gates { + tree.try_add_gate(g, max_degree, max_constants)?; + } + Some(tree) + } + + /// Try to add a gate in the tree. Returns `None` if this fails. + fn try_add_gate( + &mut self, + g: &GateRef, + max_degree: usize, + max_constants: usize, + ) -> Option<()> { + // We want `gate.degree + depth <= max_degree` and `gate.num_constants + depth <= max_wires`. + let depth = max_degree + .checked_sub(g.0.degree())? + .min(max_constants.checked_sub(g.0.num_constants())?); + self.try_add_gate_at_depth(g, depth) + } + + /// Try to add a gate in the tree at a specified depth. Returns `None` if this fails. + fn try_add_gate_at_depth(&mut self, g: &GateRef, depth: usize) -> Option<()> { + // If depth is 0, we have to insert the gate here. + if depth == 0 { + return if let Tree::Bifurcation(None, None) = self { + // Insert the gate as a new leaf. + *self = Tree::Leaf(g.clone()); + Some(()) + } else { + // A leaf is already here. + None + }; + } + + // A leaf is already here so we cannot go deeper. + if let Tree::Leaf(_) = self { + return None; + } + + if let Tree::Bifurcation(left, right) = self { + if let Some(left) = left { + // Try to add the gate to the left if there's already a left subtree. + if left.try_add_gate_at_depth(g, depth - 1).is_some() { + return Some(()); + } + } else { + // Add a new left subtree and try to add the gate to it. + let mut new_left = Tree::default(); + if new_left.try_add_gate_at_depth(g, depth - 1).is_some() { + *left = Some(Box::new(new_left)); + return Some(()); + } + } + if let Some(right) = right { + // Try to add the gate to the right if there's already a right subtree. + if right.try_add_gate_at_depth(g, depth - 1).is_some() { + return Some(()); + } + } else { + // Add a new right subtree and try to add the gate to it. + let mut new_right = Tree::default(); + if new_right.try_add_gate_at_depth(g, depth - 1).is_some() { + *right = Some(Box::new(new_right)); + return Some(()); + } + } + } + + None + } + + /// `Self::find_tree` returns a tree where each gate has `F(gate)=M` (see `Self::from_gates` comment). + /// This can produce subtrees with more nodes than necessary. This function removes useless nodes, + /// i.e., nodes that have a left but no right subtree. + fn shorten(&mut self) { + if let Tree::Bifurcation(left, right) = self { + if let (Some(left), None) = (left, right) { + // If the node has a left but no right subtree, set the node to its (shortened) left subtree. + let mut new = *left.clone(); + new.shorten(); + *self = new; + } + } + if let Tree::Bifurcation(left, right) = self { + if let Some(left) = left { + // Shorten the left subtree if there is one. + left.shorten(); + } + if let Some(right) = right { + // Shorten the right subtree if there is one. 
+ right.shorten(); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::field::crandall_field::CrandallField; + use crate::gates::arithmetic::ArithmeticGate; + use crate::gates::base_sum::BaseSumGate; + use crate::gates::constant::ConstantGate; + use crate::gates::gmimc::GMiMCGate; + use crate::gates::interpolation::InterpolationGate; + use crate::gates::mul_extension::MulExtensionGate; + use crate::gates::noop::NoopGate; + use crate::hash::GMIMC_ROUNDS; + + #[test] + fn test_prefix_generation() { + env_logger::init(); + type F = CrandallField; + const D: usize = 4; + + let gates = vec![ + NoopGate::get::(), + ConstantGate::get(), + ArithmeticGate::new(), + BaseSumGate::<4>::new(4), + GMiMCGate::::with_automatic_constants(), + InterpolationGate::new(4), + MulExtensionGate::new(), + ]; + let len = gates.len(); + + let tree = Tree::from_gates(gates.clone()); + let mut gates_with_prefix = tree.traversal(); + for (g, p) in &gates_with_prefix { + info!( + "\nGate: {}, prefix: {:?}.\n\ + Filtered constraint degree: {}, Num constant wires: {}", + &g.0.id()[..20.min(g.0.id().len())], + p, + g.0.degree() + p.len(), + g.0.num_constants() + p.len() + ); + } + + assert_eq!( + gates_with_prefix.len(), + gates.len(), + "The tree has too much or too little gates." + ); + assert!( + gates + .iter() + .all(|g| gates_with_prefix.iter().map(|(gg, _)| gg).any(|gg| gg == g)), + "Some gates are not in the tree." + ); + assert!( + gates_with_prefix + .iter() + .all(|(g, p)| g.0.degree() + g.0.num_constants() + p.len() <= 8), + "Total degree is larger than 8." + ); + + gates_with_prefix.sort_unstable_by_key(|(g, p)| p.len()); + for i in 0..gates_with_prefix.len() { + for j in i + 1..gates_with_prefix.len() { + assert_ne!( + &gates_with_prefix[i].1, + &gates_with_prefix[j].1[0..gates_with_prefix[i].1.len()], + "Some gates share an overlapping prefix" + ); + } + } + } +} diff --git a/src/gates/gmimc.rs b/src/gates/gmimc.rs index 19042d57..bdfade7c 100644 --- a/src/gates/gmimc.rs +++ b/src/gates/gmimc.rs @@ -323,6 +323,8 @@ mod tests { use crate::gates::gmimc::{GMiMCGate, W}; use crate::generator::generate_partial_witness; use crate::gmimc::gmimc_permute_naive; + use crate::permutation_argument::TargetPartitions; + use crate::target::Target; use crate::wire::Wire; use crate::witness::PartialWitness; diff --git a/src/gates/interpolation.rs b/src/gates/interpolation.rs index c17be3e0..ccf8d57d 100644 --- a/src/gates/interpolation.rs +++ b/src/gates/interpolation.rs @@ -6,7 +6,7 @@ use crate::circuit_builder::CircuitBuilder; use crate::field::extension_field::algebra::PolynomialCoeffsAlgebra; use crate::field::extension_field::target::ExtensionTarget; use crate::field::extension_field::{Extendable, FieldExtension}; -use crate::field::lagrange::interpolant; +use crate::field::interpolation::interpolant; use crate::gadgets::polynomial::PolynomialCoeffsExtAlgebraTarget; use crate::gates::gate::{Gate, GateRef}; use crate::generator::{SimpleGenerator, WitnessGenerator}; @@ -22,8 +22,8 @@ use crate::witness::PartialWitness; /// given point. #[derive(Clone, Debug)] pub(crate) struct InterpolationGate, const D: usize> { - num_points: usize, - _phantom: PhantomData, + pub num_points: usize, + pub _phantom: PhantomData, } impl, const D: usize> InterpolationGate { @@ -355,9 +355,7 @@ mod tests { }; assert!( - gate.eval_unfiltered(vars.clone()) - .iter() - .all(|x| x.is_zero()), + gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()), "Gate constraints are not satisfied." 
); } diff --git a/src/gates/mod.rs b/src/gates/mod.rs index ebcf6e3f..bb8b178b 100644 --- a/src/gates/mod.rs +++ b/src/gates/mod.rs @@ -1,8 +1,11 @@ pub(crate) mod arithmetic; +pub mod base_sum; pub mod constant; pub(crate) mod gate; +pub mod gate_tree; pub mod gmimc; -mod interpolation; +pub mod interpolation; +pub mod mul_extension; pub(crate) mod noop; #[cfg(test)] diff --git a/src/gates/mul_extension.rs b/src/gates/mul_extension.rs new file mode 100644 index 00000000..e378e2b1 --- /dev/null +++ b/src/gates/mul_extension.rs @@ -0,0 +1,145 @@ +use std::ops::Range; + +use crate::circuit_builder::CircuitBuilder; +use crate::field::extension_field::target::ExtensionTarget; +use crate::field::extension_field::{Extendable, FieldExtension}; +use crate::gates::gate::{Gate, GateRef}; +use crate::generator::{SimpleGenerator, WitnessGenerator}; +use crate::target::Target; +use crate::vars::{EvaluationTargets, EvaluationVars}; +use crate::wire::Wire; +use crate::witness::PartialWitness; + +/// A gate which can multiply two field extension elements. +/// TODO: Add an addend if `NUM_ROUTED_WIRES` is large enough. +#[derive(Debug)] +pub struct MulExtensionGate; + +impl MulExtensionGate { + pub fn new>() -> GateRef { + GateRef::new(MulExtensionGate) + } + + pub fn wires_multiplicand_0() -> Range { + 0..D + } + pub fn wires_multiplicand_1() -> Range { + D..2 * D + } + pub fn wires_output() -> Range { + 2 * D..3 * D + } +} + +impl, const D: usize> Gate for MulExtensionGate { + fn id(&self) -> String { + format!("{:?}", self) + } + + fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { + let const_0 = vars.local_constants[0]; + let multiplicand_0 = vars.get_local_ext_algebra(Self::wires_multiplicand_0()); + let multiplicand_1 = vars.get_local_ext_algebra(Self::wires_multiplicand_1()); + let output = vars.get_local_ext_algebra(Self::wires_output()); + let computed_output = multiplicand_0 * multiplicand_1 * const_0.into(); + (output - computed_output).to_basefield_array().to_vec() + } + + fn eval_unfiltered_recursively( + &self, + builder: &mut CircuitBuilder, + vars: EvaluationTargets, + ) -> Vec> { + let const_0 = vars.local_constants[0]; + let multiplicand_0 = vars.get_local_ext_algebra(Self::wires_multiplicand_0()); + let multiplicand_1 = vars.get_local_ext_algebra(Self::wires_multiplicand_1()); + let output = vars.get_local_ext_algebra(Self::wires_output()); + let computed_output = builder.mul_ext_algebra(multiplicand_0, multiplicand_1); + let computed_output = builder.scalar_mul_ext_algebra(const_0, computed_output); + let diff = builder.sub_ext_algebra(output, computed_output); + diff.to_ext_target_array().to_vec() + } + + fn generators( + &self, + gate_index: usize, + local_constants: &[F], + ) -> Vec>> { + let gen = MulExtensionGenerator { + gate_index, + const_0: local_constants[0], + }; + vec![Box::new(gen)] + } + + fn num_wires(&self) -> usize { + 12 + } + + fn num_constants(&self) -> usize { + 1 + } + + fn degree(&self) -> usize { + 3 + } + + fn num_constraints(&self) -> usize { + D + } +} + +struct MulExtensionGenerator, const D: usize> { + gate_index: usize, + const_0: F, +} + +impl, const D: usize> SimpleGenerator for MulExtensionGenerator { + fn dependencies(&self) -> Vec { + MulExtensionGate::::wires_multiplicand_0() + .chain(MulExtensionGate::::wires_multiplicand_1()) + .map(|i| { + Target::Wire(Wire { + gate: self.gate_index, + input: i, + }) + }) + .collect() + } + + fn run_once(&self, witness: &PartialWitness) -> PartialWitness { + let multiplicand_0_target = 
ExtensionTarget::from_range( + self.gate_index, + MulExtensionGate::::wires_multiplicand_0(), + ); + let multiplicand_0 = witness.get_extension_target(multiplicand_0_target); + + let multiplicand_1_target = ExtensionTarget::from_range( + self.gate_index, + MulExtensionGate::::wires_multiplicand_1(), + ); + let multiplicand_1 = witness.get_extension_target(multiplicand_1_target); + + let output_target = + ExtensionTarget::from_range(self.gate_index, MulExtensionGate::::wires_output()); + + let computed_output = + F::Extension::from_basefield(self.const_0) * multiplicand_0 * multiplicand_1; + + let mut pw = PartialWitness::new(); + pw.set_extension_target(output_target, computed_output); + pw + } +} + +#[cfg(test)] +mod tests { + use crate::field::crandall_field::CrandallField; + use crate::gates::gate_testing::test_low_degree; + use crate::gates::mul_extension::MulExtensionGate; + + #[test] + fn low_degree() { + test_low_degree(MulExtensionGate::<4>::new::()) + } +} diff --git a/src/gates/noop.rs b/src/gates/noop.rs index eddd0361..a12df932 100644 --- a/src/gates/noop.rs +++ b/src/gates/noop.rs @@ -5,7 +5,7 @@ use crate::gates::gate::{Gate, GateRef}; use crate::generator::WitnessGenerator; use crate::vars::{EvaluationTargets, EvaluationVars}; -/// A gate which takes a single constant parameter and outputs that value. +/// A gate which does nothing. pub struct NoopGate; impl NoopGate { diff --git a/src/generator.rs b/src/generator.rs index 53fb0d42..a2b35a53 100644 --- a/src/generator.rs +++ b/src/generator.rs @@ -24,10 +24,7 @@ pub(crate) fn generate_partial_witness( // Build a list of "pending" generators which are queued to be run. Initially, all generators // are queued. - let mut pending_generator_indices = HashSet::new(); - for i in 0..generators.len() { - pending_generator_indices.insert(i); - } + let mut pending_generator_indices: HashSet<_> = (0..generators.len()).collect(); // We also track a list of "expired" generators which have already returned false. let mut expired_generator_indices = HashSet::new(); @@ -58,6 +55,11 @@ pub(crate) fn generate_partial_witness( pending_generator_indices = next_pending_generator_indices; } + assert_eq!( + expired_generator_indices.len(), + generators.len(), + "Some generators weren't run." + ); } /// A generator participates in the generation of the witness. 
diff --git a/src/merkle_proofs.rs b/src/merkle_proofs.rs index d5ab8a78..7d50f466 100644 --- a/src/merkle_proofs.rs +++ b/src/merkle_proofs.rs @@ -62,7 +62,7 @@ impl, const D: usize> CircuitBuilder { leaf_data: Vec, leaf_index: Target, merkle_root: HashTarget, - proof: MerkleProofTarget, + proof: &MerkleProofTarget, ) { let zero = self.zero(); let height = proof.siblings.len(); @@ -71,7 +71,7 @@ impl, const D: usize> CircuitBuilder { let mut state: HashTarget = self.hash_or_noop(leaf_data); let mut acc_leaf_index = zero; - for (bit, sibling) in purported_index_bits.into_iter().zip(proof.siblings) { + for (bit, &sibling) in purported_index_bits.into_iter().zip(&proof.siblings) { let gate = self .add_gate_no_constants(GMiMCGate::::with_automatic_constants()); diff --git a/src/permutation_argument.rs b/src/permutation_argument.rs index 62ee63d4..54436ecb 100644 --- a/src/permutation_argument.rs +++ b/src/permutation_argument.rs @@ -110,9 +110,9 @@ impl WirePartitions { &self, degree_log: usize, k_is: &[F], + subgroup: &[F], ) -> Vec> { let degree = 1 << degree_log; - let subgroup_generator = F::primitive_root_of_unity(degree_log); let sigma = self.get_sigma_map(degree); sigma @@ -120,7 +120,7 @@ impl WirePartitions { .map(|chunk| { let values = chunk .par_iter() - .map(|&x| k_is[x / degree] * subgroup_generator.exp((x % degree) as u64)) + .map(|&x| k_is[x / degree] * subgroup[x % degree]) .collect::>(); PolynomialValues::new(values) }) diff --git a/src/plonk_challenger.rs b/src/plonk_challenger.rs index fd21ee0d..9af5e590 100644 --- a/src/plonk_challenger.rs +++ b/src/plonk_challenger.rs @@ -1,4 +1,7 @@ +use std::convert::TryInto; + use crate::circuit_builder::CircuitBuilder; +use crate::field::extension_field::target::ExtensionTarget; use crate::field::extension_field::{Extendable, FieldExtension}; use crate::field::field::Field; use crate::hash::{permute, SPONGE_RATE, SPONGE_WIDTH}; @@ -41,9 +44,7 @@ impl Challenger { where F: Extendable, { - for &e in &element.to_basefield_array() { - self.observe_element(e); - } + self.observe_elements(&element.to_basefield_array()); } pub fn observe_elements(&mut self, elements: &[F]) { @@ -177,7 +178,7 @@ impl Default for Challenger { } /// A recursive version of `Challenger`. -pub(crate) struct RecursiveChallenger { +pub struct RecursiveChallenger { sponge_state: [Target; SPONGE_WIDTH], input_buffer: Vec, output_buffer: Vec, @@ -212,6 +213,16 @@ impl RecursiveChallenger { self.observe_elements(&hash.elements) } + pub fn observe_extension_element(&mut self, element: ExtensionTarget) { + self.observe_elements(&element.0); + } + + pub fn observe_extension_elements(&mut self, elements: &[ExtensionTarget]) { + for &element in elements { + self.observe_extension_element(element); + } + } + pub(crate) fn get_challenge, const D: usize>( &mut self, builder: &mut CircuitBuilder, @@ -255,6 +266,27 @@ impl RecursiveChallenger { (0..n).map(|_| self.get_challenge(builder)).collect() } + pub fn get_hash, const D: usize>( + &mut self, + builder: &mut CircuitBuilder, + ) -> HashTarget { + HashTarget { + elements: [ + self.get_challenge(builder), + self.get_challenge(builder), + self.get_challenge(builder), + self.get_challenge(builder), + ], + } + } + + pub fn get_extension_challenge, const D: usize>( + &mut self, + builder: &mut CircuitBuilder, + ) -> ExtensionTarget { + self.get_n_challenges(builder, D).try_into().unwrap() + } + /// Absorb any buffered inputs. After calling this, the input buffer will be empty. 
fn absorb_buffered_inputs, const D: usize>( &mut self, @@ -289,6 +321,7 @@ mod tests { use crate::field::crandall_field::CrandallField; use crate::field::field::Field; use crate::generator::generate_partial_witness; + use crate::permutation_argument::TargetPartitions; use crate::plonk_challenger::{Challenger, RecursiveChallenger}; use crate::target::Target; use crate::witness::PartialWitness; diff --git a/src/plonk_common.rs b/src/plonk_common.rs index e8c3c995..db304c0a 100644 --- a/src/plonk_common.rs +++ b/src/plonk_common.rs @@ -5,11 +5,58 @@ use crate::circuit_data::CommonCircuitData; use crate::field::extension_field::target::ExtensionTarget; use crate::field::extension_field::Extendable; use crate::field::field::Field; -use crate::gates::gate::GateRef; +use crate::gates::gate::{GateRef, PrefixedGate}; +use crate::polynomial::commitment::SALT_SIZE; use crate::polynomial::polynomial::PolynomialCoeffs; use crate::target::Target; use crate::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase}; +/// Holds the Merkle tree index and blinding flag of a set of polynomials used in FRI. +#[derive(Debug, Copy, Clone)] +pub struct PolynomialsIndexBlinding { + pub(crate) index: usize, + pub(crate) blinding: bool, +} +impl PolynomialsIndexBlinding { + pub fn salt_size(&self) -> usize { + if self.blinding { + SALT_SIZE + } else { + 0 + } + } +} +/// Holds the indices and blinding flags of the Plonk polynomials. +pub struct PlonkPolynomials; +impl PlonkPolynomials { + pub const CONSTANTS_SIGMAS: PolynomialsIndexBlinding = PolynomialsIndexBlinding { + index: 0, + blinding: false, + }; + pub const WIRES: PolynomialsIndexBlinding = PolynomialsIndexBlinding { + index: 1, + blinding: true, + }; + pub const ZS: PolynomialsIndexBlinding = PolynomialsIndexBlinding { + index: 2, + blinding: true, + }; + pub const QUOTIENT: PolynomialsIndexBlinding = PolynomialsIndexBlinding { + index: 3, + blinding: true, + }; + + pub fn polynomials(i: usize) -> PolynomialsIndexBlinding { + match i { + 0 => Self::CONSTANTS_SIGMAS, + 1 => Self::WIRES, + 2 => Self::ZS, + 3 => Self::QUOTIENT, + _ => panic!("There are only 4 sets of polynomials in Plonk."), + } + } +} + /// Evaluate the vanishing polynomial at `x`. In this context, the vanishing polynomial is a random /// linear combination of gate constraints, plus some other terms relating to the permutation /// argument. All such terms should vanish on `H`. @@ -64,6 +111,7 @@ pub(crate) fn eval_vanishing_poly, const D: usize>( /// Like `eval_vanishing_poly`, but specialized for base field points. 
pub(crate) fn eval_vanishing_poly_base, const D: usize>( common_data: &CommonCircuitData, + index: usize, x: F, vars: EvaluationVarsBase, local_plonk_zs: &[F], @@ -72,6 +120,7 @@ pub(crate) fn eval_vanishing_poly_base, const D: usize>( betas: &[F], gammas: &[F], alphas: &[F], + z_h_on_coset: &ZeroPolyOnCoset, ) -> Vec { let constraint_terms = evaluate_gate_constraints_base(&common_data.gates, common_data.num_gate_constraints, vars); @@ -84,7 +133,7 @@ pub(crate) fn eval_vanishing_poly_base, const D: usize>( for i in 0..common_data.config.num_challenges { let z_x = local_plonk_zs[i]; let z_gz = next_plonk_zs[i]; - vanishing_z_1_terms.push(eval_l_1(common_data.degree(), x) * (z_x - F::ONE)); + vanishing_z_1_terms.push(z_h_on_coset.eval_l1(index, x) * (z_x - F::ONE)); let mut f_prime = F::ONE; let mut g_prime = F::ONE; @@ -115,13 +164,13 @@ pub(crate) fn eval_vanishing_poly_base, const D: usize>( /// strictly necessary, but it helps performance by ensuring that we allocate a vector with exactly /// the capacity that we need. pub fn evaluate_gate_constraints, const D: usize>( - gates: &[GateRef], + gates: &[PrefixedGate], num_gate_constraints: usize, vars: EvaluationVars, ) -> Vec { let mut constraints = vec![F::Extension::ZERO; num_gate_constraints]; for gate in gates { - let gate_constraints = gate.0.eval_filtered(vars); + let gate_constraints = gate.gate.0.eval_filtered(vars, &gate.prefix); for (i, c) in gate_constraints.into_iter().enumerate() { debug_assert!( i < num_gate_constraints, @@ -134,13 +183,13 @@ pub fn evaluate_gate_constraints, const D: usize>( } pub fn evaluate_gate_constraints_base, const D: usize>( - gates: &[GateRef], + gates: &[PrefixedGate], num_gate_constraints: usize, vars: EvaluationVarsBase, ) -> Vec { let mut constraints = vec![F::ZERO; num_gate_constraints]; for gate in gates { - let gate_constraints = gate.0.eval_filtered_base(vars); + let gate_constraints = gate.gate.0.eval_filtered_base(vars, &gate.prefix); for (i, c) in gate_constraints.into_iter().enumerate() { debug_assert!( i < num_gate_constraints, @@ -174,6 +223,51 @@ pub(crate) fn eval_zero_poly(n: usize, x: F) -> F { x.exp(n as u64) - F::ONE } +/// Precomputations of the evaluation of `Z_H(X) = X^n - 1` on a coset `gK` with `H <= K`. +pub(crate) struct ZeroPolyOnCoset { + /// `n = |H|`. + n: F, + /// `rate = |K|/|H|`. + rate: usize, + /// Holds `g^n * (w^n)^i - 1 = g^n * v^i - 1` for `i in 0..rate`, with `w` a generator of `K` and `v` a + /// `rate`-primitive root of unity. + evals: Vec, + /// Holds the multiplicative inverses of `evals`. + inverses: Vec, +} +impl ZeroPolyOnCoset { + pub fn new(n_log: usize, rate_bits: usize) -> Self { + let g_pow_n = F::coset_shift().exp_power_of_2(n_log); + let evals = F::two_adic_subgroup(rate_bits) + .into_iter() + .map(|x| g_pow_n * x - F::ONE) + .collect::>(); + let inverses = F::batch_multiplicative_inverse(&evals); + Self { + n: F::from_canonical_usize(1 << n_log), + rate: 1 << rate_bits, + evals, + inverses, + } + } + + /// Returns `Z_H(g * w^i)`. + pub fn eval(&self, i: usize) -> F { + self.evals[i % self.rate] + } + + /// Returns `1 / Z_H(g * w^i)`. + pub fn eval_inverse(&self, i: usize) -> F { + self.inverses[i % self.rate] + } + + /// Returns `L_1(x) = Z_H(x)/(n * (x - 1))` with `x = w^i`. + pub fn eval_l1(&self, i: usize, x: F) -> F { + // Could also precompute the inverses using Montgomery. 
+ self.eval(i) * (self.n * (x - F::ONE)).inverse() + } +} + /// Evaluate the Lagrange basis `L_1` with `L_1(1) = 1`, and `L_1(x) = 0` for other members of an /// order `n` multiplicative subgroup. pub(crate) fn eval_l_1(n: usize, x: F) -> F { @@ -206,10 +300,15 @@ pub(crate) fn reduce_with_powers(terms: &[F], alpha: F) -> F { pub(crate) fn reduce_with_powers_recursive, const D: usize>( builder: &mut CircuitBuilder, - terms: Vec, + terms: &[ExtensionTarget], alpha: Target, -) -> Target { - todo!() +) -> ExtensionTarget { + let mut sum = builder.zero_extension(); + for &term in terms.iter().rev() { + sum = builder.scalar_mul_ext(alpha, sum); + sum = builder.add_extension(sum, term); + } + sum } /// Reduce a sequence of field elements by the given coefficients. diff --git a/src/polynomial/commitment.rs b/src/polynomial/commitment.rs index 355a56e4..da647ffa 100644 --- a/src/polynomial/commitment.rs +++ b/src/polynomial/commitment.rs @@ -1,18 +1,19 @@ use anyhow::Result; use rayon::prelude::*; -use crate::field::extension_field::FieldExtension; -use crate::field::extension_field::{Extendable, OEF}; +use crate::circuit_data::CommonCircuitData; +use crate::field::extension_field::Extendable; +use crate::field::extension_field::{FieldExtension, Frobenius}; use crate::field::field::Field; -use crate::field::lagrange::interpolant; use crate::fri::{prover::fri_proof, verifier::verify_fri_proof, FriConfig}; use crate::merkle_tree::MerkleTree; use crate::plonk_challenger::Challenger; -use crate::plonk_common::{reduce_polys_with_iter, reduce_with_iter}; -use crate::polynomial::polynomial::PolynomialCoeffs; -use crate::proof::{FriProof, Hash, OpeningSet}; +use crate::plonk_common::PlonkPolynomials; +use crate::polynomial::polynomial::{PolynomialCoeffs, PolynomialValues}; +use crate::proof::{FriProof, FriProofTarget, Hash, OpeningSet}; use crate::timed; -use crate::util::{log2_strict, reverse_index_bits_in_place, transpose}; +use crate::util::scaling::ReducingFactor; +use crate::util::{log2_strict, reverse_bits, reverse_index_bits_in_place, transpose}; pub const SALT_SIZE: usize = 2; @@ -20,18 +21,49 @@ pub struct ListPolynomialCommitment { pub polynomials: Vec>, pub merkle_tree: MerkleTree, pub degree: usize, + pub degree_log: usize, pub rate_bits: usize, pub blinding: bool, } impl ListPolynomialCommitment { - pub fn new(polynomials: Vec>, rate_bits: usize, blinding: bool) -> Self { + /// Creates a list polynomial commitment for the polynomials interpolating the values in `values`. + pub fn new(values: Vec>, rate_bits: usize, blinding: bool) -> Self { + let degree = values[0].len(); + let polynomials = values + .par_iter() + .map(|v| v.clone().ifft()) + .collect::>(); + let lde_values = timed!( + Self::lde_values(&polynomials, rate_bits, blinding), + "to compute LDE" + ); + + Self::new_from_data(polynomials, lde_values, degree, rate_bits, blinding) + } + + /// Creates a list polynomial commitment for the polynomials `polynomials`. 
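The rewritten `reduce_with_powers_recursive` above uses Horner's rule: folding the terms from the highest index down with `sum = sum * alpha + term` yields the power sum with one multiply-add per term. A standalone check (illustration only):

fn main() {
    let terms = [3i64, 1, 4, 1, 5];
    let alpha = 7i64;

    // Horner: fold from the last term down.
    let horner = terms.iter().rev().fold(0i64, |sum, &term| sum * alpha + term);

    // Direct power sum for comparison.
    let direct: i64 = terms
        .iter()
        .enumerate()
        .map(|(i, &term)| term * alpha.pow(i as u32))
        .sum();

    assert_eq!(horner, direct);
}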
+ pub fn new_from_polys( + polynomials: Vec>, + rate_bits: usize, + blinding: bool, + ) -> Self { let degree = polynomials[0].len(); let lde_values = timed!( Self::lde_values(&polynomials, rate_bits, blinding), "to compute LDE" ); + Self::new_from_data(polynomials, lde_values, degree, rate_bits, blinding) + } + + fn new_from_data( + polynomials: Vec>, + lde_values: Vec>, + degree: usize, + rate_bits: usize, + blinding: bool, + ) -> Self { let mut leaves = timed!(transpose(&lde_values), "to transpose LDEs"); reverse_index_bits_in_place(&mut leaves); let merkle_tree = timed!(MerkleTree::new(leaves, false), "to build Merkle tree"); @@ -40,6 +72,7 @@ impl ListPolynomialCommitment { polynomials, merkle_tree, degree, + degree_log: log2_strict(degree), rate_bits, blinding, } @@ -55,10 +88,7 @@ impl ListPolynomialCommitment { .par_iter() .map(|p| { assert_eq!(p.len(), degree, "Polynomial degree invalid."); - p.clone() - .lde(rate_bits) - .coset_fft(F::MULTIPLICATIVE_GROUP_GENERATOR) - .values + p.clone().lde(rate_bits).coset_fft(F::coset_shift()).values }) .chain(if blinding { // If blinding, salt with two random elements to each leaf vector. @@ -71,24 +101,26 @@ impl ListPolynomialCommitment { .collect() } - pub fn leaf(&self, index: usize) -> &[F] { - let leaf = &self.merkle_tree.leaves[index]; - &leaf[0..leaf.len() - if self.blinding { SALT_SIZE } else { 0 }] + pub fn get_lde_values(&self, index: usize) -> &[F] { + let index = reverse_bits(index, self.degree_log + self.rate_bits); + let slice = &self.merkle_tree.leaves[index]; + &slice[..slice.len() - if self.blinding { SALT_SIZE } else { 0 }] } /// Takes the commitments to the constants - sigmas - wires - zs - quotient — polynomials, /// and an opening point `zeta` and produces a batched opening proof + opening set. pub fn open_plonk( - commitments: &[&Self; 5], + commitments: &[&Self; 4], zeta: F::Extension, challenger: &mut Challenger, - config: &FriConfig, + common_data: &CommonCircuitData, ) -> (OpeningProof, OpeningSet) where F: Extendable, { + let config = &common_data.config.fri_config; assert!(D > 1, "Not implemented for D=1."); - let degree_log = log2_strict(commitments[0].degree); + let degree_log = commitments[0].degree_log; let g = F::Extension::primitive_root_of_unity(degree_log); for p in &[zeta, g * zeta] { assert_ne!( @@ -105,65 +137,54 @@ impl ListPolynomialCommitment { commitments[1], commitments[2], commitments[3], - commitments[4], + common_data, ); challenger.observe_opening_set(&os); let alpha = challenger.get_extension_challenge(); - let mut alpha_powers = alpha.powers(); + let mut alpha = ReducingFactor::new(alpha); // Final low-degree polynomial that goes into FRI. let mut final_poly = PolynomialCoeffs::empty(); // Polynomials opened at a single point. 
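The `ReducingFactor` batching above is sound because combining polynomials with powers of `alpha` commutes with evaluation, so one opening of the combined polynomial pins down all the individual openings (up to the usual soundness error from a random `alpha`). A standalone check with integer polynomials (illustration only):

fn eval(coeffs: &[i64], x: i64) -> i64 {
    coeffs.iter().rev().fold(0, |acc, &c| acc * x + c)
}

fn main() {
    let polys: [&[i64]; 3] = [&[1, 2], &[0, 0, 3], &[7]];
    let (alpha, z) = (5i64, 2i64);

    // Combine the polynomials coefficient-wise with powers of alpha.
    let len = polys.iter().map(|p| p.len()).max().unwrap();
    let mut combined = vec![0i64; len];
    for (i, p) in polys.iter().copied().enumerate() {
        for (j, &c) in p.iter().enumerate() {
            combined[j] += alpha.pow(i as u32) * c;
        }
    }

    // Evaluating the combination equals combining the evaluations.
    let direct: i64 = polys
        .iter()
        .copied()
        .enumerate()
        .map(|(i, p)| alpha.pow(i as u32) * eval(p, z))
        .sum();
    assert_eq!(eval(&combined, z), direct);
}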
- let single_polys = [0, 1, 4] - .iter() - .flat_map(|&i| &commitments[i].polynomials) - .map(|p| p.to_extension()); - let single_os = [&os.constants, &os.plonk_s_sigmas, &os.quotient_polys]; - let single_evals = single_os.iter().flat_map(|v| v.iter()); - let single_composition_poly = reduce_polys_with_iter(single_polys, alpha_powers.clone()); - let single_composition_eval = reduce_with_iter(single_evals, &mut alpha_powers); + let single_polys = [ + PlonkPolynomials::CONSTANTS_SIGMAS, + PlonkPolynomials::QUOTIENT, + ] + .iter() + .flat_map(|&p| &commitments[p.index].polynomials) + .map(|p| p.to_extension()); + let single_composition_poly = alpha.reduce_polys(single_polys); - let single_quotient = Self::compute_quotient( - &[zeta], - &[single_composition_eval], - &single_composition_poly, - ); - final_poly = &final_poly + &single_quotient; + let single_quotient = Self::compute_quotient([zeta], single_composition_poly); + final_poly += single_quotient; + alpha.reset(); // Zs polynomials are opened at `zeta` and `g*zeta`. - let zs_polys = commitments[3].polynomials.iter().map(|p| p.to_extension()); - let zs_composition_poly = reduce_polys_with_iter(zs_polys, alpha_powers.clone()); - let zs_composition_evals = [ - reduce_with_iter(&os.plonk_zs, alpha_powers.clone()), - reduce_with_iter(&os.plonk_zs_right, &mut alpha_powers), - ]; + let zs_polys = commitments[PlonkPolynomials::ZS.index] + .polynomials + .iter() + .map(|p| p.to_extension()); + let zs_composition_poly = alpha.reduce_polys(zs_polys); - let zs_quotient = Self::compute_quotient( - &[zeta, g * zeta], - &zs_composition_evals, - &zs_composition_poly, - ); - final_poly = &final_poly + &zs_quotient; + let zs_quotient = Self::compute_quotient([zeta, g * zeta], zs_composition_poly); + alpha.shift_poly(&mut final_poly); + final_poly += zs_quotient; // When working in an extension field, need to check that wires are in the base field. // Check this by opening the wires polynomials at `zeta` and `zeta.frobenius()` and using the fact that // a polynomial `f` is over the base field iff `f(z).frobenius()=f(z.frobenius())` with high probability. - let wire_polys = commitments[2].polynomials.iter().map(|p| p.to_extension()); - let wire_composition_poly = reduce_polys_with_iter(wire_polys, alpha_powers.clone()); - let wire_evals_frob = os.wires.iter().map(|e| e.frobenius()).collect::>(); - let wire_composition_evals = [ - reduce_with_iter(&os.wires, alpha_powers.clone()), - reduce_with_iter(&wire_evals_frob, alpha_powers), - ]; + let wire_polys = commitments[PlonkPolynomials::WIRES.index] + .polynomials + .iter() + .map(|p| p.to_extension()); + let wire_composition_poly = alpha.reduce_polys(wire_polys); - let wires_quotient = Self::compute_quotient( - &[zeta, zeta.frobenius()], - &wire_composition_evals, - &wire_composition_poly, - ); - final_poly = &final_poly + &wires_quotient; + let wires_quotient = + Self::compute_quotient([zeta, zeta.frobenius()], wire_composition_poly); + alpha.shift_poly(&mut final_poly); + final_poly += wires_quotient; let lde_final_poly = final_poly.lde(config.rate_bits); let lde_final_values = lde_final_poly @@ -194,28 +215,27 @@ impl ListPolynomialCommitment { /// Given `points=(x_i)`, `evals=(y_i)` and `poly=P` with `P(x_i)=y_i`, computes the polynomial /// `Q=(P-I)/Z` where `I` interpolates `(x_i, y_i)` and `Z` is the vanishing polynomial on `(x_i)`. 
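Note on the rewritten `compute_quotient` below: it no longer builds the interpolant explicitly. Since the divisor `Z` vanishes exactly on the `N` opening points and the remainder of dividing `P` by `Z` has degree below `N`, that remainder is precisely the interpolant `I`, so the quotient of `P` by `Z` already equals `(P - I)/Z`. A standalone sketch of that identity with plain `i128` coefficients and a toy `div_rem` (not the crate's `PolynomialCoeffs` API):

```rust
/// Long division of `a` by a monic divisor `b`; coefficients are little-endian
/// (index i holds the X^i coefficient). Returns (quotient, remainder).
fn div_rem(a: &[i128], b: &[i128]) -> (Vec<i128>, Vec<i128>) {
    assert!(a.len() >= b.len());
    assert_eq!(*b.last().unwrap(), 1, "divisor must be monic for this sketch");
    let mut rem = a.to_vec();
    let mut quot = vec![0i128; a.len() - b.len() + 1];
    for q_deg in (0..quot.len()).rev() {
        let lead = rem[q_deg + b.len() - 1];
        quot[q_deg] = lead;
        for (i, &bc) in b.iter().enumerate() {
            rem[q_deg + i] -= lead * bc;
        }
    }
    rem.truncate(b.len() - 1);
    (quot, rem)
}

fn eval(p: &[i128], x: i128) -> i128 {
    p.iter().rev().fold(0, |acc, &c| acc * x + c)
}

fn main() {
    // P(X) = 3 + 2X + X^2 + 7X^3 + 4X^4, opened at z0 = 2 and z1 = 5.
    let p = vec![3i128, 2, 1, 7, 4];
    let (z0, z1) = (2i128, 5i128);
    // Z(X) = (X - z0)(X - z1) = z0*z1 - (z0 + z1)X + X^2, as in the N == 2 branch.
    let z = vec![z0 * z1, -(z0 + z1), 1];

    let (q, r) = div_rem(&p, &z);
    // The remainder interpolates the openings: R(z_i) = P(z_i) ...
    assert_eq!(eval(&r, z0), eval(&p, z0));
    assert_eq!(eval(&r, z1), eval(&p, z1));
    // ... so Q is exactly (P - I)/Z: (P - R) divides by Z with zero remainder.
    let p_minus_r: Vec<i128> = p
        .iter()
        .enumerate()
        .map(|(i, &c)| c - r.get(i).copied().unwrap_or(0))
        .collect();
    let (q2, rem2) = div_rem(&p_minus_r, &z);
    assert!(rem2.iter().all(|&c| c == 0));
    assert_eq!(q, q2);
    println!("quotient: {:?}, remainder/interpolant: {:?}", q, r);
}
```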
- fn compute_quotient( - points: &[F::Extension], - evals: &[F::Extension], - poly: &PolynomialCoeffs, + fn compute_quotient( + points: [F::Extension; N], + poly: PolynomialCoeffs, ) -> PolynomialCoeffs where F: Extendable, { - let pairs = points - .iter() - .zip(evals) - .map(|(&x, &e)| (x, e)) - .collect::>(); - debug_assert!(pairs.iter().all(|&(x, e)| poly.eval(x) == e)); - - let interpolant = interpolant(&pairs); - let denominator = points.iter().fold(PolynomialCoeffs::one(), |acc, &x| { - &acc * &PolynomialCoeffs::new(vec![-x, F::Extension::ONE]) - }); - let numerator = poly - &interpolant; - let (quotient, rem) = numerator.div_rem(&denominator); - debug_assert!(rem.is_zero()); + let quotient = if N == 1 { + poly.divide_by_linear(points[0]).0 + } else if N == 2 { + // The denominator is `(X - p0)(X - p1) = p0 p1 - (p0 + p1) X + X^2`. + let denominator = vec![ + points[0] * points[1], + -points[0] - points[1], + F::Extension::ONE, + ] + .into(); + poly.div_rem_long_division(&denominator).0 // Could also use `divide_by_linear` twice. + } else { + unreachable!("This shouldn't happen. Plonk should open polynomials at 1 or 2 points.") + }; quotient.padded(quotient.degree_plus_one().next_power_of_two()) } @@ -253,21 +273,26 @@ impl, const D: usize> OpeningProof { } } +pub struct OpeningProofTarget { + fri_proof: FriProofTarget, +} + #[cfg(test)] mod tests { use anyhow::Result; - use rand::Rng; use super::*; + use crate::circuit_data::CircuitConfig; + use crate::plonk_common::PlonkPolynomials; fn gen_random_test_case, const D: usize>( k: usize, degree_log: usize, - ) -> Vec> { + ) -> Vec> { let degree = 1 << degree_log; (0..k) - .map(|_| PolynomialCoeffs::new(F::rand_vec(degree))) + .map(|_| PolynomialValues::new(F::rand_vec(degree))) .collect() } @@ -284,44 +309,47 @@ mod tests { point } - fn gen_random_blindings() -> Vec { - let mut rng = rand::thread_rng(); - vec![ - rng.gen_bool(0.5), - rng.gen_bool(0.5), - rng.gen_bool(0.5), - rng.gen_bool(0.5), - rng.gen_bool(0.5), - ] - } - fn check_batch_polynomial_commitment, const D: usize>() -> Result<()> { - let ks = [1, 2, 3, 5, 8]; + let ks = [10, 2, 3, 8]; let degree_log = 11; let fri_config = FriConfig { proof_of_work_bits: 2, rate_bits: 2, reduction_arity_bits: vec![2, 3, 1, 2], num_query_rounds: 3, - blinding: gen_random_blindings(), + }; + // We only care about `fri_config, num_constants`, and `num_routed_wires` here. 
+ let common_data = CommonCircuitData { + config: CircuitConfig { + fri_config, + num_routed_wires: 6, + ..CircuitConfig::large_config() + }, + degree_bits: 0, + gates: vec![], + max_filtered_constraint_degree_bits: 0, + num_gate_constraints: 0, + num_constants: 4, + k_is: vec![F::ONE; 6], + circuit_digest: Hash::from_partial(vec![]), }; - let lpcs = (0..5) + let lpcs = (0..4) .map(|i| { ListPolynomialCommitment::::new( gen_random_test_case(ks[i], degree_log), - fri_config.rate_bits, - fri_config.blinding[i], + common_data.config.fri_config.rate_bits, + PlonkPolynomials::polynomials(i).blinding, ) }) .collect::>(); let zeta = gen_random_point::(degree_log); let (proof, os) = ListPolynomialCommitment::open_plonk::( - &[&lpcs[0], &lpcs[1], &lpcs[2], &lpcs[3], &lpcs[4]], + &[&lpcs[0], &lpcs[1], &lpcs[2], &lpcs[3]], zeta, &mut Challenger::new(), - &fri_config, + &common_data, ); proof.verify( @@ -332,10 +360,9 @@ mod tests { lpcs[1].merkle_tree.root, lpcs[2].merkle_tree.root, lpcs[3].merkle_tree.root, - lpcs[4].merkle_tree.root, ], &mut Challenger::new(), - &fri_config, + &common_data.config.fri_config, ) } diff --git a/src/polynomial/division.rs b/src/polynomial/division.rs index b74fd00f..50e1f8a6 100644 --- a/src/polynomial/division.rs +++ b/src/polynomial/division.rs @@ -26,7 +26,7 @@ impl PolynomialCoeffs { .to_vec() .into(); let mut q = rev_q.rev(); - let mut qb = &q * b; + let qb = &q * b; let mut r = self - &qb; q.trim(); r.trim(); @@ -59,8 +59,7 @@ impl PolynomialCoeffs { quotient.coeffs[cur_q_degree] = cur_q_coeff; for (i, &div_coeff) in b.coeffs.iter().enumerate() { - remainder.coeffs[cur_q_degree + i] = - remainder.coeffs[cur_q_degree + i] - (cur_q_coeff * div_coeff); + remainder.coeffs[cur_q_degree + i] -= cur_q_coeff * div_coeff; } remainder.trim(); } @@ -97,7 +96,7 @@ impl PolynomialCoeffs { let denominators = (0..a_eval.len()) .map(|i| { if i != 0 { - root_pow = root_pow * root_n; + root_pow *= root_n; } denominator_g * root_pow - F::ONE }) @@ -125,8 +124,25 @@ impl PolynomialCoeffs { p } + /// Let `self=p(X)`, this returns `(p(X)-p(z))/(X-z)` and `p(z)`. + /// See https://en.wikipedia.org/wiki/Horner%27s_method + pub(crate) fn divide_by_linear(&self, z: F) -> (PolynomialCoeffs, F) { + let mut bs = self + .coeffs + .iter() + .rev() + .scan(F::ZERO, |acc, &c| { + *acc = *acc * z + c; + Some(*acc) + }) + .collect::>(); + let ev = bs.pop().unwrap_or(F::ZERO); + bs.reverse(); + (Self { coeffs: bs }, ev) + } + /// Computes the inverse of `self` modulo `x^n`. 
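The `divide_by_linear` helper added above is synthetic division: a single Horner pass produces both the quotient by `X - z` and the evaluation `p(z)`. A minimal standalone sketch of the same scan-and-pop structure, using plain `i128` coefficients rather than the crate's field types:

```rust
/// Synthetic (Horner) division of p by (X - z), little-endian coefficients:
/// returns the quotient's coefficients and the evaluation p(z).
fn divide_by_linear(p: &[i128], z: i128) -> (Vec<i128>, i128) {
    let mut bs: Vec<i128> = p
        .iter()
        .rev()
        .scan(0i128, |acc, &c| {
            *acc = *acc * z + c;
            Some(*acc)
        })
        .collect();
    let eval = bs.pop().unwrap_or(0); // the final running value is p(z)
    bs.reverse();
    (bs, eval)
}

fn main() {
    // p(X) = 5 - 3X + 2X^2 + X^3, divided by (X - 4).
    let p = [5i128, -3, 2, 1];
    let z = 4i128;
    let (q, ev) = divide_by_linear(&p, z);

    // p(4) = 5 - 12 + 32 + 64 = 89.
    assert_eq!(ev, 89);

    // Check p(X) = q(X) * (X - z) + p(z), coefficient by coefficient.
    let mut recombined = vec![0i128; p.len()];
    for (i, &c) in q.iter().enumerate() {
        recombined[i] -= c * z; // contribution of q(X) * (-z)
        recombined[i + 1] += c; // contribution of q(X) * X
    }
    recombined[0] += ev;
    assert_eq!(recombined, p);
    println!("quotient = {:?}, p(z) = {}", q, ev);
}
```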
- pub(crate) fn inv_mod_xn(&self, n: usize) -> Self { + pub fn inv_mod_xn(&self, n: usize) -> Self { assert!(self.coeffs[0].is_nonzero(), "Inverse doesn't exist."); let h = if self.len() < n { @@ -166,7 +182,10 @@ impl PolynomialCoeffs { #[cfg(test)] mod tests { + use std::time::Instant; + use crate::field::crandall_field::CrandallField; + use crate::field::extension_field::quartic::QuarticCrandallField; use crate::field::field::Field; use crate::polynomial::polynomial::PolynomialCoeffs; @@ -199,4 +218,49 @@ mod tests { let computed_q = a.divide_by_z_h(4); assert_eq!(computed_q, q); } + + #[test] + #[ignore] + fn test_division_by_linear() { + type F = QuarticCrandallField; + let n = 1_000_000; + let poly = PolynomialCoeffs::new(F::rand_vec(n)); + let z = F::rand(); + let ev = poly.eval(z); + + let timer = Instant::now(); + let (quotient, ev2) = poly.div_rem(&PolynomialCoeffs::new(vec![-z, F::ONE])); + println!("{:.3}s for usual", timer.elapsed().as_secs_f32()); + assert_eq!(ev2.trimmed().coeffs, vec![ev]); + + let timer = Instant::now(); + let (quotient, ev3) = poly.div_rem_long_division(&PolynomialCoeffs::new(vec![-z, F::ONE])); + println!("{:.3}s for long division", timer.elapsed().as_secs_f32()); + assert_eq!(ev3.trimmed().coeffs, vec![ev]); + + let timer = Instant::now(); + let horn = poly.divide_by_linear(z); + println!("{:.3}s for Horner", timer.elapsed().as_secs_f32()); + assert_eq!((quotient, ev), horn); + } + + #[test] + #[ignore] + fn test_division_by_quadratic() { + type F = QuarticCrandallField; + let n = 1_000_000; + let poly = PolynomialCoeffs::new(F::rand_vec(n)); + let quad = PolynomialCoeffs::new(F::rand_vec(2)); + + let timer = Instant::now(); + let (quotient0, rem0) = poly.div_rem(&quad); + println!("{:.3}s for usual", timer.elapsed().as_secs_f32()); + + let timer = Instant::now(); + let (quotient1, rem1) = poly.div_rem_long_division(&quad); + println!("{:.3}s for long division", timer.elapsed().as_secs_f32()); + + assert_eq!(quotient0.trimmed(), quotient1.trimmed()); + assert_eq!(rem0.trimmed(), rem1.trimmed()); + } } diff --git a/src/polynomial/polynomial.rs b/src/polynomial/polynomial.rs index 9f605051..888d7af0 100644 --- a/src/polynomial/polynomial.rs +++ b/src/polynomial/polynomial.rs @@ -1,6 +1,8 @@ use std::cmp::max; use std::iter::Sum; -use std::ops::{Add, Mul, Sub}; +use std::ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign}; + +use anyhow::{ensure, Result}; use crate::field::extension_field::Extendable; use crate::field::fft::{fft, ifft}; @@ -34,6 +36,19 @@ impl PolynomialValues { ifft(self) } + /// Returns the polynomial whose evaluation on the coset `shift*H` is `self`. 
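The `coset_ifft` introduced below relies on the identity `p(shift * X) = sum_i (c_i * shift^i) X^i`: the values of `p` on the coset `shift*H` are ordinary evaluations on `H` of the coefficient-scaled polynomial, so the inverse transform is a plain IFFT followed by multiplying coefficient `i` by `shift^-i`. A small integer-only sketch of that identity (toy `eval`, not the crate's API):

```rust
fn eval(p: &[i128], x: i128) -> i128 {
    p.iter().rev().fold(0, |acc, &c| acc * x + c)
}

fn main() {
    let p = [7i128, -2, 5, 3]; // p(X) = 7 - 2X + 5X^2 + 3X^3
    let shift = 3i128;

    // q(X) = p(shift * X): coefficient i gets multiplied by shift^i.
    let q: Vec<i128> = p
        .iter()
        .scan(1i128, |pow, &c| {
            let scaled = c * *pow;
            *pow *= shift;
            Some(scaled)
        })
        .collect();

    // p(shift * x) == q(x) for every x, so an FFT of q over H yields p's values on shift*H.
    for x in -5..=5 {
        assert_eq!(eval(&p, shift * x), eval(&q, x));
    }

    // Undoing the scaling (coefficient i times shift^-i in the field) recovers p;
    // over the integers we check the exact divisions instead.
    let recovered: Vec<i128> = q
        .iter()
        .scan(1i128, |pow, &c| {
            let orig = c / *pow;
            *pow *= shift;
            Some(orig)
        })
        .collect();
    assert_eq!(recovered, p);
    println!("scaled coefficients: {:?}", q);
}
```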
+ pub fn coset_ifft(self, shift: F) -> PolynomialCoeffs { + let mut shifted_coeffs = self.ifft(); + shifted_coeffs + .coeffs + .iter_mut() + .zip(shift.inverse().powers()) + .for_each(|(c, r)| { + *c *= r; + }); + shifted_coeffs + } + pub fn lde_multiple(polys: Vec, rate_bits: usize) -> Vec { polys.into_iter().map(|p| p.lde(rate_bits)).collect() } @@ -127,11 +142,21 @@ impl PolynomialCoeffs { self.padded(self.len() << rate_bits) } + pub(crate) fn pad(&mut self, new_len: usize) -> Result<()> { + ensure!( + new_len >= self.len(), + "Trying to pad a polynomial of length {} to a length of {}.", + self.len(), + new_len + ); + self.coeffs.resize(new_len, F::ZERO); + Ok(()) + } + pub(crate) fn padded(&self, new_len: usize) -> Self { - assert!(new_len >= self.len()); - let mut coeffs = self.coeffs.clone(); - coeffs.resize(new_len, F::ZERO); - Self { coeffs } + let mut poly = self.clone(); + poly.pad(new_len).unwrap(); + poly } /// Removes leading zero coefficients. @@ -171,6 +196,7 @@ impl PolynomialCoeffs { fft(self) } + /// Returns the evaluation of the polynomial on the coset `shift*H`. pub fn coset_fft(self, shift: F) -> PolynomialValues { let modified_poly: Self = shift .powers() @@ -243,6 +269,46 @@ impl Sub for &PolynomialCoeffs { } } +impl AddAssign for PolynomialCoeffs { + fn add_assign(&mut self, rhs: Self) { + let len = max(self.len(), rhs.len()); + self.coeffs.resize(len, F::ZERO); + for (l, r) in self.coeffs.iter_mut().zip(rhs.coeffs) { + *l += r; + } + } +} + +impl AddAssign<&Self> for PolynomialCoeffs { + fn add_assign(&mut self, rhs: &Self) { + let len = max(self.len(), rhs.len()); + self.coeffs.resize(len, F::ZERO); + for (l, &r) in self.coeffs.iter_mut().zip(&rhs.coeffs) { + *l += r; + } + } +} + +impl SubAssign for PolynomialCoeffs { + fn sub_assign(&mut self, rhs: Self) { + let len = max(self.len(), rhs.len()); + self.coeffs.resize(len, F::ZERO); + for (l, r) in self.coeffs.iter_mut().zip(rhs.coeffs) { + *l -= r; + } + } +} + +impl SubAssign<&Self> for PolynomialCoeffs { + fn sub_assign(&mut self, rhs: &Self) { + let len = max(self.len(), rhs.len()); + self.coeffs.resize(len, F::ZERO); + for (l, &r) in self.coeffs.iter_mut().zip(&rhs.coeffs) { + *l -= r; + } + } +} + impl Mul for &PolynomialCoeffs { type Output = PolynomialCoeffs; @@ -252,6 +318,12 @@ impl Mul for &PolynomialCoeffs { } } +impl MulAssign for PolynomialCoeffs { + fn mul_assign(&mut self, rhs: F) { + self.coeffs.iter_mut().for_each(|x| *x *= rhs); + } +} + impl Mul for &PolynomialCoeffs { type Output = PolynomialCoeffs; @@ -323,8 +395,31 @@ mod tests { .into_iter() .map(|x| poly.eval(x)) .collect::>(); - assert_eq!(coset_evals, naive_coset_evals); + + let ifft_coeffs = PolynomialValues::new(coset_evals).coset_ifft(shift); + assert_eq!(poly, ifft_coeffs.into()); + } + + #[test] + fn test_coset_ifft() { + type F = CrandallField; + + let k = 8; + let n = 1 << k; + let evals = PolynomialValues::new(F::rand_vec(n)); + let shift = F::rand(); + let coeffs = evals.clone().coset_ifft(shift); + + let generator = F::primitive_root_of_unity(k); + let naive_coset_evals = F::cyclic_subgroup_coset_known_order(generator, shift, n) + .into_iter() + .map(|x| coeffs.eval(x)) + .collect::>(); + assert_eq!(evals, naive_coset_evals.into()); + + let fft_evals = coeffs.coset_fft(shift); + assert_eq!(evals, fft_evals); } #[test] diff --git a/src/proof.rs b/src/proof.rs index b1772c45..47b67c9c 100644 --- a/src/proof.rs +++ b/src/proof.rs @@ -1,10 +1,13 @@ use std::convert::TryInto; +use crate::circuit_data::CommonCircuitData; +use 
crate::field::extension_field::target::ExtensionTarget; use crate::field::extension_field::Extendable; use crate::field::field::Field; -use crate::fri::FriConfig; +use crate::gadgets::polynomial::PolynomialCoeffsExtTarget; use crate::merkle_proofs::{MerkleProof, MerkleProofTarget}; -use crate::polynomial::commitment::{ListPolynomialCommitment, OpeningProof}; +use crate::plonk_common::PolynomialsIndexBlinding; +use crate::polynomial::commitment::{ListPolynomialCommitment, OpeningProof, OpeningProofTarget}; use crate::polynomial::polynomial::PolynomialCoeffs; use crate::target::Target; @@ -34,6 +37,7 @@ impl Hash { } /// Represents a ~256 bit hash output. +#[derive(Copy, Clone, Debug)] pub struct HashTarget { pub(crate) elements: [Target; 4], } @@ -64,57 +68,66 @@ pub struct Proof, const D: usize> { pub plonk_zs_root: Hash, /// Merkle root of LDEs of the quotient polynomial components. pub quotient_polys_root: Hash, - /// Purported values of each polynomial at the challenge point. pub openings: OpeningSet, - /// A FRI argument for each FRI query. pub opening_proof: OpeningProof, } -pub struct ProofTarget { - /// Merkle root of LDEs of wire values. +pub struct ProofTarget { pub wires_root: HashTarget, - /// Merkle root of LDEs of Z, in the context of Plonk's permutation argument. pub plonk_zs_root: HashTarget, - /// Merkle root of LDEs of the quotient polynomial components. pub quotient_polys_root: HashTarget, - - /// Purported values of each polynomial at each challenge point. - pub openings: Vec, - - /// A FRI argument for each FRI query. - pub fri_proofs: Vec, + pub openings: Vec>, + pub opening_proof: Vec>, } /// Evaluations and Merkle proof produced by the prover in a FRI query step. -// TODO: Implement FriQueryStepTarget pub struct FriQueryStep, const D: usize> { pub evals: Vec, pub merkle_proof: MerkleProof, } +pub struct FriQueryStepTarget { + pub evals: Vec>, + pub merkle_proof: MerkleProofTarget, +} + /// Evaluations and Merkle proofs of the original set of polynomials, /// before they are combined into a composition polynomial. -// TODO: Implement FriInitialTreeProofTarget pub struct FriInitialTreeProof { pub evals_proofs: Vec<(Vec, MerkleProof)>, } impl FriInitialTreeProof { - pub(crate) fn unsalted_evals(&self, i: usize, config: &FriConfig) -> &[F] { - let evals = &self.evals_proofs[i].0; - &evals[..evals.len() - config.salt_size(i)] + pub(crate) fn unsalted_evals(&self, polynomials: PolynomialsIndexBlinding) -> &[F] { + let evals = &self.evals_proofs[polynomials.index].0; + &evals[..evals.len() - polynomials.salt_size()] + } +} + +pub struct FriInitialTreeProofTarget { + pub evals_proofs: Vec<(Vec, MerkleProofTarget)>, +} + +impl FriInitialTreeProofTarget { + pub(crate) fn unsalted_evals(&self, polynomials: PolynomialsIndexBlinding) -> &[Target] { + let evals = &self.evals_proofs[polynomials.index].0; + &evals[..evals.len() - polynomials.salt_size()] } } /// Proof for a FRI query round. -// TODO: Implement FriQueryRoundTarget pub struct FriQueryRound, const D: usize> { pub initial_trees_proof: FriInitialTreeProof, pub steps: Vec>, } +pub struct FriQueryRoundTarget { + pub initial_trees_proof: FriInitialTreeProofTarget, + pub steps: Vec>, +} + pub struct FriProof, const D: usize> { /// A Merkle root for each reduced polynomial in the commit phase. pub commit_phase_merkle_roots: Vec>, @@ -126,18 +139,14 @@ pub struct FriProof, const D: usize> { pub pow_witness: F, } -/// Represents a single FRI query, i.e. a path through the reduction tree. 
-pub struct FriProofTarget { - /// A Merkle root for each reduced polynomial in the commit phase. +pub struct FriProofTarget { pub commit_phase_merkle_roots: Vec, - /// Merkle proofs for the original purported codewords, i.e. the subject of the LDT. - pub initial_merkle_proofs: Vec, - /// Merkle proofs for the reduced polynomials that were sent in the commit phase. - pub intermediate_merkle_proofs: Vec, - /// The final polynomial in coefficient form. - pub final_poly: Vec, + pub query_round_proofs: Vec>, + pub final_poly: PolynomialCoeffsExtTarget, + pub pow_witness: Target, } +#[derive(Clone, Debug)] /// The purported values of each polynomial at a single point. pub struct OpeningSet, const D: usize> { pub constants: Vec, @@ -152,11 +161,11 @@ impl, const D: usize> OpeningSet { pub fn new( z: F::Extension, g: F::Extension, - constant_commitment: &ListPolynomialCommitment, - plonk_sigmas_commitment: &ListPolynomialCommitment, + constants_sigmas_commitment: &ListPolynomialCommitment, wires_commitment: &ListPolynomialCommitment, plonk_zs_commitment: &ListPolynomialCommitment, quotient_polys_commitment: &ListPolynomialCommitment, + common_data: &CommonCircuitData, ) -> Self { let eval_commitment = |z: F::Extension, c: &ListPolynomialCommitment| { c.polynomials @@ -164,9 +173,10 @@ impl, const D: usize> OpeningSet { .map(|p| p.to_extension().eval(z)) .collect::>() }; + let constants_sigmas_eval = eval_commitment(z, constants_sigmas_commitment); Self { - constants: eval_commitment(z, constant_commitment), - plonk_s_sigmas: eval_commitment(z, plonk_sigmas_commitment), + constants: constants_sigmas_eval[common_data.constants_range()].to_vec(), + plonk_s_sigmas: constants_sigmas_eval[common_data.sigmas_range()].to_vec(), wires: eval_commitment(z, wires_commitment), plonk_zs: eval_commitment(z, plonk_zs_commitment), plonk_zs_right: eval_commitment(g * z, plonk_zs_commitment), @@ -176,10 +186,11 @@ impl, const D: usize> OpeningSet { } /// The purported values of each polynomial at a single point. -pub struct OpeningSetTarget { - pub constants: Vec, - pub plonk_sigmas: Vec, - pub wires: Vec, - pub plonk_zs: Vec, - pub quotient_polys: Vec, +pub struct OpeningSetTarget { + pub constants: Vec>, + pub plonk_sigmas: Vec>, + pub wires: Vec>, + pub plonk_zs: Vec>, + pub plonk_zs_right: Vec>, + pub quotient_polys: Vec>, } diff --git a/src/prover.rs b/src/prover.rs index 7a492eaa..e8cd7f46 100644 --- a/src/prover.rs +++ b/src/prover.rs @@ -5,49 +5,55 @@ use rayon::prelude::*; use crate::circuit_data::{CommonCircuitData, ProverOnlyCircuitData}; use crate::field::extension_field::Extendable; -use crate::field::fft::ifft; -use crate::field::field::Field; use crate::generator::generate_partial_witness; use crate::plonk_challenger::Challenger; -use crate::plonk_common::eval_vanishing_poly_base; +use crate::plonk_common::{eval_vanishing_poly_base, PlonkPolynomials, ZeroPolyOnCoset}; use crate::polynomial::commitment::ListPolynomialCommitment; use crate::polynomial::polynomial::{PolynomialCoeffs, PolynomialValues}; use crate::proof::Proof; use crate::timed; use crate::util::transpose; use crate::vars::EvaluationVarsBase; -use crate::wire::Wire; -use crate::witness::PartialWitness; - -/// Corresponds to constants - sigmas - wires - zs - quotient — polynomial commitments. 
-pub const PLONK_BLINDING: [bool; 5] = [false, false, true, true, true]; +use crate::witness::{PartialWitness, Witness}; pub(crate) fn prove, const D: usize>( - prover_data: &ProverOnlyCircuitData, + prover_data: &ProverOnlyCircuitData, common_data: &CommonCircuitData, inputs: PartialWitness, ) -> Proof { let fri_config = &common_data.config.fri_config; - - let start_proof_gen = Instant::now(); - - let mut witness = inputs; - info!("Running {} generators", prover_data.generators.len()); - timed!( - generate_partial_witness(&mut witness, &prover_data.generators), - "to generate witness" - ); - let config = &common_data.config; let num_wires = config.num_wires; let num_challenges = config.num_challenges; let quotient_degree = common_data.quotient_degree(); - let degree = common_data.degree(); - let wires_polynomials: Vec> = timed!( - (0..num_wires) - .into_par_iter() - .map(|i| compute_wire_polynomial(i, &witness, degree)) + + let start_proof_gen = Instant::now(); + + let mut partial_witness = inputs; + info!("Running {} generators", prover_data.generators.len()); + timed!( + generate_partial_witness(&mut partial_witness, &prover_data.generators), + "to generate witness" + ); + + let witness = timed!( + partial_witness.full_witness(degree, num_wires), + "to compute full witness" + ); + + timed!( + witness + .check_copy_constraints(&prover_data.copy_constraints, &prover_data.gate_instances) + .unwrap(), // TODO: Change return value to `Result` and use `?` here. + "to check copy constraints" + ); + + let wires_values: Vec> = timed!( + witness + .wire_values + .iter() + .map(|column| PolynomialValues::new(column.clone())) .collect(), "to compute wire polynomials" ); @@ -55,7 +61,11 @@ pub(crate) fn prove, const D: usize>( // TODO: Could try parallelizing the transpose, or not doing it explicitly, instead having // merkle_root_bit_rev_order do it implicitly. let wires_commitment = timed!( - ListPolynomialCommitment::new(wires_polynomials, fri_config.rate_bits, true), + ListPolynomialCommitment::new( + wires_values, + fri_config.rate_bits, + PlonkPolynomials::WIRES.blinding + ), "to compute wires commitment" ); @@ -68,10 +78,17 @@ pub(crate) fn prove, const D: usize>( let betas = challenger.get_n_challenges(num_challenges); let gammas = challenger.get_n_challenges(num_challenges); - let plonk_z_vecs = timed!(compute_zs(&common_data), "to compute Z's"); + let plonk_z_vecs = timed!( + compute_zs(&witness, &betas, &gammas, prover_data, common_data), + "to compute Z's" + ); let plonk_zs_commitment = timed!( - ListPolynomialCommitment::new(plonk_z_vecs, fri_config.rate_bits, true), + ListPolynomialCommitment::new( + plonk_z_vecs, + fri_config.rate_bits, + PlonkPolynomials::ZS.blinding + ), "to commit to Z's" ); @@ -79,8 +96,8 @@ pub(crate) fn prove, const D: usize>( let alphas = challenger.get_n_challenges(num_challenges); - let vanishing_polys = timed!( - compute_vanishing_polys( + let quotient_polys = timed!( + compute_quotient_polys( common_data, prover_data, &wires_commitment, @@ -94,20 +111,27 @@ pub(crate) fn prove, const D: usize>( // Compute the quotient polynomials, aka `t` in the Plonk paper. 
let all_quotient_poly_chunks = timed!( - vanishing_polys + quotient_polys .into_par_iter() - .flat_map(|vanishing_poly| { - let vanishing_poly_coeff = ifft(vanishing_poly); - let quotient_poly_coeff = vanishing_poly_coeff.divide_by_z_h(degree); + .flat_map(|mut quotient_poly| { + quotient_poly.trim(); + quotient_poly.pad(quotient_degree).expect( + "The quotient polynomial doesn't have the right degree.\ + This may be because the `Z`s polynomials are still too high degree.", + ); // Split t into degree-n chunks. - quotient_poly_coeff.chunks(degree) + quotient_poly.chunks(degree) }) .collect(), "to compute quotient polys" ); let quotient_polys_commitment = timed!( - ListPolynomialCommitment::new(all_quotient_poly_chunks, fri_config.rate_bits, true), + ListPolynomialCommitment::new_from_polys( + all_quotient_poly_chunks, + fri_config.rate_bits, + PlonkPolynomials::QUOTIENT.blinding + ), "to commit to quotient polys" ); @@ -115,18 +139,17 @@ pub(crate) fn prove, const D: usize>( let zeta = challenger.get_extension_challenge(); - let (opening_proof, mut openings) = timed!( + let (opening_proof, openings) = timed!( ListPolynomialCommitment::open_plonk( &[ - &prover_data.constants_commitment, - &prover_data.sigmas_commitment, + &prover_data.constants_sigmas_commitment, &wires_commitment, &plonk_zs_commitment, "ient_polys_commitment, ], zeta, &mut challenger, - &common_data.config.fri_config + common_data, ), "to compute opening proofs" ); @@ -146,44 +169,97 @@ pub(crate) fn prove, const D: usize>( } fn compute_zs, const D: usize>( + witness: &Witness, + betas: &[F], + gammas: &[F], + prover_data: &ProverOnlyCircuitData, common_data: &CommonCircuitData, -) -> Vec> { +) -> Vec> { (0..common_data.config.num_challenges) - .map(|i| compute_z(common_data, i)) + .map(|i| compute_z(witness, betas[i], gammas[i], prover_data, common_data)) .collect() } fn compute_z, const D: usize>( + witness: &Witness, + beta: F, + gamma: F, + prover_data: &ProverOnlyCircuitData, common_data: &CommonCircuitData, - _i: usize, -) -> PolynomialCoeffs { - PolynomialCoeffs::zero(common_data.degree()) // TODO +) -> PolynomialValues { + let subgroup = &prover_data.subgroup; + let mut plonk_z_points = vec![F::ONE]; + let k_is = &common_data.k_is; + for i in 1..common_data.degree() { + let x = subgroup[i - 1]; + let mut numerator = F::ONE; + let mut denominator = F::ONE; + let s_sigmas = &prover_data.sigmas[i - 1]; + for j in 0..common_data.config.num_routed_wires { + let wire_value = witness.get_wire(i - 1, j); + let k_i = k_is[j]; + let s_id = k_i * x; + let s_sigma = s_sigmas[j]; + numerator *= wire_value + beta * s_id + gamma; + denominator *= wire_value + beta * s_sigma + gamma; + } + let last = *plonk_z_points.last().unwrap(); + plonk_z_points.push(last * numerator / denominator); + } + plonk_z_points.into() } -fn compute_vanishing_polys, const D: usize>( +fn compute_quotient_polys<'a, F: Extendable, const D: usize>( common_data: &CommonCircuitData, - prover_data: &ProverOnlyCircuitData, - wires_commitment: &ListPolynomialCommitment, - plonk_zs_commitment: &ListPolynomialCommitment, + prover_data: &'a ProverOnlyCircuitData, + wires_commitment: &'a ListPolynomialCommitment, + plonk_zs_commitment: &'a ListPolynomialCommitment, betas: &[F], gammas: &[F], alphas: &[F], -) -> Vec> { - let lde_size = common_data.lde_size(); - let lde_gen = common_data.lde_generator(); +) -> Vec> { let num_challenges = common_data.config.num_challenges; + assert!( + common_data.max_filtered_constraint_degree_bits <= common_data.config.rate_bits, + 
"Having constraints of degree higher than the rate is not supported yet. \ + If we need this in the future, we can precompute the larger LDE before computing the `ListPolynomialCommitment`s." + ); - let points = F::cyclic_subgroup_known_order(lde_gen, lde_size); - let values: Vec> = points + // We reuse the LDE computed in `ListPolynomialCommitment` and extract every `step` points to get + // an LDE matching `max_filtered_constraint_degree`. + let step = + 1 << (common_data.config.rate_bits - common_data.max_filtered_constraint_degree_bits); + // When opening the `Z`s polys at the "next" point in Plonk, need to look at the point `next_step` + // steps away since we work on an LDE of degree `max_filtered_constraint_degree`. + let next_step = 1 << common_data.max_filtered_constraint_degree_bits; + + let points = F::two_adic_subgroup( + common_data.degree_bits + common_data.max_filtered_constraint_degree_bits, + ); + let lde_size = points.len(); + + // Retrieve the LDE values at index `i`. + let get_at_index = |comm: &'a ListPolynomialCommitment, i: usize| -> &'a [F] { + comm.get_lde_values(i * step) + }; + + let z_h_on_coset = ZeroPolyOnCoset::new( + common_data.degree_bits, + common_data.max_filtered_constraint_degree_bits, + ); + + let quotient_values: Vec> = points .into_par_iter() .enumerate() .map(|(i, x)| { - let i_next = (i + 1) % lde_size; - let local_wires = wires_commitment.leaf(i); - let local_constants = prover_data.constants_commitment.leaf(i); - let local_plonk_zs = plonk_zs_commitment.leaf(i); - let next_plonk_zs = plonk_zs_commitment.leaf(i_next); - let s_sigmas = prover_data.sigmas_commitment.leaf(i); + let shifted_x = F::coset_shift() * x; + let i_next = (i + next_step) % lde_size; + let local_constants_sigmas = get_at_index(&prover_data.constants_sigmas_commitment, i); + let local_constants = &local_constants_sigmas[common_data.constants_range()]; + let s_sigmas = &local_constants_sigmas[common_data.sigmas_range()]; + let local_wires = get_at_index(wires_commitment, i); + let local_plonk_zs = get_at_index(plonk_zs_commitment, i); + let next_plonk_zs = get_at_index(plonk_zs_commitment, i_next); debug_assert_eq!(local_wires.len(), common_data.config.num_wires); debug_assert_eq!(local_plonk_zs.len(), num_challenges); @@ -192,9 +268,10 @@ fn compute_vanishing_polys, const D: usize>( local_constants, local_wires, }; - eval_vanishing_poly_base( + let mut quotient_values = eval_vanishing_poly_base( common_data, - x, + i, + shifted_x, vars, local_plonk_zs, next_plonk_zs, @@ -202,31 +279,19 @@ fn compute_vanishing_polys, const D: usize>( betas, gammas, alphas, - ) + &z_h_on_coset, + ); + let denominator_inv = z_h_on_coset.eval_inverse(i); + quotient_values + .iter_mut() + .for_each(|v| *v *= denominator_inv); + quotient_values }) .collect(); - transpose(&values) - .into_iter() + transpose("ient_values) + .into_par_iter() .map(PolynomialValues::new) + .map(|values| values.coset_ifft(F::coset_shift())) .collect() } - -fn compute_wire_polynomial( - input: usize, - witness: &PartialWitness, - degree: usize, -) -> PolynomialCoeffs { - let wire_values = (0..degree) - // Some gates do not use all wires, and we do not require that generators populate unused - // wires, so some wire values will not be set. We can set these to any value; here we - // arbitrary pick zero. Ideally we would verify that no constraints operate on these unset - // wires, but that isn't trivial. 
- .map(|gate| { - witness - .try_get_wire(Wire { gate, input }) - .unwrap_or(F::ZERO) - }) - .collect(); - PolynomialValues::new(wire_values).ifft() -} diff --git a/src/recursive_verifier.rs b/src/recursive_verifier.rs index a9d37553..c1005a67 100644 --- a/src/recursive_verifier.rs +++ b/src/recursive_verifier.rs @@ -13,7 +13,7 @@ pub fn add_recursive_verifier, const D: usize>( inner_config: CircuitConfig, inner_circuit: VerifierCircuitTarget, inner_gates: Vec>, - inner_proof: ProofTarget, + inner_proof: ProofTarget, ) { assert!(builder.config.num_wires >= MIN_WIRES); assert!(builder.config.num_wires >= MIN_ROUTED_WIRES); diff --git a/src/target.rs b/src/target.rs index b5736564..8aec0b5a 100644 --- a/src/target.rs +++ b/src/target.rs @@ -1,3 +1,5 @@ +use std::ops::Range; + use crate::circuit_data::CircuitConfig; use crate::wire::Wire; @@ -21,4 +23,8 @@ impl Target { Target::VirtualAdviceTarget { .. } => false, } } + + pub fn wires_from_range(gate: usize, range: Range) -> Vec { + range.map(|i| Self::wire(gate, i)).collect() + } } diff --git a/src/util/mod.rs b/src/util/mod.rs index 7ab9bc5b..f901b0af 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -1,3 +1,4 @@ +pub mod scaling; pub(crate) mod timing; use crate::field::field::Field; @@ -7,7 +8,7 @@ pub(crate) fn bits_u64(n: u64) -> usize { (64 - n.leading_zeros()) as usize } -pub(crate) fn ceil_div_usize(a: usize, b: usize) -> usize { +pub(crate) const fn ceil_div_usize(a: usize, b: usize) -> usize { (a + b - 1) / b } diff --git a/src/util/scaling.rs b/src/util/scaling.rs new file mode 100644 index 00000000..cea86195 --- /dev/null +++ b/src/util/scaling.rs @@ -0,0 +1,75 @@ +use std::borrow::Borrow; + +use crate::field::extension_field::Frobenius; +use crate::field::field::Field; +use crate::polynomial::polynomial::PolynomialCoeffs; + +/// When verifying the composition polynomial in FRI, we have to compute sums of the form +/// `(sum_0^k a^i * x_i)/d_0 + (sum_k^r a^i * y_i)/d_1`. +/// The most efficient way to do this is to compute both quotients separately using Horner's method, +/// scale the second one by `a^(r-1-k)`, and add them up. +/// This struct abstracts away these operations by implementing Horner's method and keeping track +/// of the number of multiplications by `a` to compute the scaling factor. +/// See https://github.com/mir-protocol/plonky2/pull/69 for more details and discussions.
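The bookkeeping described in the doc comment above can be checked with a toy version: reduce two groups with a shared `alpha` while counting multiplications, then scale the first partial result by `alpha^count` before adding the second, and the result matches a single Horner pass over the concatenation (second group on the low powers). A plain-`i128` mock (hypothetical `Reducer`, not the crate's `ReducingFactor`):

```rust
/// Toy count-tracking reducer over i128, mimicking the shape of `ReducingFactor`.
struct Reducer {
    base: i128,
    count: u32,
}

impl Reducer {
    fn new(base: i128) -> Self {
        Self { base, count: 0 }
    }
    /// Horner reduction: returns sum_i base^i * xs[i], counting one multiplication per term.
    fn reduce(&mut self, xs: &[i128]) -> i128 {
        xs.iter().rev().fold(0, |acc, &x| {
            self.count += 1;
            acc * self.base + x
        })
    }
    /// Scales a previously accumulated value by base^count and resets the counter.
    fn shift(&mut self, x: i128) -> i128 {
        let shifted = x * self.base.pow(self.count);
        self.count = 0;
        shifted
    }
    fn reset(&mut self) {
        self.count = 0;
    }
}

fn main() {
    let alpha = 3i128;
    let group_a = [2i128, 7, -1]; // e.g. the polynomials opened at a single point
    let group_b = [4i128, 0, 5, 9]; // e.g. the Z polynomials

    let mut r = Reducer::new(alpha);

    // Stage 1: reduce group A and stash it, as the prover does with `final_poly`.
    let reduced_a = r.reduce(&group_a);
    r.reset();

    // Stage 2: reduce group B, then scale the stashed value by alpha^|B| before adding.
    let reduced_b = r.reduce(&group_b);
    let combined = r.shift(reduced_a) + reduced_b;

    // Same as one Horner pass over B followed by A (B takes the low powers of alpha).
    let mut all = group_b.to_vec();
    all.extend_from_slice(&group_a);
    let expected = all.iter().rev().fold(0i128, |acc, &x| acc * alpha + x);
    assert_eq!(combined, expected);
    println!("combined = {}", combined);
}
```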
+#[derive(Debug, Copy, Clone)] +pub struct ReducingFactor { + base: F, + count: u64, +} + +impl ReducingFactor { + pub fn new(base: F) -> Self { + Self { base, count: 0 } + } + + fn mul(&mut self, x: F) -> F { + self.count += 1; + self.base * x + } + + fn mul_poly(&mut self, p: &mut PolynomialCoeffs) { + self.count += 1; + *p *= self.base; + } + + pub fn reduce(&mut self, iter: impl DoubleEndedIterator>) -> F { + iter.rev() + .fold(F::ZERO, |acc, x| self.mul(acc) + *x.borrow()) + } + + pub fn reduce_polys( + &mut self, + polys: impl DoubleEndedIterator>>, + ) -> PolynomialCoeffs { + polys.rev().fold(PolynomialCoeffs::empty(), |mut acc, x| { + self.mul_poly(&mut acc); + acc += x.borrow(); + acc + }) + } + + pub fn shift(&mut self, x: F) -> F { + let tmp = self.base.exp(self.count) * x; + self.count = 0; + tmp + } + + pub fn shift_poly(&mut self, p: &mut PolynomialCoeffs) { + *p *= self.base.exp(self.count); + self.count = 0; + } + + pub fn reset(&mut self) { + self.count = 0; + } + + pub fn repeated_frobenius(&self, count: usize) -> Self + where + F: Frobenius, + { + Self { + base: self.base.repeated_frobenius(count), + count: self.count, + } + } +} diff --git a/src/vars.rs b/src/vars.rs index 74f15f23..7e9b372f 100644 --- a/src/vars.rs +++ b/src/vars.rs @@ -6,13 +6,13 @@ use crate::field::extension_field::target::{ExtensionAlgebraTarget, ExtensionTar use crate::field::extension_field::Extendable; use crate::field::field::Field; -#[derive(Copy, Clone)] +#[derive(Debug, Copy, Clone)] pub struct EvaluationVars<'a, F: Extendable, const D: usize> { pub(crate) local_constants: &'a [F::Extension], pub(crate) local_wires: &'a [F::Extension], } -#[derive(Copy, Clone)] +#[derive(Debug, Copy, Clone)] pub struct EvaluationVarsBase<'a, F: Field> { pub(crate) local_constants: &'a [F], pub(crate) local_wires: &'a [F], @@ -27,6 +27,16 @@ impl<'a, F: Extendable, const D: usize> EvaluationVars<'a, F, D> { let arr = self.local_wires[wire_range].try_into().unwrap(); ExtensionAlgebra::from_basefield_array(arr) } + + pub fn remove_prefix(&mut self, prefix: &[bool]) { + self.local_constants = &self.local_constants[prefix.len()..]; + } +} + +impl<'a, F: Field> EvaluationVarsBase<'a, F> { + pub fn remove_prefix(&mut self, prefix: &[bool]) { + self.local_constants = &self.local_constants[prefix.len()..]; + } } #[derive(Copy, Clone)] diff --git a/src/verifier.rs b/src/verifier.rs index 396f23c0..57bad7cf 100644 --- a/src/verifier.rs +++ b/src/verifier.rs @@ -55,25 +55,28 @@ pub(crate) fn verify, const D: usize>( ); // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta. 
- let quotient_polys_zeta = proof.openings.quotient_polys; + let quotient_polys_zeta = &proof.openings.quotient_polys; let z_h_zeta = eval_zero_poly(common_data.degree(), zeta); for i in 0..num_challenges { ensure!(vanishing_polys_zeta[i] == z_h_zeta * quotient_polys_zeta[i]); } - let evaluations = todo!(); + let evaluations = proof.openings.clone(); let merkle_roots = &[ - verifier_data.constants_root, - verifier_data.sigmas_root, + verifier_data.constants_sigmas_root, proof.wires_root, proof.plonk_zs_root, proof.quotient_polys_root, ]; - proof - .opening_proof - .verify(zeta, evaluations, merkle_roots, &mut challenger, fri_config)?; + proof.opening_proof.verify( + zeta, + &evaluations, + merkle_roots, + &mut challenger, + fri_config, + )?; Ok(()) } diff --git a/src/wire.rs b/src/wire.rs index 61b7f5be..f63a19c1 100644 --- a/src/wire.rs +++ b/src/wire.rs @@ -1,3 +1,5 @@ +use std::ops::Range; + use crate::circuit_data::CircuitConfig; /// Represents a wire in the circuit. @@ -13,4 +15,8 @@ impl Wire { pub fn is_routable(&self, config: &CircuitConfig) -> bool { self.input < config.num_routed_wires } + + pub fn from_range(gate: usize, range: Range) -> Vec { + range.map(|i| Wire { gate, input: i }).collect() + } } diff --git a/src/witness.rs b/src/witness.rs index a0b4b2a4..7294f6e4 100644 --- a/src/witness.rs +++ b/src/witness.rs @@ -1,10 +1,61 @@ use std::collections::HashMap; +use std::convert::TryInto; +use anyhow::{ensure, Result}; + +use crate::field::extension_field::target::ExtensionTarget; use crate::field::extension_field::{Extendable, FieldExtension}; use crate::field::field::Field; +use crate::gates::gate::GateInstance; use crate::target::Target; use crate::wire::Wire; +#[derive(Clone, Debug)] +pub struct Witness { + pub(crate) wire_values: Vec>, +} + +impl Witness { + pub fn get_wire(&self, gate: usize, input: usize) -> F { + self.wire_values[input][gate] + } + + /// Checks that the copy constraints are satisfied in the witness. + pub fn check_copy_constraints( + &self, + copy_constraints: &[(Target, Target)], + gate_instances: &[GateInstance], + ) -> Result<()> + where + F: Extendable, + { + for &(a, b) in copy_constraints { + // TODO: Take care of public inputs once they land. + if let ( + Target::Wire(Wire { + gate: a_gate, + input: a_input, + }), + Target::Wire(Wire { + gate: b_gate, + input: b_input, + }), + ) = (a, b) + { + let va = self.get_wire(a_gate, a_input); + let vb = self.get_wire(b_gate, b_input); + ensure!( + va == vb, + "Copy constraint between wire {} of gate #{} (`{}`) and wire {} of gate #{} (`{}`) is not satisfied. 
\ + Got values of {} and {} respectively.", + a_input, a_gate, gate_instances[a_gate].gate_type.0.id(), b_input, b_gate, + gate_instances[b_gate].gate_type.0.id(), va, vb); + } + } + Ok(()) + } +} + #[derive(Clone, Debug)] pub struct PartialWitness { pub(crate) target_values: HashMap, @@ -39,6 +90,15 @@ impl PartialWitness { targets.iter().map(|&t| self.get_target(t)).collect() } + pub fn get_extension_target(&self, et: ExtensionTarget) -> F::Extension + where + F: Extendable, + { + F::Extension::from_basefield_array( + self.get_targets(&et.to_target_array()).try_into().unwrap(), + ) + } + pub fn try_get_target(&self, target: Target) -> Option { self.target_values.get(&target).cloned() } @@ -70,6 +130,19 @@ impl PartialWitness { } } + pub fn set_extension_target( + &mut self, + et: ExtensionTarget, + value: F::Extension, + ) where + F: Extendable, + { + let limbs = value.to_basefield_array(); + (0..D).for_each(|i| { + self.set_target(et.0[i], limbs[i]); + }); + } + pub fn set_wire(&mut self, wire: Wire, value: F) { self.set_target(Target::Wire(wire), value) } @@ -97,6 +170,16 @@ impl PartialWitness { self.set_target(target, value); } } + + pub fn full_witness(self, degree: usize, num_wires: usize) -> Witness { + let mut wire_values = vec![vec![F::ZERO; degree]; num_wires]; + self.target_values.into_iter().for_each(|(t, v)| { + if let Target::Wire(Wire { gate, input }) = t { + wire_values[input][gate] = v; + } + }); + Witness { wire_values } + } } impl Default for PartialWitness {