Merge branch 'main' into permutation_argument

# Conflicts:
#	src/circuit_builder.rs
#	src/circuit_data.rs
#	src/polynomial/commitment.rs
#	src/prover.rs
#	src/witness.rs
wborgeaud 2021-06-24 10:51:18 +02:00
commit ef7561fc84
24 changed files with 448 additions and 185 deletions

View File

@ -8,7 +8,6 @@ use plonky2::fri::FriConfig;
use plonky2::gates::constant::ConstantGate;
use plonky2::gates::gmimc::GMiMCGate;
use plonky2::hash::GMIMC_ROUNDS;
use plonky2::prover::PLONK_BLINDING;
use plonky2::witness::PartialWitness;
fn main() {
@ -41,7 +40,6 @@ fn bench_prove<F: Field + Extendable<D>, const D: usize>() {
rate_bits: 3,
reduction_arity_bits: vec![1],
num_query_rounds: 1,
blinding: PLONK_BLINDING.to_vec(),
},
};

View File

@ -312,6 +312,8 @@ impl<F: Extendable<D>, const D: usize> CircuitBuilder<F, D> {
constants_commitment,
sigmas_commitment,
subgroup,
copy_constraints: self.copy_constraints,
gate_instances: self.gate_instances,
};
// The HashSet of gates will have a non-deterministic order. When converting to a Vec, we

View File

@ -3,11 +3,12 @@ use anyhow::Result;
use crate::field::extension_field::Extendable;
use crate::field::field::Field;
use crate::fri::FriConfig;
use crate::gates::gate::GateRef;
use crate::gates::gate::{GateInstance, GateRef};
use crate::generator::WitnessGenerator;
use crate::polynomial::commitment::ListPolynomialCommitment;
use crate::proof::{Hash, HashTarget, Proof};
use crate::prover::{prove, PLONK_BLINDING};
use crate::prover::prove;
use crate::target::Target;
use crate::verifier::verify;
use crate::witness::PartialWitness;
@ -38,7 +39,6 @@ impl Default for CircuitConfig {
rate_bits: 1,
reduction_arity_bits: vec![1],
num_query_rounds: 1,
blinding: vec![true],
},
}
}
@ -61,7 +61,6 @@ impl CircuitConfig {
rate_bits: 3,
reduction_arity_bits: vec![1],
num_query_rounds: 1,
blinding: PLONK_BLINDING.to_vec(),
},
}
}
@ -69,7 +68,7 @@ impl CircuitConfig {
/// Circuit data required by the prover or the verifier.
pub struct CircuitData<F: Extendable<D>, const D: usize> {
pub(crate) prover_only: ProverOnlyCircuitData<F>,
pub(crate) prover_only: ProverOnlyCircuitData<F, D>,
pub(crate) verifier_only: VerifierOnlyCircuitData<F>,
pub(crate) common: CommonCircuitData<F, D>,
}
@ -92,7 +91,7 @@ impl<F: Extendable<D>, const D: usize> CircuitData<F, D> {
/// required, like LDEs of preprocessed polynomials. If more succinctness was desired, we could
/// construct a more minimal prover structure and convert back and forth.
pub struct ProverCircuitData<F: Extendable<D>, const D: usize> {
pub(crate) prover_only: ProverOnlyCircuitData<F>,
pub(crate) prover_only: ProverOnlyCircuitData<F, D>,
pub(crate) common: CommonCircuitData<F, D>,
}
@ -115,7 +114,7 @@ impl<F: Extendable<D>, const D: usize> VerifierCircuitData<F, D> {
}
/// Circuit data required by the prover, but not the verifier.
pub(crate) struct ProverOnlyCircuitData<F: Field> {
pub(crate) struct ProverOnlyCircuitData<F: Extendable<D>, const D: usize> {
pub generators: Vec<Box<dyn WitnessGenerator<F>>>,
/// Commitments to the constants polynomial.
pub constants_commitment: ListPolynomialCommitment<F>,
@ -123,6 +122,10 @@ pub(crate) struct ProverOnlyCircuitData<F: Field> {
pub sigmas_commitment: ListPolynomialCommitment<F>,
/// Subgroup of order `degree`.
pub subgroup: Vec<F>,
/// The circuit's copy constraints.
pub copy_constraints: Vec<(Target, Target)>,
/// The concrete placement of each gate in the circuit.
pub gate_instances: Vec<GateInstance<F, D>>,
}
/// Circuit data required by the verifier, but not the prover.

View File

@ -128,32 +128,11 @@ pub(crate) fn fft_with_precomputation_power_of_2<F: Field>(
PolynomialValues { values }
}
pub(crate) fn coset_fft<F: Field>(poly: PolynomialCoeffs<F>, shift: F) -> PolynomialValues<F> {
let mut points = fft(poly);
let mut shift_exp_i = F::ONE;
for p in points.values.iter_mut() {
*p *= shift_exp_i;
shift_exp_i *= shift;
}
points
}
pub(crate) fn ifft<F: Field>(poly: PolynomialValues<F>) -> PolynomialCoeffs<F> {
let precomputation = fft_precompute(poly.len());
ifft_with_precomputation_power_of_2(poly, &precomputation)
}
pub(crate) fn coset_ifft<F: Field>(poly: PolynomialValues<F>, shift: F) -> PolynomialCoeffs<F> {
let shift_inv = shift.inverse();
let mut shift_inv_exp_i = F::ONE;
let mut coeffs = ifft(poly);
for c in coeffs.coeffs.iter_mut() {
*c *= shift_inv_exp_i;
shift_inv_exp_i *= shift_inv;
}
coeffs
}
#[cfg(test)]
mod tests {
use crate::field::crandall_field::CrandallField;

View File

@ -63,11 +63,23 @@ pub fn barycentric_weights<F: Field>(points: &[(F, F)]) -> Vec<F> {
)
}
/// Interpolate the linear polynomial passing through `points` and evaluate it at `x`.
pub fn interpolate2<F: Field>(points: [(F, F); 2], x: F) -> F {
// a0 -> a1
// b0 -> b1
// x -> a1 + (x-a0)*(b1-a1)/(b0-a0)
let (a0, a1) = points[0];
let (b0, b1) = points[1];
assert_ne!(a0, b0);
a1 + (x - a0) * (b1 - a1) / (b0 - a0)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::field::crandall_field::CrandallField;
use crate::field::extension_field::quartic::QuarticCrandallField;
use crate::field::field::Field;
use crate::field::lagrange::interpolant;
use crate::polynomial::polynomial::PolynomialCoeffs;
#[test]
@ -117,4 +129,18 @@ mod tests {
fn eval_naive<F: Field>(coeffs: &PolynomialCoeffs<F>, domain: &[F]) -> Vec<(F, F)> {
domain.iter().map(|&x| (x, coeffs.eval(x))).collect()
}
#[test]
fn test_interpolate2() {
type F = QuarticCrandallField;
let points = [(F::rand(), F::rand()), (F::rand(), F::rand())];
let x = F::rand();
let ev0 = interpolant(&points).eval(x);
let ev1 = interpolate(&points, x, &barycentric_weights(&points));
let ev2 = interpolate2(points, x);
assert_eq!(ev0, ev1);
assert_eq!(ev0, ev2);
}
}

View File

@ -3,7 +3,7 @@ pub mod crandall_field;
pub mod extension_field;
pub mod fft;
pub mod field;
pub(crate) mod lagrange;
pub(crate) mod interpolation;
#[cfg(test)]
mod field_testing;

View File

@ -1,5 +1,3 @@
use crate::polynomial::commitment::SALT_SIZE;
pub mod prover;
mod recursive_verifier;
pub mod verifier;
@ -22,20 +20,6 @@ pub struct FriConfig {
/// Number of query rounds to perform.
pub num_query_rounds: usize,
/// Vector of the same length as the number of initial Merkle trees.
/// `blinding[i]==true` iff the i-th tree is salted.
pub blinding: Vec<bool>,
}
impl FriConfig {
pub(crate) fn salt_size(&self, i: usize) -> usize {
if self.blinding[i] {
SALT_SIZE
} else {
0
}
}
}
fn fri_delta(rate_log: usize, conjecture: bool) -> f64 {

View File

@ -6,6 +6,7 @@ use crate::field::extension_field::Extendable;
use crate::field::field::Field;
use crate::fri::FriConfig;
use crate::plonk_challenger::RecursiveChallenger;
use crate::plonk_common::PlonkPolynomials;
use crate::proof::{
FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget, HashTarget, OpeningSetTarget,
};
@ -157,11 +158,15 @@ impl<F: Extendable<D>, const D: usize> CircuitBuilder<F, D> {
// - one for polynomials opened at `x` and `x.frobenius()`
// Polynomials opened at `x`, i.e., the constants, sigmas and quotient polynomials.
let single_evals = [0, 1, 4]
.iter()
.flat_map(|&i| proof.unsalted_evals(i, config))
.map(|&e| self.convert_to_ext(e))
.collect::<Vec<_>>();
let single_evals = [
PlonkPolynomials::CONSTANTS,
PlonkPolynomials::SIGMAS,
PlonkPolynomials::QUOTIENT,
]
.iter()
.flat_map(|&p| proof.unsalted_evals(p))
.map(|&e| self.convert_to_ext(e))
.collect::<Vec<_>>();
let single_openings = os
.constants
.iter()
@ -179,7 +184,7 @@ impl<F: Extendable<D>, const D: usize> CircuitBuilder<F, D> {
// Polynomials opened at `x` and `g x`, i.e., the Zs polynomials.
let zs_evals = proof
.unsalted_evals(3, config)
.unsalted_evals(PlonkPolynomials::ZS)
.iter()
.map(|&e| self.convert_to_ext(e))
.collect::<Vec<_>>();
@ -217,7 +222,7 @@ impl<F: Extendable<D>, const D: usize> CircuitBuilder<F, D> {
// Polynomials opened at `x` and `x.frobenius()`, i.e., the wires polynomials.
let wire_evals = proof
.unsalted_evals(2, config)
.unsalted_evals(PlonkPolynomials::WIRES)
.iter()
.map(|&e| self.convert_to_ext(e))
.collect::<Vec<_>>();

View File

@ -2,13 +2,14 @@ use anyhow::{ensure, Result};
use crate::field::extension_field::{flatten, Extendable, FieldExtension, Frobenius};
use crate::field::field::Field;
use crate::field::lagrange::{barycentric_weights, interpolant, interpolate};
use crate::field::interpolation::{barycentric_weights, interpolate, interpolate2};
use crate::fri::FriConfig;
use crate::hash::hash_n_to_1;
use crate::merkle_proofs::verify_merkle_proof;
use crate::plonk_challenger::Challenger;
use crate::plonk_common::reduce_with_iter;
use crate::plonk_common::PlonkPolynomials;
use crate::proof::{FriInitialTreeProof, FriProof, FriQueryRound, Hash, OpeningSet};
use crate::util::scaling::ReducingFactor;
use crate::util::{log2_strict, reverse_bits, reverse_index_bits_in_place};
/// Computes P'(x^arity) from {P(x*g^i)}_(i=0..arity), where `g` is an `arity`-th root of unity
@ -151,65 +152,76 @@ fn fri_combine_initial<F: Field + Extendable<D>, const D: usize>(
assert!(D > 1, "Not implemented for D=1.");
let degree_log = proof.evals_proofs[0].1.siblings.len() - config.rate_bits;
let subgroup_x = F::Extension::from_basefield(subgroup_x);
let mut alpha_powers = alpha.powers();
let mut alpha = ReducingFactor::new(alpha);
let mut sum = F::Extension::ZERO;
// We will add three terms to `sum`:
// - one for various polynomials which are opened at a single point `x`
// - one for Zs, which are opened at `x` and `g x`
// - one for wire polynomials, which are opened at `x` and its conjugate
// - one for wire polynomials, which are opened at `x` and `x.frobenius()`
// Polynomials opened at `x`, i.e., the constants, sigmas and quotient polynomials.
let single_evals = [0, 1, 4]
.iter()
.flat_map(|&i| proof.unsalted_evals(i, config))
.map(|&e| F::Extension::from_basefield(e));
let single_evals = [
PlonkPolynomials::CONSTANTS,
PlonkPolynomials::SIGMAS,
PlonkPolynomials::QUOTIENT,
]
.iter()
.flat_map(|&p| proof.unsalted_evals(p))
.map(|&e| F::Extension::from_basefield(e));
let single_openings = os
.constants
.iter()
.chain(&os.plonk_s_sigmas)
.chain(&os.quotient_polys);
let single_diffs = single_evals.zip(single_openings).map(|(e, &o)| e - o);
let single_numerator = reduce_with_iter(single_diffs, &mut alpha_powers);
let single_diffs = single_evals
.into_iter()
.zip(single_openings)
.map(|(e, &o)| e - o)
.collect::<Vec<_>>();
let single_numerator = alpha.reduce(single_diffs.iter());
let single_denominator = subgroup_x - zeta;
sum += single_numerator / single_denominator;
alpha.reset();
// Polynomials opened at `x` and `g x`, i.e., the Zs polynomials.
let zs_evals = proof
.unsalted_evals(3, config)
.unsalted_evals(PlonkPolynomials::ZS)
.iter()
.map(|&e| F::Extension::from_basefield(e));
let zs_composition_eval = reduce_with_iter(zs_evals, alpha_powers.clone());
let zs_composition_eval = alpha.clone().reduce(zs_evals);
let zeta_right = F::Extension::primitive_root_of_unity(degree_log) * zeta;
let zs_interpol = interpolant(&[
(zeta, reduce_with_iter(&os.plonk_zs, alpha_powers.clone())),
(
zeta_right,
reduce_with_iter(&os.plonk_zs_right, &mut alpha_powers),
),
]);
let zs_numerator = zs_composition_eval - zs_interpol.eval(subgroup_x);
let zs_interpol = interpolate2(
[
(zeta, alpha.clone().reduce(os.plonk_zs.iter())),
(zeta_right, alpha.reduce(os.plonk_zs_right.iter())),
],
subgroup_x,
);
let zs_numerator = zs_composition_eval - zs_interpol;
let zs_denominator = (subgroup_x - zeta) * (subgroup_x - zeta_right);
sum = alpha.shift(sum);
sum += zs_numerator / zs_denominator;
// Polynomials opened at `x` and `x.frobenius()`, i.e., the wires polynomials.
let wire_evals = proof
.unsalted_evals(2, config)
.unsalted_evals(PlonkPolynomials::WIRES)
.iter()
.map(|&e| F::Extension::from_basefield(e));
let wire_composition_eval = reduce_with_iter(wire_evals, alpha_powers.clone());
let wire_composition_eval = alpha.clone().reduce(wire_evals);
let zeta_frob = zeta.frobenius();
let wire_eval = reduce_with_iter(&os.wires, alpha_powers.clone());
let mut alpha_frob = alpha.repeated_frobenius(D - 1);
let wire_eval = alpha.reduce(os.wires.iter());
// We want to compute `sum a^i*phi(w_i)`, where `phi` denotes the Frobenius automorphism.
// Since `phi^D=id` and `phi` is a field automorphism, we have the following equalities:
// `sum a^i*phi(w_i) = sum phi(phi^(D-1)(a^i)*w_i) = phi(sum phi^(D-1)(a)^i*w_i)`
// So we can compute the original sum using only one call to the `D-1`-repeated Frobenius of alpha,
// and one call at the end of the sum.
let alpha_powers_frob = alpha_powers.repeated_frobenius(D - 1);
let wire_eval_frob = reduce_with_iter(&os.wires, alpha_powers_frob).frobenius();
let wire_interpol = interpolant(&[(zeta, wire_eval), (zeta_frob, wire_eval_frob)]);
let wire_numerator = wire_composition_eval - wire_interpol.eval(subgroup_x);
let wire_eval_frob = alpha_frob.reduce(os.wires.iter()).frobenius();
let wire_interpol = interpolate2([(zeta, wire_eval), (zeta_frob, wire_eval_frob)], subgroup_x);
let wire_numerator = wire_composition_eval - wire_interpol;
let wire_denominator = (subgroup_x - zeta) * (subgroup_x - zeta_frob);
sum = alpha.shift(sum);
sum += wire_numerator / wire_denominator;
sum
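
For reference (not part of the diff), the identity used just above can be justified step by step: since `phi` is a field automorphism with `phi^D = id`,

phi(sum phi^(D-1)(a)^i * w_i) = sum phi(phi^(D-1)(a))^i * phi(w_i) = sum a^i * phi(w_i),

which is why the code reduces the wire openings with `alpha.repeated_frobenius(D - 1)` and then applies `.frobenius()` once to the result.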

View File

@ -371,7 +371,6 @@ mod tests {
use crate::field::extension_field::quartic::QuarticCrandallField;
use crate::field::field::Field;
use crate::fri::FriConfig;
use crate::prover::PLONK_BLINDING;
use crate::witness::PartialWitness;
#[test]
@ -386,8 +385,6 @@ mod tests {
let x = FF::rand();
let y = FF::rand();
let x = FF::TWO;
let y = FF::ONE;
let z = x / y;
let xt = builder.constant_extension(x);
let yt = builder.constant_extension(y);

View File

@ -1,9 +1,10 @@
use std::marker::PhantomData;
use crate::circuit_builder::CircuitBuilder;
use crate::field::extension_field::target::ExtensionTarget;
use crate::field::extension_field::Extendable;
use crate::gates::interpolation::InterpolationGate;
use crate::target::Target;
use std::marker::PhantomData;
impl<F: Extendable<D>, const D: usize> CircuitBuilder<F, D> {
/// Interpolate two points. No need for an `InterpolationGate` since the coefficients
@ -56,15 +57,16 @@ impl<F: Extendable<D>, const D: usize> CircuitBuilder<F, D> {
#[cfg(test)]
mod tests {
use std::convert::TryInto;
use super::*;
use crate::circuit_data::CircuitConfig;
use crate::field::crandall_field::CrandallField;
use crate::field::extension_field::quartic::QuarticCrandallField;
use crate::field::extension_field::FieldExtension;
use crate::field::field::Field;
use crate::field::lagrange::{interpolant, interpolate};
use crate::field::interpolation::{interpolant, interpolate};
use crate::witness::PartialWitness;
use std::convert::TryInto;
#[test]
fn test_interpolate() {

View File

@ -323,6 +323,8 @@ mod tests {
use crate::gates::gmimc::{GMiMCGate, W};
use crate::generator::generate_partial_witness;
use crate::gmimc::gmimc_permute_naive;
use crate::permutation_argument::TargetPartitions;
use crate::target::Target;
use crate::wire::Wire;
use crate::witness::PartialWitness;

View File

@ -6,7 +6,7 @@ use crate::circuit_builder::CircuitBuilder;
use crate::field::extension_field::algebra::PolynomialCoeffsAlgebra;
use crate::field::extension_field::target::ExtensionTarget;
use crate::field::extension_field::{Extendable, FieldExtension};
use crate::field::lagrange::interpolant;
use crate::field::interpolation::interpolant;
use crate::gadgets::polynomial::PolynomialCoeffsExtAlgebraTarget;
use crate::gates::gate::{Gate, GateRef};
use crate::generator::{SimpleGenerator, WitnessGenerator};

View File

@ -2,6 +2,7 @@ use std::collections::{HashMap, HashSet};
use std::fmt::Debug;
use crate::field::field::Field;
use crate::permutation_argument::TargetPartitions;
use crate::target::Target;
use crate::witness::PartialWitness;
@ -24,10 +25,7 @@ pub(crate) fn generate_partial_witness<F: Field>(
// Build a list of "pending" generators which are queued to be run. Initially, all generators
// are queued.
let mut pending_generator_indices = HashSet::new();
for i in 0..generators.len() {
pending_generator_indices.insert(i);
}
let mut pending_generator_indices: HashSet<_> = (0..generators.len()).collect();
// We also track a list of "expired" generators which have already returned false.
let mut expired_generator_indices = HashSet::new();
@ -58,6 +56,11 @@ pub(crate) fn generate_partial_witness<F: Field>(
pending_generator_indices = next_pending_generator_indices;
}
assert_eq!(
expired_generator_indices.len(),
generators.len(),
"Some generators weren't run."
);
}
/// A generator participates in the generation of the witness.

View File

@ -1,3 +1,5 @@
use std::convert::TryInto;
use crate::circuit_builder::CircuitBuilder;
use crate::field::extension_field::target::ExtensionTarget;
use crate::field::extension_field::{Extendable, FieldExtension};
@ -5,7 +7,6 @@ use crate::field::field::Field;
use crate::hash::{permute, SPONGE_RATE, SPONGE_WIDTH};
use crate::proof::{Hash, HashTarget, OpeningSet};
use crate::target::Target;
use std::convert::TryInto;
/// Observes prover messages, and generates challenges by hashing the transcript.
#[derive(Clone)]
@ -320,6 +321,7 @@ mod tests {
use crate::field::crandall_field::CrandallField;
use crate::field::field::Field;
use crate::generator::generate_partial_witness;
use crate::permutation_argument::TargetPartitions;
use crate::plonk_challenger::{Challenger, RecursiveChallenger};
use crate::target::Target;
use crate::witness::PartialWitness;

View File

@ -6,10 +6,62 @@ use crate::field::extension_field::target::ExtensionTarget;
use crate::field::extension_field::Extendable;
use crate::field::field::Field;
use crate::gates::gate::GateRef;
use crate::polynomial::commitment::SALT_SIZE;
use crate::polynomial::polynomial::PolynomialCoeffs;
use crate::target::Target;
use crate::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
/// Holds the Merkle tree index and blinding flag of a set of polynomials used in FRI.
#[derive(Debug, Copy, Clone)]
pub struct PolynomialsIndexBlinding {
pub(crate) index: usize,
pub(crate) blinding: bool,
}
impl PolynomialsIndexBlinding {
pub fn salt_size(&self) -> usize {
if self.blinding {
SALT_SIZE
} else {
0
}
}
}
/// Holds the indices and blinding flags of the Plonk polynomials.
pub struct PlonkPolynomials;
impl PlonkPolynomials {
pub const CONSTANTS: PolynomialsIndexBlinding = PolynomialsIndexBlinding {
index: 0,
blinding: false,
};
pub const SIGMAS: PolynomialsIndexBlinding = PolynomialsIndexBlinding {
index: 1,
blinding: false,
};
pub const WIRES: PolynomialsIndexBlinding = PolynomialsIndexBlinding {
index: 2,
blinding: true,
};
pub const ZS: PolynomialsIndexBlinding = PolynomialsIndexBlinding {
index: 3,
blinding: true,
};
pub const QUOTIENT: PolynomialsIndexBlinding = PolynomialsIndexBlinding {
index: 4,
blinding: true,
};
pub fn polynomials(i: usize) -> PolynomialsIndexBlinding {
match i {
0 => Self::CONSTANTS,
1 => Self::SIGMAS,
2 => Self::WIRES,
3 => Self::ZS,
4 => Self::QUOTIENT,
_ => panic!("There are only 5 sets of polynomials in Plonk."),
}
}
}
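
A minimal usage sketch (not part of this commit) of how this metadata is meant to be consumed, e.g. by `unsalted_evals` in `proof.rs` further down; `SALT_SIZE` is the constant from `polynomial::commitment`, and the test name is hypothetical:

#[test]
fn plonk_polynomials_sketch() {
    use crate::polynomial::commitment::SALT_SIZE;
    // Blinded trees (wires, Zs, quotient) carry SALT_SIZE extra salt polynomials; the others don't.
    assert_eq!(PlonkPolynomials::WIRES.salt_size(), SALT_SIZE);
    assert_eq!(PlonkPolynomials::CONSTANTS.salt_size(), 0);
    // `polynomials(i)` maps a raw Merkle tree index back to the same metadata.
    assert_eq!(PlonkPolynomials::polynomials(3).index, PlonkPolynomials::ZS.index);
    assert!(PlonkPolynomials::polynomials(3).blinding);
}
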
/// Evaluate the vanishing polynomial at `x`. In this context, the vanishing polynomial is a random
/// linear combination of gate constraints, plus some other terms relating to the permutation
/// argument. All such terms should vanish on `H`.

View File

@ -4,14 +4,15 @@ use rayon::prelude::*;
use crate::field::extension_field::Extendable;
use crate::field::extension_field::{FieldExtension, Frobenius};
use crate::field::field::Field;
use crate::field::lagrange::interpolant;
use crate::fri::{prover::fri_proof, verifier::verify_fri_proof, FriConfig};
use crate::merkle_tree::MerkleTree;
use crate::plonk_challenger::Challenger;
use crate::plonk_common::PlonkPolynomials;
use crate::plonk_common::{reduce_polys_with_iter, reduce_with_iter};
use crate::polynomial::polynomial::{PolynomialCoeffs, PolynomialValues};
use crate::proof::{FriProof, FriProofTarget, Hash, OpeningSet};
use crate::timed;
use crate::util::scaling::ReducingFactor;
use crate::util::{log2_strict, reverse_index_bits_in_place, transpose};
pub const SALT_SIZE: usize = 2;
@ -145,60 +146,50 @@ impl<F: Field> ListPolynomialCommitment<F> {
challenger.observe_opening_set(&os);
let alpha = challenger.get_extension_challenge();
let mut alpha_powers = alpha.powers();
let mut alpha = ReducingFactor::new(alpha);
// Final low-degree polynomial that goes into FRI.
let mut final_poly = PolynomialCoeffs::empty();
// Polynomials opened at a single point.
let single_polys = [0, 1, 4]
.iter()
.flat_map(|&i| &commitments[i].polynomials)
.map(|p| p.to_extension());
let single_os = [&os.constants, &os.plonk_s_sigmas, &os.quotient_polys];
let single_evals = single_os.iter().flat_map(|v| v.iter());
let single_composition_poly = reduce_polys_with_iter(single_polys, alpha_powers.clone());
let single_composition_eval = reduce_with_iter(single_evals, &mut alpha_powers);
let single_polys = [
PlonkPolynomials::CONSTANTS,
PlonkPolynomials::SIGMAS,
PlonkPolynomials::QUOTIENT,
]
.iter()
.flat_map(|&p| &commitments[p.index].polynomials)
.map(|p| p.to_extension());
let single_composition_poly = alpha.reduce_polys(single_polys);
let single_quotient = Self::compute_quotient(
&[zeta],
&[single_composition_eval],
&single_composition_poly,
);
final_poly = &final_poly + &single_quotient;
let single_quotient = Self::compute_quotient([zeta], single_composition_poly);
final_poly += single_quotient;
alpha.reset();
// Zs polynomials are opened at `zeta` and `g*zeta`.
let zs_polys = commitments[3].polynomials.iter().map(|p| p.to_extension());
let zs_composition_poly = reduce_polys_with_iter(zs_polys, alpha_powers.clone());
let zs_composition_evals = [
reduce_with_iter(&os.plonk_zs, alpha_powers.clone()),
reduce_with_iter(&os.plonk_zs_right, &mut alpha_powers),
];
let zs_polys = commitments[PlonkPolynomials::ZS.index]
.polynomials
.iter()
.map(|p| p.to_extension());
let zs_composition_poly = alpha.reduce_polys(zs_polys);
let zs_quotient = Self::compute_quotient(
&[zeta, g * zeta],
&zs_composition_evals,
&zs_composition_poly,
);
final_poly = &final_poly + &zs_quotient;
let zs_quotient = Self::compute_quotient([zeta, g * zeta], zs_composition_poly);
alpha.shift_poly(&mut final_poly);
final_poly += zs_quotient;
// When working in an extension field, we need to check that the wires are in the base field.
// Check this by opening the wires polynomials at `zeta` and `zeta.frobenius()` and using the fact that
// a polynomial `f` is over the base field iff `f(z).frobenius()=f(z.frobenius())` with high probability.
let wire_polys = commitments[2].polynomials.iter().map(|p| p.to_extension());
let wire_composition_poly = reduce_polys_with_iter(wire_polys, alpha_powers.clone());
let wire_evals_frob = os.wires.iter().map(|e| e.frobenius()).collect::<Vec<_>>();
let wire_composition_evals = [
reduce_with_iter(&os.wires, alpha_powers.clone()),
reduce_with_iter(&wire_evals_frob, alpha_powers),
];
let wire_polys = commitments[PlonkPolynomials::WIRES.index]
.polynomials
.iter()
.map(|p| p.to_extension());
let wire_composition_poly = alpha.reduce_polys(wire_polys);
let wires_quotient = Self::compute_quotient(
&[zeta, zeta.frobenius()],
&wire_composition_evals,
&wire_composition_poly,
);
final_poly = &final_poly + &wires_quotient;
let wires_quotient =
Self::compute_quotient([zeta, zeta.frobenius()], wire_composition_poly);
alpha.shift_poly(&mut final_poly);
final_poly += wires_quotient;
let lde_final_poly = final_poly.lde(config.rate_bits);
let lde_final_values = lde_final_poly
@ -229,28 +220,27 @@ impl<F: Field> ListPolynomialCommitment<F> {
/// Given `points=(x_i)` and `poly=P`, computes the polynomial `Q=(P-I)/Z` where `I`
/// interpolates `(x_i, P(x_i))` and `Z` is the vanishing polynomial on `(x_i)`.
fn compute_quotient<const D: usize>(
points: &[F::Extension],
evals: &[F::Extension],
poly: &PolynomialCoeffs<F::Extension>,
fn compute_quotient<const D: usize, const N: usize>(
points: [F::Extension; N],
poly: PolynomialCoeffs<F::Extension>,
) -> PolynomialCoeffs<F::Extension>
where
F: Extendable<D>,
{
let pairs = points
.iter()
.zip(evals)
.map(|(&x, &e)| (x, e))
.collect::<Vec<_>>();
debug_assert!(pairs.iter().all(|&(x, e)| poly.eval(x) == e));
let interpolant = interpolant(&pairs);
let denominator = points.iter().fold(PolynomialCoeffs::one(), |acc, &x| {
&acc * &PolynomialCoeffs::new(vec![-x, F::Extension::ONE])
});
let numerator = poly - &interpolant;
let (quotient, rem) = numerator.div_rem(&denominator);
debug_assert!(rem.is_zero());
let quotient = if N == 1 {
poly.divide_by_linear(points[0]).0
} else if N == 2 {
// The denominator is `(X - p0)(X - p1) = p0 p1 - (p0 + p1) X + X^2`.
let denominator = vec![
points[0] * points[1],
-points[0] - points[1],
F::Extension::ONE,
]
.into();
poly.div_rem_long_division(&denominator).0 // Could also use `divide_by_linear` twice.
} else {
unreachable!("This shouldn't happen. Plonk should open polynomials at 1 or 2 points.")
};
quotient.padded(quotient.degree_plus_one().next_power_of_two())
}
@ -295,9 +285,9 @@ pub struct OpeningProofTarget<const D: usize> {
#[cfg(test)]
mod tests {
use anyhow::Result;
use rand::Rng;
use super::*;
use crate::plonk_common::PlonkPolynomials;
fn gen_random_test_case<F: Field + Extendable<D>, const D: usize>(
k: usize,
@ -323,17 +313,6 @@ mod tests {
point
}
fn gen_random_blindings() -> Vec<bool> {
let mut rng = rand::thread_rng();
vec![
rng.gen_bool(0.5),
rng.gen_bool(0.5),
rng.gen_bool(0.5),
rng.gen_bool(0.5),
rng.gen_bool(0.5),
]
}
fn check_batch_polynomial_commitment<F: Field + Extendable<D>, const D: usize>() -> Result<()> {
let ks = [1, 2, 3, 5, 8];
let degree_log = 11;
@ -342,7 +321,6 @@ mod tests {
rate_bits: 2,
reduction_arity_bits: vec![2, 3, 1, 2],
num_query_rounds: 3,
blinding: gen_random_blindings(),
};
let lpcs = (0..5)
@ -350,7 +328,7 @@ mod tests {
ListPolynomialCommitment::<F>::new(
gen_random_test_case(ks[i], degree_log),
fri_config.rate_bits,
fri_config.blinding[i],
PlonkPolynomials::polynomials(i).blinding,
)
})
.collect::<Vec<_>>();

View File

@ -26,7 +26,7 @@ impl<F: Field> PolynomialCoeffs<F> {
.to_vec()
.into();
let mut q = rev_q.rev();
let mut qb = &q * b;
let qb = &q * b;
let mut r = self - &qb;
q.trim();
r.trim();
@ -59,8 +59,7 @@ impl<F: Field> PolynomialCoeffs<F> {
quotient.coeffs[cur_q_degree] = cur_q_coeff;
for (i, &div_coeff) in b.coeffs.iter().enumerate() {
remainder.coeffs[cur_q_degree + i] =
remainder.coeffs[cur_q_degree + i] - (cur_q_coeff * div_coeff);
remainder.coeffs[cur_q_degree + i] -= cur_q_coeff * div_coeff;
}
remainder.trim();
}
@ -97,7 +96,7 @@ impl<F: Field> PolynomialCoeffs<F> {
let denominators = (0..a_eval.len())
.map(|i| {
if i != 0 {
root_pow = root_pow * root_n;
root_pow *= root_n;
}
denominator_g * root_pow - F::ONE
})
@ -125,8 +124,25 @@ impl<F: Field> PolynomialCoeffs<F> {
p
}
/// Let `self=p(X)`, this returns `(p(X)-p(z))/(X-z)` and `p(z)`.
/// See https://en.wikipedia.org/wiki/Horner%27s_method
pub(crate) fn divide_by_linear(&self, z: F) -> (PolynomialCoeffs<F>, F) {
let mut bs = self
.coeffs
.iter()
.rev()
.scan(F::ZERO, |acc, &c| {
*acc = *acc * z + c;
Some(*acc)
})
.collect::<Vec<_>>();
let ev = bs.pop().unwrap_or(F::ZERO);
bs.reverse();
(Self { coeffs: bs }, ev)
}
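
A small sketch (not part of this commit) checking the defining identity `p(X) = (X - z)*q(X) + p(z)` behind `divide_by_linear`; `CrandallField` stands in for any field and the test name is hypothetical:

#[test]
fn divide_by_linear_sketch() {
    use crate::field::crandall_field::CrandallField;
    use crate::field::field::Field;
    use crate::polynomial::polynomial::PolynomialCoeffs;
    type F = CrandallField;
    let poly = PolynomialCoeffs::new(F::rand_vec(8));
    let z = F::rand();
    let (q, p_z) = poly.divide_by_linear(z);
    // The second return value is the evaluation p(z).
    assert_eq!(p_z, poly.eval(z));
    // Check p(x) = (x - z) * q(x) + p(z) at a random point.
    let x = F::rand();
    assert_eq!(poly.eval(x), (x - z) * q.eval(x) + p_z);
}
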
/// Computes the inverse of `self` modulo `x^n`.
pub(crate) fn inv_mod_xn(&self, n: usize) -> Self {
pub fn inv_mod_xn(&self, n: usize) -> Self {
assert!(self.coeffs[0].is_nonzero(), "Inverse doesn't exist.");
let h = if self.len() < n {
@ -166,7 +182,10 @@ impl<F: Field> PolynomialCoeffs<F> {
#[cfg(test)]
mod tests {
use std::time::Instant;
use crate::field::crandall_field::CrandallField;
use crate::field::extension_field::quartic::QuarticCrandallField;
use crate::field::field::Field;
use crate::polynomial::polynomial::PolynomialCoeffs;
@ -199,4 +218,49 @@ mod tests {
let computed_q = a.divide_by_z_h(4);
assert_eq!(computed_q, q);
}
#[test]
#[ignore]
fn test_division_by_linear() {
type F = QuarticCrandallField;
let n = 1_000_000;
let poly = PolynomialCoeffs::new(F::rand_vec(n));
let z = F::rand();
let ev = poly.eval(z);
let timer = Instant::now();
let (quotient, ev2) = poly.div_rem(&PolynomialCoeffs::new(vec![-z, F::ONE]));
println!("{:.3}s for usual", timer.elapsed().as_secs_f32());
assert_eq!(ev2.trimmed().coeffs, vec![ev]);
let timer = Instant::now();
let (quotient, ev3) = poly.div_rem_long_division(&PolynomialCoeffs::new(vec![-z, F::ONE]));
println!("{:.3}s for long division", timer.elapsed().as_secs_f32());
assert_eq!(ev3.trimmed().coeffs, vec![ev]);
let timer = Instant::now();
let horn = poly.divide_by_linear(z);
println!("{:.3}s for Horner", timer.elapsed().as_secs_f32());
assert_eq!((quotient, ev), horn);
}
#[test]
#[ignore]
fn test_division_by_quadratic() {
type F = QuarticCrandallField;
let n = 1_000_000;
let poly = PolynomialCoeffs::new(F::rand_vec(n));
let quad = PolynomialCoeffs::new(F::rand_vec(2));
let timer = Instant::now();
let (quotient0, rem0) = poly.div_rem(&quad);
println!("{:.3}s for usual", timer.elapsed().as_secs_f32());
let timer = Instant::now();
let (quotient1, rem1) = poly.div_rem_long_division(&quad);
println!("{:.3}s for long division", timer.elapsed().as_secs_f32());
assert_eq!(quotient0.trimmed(), quotient1.trimmed());
assert_eq!(rem0.trimmed(), rem1.trimmed());
}
}

View File

@ -1,6 +1,6 @@
use std::cmp::max;
use std::iter::Sum;
use std::ops::{Add, Mul, Sub};
use std::ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign};
use crate::field::extension_field::Extendable;
use crate::field::fft::{fft, ifft};
@ -248,6 +248,46 @@ impl<F: Field> Sub for &PolynomialCoeffs<F> {
}
}
impl<F: Field> AddAssign for PolynomialCoeffs<F> {
fn add_assign(&mut self, rhs: Self) {
let len = max(self.len(), rhs.len());
self.coeffs.resize(len, F::ZERO);
for (l, r) in self.coeffs.iter_mut().zip(rhs.coeffs) {
*l += r;
}
}
}
impl<F: Field> AddAssign<&Self> for PolynomialCoeffs<F> {
fn add_assign(&mut self, rhs: &Self) {
let len = max(self.len(), rhs.len());
self.coeffs.resize(len, F::ZERO);
for (l, &r) in self.coeffs.iter_mut().zip(&rhs.coeffs) {
*l += r;
}
}
}
impl<F: Field> SubAssign for PolynomialCoeffs<F> {
fn sub_assign(&mut self, rhs: Self) {
let len = max(self.len(), rhs.len());
self.coeffs.resize(len, F::ZERO);
for (l, r) in self.coeffs.iter_mut().zip(rhs.coeffs) {
*l -= r;
}
}
}
impl<F: Field> SubAssign<&Self> for PolynomialCoeffs<F> {
fn sub_assign(&mut self, rhs: &Self) {
let len = max(self.len(), rhs.len());
self.coeffs.resize(len, F::ZERO);
for (l, &r) in self.coeffs.iter_mut().zip(&rhs.coeffs) {
*l -= r;
}
}
}
impl<F: Field> Mul<F> for &PolynomialCoeffs<F> {
type Output = PolynomialCoeffs<F>;
@ -257,6 +297,12 @@ impl<F: Field> Mul<F> for &PolynomialCoeffs<F> {
}
}
impl<F: Field> MulAssign<F> for PolynomialCoeffs<F> {
fn mul_assign(&mut self, rhs: F) {
self.coeffs.iter_mut().for_each(|x| *x *= rhs);
}
}
impl<F: Field> Mul for &PolynomialCoeffs<F> {
type Output = PolynomialCoeffs<F>;

View File

@ -3,9 +3,9 @@ use std::convert::TryInto;
use crate::field::extension_field::target::ExtensionTarget;
use crate::field::extension_field::Extendable;
use crate::field::field::Field;
use crate::fri::FriConfig;
use crate::gadgets::polynomial::PolynomialCoeffsExtTarget;
use crate::merkle_proofs::{MerkleProof, MerkleProofTarget};
use crate::plonk_common::PolynomialsIndexBlinding;
use crate::polynomial::commitment::{ListPolynomialCommitment, OpeningProof, OpeningProofTarget};
use crate::polynomial::polynomial::PolynomialCoeffs;
use crate::target::Target;
@ -99,9 +99,9 @@ pub struct FriInitialTreeProof<F: Field> {
}
impl<F: Field> FriInitialTreeProof<F> {
pub(crate) fn unsalted_evals(&self, i: usize, config: &FriConfig) -> &[F] {
let evals = &self.evals_proofs[i].0;
&evals[..evals.len() - config.salt_size(i)]
pub(crate) fn unsalted_evals(&self, polynomials: PolynomialsIndexBlinding) -> &[F] {
let evals = &self.evals_proofs[polynomials.index].0;
&evals[..evals.len() - polynomials.salt_size()]
}
}
@ -110,9 +110,9 @@ pub struct FriInitialTreeProofTarget {
}
impl FriInitialTreeProofTarget {
pub(crate) fn unsalted_evals(&self, i: usize, config: &FriConfig) -> &[Target] {
let evals = &self.evals_proofs[i].0;
&evals[..evals.len() - config.salt_size(i)]
pub(crate) fn unsalted_evals(&self, polynomials: PolynomialsIndexBlinding) -> &[Target] {
let evals = &self.evals_proofs[polynomials.index].0;
&evals[..evals.len() - polynomials.salt_size()]
}
}

View File

@ -17,11 +17,8 @@ use crate::util::transpose;
use crate::vars::EvaluationVarsBase;
use crate::witness::{PartialWitness, Witness};
/// Corresponds to constants - sigmas - wires - zs - quotient — polynomial commitments.
pub const PLONK_BLINDING: [bool; 5] = [false, false, true, true, true];
pub(crate) fn prove<F: Extendable<D>, const D: usize>(
prover_data: &ProverOnlyCircuitData<F>,
prover_data: &ProverOnlyCircuitData<F, D>,
common_data: &CommonCircuitData<F, D>,
inputs: PartialWitness<F>,
) -> Proof<F, D> {
@ -36,6 +33,13 @@ pub(crate) fn prove<F: Extendable<D>, const D: usize>(
"to generate witness"
);
timed!(
witness
.check_copy_constraints(&prover_data.copy_constraints, &prover_data.gate_instances)
.unwrap(), // TODO: Change return value to `Result` and use `?` here.
"to check copy constraints"
);
let config = &common_data.config;
let num_wires = config.num_wires;
let num_challenges = config.num_challenges;
@ -204,7 +208,7 @@ fn compute_z<F: Extendable<D>, const D: usize>(
fn compute_vanishing_polys<F: Extendable<D>, const D: usize>(
common_data: &CommonCircuitData<F, D>,
prover_data: &ProverOnlyCircuitData<F>,
prover_data: &ProverOnlyCircuitData<F, D>,
wires_commitment: &ListPolynomialCommitment<F>,
plonk_zs_commitment: &ListPolynomialCommitment<F>,
betas: &[F],

View File

@ -1,3 +1,4 @@
pub mod scaling;
pub(crate) mod timing;
use crate::field::field::Field;

src/util/scaling.rs (new file, 75 lines)
View File

@ -0,0 +1,75 @@
use std::borrow::Borrow;
use crate::field::extension_field::Frobenius;
use crate::field::field::Field;
use crate::polynomial::polynomial::PolynomialCoeffs;
/// When verifying the composition polynomial in FRI we have to compute sums of the form
/// `(sum_0^k a^i * x_i)/d_0 + (sum_k^r a^i * y_i)/d_1`
/// The most efficient way to do this is to compute both quotients separately using Horner's method,
/// scale the second one by `a^(r-1-k)`, and add them up.
/// This struct abstracts away these operations by implementing Horner's method and keeping track
/// of the number of multiplications by `a` to compute the scaling factor.
/// See https://github.com/mir-protocol/plonky2/pull/69 for more details and discussions.
#[derive(Debug, Copy, Clone)]
pub struct ReducingFactor<F: Field> {
base: F,
count: u64,
}
impl<F: Field> ReducingFactor<F> {
pub fn new(base: F) -> Self {
Self { base, count: 0 }
}
fn mul(&mut self, x: F) -> F {
self.count += 1;
self.base * x
}
fn mul_poly(&mut self, p: &mut PolynomialCoeffs<F>) {
self.count += 1;
*p *= self.base;
}
pub fn reduce(&mut self, iter: impl DoubleEndedIterator<Item = impl Borrow<F>>) -> F {
iter.rev()
.fold(F::ZERO, |acc, x| self.mul(acc) + *x.borrow())
}
pub fn reduce_polys(
&mut self,
polys: impl DoubleEndedIterator<Item = impl Borrow<PolynomialCoeffs<F>>>,
) -> PolynomialCoeffs<F> {
polys.rev().fold(PolynomialCoeffs::empty(), |mut acc, x| {
self.mul_poly(&mut acc);
acc += x.borrow();
acc
})
}
pub fn shift(&mut self, x: F) -> F {
let tmp = self.base.exp(self.count) * x;
self.count = 0;
tmp
}
pub fn shift_poly(&mut self, p: &mut PolynomialCoeffs<F>) {
*p *= self.base.exp(self.count);
self.count = 0;
}
pub fn reset(&mut self) {
self.count = 0;
}
pub fn repeated_frobenius<const D: usize>(&self, count: usize) -> Self
where
F: Frobenius<D>,
{
Self {
base: self.base.repeated_frobenius(count),
count: self.count,
}
}
}
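
A minimal sketch (not part of this commit) of the pattern the doc comment above describes, using `CrandallField`; `reduce` performs the Horner evaluation and `shift` supplies the `a^count` factor needed to combine two separate reductions (the test name is hypothetical):

#[test]
fn reducing_factor_sketch() {
    use crate::field::crandall_field::CrandallField;
    use crate::field::field::Field;
    type F = CrandallField;
    let a = F::rand();
    let xs = F::rand_vec(3);
    let ys = F::rand_vec(4);
    let mut factor = ReducingFactor::new(a);
    let v_x = factor.reduce(xs.iter()); // sum_i a^i * xs[i]; count is now 3
    factor.reset();
    let v_y = factor.reduce(ys.iter()); // sum_j a^j * ys[j]; count is now 4
    // Scale the first partial result so both Horner evaluations combine into one sum.
    let combined = factor.shift(v_x) + v_y; // equals a^4 * v_x + v_y
    let expected = ys
        .iter()
        .chain(xs.iter())
        .enumerate()
        .fold(F::ZERO, |acc, (i, &z)| acc + a.exp(i as u64) * z);
    assert_eq!(combined, expected);
}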

View File

@ -1,9 +1,12 @@
use std::collections::HashMap;
use std::convert::TryInto;
use anyhow::{ensure, Result};
use crate::field::extension_field::target::ExtensionTarget;
use crate::field::extension_field::{Extendable, FieldExtension};
use crate::field::field::Field;
use crate::gates::gate::GateInstance;
use crate::target::Target;
use crate::wire::Wire;
@ -142,6 +145,31 @@ impl<F: Field> PartialWitness<F> {
});
Witness { wire_values }
}
/// Checks that the copy constraints are satisfied in the witness.
pub fn check_copy_constraints<const D: usize>(
&self,
copy_constraints: &[(Target, Target)],
gate_instances: &[GateInstance<F, D>],
) -> Result<()>
where
F: Extendable<D>,
{
for &(a, b) in copy_constraints {
// TODO: Take care of public inputs once they land.
if let (Target::Wire(wa), Target::Wire(wb)) = (a, b) {
let va = self.target_values.get(&a).copied().unwrap_or(F::ZERO);
let vb = self.target_values.get(&b).copied().unwrap_or(F::ZERO);
ensure!(
va == vb,
"Copy constraint between wire {} of gate #{} (`{}`) and wire {} of gate #{} (`{}`) is not satisfied. \
Got values of {} and {} respectively.",
wa.input, wa.gate, gate_instances[wa.gate].gate_type.0.id(), wb.input, wb.gate,
gate_instances[wb.gate].gate_type.0.id(), va, vb);
}
}
Ok(())
}
}
impl<F: Field> Default for PartialWitness<F> {