Merge pull request #32 from mir-protocol/zero-knowledge

Zero knowledge + List polynomial commitments
This commit is contained in:
wborgeaud 2021-05-06 15:31:44 +02:00 committed by GitHub
commit 7617018d82
14 changed files with 1468 additions and 459 deletions

View File

@ -269,6 +269,10 @@ pub trait Field:
fn rand() -> Self {
Self::rand_from_rng(&mut OsRng)
}
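/// A brief doc line for the new helper, matching the behavior of the body below: returns a vector
/// of `n` elements drawn with `Self::rand()`.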
fn rand_vec(n: usize) -> Vec<Self> {
(0..n).map(|_| Self::rand()).collect()
}
}
/// An iterator over the powers of a certain base element `b`: `b^0, b^1, b^2, ...`.

View File

@ -77,8 +77,8 @@ mod tests {
type F = CrandallField;
for deg in 0..10 {
let domain = (0..deg).map(|_| F::rand()).collect::<Vec<_>>();
let coeffs = (0..deg).map(|_| F::rand()).collect();
let domain = F::rand_vec(deg);
let coeffs = F::rand_vec(deg);
let coeffs = PolynomialCoeffs { coeffs };
let points = eval_naive(&coeffs, &domain);
@ -94,7 +94,7 @@ mod tests {
let deg = 1 << deg_log;
let g = F::primitive_root_of_unity(deg_log);
let domain = F::cyclic_subgroup_known_order(g, deg);
let coeffs = (0..deg).map(|_| F::rand()).collect();
let coeffs = F::rand_vec(deg);
let coeffs = PolynomialCoeffs { coeffs };
let points = eval_naive(&coeffs, &domain);
@ -108,8 +108,8 @@ mod tests {
for deg in 0..10 {
let points = deg + 5;
let domain = (0..points).map(|_| F::rand()).collect::<Vec<_>>();
let coeffs = (0..deg).map(|_| F::rand()).collect();
let domain = F::rand_vec(points);
let coeffs = F::rand_vec(deg);
let coeffs = PolynomialCoeffs { coeffs };
let points = eval_naive(&coeffs, &domain);

View File

@ -1,447 +0,0 @@
use crate::field::fft::fft;
use crate::field::field::Field;
use crate::field::lagrange::{barycentric_weights, interpolate};
use crate::hash::hash_n_to_1;
use crate::merkle_proofs::verify_merkle_proof;
use crate::merkle_tree::MerkleTree;
use crate::plonk_challenger::Challenger;
use crate::plonk_common::reduce_with_powers;
use crate::polynomial::polynomial::{PolynomialCoeffs, PolynomialValues};
use crate::proof::{FriProof, FriQueryRound, FriQueryStep, Hash};
use crate::util::{log2_strict, reverse_bits, reverse_index_bits_in_place};
use anyhow::{ensure, Result};
/// Somewhat arbitrary. Smaller values increase delta (with diminishing returns) but also
/// increase L, potentially requiring more challenge points.
const EPSILON: f64 = 0.01;
struct FriConfig {
proof_of_work_bits: u32,
rate_bits: usize,
/// The arity of each FRI reduction step, expressed as the log2 of the actual arity.
/// For example, `[3, 2, 1]` would describe a FRI reduction tree with 8-to-1 reduction, then
/// a 4-to-1 reduction, then a 2-to-1 reduction. After these reductions, the reduced polynomial
/// is sent directly.
reduction_arity_bits: Vec<usize>,
/// Number of query rounds to perform.
num_query_rounds: usize,
}
fn fri_delta(rate_log: usize, conjecture: bool) -> f64 {
// The code rate is `2^-rate_log`.
let rate = 1.0 / (1 << rate_log) as f64;
if conjecture {
// See Conjecture 2.3 in DEEP-FRI.
1.0 - rate - EPSILON
} else {
// See the Johnson radius.
1.0 - rate.sqrt() - EPSILON
}
}
fn fri_l(codeword_len: usize, rate_log: usize, conjecture: bool) -> f64 {
// The code rate is `2^-rate_log`.
let rate = 1.0 / (1 << rate_log) as f64;
if conjecture {
// See Conjecture 2.3 in DEEP-FRI.
// We assume the conjecture holds with a constant of 1 (as do other STARK implementations).
(codeword_len as f64) / EPSILON
} else {
// See the Johnson bound.
1.0 / (2.0 * EPSILON * rate.sqrt())
}
}
/// Builds a FRI proof.
fn fri_proof<F: Field>(
// Coefficients of the polynomial on which the LDT is performed.
// Only a `2^-rate_bits` fraction of the coefficients is non-zero.
polynomial_coeffs: &PolynomialCoeffs<F>,
// Evaluation of the polynomial on the large domain.
polynomial_values: &PolynomialValues<F>,
challenger: &mut Challenger<F>,
config: &FriConfig,
) -> FriProof<F> {
let n = polynomial_values.values.len();
assert_eq!(polynomial_coeffs.coeffs.len(), n);
// Commit phase
let (trees, final_coeffs) =
fri_committed_trees(polynomial_coeffs, polynomial_values, challenger, config);
// PoW phase
let current_hash = challenger.get_hash();
let pow_witness = fri_proof_of_work(current_hash, config);
// Query phase
let query_round_proofs = fri_prover_query_rounds(&trees, challenger, n, config);
FriProof {
commit_phase_merkle_roots: trees.iter().map(|t| t.root).collect(),
// TODO: Fix this
initial_merkle_proofs: vec![],
query_round_proofs,
final_poly: final_coeffs,
pow_witness,
}
}
fn fri_committed_trees<F: Field>(
polynomial_coeffs: &PolynomialCoeffs<F>,
polynomial_values: &PolynomialValues<F>,
challenger: &mut Challenger<F>,
config: &FriConfig,
) -> (Vec<MerkleTree<F>>, PolynomialCoeffs<F>) {
let mut values = polynomial_values.clone();
let mut coeffs = polynomial_coeffs.clone();
let mut trees = Vec::new();
let num_reductions = config.reduction_arity_bits.len();
for i in 0..num_reductions {
let arity = 1 << config.reduction_arity_bits[i];
reverse_index_bits_in_place(&mut values.values);
let tree = MerkleTree::new(
values
.values
.chunks(arity)
.map(|chunk| chunk.to_vec())
.collect(),
false,
);
challenger.observe_hash(&tree.root);
trees.push(tree);
let beta = challenger.get_challenge();
// P(x) = sum_{i<r} x^i * P_i(x^r) becomes sum_{i<r} beta^i * P_i(x).
coeffs = PolynomialCoeffs::new(
coeffs
.coeffs
.chunks_exact(arity)
.map(|chunk| reduce_with_powers(chunk, beta))
.collect::<Vec<_>>(),
);
// TODO: Is it faster to interpolate?
values = fft(coeffs.clone());
}
challenger.observe_elements(&coeffs.coeffs);
(trees, coeffs)
}
fn fri_proof_of_work<F: Field>(current_hash: Hash<F>, config: &FriConfig) -> F {
(0u64..)
.find(|&i| {
hash_n_to_1(
current_hash
.elements
.iter()
.copied()
.chain(Some(F::from_canonical_u64(i)))
.collect(),
false,
)
.to_canonical_u64()
.leading_zeros()
>= config.proof_of_work_bits
})
.map(F::from_canonical_u64)
.expect("Proof of work failed.")
}
fn fri_verify_proof_of_work<F: Field>(
proof: &FriProof<F>,
challenger: &mut Challenger<F>,
config: &FriConfig,
) -> Result<()> {
let hash = hash_n_to_1(
challenger
.get_hash()
.elements
.iter()
.copied()
.chain(Some(proof.pow_witness))
.collect(),
false,
);
ensure!(
hash.to_canonical_u64().leading_zeros()
>= config.proof_of_work_bits + F::ORDER.leading_zeros(),
"Invalid proof of work witness."
);
Ok(())
}
fn fri_prover_query_rounds<F: Field>(
trees: &[MerkleTree<F>],
challenger: &mut Challenger<F>,
n: usize,
config: &FriConfig,
) -> Vec<FriQueryRound<F>> {
(0..config.num_query_rounds)
.map(|_| fri_prover_query_round(trees, challenger, n, config))
.collect()
}
fn fri_prover_query_round<F: Field>(
trees: &[MerkleTree<F>],
challenger: &mut Challenger<F>,
n: usize,
config: &FriConfig,
) -> FriQueryRound<F> {
let mut query_steps = Vec::new();
// TODO: Challenger doesn't change between query rounds, so x is always the same.
let x = challenger.get_challenge();
let mut domain_size = n;
let mut x_index = x.to_canonical_u64() as usize % n;
for (i, tree) in trees.iter().enumerate() {
let arity_bits = config.reduction_arity_bits[i];
let arity = 1 << arity_bits;
let next_domain_size = domain_size >> arity_bits;
let evals = if i == 0 {
// For the first layer, we need to send the evaluation at `x` too.
tree.get(x_index >> arity_bits).to_vec()
} else {
// For the other layers, we don't need to send the evaluation at `x`, since it can
// be inferred by the verifier. See the `compute_evaluation` function.
let mut evals = tree.get(x_index >> arity_bits).to_vec();
evals.remove(x_index & (arity - 1));
evals
};
let merkle_proof = tree.prove(x_index >> arity_bits);
query_steps.push(FriQueryStep {
evals,
merkle_proof,
});
domain_size = next_domain_size;
x_index >>= arity_bits;
}
FriQueryRound { steps: query_steps }
}
/// Computes P'(x^arity) from {P(x*g^i)}_(i=0..arity), where g is an `arity`-th root of unity
/// and P' is the FRI reduced polynomial.
fn compute_evaluation<F: Field>(
x: F,
old_x_index: usize,
arity_bits: usize,
last_evals: &[F],
beta: F,
) -> F {
debug_assert_eq!(last_evals.len(), 1 << arity_bits);
let g = F::primitive_root_of_unity(arity_bits);
// The evaluation vector needs to be reordered first.
let mut evals = last_evals.to_vec();
reverse_index_bits_in_place(&mut evals);
evals.rotate_left(reverse_bits(old_x_index, arity_bits));
// The answer is obtained by interpolating {(x*g^i, P(x*g^i))} and evaluating at beta.
let points = g
.powers()
.zip(evals)
.map(|(y, e)| (x * y, e))
.collect::<Vec<_>>();
let barycentric_weights = barycentric_weights(&points);
interpolate(&points, beta, &barycentric_weights)
}
fn verify_fri_proof<F: Field>(
purported_degree_log: usize,
proof: &FriProof<F>,
challenger: &mut Challenger<F>,
config: &FriConfig,
) -> Result<()> {
let total_arities = config.reduction_arity_bits.iter().sum::<usize>();
ensure!(
purported_degree_log
== log2_strict(proof.final_poly.len()) + total_arities - config.rate_bits,
"Final polynomial has wrong degree."
);
// Size of the LDE domain.
let n = proof.final_poly.len() << total_arities;
// Recover the random betas used in the FRI reductions.
let betas = proof
.commit_phase_merkle_roots
.iter()
.map(|root| {
challenger.observe_hash(root);
challenger.get_challenge()
})
.collect::<Vec<_>>();
challenger.observe_elements(&proof.final_poly.coeffs);
// Check PoW.
fri_verify_proof_of_work(proof, challenger, config)?;
// Check that parameters are coherent.
ensure!(
config.num_query_rounds == proof.query_round_proofs.len(),
"Number of query rounds does not match config."
);
ensure!(
!config.reduction_arity_bits.is_empty(),
"Number of reductions should be non-zero."
);
for round_proof in &proof.query_round_proofs {
fri_verifier_query_round(&proof, challenger, n, &betas, round_proof, config)?;
}
Ok(())
}
fn fri_verifier_query_round<F: Field>(
proof: &FriProof<F>,
challenger: &mut Challenger<F>,
n: usize,
betas: &[F],
round_proof: &FriQueryRound<F>,
config: &FriConfig,
) -> Result<()> {
let mut evaluations = Vec::new();
let x = challenger.get_challenge();
let mut domain_size = n;
let mut x_index = x.to_canonical_u64() as usize % n;
let mut old_x_index = 0;
// `subgroup_x` is `subgroup[x_index]`, i.e., the actual field element in the domain.
let log_n = log2_strict(n);
let mut subgroup_x = F::primitive_root_of_unity(log_n).exp_usize(reverse_bits(x_index, log_n));
for (i, &arity_bits) in config.reduction_arity_bits.iter().enumerate() {
let arity = 1 << arity_bits;
let next_domain_size = domain_size >> arity_bits;
if i == 0 {
let evals = round_proof.steps[0].evals.clone();
evaluations.push(evals);
} else {
let last_evals = &evaluations[i - 1];
// Infer P(y) from {P(x)}_{x^arity=y}.
let e_x = compute_evaluation(
subgroup_x,
old_x_index,
config.reduction_arity_bits[i - 1],
last_evals,
betas[i - 1],
);
let mut evals = round_proof.steps[i].evals.clone();
// Insert P(y) into the evaluation vector, since it wasn't included by the prover.
evals.insert(x_index & (arity - 1), e_x);
evaluations.push(evals);
};
verify_merkle_proof(
evaluations[i].clone(),
x_index >> arity_bits,
proof.commit_phase_merkle_roots[i],
&round_proof.steps[i].merkle_proof,
false,
)?;
if i > 0 {
// Update the point x to x^arity.
for _ in 0..config.reduction_arity_bits[i - 1] {
subgroup_x = subgroup_x.square();
}
}
domain_size = next_domain_size;
old_x_index = x_index;
x_index >>= arity_bits;
}
let last_evals = evaluations.last().unwrap();
let final_arity_bits = *config.reduction_arity_bits.last().unwrap();
let purported_eval = compute_evaluation(
subgroup_x,
old_x_index,
final_arity_bits,
last_evals,
*betas.last().unwrap(),
);
for _ in 0..final_arity_bits {
subgroup_x = subgroup_x.square();
}
// Final check of FRI. After all the reductions, we check that the final polynomial is equal
// to the one sent by the prover.
ensure!(
proof.final_poly.eval(subgroup_x) == purported_eval,
"Final polynomial evaluation is invalid."
);
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::field::crandall_field::CrandallField;
use crate::field::fft::ifft;
use anyhow::Result;
use rand::rngs::ThreadRng;
use rand::Rng;
fn test_fri(
degree_log: usize,
rate_bits: usize,
reduction_arity_bits: Vec<usize>,
num_query_rounds: usize,
) -> Result<()> {
type F = CrandallField;
let n = 1 << degree_log;
let evals = PolynomialValues::new((0..n).map(|_| F::rand()).collect());
let lde = evals.clone().lde(rate_bits);
let config = FriConfig {
num_query_rounds,
rate_bits,
proof_of_work_bits: 2,
reduction_arity_bits,
};
let mut challenger = Challenger::new();
let proof = fri_proof(&ifft(lde.clone()), &lde, &mut challenger, &config);
let mut challenger = Challenger::new();
verify_fri_proof(degree_log, &proof, &mut challenger, &config)?;
Ok(())
}
fn gen_arities(degree_log: usize, rng: &mut ThreadRng) -> Vec<usize> {
let mut arities = Vec::new();
let mut remaining = degree_log;
while remaining > 0 {
let arity = rng.gen_range(0, remaining + 1);
arities.push(arity);
remaining -= arity;
}
arities
}
#[test]
fn test_fri_multi_params() -> Result<()> {
let mut rng = rand::thread_rng();
for degree_log in 1..6 {
for rate_bits in 0..3 {
for num_query_round in 0..4 {
for _ in 0..3 {
test_fri(
degree_log,
rate_bits,
gen_arities(degree_log, &mut rng),
num_query_round,
)?;
}
}
}
}
Ok(())
}
}

src/fri/mod.rs Normal file (141 lines added)
View File

@ -0,0 +1,141 @@
pub mod prover;
pub mod verifier;
/// Somewhat arbitrary. Smaller values increase delta (with diminishing returns) but also
/// increase L, potentially requiring more challenge points.
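/// For example, with rate 1/8 the conjectured bound below allows delta up to `1 - 1/8 - EPSILON`.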
const EPSILON: f64 = 0.01;
#[derive(Debug, Clone)]
pub struct FriConfig {
pub proof_of_work_bits: u32,
pub rate_bits: usize,
/// The arity of each FRI reduction step, expressed as the log2 of the actual arity.
/// For example, `[3, 2, 1]` would describe a FRI reduction tree with 8-to-1 reduction, then
/// a 4-to-1 reduction, then a 2-to-1 reduction. After these reductions, the reduced polynomial
/// is sent directly.
pub reduction_arity_bits: Vec<usize>,
/// Number of query rounds to perform.
pub num_query_rounds: usize,
/// True if the last element of the Merkle trees' leaf vectors is a blinding element.
pub blinding: bool,
}
fn fri_delta(rate_log: usize, conjecture: bool) -> f64 {
// The code rate is `2^-rate_log`.
let rate = 1.0 / (1 << rate_log) as f64;
if conjecture {
// See Conjecture 2.3 in DEEP-FRI.
1.0 - rate - EPSILON
} else {
// See the Johnson radius.
1.0 - rate.sqrt() - EPSILON
}
}
fn fri_l(codeword_len: usize, rate_log: usize, conjecture: bool) -> f64 {
// The code rate is `2^-rate_log`.
let rate = 1.0 / (1 << rate_log) as f64;
if conjecture {
// See Conjecture 2.3 in DEEP-FRI.
// We assume the conjecture holds with a constant of 1 (as do other STARK implementations).
(codeword_len as f64) / EPSILON
} else {
// See the Johnson bound.
1.0 / (2.0 * EPSILON * rate.sqrt())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::field::crandall_field::CrandallField;
use crate::field::fft::ifft;
use crate::field::field::Field;
use crate::fri::prover::fri_proof;
use crate::fri::verifier::verify_fri_proof;
use crate::merkle_tree::MerkleTree;
use crate::plonk_challenger::Challenger;
use crate::polynomial::polynomial::PolynomialCoeffs;
use crate::util::reverse_index_bits_in_place;
use anyhow::Result;
use rand::rngs::ThreadRng;
use rand::Rng;
fn test_fri(
degree_log: usize,
rate_bits: usize,
reduction_arity_bits: Vec<usize>,
num_query_rounds: usize,
) -> Result<()> {
type F = CrandallField;
let n = 1 << degree_log;
let coeffs = PolynomialCoeffs::new(F::rand_vec(n)).lde(rate_bits);
let coset_lde = coeffs.clone().coset_fft(F::MULTIPLICATIVE_GROUP_GENERATOR);
let config = FriConfig {
num_query_rounds,
rate_bits,
proof_of_work_bits: 2,
reduction_arity_bits,
blinding: false,
};
let tree = {
let mut leaves = coset_lde
.values
.iter()
.map(|&x| vec![x])
.collect::<Vec<_>>();
reverse_index_bits_in_place(&mut leaves);
MerkleTree::new(leaves, false)
};
let root = tree.root;
let mut challenger = Challenger::new();
let proof = fri_proof(&[tree], &coeffs, &coset_lde, &mut challenger, &config);
let mut challenger = Challenger::new();
verify_fri_proof(
degree_log,
&[],
F::ONE,
&[root],
&proof,
&mut challenger,
&config,
)?;
Ok(())
}
fn gen_arities(degree_log: usize, rng: &mut ThreadRng) -> Vec<usize> {
let mut arities = Vec::new();
let mut remaining = degree_log;
while remaining > 0 {
let arity = rng.gen_range(0, remaining + 1);
arities.push(arity);
remaining -= arity;
}
arities
}
#[test]
fn test_fri_multi_params() -> Result<()> {
let mut rng = rand::thread_rng();
for degree_log in 1..6 {
for rate_bits in 0..3 {
for num_query_round in 0..4 {
for _ in 0..3 {
test_fri(
degree_log,
rate_bits,
gen_arities(degree_log, &mut rng),
num_query_round,
)?;
}
}
}
}
Ok(())
}
}

src/fri/prover.rs Normal file (161 lines added)
View File

@ -0,0 +1,161 @@
use crate::field::field::Field;
use crate::fri::FriConfig;
use crate::hash::hash_n_to_1;
use crate::merkle_tree::MerkleTree;
use crate::plonk_challenger::Challenger;
use crate::plonk_common::reduce_with_powers;
use crate::polynomial::polynomial::{PolynomialCoeffs, PolynomialValues};
use crate::proof::{FriInitialTreeProof, FriProof, FriQueryRound, FriQueryStep, Hash};
use crate::util::reverse_index_bits_in_place;
/// Builds a FRI proof.
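/// The proof is produced in three phases (mirroring the calls below): a commit phase that
/// repeatedly folds the codeword and commits to each intermediate layer in a Merkle tree, a
/// proof-of-work phase that grinds a witness against the current transcript hash, and a query
/// phase that opens random positions in the initial and commit-phase trees.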
pub fn fri_proof<F: Field>(
initial_merkle_trees: &[MerkleTree<F>],
// Coefficients of the polynomial on which the LDT is performed. Only a `2^-rate_bits` fraction of the coefficients is non-zero.
lde_polynomial_coeffs: &PolynomialCoeffs<F>,
// Evaluation of the polynomial on the large domain.
lde_polynomial_values: &PolynomialValues<F>,
challenger: &mut Challenger<F>,
config: &FriConfig,
) -> FriProof<F> {
let n = lde_polynomial_values.values.len();
assert_eq!(lde_polynomial_coeffs.coeffs.len(), n);
// Commit phase
let (trees, final_coeffs) = fri_committed_trees(
lde_polynomial_coeffs,
lde_polynomial_values,
challenger,
config,
);
// PoW phase
let current_hash = challenger.get_hash();
let pow_witness = fri_proof_of_work(current_hash, config);
// Query phase
let query_round_proofs =
fri_prover_query_rounds(initial_merkle_trees, &trees, challenger, n, config);
FriProof {
commit_phase_merkle_roots: trees.iter().map(|t| t.root).collect(),
query_round_proofs,
final_poly: final_coeffs,
pow_witness,
}
}
fn fri_committed_trees<F: Field>(
polynomial_coeffs: &PolynomialCoeffs<F>,
polynomial_values: &PolynomialValues<F>,
challenger: &mut Challenger<F>,
config: &FriConfig,
) -> (Vec<MerkleTree<F>>, PolynomialCoeffs<F>) {
let mut values = polynomial_values.clone();
let mut coeffs = polynomial_coeffs.clone();
let mut trees = Vec::new();
let mut shift = F::MULTIPLICATIVE_GROUP_GENERATOR;
let num_reductions = config.reduction_arity_bits.len();
for i in 0..num_reductions {
let arity = 1 << config.reduction_arity_bits[i];
reverse_index_bits_in_place(&mut values.values);
let tree = MerkleTree::new(
values
.values
.chunks(arity)
.map(|chunk| chunk.to_vec())
.collect(),
false,
);
challenger.observe_hash(&tree.root);
trees.push(tree);
let beta = challenger.get_challenge();
// P(x) = sum_{i<r} x^i * P_i(x^r) becomes sum_{i<r} beta^i * P_i(x).
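// In coefficient form, chunk `k` of `coeffs` holds the degree-`k` coefficients of P_0, ..., P_{r-1},
// so folding each chunk with powers of `beta` yields the coefficients of the reduced polynomial.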
coeffs = PolynomialCoeffs::new(
coeffs
.coeffs
.chunks_exact(arity)
.map(|chunk| reduce_with_powers(chunk, beta))
.collect::<Vec<_>>(),
);
shift = shift.exp_u32(arity as u32);
// TODO: Is it faster to interpolate?
values = coeffs.clone().coset_fft(shift);
}
challenger.observe_elements(&coeffs.coeffs);
(trees, coeffs)
}
fn fri_proof_of_work<F: Field>(current_hash: Hash<F>, config: &FriConfig) -> F {
(0u64..)
.find(|&i| {
hash_n_to_1(
current_hash
.elements
.iter()
.copied()
.chain(Some(F::from_canonical_u64(i)))
.collect(),
false,
)
.to_canonical_u64()
.leading_zeros()
>= config.proof_of_work_bits
})
.map(F::from_canonical_u64)
.expect("Proof of work failed.")
}
fn fri_prover_query_rounds<F: Field>(
initial_merkle_trees: &[MerkleTree<F>],
trees: &[MerkleTree<F>],
challenger: &mut Challenger<F>,
n: usize,
config: &FriConfig,
) -> Vec<FriQueryRound<F>> {
(0..config.num_query_rounds)
.map(|_| fri_prover_query_round(initial_merkle_trees, trees, challenger, n, config))
.collect()
}
fn fri_prover_query_round<F: Field>(
initial_merkle_trees: &[MerkleTree<F>],
trees: &[MerkleTree<F>],
challenger: &mut Challenger<F>,
n: usize,
config: &FriConfig,
) -> FriQueryRound<F> {
let mut query_steps = Vec::new();
let x = challenger.get_challenge();
let mut x_index = x.to_canonical_u64() as usize % n;
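// All trees store their leaves in bit-reversed order, so `x_index` addresses the evaluations at
// position `reverse_bits(x_index, log_n)` of the (coset) LDE domain.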
let initial_proof = initial_merkle_trees
.iter()
.map(|t| (t.get(x_index).to_vec(), t.prove(x_index)))
.collect::<Vec<_>>();
for (i, tree) in trees.iter().enumerate() {
let arity_bits = config.reduction_arity_bits[i];
let arity = 1 << arity_bits;
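// The evaluation at `x` itself is omitted, since the verifier can reconstruct it: from the
// initial tree openings for the first layer, and from the previous layer's evaluations (see
// `compute_evaluation` in the verifier) afterwards.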
let mut evals = tree.get(x_index >> arity_bits).to_vec();
evals.remove(x_index & (arity - 1));
let merkle_proof = tree.prove(x_index >> arity_bits);
query_steps.push(FriQueryStep {
evals,
merkle_proof,
});
x_index >>= arity_bits;
}
FriQueryRound {
initial_trees_proof: FriInitialTreeProof {
evals_proofs: initial_proof,
},
steps: query_steps,
}
}

src/fri/verifier.rs Normal file (254 lines added)
View File

@ -0,0 +1,254 @@
use crate::field::field::Field;
use crate::field::lagrange::{barycentric_weights, interpolant, interpolate};
use crate::fri::FriConfig;
use crate::hash::hash_n_to_1;
use crate::merkle_proofs::verify_merkle_proof;
use crate::plonk_challenger::Challenger;
use crate::polynomial::polynomial::PolynomialCoeffs;
use crate::proof::{FriInitialTreeProof, FriProof, FriQueryRound, Hash};
use crate::util::{log2_strict, reverse_bits, reverse_index_bits_in_place};
use anyhow::{ensure, Result};
/// Computes P'(x^arity) from {P(x*g^i)}_(i=0..arity), where g is an `arity`-th root of unity
/// and P' is the FRI reduced polynomial.
fn compute_evaluation<F: Field>(
x: F,
old_x_index: usize,
arity_bits: usize,
last_evals: &[F],
beta: F,
) -> F {
debug_assert_eq!(last_evals.len(), 1 << arity_bits);
let g = F::primitive_root_of_unity(arity_bits);
// The evaluation vector needs to be reordered first.
let mut evals = last_evals.to_vec();
reverse_index_bits_in_place(&mut evals);
evals.rotate_left(reverse_bits(old_x_index, arity_bits));
// The answer is obtained by interpolating {(x*g^i, P(x*g^i))} and evaluating at beta.
let points = g
.powers()
.zip(evals)
.map(|(y, e)| (x * y, e))
.collect::<Vec<_>>();
let barycentric_weights = barycentric_weights(&points);
interpolate(&points, beta, &barycentric_weights)
}
fn fri_verify_proof_of_work<F: Field>(
proof: &FriProof<F>,
challenger: &mut Challenger<F>,
config: &FriConfig,
) -> Result<()> {
let hash = hash_n_to_1(
challenger
.get_hash()
.elements
.iter()
.copied()
.chain(Some(proof.pow_witness))
.collect(),
false,
);
ensure!(
hash.to_canonical_u64().leading_zeros()
>= config.proof_of_work_bits + F::ORDER.leading_zeros(),
"Invalid proof of work witness."
);
Ok(())
}
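/// Verifies a FRI proof. The values committed behind `initial_merkle_roots` are combined with
/// powers of `alpha`, the quotient by the interpolant of `points` is formed, and that quotient is
/// checked to be close to a polynomial of the purported degree.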
pub fn verify_fri_proof<F: Field>(
purported_degree_log: usize,
// Point-evaluation pairs for polynomial commitments.
points: &[(F, F)],
// Scaling factor to combine polynomials.
alpha: F,
initial_merkle_roots: &[Hash<F>],
proof: &FriProof<F>,
challenger: &mut Challenger<F>,
config: &FriConfig,
) -> Result<()> {
let total_arities = config.reduction_arity_bits.iter().sum::<usize>();
ensure!(
purported_degree_log
== log2_strict(proof.final_poly.len()) + total_arities - config.rate_bits,
"Final polynomial has wrong degree."
);
// Size of the LDE domain.
let n = proof.final_poly.len() << total_arities;
// Recover the random betas used in the FRI reductions.
let betas = proof
.commit_phase_merkle_roots
.iter()
.map(|root| {
challenger.observe_hash(root);
challenger.get_challenge()
})
.collect::<Vec<_>>();
challenger.observe_elements(&proof.final_poly.coeffs);
// Check PoW.
fri_verify_proof_of_work(proof, challenger, config)?;
// Check that parameters are coherent.
ensure!(
config.num_query_rounds == proof.query_round_proofs.len(),
"Number of query rounds does not match config."
);
ensure!(
!config.reduction_arity_bits.is_empty(),
"Number of reductions should be non-zero."
);
let interpolant = interpolant(points);
for round_proof in &proof.query_round_proofs {
fri_verifier_query_round(
&interpolant,
points,
alpha,
initial_merkle_roots,
&proof,
challenger,
n,
&betas,
round_proof,
config,
)?;
}
Ok(())
}
fn fri_verify_initial_proof<F: Field>(
x_index: usize,
proof: &FriInitialTreeProof<F>,
initial_merkle_roots: &[Hash<F>],
) -> Result<()> {
for ((evals, merkle_proof), &root) in proof.evals_proofs.iter().zip(initial_merkle_roots) {
verify_merkle_proof(evals.clone(), x_index, root, merkle_proof, false)?;
}
Ok(())
}
fn fri_combine_initial<F: Field>(
proof: &FriInitialTreeProof<F>,
alpha: F,
interpolant: &PolynomialCoeffs<F>,
points: &[(F, F)],
subgroup_x: F,
config: &FriConfig,
) -> F {
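// Combine the openings of all committed polynomials at this position into one evaluation of the
// composition polynomial (Horner over the reversed list, i.e. `sum_i alpha^i * eval_i`), then
// evaluate the quotient `(composition - interpolant) / Z` at `subgroup_x`, where `Z` is the
// vanishing polynomial of the opening points.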
let e = proof
.evals_proofs
.iter()
.map(|(v, _)| v)
.flatten()
.rev()
.skip(if config.blinding { 2 } else { 0 }) // If blinding, the last two elements are salt.
.fold(F::ZERO, |acc, &e| alpha * acc + e);
let numerator = e - interpolant.eval(subgroup_x);
let denominator = points.iter().map(|&(x, _)| subgroup_x - x).product();
numerator / denominator
}
fn fri_verifier_query_round<F: Field>(
interpolant: &PolynomialCoeffs<F>,
points: &[(F, F)],
alpha: F,
initial_merkle_roots: &[Hash<F>],
proof: &FriProof<F>,
challenger: &mut Challenger<F>,
n: usize,
betas: &[F],
round_proof: &FriQueryRound<F>,
config: &FriConfig,
) -> Result<()> {
let mut evaluations: Vec<Vec<F>> = Vec::new();
let x = challenger.get_challenge();
let mut domain_size = n;
let mut x_index = x.to_canonical_u64() as usize % n;
fri_verify_initial_proof(
x_index,
&round_proof.initial_trees_proof,
initial_merkle_roots,
)?;
let mut old_x_index = 0;
// `subgroup_x` is `subgroup[x_index]`, i.e., the actual field element in the domain.
let log_n = log2_strict(n);
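// The LDE was evaluated over the coset `MULTIPLICATIVE_GROUP_GENERATOR * <g>` and committed in
// bit-reversed order, hence the coset shift and the `reverse_bits` call here.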
let mut subgroup_x = F::MULTIPLICATIVE_GROUP_GENERATOR
* F::primitive_root_of_unity(log_n).exp_usize(reverse_bits(x_index, log_n));
for (i, &arity_bits) in config.reduction_arity_bits.iter().enumerate() {
let arity = 1 << arity_bits;
let next_domain_size = domain_size >> arity_bits;
let e_x = if i == 0 {
fri_combine_initial(
&round_proof.initial_trees_proof,
alpha,
interpolant,
points,
subgroup_x,
config,
)
} else {
let last_evals = &evaluations[i - 1];
// Infer P(y) from {P(x)}_{x^arity=y}.
compute_evaluation(
subgroup_x,
old_x_index,
config.reduction_arity_bits[i - 1],
last_evals,
betas[i - 1],
)
};
let mut evals = round_proof.steps[i].evals.clone();
// Insert P(y) into the evaluation vector, since it wasn't included by the prover.
evals.insert(x_index & (arity - 1), e_x);
evaluations.push(evals);
verify_merkle_proof(
evaluations[i].clone(),
x_index >> arity_bits,
proof.commit_phase_merkle_roots[i],
&round_proof.steps[i].merkle_proof,
false,
)?;
if i > 0 {
// Update the point x to x^arity.
for _ in 0..config.reduction_arity_bits[i - 1] {
subgroup_x = subgroup_x.square();
}
}
domain_size = next_domain_size;
old_x_index = x_index;
x_index >>= arity_bits;
}
let last_evals = evaluations.last().unwrap();
let final_arity_bits = *config.reduction_arity_bits.last().unwrap();
let purported_eval = compute_evaluation(
subgroup_x,
old_x_index,
final_arity_bits,
last_evals,
*betas.last().unwrap(),
);
for _ in 0..final_arity_bits {
subgroup_x = subgroup_x.square();
}
// Final check of FRI. After all the reductions, we check that the final polynomial is equal
// to the one sent by the prover.
ensure!(
proof.final_poly.eval(subgroup_x) == purported_eval,
"Final polynomial evaluation is invalid."
);
Ok(())
}

View File

@ -3,7 +3,6 @@ use crate::field::field::Field;
use crate::gates::gmimc::GMiMCGate;
use crate::hash::GMIMC_ROUNDS;
use crate::hash::{compress, hash_or_noop};
use crate::merkle_tree::MerkleTree;
use crate::proof::{Hash, HashTarget};
use crate::target::Target;
use crate::wire::Wire;

View File

@ -90,9 +90,7 @@ mod tests {
use super::*;
fn random_data<F: Field>(n: usize, k: usize) -> Vec<Vec<F>> {
(0..n)
.map(|_| (0..k).map(|_| F::rand()).collect())
.collect()
(0..n).map(|_| F::rand_vec(k)).collect()
}
fn verify_all_leaves<F: Field>(

View File

@ -258,7 +258,7 @@ mod tests {
// Generate random input messages.
let inputs_per_round: Vec<Vec<F>> = num_inputs_per_round
.iter()
.map(|&n| (0..n).map(|_| F::rand()).collect::<Vec<_>>())
.map(|&n| F::rand_vec(n))
.collect();
let mut challenger = Challenger::new();

View File

@ -0,0 +1,265 @@
use crate::field::field::Field;
use crate::field::lagrange::interpolant;
use crate::fri::{prover::fri_proof, verifier::verify_fri_proof, FriConfig};
use crate::merkle_tree::MerkleTree;
use crate::plonk_challenger::Challenger;
use crate::plonk_common::reduce_with_powers;
use crate::polynomial::old_polynomial::Polynomial;
use crate::polynomial::polynomial::PolynomialCoeffs;
use crate::proof::{FriProof, Hash};
use crate::util::{log2_strict, reverse_index_bits_in_place, transpose};
use anyhow::Result;
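/// A commitment to a list of polynomials, all of the same length. Their low-degree extensions over
/// a coset are committed in a single Merkle tree, with one leaf per evaluation point holding the
/// values of every polynomial (plus optional blinding salt). Openings at arbitrary points are then
/// proven by running FRI on the quotient `(composition - interpolant) / Z`, where the composition
/// combines the polynomials with powers of a challenge `alpha` and `Z` vanishes on the opened points.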
struct ListPolynomialCommitment<F: Field> {
pub polynomials: Vec<PolynomialCoeffs<F>>,
pub fri_config: FriConfig,
pub merkle_tree: MerkleTree<F>,
pub degree: usize,
}
impl<F: Field> ListPolynomialCommitment<F> {
pub fn new(polynomials: Vec<PolynomialCoeffs<F>>, fri_config: &FriConfig) -> Self {
let degree = polynomials[0].len();
let lde_values = polynomials
.iter()
.map(|p| {
assert_eq!(p.len(), degree, "Polynomial degree invalid.");
p.clone()
.lde(fri_config.rate_bits)
.coset_fft(F::MULTIPLICATIVE_GROUP_GENERATOR)
.values
})
.chain(if fri_config.blinding {
// If blinding, add two columns of random values; after transposing, each leaf vector gets two salt elements.
(0..2)
.map(|_| F::rand_vec(degree << fri_config.rate_bits))
.collect()
} else {
Vec::new()
})
.collect::<Vec<_>>();
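// Transpose so that each Merkle leaf holds the values of every polynomial (and any salt) at a
// single point of the LDE domain, stored in bit-reversed order to match the indexing used by the
// FRI prover and verifier.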
let mut leaves = transpose(&lde_values);
reverse_index_bits_in_place(&mut leaves);
let merkle_tree = MerkleTree::new(leaves, false);
Self {
polynomials,
fri_config: fri_config.clone(),
merkle_tree,
degree,
}
}
pub fn open(
&self,
points: &[F],
challenger: &mut Challenger<F>,
) -> (OpeningProof<F>, Vec<Vec<F>>) {
for p in points {
assert_ne!(
p.exp_usize(self.degree),
F::ONE,
"Opening point is in the subgroup."
);
}
let evaluations = points
.iter()
.map(|&x| {
self.polynomials
.iter()
.map(|p| p.eval(x))
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
for evals in &evaluations {
challenger.observe_elements(evals);
}
let alpha = challenger.get_challenge();
// Scale polynomials by `alpha`.
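// Folding over the reversed list is Horner's rule: the result is `sum_i alpha^i * polynomials[i]`.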
let composition_poly = self
.polynomials
.iter()
.rev()
.map(|p| p.clone().into())
.fold(Polynomial::empty(), |acc, p| acc.scalar_mul(alpha).add(&p));
// Scale evaluations by `alpha`.
let composition_evals = evaluations
.iter()
.map(|e| reduce_with_powers(e, alpha))
.collect::<Vec<_>>();
let quotient = Self::compute_quotient(points, &composition_evals, &composition_poly);
let lde_quotient = PolynomialCoeffs::from(quotient.clone()).lde(self.fri_config.rate_bits);
let lde_quotient_values = lde_quotient
.clone()
.coset_fft(F::MULTIPLICATIVE_GROUP_GENERATOR);
let fri_proof = fri_proof(
&[self.merkle_tree.clone()],
&lde_quotient,
&lde_quotient_values,
challenger,
&self.fri_config,
);
(
OpeningProof {
fri_proof,
quotient_degree: quotient.len(),
},
evaluations,
)
}
/// Given `points=(x_i)`, `evals=(y_i)` and `poly=P` with `P(x_i)=y_i`, computes the polynomial
/// `Q=(P-I)/Z` where `I` interpolates `(x_i, y_i)` and `Z` is the vanishing polynomial on `(x_i)`.
fn compute_quotient(points: &[F], evals: &[F], poly: &Polynomial<F>) -> Polynomial<F> {
let pairs = points
.iter()
.zip(evals)
.map(|(&x, &e)| (x, e))
.collect::<Vec<_>>();
debug_assert!(pairs.iter().all(|&(x, e)| poly.eval(x) == e));
let interpolant: Polynomial<F> = interpolant(&pairs).into();
let denominator = points
.iter()
.fold(Polynomial::from(vec![F::ONE]), |acc, &x| {
acc.mul(&vec![-x, F::ONE].into())
});
let numerator = poly.add(&interpolant.neg());
let (mut quotient, rem) = numerator.polynomial_division(&denominator);
debug_assert!(rem.is_zero());
quotient.pad((quotient.degree() + 1).next_power_of_two());
quotient
}
}
pub struct OpeningProof<F: Field> {
fri_proof: FriProof<F>,
// TODO: Get the degree from `CommonCircuitData` instead.
quotient_degree: usize,
}
impl<F: Field> OpeningProof<F> {
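/// Verifies the opening proof against the claimed `evaluations` at `points`: re-derives `alpha`
/// from the transcript, combines each point's evaluations with powers of `alpha`, and checks the
/// FRI proof for the corresponding quotient polynomial.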
pub fn verify(
&self,
points: &[F],
evaluations: &[Vec<F>],
merkle_root: Hash<F>,
challenger: &mut Challenger<F>,
fri_config: &FriConfig,
) -> Result<()> {
for evals in evaluations {
challenger.observe_elements(evals);
}
let alpha = challenger.get_challenge();
let scaled_evals = evaluations
.iter()
.map(|e| reduce_with_powers(e, alpha))
.collect::<Vec<_>>();
let pairs = points
.iter()
.zip(&scaled_evals)
.map(|(&x, &e)| (x, e))
.collect::<Vec<_>>();
verify_fri_proof(
log2_strict(self.quotient_degree),
&pairs,
alpha,
&[merkle_root],
&self.fri_proof,
challenger,
fri_config,
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::field::crandall_field::CrandallField;
use anyhow::Result;
fn gen_random_test_case<F: Field>(
k: usize,
degree_log: usize,
num_points: usize,
) -> (Vec<PolynomialCoeffs<F>>, Vec<F>) {
let degree = 1 << degree_log;
let polys = (0..k)
.map(|_| PolynomialCoeffs::new(F::rand_vec(degree)))
.collect();
let mut points = F::rand_vec(num_points);
while points.iter().any(|&x| x.exp_usize(degree).is_one()) {
points = F::rand_vec(num_points);
}
(polys, points)
}
#[test]
fn test_polynomial_commitment() -> Result<()> {
type F = CrandallField;
let k = 10;
let degree_log = 11;
let num_points = 3;
let fri_config = FriConfig {
proof_of_work_bits: 2,
rate_bits: 2,
reduction_arity_bits: vec![3, 2, 1, 2],
num_query_rounds: 3,
blinding: false,
};
let (polys, points) = gen_random_test_case::<F>(k, degree_log, num_points);
let lpc = ListPolynomialCommitment::new(polys, &fri_config);
let (proof, evaluations) = lpc.open(&points, &mut Challenger::new());
proof.verify(
&points,
&evaluations,
lpc.merkle_tree.root,
&mut Challenger::new(),
&fri_config,
)
}
#[test]
fn test_polynomial_commitment_blinding() -> Result<()> {
type F = CrandallField;
let k = 10;
let degree_log = 11;
let num_points = 3;
let fri_config = FriConfig {
proof_of_work_bits: 2,
rate_bits: 2,
reduction_arity_bits: vec![3, 2, 1, 2],
num_query_rounds: 3,
blinding: true,
};
let (polys, points) = gen_random_test_case::<F>(k, degree_log, num_points);
let lpc = ListPolynomialCommitment::new(polys, &fri_config);
let (proof, evaluations) = lpc.open(&points, &mut Challenger::new());
proof.verify(
&points,
&evaluations,
lpc.merkle_tree.root,
&mut Challenger::new(),
&fri_config,
)
}
}

View File

@ -1,2 +1,4 @@
pub mod commitment;
pub(crate) mod division;
mod old_polynomial;
pub mod polynomial;

View File

@ -0,0 +1,574 @@
#![allow(clippy::many_single_char_names)]
use crate::field::fft::{
fft_precompute, fft_with_precomputation_power_of_2, ifft_with_precomputation_power_of_2,
FftPrecomputation,
};
use crate::field::field::Field;
use crate::polynomial::polynomial::PolynomialCoeffs;
use crate::util::log2_ceil;
use std::cmp::Ordering;
use std::ops::{Index, IndexMut, RangeBounds};
use std::slice::{Iter, IterMut, SliceIndex};
/// Polynomial struct holding a polynomial in coefficient form.
#[derive(Debug, Clone)]
pub struct Polynomial<F: Field>(Vec<F>);
impl<F: Field> PartialEq for Polynomial<F> {
fn eq(&self, other: &Self) -> bool {
let max_terms = self.0.len().max(other.0.len());
for i in 0..max_terms {
let self_i = self.0.get(i).cloned().unwrap_or(F::ZERO);
let other_i = other.0.get(i).cloned().unwrap_or(F::ZERO);
if self_i != other_i {
return false;
}
}
true
}
}
impl<F: Field> Eq for Polynomial<F> {}
impl<F: Field> From<Vec<F>> for Polynomial<F> {
/// Takes a vector of coefficients and returns the corresponding polynomial.
fn from(coeffs: Vec<F>) -> Self {
Self(coeffs)
}
}
impl<F: Field> From<PolynomialCoeffs<F>> for Polynomial<F> {
fn from(coeffs: PolynomialCoeffs<F>) -> Self {
Self(coeffs.coeffs)
}
}
impl<F: Field> From<Polynomial<F>> for PolynomialCoeffs<F> {
fn from(poly: Polynomial<F>) -> Self {
Self::new(poly.0)
}
}
impl<F, I> Index<I> for Polynomial<F>
where
F: Field,
I: SliceIndex<[F]>,
{
type Output = I::Output;
/// Indexing on the coefficients.
fn index(&self, index: I) -> &Self::Output {
&self.0[index]
}
}
impl<F, I> IndexMut<I> for Polynomial<F>
where
F: Field,
I: SliceIndex<[F]>,
{
fn index_mut(&mut self, index: I) -> &mut <Self as Index<I>>::Output {
&mut self.0[index]
}
}
impl<F: Field> Polynomial<F> {
/// Takes a slice of coefficients and returns the corresponding polynomial.
pub fn from_coeffs(coeffs: &[F]) -> Self {
Self(coeffs.to_vec())
}
/// Returns the coefficient vector.
pub fn coeffs(&self) -> &[F] {
&self.0
}
/// The empty polynomial, with no coefficients.
pub fn empty() -> Self {
Self(Vec::new())
}
/// Zero polynomial with length `len`.
/// `len = 1` is the standard representation, but sometimes it's useful to set `len > 1`
/// to have polynomials with uniform length.
pub fn zero(len: usize) -> Self {
Self(vec![F::ZERO; len])
}
pub fn iter(&self) -> Iter<F> {
self.0.iter()
}
pub fn iter_mut(&mut self) -> IterMut<F> {
self.0.iter_mut()
}
pub fn is_zero(&self) -> bool {
self.0.iter().all(|x| x.is_zero())
}
/// Number of coefficients held by the polynomial. This is NOT the degree in general.
pub fn len(&self) -> usize {
self.0.len()
}
/// Degree of the polynomial.
/// Panics on zero polynomial.
pub fn degree(&self) -> usize {
(0usize..self.len())
.rev()
.find(|&i| self[i].is_nonzero())
.expect("Zero polynomial")
}
/// Degree of the polynomial + 1.
fn degree_plus_one(&self) -> usize {
(0usize..self.len())
.rev()
.find(|&i| self[i].is_nonzero())
.map_or(0, |i| i + 1)
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Removes the coefficients in the range `range`.
fn drain<R: RangeBounds<usize>>(&mut self, range: R) {
self.0.drain(range);
}
/// Evaluates the polynomial at a point `x`.
pub fn eval(&self, x: F) -> F {
self.iter().rev().fold(F::ZERO, |acc, &c| acc * x + c)
}
/// Evaluates the polynomial at a point `x`, given the list of powers of `x`.
/// Assumes that `self.len() == x_pow.len()`.
pub fn eval_from_power(&self, x_pow: &[F]) -> F {
self.iter()
.zip(x_pow)
.fold(F::ZERO, |acc, (&c, &p)| acc + c * p)
}
/// Evaluates the polynomial on a subgroup of `F^*` with a given FFT precomputation.
pub(crate) fn eval_domain(&self, fft_precomputation: &FftPrecomputation<F>) -> Vec<F> {
let domain_size = fft_precomputation.size();
if self.len() < domain_size {
// Need to pad the polynomial to have the same length as the domain.
fft_with_precomputation_power_of_2(
self.padded(domain_size).coeffs().to_vec().into(),
fft_precomputation,
)
.values
} else {
fft_with_precomputation_power_of_2(self.coeffs().to_vec().into(), fft_precomputation)
.values
}
}
/// Computes the interpolating polynomial of a list of `values` on a subgroup of `F^*`.
pub(crate) fn from_evaluations(
values: &[F],
fft_precomputation: &FftPrecomputation<F>,
) -> Self {
Self(ifft_with_precomputation_power_of_2(values.to_vec().into(), fft_precomputation).coeffs)
}
/// Leading coefficient.
pub fn lead(&self) -> F {
self.iter()
.rev()
.find(|x| x.is_nonzero())
.map_or(F::ZERO, |x| *x)
}
/// Reverses the order of the coefficients, ignoring leading zero coefficients.
fn rev(&self) -> Self {
let d = self.degree();
Self(self.0[..=d].iter().rev().copied().collect())
}
/// Negates the polynomial's coefficients.
pub fn neg(&self) -> Self {
Self(self.iter().map(|&x| -x).collect())
}
/// Multiply the polynomial's coefficients by a scalar.
pub(crate) fn scalar_mul(&self, c: F) -> Self {
Self(self.iter().map(|&x| c * x).collect())
}
/// Removes leading zero coefficients.
pub fn trim(&mut self) {
self.0.drain(self.degree_plus_one()..);
}
/// Polynomial addition.
pub fn add(&self, other: &Self) -> Self {
let (mut a, mut b) = (self.clone(), other.clone());
match a.len().cmp(&b.len()) {
Ordering::Less => a.pad(b.len()),
Ordering::Greater => b.pad(a.len()),
_ => (),
}
Self(a.iter().zip(b.iter()).map(|(&x, &y)| x + y).collect())
}
/// Zero-pad the coefficients to have a given length.
pub fn pad(&mut self, len: usize) {
self.trim();
assert!(self.len() <= len);
self.0.extend((self.len()..len).map(|_| F::ZERO));
}
/// Returns the zero-padded polynomial.
pub fn padded(&self, len: usize) -> Self {
let mut a = self.clone();
a.pad(len);
a
}
/// Polynomial multiplication.
pub fn mul(&self, b: &Self) -> Self {
if self.is_zero() || b.is_zero() {
return Self::zero(1);
}
let a_deg = self.degree();
let b_deg = b.degree();
let new_deg = (a_deg + b_deg + 1).next_power_of_two();
let a_pad = self.padded(new_deg);
let b_pad = b.padded(new_deg);
let precomputation = fft_precompute(new_deg);
let a_evals = fft_with_precomputation_power_of_2(a_pad.0.to_vec().into(), &precomputation);
let b_evals = fft_with_precomputation_power_of_2(b_pad.0.to_vec().into(), &precomputation);
let mul_evals: Vec<F> = a_evals
.values
.iter()
.zip(b_evals.values.iter())
.map(|(&pa, &pb)| pa * pb)
.collect();
ifft_with_precomputation_power_of_2(mul_evals.to_vec().into(), &precomputation)
.coeffs
.into()
}
/// Polynomial long division.
/// Returns `(q,r)` the quotient and remainder of the polynomial division of `a` by `b`.
/// Generally slower than the equivalent function `Polynomial::polynomial_division`.
pub fn polynomial_long_division(&self, b: &Self) -> (Self, Self) {
let (a_degree, b_degree) = (self.degree(), b.degree());
if self.is_zero() {
(Self::zero(1), Self::empty())
} else if b.is_zero() {
panic!("Division by zero polynomial");
} else if a_degree < b_degree {
(Self::zero(1), self.clone())
} else {
// Now we know that self.degree() >= divisor.degree();
let mut quotient = Self::zero(a_degree - b_degree + 1);
let mut remainder = self.clone();
// `b` is nonzero (checked above), so its leading coefficient has an inverse.
let divisor_leading_inv = b.lead().inverse();
while !remainder.is_zero() && remainder.degree() >= b_degree {
let cur_q_coeff = remainder.lead() * divisor_leading_inv;
let cur_q_degree = remainder.degree() - b_degree;
quotient[cur_q_degree] = cur_q_coeff;
for (i, &div_coeff) in b.iter().enumerate() {
remainder[cur_q_degree + i] =
remainder[cur_q_degree + i] - (cur_q_coeff * div_coeff);
}
remainder.trim();
}
(quotient, remainder)
}
}
/// Computes the inverse of `self` modulo `x^n`.
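/// Each iteration of the loop below doubles the number of computed low-order coefficients of the
/// inverse (a Newton-style lifting).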
fn inv_mod_xn(&self, n: usize) -> Self {
assert!(self[0].is_nonzero(), "Inverse doesn't exist.");
let mut h = self.clone();
if h.len() < n {
h.pad(n);
}
let mut a = Self::empty();
a.0.push(h[0].inverse());
for i in 0..log2_ceil(n) {
let l = 1 << i;
let h0 = h[..l].to_vec().into();
let mut h1: Polynomial<F> = h[l..].to_vec().into();
let mut c = a.mul(&h0);
if l == c.len() {
c = Self::zero(1);
} else {
c.drain(0..l);
}
h1.trim();
let mut tmp = a.mul(&h1);
tmp = tmp.add(&c);
tmp.iter_mut().for_each(|x| *x = -(*x));
tmp.trim();
let mut b = a.mul(&tmp);
b.trim();
if b.len() > l {
b.drain(l..);
}
a.0.extend_from_slice(&b[..]);
}
a.drain(n..);
a
}
/// Polynomial division.
/// Returns `(q,r)` the quotient and remainder of the polynomial division of `a` by `b`.
/// Algorithm from http://people.csail.mit.edu/madhu/ST12/scribe/lect06.pdf
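/// The quotient is obtained as `rev(rev(a) * rev(b)^-1 mod x^(deg a - deg b + 1))`, where `rev`
/// reverses the coefficient order; the remainder is then `r = a - q * b`.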
pub fn polynomial_division(&self, b: &Self) -> (Self, Self) {
let (a_degree, b_degree) = (self.degree(), b.degree());
if self.is_zero() {
(Self::zero(1), Self::empty())
} else if b.is_zero() {
panic!("Division by zero polynomial");
} else if a_degree < b_degree {
(Self::zero(1), self.clone())
} else if b_degree == 0 {
(self.scalar_mul(b[0].inverse()), Self::empty())
} else {
let rev_b = b.rev();
let rev_b_inv = rev_b.inv_mod_xn(a_degree - b_degree + 1);
let rev_q: Polynomial<F> = rev_b_inv
.mul(&self.rev()[..=a_degree - b_degree].to_vec().into())[..=a_degree - b_degree]
.to_vec()
.into();
let mut q = rev_q.rev();
let mut qb = q.mul(b);
qb.pad(self.len());
let mut r = self.add(&qb.neg());
q.trim();
r.trim();
(q, r)
}
}
// Divides a polynomial `a` by `Z_H = X^n - 1`. Assumes `Z_H | a`; otherwise the result is meaningless.
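// The division is performed pointwise on the coset `{g.w^i}` rather than on the subgroup `{w^i}`,
// since `Z_H` vanishes on the subgroup, while `Z_H(g.w^i) = g^n.w^(n*i) - 1` is nonzero there.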
pub fn divide_by_z_h(&self, n: usize) -> Self {
if self.is_zero() {
return self.clone();
}
let mut a_trim = self.clone();
a_trim.trim();
let g = F::MULTIPLICATIVE_GROUP_GENERATOR;
let mut g_pow = F::ONE;
// Multiply the i-th coefficient of `a` by `g^i`. Then `new_a(w^j) = old_a(g.w^j)`.
a_trim.iter_mut().for_each(|x| {
*x = (*x) * g_pow;
g_pow = g * g_pow;
});
let d = a_trim.degree();
let root = F::primitive_root_of_unity(log2_ceil(d + 1));
let precomputation = fft_precompute(d + 1);
// This equals the evaluation of `a` on `{g.w^i}`.
let mut a_eval = a_trim.eval_domain(&precomputation);
// Compute the denominators `1/(g^n.w^(n*i) - 1)` using batch inversion.
let denominator_g = g.exp_usize(n);
let root_n = root.exp_usize(n);
let mut root_pow = F::ONE;
let denominators = (0..a_eval.len())
.map(|i| {
if i != 0 {
root_pow = root_pow * root_n;
}
denominator_g * root_pow - F::ONE
})
.collect::<Vec<_>>();
let denominators_inv = F::batch_multiplicative_inverse(&denominators);
// Divide every element of `a_eval` by the corresponding denominator.
// Then, `a_eval` is the evaluation of `a/Z_H` on `{g.w^i}`.
a_eval
.iter_mut()
.zip(denominators_inv.iter())
.for_each(|(x, &d)| {
*x = (*x) * d;
});
// `p` is the interpolating polynomial of `a_eval` on `{w^i}`.
let mut p = Self::from_evaluations(&a_eval, &precomputation);
// We need to scale it by `g^(-i)` to get the interpolating polynomial of `a_eval` on `{g.w^i}`,
// a.k.a `a/Z_H`.
let g_inv = g.inverse();
let mut g_inv_pow = F::ONE;
p.iter_mut().for_each(|x| {
*x = (*x) * g_inv_pow;
g_inv_pow = g_inv_pow * g_inv;
});
p
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::field::crandall_field::CrandallField;
use rand::{thread_rng, Rng};
use std::time::Instant;
#[test]
fn test_polynomial_multiplication() {
type F = CrandallField;
let mut rng = thread_rng();
let (a_deg, b_deg) = (rng.gen_range(1, 10_000), rng.gen_range(1, 10_000));
let a = Polynomial(F::rand_vec(a_deg));
let b = Polynomial(F::rand_vec(b_deg));
let m1 = a.mul(&b);
let m2 = a.mul(&b);
for _ in 0..1000 {
let x = F::rand();
assert_eq!(m1.eval(x), a.eval(x) * b.eval(x));
assert_eq!(m2.eval(x), a.eval(x) * b.eval(x));
}
}
#[test]
fn test_inv_mod_xn() {
type F = CrandallField;
let mut rng = thread_rng();
let a_deg = rng.gen_range(1, 1_000);
let n = rng.gen_range(1, 1_000);
let a = Polynomial(F::rand_vec(a_deg));
let b = a.inv_mod_xn(n);
let mut m = a.mul(&b);
m.drain(n..);
m.trim();
assert_eq!(
m,
Polynomial(vec![F::ONE]),
"a: {:#?}, b:{:#?}, n:{:#?}, m:{:#?}",
a,
b,
n,
m
);
}
#[test]
fn test_polynomial_long_division() {
type F = CrandallField;
let mut rng = thread_rng();
let (a_deg, b_deg) = (rng.gen_range(1, 10_000), rng.gen_range(1, 10_000));
let a = Polynomial(F::rand_vec(a_deg));
let b = Polynomial(F::rand_vec(b_deg));
let (q, r) = a.polynomial_long_division(&b);
for _ in 0..1000 {
let x = F::rand();
assert_eq!(a.eval(x), b.eval(x) * q.eval(x) + r.eval(x));
}
}
#[test]
fn test_polynomial_division() {
type F = CrandallField;
let mut rng = thread_rng();
let (a_deg, b_deg) = (rng.gen_range(1, 10_000), rng.gen_range(1, 10_000));
let a = Polynomial(F::rand_vec(a_deg));
let b = Polynomial(F::rand_vec(b_deg));
let (q, r) = a.polynomial_division(&b);
for _ in 0..1000 {
let x = F::rand();
assert_eq!(a.eval(x), b.eval(x) * q.eval(x) + r.eval(x));
}
}
#[test]
fn test_polynomial_division_by_constant() {
type F = CrandallField;
let mut rng = thread_rng();
let a_deg = rng.gen_range(1, 10_000);
let a = Polynomial(F::rand_vec(a_deg));
let b = Polynomial::from(vec![F::rand()]);
let (q, r) = a.polynomial_division(&b);
for _ in 0..1000 {
let x = F::rand();
assert_eq!(a.eval(x), b.eval(x) * q.eval(x) + r.eval(x));
}
}
#[test]
fn test_division_by_z_h() {
type F = CrandallField;
let mut rng = thread_rng();
let a_deg = rng.gen_range(1, 10_000);
let n = rng.gen_range(1, a_deg);
let mut a = Polynomial(F::rand_vec(a_deg));
a.trim();
let z_h = {
let mut z_h_vec = vec![F::ZERO; n + 1];
z_h_vec[n] = F::ONE;
z_h_vec[0] = F::NEG_ONE;
Polynomial(z_h_vec)
};
let m = a.mul(&z_h);
let now = Instant::now();
let mut a_test = m.divide_by_z_h(n);
a_test.trim();
println!("Division time: {:?}", now.elapsed());
assert_eq!(a, a_test);
}
#[test]
fn divide_zero_poly_by_z_h() {
let zero_poly = Polynomial::<CrandallField>::empty();
zero_poly.divide_by_z_h(16);
}
// Test to see which polynomial division method is faster for divisions of the type
// `(X^n - 1)/(X - a)`.
#[test]
fn test_division_linear() {
type F = CrandallField;
let mut rng = thread_rng();
let l = 14;
let n = 1 << l;
let g = F::primitive_root_of_unity(l);
let xn_minus_one = {
let mut xn_min_one_vec = vec![F::ZERO; n + 1];
xn_min_one_vec[n] = F::ONE;
xn_min_one_vec[0] = F::NEG_ONE;
Polynomial(xn_min_one_vec)
};
let a = g.exp_usize(rng.gen_range(0, n));
let denom = Polynomial(vec![-a, F::ONE]);
let now = Instant::now();
xn_minus_one.polynomial_division(&denom);
println!("Division time: {:?}", now.elapsed());
let now = Instant::now();
xn_minus_one.polynomial_long_division(&denom);
println!("Division time: {:?}", now.elapsed());
}
#[test]
fn eq() {
type F = CrandallField;
assert_eq!(Polynomial::<F>(vec![]), Polynomial(vec![]));
assert_eq!(Polynomial::<F>(vec![F::ZERO]), Polynomial(vec![F::ZERO]));
assert_eq!(Polynomial::<F>(vec![]), Polynomial(vec![F::ZERO]));
assert_eq!(Polynomial::<F>(vec![F::ZERO]), Polynomial(vec![]));
assert_eq!(
Polynomial::<F>(vec![F::ZERO]),
Polynomial(vec![F::ZERO, F::ZERO])
);
assert_eq!(
Polynomial::<F>(vec![F::ONE]),
Polynomial(vec![F::ONE, F::ZERO])
);
assert_ne!(Polynomial::<F>(vec![]), Polynomial(vec![F::ONE]));
assert_ne!(
Polynomial::<F>(vec![F::ZERO]),
Polynomial(vec![F::ZERO, F::ONE])
);
assert_ne!(
Polynomial::<F>(vec![F::ZERO]),
Polynomial(vec![F::ONE, F::ZERO])
);
}
}

View File

@ -1,6 +1,7 @@
use crate::field::fft::{fft, ifft};
use crate::field::field::Field;
use crate::util::log2_strict;
use std::slice::Iter;
/// A polynomial in point-value form.
///
@ -35,6 +36,12 @@ impl<F: Field> PolynomialValues<F> {
}
}
impl<F: Field> From<Vec<F>> for PolynomialValues<F> {
fn from(values: Vec<F>) -> Self {
Self::new(values)
}
}
/// A polynomial in coefficient form.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PolynomialCoeffs<F: Field> {
@ -108,4 +115,49 @@ impl<F: Field> PolynomialCoeffs<F> {
.find(|&i| self.coeffs[i].is_nonzero())
.map_or(0, |i| i + 1)
}
pub fn fft(self) -> PolynomialValues<F> {
fft(self)
}
pub fn coset_fft(self, shift: F) -> PolynomialValues<F> {
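// Evaluating the polynomial on the coset `shift * <g>` is equivalent to evaluating `P(shift * X)`
// on `<g>`, and `P(shift * X)` has coefficients `shift^i * c_i`, hence the rescaling before a
// plain FFT.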
let modified_poly: Self = shift
.powers()
.zip(self.coeffs)
.map(|(r, c)| r * c)
.collect::<Vec<_>>()
.into();
modified_poly.fft()
}
}
impl<F: Field> From<Vec<F>> for PolynomialCoeffs<F> {
fn from(coeffs: Vec<F>) -> Self {
Self::new(coeffs)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::field::crandall_field::CrandallField;
#[test]
fn test_coset_fft() {
type F = CrandallField;
let k = 8;
let n = 1 << k;
let poly = PolynomialCoeffs::new(F::rand_vec(n));
let shift = F::rand();
let coset_evals = poly.clone().coset_fft(shift).values;
let generator = F::primitive_root_of_unity(k);
let naive_coset_evals = F::cyclic_subgroup_coset_known_order(generator, shift, n)
.into_iter()
.map(|x| poly.eval(x))
.collect::<Vec<_>>();
assert_eq!(coset_evals, naive_coset_evals);
}
}

View File

@ -90,17 +90,23 @@ pub struct FriQueryStep<F: Field> {
pub merkle_proof: MerkleProof<F>,
}
/// Evaluations and Merkle proofs of the original set of polynomials,
/// before they are combined into a composition polynomial.
// TODO: Implement FriInitialTreeProofTarget
pub struct FriInitialTreeProof<F: Field> {
pub evals_proofs: Vec<(Vec<F>, MerkleProof<F>)>,
}
/// Proof for a FRI query round.
// TODO: Implement FriQueryRoundTarget
pub struct FriQueryRound<F: Field> {
pub initial_trees_proof: FriInitialTreeProof<F>,
pub steps: Vec<FriQueryStep<F>>,
}
pub struct FriProof<F: Field> {
/// A Merkle root for each reduced polynomial in the commit phase.
pub commit_phase_merkle_roots: Vec<Hash<F>>,
/// Merkle proofs for the original purported codewords, i.e. the subject of the LDT.
pub initial_merkle_proofs: Vec<MerkleProof<F>>,
/// Query round proofs.
pub query_round_proofs: Vec<FriQueryRound<F>>,
/// The final polynomial in coefficient form.