diff --git a/src/curve/curve_adds.rs b/src/curve/curve_adds.rs new file mode 100644 index 00000000..f25d3847 --- /dev/null +++ b/src/curve/curve_adds.rs @@ -0,0 +1,156 @@ +use std::ops::Add; + +use crate::curve::curve_types::{AffinePoint, Curve, ProjectivePoint}; +use crate::field::field_types::Field; + +impl Add> for ProjectivePoint { + type Output = ProjectivePoint; + + fn add(self, rhs: ProjectivePoint) -> Self::Output { + let ProjectivePoint { + x: x1, + y: y1, + z: z1, + } = self; + let ProjectivePoint { + x: x2, + y: y2, + z: z2, + } = rhs; + + if z1 == C::BaseField::ZERO { + return rhs; + } + if z2 == C::BaseField::ZERO { + return self; + } + + let x1z2 = x1 * z2; + let y1z2 = y1 * z2; + let x2z1 = x2 * z1; + let y2z1 = y2 * z1; + + // Check if we're doubling or adding inverses. + if x1z2 == x2z1 { + if y1z2 == y2z1 { + // TODO: inline to avoid redundant muls. + return self.double(); + } + if y1z2 == -y2z1 { + return ProjectivePoint::ZERO; + } + } + + // From https://www.hyperelliptic.org/EFD/g1p/data/shortw/projective/addition/add-1998-cmo-2 + let z1z2 = z1 * z2; + let u = y2z1 - y1z2; + let uu = u.square(); + let v = x2z1 - x1z2; + let vv = v.square(); + let vvv = v * vv; + let r = vv * x1z2; + let a = uu * z1z2 - vvv - r.double(); + let x3 = v * a; + let y3 = u * (r - a) - vvv * y1z2; + let z3 = vvv * z1z2; + ProjectivePoint::nonzero(x3, y3, z3) + } +} + +impl Add> for ProjectivePoint { + type Output = ProjectivePoint; + + fn add(self, rhs: AffinePoint) -> Self::Output { + let ProjectivePoint { + x: x1, + y: y1, + z: z1, + } = self; + let AffinePoint { + x: x2, + y: y2, + zero: zero2, + } = rhs; + + if z1 == C::BaseField::ZERO { + return rhs.to_projective(); + } + if zero2 { + return self; + } + + let x2z1 = x2 * z1; + let y2z1 = y2 * z1; + + // Check if we're doubling or adding inverses. + if x1 == x2z1 { + if y1 == y2z1 { + // TODO: inline to avoid redundant muls. + return self.double(); + } + if y1 == -y2z1 { + return ProjectivePoint::ZERO; + } + } + + // From https://www.hyperelliptic.org/EFD/g1p/data/shortw/projective/addition/madd-1998-cmo + let u = y2z1 - y1; + let uu = u.square(); + let v = x2z1 - x1; + let vv = v.square(); + let vvv = v * vv; + let r = vv * x1; + let a = uu * z1 - vvv - r.double(); + let x3 = v * a; + let y3 = u * (r - a) - vvv * y1; + let z3 = vvv * z1; + ProjectivePoint::nonzero(x3, y3, z3) + } +} + +impl Add> for AffinePoint { + type Output = ProjectivePoint; + + fn add(self, rhs: AffinePoint) -> Self::Output { + let AffinePoint { + x: x1, + y: y1, + zero: zero1, + } = self; + let AffinePoint { + x: x2, + y: y2, + zero: zero2, + } = rhs; + + if zero1 { + return rhs.to_projective(); + } + if zero2 { + return self.to_projective(); + } + + // Check if we're doubling or adding inverses. 
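+        // The mmadd formula below uses v = x2 - x1 as its (projective) denominator,
+        // so it cannot handle two inputs with the same x-coordinate: either the
+        // points are equal (fall back to doubling) or they are negatives of each
+        // other (the sum is the identity).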
+ if x1 == x2 { + if y1 == y2 { + return self.to_projective().double(); + } + if y1 == -y2 { + return ProjectivePoint::ZERO; + } + } + + // From https://www.hyperelliptic.org/EFD/g1p/data/shortw/projective/addition/mmadd-1998-cmo + let u = y2 - y1; + let uu = u.square(); + let v = x2 - x1; + let vv = v.square(); + let vvv = v * vv; + let r = vv * x1; + let a = uu - vvv - r.double(); + let x3 = v * a; + let y3 = u * (r - a) - vvv * y1; + let z3 = vvv; + ProjectivePoint::nonzero(x3, y3, z3) + } +} diff --git a/src/curve/curve_msm.rs b/src/curve/curve_msm.rs new file mode 100644 index 00000000..d2cb8049 --- /dev/null +++ b/src/curve/curve_msm.rs @@ -0,0 +1,263 @@ +use itertools::Itertools; +use rayon::prelude::*; + +use crate::curve::curve_summation::affine_multisummation_best; +use crate::curve::curve_types::{AffinePoint, Curve, ProjectivePoint}; +use crate::field::field_types::Field; + +/// In Yao's method, we compute an affine summation for each digit. In a parallel setting, it would +/// be easiest to assign individual summations to threads, but this would be sub-optimal because +/// multi-summations can be more efficient than repeating individual summations (see +/// `affine_multisummation_best`). Thus we divide digits into large chunks, and assign chunks of +/// digits to threads. Note that there is a delicate balance here, as large chunks can result in +/// uneven distributions of work among threads. +const DIGITS_PER_CHUNK: usize = 80; + +#[derive(Clone, Debug)] +pub struct MsmPrecomputation { + /// For each generator (in the order they were passed to `msm_precompute`), contains a vector + /// of powers, i.e. [(2^w)^i] for i < DIGITS. + // TODO: Use compressed coordinates here. + powers_per_generator: Vec>>, + + /// The window size. + w: usize, +} + +pub fn msm_precompute( + generators: &[ProjectivePoint], + w: usize, +) -> MsmPrecomputation { + MsmPrecomputation { + powers_per_generator: generators + .into_par_iter() + .map(|&g| precompute_single_generator(g, w)) + .collect(), + w, + } +} + +fn precompute_single_generator(g: ProjectivePoint, w: usize) -> Vec> { + let digits = (C::ScalarField::BITS + w - 1) / w; + let mut powers: Vec> = Vec::with_capacity(digits); + powers.push(g); + for i in 1..digits { + let mut power_i_proj = powers[i - 1]; + for _j in 0..w { + power_i_proj = power_i_proj.double(); + } + powers.push(power_i_proj); + } + ProjectivePoint::batch_to_affine(&powers) +} + +pub fn msm_parallel( + scalars: &[C::ScalarField], + generators: &[ProjectivePoint], + w: usize, +) -> ProjectivePoint { + let precomputation = msm_precompute(generators, w); + msm_execute_parallel(&precomputation, scalars) +} + +pub fn msm_execute( + precomputation: &MsmPrecomputation, + scalars: &[C::ScalarField], +) -> ProjectivePoint { + assert_eq!(precomputation.powers_per_generator.len(), scalars.len()); + let w = precomputation.w; + let digits = (C::ScalarField::BITS + w - 1) / w; + let base = 1 << w; + + // This is a variant of Yao's method, adapted to the multi-scalar setting. Because we use + // extremely large windows, the repeated scans in Yao's method could be more expensive than the + // actual group operations. To avoid this, we store a multimap from each possible digit to the + // positions in which that digit occurs in the scalars. These positions have the form (i, j), + // where i is the index of the generator and j is an index into the digits of the scalar + // associated with that generator. 
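+    // Given the per-digit sums S_d (the sum of all powers whose digit is d), the final
+    // loop evaluates sum_d d * S_d without any scalar multiplications: scanning d from
+    // base - 1 down to 1 while maintaining u = S_{base-1} + ... + S_d, and adding u
+    // into y at each step, leaves y = sum_d d * S_d.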
+ let mut digit_occurrences: Vec> = Vec::with_capacity(digits); + for _i in 0..base { + digit_occurrences.push(Vec::new()); + } + for (i, scalar) in scalars.iter().enumerate() { + let digits = to_digits::(scalar, w); + for (j, &digit) in digits.iter().enumerate() { + digit_occurrences[digit].push((i, j)); + } + } + + let mut y = ProjectivePoint::ZERO; + let mut u = ProjectivePoint::ZERO; + + for digit in (1..base).rev() { + for &(i, j) in &digit_occurrences[digit] { + u = u + precomputation.powers_per_generator[i][j]; + } + y = y + u; + } + + y +} + +pub fn msm_execute_parallel( + precomputation: &MsmPrecomputation, + scalars: &[C::ScalarField], +) -> ProjectivePoint { + assert_eq!(precomputation.powers_per_generator.len(), scalars.len()); + let w = precomputation.w; + let digits = (C::ScalarField::BITS + w - 1) / w; + let base = 1 << w; + + // This is a variant of Yao's method, adapted to the multi-scalar setting. Because we use + // extremely large windows, the repeated scans in Yao's method could be more expensive than the + // actual group operations. To avoid this, we store a multimap from each possible digit to the + // positions in which that digit occurs in the scalars. These positions have the form (i, j), + // where i is the index of the generator and j is an index into the digits of the scalar + // associated with that generator. + let mut digit_occurrences: Vec> = Vec::with_capacity(digits); + for _i in 0..base { + digit_occurrences.push(Vec::new()); + } + for (i, scalar) in scalars.iter().enumerate() { + let digits = to_digits::(scalar, w); + for (j, &digit) in digits.iter().enumerate() { + digit_occurrences[digit].push((i, j)); + } + } + + // For each digit, we add up the powers associated with all occurrences that digit. + let digits: Vec = (0..base).collect(); + let digit_acc: Vec> = digits + .par_chunks(DIGITS_PER_CHUNK) + .flat_map(|chunk| { + let summations: Vec>> = chunk + .iter() + .map(|&digit| { + digit_occurrences[digit] + .iter() + .map(|&(i, j)| precomputation.powers_per_generator[i][j]) + .collect() + }) + .collect(); + affine_multisummation_best(summations) + }) + .collect(); + // println!("Computing the per-digit summations (in parallel) took {}s", start.elapsed().as_secs_f64()); + + let mut y = ProjectivePoint::ZERO; + let mut u = ProjectivePoint::ZERO; + for digit in (1..base).rev() { + u = u + digit_acc[digit]; + y = y + u; + } + // println!("Final summation (sequential) {}s", start.elapsed().as_secs_f64()); + y +} + +pub(crate) fn to_digits(x: &C::ScalarField, w: usize) -> Vec { + let scalar_bits = C::ScalarField::BITS; + let num_digits = (scalar_bits + w - 1) / w; + + // Convert x to a bool array. 
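+    // The scalar is stored as little-endian u64 limbs (padded to the full bit width),
+    // so bit i of the scalar is bit i % 64 of limb i / 64. The loop below then packs
+    // each window of w consecutive bits into a digit, least-significant digit first;
+    // e.g. for w = 4, x = 0xAB yields digits [0xB, 0xA, 0, ...].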
+ let x_canonical: Vec<_> = x + .to_biguint() + .to_u64_digits() + .iter() + .cloned() + .pad_using(scalar_bits / 64, |_| 0) + .collect(); + let mut x_bits = Vec::with_capacity(scalar_bits); + for i in 0..scalar_bits { + x_bits.push((x_canonical[i / 64] >> (i as u64 % 64) & 1) != 0); + } + + let mut digits = Vec::with_capacity(num_digits); + for i in 0..num_digits { + let mut digit = 0; + for j in ((i * w)..((i + 1) * w).min(scalar_bits)).rev() { + digit <<= 1; + digit |= x_bits[j] as usize; + } + digits.push(digit); + } + digits +} + +#[cfg(test)] +mod tests { + use num::BigUint; + + use crate::curve::curve_msm::{msm_execute, msm_precompute, to_digits}; + use crate::curve::curve_types::Curve; + use crate::curve::secp256k1::Secp256K1; + use crate::field::field_types::Field; + use crate::field::secp256k1_scalar::Secp256K1Scalar; + + #[test] + fn test_to_digits() { + let x_canonical = [ + 0b10101010101010101010101010101010, + 0b10101010101010101010101010101010, + 0b11001100110011001100110011001100, + 0b11001100110011001100110011001100, + 0b11110000111100001111000011110000, + 0b11110000111100001111000011110000, + 0b00001111111111111111111111111111, + 0b11111111111111111111111111111111, + ]; + let x = Secp256K1Scalar::from_biguint(BigUint::from_slice(&x_canonical)); + assert_eq!(x.to_biguint().to_u32_digits(), x_canonical); + assert_eq!( + to_digits::(&x, 17), + vec![ + 0b01010101010101010, + 0b10101010101010101, + 0b01010101010101010, + 0b11001010101010101, + 0b01100110011001100, + 0b00110011001100110, + 0b10011001100110011, + 0b11110000110011001, + 0b01111000011110000, + 0b00111100001111000, + 0b00011110000111100, + 0b11111111111111110, + 0b01111111111111111, + 0b11111111111111000, + 0b11111111111111111, + 0b1, + ] + ); + } + + #[test] + fn test_msm() { + let w = 5; + + let generator_1 = Secp256K1::GENERATOR_PROJECTIVE; + let generator_2 = generator_1 + generator_1; + let generator_3 = generator_1 + generator_2; + + let scalar_1 = Secp256K1Scalar::from_biguint(BigUint::from_slice(&[ + 11111111, 22222222, 33333333, 44444444, + ])); + let scalar_2 = Secp256K1Scalar::from_biguint(BigUint::from_slice(&[ + 22222222, 22222222, 33333333, 44444444, + ])); + let scalar_3 = Secp256K1Scalar::from_biguint(BigUint::from_slice(&[ + 33333333, 22222222, 33333333, 44444444, + ])); + + let generators = vec![generator_1, generator_2, generator_3]; + let scalars = vec![scalar_1, scalar_2, scalar_3]; + + let precomputation = msm_precompute(&generators, w); + let result_msm = msm_execute(&precomputation, &scalars); + + let result_naive = Secp256K1::convert(scalar_1) * generator_1 + + Secp256K1::convert(scalar_2) * generator_2 + + Secp256K1::convert(scalar_3) * generator_3; + + assert_eq!(result_msm, result_naive); + } +} diff --git a/src/curve/curve_multiplication.rs b/src/curve/curve_multiplication.rs new file mode 100644 index 00000000..eb5bade1 --- /dev/null +++ b/src/curve/curve_multiplication.rs @@ -0,0 +1,97 @@ +use std::ops::Mul; + +use crate::curve::curve_types::{Curve, CurveScalar, ProjectivePoint}; +use crate::field::field_types::Field; + +const WINDOW_BITS: usize = 4; +const BASE: usize = 1 << WINDOW_BITS; + +fn digits_per_scalar() -> usize { + (C::ScalarField::BITS + WINDOW_BITS - 1) / WINDOW_BITS +} + +/// Precomputed state used for scalar x ProjectivePoint multiplications, +/// specific to a particular generator. +#[derive(Clone)] +pub struct MultiplicationPrecomputation { + /// [(2^w)^i] g for each i < digits_per_scalar. 
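+    /// For example, with `WINDOW_BITS = 4` and a 256-bit scalar field, this holds the
+    /// 64 points g, 2^4 g, 2^8 g, ..., 2^252 g.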
+ powers: Vec>, +} + +impl ProjectivePoint { + pub fn mul_precompute(&self) -> MultiplicationPrecomputation { + let num_digits = digits_per_scalar::(); + let mut powers = Vec::with_capacity(num_digits); + powers.push(*self); + for i in 1..num_digits { + let mut power_i = powers[i - 1]; + for _j in 0..WINDOW_BITS { + power_i = power_i.double(); + } + powers.push(power_i); + } + + MultiplicationPrecomputation { powers } + } + + pub fn mul_with_precomputation( + &self, + scalar: C::ScalarField, + precomputation: MultiplicationPrecomputation, + ) -> Self { + // Yao's method; see https://koclab.cs.ucsb.edu/teaching/ecc/eccPapers/Doche-ch09.pdf + let precomputed_powers = precomputation.powers; + + let digits = to_digits::(&scalar); + + let mut y = ProjectivePoint::ZERO; + let mut u = ProjectivePoint::ZERO; + let mut all_summands = Vec::new(); + for j in (1..BASE).rev() { + let mut u_summands = Vec::new(); + for (i, &digit) in digits.iter().enumerate() { + if digit == j as u64 { + u_summands.push(precomputed_powers[i]); + } + } + all_summands.push(u_summands); + } + + let all_sums: Vec> = all_summands + .iter() + .cloned() + .map(|vec| vec.iter().fold(ProjectivePoint::ZERO, |a, &b| a + b)) + .collect(); + for i in 0..all_sums.len() { + u = u + all_sums[i]; + y = y + u; + } + y + } +} + +impl Mul> for CurveScalar { + type Output = ProjectivePoint; + + fn mul(self, rhs: ProjectivePoint) -> Self::Output { + let precomputation = rhs.mul_precompute(); + rhs.mul_with_precomputation(self.0, precomputation) + } +} + +#[allow(clippy::assertions_on_constants)] +fn to_digits(x: &C::ScalarField) -> Vec { + debug_assert!( + 64 % WINDOW_BITS == 0, + "For simplicity, only power-of-two window sizes are handled for now" + ); + let digits_per_u64 = 64 / WINDOW_BITS; + let mut digits = Vec::with_capacity(digits_per_scalar::()); + for limb in x.to_biguint().to_u64_digits() { + for j in 0..digits_per_u64 { + digits.push((limb >> (j * WINDOW_BITS) as u64) % BASE as u64); + } + } + + digits +} diff --git a/src/curve/curve_summation.rs b/src/curve/curve_summation.rs new file mode 100644 index 00000000..c67bc026 --- /dev/null +++ b/src/curve/curve_summation.rs @@ -0,0 +1,237 @@ +use std::iter::Sum; + +use crate::curve::curve_types::{AffinePoint, Curve, ProjectivePoint}; +use crate::field::field_types::Field; + +impl Sum> for ProjectivePoint { + fn sum>>(iter: I) -> ProjectivePoint { + let points: Vec<_> = iter.collect(); + affine_summation_best(points) + } +} + +impl Sum for ProjectivePoint { + fn sum>>(iter: I) -> ProjectivePoint { + iter.fold(ProjectivePoint::ZERO, |acc, x| acc + x) + } +} + +pub fn affine_summation_best(summation: Vec>) -> ProjectivePoint { + let result = affine_multisummation_best(vec![summation]); + debug_assert_eq!(result.len(), 1); + result[0] +} + +pub fn affine_multisummation_best( + summations: Vec>>, +) -> Vec> { + let pairwise_sums: usize = summations.iter().map(|summation| summation.len() / 2).sum(); + + // This threshold is chosen based on data from the summation benchmarks. + if pairwise_sums < 70 { + affine_multisummation_pairwise(summations) + } else { + affine_multisummation_batch_inversion(summations) + } +} + +/// Adds each pair of points using an affine + affine = projective formula, then adds up the +/// intermediate sums using a projective formula. 
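+/// Unlike `affine_multisummation_batch_inversion`, this variant performs no field
+/// inversions, which is why `affine_multisummation_best` prefers it below its size
+/// threshold.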
+pub fn affine_multisummation_pairwise( + summations: Vec>>, +) -> Vec> { + summations + .into_iter() + .map(affine_summation_pairwise) + .collect() +} + +/// Adds each pair of points using an affine + affine = projective formula, then adds up the +/// intermediate sums using a projective formula. +pub fn affine_summation_pairwise(points: Vec>) -> ProjectivePoint { + let mut reduced_points: Vec> = Vec::new(); + for chunk in points.chunks(2) { + match chunk.len() { + 1 => reduced_points.push(chunk[0].to_projective()), + 2 => reduced_points.push(chunk[0] + chunk[1]), + _ => panic!(), + } + } + // TODO: Avoid copying (deref) + reduced_points + .iter() + .fold(ProjectivePoint::ZERO, |sum, x| sum + *x) +} + +/// Computes several summations of affine points by applying an affine group law, except that the +/// divisions are batched via Montgomery's trick. +pub fn affine_summation_batch_inversion( + summation: Vec>, +) -> ProjectivePoint { + let result = affine_multisummation_batch_inversion(vec![summation]); + debug_assert_eq!(result.len(), 1); + result[0] +} + +/// Computes several summations of affine points by applying an affine group law, except that the +/// divisions are batched via Montgomery's trick. +pub fn affine_multisummation_batch_inversion( + summations: Vec>>, +) -> Vec> { + let mut elements_to_invert = Vec::new(); + + // For each pair of points, (x1, y1) and (x2, y2), that we're going to add later, we want to + // invert either y (if the points are equal) or x1 - x2 (otherwise). We will use these later. + for summation in &summations { + let n = summation.len(); + // The special case for n=0 is to avoid underflow. + let range_end = if n == 0 { 0 } else { n - 1 }; + + for i in (0..range_end).step_by(2) { + let p1 = summation[i]; + let p2 = summation[i + 1]; + let AffinePoint { + x: x1, + y: y1, + zero: zero1, + } = p1; + let AffinePoint { + x: x2, + y: _y2, + zero: zero2, + } = p2; + + if zero1 || zero2 || p1 == -p2 { + // These are trivial cases where we won't need any inverse. + } else if p1 == p2 { + elements_to_invert.push(y1.double()); + } else { + elements_to_invert.push(x1 - x2); + } + } + } + + let inverses: Vec = + C::BaseField::batch_multiplicative_inverse(&elements_to_invert); + + let mut all_reduced_points = Vec::with_capacity(summations.len()); + let mut inverse_index = 0; + for summation in summations { + let n = summation.len(); + let mut reduced_points = Vec::with_capacity((n + 1) / 2); + + // The special case for n=0 is to avoid underflow. + let range_end = if n == 0 { 0 } else { n - 1 }; + + for i in (0..range_end).step_by(2) { + let p1 = summation[i]; + let p2 = summation[i + 1]; + let AffinePoint { + x: x1, + y: y1, + zero: zero1, + } = p1; + let AffinePoint { + x: x2, + y: y2, + zero: zero2, + } = p2; + + let sum = if zero1 { + p2 + } else if zero2 { + p1 + } else if p1 == -p2 { + AffinePoint::ZERO + } else { + // It's a non-trivial case where we need one of the inverses we computed earlier. + let inverse = inverses[inverse_index]; + inverse_index += 1; + + if p1 == p2 { + // This is the doubling case. + let mut numerator = x1.square().triple(); + if C::A.is_nonzero() { + numerator += C::A; + } + let quotient = numerator * inverse; + let x3 = quotient.square() - x1.double(); + let y3 = quotient * (x1 - x3) - y1; + AffinePoint::nonzero(x3, y3) + } else { + // This is the general case. We use the incomplete addition formulas 4.3 and 4.4. 
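+                // With lambda = (y1 - y2) / (x1 - x2), the sum is
+                //     x3 = lambda^2 - x1 - x2,
+                //     y3 = lambda * (x1 - x3) - y1,
+                // where `inverse` is the batched 1 / (x1 - x2) computed above.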
+ let quotient = (y1 - y2) * inverse; + let x3 = quotient.square() - x1 - x2; + let y3 = quotient * (x1 - x3) - y1; + AffinePoint::nonzero(x3, y3) + } + }; + reduced_points.push(sum); + } + + // If n is odd, the last point was not part of a pair. + if n % 2 == 1 { + reduced_points.push(summation[n - 1]); + } + + all_reduced_points.push(reduced_points); + } + + // We should have consumed all of the inverses from the batch computation. + debug_assert_eq!(inverse_index, inverses.len()); + + // Recurse with our smaller set of points. + affine_multisummation_best(all_reduced_points) +} + +#[cfg(test)] +mod tests { + use crate::curve::curve_summation::{ + affine_summation_batch_inversion, affine_summation_pairwise, + }; + use crate::curve::curve_types::{Curve, ProjectivePoint}; + use crate::curve::secp256k1::Secp256K1; + + #[test] + fn test_pairwise_affine_summation() { + let g_affine = Secp256K1::GENERATOR_AFFINE; + let g2_affine = (g_affine + g_affine).to_affine(); + let g3_affine = (g_affine + g_affine + g_affine).to_affine(); + let g2_proj = g2_affine.to_projective(); + let g3_proj = g3_affine.to_projective(); + assert_eq!( + affine_summation_pairwise::(vec![g_affine, g_affine]), + g2_proj + ); + assert_eq!( + affine_summation_pairwise::(vec![g_affine, g2_affine]), + g3_proj + ); + assert_eq!( + affine_summation_pairwise::(vec![g_affine, g_affine, g_affine]), + g3_proj + ); + assert_eq!( + affine_summation_pairwise::(vec![]), + ProjectivePoint::ZERO + ); + } + + #[test] + fn test_pairwise_affine_summation_batch_inversion() { + let g = Secp256K1::GENERATOR_AFFINE; + let g_proj = g.to_projective(); + assert_eq!( + affine_summation_batch_inversion::(vec![g, g]), + g_proj + g_proj + ); + assert_eq!( + affine_summation_batch_inversion::(vec![g, g, g]), + g_proj + g_proj + g_proj + ); + assert_eq!( + affine_summation_batch_inversion::(vec![]), + ProjectivePoint::ZERO + ); + } +} diff --git a/src/curve/curve_types.rs b/src/curve/curve_types.rs new file mode 100644 index 00000000..ef1f6186 --- /dev/null +++ b/src/curve/curve_types.rs @@ -0,0 +1,260 @@ +use std::fmt::Debug; +use std::ops::Neg; + +use crate::field::field_types::Field; + +// To avoid implementation conflicts from associated types, +// see https://github.com/rust-lang/rust/issues/20400 +pub struct CurveScalar(pub ::ScalarField); + +/// A short Weierstrass curve. +pub trait Curve: 'static + Sync + Sized + Copy + Debug { + type BaseField: Field; + type ScalarField: Field; + + const A: Self::BaseField; + const B: Self::BaseField; + + const GENERATOR_AFFINE: AffinePoint; + + const GENERATOR_PROJECTIVE: ProjectivePoint = ProjectivePoint { + x: Self::GENERATOR_AFFINE.x, + y: Self::GENERATOR_AFFINE.y, + z: Self::BaseField::ONE, + }; + + fn convert(x: Self::ScalarField) -> CurveScalar { + CurveScalar(x) + } + + fn is_safe_curve() -> bool { + // Added additional check to prevent using vulnerabilties in case a discriminant is equal to 0. + (Self::A.cube().double().double() + Self::B.square().triple().triple().triple()) + .is_nonzero() + } +} + +/// A point on a short Weierstrass curve, represented in affine coordinates. 
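+///
+/// The `zero` flag marks the point at infinity (the group identity), which has no
+/// affine representation; when it is set, `x` and `y` carry no information.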
+#[derive(Copy, Clone, Debug)] +pub struct AffinePoint { + pub x: C::BaseField, + pub y: C::BaseField, + pub zero: bool, +} + +impl AffinePoint { + pub const ZERO: Self = Self { + x: C::BaseField::ZERO, + y: C::BaseField::ZERO, + zero: true, + }; + + pub fn nonzero(x: C::BaseField, y: C::BaseField) -> Self { + let point = Self { x, y, zero: false }; + debug_assert!(point.is_valid()); + point + } + + pub fn is_valid(&self) -> bool { + let Self { x, y, zero } = *self; + zero || y.square() == x.cube() + C::A * x + C::B + } + + pub fn to_projective(&self) -> ProjectivePoint { + let Self { x, y, zero } = *self; + let z = if zero { + C::BaseField::ZERO + } else { + C::BaseField::ONE + }; + + ProjectivePoint { x, y, z } + } + + pub fn batch_to_projective(affine_points: &[Self]) -> Vec> { + affine_points.iter().map(Self::to_projective).collect() + } + + pub fn double(&self) -> Self { + let AffinePoint { x: x1, y: y1, zero } = *self; + + if zero { + return AffinePoint::ZERO; + } + + let double_y = y1.double(); + let inv_double_y = double_y.inverse(); // (2y)^(-1) + let triple_xx = x1.square().triple(); // 3x^2 + let lambda = (triple_xx + C::A) * inv_double_y; + let x3 = lambda.square() - self.x.double(); + let y3 = lambda * (x1 - x3) - y1; + + Self { + x: x3, + y: y3, + zero: false, + } + } +} + +impl PartialEq for AffinePoint { + fn eq(&self, other: &Self) -> bool { + let AffinePoint { + x: x1, + y: y1, + zero: zero1, + } = *self; + let AffinePoint { + x: x2, + y: y2, + zero: zero2, + } = *other; + if zero1 || zero2 { + return zero1 == zero2; + } + x1 == x2 && y1 == y2 + } +} + +impl Eq for AffinePoint {} + +/// A point on a short Weierstrass curve, represented in projective coordinates. +#[derive(Copy, Clone, Debug)] +pub struct ProjectivePoint { + pub x: C::BaseField, + pub y: C::BaseField, + pub z: C::BaseField, +} + +impl ProjectivePoint { + pub const ZERO: Self = Self { + x: C::BaseField::ZERO, + y: C::BaseField::ONE, + z: C::BaseField::ZERO, + }; + + pub fn nonzero(x: C::BaseField, y: C::BaseField, z: C::BaseField) -> Self { + let point = Self { x, y, z }; + debug_assert!(point.is_valid()); + point + } + + pub fn is_valid(&self) -> bool { + let Self { x, y, z } = *self; + z.is_zero() || y.square() * z == x.cube() + C::A * x * z.square() + C::B * z.cube() + } + + pub fn to_affine(&self) -> AffinePoint { + let Self { x, y, z } = *self; + if z == C::BaseField::ZERO { + AffinePoint::ZERO + } else { + let z_inv = z.inverse(); + AffinePoint::nonzero(x * z_inv, y * z_inv) + } + } + + pub fn batch_to_affine(proj_points: &[Self]) -> Vec> { + let n = proj_points.len(); + let zs: Vec = proj_points.iter().map(|pp| pp.z).collect(); + let z_invs = C::BaseField::batch_multiplicative_inverse(&zs); + + let mut result = Vec::with_capacity(n); + for i in 0..n { + let Self { x, y, z } = proj_points[i]; + result.push(if z == C::BaseField::ZERO { + AffinePoint::ZERO + } else { + let z_inv = z_invs[i]; + AffinePoint::nonzero(x * z_inv, y * z_inv) + }); + } + result + } + + // From https://www.hyperelliptic.org/EFD/g1p/data/shortw/projective/doubling/dbl-2007-bl + pub fn double(&self) -> Self { + let Self { x, y, z } = *self; + if z == C::BaseField::ZERO { + return ProjectivePoint::ZERO; + } + + let xx = x.square(); + let zz = z.square(); + let mut w = xx.triple(); + if C::A.is_nonzero() { + w += C::A * zz; + } + let s = y.double() * z; + let r = y * s; + let rr = r.square(); + let b = (x + r).square() - (xx + rr); + let h = w.square() - b.double(); + let x3 = h * s; + let y3 = w * (b - h) - rr.double(); + let z3 = 
s.cube(); + Self { + x: x3, + y: y3, + z: z3, + } + } + + pub fn add_slices(a: &[Self], b: &[Self]) -> Vec { + assert_eq!(a.len(), b.len()); + a.iter() + .zip(b.iter()) + .map(|(&a_i, &b_i)| a_i + b_i) + .collect() + } + + pub fn neg(&self) -> Self { + Self { + x: self.x, + y: -self.y, + z: self.z, + } + } +} + +impl PartialEq for ProjectivePoint { + fn eq(&self, other: &Self) -> bool { + let ProjectivePoint { + x: x1, + y: y1, + z: z1, + } = *self; + let ProjectivePoint { + x: x2, + y: y2, + z: z2, + } = *other; + if z1 == C::BaseField::ZERO || z2 == C::BaseField::ZERO { + return z1 == z2; + } + + // We want to compare (x1/z1, y1/z1) == (x2/z2, y2/z2). + // But to avoid field division, it is better to compare (x1*z2, y1*z2) == (x2*z1, y2*z1). + x1 * z2 == x2 * z1 && y1 * z2 == y2 * z1 + } +} + +impl Eq for ProjectivePoint {} + +impl Neg for AffinePoint { + type Output = AffinePoint; + + fn neg(self) -> Self::Output { + let AffinePoint { x, y, zero } = self; + AffinePoint { x, y: -y, zero } + } +} + +impl Neg for ProjectivePoint { + type Output = ProjectivePoint; + + fn neg(self) -> Self::Output { + let ProjectivePoint { x, y, z } = self; + ProjectivePoint { x, y: -y, z } + } +} diff --git a/src/curve/mod.rs b/src/curve/mod.rs new file mode 100644 index 00000000..d31e373e --- /dev/null +++ b/src/curve/mod.rs @@ -0,0 +1,6 @@ +pub mod curve_adds; +pub mod curve_msm; +pub mod curve_multiplication; +pub mod curve_summation; +pub mod curve_types; +pub mod secp256k1; diff --git a/src/curve/secp256k1.rs b/src/curve/secp256k1.rs new file mode 100644 index 00000000..58472eb4 --- /dev/null +++ b/src/curve/secp256k1.rs @@ -0,0 +1,98 @@ +use crate::curve::curve_types::{AffinePoint, Curve}; +use crate::field::field_types::Field; +use crate::field::secp256k1_base::Secp256K1Base; +use crate::field::secp256k1_scalar::Secp256K1Scalar; + +#[derive(Debug, Copy, Clone)] +pub struct Secp256K1; + +impl Curve for Secp256K1 { + type BaseField = Secp256K1Base; + type ScalarField = Secp256K1Scalar; + + const A: Secp256K1Base = Secp256K1Base::ZERO; + const B: Secp256K1Base = Secp256K1Base([7, 0, 0, 0]); + const GENERATOR_AFFINE: AffinePoint = AffinePoint { + x: SECP256K1_GENERATOR_X, + y: SECP256K1_GENERATOR_Y, + zero: false, + }; +} + +// 55066263022277343669578718895168534326250603453777594175500187360389116729240 +const SECP256K1_GENERATOR_X: Secp256K1Base = Secp256K1Base([ + 0x59F2815B16F81798, + 0x029BFCDB2DCE28D9, + 0x55A06295CE870B07, + 0x79BE667EF9DCBBAC, +]); + +/// 32670510020758816978083085130507043184471273380659243275938904335757337482424 +const SECP256K1_GENERATOR_Y: Secp256K1Base = Secp256K1Base([ + 0x9C47D08FFB10D4B8, + 0xFD17B448A6855419, + 0x5DA4FBFC0E1108A8, + 0x483ADA7726A3C465, +]); + +#[cfg(test)] +mod tests { + use num::BigUint; + + use crate::curve::curve_types::{AffinePoint, Curve, ProjectivePoint}; + use crate::curve::secp256k1::Secp256K1; + use crate::field::field_types::Field; + use crate::field::secp256k1_scalar::Secp256K1Scalar; + + #[test] + fn test_generator() { + let g = Secp256K1::GENERATOR_AFFINE; + assert!(g.is_valid()); + + let neg_g = AffinePoint:: { + x: g.x, + y: -g.y, + zero: g.zero, + }; + assert!(neg_g.is_valid()); + } + + #[test] + fn test_naive_multiplication() { + let g = Secp256K1::GENERATOR_PROJECTIVE; + let ten = Secp256K1Scalar::from_canonical_u64(10); + let product = mul_naive(ten, g); + let sum = g + g + g + g + g + g + g + g + g + g; + assert_eq!(product, sum); + } + + #[test] + fn test_g1_multiplication() { + let lhs = 
Secp256K1Scalar::from_biguint(BigUint::from_slice(&[ + 1111, 2222, 3333, 4444, 5555, 6666, 7777, 8888, + ])); + assert_eq!( + Secp256K1::convert(lhs) * Secp256K1::GENERATOR_PROJECTIVE, + mul_naive(lhs, Secp256K1::GENERATOR_PROJECTIVE) + ); + } + + /// A simple, somewhat inefficient implementation of multiplication which is used as a reference + /// for correctness. + fn mul_naive( + lhs: Secp256K1Scalar, + rhs: ProjectivePoint, + ) -> ProjectivePoint { + let mut g = rhs; + let mut sum = ProjectivePoint::ZERO; + for limb in lhs.to_biguint().to_u64_digits().iter() { + for j in 0..64 { + if (limb >> j & 1u64) != 0u64 { + sum = sum + g; + } + g = g.double(); + } + } + sum + } +} diff --git a/src/field/extension_field/quadratic.rs b/src/field/extension_field/quadratic.rs index e2794330..b724095a 100644 --- a/src/field/extension_field/quadratic.rs +++ b/src/field/extension_field/quadratic.rs @@ -67,6 +67,8 @@ impl> Field for QuadraticExtension { const MULTIPLICATIVE_GROUP_GENERATOR: Self = Self(F::EXT_MULTIPLICATIVE_GROUP_GENERATOR); const POWER_OF_TWO_GENERATOR: Self = Self(F::EXT_POWER_OF_TWO_GENERATOR); + const BITS: usize = F::BITS * 2; + fn order() -> BigUint { F::order() * F::order() } diff --git a/src/field/extension_field/quartic.rs b/src/field/extension_field/quartic.rs index 01918ff3..0d221401 100644 --- a/src/field/extension_field/quartic.rs +++ b/src/field/extension_field/quartic.rs @@ -69,6 +69,8 @@ impl> Field for QuarticExtension { const MULTIPLICATIVE_GROUP_GENERATOR: Self = Self(F::EXT_MULTIPLICATIVE_GROUP_GENERATOR); const POWER_OF_TWO_GENERATOR: Self = Self(F::EXT_POWER_OF_TWO_GENERATOR); + const BITS: usize = F::BITS * 4; + fn order() -> BigUint { F::order().pow(4u32) } diff --git a/src/field/field_testing.rs b/src/field/field_testing.rs index 767a3cf2..b4ee0595 100644 --- a/src/field/field_testing.rs +++ b/src/field/field_testing.rs @@ -84,6 +84,19 @@ macro_rules! test_field_arithmetic { assert_eq!(base.exp_biguint(&pow), base.exp_biguint(&big_pow)); assert_ne!(base.exp_biguint(&pow), base.exp_biguint(&big_pow_wrong)); } + + #[test] + fn inverses() { + type F = $field; + + let x = F::rand(); + let x1 = x.inverse(); + let x2 = x1.inverse(); + let x3 = x2.inverse(); + + assert_eq!(x, x2); + assert_eq!(x1, x3); + } } }; } diff --git a/src/field/field_types.rs b/src/field/field_types.rs index 036793cc..b7b9ddf4 100644 --- a/src/field/field_types.rs +++ b/src/field/field_types.rs @@ -59,6 +59,8 @@ pub trait Field: /// Generator of a multiplicative subgroup of order `2^TWO_ADICITY`. const POWER_OF_TWO_GENERATOR: Self; + const BITS: usize; + fn order() -> BigUint; #[inline] @@ -91,6 +93,10 @@ pub trait Field: self.square() * *self } + fn triple(&self) -> Self { + *self * (Self::ONE + Self::TWO) + } + /// Compute the multiplicative inverse of this field element. 
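+    /// Returns `None` when `self` is zero, since zero has no multiplicative inverse.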
    fn try_inverse(&self) -> Option<Self>;
diff --git a/src/field/goldilocks_field.rs b/src/field/goldilocks_field.rs
index cb85d56d..058b6db8 100644
--- a/src/field/goldilocks_field.rs
+++ b/src/field/goldilocks_field.rs
@@ -82,6 +82,8 @@ impl Field for GoldilocksField {
     // ```
     const POWER_OF_TWO_GENERATOR: Self = Self(1753635133440165772);
 
+    const BITS: usize = 64;
+
     fn order() -> BigUint {
         Self::ORDER.into()
     }
diff --git a/src/field/mod.rs b/src/field/mod.rs
index 5ed64a54..74e0fbf4 100644
--- a/src/field/mod.rs
+++ b/src/field/mod.rs
@@ -7,7 +7,8 @@ pub(crate) mod interpolation;
 mod inversion;
 pub(crate) mod packable;
 pub(crate) mod packed_field;
-pub mod secp256k1;
+pub mod secp256k1_base;
+pub mod secp256k1_scalar;
 
 #[cfg(target_feature = "avx2")]
 pub(crate) mod packed_avx2;
diff --git a/src/field/secp256k1.rs b/src/field/secp256k1_base.rs
similarity index 97%
rename from src/field/secp256k1.rs
rename to src/field/secp256k1_base.rs
index 75221a1f..b3fb0148 100644
--- a/src/field/secp256k1.rs
+++ b/src/field/secp256k1_base.rs
@@ -91,6 +91,8 @@ impl Field for Secp256K1Base {
     // Sage: `g_2 = g^((p - 1) / 2)`
     const POWER_OF_TWO_GENERATOR: Self = Self::NEG_ONE;
 
+    const BITS: usize = 256;
+
     fn order() -> BigUint {
         BigUint::from_slice(&[
             0xFFFFFC2F, 0xFFFFFFFE, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
@@ -239,3 +241,10 @@ impl DivAssign for Secp256K1Base {
         *self = *self / rhs;
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use crate::test_field_arithmetic;
+
+    test_field_arithmetic!(crate::field::secp256k1_base::Secp256K1Base);
+}
diff --git a/src/field/secp256k1_scalar.rs b/src/field/secp256k1_scalar.rs
new file mode 100644
index 00000000..f4f2e6ab
--- /dev/null
+++ b/src/field/secp256k1_scalar.rs
@@ -0,0 +1,260 @@
+use std::convert::TryInto;
+use std::fmt;
+use std::fmt::{Debug, Display, Formatter};
+use std::hash::{Hash, Hasher};
+use std::iter::{Product, Sum};
+use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};
+
+use itertools::Itertools;
+use num::bigint::{BigUint, RandBigInt};
+use num::{Integer, One};
+use rand::Rng;
+use serde::{Deserialize, Serialize};
+
+use crate::field::field_types::Field;
+use crate::field::goldilocks_field::GoldilocksField;
+
+/// The scalar field of the secp256k1 elliptic curve.
+/// +/// Its order is +/// ```ignore +/// P = 0xFFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE BAAEDCE6 AF48A03B BFD25E8C D0364141 +/// = 115792089237316195423570985008687907852837564279074904382605163141518161494337 +/// = 2**256 - 432420386565659656852420866394968145599 +/// ``` +#[derive(Copy, Clone, Serialize, Deserialize)] +pub struct Secp256K1Scalar(pub [u64; 4]); + +fn biguint_from_array(arr: [u64; 4]) -> BigUint { + BigUint::from_slice(&[ + arr[0] as u32, + (arr[0] >> 32) as u32, + arr[1] as u32, + (arr[1] >> 32) as u32, + arr[2] as u32, + (arr[2] >> 32) as u32, + arr[3] as u32, + (arr[3] >> 32) as u32, + ]) +} + +impl Default for Secp256K1Scalar { + fn default() -> Self { + Self::ZERO + } +} + +impl PartialEq for Secp256K1Scalar { + fn eq(&self, other: &Self) -> bool { + self.to_biguint() == other.to_biguint() + } +} + +impl Eq for Secp256K1Scalar {} + +impl Hash for Secp256K1Scalar { + fn hash(&self, state: &mut H) { + self.to_biguint().hash(state) + } +} + +impl Display for Secp256K1Scalar { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.to_biguint(), f) + } +} + +impl Debug for Secp256K1Scalar { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Debug::fmt(&self.to_biguint(), f) + } +} + +impl Field for Secp256K1Scalar { + // TODO: fix + type PrimeField = GoldilocksField; + + const ZERO: Self = Self([0; 4]); + const ONE: Self = Self([1, 0, 0, 0]); + const TWO: Self = Self([2, 0, 0, 0]); + const NEG_ONE: Self = Self([ + 0xBFD25E8CD0364140, + 0xBAAEDCE6AF48A03B, + 0xFFFFFFFFFFFFFFFE, + 0xFFFFFFFFFFFFFFFF, + ]); + + // TODO: fix + const CHARACTERISTIC: u64 = 0; + + const TWO_ADICITY: usize = 6; + + // Sage: `g = GF(p).multiplicative_generator()` + const MULTIPLICATIVE_GROUP_GENERATOR: Self = Self([7, 0, 0, 0]); + + // Sage: `g_2 = power_mod(g, (p - 1) // 2^6), p)` + // 5480320495727936603795231718619559942670027629901634955707709633242980176626 + const POWER_OF_TWO_GENERATOR: Self = Self([ + 0x992f4b5402b052f2, + 0x98BDEAB680756045, + 0xDF9879A3FBC483A8, + 0xC1DC060E7A91986, + ]); + + const BITS: usize = 256; + + fn order() -> BigUint { + BigUint::from_slice(&[ + 0xD0364141, 0xBFD25E8C, 0xAF48A03B, 0xBAAEDCE6, 0xFFFFFFFE, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, + ]) + } + + fn try_inverse(&self) -> Option { + if self.is_zero() { + return None; + } + + // Fermat's Little Theorem + Some(self.exp_biguint(&(Self::order() - BigUint::one() - BigUint::one()))) + } + + fn to_biguint(&self) -> BigUint { + let mut result = biguint_from_array(self.0); + if result >= Self::order() { + result -= Self::order(); + } + result + } + + fn from_biguint(val: BigUint) -> Self { + Self( + val.to_u64_digits() + .into_iter() + .pad_using(4, |_| 0) + .collect::>()[..] 
+ .try_into() + .expect("error converting to u64 array"), + ) + } + + #[inline] + fn from_canonical_u64(n: u64) -> Self { + Self([n, 0, 0, 0]) + } + + #[inline] + fn from_noncanonical_u128(n: u128) -> Self { + Self([n as u64, (n >> 64) as u64, 0, 0]) + } + + #[inline] + fn from_noncanonical_u96(n: (u64, u32)) -> Self { + Self([n.0, n.1 as u64, 0, 0]) + } + + fn rand_from_rng(rng: &mut R) -> Self { + Self::from_biguint(rng.gen_biguint_below(&Self::order())) + } +} + +impl Neg for Secp256K1Scalar { + type Output = Self; + + #[inline] + fn neg(self) -> Self { + if self.is_zero() { + Self::ZERO + } else { + Self::from_biguint(Self::order() - self.to_biguint()) + } + } +} + +impl Add for Secp256K1Scalar { + type Output = Self; + + #[inline] + fn add(self, rhs: Self) -> Self { + let mut result = self.to_biguint() + rhs.to_biguint(); + if result >= Self::order() { + result -= Self::order(); + } + Self::from_biguint(result) + } +} + +impl AddAssign for Secp256K1Scalar { + #[inline] + fn add_assign(&mut self, rhs: Self) { + *self = *self + rhs; + } +} + +impl Sum for Secp256K1Scalar { + fn sum>(iter: I) -> Self { + iter.fold(Self::ZERO, |acc, x| acc + x) + } +} + +impl Sub for Secp256K1Scalar { + type Output = Self; + + #[inline] + #[allow(clippy::suspicious_arithmetic_impl)] + fn sub(self, rhs: Self) -> Self { + self + -rhs + } +} + +impl SubAssign for Secp256K1Scalar { + #[inline] + fn sub_assign(&mut self, rhs: Self) { + *self = *self - rhs; + } +} + +impl Mul for Secp256K1Scalar { + type Output = Self; + + #[inline] + fn mul(self, rhs: Self) -> Self { + Self::from_biguint((self.to_biguint() * rhs.to_biguint()).mod_floor(&Self::order())) + } +} + +impl MulAssign for Secp256K1Scalar { + #[inline] + fn mul_assign(&mut self, rhs: Self) { + *self = *self * rhs; + } +} + +impl Product for Secp256K1Scalar { + #[inline] + fn product>(iter: I) -> Self { + iter.reduce(|acc, x| acc * x).unwrap_or(Self::ONE) + } +} + +impl Div for Secp256K1Scalar { + type Output = Self; + + #[allow(clippy::suspicious_arithmetic_impl)] + fn div(self, rhs: Self) -> Self::Output { + self * rhs.inverse() + } +} + +impl DivAssign for Secp256K1Scalar { + fn div_assign(&mut self, rhs: Self) { + *self = *self / rhs; + } +} + +#[cfg(test)] +mod tests { + use crate::test_field_arithmetic; + + test_field_arithmetic!(crate::field::secp256k1_scalar::Secp256K1Scalar); +} diff --git a/src/gadgets/biguint.rs b/src/gadgets/biguint.rs index fa5aa0b6..9e14cdb7 100644 --- a/src/gadgets/biguint.rs +++ b/src/gadgets/biguint.rs @@ -110,8 +110,8 @@ impl, const D: usize> CircuitBuilder { // Subtract two `BigUintTarget`s. We assume that the first is larger than the second. pub fn sub_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget { - let num_limbs = a.limbs.len(); let (a, b) = self.pad_biguints(a, b); + let num_limbs = a.limbs.len(); let mut result_limbs = vec![]; @@ -155,14 +155,31 @@ impl, const D: usize> CircuitBuilder { } } + // Returns x * y + z. This is no more efficient than mul-then-add; it's purely for convenience (only need to call one CircuitBuilder function). 
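+    // For example, x^2 + c can be computed as `builder.mul_add_biguint(&x, &x, &c)`.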
+ pub fn mul_add_biguint( + &mut self, + x: &BigUintTarget, + y: &BigUintTarget, + z: &BigUintTarget, + ) -> BigUintTarget { + let prod = self.mul_biguint(x, y); + self.add_biguint(&prod, z) + } + pub fn div_rem_biguint( &mut self, a: &BigUintTarget, b: &BigUintTarget, ) -> (BigUintTarget, BigUintTarget) { - let num_limbs = a.limbs.len(); - let div = self.add_virtual_biguint_target(num_limbs); - let rem = self.add_virtual_biguint_target(num_limbs); + let a_len = a.limbs.len(); + let b_len = b.limbs.len(); + let div_num_limbs = if b_len > a_len + 1 { + 0 + } else { + a_len - b_len + 1 + }; + let div = self.add_virtual_biguint_target(div_num_limbs); + let rem = self.add_virtual_biguint_target(b_len); self.add_simple_generator(BigUintDivRemGenerator:: { a: a.clone(), diff --git a/src/gadgets/curve.rs b/src/gadgets/curve.rs new file mode 100644 index 00000000..c86c3c0d --- /dev/null +++ b/src/gadgets/curve.rs @@ -0,0 +1,368 @@ +use crate::curve::curve_types::{AffinePoint, Curve, CurveScalar}; +use crate::field::extension_field::Extendable; +use crate::field::field_types::{Field, RichField}; +use crate::gadgets::nonnative::NonNativeTarget; +use crate::plonk::circuit_builder::CircuitBuilder; + +/// A Target representing an affine point on the curve `C`. We use incomplete arithmetic for efficiency, +/// so we assume these points are not zero. +#[derive(Clone, Debug)] +pub struct AffinePointTarget { + pub x: NonNativeTarget, + pub y: NonNativeTarget, +} + +impl AffinePointTarget { + pub fn to_vec(&self) -> Vec> { + vec![self.x.clone(), self.y.clone()] + } +} + +impl, const D: usize> CircuitBuilder { + pub fn constant_affine_point( + &mut self, + point: AffinePoint, + ) -> AffinePointTarget { + debug_assert!(!point.zero); + AffinePointTarget { + x: self.constant_nonnative(point.x), + y: self.constant_nonnative(point.y), + } + } + + pub fn connect_affine_point( + &mut self, + lhs: &AffinePointTarget, + rhs: &AffinePointTarget, + ) { + self.connect_nonnative(&lhs.x, &rhs.x); + self.connect_nonnative(&lhs.y, &rhs.y); + } + + pub fn add_virtual_affine_point_target(&mut self) -> AffinePointTarget { + let x = self.add_virtual_nonnative_target(); + let y = self.add_virtual_nonnative_target(); + + AffinePointTarget { x, y } + } + + pub fn curve_assert_valid(&mut self, p: &AffinePointTarget) { + let a = self.constant_nonnative(C::A); + let b = self.constant_nonnative(C::B); + + let y_squared = self.mul_nonnative(&p.y, &p.y); + let x_squared = self.mul_nonnative(&p.x, &p.x); + let x_cubed = self.mul_nonnative(&x_squared, &p.x); + let a_x = self.mul_nonnative(&a, &p.x); + let a_x_plus_b = self.add_nonnative(&a_x, &b); + let rhs = self.add_nonnative(&x_cubed, &a_x_plus_b); + + self.connect_nonnative(&y_squared, &rhs); + } + + pub fn curve_neg(&mut self, p: &AffinePointTarget) -> AffinePointTarget { + let neg_y = self.neg_nonnative(&p.y); + AffinePointTarget { + x: p.x.clone(), + y: neg_y, + } + } + + pub fn curve_double(&mut self, p: &AffinePointTarget) -> AffinePointTarget { + let AffinePointTarget { x, y } = p; + let double_y = self.add_nonnative(y, y); + let inv_double_y = self.inv_nonnative(&double_y); + let x_squared = self.mul_nonnative(x, x); + let double_x_squared = self.add_nonnative(&x_squared, &x_squared); + let triple_x_squared = self.add_nonnative(&double_x_squared, &x_squared); + + let a = self.constant_nonnative(C::A); + let triple_xx_a = self.add_nonnative(&triple_x_squared, &a); + let lambda = self.mul_nonnative(&triple_xx_a, &inv_double_y); + let lambda_squared = self.mul_nonnative(&lambda, 
&lambda); + let x_double = self.add_nonnative(x, x); + + let x3 = self.sub_nonnative(&lambda_squared, &x_double); + + let x_diff = self.sub_nonnative(x, &x3); + let lambda_x_diff = self.mul_nonnative(&lambda, &x_diff); + + let y3 = self.sub_nonnative(&lambda_x_diff, y); + + AffinePointTarget { x: x3, y: y3 } + } + + // Add two points, which are assumed to be non-equal. + pub fn curve_add( + &mut self, + p1: &AffinePointTarget, + p2: &AffinePointTarget, + ) -> AffinePointTarget { + let AffinePointTarget { x: x1, y: y1 } = p1; + let AffinePointTarget { x: x2, y: y2 } = p2; + + let u = self.sub_nonnative(y2, y1); + let uu = self.mul_nonnative(&u, &u); + let v = self.sub_nonnative(x2, x1); + let vv = self.mul_nonnative(&v, &v); + let vvv = self.mul_nonnative(&v, &vv); + let r = self.mul_nonnative(&vv, x1); + let diff = self.sub_nonnative(&uu, &vvv); + let r2 = self.add_nonnative(&r, &r); + let a = self.sub_nonnative(&diff, &r2); + let x3 = self.mul_nonnative(&v, &a); + + let r_a = self.sub_nonnative(&r, &a); + let y3_first = self.mul_nonnative(&u, &r_a); + let y3_second = self.mul_nonnative(&vvv, y1); + let y3 = self.sub_nonnative(&y3_first, &y3_second); + + let z3_inv = self.inv_nonnative(&vvv); + let x3_norm = self.mul_nonnative(&x3, &z3_inv); + let y3_norm = self.mul_nonnative(&y3, &z3_inv); + + AffinePointTarget { + x: x3_norm, + y: y3_norm, + } + } + + pub fn curve_scalar_mul( + &mut self, + p: &AffinePointTarget, + n: &NonNativeTarget, + ) -> AffinePointTarget { + let one = self.constant_nonnative(C::BaseField::ONE); + + let bits = self.split_nonnative_to_bits(n); + let bits_as_base: Vec> = + bits.iter().map(|b| self.bool_to_nonnative(b)).collect(); + + let rando = (CurveScalar(C::ScalarField::rand()) * C::GENERATOR_PROJECTIVE).to_affine(); + let randot = self.constant_affine_point(rando); + // Result starts at `rando`, which is later subtracted, because we don't support arithmetic with the zero point. + let mut result = self.add_virtual_affine_point_target(); + self.connect_affine_point(&randot, &result); + + let mut two_i_times_p = self.add_virtual_affine_point_target(); + self.connect_affine_point(p, &two_i_times_p); + + for bit in bits_as_base.iter() { + let not_bit = self.sub_nonnative(&one, bit); + + let result_plus_2_i_p = self.curve_add(&result, &two_i_times_p); + + let new_x_if_bit = self.mul_nonnative(bit, &result_plus_2_i_p.x); + let new_x_if_not_bit = self.mul_nonnative(¬_bit, &result.x); + let new_y_if_bit = self.mul_nonnative(bit, &result_plus_2_i_p.y); + let new_y_if_not_bit = self.mul_nonnative(¬_bit, &result.y); + + let new_x = self.add_nonnative(&new_x_if_bit, &new_x_if_not_bit); + let new_y = self.add_nonnative(&new_y_if_bit, &new_y_if_not_bit); + + result = AffinePointTarget { x: new_x, y: new_y }; + + two_i_times_p = self.curve_double(&two_i_times_p); + } + + // Subtract off result's intial value of `rando`. 
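+        // (Starting from a random point also keeps the operands of each incomplete
+        // `curve_add` call distinct and nonzero with overwhelming probability.)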
+ let neg_r = self.curve_neg(&randot); + result = self.curve_add(&result, &neg_r); + + result + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + + use crate::curve::curve_types::{AffinePoint, Curve, CurveScalar}; + use crate::curve::secp256k1::Secp256K1; + use crate::field::field_types::Field; + use crate::field::goldilocks_field::GoldilocksField; + use crate::field::secp256k1_base::Secp256K1Base; + use crate::field::secp256k1_scalar::Secp256K1Scalar; + use crate::iop::witness::PartialWitness; + use crate::plonk::circuit_builder::CircuitBuilder; + use crate::plonk::circuit_data::CircuitConfig; + use crate::plonk::verifier::verify; + + #[test] + fn test_curve_point_is_valid() -> Result<()> { + type F = GoldilocksField; + const D: usize = 4; + + let config = CircuitConfig::standard_recursion_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let g_target = builder.constant_affine_point(g); + let neg_g_target = builder.curve_neg(&g_target); + + builder.curve_assert_valid(&g_target); + builder.curve_assert_valid(&neg_g_target); + + let data = builder.build(); + let proof = data.prove(pw).unwrap(); + + verify(proof, &data.verifier_only, &data.common) + } + + #[test] + #[should_panic] + fn test_curve_point_is_not_valid() { + type F = GoldilocksField; + const D: usize = 4; + + let config = CircuitConfig::standard_recursion_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let not_g = AffinePoint:: { + x: g.x, + y: g.y + Secp256K1Base::ONE, + zero: g.zero, + }; + let not_g_target = builder.constant_affine_point(not_g); + + builder.curve_assert_valid(¬_g_target); + + let data = builder.build(); + let proof = data.prove(pw).unwrap(); + + verify(proof, &data.verifier_only, &data.common).unwrap(); + } + + #[test] + fn test_curve_double() -> Result<()> { + type F = GoldilocksField; + const D: usize = 4; + + let config = CircuitConfig::standard_recursion_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let g_target = builder.constant_affine_point(g); + let neg_g_target = builder.curve_neg(&g_target); + + let double_g = g.double(); + let double_g_expected = builder.constant_affine_point(double_g); + builder.curve_assert_valid(&double_g_expected); + + let double_neg_g = (-g).double(); + let double_neg_g_expected = builder.constant_affine_point(double_neg_g); + builder.curve_assert_valid(&double_neg_g_expected); + + let double_g_actual = builder.curve_double(&g_target); + let double_neg_g_actual = builder.curve_double(&neg_g_target); + builder.curve_assert_valid(&double_g_actual); + builder.curve_assert_valid(&double_neg_g_actual); + + builder.connect_affine_point(&double_g_expected, &double_g_actual); + builder.connect_affine_point(&double_neg_g_expected, &double_neg_g_actual); + + let data = builder.build(); + let proof = data.prove(pw).unwrap(); + + verify(proof, &data.verifier_only, &data.common) + } + + #[test] + fn test_curve_add() -> Result<()> { + type F = GoldilocksField; + const D: usize = 4; + + let config = CircuitConfig::standard_recursion_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let double_g = g.double(); + let g_plus_2g = (g + double_g).to_affine(); + let g_plus_2g_expected = builder.constant_affine_point(g_plus_2g); + 
builder.curve_assert_valid(&g_plus_2g_expected); + + let g_target = builder.constant_affine_point(g); + let double_g_target = builder.curve_double(&g_target); + let g_plus_2g_actual = builder.curve_add(&g_target, &double_g_target); + builder.curve_assert_valid(&g_plus_2g_actual); + + builder.connect_affine_point(&g_plus_2g_expected, &g_plus_2g_actual); + + let data = builder.build(); + let proof = data.prove(pw).unwrap(); + + verify(proof, &data.verifier_only, &data.common) + } + + #[test] + #[ignore] + fn test_curve_mul() -> Result<()> { + type F = GoldilocksField; + const D: usize = 4; + + let config = CircuitConfig { + num_routed_wires: 33, + ..CircuitConfig::standard_recursion_config() + }; + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let five = Secp256K1Scalar::from_canonical_usize(5); + let five_scalar = CurveScalar::(five); + let five_g = (five_scalar * g.to_projective()).to_affine(); + let five_g_expected = builder.constant_affine_point(five_g); + builder.curve_assert_valid(&five_g_expected); + + let g_target = builder.constant_affine_point(g); + let five_target = builder.constant_nonnative(five); + let five_g_actual = builder.curve_scalar_mul(&g_target, &five_target); + builder.curve_assert_valid(&five_g_actual); + + builder.connect_affine_point(&five_g_expected, &five_g_actual); + + let data = builder.build(); + let proof = data.prove(pw).unwrap(); + + verify(proof, &data.verifier_only, &data.common) + } + + #[test] + #[ignore] + fn test_curve_random() -> Result<()> { + type F = GoldilocksField; + const D: usize = 4; + + let config = CircuitConfig { + num_routed_wires: 33, + ..CircuitConfig::standard_recursion_config() + }; + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let rando = + (CurveScalar(Secp256K1Scalar::rand()) * Secp256K1::GENERATOR_PROJECTIVE).to_affine(); + let randot = builder.constant_affine_point(rando); + + let two_target = builder.constant_nonnative(Secp256K1Scalar::TWO); + let randot_doubled = builder.curve_double(&randot); + let randot_times_two = builder.curve_scalar_mul(&randot, &two_target); + builder.connect_affine_point(&randot_doubled, &randot_times_two); + + let data = builder.build(); + let proof = data.prove(pw).unwrap(); + + verify(proof, &data.verifier_only, &data.common) + } +} diff --git a/src/gadgets/mod.rs b/src/gadgets/mod.rs index 8b6e60f6..09acb9de 100644 --- a/src/gadgets/mod.rs +++ b/src/gadgets/mod.rs @@ -2,6 +2,7 @@ pub mod arithmetic; pub mod arithmetic_extension; pub mod arithmetic_u32; pub mod biguint; +pub mod curve; pub mod hash; pub mod insert; pub mod interpolation; diff --git a/src/gadgets/nonnative.rs b/src/gadgets/nonnative.rs index 84691421..56d717e3 100644 --- a/src/gadgets/nonnative.rs +++ b/src/gadgets/nonnative.rs @@ -1,60 +1,81 @@ use std::marker::PhantomData; -use num::{BigUint, One}; +use num::{BigUint, Zero}; use crate::field::field_types::RichField; use crate::field::{extension_field::Extendable, field_types::Field}; +use crate::gadgets::arithmetic_u32::U32Target; use crate::gadgets::biguint::BigUintTarget; +use crate::iop::generator::{GeneratedValues, SimpleGenerator}; +use crate::iop::target::{BoolTarget, Target}; +use crate::iop::witness::{PartitionWitness, Witness}; use crate::plonk::circuit_builder::CircuitBuilder; +use crate::util::ceil_div_usize; -pub struct ForeignFieldTarget { - value: BigUintTarget, +#[derive(Clone, Debug)] +pub struct NonNativeTarget { + pub(crate) value: 
BigUintTarget, _phantom: PhantomData, } impl, const D: usize> CircuitBuilder { - pub fn biguint_to_nonnative(&mut self, x: &BigUintTarget) -> ForeignFieldTarget { - ForeignFieldTarget { + fn num_nonnative_limbs() -> usize { + ceil_div_usize(FF::BITS, 32) + } + + pub fn biguint_to_nonnative(&mut self, x: &BigUintTarget) -> NonNativeTarget { + NonNativeTarget { value: x.clone(), _phantom: PhantomData, } } - pub fn nonnative_to_biguint(&mut self, x: &ForeignFieldTarget) -> BigUintTarget { + pub fn nonnative_to_biguint(&mut self, x: &NonNativeTarget) -> BigUintTarget { x.value.clone() } - pub fn constant_nonnative(&mut self, x: FF) -> ForeignFieldTarget { + pub fn constant_nonnative(&mut self, x: FF) -> NonNativeTarget { let x_biguint = self.constant_biguint(&x.to_biguint()); self.biguint_to_nonnative(&x_biguint) } - // Assert that two ForeignFieldTarget's, both assumed to be in reduced form, are equal. + // Assert that two NonNativeTarget's, both assumed to be in reduced form, are equal. pub fn connect_nonnative( &mut self, - lhs: &ForeignFieldTarget, - rhs: &ForeignFieldTarget, + lhs: &NonNativeTarget, + rhs: &NonNativeTarget, ) { self.connect_biguint(&lhs.value, &rhs.value); } - // Add two `ForeignFieldTarget`s. + pub fn add_virtual_nonnative_target(&mut self) -> NonNativeTarget { + let num_limbs = Self::num_nonnative_limbs::(); + let value = self.add_virtual_biguint_target(num_limbs); + + NonNativeTarget { + value, + _phantom: PhantomData, + } + } + + // Add two `NonNativeTarget`s. pub fn add_nonnative( &mut self, - a: &ForeignFieldTarget, - b: &ForeignFieldTarget, - ) -> ForeignFieldTarget { + a: &NonNativeTarget, + b: &NonNativeTarget, + ) -> NonNativeTarget { let result = self.add_biguint(&a.value, &b.value); + // TODO: reduce add result with only one conditional subtraction self.reduce(&result) } - // Subtract two `ForeignFieldTarget`s. + // Subtract two `NonNativeTarget`s. pub fn sub_nonnative( &mut self, - a: &ForeignFieldTarget, - b: &ForeignFieldTarget, - ) -> ForeignFieldTarget { + a: &NonNativeTarget, + b: &NonNativeTarget, + ) -> NonNativeTarget { let order = self.constant_biguint(&FF::order()); let a_plus_order = self.add_biguint(&order, &a.value); let result = self.sub_biguint(&a_plus_order, &b.value); @@ -65,45 +86,127 @@ impl, const D: usize> CircuitBuilder { pub fn mul_nonnative( &mut self, - a: &ForeignFieldTarget, - b: &ForeignFieldTarget, - ) -> ForeignFieldTarget { + a: &NonNativeTarget, + b: &NonNativeTarget, + ) -> NonNativeTarget { let result = self.mul_biguint(&a.value, &b.value); self.reduce(&result) } - pub fn neg_nonnative( - &mut self, - x: &ForeignFieldTarget, - ) -> ForeignFieldTarget { - let neg_one = FF::order() - BigUint::one(); - let neg_one_target = self.constant_biguint(&neg_one); - let neg_one_ff = self.biguint_to_nonnative(&neg_one_target); + pub fn neg_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { + let zero_target = self.constant_biguint(&BigUint::zero()); + let zero_ff = self.biguint_to_nonnative(&zero_target); - self.mul_nonnative(&neg_one_ff, x) + self.sub_nonnative(&zero_ff, x) } - /// Returns `x % |FF|` as a `ForeignFieldTarget`. 
- fn reduce(&mut self, x: &BigUintTarget) -> ForeignFieldTarget { + pub fn inv_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { + let num_limbs = x.value.num_limbs(); + let inv_biguint = self.add_virtual_biguint_target(num_limbs); + let inv = NonNativeTarget:: { + value: inv_biguint, + _phantom: PhantomData, + }; + + self.add_simple_generator(NonNativeInverseGenerator:: { + x: x.clone(), + inv: inv.clone(), + _phantom: PhantomData, + }); + + let product = self.mul_nonnative(x, &inv); + let one = self.constant_nonnative(FF::ONE); + self.connect_nonnative(&product, &one); + + inv + } + + pub fn div_rem_nonnative( + &mut self, + x: &NonNativeTarget, + y: &NonNativeTarget, + ) -> (NonNativeTarget, NonNativeTarget) { + let x_biguint = self.nonnative_to_biguint(x); + let y_biguint = self.nonnative_to_biguint(y); + + let (div_biguint, rem_biguint) = self.div_rem_biguint(&x_biguint, &y_biguint); + let div = self.biguint_to_nonnative(&div_biguint); + let rem = self.biguint_to_nonnative(&rem_biguint); + (div, rem) + } + + /// Returns `x % |FF|` as a `NonNativeTarget`. + fn reduce(&mut self, x: &BigUintTarget) -> NonNativeTarget { let modulus = FF::order(); let order_target = self.constant_biguint(&modulus); let value = self.rem_biguint(x, &order_target); - ForeignFieldTarget { + NonNativeTarget { value, _phantom: PhantomData, } } #[allow(dead_code)] - fn reduce_nonnative( - &mut self, - x: &ForeignFieldTarget, - ) -> ForeignFieldTarget { + fn reduce_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { let x_biguint = self.nonnative_to_biguint(x); self.reduce(&x_biguint) } + + pub fn bool_to_nonnative(&mut self, b: &BoolTarget) -> NonNativeTarget { + let limbs = vec![U32Target(b.target)]; + let value = BigUintTarget { limbs }; + + NonNativeTarget { + value, + _phantom: PhantomData, + } + } + + // Split a nonnative field element to bits. 
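+    // The bits come out little-endian (limb 0 first, bit 0 of each limb first), which
+    // matches the low-to-high scan in `curve_scalar_mul`. The `BoolTarget`s are built
+    // with `new_unsafe`, relying on `split_le_base::<2>` to range-check each limb to
+    // {0, 1}.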
+ pub fn split_nonnative_to_bits( + &mut self, + x: &NonNativeTarget, + ) -> Vec { + let num_limbs = x.value.num_limbs(); + let mut result = Vec::with_capacity(num_limbs * 32); + + for i in 0..num_limbs { + let limb = x.value.get_limb(i); + let bit_targets = self.split_le_base::<2>(limb.0, 32); + let mut bits: Vec<_> = bit_targets + .iter() + .map(|&t| BoolTarget::new_unsafe(t)) + .collect(); + + result.append(&mut bits); + } + + result + } +} + +#[derive(Debug)] +struct NonNativeInverseGenerator, const D: usize, FF: Field> { + x: NonNativeTarget, + inv: NonNativeTarget, + _phantom: PhantomData, +} + +impl, const D: usize, FF: Field> SimpleGenerator + for NonNativeInverseGenerator +{ + fn dependencies(&self) -> Vec { + self.x.value.limbs.iter().map(|&l| l.0).collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let x = witness.get_nonnative_target(self.x.clone()); + let inv = x.inverse(); + + out_buffer.set_nonnative_target(self.inv.clone(), inv); + } } #[cfg(test)] @@ -112,7 +215,7 @@ mod tests { use crate::field::field_types::Field; use crate::field::goldilocks_field::GoldilocksField; - use crate::field::secp256k1::Secp256K1Base; + use crate::field::secp256k1_base::Secp256K1Base; use crate::iop::witness::PartialWitness; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::CircuitConfig; @@ -214,4 +317,26 @@ mod tests { let proof = data.prove(pw).unwrap(); verify(proof, &data.verifier_only, &data.common) } + + #[test] + fn test_nonnative_inv() -> Result<()> { + type FF = Secp256K1Base; + let x_ff = FF::rand(); + let inv_x_ff = x_ff.inverse(); + + type F = GoldilocksField; + let config = CircuitConfig::standard_recursion_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let x = builder.constant_nonnative(x_ff); + let inv_x = builder.inv_nonnative(&x); + + let inv_x_expected = builder.constant_nonnative(inv_x_ff); + builder.connect_nonnative(&inv_x, &inv_x_expected); + + let data = builder.build(); + let proof = data.prove(pw).unwrap(); + verify(proof, &data.verifier_only, &data.common) + } } diff --git a/src/iop/generator.rs b/src/iop/generator.rs index ae973d7c..ea4ac1f6 100644 --- a/src/iop/generator.rs +++ b/src/iop/generator.rs @@ -8,6 +8,7 @@ use crate::field::extension_field::{Extendable, FieldExtension}; use crate::field::field_types::{Field, RichField}; use crate::gadgets::arithmetic_u32::U32Target; use crate::gadgets::biguint::BigUintTarget; +use crate::gadgets::nonnative::NonNativeTarget; use crate::hash::hash_types::{HashOut, HashOutTarget}; use crate::iop::target::Target; use crate::iop::wire::Wire; @@ -168,6 +169,10 @@ impl GeneratedValues { } } + pub fn set_nonnative_target(&mut self, target: NonNativeTarget, value: FF) { + self.set_biguint_target(target.value, value.to_biguint()) + } + pub fn set_hash_target(&mut self, ht: HashOutTarget, value: HashOut) { ht.elements .iter() diff --git a/src/iop/witness.rs b/src/iop/witness.rs index a773c1a9..8b6df90a 100644 --- a/src/iop/witness.rs +++ b/src/iop/witness.rs @@ -6,6 +6,7 @@ use crate::field::extension_field::target::ExtensionTarget; use crate::field::extension_field::{Extendable, FieldExtension}; use crate::field::field_types::Field; use crate::gadgets::biguint::BigUintTarget; +use crate::gadgets::nonnative::NonNativeTarget; use crate::hash::hash_types::HashOutTarget; use crate::hash::hash_types::{HashOut, MerkleCapTarget}; use crate::hash::merkle_tree::MerkleCap; @@ -68,6 +69,11 @@ pub trait Witness 
{ result } + fn get_nonnative_target(&self, target: NonNativeTarget) -> FF { + let val = self.get_biguint_target(target.value); + FF::from_biguint(val) + } + fn get_hash_target(&self, ht: HashOutTarget) -> HashOut { HashOut { elements: self.get_targets(&ht.elements).try_into().unwrap(), diff --git a/src/lib.rs b/src/lib.rs index e76e312c..b0158d7a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -11,6 +11,7 @@ #![feature(specialization)] #![feature(stdsimd)] +pub mod curve; pub mod field; pub mod fri; pub mod gadgets;