Change from_biguint's behavior with extension fields

The `from_biguint` implementations appear to be unused for extension fields, so we're free to change the mapping without breaking anything.

As the TODO says, the mapping that's currently implemented doesn't seem natural or useful. It seems more natural to treat the `BigUint` as a base field element, potentially in a non-canonical form.
Author: Daniel Lubarov, 2022-08-19 09:21:10 -07:00
parent cbfc13c33f
commit 2fd5fbbe01
16 changed files with 66 additions and 54 deletions
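
To make the new mapping concrete, below is a minimal, self-contained sketch (not the plonky2 code: it models the base field as integers mod the Goldilocks prime, models a degree-2 extension as a coefficient pair `[c0, c1]`, and uses the `num-bigint` and `num-integer` crates). It contrasts the old limb-splitting interpretation of a `BigUint` with the new one, which reduces the whole value into the base field and embeds it as the constant coefficient:

    use num_bigint::BigUint;
    use num_integer::Integer;

    // Goldilocks prime, standing in for F::order() in this toy model.
    fn p() -> BigUint {
        BigUint::from(0xFFFF_FFFF_0000_0001u64)
    }

    // Collapse an already-reduced BigUint (< 2^64) into a u64 coefficient.
    fn to_u64(n: BigUint) -> u64 {
        n.to_u64_digits().first().copied().unwrap_or(0)
    }

    // Old mapping: split `n` base-p via div_rem, one limb per coefficient.
    fn old_mapping(n: &BigUint) -> [u64; 2] {
        let (high, low) = n.div_rem(&p());
        [to_u64(low), to_u64(high % p())]
    }

    // New mapping: reduce the whole value mod p and embed it as the constant
    // coefficient, mirroring `F::from_noncanonical_biguint(n).into()`.
    fn new_mapping(n: &BigUint) -> [u64; 2] {
        [to_u64(n % p()), 0]
    }

    fn main() {
        let n = p() + 5u32; // a non-canonical input: p + 5
        assert_eq!(old_mapping(&n), [5, 1]); // digits of n spread across coefficients
        assert_eq!(new_mapping(&n), [5, 0]); // n reduced mod p, embedded in the base field
    }

Under the new behavior, an extension field's `from_noncanonical_biguint` matches the semantics documented on the trait below (`n % Self::characteristic()`, embedded in the prime subfield) instead of spreading the digits of `n` across the extension's coefficients.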

@@ -207,7 +207,7 @@ mod tests {
             0b00001111111111111111111111111111,
             0b11111111111111111111111111111111,
         ];
-        let x = Secp256K1Scalar::from_biguint(BigUint::from_slice(&x_canonical));
+        let x = Secp256K1Scalar::from_noncanonical_biguint(BigUint::from_slice(&x_canonical));
         assert_eq!(x.to_canonical_biguint().to_u32_digits(), x_canonical);
         assert_eq!(
             to_digits::<Secp256K1>(&x, 17),
@@ -240,13 +240,13 @@ mod tests {
         let generator_2 = generator_1 + generator_1;
         let generator_3 = generator_1 + generator_2;
-        let scalar_1 = Secp256K1Scalar::from_biguint(BigUint::from_slice(&[
+        let scalar_1 = Secp256K1Scalar::from_noncanonical_biguint(BigUint::from_slice(&[
             11111111, 22222222, 33333333, 44444444,
         ]));
-        let scalar_2 = Secp256K1Scalar::from_biguint(BigUint::from_slice(&[
+        let scalar_2 = Secp256K1Scalar::from_noncanonical_biguint(BigUint::from_slice(&[
             22222222, 22222222, 33333333, 44444444,
         ]));
-        let scalar_3 = Secp256K1Scalar::from_biguint(BigUint::from_slice(&[
+        let scalar_3 = Secp256K1Scalar::from_noncanonical_biguint(BigUint::from_slice(&[
             33333333, 22222222, 33333333, 44444444,
         ]));

@@ -277,9 +277,9 @@ impl<C: Curve> Neg for ProjectivePoint<C> {
 }

 pub fn base_to_scalar<C: Curve>(x: C::BaseField) -> C::ScalarField {
-    C::ScalarField::from_biguint(x.to_canonical_biguint())
+    C::ScalarField::from_noncanonical_biguint(x.to_canonical_biguint())
 }

 pub fn scalar_to_base<C: Curve>(x: C::ScalarField) -> C::BaseField {
-    C::BaseField::from_biguint(x.to_canonical_biguint())
+    C::BaseField::from_noncanonical_biguint(x.to_canonical_biguint())
 }

@@ -45,14 +45,14 @@ pub fn decompose_secp256k1_scalar(
     )
     .round()
     .to_integer();
-    let c1 = Secp256K1Scalar::from_biguint(c1_biguint);
+    let c1 = Secp256K1Scalar::from_noncanonical_biguint(c1_biguint);
     let c2_biguint = Ratio::new(
         MINUS_B1.to_canonical_biguint() * k.to_canonical_biguint(),
         p.clone(),
     )
     .round()
     .to_integer();
-    let c2 = Secp256K1Scalar::from_biguint(c2_biguint);
+    let c2 = Secp256K1Scalar::from_noncanonical_biguint(c2_biguint);

     let k1_raw = k - c1 * A1 - c2 * A2;
     let k2_raw = c1 * MINUS_B1 - c2 * B2;
@@ -61,13 +61,13 @@ pub fn decompose_secp256k1_scalar(
     let two = BigUint::from_slice(&[2]);
     let k1_neg = k1_raw.to_canonical_biguint() > p.clone() / two.clone();
     let k1 = if k1_neg {
-        Secp256K1Scalar::from_biguint(p.clone() - k1_raw.to_canonical_biguint())
+        Secp256K1Scalar::from_noncanonical_biguint(p.clone() - k1_raw.to_canonical_biguint())
     } else {
         k1_raw
     };
     let k2_neg = k2_raw.to_canonical_biguint() > p.clone() / two;
     let k2 = if k2_neg {
-        Secp256K1Scalar::from_biguint(p - k2_raw.to_canonical_biguint())
+        Secp256K1Scalar::from_noncanonical_biguint(p - k2_raw.to_canonical_biguint())
     } else {
         k2_raw
     };

@@ -71,7 +71,7 @@ mod tests {

     #[test]
     fn test_g1_multiplication() {
-        let lhs = Secp256K1Scalar::from_biguint(BigUint::from_slice(&[
+        let lhs = Secp256K1Scalar::from_noncanonical_biguint(BigUint::from_slice(&[
             1111, 2222, 3333, 4444, 5555, 6666, 7777, 8888,
         ]));
         assert_eq!(

@@ -30,7 +30,7 @@ pub fn fixed_base_curve_mul_circuit<C: Curve, F: RichField + Extendable<D>, cons
     let limbs = builder.split_nonnative_to_4_bit_limbs(scalar);

     let hash_0 = KeccakHash::<32>::hash_no_pad(&[F::ZERO]);
-    let hash_0_scalar = C::ScalarField::from_biguint(BigUint::from_bytes_le(
+    let hash_0_scalar = C::ScalarField::from_noncanonical_biguint(BigUint::from_bytes_le(
         &GenericHashOut::<F>::to_bytes(&hash_0),
     ));
     let rando = (CurveScalar(hash_0_scalar) * C::GENERATOR_PROJECTIVE).to_affine();

@@ -29,7 +29,7 @@ pub fn curve_msm_circuit<C: Curve, F: RichField + Extendable<D>, const D: usize>
     let num_limbs = limbs_n.len();

     let hash_0 = KeccakHash::<32>::hash_no_pad(&[F::ZERO]);
-    let hash_0_scalar = C::ScalarField::from_biguint(BigUint::from_bytes_le(
+    let hash_0_scalar = C::ScalarField::from_noncanonical_biguint(BigUint::from_bytes_le(
         &GenericHashOut::<F>::to_bytes(&hash_0),
     ));
     let rando = (CurveScalar(hash_0_scalar) * C::GENERATOR_PROJECTIVE).to_affine();

@@ -131,7 +131,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilderWindowedMul<F,
         n: &NonNativeTarget<C::ScalarField>,
     ) -> AffinePointTarget<C> {
         let hash_0 = KeccakHash::<25>::hash_no_pad(&[F::ZERO]);
-        let hash_0_scalar = C::ScalarField::from_biguint(BigUint::from_bytes_le(
+        let hash_0_scalar = C::ScalarField::from_noncanonical_biguint(BigUint::from_bytes_le(
             &GenericHashOut::<F>::to_bytes(&hash_0),
         ));
         let starting_point = CurveScalar(hash_0_scalar) * C::GENERATOR_PROJECTIVE;

@@ -116,7 +116,7 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
     }

     fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
-        let k = Secp256K1Scalar::from_biguint(witness_get_biguint_target(
+        let k = Secp256K1Scalar::from_noncanonical_biguint(witness_get_biguint_target(
             witness,
             self.k.value.clone(),
         ));

@@ -467,8 +467,14 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerat
     }

     fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
-        let a = FF::from_biguint(witness_get_biguint_target(witness, self.a.value.clone()));
-        let b = FF::from_biguint(witness_get_biguint_target(witness, self.b.value.clone()));
+        let a = FF::from_noncanonical_biguint(witness_get_biguint_target(
+            witness,
+            self.a.value.clone(),
+        ));
+        let b = FF::from_noncanonical_biguint(witness_get_biguint_target(
+            witness,
+            self.b.value.clone(),
+        ));
         let a_biguint = a.to_canonical_biguint();
         let b_biguint = b.to_canonical_biguint();
         let sum_biguint = a_biguint + b_biguint;
@@ -508,7 +514,10 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerat
             .summands
             .iter()
             .map(|summand| {
-                FF::from_biguint(witness_get_biguint_target(witness, summand.value.clone()))
+                FF::from_noncanonical_biguint(witness_get_biguint_target(
+                    witness,
+                    summand.value.clone(),
+                ))
             })
             .collect();
         let summand_biguints: Vec<_> = summands
@@ -553,8 +562,14 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerat
     }

     fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
-        let a = FF::from_biguint(witness_get_biguint_target(witness, self.a.value.clone()));
-        let b = FF::from_biguint(witness_get_biguint_target(witness, self.b.value.clone()));
+        let a = FF::from_noncanonical_biguint(witness_get_biguint_target(
+            witness,
+            self.a.value.clone(),
+        ));
+        let b = FF::from_noncanonical_biguint(witness_get_biguint_target(
+            witness,
+            self.b.value.clone(),
+        ));
         let a_biguint = a.to_canonical_biguint();
         let b_biguint = b.to_canonical_biguint();
@@ -594,8 +609,14 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerat
     }

     fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
-        let a = FF::from_biguint(witness_get_biguint_target(witness, self.a.value.clone()));
-        let b = FF::from_biguint(witness_get_biguint_target(witness, self.b.value.clone()));
+        let a = FF::from_noncanonical_biguint(witness_get_biguint_target(
+            witness,
+            self.a.value.clone(),
+        ));
+        let b = FF::from_noncanonical_biguint(witness_get_biguint_target(
+            witness,
+            self.b.value.clone(),
+        ));
         let a_biguint = a.to_canonical_biguint();
         let b_biguint = b.to_canonical_biguint();
@@ -625,7 +646,10 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerat
     }

     fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
-        let x = FF::from_biguint(witness_get_biguint_target(witness, self.x.value.clone()));
+        let x = FF::from_noncanonical_biguint(witness_get_biguint_target(
+            witness,
+            self.x.value.clone(),
+        ));
         let inv = x.inverse();

         let x_biguint = x.to_canonical_biguint();

@@ -3,7 +3,6 @@ use std::iter::{Product, Sum};
 use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};

 use num::bigint::BigUint;
-use num::Integer;
 use serde::{Deserialize, Serialize};

 use crate::extension::{Extendable, FieldExtension, Frobenius, OEF};
@@ -89,9 +88,8 @@ impl<F: Extendable<2>> Field for QuadraticExtension<F> {
         ))
     }

-    fn from_biguint(n: BigUint) -> Self {
-        let (high, low) = n.div_rem(&F::order());
-        Self([F::from_biguint(low), F::from_biguint(high)])
+    fn from_noncanonical_biguint(n: BigUint) -> Self {
+        F::from_noncanonical_biguint(n).into()
     }

     fn from_canonical_u64(n: u64) -> Self {

@@ -4,7 +4,6 @@ use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssi

 use num::bigint::BigUint;
 use num::traits::Pow;
-use num::Integer;
 use serde::{Deserialize, Serialize};

 use crate::extension::{Extendable, FieldExtension, Frobenius, OEF};
@@ -94,16 +93,8 @@ impl<F: Extendable<4>> Field for QuarticExtension<F> {
         ))
     }

-    fn from_biguint(n: BigUint) -> Self {
-        let (rest, first) = n.div_rem(&F::order());
-        let (rest, second) = rest.div_rem(&F::order());
-        let (rest, third) = rest.div_rem(&F::order());
-        Self([
-            F::from_biguint(first),
-            F::from_biguint(second),
-            F::from_biguint(third),
-            F::from_biguint(rest),
-        ])
+    fn from_noncanonical_biguint(n: BigUint) -> Self {
+        F::from_noncanonical_biguint(n).into()
     }

     fn from_canonical_u64(n: u64) -> Self {

@@ -99,8 +99,8 @@ impl<F: Extendable<5>> Field for QuinticExtension<F> {
         Some(FieldExtension::<5>::scalar_mul(&f, g.inverse()))
     }

-    fn from_biguint(n: BigUint) -> Self {
-        Self([F::from_biguint(n), F::ZERO, F::ZERO, F::ZERO, F::ZERO])
+    fn from_noncanonical_biguint(n: BigUint) -> Self {
+        F::from_noncanonical_biguint(n).into()
     }

     fn from_canonical_u64(n: u64) -> Self {

@@ -90,7 +90,7 @@ impl Field for GoldilocksField {
         try_inverse_u64(self)
     }

-    fn from_biguint(n: BigUint) -> Self {
+    fn from_noncanonical_biguint(n: BigUint) -> Self {
         Self(n.mod_floor(&Self::order()).to_u64_digits()[0])
     }

@@ -106,7 +106,7 @@ impl Field for Secp256K1Base {
         Some(self.exp_biguint(&(Self::order() - BigUint::one() - BigUint::one())))
     }

-    fn from_biguint(val: BigUint) -> Self {
+    fn from_noncanonical_biguint(val: BigUint) -> Self {
         Self(
             val.to_u64_digits()
                 .into_iter()
@@ -135,7 +135,7 @@ impl Field for Secp256K1Base {
     #[cfg(feature = "rand")]
     fn rand_from_rng<R: rand::Rng>(rng: &mut R) -> Self {
         use num::bigint::RandBigInt;
-        Self::from_biguint(rng.gen_biguint_below(&Self::order()))
+        Self::from_noncanonical_biguint(rng.gen_biguint_below(&Self::order()))
     }
 }
@@ -157,7 +157,7 @@ impl Neg for Secp256K1Base {
         if self.is_zero() {
             Self::ZERO
         } else {
-            Self::from_biguint(Self::order() - self.to_canonical_biguint())
+            Self::from_noncanonical_biguint(Self::order() - self.to_canonical_biguint())
         }
     }
 }
@@ -171,7 +171,7 @@ impl Add for Secp256K1Base {
         if result >= Self::order() {
             result -= Self::order();
         }
-        Self::from_biguint(result)
+        Self::from_noncanonical_biguint(result)
     }
 }
@@ -210,7 +210,7 @@ impl Mul for Secp256K1Base {
     #[inline]
     fn mul(self, rhs: Self) -> Self {
-        Self::from_biguint(
+        Self::from_noncanonical_biguint(
             (self.to_canonical_biguint() * rhs.to_canonical_biguint()).mod_floor(&Self::order()),
         )
     }

@@ -115,7 +115,7 @@ impl Field for Secp256K1Scalar {
         Some(self.exp_biguint(&(Self::order() - BigUint::one() - BigUint::one())))
     }

-    fn from_biguint(val: BigUint) -> Self {
+    fn from_noncanonical_biguint(val: BigUint) -> Self {
         Self(
             val.to_u64_digits()
                 .into_iter()
@@ -144,7 +144,7 @@ impl Field for Secp256K1Scalar {
     #[cfg(feature = "rand")]
     fn rand_from_rng<R: rand::Rng>(rng: &mut R) -> Self {
         use num::bigint::RandBigInt;
-        Self::from_biguint(rng.gen_biguint_below(&Self::order()))
+        Self::from_noncanonical_biguint(rng.gen_biguint_below(&Self::order()))
     }
 }
@@ -166,7 +166,7 @@ impl Neg for Secp256K1Scalar {
         if self.is_zero() {
             Self::ZERO
         } else {
-            Self::from_biguint(Self::order() - self.to_canonical_biguint())
+            Self::from_noncanonical_biguint(Self::order() - self.to_canonical_biguint())
         }
     }
 }
@@ -180,7 +180,7 @@ impl Add for Secp256K1Scalar {
         if result >= Self::order() {
             result -= Self::order();
         }
-        Self::from_biguint(result)
+        Self::from_noncanonical_biguint(result)
     }
 }
@@ -219,7 +219,7 @@ impl Mul for Secp256K1Scalar {
     #[inline]
     fn mul(self, rhs: Self) -> Self {
-        Self::from_biguint(
+        Self::from_noncanonical_biguint(
             (self.to_canonical_biguint() * rhs.to_canonical_biguint()).mod_floor(&Self::order()),
         )
     }

@@ -270,9 +270,8 @@ pub trait Field:
         subgroup.into_iter().map(|x| x * shift).collect()
     }

-    // TODO: The current behavior for composite fields doesn't seem natural or useful.
-    // Rename to `from_noncanonical_biguint` and have it return `n % Self::characteristic()`.
-    fn from_biguint(n: BigUint) -> Self;
+    /// Returns `n % Self::characteristic()`.
+    fn from_noncanonical_biguint(n: BigUint) -> Self;

     /// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`.
     // TODO: Should probably be unsafe.