mirror of
https://github.com/logos-storage/plonky2.git
synced 2026-01-03 14:23:07 +00:00
Merge branch 'main' into batchable
# Conflicts: # plonky2/src/gates/gmimc.rs # plonky2/src/hash/gmimc.rs # plonky2/src/plonk/circuit_builder.rs
This commit is contained in:
commit
6d2c9b11a6
10
Cargo.toml
10
Cargo.toml
@ -1,2 +1,10 @@
|
||||
[workspace]
|
||||
members = ["field", "insertion", "plonky2", "util", "waksman"]
|
||||
members = ["field", "insertion", "plonky2", "starky", "system_zero", "util", "waksman"]
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
#lto = "fat"
|
||||
#codegen-units = 1
|
||||
|
||||
[profile.bench]
|
||||
opt-level = 3
|
||||
|
||||
31
README.md
31
README.md
@ -1,8 +1,24 @@
|
||||
# Plonky2
|
||||
|
||||
Plonky2 is an implementation of recursive arguments based on Plonk and FRI. It uses FRI to check systems of polynomial constraints, similar to the DEEP-ALI method described in the [DEEP-FRI](https://arxiv.org/abs/1903.12243) paper. It is the successor of [plonky](https://github.com/mir-protocol/plonky), which was based on Plonk and Halo.
|
||||
Plonky2 is a SNARK implementation based on techniques from PLONK and FRI. It is the successor of [Plonky](https://github.com/mir-protocol/plonky), which was based on PLONK and Halo.
|
||||
|
||||
Plonky2 is largely focused on recursion performance. We use custom gates to mitigate the bottlenecks of FRI verification, such as hashing and interpolation. We also encode witness data in a ~64 bit field, so field operations take just a few cycles. To achieve 128-bit security, we repeat certain checks, and run certain parts of the argument in an extension field.
|
||||
Plonky2 is built for speed, and features a highly efficient recursive circuit. On a Macbook Pro, recursive proofs can be generated in about 170 ms.
|
||||
|
||||
|
||||
## Documentation
|
||||
|
||||
For more details about the Plonky2 argument system, see this [writeup](plonky2.pdf).
|
||||
|
||||
|
||||
## Building
|
||||
|
||||
Plonky2 requires a recent nightly toolchain, although we plan to transition to stable in the future.
|
||||
|
||||
To use a nightly toolchain for Plonky2 by default, you can run
|
||||
```
|
||||
rustup override set nightly
|
||||
```
|
||||
in the Plonky2 directory.
|
||||
|
||||
|
||||
## Running
|
||||
@ -10,10 +26,17 @@ Plonky2 is largely focused on recursion performance. We use custom gates to miti
|
||||
To see recursion performance, one can run this test, which generates a chain of three recursion proofs:
|
||||
|
||||
```sh
|
||||
RUST_LOG=debug RUSTFLAGS=-Ctarget-cpu=native cargo test --release test_recursive_recursive_verifier -- --ignored
|
||||
RUST_LOG=debug RUSTFLAGS=-Ctarget-cpu=native cargo test --release test_recursive_recursive_verifier
|
||||
```
|
||||
|
||||
|
||||
## Jemalloc
|
||||
|
||||
By default, Plonky2 uses the [Jemalloc](http://jemalloc.net) memory allocator due to its superior performance. Currently, it changes the default allocator of any binary to which it is linked. You can disable this behavior by removing the corresponding lines in [`plonky2/src/lib.rs`](https://github.com/mir-protocol/plonky2/blob/main/plonky2/src/lib.rs).
|
||||
|
||||
Jemalloc is known to cause crashes when a binary compiled for x86 is run on an Apple silicon-based Mac under [Rosetta 2](https://support.apple.com/en-us/HT211861). If you are experiencing crashes on your Apple silicon Mac, run `rustc --print target-libdir`. The output should contain `aarch64-apple-darwin`. If the output contains `x86_64-apple-darwin`, then you are running the Rust toolchain for x86; we recommend switching to the native ARM version.
|
||||
|
||||
|
||||
## Copyright
|
||||
|
||||
Plonky2 was developed by Polygon Zero (formerly Mir). While we plan to adopt an open source license, we haven't selected one yet, so all rights are reserved for the time being. Please reach out to us if you have thoughts on licensing.
|
||||
@ -21,5 +44,5 @@ Plonky2 was developed by Polygon Zero (formerly Mir). While we plan to adopt an
|
||||
|
||||
## Disclaimer
|
||||
|
||||
This code has not been thoroughly reviewed or tested, and should not be used in any production systems.
|
||||
This code has not yet been audited, and should not be used in any production systems.
|
||||
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
[package]
|
||||
name = "plonky2_field"
|
||||
description = "Finite field arithmetic"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
|
||||
@ -5,7 +5,7 @@ use std::iter::{Product, Sum};
|
||||
use std::mem::transmute;
|
||||
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};
|
||||
|
||||
use crate::field_types::{Field, PrimeField};
|
||||
use crate::field_types::{Field, Field64};
|
||||
use crate::goldilocks_field::GoldilocksField;
|
||||
use crate::ops::Square;
|
||||
use crate::packed_field::PackedField;
|
||||
@ -510,7 +510,7 @@ unsafe fn interleave2(x: __m256i, y: __m256i) -> (__m256i, __m256i) {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::arch::x86_64::avx2_goldilocks_field::Avx2GoldilocksField;
|
||||
use crate::field_types::PrimeField;
|
||||
use crate::field_types::Field64;
|
||||
use crate::goldilocks_field::GoldilocksField;
|
||||
use crate::ops::Square;
|
||||
use crate::packed_field::PackedField;
|
||||
|
||||
656
field/src/arch/x86_64/avx512_goldilocks_field.rs
Normal file
656
field/src/arch/x86_64/avx512_goldilocks_field.rs
Normal file
@ -0,0 +1,656 @@
|
||||
use core::arch::x86_64::*;
|
||||
use std::fmt;
|
||||
use std::fmt::{Debug, Formatter};
|
||||
use std::iter::{Product, Sum};
|
||||
use std::mem::transmute;
|
||||
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign};
|
||||
|
||||
use crate::field_types::{Field, Field64};
|
||||
use crate::goldilocks_field::GoldilocksField;
|
||||
use crate::ops::Square;
|
||||
use crate::packed_field::PackedField;
|
||||
|
||||
// Ideally `Avx512GoldilocksField` would wrap `__m512i`. Unfortunately, `__m512i` has an alignment
|
||||
// of 64B, which would preclude us from casting `[GoldilocksField; 8]` (alignment 8B) to
|
||||
// `Avx512GoldilocksField`. We need to ensure that `Avx512GoldilocksField` has the same alignment as
|
||||
// `GoldilocksField`. Thus we wrap `[GoldilocksField; 8]` and use the `new` and `get` methods to
|
||||
// convert to and from `__m512i`.
|
||||
#[derive(Copy, Clone)]
|
||||
#[repr(transparent)]
|
||||
pub struct Avx512GoldilocksField(pub [GoldilocksField; 8]);
|
||||
|
||||
impl Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn new(x: __m512i) -> Self {
|
||||
unsafe { transmute(x) }
|
||||
}
|
||||
#[inline]
|
||||
fn get(&self) -> __m512i {
|
||||
unsafe { transmute(*self) }
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl PackedField for Avx512GoldilocksField {
|
||||
const WIDTH: usize = 8;
|
||||
|
||||
type Scalar = GoldilocksField;
|
||||
|
||||
const ZEROS: Self = Self([GoldilocksField::ZERO; 8]);
|
||||
const ONES: Self = Self([GoldilocksField::ONE; 8]);
|
||||
|
||||
#[inline]
|
||||
fn from_arr(arr: [Self::Scalar; Self::WIDTH]) -> Self {
|
||||
Self(arr)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn as_arr(&self) -> [Self::Scalar; Self::WIDTH] {
|
||||
self.0
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn from_slice(slice: &[Self::Scalar]) -> &Self {
|
||||
assert_eq!(slice.len(), Self::WIDTH);
|
||||
unsafe { &*slice.as_ptr().cast() }
|
||||
}
|
||||
#[inline]
|
||||
fn from_slice_mut(slice: &mut [Self::Scalar]) -> &mut Self {
|
||||
assert_eq!(slice.len(), Self::WIDTH);
|
||||
unsafe { &mut *slice.as_mut_ptr().cast() }
|
||||
}
|
||||
#[inline]
|
||||
fn as_slice(&self) -> &[Self::Scalar] {
|
||||
&self.0[..]
|
||||
}
|
||||
#[inline]
|
||||
fn as_slice_mut(&mut self) -> &mut [Self::Scalar] {
|
||||
&mut self.0[..]
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn interleave(&self, other: Self, block_len: usize) -> (Self, Self) {
|
||||
let (v0, v1) = (self.get(), other.get());
|
||||
let (res0, res1) = match block_len {
|
||||
1 => unsafe { interleave1(v0, v1) },
|
||||
2 => unsafe { interleave2(v0, v1) },
|
||||
4 => unsafe { interleave4(v0, v1) },
|
||||
8 => (v0, v1),
|
||||
_ => panic!("unsupported block_len"),
|
||||
};
|
||||
(Self::new(res0), Self::new(res1))
|
||||
}
|
||||
}
|
||||
|
||||
impl Add<Self> for Avx512GoldilocksField {
|
||||
type Output = Self;
|
||||
#[inline]
|
||||
fn add(self, rhs: Self) -> Self {
|
||||
Self::new(unsafe { add(self.get(), rhs.get()) })
|
||||
}
|
||||
}
|
||||
impl Add<GoldilocksField> for Avx512GoldilocksField {
|
||||
type Output = Self;
|
||||
#[inline]
|
||||
fn add(self, rhs: GoldilocksField) -> Self {
|
||||
self + Self::from(rhs)
|
||||
}
|
||||
}
|
||||
impl Add<Avx512GoldilocksField> for GoldilocksField {
|
||||
type Output = Avx512GoldilocksField;
|
||||
#[inline]
|
||||
fn add(self, rhs: Self::Output) -> Self::Output {
|
||||
Self::Output::from(self) + rhs
|
||||
}
|
||||
}
|
||||
impl AddAssign<Self> for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn add_assign(&mut self, rhs: Self) {
|
||||
*self = *self + rhs;
|
||||
}
|
||||
}
|
||||
impl AddAssign<GoldilocksField> for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn add_assign(&mut self, rhs: GoldilocksField) {
|
||||
*self = *self + rhs;
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "({:?})", self.get())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn default() -> Self {
|
||||
Self::ZEROS
|
||||
}
|
||||
}
|
||||
|
||||
impl Div<GoldilocksField> for Avx512GoldilocksField {
|
||||
type Output = Self;
|
||||
#[inline]
|
||||
fn div(self, rhs: GoldilocksField) -> Self {
|
||||
self * rhs.inverse()
|
||||
}
|
||||
}
|
||||
impl DivAssign<GoldilocksField> for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn div_assign(&mut self, rhs: GoldilocksField) {
|
||||
*self *= rhs.inverse();
|
||||
}
|
||||
}
|
||||
|
||||
impl From<GoldilocksField> for Avx512GoldilocksField {
|
||||
fn from(x: GoldilocksField) -> Self {
|
||||
Self([x; 8])
|
||||
}
|
||||
}
|
||||
|
||||
impl Mul<Self> for Avx512GoldilocksField {
|
||||
type Output = Self;
|
||||
#[inline]
|
||||
fn mul(self, rhs: Self) -> Self {
|
||||
Self::new(unsafe { mul(self.get(), rhs.get()) })
|
||||
}
|
||||
}
|
||||
impl Mul<GoldilocksField> for Avx512GoldilocksField {
|
||||
type Output = Self;
|
||||
#[inline]
|
||||
fn mul(self, rhs: GoldilocksField) -> Self {
|
||||
self * Self::from(rhs)
|
||||
}
|
||||
}
|
||||
impl Mul<Avx512GoldilocksField> for GoldilocksField {
|
||||
type Output = Avx512GoldilocksField;
|
||||
#[inline]
|
||||
fn mul(self, rhs: Avx512GoldilocksField) -> Self::Output {
|
||||
Self::Output::from(self) * rhs
|
||||
}
|
||||
}
|
||||
impl MulAssign<Self> for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn mul_assign(&mut self, rhs: Self) {
|
||||
*self = *self * rhs;
|
||||
}
|
||||
}
|
||||
impl MulAssign<GoldilocksField> for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn mul_assign(&mut self, rhs: GoldilocksField) {
|
||||
*self = *self * rhs;
|
||||
}
|
||||
}
|
||||
|
||||
impl Neg for Avx512GoldilocksField {
|
||||
type Output = Self;
|
||||
#[inline]
|
||||
fn neg(self) -> Self {
|
||||
Self::new(unsafe { neg(self.get()) })
|
||||
}
|
||||
}
|
||||
|
||||
impl Product for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
|
||||
iter.reduce(|x, y| x * y).unwrap_or(Self::ONES)
|
||||
}
|
||||
}
|
||||
|
||||
impl Square for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn square(&self) -> Self {
|
||||
Self::new(unsafe { square(self.get()) })
|
||||
}
|
||||
}
|
||||
|
||||
impl Sub<Self> for Avx512GoldilocksField {
|
||||
type Output = Self;
|
||||
#[inline]
|
||||
fn sub(self, rhs: Self) -> Self {
|
||||
Self::new(unsafe { sub(self.get(), rhs.get()) })
|
||||
}
|
||||
}
|
||||
impl Sub<GoldilocksField> for Avx512GoldilocksField {
|
||||
type Output = Self;
|
||||
#[inline]
|
||||
fn sub(self, rhs: GoldilocksField) -> Self {
|
||||
self - Self::from(rhs)
|
||||
}
|
||||
}
|
||||
impl Sub<Avx512GoldilocksField> for GoldilocksField {
|
||||
type Output = Avx512GoldilocksField;
|
||||
#[inline]
|
||||
fn sub(self, rhs: Avx512GoldilocksField) -> Self::Output {
|
||||
Self::Output::from(self) - rhs
|
||||
}
|
||||
}
|
||||
impl SubAssign<Self> for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn sub_assign(&mut self, rhs: Self) {
|
||||
*self = *self - rhs;
|
||||
}
|
||||
}
|
||||
impl SubAssign<GoldilocksField> for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn sub_assign(&mut self, rhs: GoldilocksField) {
|
||||
*self = *self - rhs;
|
||||
}
|
||||
}
|
||||
|
||||
impl Sum for Avx512GoldilocksField {
|
||||
#[inline]
|
||||
fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
|
||||
iter.reduce(|x, y| x + y).unwrap_or(Self::ZEROS)
|
||||
}
|
||||
}
|
||||
|
||||
const FIELD_ORDER: __m512i = unsafe { transmute([GoldilocksField::ORDER; 8]) };
|
||||
const EPSILON: __m512i = unsafe { transmute([GoldilocksField::ORDER.wrapping_neg(); 8]) };
|
||||
|
||||
#[inline]
|
||||
unsafe fn canonicalize(x: __m512i) -> __m512i {
|
||||
let mask = _mm512_cmpge_epu64_mask(x, FIELD_ORDER);
|
||||
_mm512_mask_sub_epi64(x, mask, x, FIELD_ORDER)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn add_no_double_overflow_64_64(x: __m512i, y: __m512i) -> __m512i {
|
||||
let res_wrapped = _mm512_add_epi64(x, y);
|
||||
let mask = _mm512_cmplt_epu64_mask(res_wrapped, y); // mask set if add overflowed
|
||||
let res = _mm512_mask_sub_epi64(res_wrapped, mask, res_wrapped, FIELD_ORDER);
|
||||
res
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn sub_no_double_overflow_64_64(x: __m512i, y: __m512i) -> __m512i {
|
||||
let mask = _mm512_cmplt_epu64_mask(x, y); // mask set if sub will underflow (x < y)
|
||||
let res_wrapped = _mm512_sub_epi64(x, y);
|
||||
let res = _mm512_mask_add_epi64(res_wrapped, mask, res_wrapped, FIELD_ORDER);
|
||||
res
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn add(x: __m512i, y: __m512i) -> __m512i {
|
||||
add_no_double_overflow_64_64(x, canonicalize(y))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn sub(x: __m512i, y: __m512i) -> __m512i {
|
||||
sub_no_double_overflow_64_64(x, canonicalize(y))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn neg(y: __m512i) -> __m512i {
|
||||
_mm512_sub_epi64(FIELD_ORDER, canonicalize(y))
|
||||
}
|
||||
|
||||
const LO_32_BITS_MASK: __mmask16 = unsafe { transmute(0b0101010101010101u16) };
|
||||
|
||||
#[inline]
|
||||
unsafe fn mul64_64(x: __m512i, y: __m512i) -> (__m512i, __m512i) {
|
||||
// We want to move the high 32 bits to the low position. The multiplication instruction ignores
|
||||
// the high 32 bits, so it's ok to just duplicate it into the low position. This duplication can
|
||||
// be done on port 5; bitshifts run on port 0, competing with multiplication.
|
||||
// This instruction is only provided for 32-bit floats, not integers. Idk why Intel makes the
|
||||
// distinction; the casts are free and it guarantees that the exact bit pattern is preserved.
|
||||
// Using a swizzle instruction of the wrong domain (float vs int) does not increase latency
|
||||
// since Haswell.
|
||||
let x_hi = _mm512_castps_si512(_mm512_movehdup_ps(_mm512_castsi512_ps(x)));
|
||||
let y_hi = _mm512_castps_si512(_mm512_movehdup_ps(_mm512_castsi512_ps(y)));
|
||||
|
||||
// All four pairwise multiplications
|
||||
let mul_ll = _mm512_mul_epu32(x, y);
|
||||
let mul_lh = _mm512_mul_epu32(x, y_hi);
|
||||
let mul_hl = _mm512_mul_epu32(x_hi, y);
|
||||
let mul_hh = _mm512_mul_epu32(x_hi, y_hi);
|
||||
|
||||
// Bignum addition
|
||||
// Extract high 32 bits of mul_ll and add to mul_hl. This cannot overflow.
|
||||
let mul_ll_hi = _mm512_srli_epi64::<32>(mul_ll);
|
||||
let t0 = _mm512_add_epi64(mul_hl, mul_ll_hi);
|
||||
// Extract low 32 bits of t0 and add to mul_lh. Again, this cannot overflow.
|
||||
// Also, extract high 32 bits of t0 and add to mul_hh.
|
||||
let t0_lo = _mm512_and_si512(t0, EPSILON);
|
||||
let t0_hi = _mm512_srli_epi64::<32>(t0);
|
||||
let t1 = _mm512_add_epi64(mul_lh, t0_lo);
|
||||
let t2 = _mm512_add_epi64(mul_hh, t0_hi);
|
||||
// Lastly, extract the high 32 bits of t1 and add to t2.
|
||||
let t1_hi = _mm512_srli_epi64::<32>(t1);
|
||||
let res_hi = _mm512_add_epi64(t2, t1_hi);
|
||||
|
||||
// Form res_lo by combining the low half of mul_ll with the low half of t1 (shifted into high
|
||||
// position).
|
||||
let t1_lo = _mm512_castps_si512(_mm512_moveldup_ps(_mm512_castsi512_ps(t1)));
|
||||
let res_lo = _mm512_mask_blend_epi32(LO_32_BITS_MASK, t1_lo, mul_ll);
|
||||
|
||||
(res_hi, res_lo)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn square64(x: __m512i) -> (__m512i, __m512i) {
|
||||
// Get high 32 bits of x. See comment in mul64_64_s.
|
||||
let x_hi = _mm512_castps_si512(_mm512_movehdup_ps(_mm512_castsi512_ps(x)));
|
||||
|
||||
// All pairwise multiplications.
|
||||
let mul_ll = _mm512_mul_epu32(x, x);
|
||||
let mul_lh = _mm512_mul_epu32(x, x_hi);
|
||||
let mul_hh = _mm512_mul_epu32(x_hi, x_hi);
|
||||
|
||||
// Bignum addition, but mul_lh is shifted by 33 bits (not 32).
|
||||
let mul_ll_hi = _mm512_srli_epi64::<33>(mul_ll);
|
||||
let t0 = _mm512_add_epi64(mul_lh, mul_ll_hi);
|
||||
let t0_hi = _mm512_srli_epi64::<31>(t0);
|
||||
let res_hi = _mm512_add_epi64(mul_hh, t0_hi);
|
||||
|
||||
// Form low result by adding the mul_ll and the low 31 bits of mul_lh (shifted to the high
|
||||
// position).
|
||||
let mul_lh_lo = _mm512_slli_epi64::<33>(mul_lh);
|
||||
let res_lo = _mm512_add_epi64(mul_ll, mul_lh_lo);
|
||||
|
||||
(res_hi, res_lo)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn reduce128(x: (__m512i, __m512i)) -> __m512i {
|
||||
let (hi0, lo0) = x;
|
||||
let hi_hi0 = _mm512_srli_epi64::<32>(hi0);
|
||||
let lo1 = sub_no_double_overflow_64_64(lo0, hi_hi0);
|
||||
let t1 = _mm512_mul_epu32(hi0, EPSILON);
|
||||
let lo2 = add_no_double_overflow_64_64(lo1, t1);
|
||||
lo2
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn mul(x: __m512i, y: __m512i) -> __m512i {
|
||||
reduce128(mul64_64(x, y))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn square(x: __m512i) -> __m512i {
|
||||
reduce128(square64(x))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn interleave1(x: __m512i, y: __m512i) -> (__m512i, __m512i) {
|
||||
let a = _mm512_unpacklo_epi64(x, y);
|
||||
let b = _mm512_unpackhi_epi64(x, y);
|
||||
(a, b)
|
||||
}
|
||||
|
||||
const INTERLEAVE2_IDX_A: __m512i = unsafe {
|
||||
transmute([
|
||||
0o00u64, 0o01u64, 0o10u64, 0o11u64, 0o04u64, 0o05u64, 0o14u64, 0o15u64,
|
||||
])
|
||||
};
|
||||
const INTERLEAVE2_IDX_B: __m512i = unsafe {
|
||||
transmute([
|
||||
0o02u64, 0o03u64, 0o12u64, 0o13u64, 0o06u64, 0o07u64, 0o16u64, 0o17u64,
|
||||
])
|
||||
};
|
||||
|
||||
#[inline]
|
||||
unsafe fn interleave2(x: __m512i, y: __m512i) -> (__m512i, __m512i) {
|
||||
let a = _mm512_permutex2var_epi64(x, INTERLEAVE2_IDX_A, y);
|
||||
let b = _mm512_permutex2var_epi64(x, INTERLEAVE2_IDX_B, y);
|
||||
(a, b)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn interleave4(x: __m512i, y: __m512i) -> (__m512i, __m512i) {
|
||||
let a = _mm512_shuffle_i64x2::<0x44>(x, y);
|
||||
let b = _mm512_shuffle_i64x2::<0xee>(x, y);
|
||||
(a, b)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::arch::x86_64::avx512_goldilocks_field::Avx512GoldilocksField;
|
||||
use crate::field_types::Field64;
|
||||
use crate::goldilocks_field::GoldilocksField;
|
||||
use crate::ops::Square;
|
||||
use crate::packed_field::PackedField;
|
||||
|
||||
fn test_vals_a() -> [GoldilocksField; 8] {
|
||||
[
|
||||
GoldilocksField::from_noncanonical_u64(14479013849828404771),
|
||||
GoldilocksField::from_noncanonical_u64(9087029921428221768),
|
||||
GoldilocksField::from_noncanonical_u64(2441288194761790662),
|
||||
GoldilocksField::from_noncanonical_u64(5646033492608483824),
|
||||
GoldilocksField::from_noncanonical_u64(2779181197214900072),
|
||||
GoldilocksField::from_noncanonical_u64(2989742820063487116),
|
||||
GoldilocksField::from_noncanonical_u64(727880025589250743),
|
||||
GoldilocksField::from_noncanonical_u64(3803926346107752679),
|
||||
]
|
||||
}
|
||||
fn test_vals_b() -> [GoldilocksField; 8] {
|
||||
[
|
||||
GoldilocksField::from_noncanonical_u64(17891926589593242302),
|
||||
GoldilocksField::from_noncanonical_u64(11009798273260028228),
|
||||
GoldilocksField::from_noncanonical_u64(2028722748960791447),
|
||||
GoldilocksField::from_noncanonical_u64(7929433601095175579),
|
||||
GoldilocksField::from_noncanonical_u64(6632528436085461172),
|
||||
GoldilocksField::from_noncanonical_u64(2145438710786785567),
|
||||
GoldilocksField::from_noncanonical_u64(11821483668392863016),
|
||||
GoldilocksField::from_noncanonical_u64(15638272883309521929),
|
||||
]
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add() {
|
||||
let a_arr = test_vals_a();
|
||||
let b_arr = test_vals_b();
|
||||
|
||||
let packed_a = Avx512GoldilocksField::from_arr(a_arr);
|
||||
let packed_b = Avx512GoldilocksField::from_arr(b_arr);
|
||||
let packed_res = packed_a + packed_b;
|
||||
let arr_res = packed_res.as_arr();
|
||||
|
||||
let expected = a_arr.iter().zip(b_arr).map(|(&a, b)| a + b);
|
||||
for (exp, res) in expected.zip(arr_res) {
|
||||
assert_eq!(res, exp);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mul() {
|
||||
let a_arr = test_vals_a();
|
||||
let b_arr = test_vals_b();
|
||||
|
||||
let packed_a = Avx512GoldilocksField::from_arr(a_arr);
|
||||
let packed_b = Avx512GoldilocksField::from_arr(b_arr);
|
||||
let packed_res = packed_a * packed_b;
|
||||
let arr_res = packed_res.as_arr();
|
||||
|
||||
let expected = a_arr.iter().zip(b_arr).map(|(&a, b)| a * b);
|
||||
for (exp, res) in expected.zip(arr_res) {
|
||||
assert_eq!(res, exp);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_square() {
|
||||
let a_arr = test_vals_a();
|
||||
|
||||
let packed_a = Avx512GoldilocksField::from_arr(a_arr);
|
||||
let packed_res = packed_a.square();
|
||||
let arr_res = packed_res.as_arr();
|
||||
|
||||
let expected = a_arr.iter().map(|&a| a.square());
|
||||
for (exp, res) in expected.zip(arr_res) {
|
||||
assert_eq!(res, exp);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_neg() {
|
||||
let a_arr = test_vals_a();
|
||||
|
||||
let packed_a = Avx512GoldilocksField::from_arr(a_arr);
|
||||
let packed_res = -packed_a;
|
||||
let arr_res = packed_res.as_arr();
|
||||
|
||||
let expected = a_arr.iter().map(|&a| -a);
|
||||
for (exp, res) in expected.zip(arr_res) {
|
||||
assert_eq!(res, exp);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sub() {
|
||||
let a_arr = test_vals_a();
|
||||
let b_arr = test_vals_b();
|
||||
|
||||
let packed_a = Avx512GoldilocksField::from_arr(a_arr);
|
||||
let packed_b = Avx512GoldilocksField::from_arr(b_arr);
|
||||
let packed_res = packed_a - packed_b;
|
||||
let arr_res = packed_res.as_arr();
|
||||
|
||||
let expected = a_arr.iter().zip(b_arr).map(|(&a, b)| a - b);
|
||||
for (exp, res) in expected.zip(arr_res) {
|
||||
assert_eq!(res, exp);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_interleave_is_involution() {
|
||||
let a_arr = test_vals_a();
|
||||
let b_arr = test_vals_b();
|
||||
|
||||
let packed_a = Avx512GoldilocksField::from_arr(a_arr);
|
||||
let packed_b = Avx512GoldilocksField::from_arr(b_arr);
|
||||
{
|
||||
// Interleave, then deinterleave.
|
||||
let (x, y) = packed_a.interleave(packed_b, 1);
|
||||
let (res_a, res_b) = x.interleave(y, 1);
|
||||
assert_eq!(res_a.as_arr(), a_arr);
|
||||
assert_eq!(res_b.as_arr(), b_arr);
|
||||
}
|
||||
{
|
||||
let (x, y) = packed_a.interleave(packed_b, 2);
|
||||
let (res_a, res_b) = x.interleave(y, 2);
|
||||
assert_eq!(res_a.as_arr(), a_arr);
|
||||
assert_eq!(res_b.as_arr(), b_arr);
|
||||
}
|
||||
{
|
||||
let (x, y) = packed_a.interleave(packed_b, 4);
|
||||
let (res_a, res_b) = x.interleave(y, 4);
|
||||
assert_eq!(res_a.as_arr(), a_arr);
|
||||
assert_eq!(res_b.as_arr(), b_arr);
|
||||
}
|
||||
{
|
||||
let (x, y) = packed_a.interleave(packed_b, 8);
|
||||
let (res_a, res_b) = x.interleave(y, 8);
|
||||
assert_eq!(res_a.as_arr(), a_arr);
|
||||
assert_eq!(res_b.as_arr(), b_arr);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_interleave() {
|
||||
let in_a: [GoldilocksField; 8] = [
|
||||
GoldilocksField::from_noncanonical_u64(00),
|
||||
GoldilocksField::from_noncanonical_u64(01),
|
||||
GoldilocksField::from_noncanonical_u64(02),
|
||||
GoldilocksField::from_noncanonical_u64(03),
|
||||
GoldilocksField::from_noncanonical_u64(04),
|
||||
GoldilocksField::from_noncanonical_u64(05),
|
||||
GoldilocksField::from_noncanonical_u64(06),
|
||||
GoldilocksField::from_noncanonical_u64(07),
|
||||
];
|
||||
let in_b: [GoldilocksField; 8] = [
|
||||
GoldilocksField::from_noncanonical_u64(10),
|
||||
GoldilocksField::from_noncanonical_u64(11),
|
||||
GoldilocksField::from_noncanonical_u64(12),
|
||||
GoldilocksField::from_noncanonical_u64(13),
|
||||
GoldilocksField::from_noncanonical_u64(14),
|
||||
GoldilocksField::from_noncanonical_u64(15),
|
||||
GoldilocksField::from_noncanonical_u64(16),
|
||||
GoldilocksField::from_noncanonical_u64(17),
|
||||
];
|
||||
let int1_a: [GoldilocksField; 8] = [
|
||||
GoldilocksField::from_noncanonical_u64(00),
|
||||
GoldilocksField::from_noncanonical_u64(10),
|
||||
GoldilocksField::from_noncanonical_u64(02),
|
||||
GoldilocksField::from_noncanonical_u64(12),
|
||||
GoldilocksField::from_noncanonical_u64(04),
|
||||
GoldilocksField::from_noncanonical_u64(14),
|
||||
GoldilocksField::from_noncanonical_u64(06),
|
||||
GoldilocksField::from_noncanonical_u64(16),
|
||||
];
|
||||
let int1_b: [GoldilocksField; 8] = [
|
||||
GoldilocksField::from_noncanonical_u64(01),
|
||||
GoldilocksField::from_noncanonical_u64(11),
|
||||
GoldilocksField::from_noncanonical_u64(03),
|
||||
GoldilocksField::from_noncanonical_u64(13),
|
||||
GoldilocksField::from_noncanonical_u64(05),
|
||||
GoldilocksField::from_noncanonical_u64(15),
|
||||
GoldilocksField::from_noncanonical_u64(07),
|
||||
GoldilocksField::from_noncanonical_u64(17),
|
||||
];
|
||||
let int2_a: [GoldilocksField; 8] = [
|
||||
GoldilocksField::from_noncanonical_u64(00),
|
||||
GoldilocksField::from_noncanonical_u64(01),
|
||||
GoldilocksField::from_noncanonical_u64(10),
|
||||
GoldilocksField::from_noncanonical_u64(11),
|
||||
GoldilocksField::from_noncanonical_u64(04),
|
||||
GoldilocksField::from_noncanonical_u64(05),
|
||||
GoldilocksField::from_noncanonical_u64(14),
|
||||
GoldilocksField::from_noncanonical_u64(15),
|
||||
];
|
||||
let int2_b: [GoldilocksField; 8] = [
|
||||
GoldilocksField::from_noncanonical_u64(02),
|
||||
GoldilocksField::from_noncanonical_u64(03),
|
||||
GoldilocksField::from_noncanonical_u64(12),
|
||||
GoldilocksField::from_noncanonical_u64(13),
|
||||
GoldilocksField::from_noncanonical_u64(06),
|
||||
GoldilocksField::from_noncanonical_u64(07),
|
||||
GoldilocksField::from_noncanonical_u64(16),
|
||||
GoldilocksField::from_noncanonical_u64(17),
|
||||
];
|
||||
let int4_a: [GoldilocksField; 8] = [
|
||||
GoldilocksField::from_noncanonical_u64(00),
|
||||
GoldilocksField::from_noncanonical_u64(01),
|
||||
GoldilocksField::from_noncanonical_u64(02),
|
||||
GoldilocksField::from_noncanonical_u64(03),
|
||||
GoldilocksField::from_noncanonical_u64(10),
|
||||
GoldilocksField::from_noncanonical_u64(11),
|
||||
GoldilocksField::from_noncanonical_u64(12),
|
||||
GoldilocksField::from_noncanonical_u64(13),
|
||||
];
|
||||
let int4_b: [GoldilocksField; 8] = [
|
||||
GoldilocksField::from_noncanonical_u64(04),
|
||||
GoldilocksField::from_noncanonical_u64(05),
|
||||
GoldilocksField::from_noncanonical_u64(06),
|
||||
GoldilocksField::from_noncanonical_u64(07),
|
||||
GoldilocksField::from_noncanonical_u64(14),
|
||||
GoldilocksField::from_noncanonical_u64(15),
|
||||
GoldilocksField::from_noncanonical_u64(16),
|
||||
GoldilocksField::from_noncanonical_u64(17),
|
||||
];
|
||||
|
||||
let packed_a = Avx512GoldilocksField::from_arr(in_a);
|
||||
let packed_b = Avx512GoldilocksField::from_arr(in_b);
|
||||
{
|
||||
let (x1, y1) = packed_a.interleave(packed_b, 1);
|
||||
assert_eq!(x1.as_arr(), int1_a);
|
||||
assert_eq!(y1.as_arr(), int1_b);
|
||||
}
|
||||
{
|
||||
let (x2, y2) = packed_a.interleave(packed_b, 2);
|
||||
assert_eq!(x2.as_arr(), int2_a);
|
||||
assert_eq!(y2.as_arr(), int2_b);
|
||||
}
|
||||
{
|
||||
let (x4, y4) = packed_a.interleave(packed_b, 4);
|
||||
assert_eq!(x4.as_arr(), int4_a);
|
||||
assert_eq!(y4.as_arr(), int4_b);
|
||||
}
|
||||
{
|
||||
let (x8, y8) = packed_a.interleave(packed_b, 8);
|
||||
assert_eq!(x8.as_arr(), in_a);
|
||||
assert_eq!(y8.as_arr(), in_b);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,2 +1,20 @@
|
||||
#[cfg(target_feature = "avx2")]
|
||||
#[cfg(all(
|
||||
target_feature = "avx2",
|
||||
not(all(
|
||||
target_feature = "avx512bw",
|
||||
target_feature = "avx512cd",
|
||||
target_feature = "avx512dq",
|
||||
target_feature = "avx512f",
|
||||
target_feature = "avx512vl"
|
||||
))
|
||||
))]
|
||||
pub mod avx2_goldilocks_field;
|
||||
|
||||
#[cfg(all(
|
||||
target_feature = "avx512bw",
|
||||
target_feature = "avx512cd",
|
||||
target_feature = "avx512dq",
|
||||
target_feature = "avx512f",
|
||||
target_feature = "avx512vl"
|
||||
))]
|
||||
pub mod avx512_goldilocks_field;
|
||||
|
||||
@ -95,10 +95,6 @@ impl<F: Extendable<2>> Field for QuadraticExtension<F> {
|
||||
Self([F::from_biguint(low), F::from_biguint(high)])
|
||||
}
|
||||
|
||||
fn to_biguint(&self) -> BigUint {
|
||||
self.0[0].to_biguint() + F::order() * self.0[1].to_biguint()
|
||||
}
|
||||
|
||||
fn from_canonical_u64(n: u64) -> Self {
|
||||
F::from_canonical_u64(n).into()
|
||||
}
|
||||
|
||||
@ -107,14 +107,6 @@ impl<F: Extendable<4>> Field for QuarticExtension<F> {
|
||||
])
|
||||
}
|
||||
|
||||
fn to_biguint(&self) -> BigUint {
|
||||
let mut result = self.0[3].to_biguint();
|
||||
result = result * F::order() + self.0[2].to_biguint();
|
||||
result = result * F::order() + self.0[1].to_biguint();
|
||||
result = result * F::order() + self.0[0].to_biguint();
|
||||
result
|
||||
}
|
||||
|
||||
fn from_canonical_u64(n: u64) -> Self {
|
||||
F::from_canonical_u64(n).into()
|
||||
}
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
use std::cmp::{max, min};
|
||||
use std::option::Option;
|
||||
|
||||
use plonky2_util::{log2_strict, reverse_index_bits};
|
||||
use plonky2_util::{log2_strict, reverse_index_bits_in_place};
|
||||
use unroll::unroll_for_loops;
|
||||
|
||||
use crate::field_types::Field;
|
||||
@ -34,10 +34,10 @@ pub fn fft_root_table<F: Field>(n: usize) -> FftRootTable<F> {
|
||||
|
||||
#[inline]
|
||||
fn fft_dispatch<F: Field>(
|
||||
input: &[F],
|
||||
input: &mut [F],
|
||||
zero_factor: Option<usize>,
|
||||
root_table: Option<&FftRootTable<F>>,
|
||||
) -> Vec<F> {
|
||||
) {
|
||||
let computed_root_table = if root_table.is_some() {
|
||||
None
|
||||
} else {
|
||||
@ -45,33 +45,32 @@ fn fft_dispatch<F: Field>(
|
||||
};
|
||||
let used_root_table = root_table.or(computed_root_table.as_ref()).unwrap();
|
||||
|
||||
fft_classic(input, zero_factor.unwrap_or(0), used_root_table)
|
||||
fft_classic(input, zero_factor.unwrap_or(0), used_root_table);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn fft<F: Field>(poly: &PolynomialCoeffs<F>) -> PolynomialValues<F> {
|
||||
pub fn fft<F: Field>(poly: PolynomialCoeffs<F>) -> PolynomialValues<F> {
|
||||
fft_with_options(poly, None, None)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn fft_with_options<F: Field>(
|
||||
poly: &PolynomialCoeffs<F>,
|
||||
poly: PolynomialCoeffs<F>,
|
||||
zero_factor: Option<usize>,
|
||||
root_table: Option<&FftRootTable<F>>,
|
||||
) -> PolynomialValues<F> {
|
||||
let PolynomialCoeffs { coeffs } = poly;
|
||||
PolynomialValues {
|
||||
values: fft_dispatch(coeffs, zero_factor, root_table),
|
||||
}
|
||||
let PolynomialCoeffs { coeffs: mut buffer } = poly;
|
||||
fft_dispatch(&mut buffer, zero_factor, root_table);
|
||||
PolynomialValues { values: buffer }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn ifft<F: Field>(poly: &PolynomialValues<F>) -> PolynomialCoeffs<F> {
|
||||
pub fn ifft<F: Field>(poly: PolynomialValues<F>) -> PolynomialCoeffs<F> {
|
||||
ifft_with_options(poly, None, None)
|
||||
}
|
||||
|
||||
pub fn ifft_with_options<F: Field>(
|
||||
poly: &PolynomialValues<F>,
|
||||
poly: PolynomialValues<F>,
|
||||
zero_factor: Option<usize>,
|
||||
root_table: Option<&FftRootTable<F>>,
|
||||
) -> PolynomialCoeffs<F> {
|
||||
@ -79,20 +78,20 @@ pub fn ifft_with_options<F: Field>(
|
||||
let lg_n = log2_strict(n);
|
||||
let n_inv = F::inverse_2exp(lg_n);
|
||||
|
||||
let PolynomialValues { values } = poly;
|
||||
let mut coeffs = fft_dispatch(values, zero_factor, root_table);
|
||||
let PolynomialValues { values: mut buffer } = poly;
|
||||
fft_dispatch(&mut buffer, zero_factor, root_table);
|
||||
|
||||
// We reverse all values except the first, and divide each by n.
|
||||
coeffs[0] *= n_inv;
|
||||
coeffs[n / 2] *= n_inv;
|
||||
buffer[0] *= n_inv;
|
||||
buffer[n / 2] *= n_inv;
|
||||
for i in 1..(n / 2) {
|
||||
let j = n - i;
|
||||
let coeffs_i = coeffs[j] * n_inv;
|
||||
let coeffs_j = coeffs[i] * n_inv;
|
||||
coeffs[i] = coeffs_i;
|
||||
coeffs[j] = coeffs_j;
|
||||
let coeffs_i = buffer[j] * n_inv;
|
||||
let coeffs_j = buffer[i] * n_inv;
|
||||
buffer[i] = coeffs_i;
|
||||
buffer[j] = coeffs_j;
|
||||
}
|
||||
PolynomialCoeffs { coeffs }
|
||||
PolynomialCoeffs { coeffs: buffer }
|
||||
}
|
||||
|
||||
/// Generic FFT implementation that works with both scalar and packed inputs.
|
||||
@ -167,8 +166,8 @@ fn fft_classic_simd<P: PackedField>(
|
||||
/// The parameter r signifies that the first 1/2^r of the entries of
|
||||
/// input may be non-zero, but the last 1 - 1/2^r entries are
|
||||
/// definitely zero.
|
||||
pub(crate) fn fft_classic<F: Field>(input: &[F], r: usize, root_table: &FftRootTable<F>) -> Vec<F> {
|
||||
let mut values = reverse_index_bits(input);
|
||||
pub(crate) fn fft_classic<F: Field>(values: &mut [F], r: usize, root_table: &FftRootTable<F>) {
|
||||
reverse_index_bits_in_place(values);
|
||||
|
||||
let n = values.len();
|
||||
let lg_n = log2_strict(n);
|
||||
@ -200,11 +199,10 @@ pub(crate) fn fft_classic<F: Field>(input: &[F], r: usize, root_table: &FftRootT
|
||||
if lg_n <= lg_packed_width {
|
||||
// Need the slice to be at least the width of two packed vectors for the vectorized version
|
||||
// to work. Do this tiny problem in scalar.
|
||||
fft_classic_simd::<F>(&mut values[..], r, lg_n, root_table);
|
||||
fft_classic_simd::<F>(values, r, lg_n, root_table);
|
||||
} else {
|
||||
fft_classic_simd::<<F as Packable>::Packing>(&mut values[..], r, lg_n, root_table);
|
||||
fft_classic_simd::<<F as Packable>::Packing>(values, r, lg_n, root_table);
|
||||
}
|
||||
values
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@ -231,10 +229,10 @@ mod tests {
|
||||
assert_eq!(coeffs.len(), degree_padded);
|
||||
let coefficients = PolynomialCoeffs { coeffs };
|
||||
|
||||
let points = fft(&coefficients);
|
||||
let points = fft(coefficients.clone());
|
||||
assert_eq!(points, evaluate_naive(&coefficients));
|
||||
|
||||
let interpolated_coefficients = ifft(&points);
|
||||
let interpolated_coefficients = ifft(points);
|
||||
for i in 0..degree {
|
||||
assert_eq!(interpolated_coefficients.coeffs[i], coefficients.coeffs[i]);
|
||||
}
|
||||
@ -245,7 +243,10 @@ mod tests {
|
||||
for r in 0..4 {
|
||||
// expand coefficients by factor 2^r by filling with zeros
|
||||
let zero_tail = coefficients.lde(r);
|
||||
assert_eq!(fft(&zero_tail), fft_with_options(&zero_tail, Some(r), None));
|
||||
assert_eq!(
|
||||
fft(zero_tail.clone()),
|
||||
fft_with_options(zero_tail, Some(r), None)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -264,17 +264,28 @@ pub trait Field:
|
||||
subgroup.into_iter().map(|x| x * shift).collect()
|
||||
}
|
||||
|
||||
// TODO: move these to a new `PrimeField` trait (for all prime fields, not just 64-bit ones)
|
||||
// TODO: The current behavior for composite fields doesn't seem natural or useful.
|
||||
// Rename to `from_noncanonical_biguint` and have it return `n % Self::characteristic()`.
|
||||
fn from_biguint(n: BigUint) -> Self;
|
||||
|
||||
fn to_biguint(&self) -> BigUint;
|
||||
|
||||
/// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`.
|
||||
// TODO: Should probably be unsafe.
|
||||
fn from_canonical_u64(n: u64) -> Self;
|
||||
|
||||
/// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`.
|
||||
// TODO: Should probably be unsafe.
|
||||
fn from_canonical_u32(n: u32) -> Self {
|
||||
Self::from_canonical_u64(n as u64)
|
||||
}
|
||||
|
||||
/// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`.
|
||||
// TODO: Should probably be unsafe.
|
||||
fn from_canonical_u16(n: u16) -> Self {
|
||||
Self::from_canonical_u64(n as u64)
|
||||
}
|
||||
|
||||
/// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`.
|
||||
// TODO: Should probably be unsafe.
|
||||
fn from_canonical_usize(n: usize) -> Self {
|
||||
Self::from_canonical_u64(n as u64)
|
||||
}
|
||||
@ -283,11 +294,11 @@ pub trait Field:
|
||||
Self::from_canonical_u64(b as u64)
|
||||
}
|
||||
|
||||
/// Returns `n % Self::CHARACTERISTIC`.
|
||||
/// Returns `n % Self::characteristic()`.
|
||||
fn from_noncanonical_u128(n: u128) -> Self;
|
||||
|
||||
/// Returns `n % Self::CHARACTERISTIC`. May be cheaper than from_noncanonical_u128 when we know
|
||||
/// that n < 2 ** 96.
|
||||
/// Returns `n % Self::characteristic()`. May be cheaper than from_noncanonical_u128 when we know
|
||||
/// that `n < 2 ** 96`.
|
||||
#[inline]
|
||||
fn from_noncanonical_u96((n_lo, n_hi): (u64, u32)) -> Self {
|
||||
// Default implementation.
|
||||
@ -399,22 +410,26 @@ pub trait Field:
|
||||
}
|
||||
}
|
||||
|
||||
/// A finite field of prime order less than 2^64.
|
||||
pub trait PrimeField: Field {
|
||||
fn to_canonical_biguint(&self) -> BigUint;
|
||||
}
|
||||
|
||||
/// A finite field of order less than 2^64.
|
||||
pub trait Field64: Field {
|
||||
const ORDER: u64;
|
||||
|
||||
fn to_canonical_u64(&self) -> u64;
|
||||
|
||||
fn to_noncanonical_u64(&self) -> u64;
|
||||
|
||||
/// Returns `x % Self::CHARACTERISTIC`.
|
||||
// TODO: Move to `Field`.
|
||||
fn from_noncanonical_u64(n: u64) -> Self;
|
||||
|
||||
#[inline]
|
||||
// TODO: Move to `Field`.
|
||||
fn add_one(&self) -> Self {
|
||||
unsafe { self.add_canonical_u64(1) }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
// TODO: Move to `Field`.
|
||||
fn sub_one(&self) -> Self {
|
||||
unsafe { self.sub_canonical_u64(1) }
|
||||
}
|
||||
@ -423,6 +438,7 @@ pub trait PrimeField: Field {
|
||||
/// Equivalent to *self + Self::from_canonical_u64(rhs), but may be cheaper. The caller must
|
||||
/// ensure that 0 <= rhs < Self::ORDER. The function may return incorrect results if this
|
||||
/// precondition is not met. It is marked unsafe for this reason.
|
||||
// TODO: Move to `Field`.
|
||||
#[inline]
|
||||
unsafe fn add_canonical_u64(&self, rhs: u64) -> Self {
|
||||
// Default implementation.
|
||||
@ -433,6 +449,7 @@ pub trait PrimeField: Field {
|
||||
/// Equivalent to *self - Self::from_canonical_u64(rhs), but may be cheaper. The caller must
|
||||
/// ensure that 0 <= rhs < Self::ORDER. The function may return incorrect results if this
|
||||
/// precondition is not met. It is marked unsafe for this reason.
|
||||
// TODO: Move to `Field`.
|
||||
#[inline]
|
||||
unsafe fn sub_canonical_u64(&self, rhs: u64) -> Self {
|
||||
// Default implementation.
|
||||
@ -440,6 +457,13 @@ pub trait PrimeField: Field {
|
||||
}
|
||||
}
|
||||
|
||||
/// A finite field of prime order less than 2^64.
|
||||
pub trait PrimeField64: PrimeField + Field64 {
|
||||
fn to_canonical_u64(&self) -> u64;
|
||||
|
||||
fn to_noncanonical_u64(&self) -> u64;
|
||||
}
|
||||
|
||||
/// An iterator over the powers of a certain base element `b`: `b^0, b^1, b^2, ...`.
|
||||
#[derive(Clone)]
|
||||
pub struct Powers<F: Field> {
|
||||
|
||||
@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize};
|
||||
use crate::extension_field::quadratic::QuadraticExtension;
|
||||
use crate::extension_field::quartic::QuarticExtension;
|
||||
use crate::extension_field::{Extendable, Frobenius};
|
||||
use crate::field_types::{Field, PrimeField};
|
||||
use crate::field_types::{Field, Field64, PrimeField, PrimeField64};
|
||||
use crate::inversion::try_inverse_u64;
|
||||
|
||||
const EPSILON: u64 = (1 << 32) - 1;
|
||||
@ -98,10 +98,6 @@ impl Field for GoldilocksField {
|
||||
Self(n.mod_floor(&Self::order()).to_u64_digits()[0])
|
||||
}
|
||||
|
||||
fn to_biguint(&self) -> BigUint {
|
||||
self.to_canonical_u64().into()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn from_canonical_u64(n: u64) -> Self {
|
||||
debug_assert!(n < Self::ORDER);
|
||||
@ -124,22 +120,14 @@ impl Field for GoldilocksField {
|
||||
}
|
||||
|
||||
impl PrimeField for GoldilocksField {
|
||||
fn to_canonical_biguint(&self) -> BigUint {
|
||||
self.to_canonical_u64().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl Field64 for GoldilocksField {
|
||||
const ORDER: u64 = 0xFFFFFFFF00000001;
|
||||
|
||||
#[inline]
|
||||
fn to_canonical_u64(&self) -> u64 {
|
||||
let mut c = self.0;
|
||||
// We only need one condition subtraction, since 2 * ORDER would not fit in a u64.
|
||||
if c >= Self::ORDER {
|
||||
c -= Self::ORDER;
|
||||
}
|
||||
c
|
||||
}
|
||||
|
||||
fn to_noncanonical_u64(&self) -> u64 {
|
||||
self.0
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn from_noncanonical_u64(n: u64) -> Self {
|
||||
Self(n)
|
||||
@ -160,6 +148,22 @@ impl PrimeField for GoldilocksField {
|
||||
}
|
||||
}
|
||||
|
||||
impl PrimeField64 for GoldilocksField {
|
||||
#[inline]
|
||||
fn to_canonical_u64(&self) -> u64 {
|
||||
let mut c = self.0;
|
||||
// We only need one condition subtraction, since 2 * ORDER would not fit in a u64.
|
||||
if c >= Self::ORDER {
|
||||
c -= Self::ORDER;
|
||||
}
|
||||
c
|
||||
}
|
||||
|
||||
fn to_noncanonical_u64(&self) -> u64 {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Neg for GoldilocksField {
|
||||
type Output = Self;
|
||||
|
||||
|
||||
@ -19,7 +19,7 @@ pub fn interpolant<F: Field>(points: &[(F, F)]) -> PolynomialCoeffs<F> {
|
||||
.map(|x| interpolate(points, x, &barycentric_weights))
|
||||
.collect();
|
||||
|
||||
let mut coeffs = ifft(&PolynomialValues {
|
||||
let mut coeffs = ifft(PolynomialValues {
|
||||
values: subgroup_evals,
|
||||
});
|
||||
coeffs.trim();
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
use crate::field_types::PrimeField;
|
||||
use crate::field_types::PrimeField64;
|
||||
|
||||
/// This is a 'safe' iteration for the modular inversion algorithm. It
|
||||
/// is safe in the sense that it will produce the right answer even
|
||||
@ -63,7 +63,7 @@ unsafe fn unsafe_iteration(f: &mut u64, g: &mut u64, c: &mut i128, d: &mut i128,
|
||||
/// Elliptic and Hyperelliptic Cryptography, Algorithms 11.6
|
||||
/// and 11.12.
|
||||
#[allow(clippy::many_single_char_names)]
|
||||
pub(crate) fn try_inverse_u64<F: PrimeField>(x: &F) -> Option<F> {
|
||||
pub(crate) fn try_inverse_u64<F: PrimeField64>(x: &F) -> Option<F> {
|
||||
let mut f = x.to_noncanonical_u64();
|
||||
let mut g = F::ORDER;
|
||||
// NB: These two are very rarely such that their absolute
|
||||
|
||||
@ -7,6 +7,7 @@
|
||||
#![allow(clippy::return_self_not_must_use)]
|
||||
#![feature(generic_const_exprs)]
|
||||
#![feature(specialization)]
|
||||
#![feature(stdsimd)]
|
||||
|
||||
pub(crate) mod arch;
|
||||
pub mod batch_util;
|
||||
@ -23,6 +24,7 @@ pub mod packed_field;
|
||||
pub mod polynomial;
|
||||
pub mod secp256k1_base;
|
||||
pub mod secp256k1_scalar;
|
||||
pub mod zero_poly_coset;
|
||||
|
||||
#[cfg(test)]
|
||||
mod field_testing;
|
||||
|
||||
@ -12,7 +12,29 @@ impl<F: Field> Packable for F {
|
||||
default type Packing = Self;
|
||||
}
|
||||
|
||||
#[cfg(all(target_arch = "x86_64", target_feature = "avx2"))]
|
||||
#[cfg(all(
|
||||
target_arch = "x86_64",
|
||||
target_feature = "avx2",
|
||||
not(all(
|
||||
target_feature = "avx512bw",
|
||||
target_feature = "avx512cd",
|
||||
target_feature = "avx512dq",
|
||||
target_feature = "avx512f",
|
||||
target_feature = "avx512vl"
|
||||
))
|
||||
))]
|
||||
impl Packable for crate::goldilocks_field::GoldilocksField {
|
||||
type Packing = crate::arch::x86_64::avx2_goldilocks_field::Avx2GoldilocksField;
|
||||
}
|
||||
|
||||
#[cfg(all(
|
||||
target_arch = "x86_64",
|
||||
target_feature = "avx512bw",
|
||||
target_feature = "avx512cd",
|
||||
target_feature = "avx512dq",
|
||||
target_feature = "avx512f",
|
||||
target_feature = "avx512vl"
|
||||
))]
|
||||
impl Packable for crate::goldilocks_field::GoldilocksField {
|
||||
type Packing = crate::arch::x86_64::avx512_goldilocks_field::Avx512GoldilocksField;
|
||||
}
|
||||
|
||||
@ -67,9 +67,9 @@ impl<F: Field> PolynomialCoeffs<F> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Let `self=p(X)`, this returns `(p(X)-p(z))/(X-z)` and `p(z)`.
|
||||
/// Let `self=p(X)`, this returns `(p(X)-p(z))/(X-z)`.
|
||||
/// See https://en.wikipedia.org/wiki/Horner%27s_method
|
||||
pub fn divide_by_linear(&self, z: F) -> (PolynomialCoeffs<F>, F) {
|
||||
pub fn divide_by_linear(&self, z: F) -> PolynomialCoeffs<F> {
|
||||
let mut bs = self
|
||||
.coeffs
|
||||
.iter()
|
||||
@ -79,9 +79,9 @@ impl<F: Field> PolynomialCoeffs<F> {
|
||||
Some(*acc)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let ev = bs.pop().unwrap_or(F::ZERO);
|
||||
bs.pop();
|
||||
bs.reverse();
|
||||
(Self { coeffs: bs }, ev)
|
||||
Self { coeffs: bs }
|
||||
}
|
||||
|
||||
/// Computes the inverse of `self` modulo `x^n`.
|
||||
@ -125,7 +125,7 @@ impl<F: Field> PolynomialCoeffs<F> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::time::Instant;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
||||
use crate::extension_field::quartic::QuarticExtension;
|
||||
use crate::field_types::Field;
|
||||
@ -133,47 +133,17 @@ mod tests {
|
||||
use crate::polynomial::PolynomialCoeffs;
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_division_by_linear() {
|
||||
type F = QuarticExtension<GoldilocksField>;
|
||||
let n = 1_000_000;
|
||||
let n = thread_rng().gen_range(1..1000);
|
||||
let poly = PolynomialCoeffs::new(F::rand_vec(n));
|
||||
let z = F::rand();
|
||||
let ev = poly.eval(z);
|
||||
|
||||
let timer = Instant::now();
|
||||
let (_quotient, ev2) = poly.div_rem(&PolynomialCoeffs::new(vec![-z, F::ONE]));
|
||||
println!("{:.3}s for usual", timer.elapsed().as_secs_f32());
|
||||
assert_eq!(ev2.trimmed().coeffs, vec![ev]);
|
||||
|
||||
let timer = Instant::now();
|
||||
let (quotient, ev3) = poly.div_rem_long_division(&PolynomialCoeffs::new(vec![-z, F::ONE]));
|
||||
println!("{:.3}s for long division", timer.elapsed().as_secs_f32());
|
||||
assert_eq!(ev3.trimmed().coeffs, vec![ev]);
|
||||
|
||||
let timer = Instant::now();
|
||||
let horn = poly.divide_by_linear(z);
|
||||
println!("{:.3}s for Horner", timer.elapsed().as_secs_f32());
|
||||
assert_eq!((quotient, ev), horn);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_division_by_quadratic() {
|
||||
type F = QuarticExtension<GoldilocksField>;
|
||||
let n = 1_000_000;
|
||||
let poly = PolynomialCoeffs::new(F::rand_vec(n));
|
||||
let quad = PolynomialCoeffs::new(F::rand_vec(2));
|
||||
|
||||
let timer = Instant::now();
|
||||
let (quotient0, rem0) = poly.div_rem(&quad);
|
||||
println!("{:.3}s for usual", timer.elapsed().as_secs_f32());
|
||||
|
||||
let timer = Instant::now();
|
||||
let (quotient1, rem1) = poly.div_rem_long_division(&quad);
|
||||
println!("{:.3}s for long division", timer.elapsed().as_secs_f32());
|
||||
|
||||
assert_eq!(quotient0.trimmed(), quotient1.trimmed());
|
||||
assert_eq!(rem0.trimmed(), rem1.trimmed());
|
||||
let quotient = poly.divide_by_linear(z);
|
||||
assert_eq!(
|
||||
poly,
|
||||
&("ient * &vec![-z, F::ONE].into()) + &vec![ev].into() // `quotient * (X-z) + ev`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -26,17 +26,28 @@ impl<F: Field> PolynomialValues<F> {
|
||||
PolynomialValues { values }
|
||||
}
|
||||
|
||||
pub fn zero(len: usize) -> Self {
|
||||
Self::new(vec![F::ZERO; len])
|
||||
}
|
||||
|
||||
/// Returns the polynomial whole value is one at the given index, and zero elsewhere.
|
||||
pub fn selector(len: usize, index: usize) -> Self {
|
||||
let mut result = Self::zero(len);
|
||||
result.values[index] = F::ONE;
|
||||
result
|
||||
}
|
||||
|
||||
/// The number of values stored.
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
pub fn len(&self) -> usize {
|
||||
self.values.len()
|
||||
}
|
||||
|
||||
pub fn ifft(&self) -> PolynomialCoeffs<F> {
|
||||
pub fn ifft(self) -> PolynomialCoeffs<F> {
|
||||
ifft(self)
|
||||
}
|
||||
|
||||
/// Returns the polynomial whose evaluation on the coset `shift*H` is `self`.
|
||||
pub fn coset_ifft(&self, shift: F) -> PolynomialCoeffs<F> {
|
||||
pub fn coset_ifft(self, shift: F) -> PolynomialCoeffs<F> {
|
||||
let mut shifted_coeffs = self.ifft();
|
||||
shifted_coeffs
|
||||
.coeffs
|
||||
@ -52,9 +63,15 @@ impl<F: Field> PolynomialValues<F> {
|
||||
polys.into_iter().map(|p| p.lde(rate_bits)).collect()
|
||||
}
|
||||
|
||||
pub fn lde(&self, rate_bits: usize) -> Self {
|
||||
pub fn lde(self, rate_bits: usize) -> Self {
|
||||
let coeffs = ifft(self).lde(rate_bits);
|
||||
fft_with_options(&coeffs, Some(rate_bits), None)
|
||||
fft_with_options(coeffs, Some(rate_bits), None)
|
||||
}
|
||||
|
||||
/// Low-degree extend `Self` (seen as evaluations over the subgroup) onto a coset.
|
||||
pub fn lde_onto_coset(self, rate_bits: usize) -> Self {
|
||||
let coeffs = ifft(self).lde(rate_bits);
|
||||
coeffs.coset_fft_with_options(F::coset_shift(), Some(rate_bits), None)
|
||||
}
|
||||
|
||||
pub fn degree(&self) -> usize {
|
||||
@ -64,7 +81,7 @@ impl<F: Field> PolynomialValues<F> {
|
||||
}
|
||||
|
||||
pub fn degree_plus_one(&self) -> usize {
|
||||
self.ifft().degree_plus_one()
|
||||
self.clone().ifft().degree_plus_one()
|
||||
}
|
||||
}
|
||||
|
||||
@ -180,12 +197,21 @@ impl<F: Field> PolynomialCoeffs<F> {
|
||||
poly
|
||||
}
|
||||
|
||||
/// Removes leading zero coefficients.
|
||||
/// Removes any leading zero coefficients.
|
||||
pub fn trim(&mut self) {
|
||||
self.coeffs.truncate(self.degree_plus_one());
|
||||
}
|
||||
|
||||
/// Removes leading zero coefficients.
|
||||
/// Removes some leading zero coefficients, such that a desired length is reached. Fails if a
|
||||
/// nonzero coefficient is encountered before then.
|
||||
pub fn trim_to_len(&mut self, len: usize) -> Result<()> {
|
||||
ensure!(self.len() >= len);
|
||||
ensure!(self.coeffs[len..].iter().all(F::is_zero));
|
||||
self.coeffs.truncate(len);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Removes any leading zero coefficients.
|
||||
pub fn trimmed(&self) -> Self {
|
||||
let coeffs = self.coeffs[..self.degree_plus_one()].to_vec();
|
||||
Self { coeffs }
|
||||
@ -213,12 +239,12 @@ impl<F: Field> PolynomialCoeffs<F> {
|
||||
Self::new(self.trimmed().coeffs.into_iter().rev().collect())
|
||||
}
|
||||
|
||||
pub fn fft(&self) -> PolynomialValues<F> {
|
||||
pub fn fft(self) -> PolynomialValues<F> {
|
||||
fft(self)
|
||||
}
|
||||
|
||||
pub fn fft_with_options(
|
||||
&self,
|
||||
self,
|
||||
zero_factor: Option<usize>,
|
||||
root_table: Option<&FftRootTable<F>>,
|
||||
) -> PolynomialValues<F> {
|
||||
@ -386,7 +412,7 @@ impl<F: Field> Mul for &PolynomialCoeffs<F> {
|
||||
.zip(b_evals.values)
|
||||
.map(|(pa, pb)| pa * pb)
|
||||
.collect();
|
||||
ifft(&mul_evals.into())
|
||||
ifft(mul_evals.into())
|
||||
}
|
||||
}
|
||||
|
||||
@ -454,7 +480,7 @@ mod tests {
|
||||
let n = 1 << k;
|
||||
let evals = PolynomialValues::new(F::rand_vec(n));
|
||||
let shift = F::rand();
|
||||
let coeffs = evals.coset_ifft(shift);
|
||||
let coeffs = evals.clone().coset_ifft(shift);
|
||||
|
||||
let generator = F::primitive_root_of_unity(k);
|
||||
let naive_coset_evals = F::cyclic_subgroup_coset_known_order(generator, shift, n)
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
use crate::field_types::PrimeField;
|
||||
use crate::field_types::PrimeField64;
|
||||
|
||||
/// Generates a series of non-negative integers less than `modulus` which cover a range of
|
||||
/// interesting test values.
|
||||
@ -19,7 +19,7 @@ pub fn test_inputs(modulus: u64) -> Vec<u64> {
|
||||
/// word_bits)` and panic if the two resulting vectors differ.
|
||||
pub fn run_unaryop_test_cases<F, UnaryOp, ExpectedOp>(op: UnaryOp, expected_op: ExpectedOp)
|
||||
where
|
||||
F: PrimeField,
|
||||
F: PrimeField64,
|
||||
UnaryOp: Fn(F) -> F,
|
||||
ExpectedOp: Fn(u64) -> u64,
|
||||
{
|
||||
@ -43,7 +43,7 @@ where
|
||||
/// Apply the binary functions `op` and `expected_op` to each pair of inputs.
|
||||
pub fn run_binaryop_test_cases<F, BinaryOp, ExpectedOp>(op: BinaryOp, expected_op: ExpectedOp)
|
||||
where
|
||||
F: PrimeField,
|
||||
F: PrimeField64,
|
||||
BinaryOp: Fn(F, F) -> F,
|
||||
ExpectedOp: Fn(u64, u64) -> u64,
|
||||
{
|
||||
@ -70,7 +70,7 @@ macro_rules! test_prime_field_arithmetic {
|
||||
mod prime_field_arithmetic {
|
||||
use std::ops::{Add, Mul, Neg, Sub};
|
||||
|
||||
use crate::field_types::{Field, PrimeField};
|
||||
use crate::field_types::{Field, Field64};
|
||||
use crate::ops::Square;
|
||||
|
||||
#[test]
|
||||
|
||||
@ -10,7 +10,7 @@ use num::{Integer, One};
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::field_types::Field;
|
||||
use crate::field_types::{Field, PrimeField};
|
||||
|
||||
/// The base field of the secp256k1 elliptic curve.
|
||||
///
|
||||
@ -42,7 +42,7 @@ impl Default for Secp256K1Base {
|
||||
|
||||
impl PartialEq for Secp256K1Base {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.to_biguint() == other.to_biguint()
|
||||
self.to_canonical_biguint() == other.to_canonical_biguint()
|
||||
}
|
||||
}
|
||||
|
||||
@ -50,19 +50,19 @@ impl Eq for Secp256K1Base {}
|
||||
|
||||
impl Hash for Secp256K1Base {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.to_biguint().hash(state)
|
||||
self.to_canonical_biguint().hash(state)
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Secp256K1Base {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
Display::fmt(&self.to_biguint(), f)
|
||||
Display::fmt(&self.to_canonical_biguint(), f)
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for Secp256K1Base {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
Debug::fmt(&self.to_biguint(), f)
|
||||
Debug::fmt(&self.to_canonical_biguint(), f)
|
||||
}
|
||||
}
|
||||
|
||||
@ -107,14 +107,6 @@ impl Field for Secp256K1Base {
|
||||
Some(self.exp_biguint(&(Self::order() - BigUint::one() - BigUint::one())))
|
||||
}
|
||||
|
||||
fn to_biguint(&self) -> BigUint {
|
||||
let mut result = biguint_from_array(self.0);
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
fn from_biguint(val: BigUint) -> Self {
|
||||
Self(
|
||||
val.to_u64_digits()
|
||||
@ -146,6 +138,16 @@ impl Field for Secp256K1Base {
|
||||
}
|
||||
}
|
||||
|
||||
impl PrimeField for Secp256K1Base {
|
||||
fn to_canonical_biguint(&self) -> BigUint {
|
||||
let mut result = biguint_from_array(self.0);
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
impl Neg for Secp256K1Base {
|
||||
type Output = Self;
|
||||
|
||||
@ -154,7 +156,7 @@ impl Neg for Secp256K1Base {
|
||||
if self.is_zero() {
|
||||
Self::ZERO
|
||||
} else {
|
||||
Self::from_biguint(Self::order() - self.to_biguint())
|
||||
Self::from_biguint(Self::order() - self.to_canonical_biguint())
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -164,7 +166,7 @@ impl Add for Secp256K1Base {
|
||||
|
||||
#[inline]
|
||||
fn add(self, rhs: Self) -> Self {
|
||||
let mut result = self.to_biguint() + rhs.to_biguint();
|
||||
let mut result = self.to_canonical_biguint() + rhs.to_canonical_biguint();
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
@ -207,7 +209,9 @@ impl Mul for Secp256K1Base {
|
||||
|
||||
#[inline]
|
||||
fn mul(self, rhs: Self) -> Self {
|
||||
Self::from_biguint((self.to_biguint() * rhs.to_biguint()).mod_floor(&Self::order()))
|
||||
Self::from_biguint(
|
||||
(self.to_canonical_biguint() * rhs.to_canonical_biguint()).mod_floor(&Self::order()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -11,7 +11,7 @@ use num::{Integer, One};
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::field_types::Field;
|
||||
use crate::field_types::{Field, PrimeField};
|
||||
|
||||
/// The base field of the secp256k1 elliptic curve.
|
||||
///
|
||||
@ -45,7 +45,7 @@ impl Default for Secp256K1Scalar {
|
||||
|
||||
impl PartialEq for Secp256K1Scalar {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.to_biguint() == other.to_biguint()
|
||||
self.to_canonical_biguint() == other.to_canonical_biguint()
|
||||
}
|
||||
}
|
||||
|
||||
@ -53,19 +53,19 @@ impl Eq for Secp256K1Scalar {}
|
||||
|
||||
impl Hash for Secp256K1Scalar {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.to_biguint().hash(state)
|
||||
self.to_canonical_biguint().hash(state)
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Secp256K1Scalar {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
Display::fmt(&self.to_biguint(), f)
|
||||
Display::fmt(&self.to_canonical_biguint(), f)
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for Secp256K1Scalar {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
Debug::fmt(&self.to_biguint(), f)
|
||||
Debug::fmt(&self.to_canonical_biguint(), f)
|
||||
}
|
||||
}
|
||||
|
||||
@ -116,14 +116,6 @@ impl Field for Secp256K1Scalar {
|
||||
Some(self.exp_biguint(&(Self::order() - BigUint::one() - BigUint::one())))
|
||||
}
|
||||
|
||||
fn to_biguint(&self) -> BigUint {
|
||||
let mut result = biguint_from_array(self.0);
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
fn from_biguint(val: BigUint) -> Self {
|
||||
Self(
|
||||
val.to_u64_digits()
|
||||
@ -155,6 +147,16 @@ impl Field for Secp256K1Scalar {
|
||||
}
|
||||
}
|
||||
|
||||
impl PrimeField for Secp256K1Scalar {
|
||||
fn to_canonical_biguint(&self) -> BigUint {
|
||||
let mut result = biguint_from_array(self.0);
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
impl Neg for Secp256K1Scalar {
|
||||
type Output = Self;
|
||||
|
||||
@ -163,7 +165,7 @@ impl Neg for Secp256K1Scalar {
|
||||
if self.is_zero() {
|
||||
Self::ZERO
|
||||
} else {
|
||||
Self::from_biguint(Self::order() - self.to_biguint())
|
||||
Self::from_biguint(Self::order() - self.to_canonical_biguint())
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -173,7 +175,7 @@ impl Add for Secp256K1Scalar {
|
||||
|
||||
#[inline]
|
||||
fn add(self, rhs: Self) -> Self {
|
||||
let mut result = self.to_biguint() + rhs.to_biguint();
|
||||
let mut result = self.to_canonical_biguint() + rhs.to_canonical_biguint();
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
@ -216,7 +218,9 @@ impl Mul for Secp256K1Scalar {
|
||||
|
||||
#[inline]
|
||||
fn mul(self, rhs: Self) -> Self {
|
||||
Self::from_biguint((self.to_biguint() * rhs.to_biguint()).mod_floor(&Self::order()))
|
||||
Self::from_biguint(
|
||||
(self.to_canonical_biguint() * rhs.to_canonical_biguint()).mod_floor(&Self::order()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
47
field/src/zero_poly_coset.rs
Normal file
47
field/src/zero_poly_coset.rs
Normal file
@ -0,0 +1,47 @@
|
||||
use crate::field_types::Field;
|
||||
|
||||
/// Precomputations of the evaluation of `Z_H(X) = X^n - 1` on a coset `gK` with `H <= K`.
|
||||
pub struct ZeroPolyOnCoset<F: Field> {
|
||||
/// `n = |H|`.
|
||||
n: F,
|
||||
/// `rate = |K|/|H|`.
|
||||
rate: usize,
|
||||
/// Holds `g^n * (w^n)^i - 1 = g^n * v^i - 1` for `i in 0..rate`, with `w` a generator of `K` and `v` a
|
||||
/// `rate`-primitive root of unity.
|
||||
evals: Vec<F>,
|
||||
/// Holds the multiplicative inverses of `evals`.
|
||||
inverses: Vec<F>,
|
||||
}
|
||||
|
||||
impl<F: Field> ZeroPolyOnCoset<F> {
|
||||
pub fn new(n_log: usize, rate_bits: usize) -> Self {
|
||||
let g_pow_n = F::coset_shift().exp_power_of_2(n_log);
|
||||
let evals = F::two_adic_subgroup(rate_bits)
|
||||
.into_iter()
|
||||
.map(|x| g_pow_n * x - F::ONE)
|
||||
.collect::<Vec<_>>();
|
||||
let inverses = F::batch_multiplicative_inverse(&evals);
|
||||
Self {
|
||||
n: F::from_canonical_usize(1 << n_log),
|
||||
rate: 1 << rate_bits,
|
||||
evals,
|
||||
inverses,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `Z_H(g * w^i)`.
|
||||
pub fn eval(&self, i: usize) -> F {
|
||||
self.evals[i % self.rate]
|
||||
}
|
||||
|
||||
/// Returns `1 / Z_H(g * w^i)`.
|
||||
pub fn eval_inverse(&self, i: usize) -> F {
|
||||
self.inverses[i % self.rate]
|
||||
}
|
||||
|
||||
/// Returns `L_1(x) = Z_H(x)/(n * (x - 1))` with `x = w^i`.
|
||||
pub fn eval_l1(&self, i: usize, x: F) -> F {
|
||||
// Could also precompute the inverses using Montgomery.
|
||||
self.eval(i) * (self.n * (x - F::ONE)).inverse()
|
||||
}
|
||||
}
|
||||
@ -415,7 +415,7 @@ mod tests {
|
||||
v.extend(equality_dummy_vals);
|
||||
v.extend(insert_here_vals);
|
||||
|
||||
v.iter().map(|&x| x.into()).collect::<Vec<_>>()
|
||||
v.iter().map(|&x| x.into()).collect()
|
||||
}
|
||||
|
||||
let orig_vec = vec![FF::rand(); 3];
|
||||
|
||||
BIN
plonky2.pdf
Normal file
BIN
plonky2.pdf
Normal file
Binary file not shown.
@ -1,15 +1,14 @@
|
||||
[package]
|
||||
name = "plonky2"
|
||||
description = "Recursive SNARKs based on Plonk and FRI"
|
||||
description = "Recursive SNARKs based on PLONK and FRI"
|
||||
version = "0.1.0"
|
||||
authors = ["Daniel Lubarov <daniel@mirprotocol.org>"]
|
||||
authors = ["Polygon Zero <daniel@mirprotocol.org>"]
|
||||
readme = "README.md"
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/mir-protocol/plonky2"
|
||||
keywords = ["cryptography", "SNARK", "FRI"]
|
||||
keywords = ["cryptography", "SNARK", "PLONK", "FRI"]
|
||||
categories = ["cryptography"]
|
||||
edition = "2021"
|
||||
default-run = "bench_recursion"
|
||||
default-run = "generate_constants"
|
||||
|
||||
[dependencies]
|
||||
plonky2_field = { path = "../field" }
|
||||
@ -48,14 +47,14 @@ harness = false
|
||||
name = "hashing"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "merkle"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "transpose"
|
||||
harness = false
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
#lto = "fat"
|
||||
#codegen-units = 1
|
||||
|
||||
[profile.bench]
|
||||
opt-level = 3
|
||||
[[bench]]
|
||||
name = "reverse_index_bits"
|
||||
harness = false
|
||||
|
||||
@ -11,7 +11,7 @@ pub(crate) fn bench_ffts<F: Field>(c: &mut Criterion) {
|
||||
let size = 1 << size_log;
|
||||
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| {
|
||||
let coeffs = PolynomialCoeffs::new(F::rand_vec(size));
|
||||
b.iter(|| coeffs.fft_with_options(None, None));
|
||||
b.iter(|| coeffs.clone().fft_with_options(None, None));
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,17 +1,20 @@
|
||||
#![allow(incomplete_features)]
|
||||
#![feature(generic_const_exprs)]
|
||||
|
||||
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
|
||||
use plonky2::field::goldilocks_field::GoldilocksField;
|
||||
use plonky2::hash::gmimc::GMiMC;
|
||||
use plonky2::hash::hash_types::{BytesHash, RichField};
|
||||
use plonky2::hash::hashing::SPONGE_WIDTH;
|
||||
use plonky2::hash::keccak::KeccakHash;
|
||||
use plonky2::hash::poseidon::Poseidon;
|
||||
use plonky2::plonk::config::Hasher;
|
||||
use tynm::type_name;
|
||||
|
||||
pub(crate) fn bench_gmimc<F: GMiMC<WIDTH>, const WIDTH: usize>(c: &mut Criterion) {
|
||||
c.bench_function(&format!("gmimc<{}, {}>", type_name::<F>(), WIDTH), |b| {
|
||||
pub(crate) fn bench_keccak<F: RichField>(c: &mut Criterion) {
|
||||
c.bench_function("keccak256", |b| {
|
||||
b.iter_batched(
|
||||
|| F::rand_arr::<WIDTH>(),
|
||||
|state| F::gmimc_permute(state),
|
||||
|| (BytesHash::<32>::rand(), BytesHash::<32>::rand()),
|
||||
|(left, right)| <KeccakHash<32> as Hasher<F>>::two_to_one(left, right),
|
||||
BatchSize::SmallInput,
|
||||
)
|
||||
});
|
||||
@ -31,8 +34,8 @@ pub(crate) fn bench_poseidon<F: Poseidon>(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
fn criterion_benchmark(c: &mut Criterion) {
|
||||
bench_gmimc::<GoldilocksField, 12>(c);
|
||||
bench_poseidon::<GoldilocksField>(c);
|
||||
bench_keccak::<GoldilocksField>(c);
|
||||
}
|
||||
|
||||
criterion_group!(benches, criterion_benchmark);
|
||||
|
||||
40
plonky2/benches/merkle.rs
Normal file
40
plonky2/benches/merkle.rs
Normal file
@ -0,0 +1,40 @@
|
||||
#![feature(generic_const_exprs)]
|
||||
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
use plonky2::field::goldilocks_field::GoldilocksField;
|
||||
use plonky2::hash::hash_types::RichField;
|
||||
use plonky2::hash::keccak::KeccakHash;
|
||||
use plonky2::hash::merkle_tree::MerkleTree;
|
||||
use plonky2::hash::poseidon::PoseidonHash;
|
||||
use plonky2::plonk::config::Hasher;
|
||||
use tynm::type_name;
|
||||
|
||||
const ELEMS_PER_LEAF: usize = 135;
|
||||
|
||||
pub(crate) fn bench_merkle_tree<F: RichField, H: Hasher<F>>(c: &mut Criterion)
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
let mut group = c.benchmark_group(&format!(
|
||||
"merkle-tree<{}, {}>",
|
||||
type_name::<F>(),
|
||||
type_name::<H>()
|
||||
));
|
||||
group.sample_size(10);
|
||||
|
||||
for size_log in [13, 14, 15] {
|
||||
let size = 1 << size_log;
|
||||
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| {
|
||||
let leaves = vec![F::rand_vec(ELEMS_PER_LEAF); size];
|
||||
b.iter(|| MerkleTree::<F, H>::new(leaves.clone(), 0));
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn criterion_benchmark(c: &mut Criterion) {
|
||||
bench_merkle_tree::<GoldilocksField, PoseidonHash>(c);
|
||||
bench_merkle_tree::<GoldilocksField, KeccakHash<25>>(c);
|
||||
}
|
||||
|
||||
criterion_group!(benches, criterion_benchmark);
|
||||
criterion_main!(benches);
|
||||
30
plonky2/benches/reverse_index_bits.rs
Normal file
30
plonky2/benches/reverse_index_bits.rs
Normal file
@ -0,0 +1,30 @@
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
use plonky2::field::field_types::Field;
|
||||
use plonky2::field::goldilocks_field::GoldilocksField;
|
||||
use plonky2_util::{reverse_index_bits, reverse_index_bits_in_place};
|
||||
|
||||
type F = GoldilocksField;
|
||||
|
||||
fn benchmark_in_place(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("reverse-index-bits-in-place");
|
||||
for width in [1 << 8, 1 << 16, 1 << 24] {
|
||||
group.bench_with_input(BenchmarkId::from_parameter(width), &width, |b, _| {
|
||||
let mut values = F::rand_vec(width);
|
||||
b.iter(|| reverse_index_bits_in_place(&mut values));
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn benchmark_out_of_place(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("reverse-index-bits");
|
||||
for width in [1 << 8, 1 << 16, 1 << 24] {
|
||||
group.bench_with_input(BenchmarkId::from_parameter(width), &width, |b, _| {
|
||||
let values = F::rand_vec(width);
|
||||
b.iter(|| reverse_index_bits(&values));
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(benches_in_place, benchmark_in_place);
|
||||
criterion_group!(benches_out_of_place, benchmark_out_of_place);
|
||||
criterion_main!(benches_in_place, benches_out_of_place);
|
||||
@ -1,34 +0,0 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use plonky2_field::polynomial::PolynomialValues;
|
||||
use rayon::prelude::*;
|
||||
|
||||
type F = GoldilocksField;
|
||||
|
||||
// This is an estimate of how many LDEs the prover will compute. The biggest component, 86, comes
|
||||
// from wire polynomials which "store" the outputs of S-boxes in our Poseidon gate.
|
||||
const NUM_LDES: usize = 8 + 8 + 3 + 86 + 3 + 8;
|
||||
|
||||
const DEGREE: usize = 1 << 14;
|
||||
|
||||
const RATE_BITS: usize = 3;
|
||||
|
||||
fn main() {
|
||||
// We start with random polynomials.
|
||||
let all_poly_values = (0..NUM_LDES)
|
||||
.map(|_| PolynomialValues::new(F::rand_vec(DEGREE)))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let start = Instant::now();
|
||||
|
||||
all_poly_values.into_par_iter().for_each(|poly_values| {
|
||||
let start = Instant::now();
|
||||
let lde = poly_values.lde(RATE_BITS);
|
||||
let duration = start.elapsed();
|
||||
println!("LDE took {:?}", duration);
|
||||
println!("LDE result: {:?}", lde.values[0]);
|
||||
});
|
||||
println!("All LDEs took {:?}", start.elapsed());
|
||||
}
|
||||
@ -1,60 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use env_logger::Env;
|
||||
use log::info;
|
||||
use plonky2::fri::reduction_strategies::FriReductionStrategy;
|
||||
use plonky2::fri::FriConfig;
|
||||
use plonky2::hash::hashing::SPONGE_WIDTH;
|
||||
use plonky2::iop::witness::PartialWitness;
|
||||
use plonky2::plonk::circuit_builder::CircuitBuilder;
|
||||
use plonky2::plonk::circuit_data::CircuitConfig;
|
||||
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
|
||||
|
||||
fn main() -> Result<()> {
|
||||
// Set the default log filter. This can be overridden using the `RUST_LOG` environment variable,
|
||||
// e.g. `RUST_LOG=debug`.
|
||||
// We default to debug for now, since there aren't many logs anyway, but we should probably
|
||||
// change this to info or warn later.
|
||||
env_logger::Builder::from_env(Env::default().default_filter_or("debug")).init();
|
||||
|
||||
bench_prove::<PoseidonGoldilocksConfig, 2>()
|
||||
}
|
||||
|
||||
fn bench_prove<C: GenericConfig<D>, const D: usize>() -> Result<()> {
|
||||
let config = CircuitConfig {
|
||||
num_wires: 126,
|
||||
num_routed_wires: 33,
|
||||
constant_gate_size: 6,
|
||||
use_base_arithmetic_gate: false,
|
||||
security_bits: 128,
|
||||
num_challenges: 3,
|
||||
zero_knowledge: false,
|
||||
fri_config: FriConfig {
|
||||
rate_bits: 3,
|
||||
cap_height: 1,
|
||||
proof_of_work_bits: 15,
|
||||
reduction_strategy: FriReductionStrategy::ConstantArityBits(3, 5),
|
||||
num_query_rounds: 35,
|
||||
},
|
||||
};
|
||||
|
||||
let inputs = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<C::F, D>::new(config);
|
||||
|
||||
let zero = builder.zero();
|
||||
let zero_ext = builder.zero_extension();
|
||||
|
||||
let mut state = [zero; SPONGE_WIDTH];
|
||||
for _ in 0..10000 {
|
||||
state = builder.permute::<<C as GenericConfig<D>>::InnerHasher>(state);
|
||||
}
|
||||
|
||||
// Random other gates.
|
||||
builder.add(zero, zero);
|
||||
builder.add_extension(zero_ext, zero_ext);
|
||||
|
||||
let circuit = builder.build::<C>();
|
||||
let proof_with_pis = circuit.prove(inputs)?;
|
||||
let proof_bytes = serde_cbor::to_vec(&proof_with_pis).unwrap();
|
||||
info!("Proof length: {} bytes", proof_bytes.len());
|
||||
circuit.verify(proof_with_pis)
|
||||
}
|
||||
@ -2,7 +2,7 @@
|
||||
|
||||
#![allow(clippy::needless_range_loop)]
|
||||
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
use plonky2_field::field_types::Field64;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use rand::{Rng, SeedableRng};
|
||||
use rand_chacha::ChaCha8Rng;
|
||||
@ -11,7 +11,6 @@ use rand_chacha::ChaCha8Rng;
|
||||
// range of GoldilocksField, then verify that each constant also fits in GoldilocksField.
|
||||
const SAMPLE_RANGE_END: u64 = 0xffffffff70000001;
|
||||
|
||||
// const N: usize = 101; // For GMiMC
|
||||
// const N: usize = 8 * 30; // For Posiedon-8
|
||||
const N: usize = 12 * 30; // For Posiedon-12
|
||||
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
use itertools::Itertools;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
use rayon::prelude::*;
|
||||
|
||||
use crate::curve::curve_summation::affine_multisummation_best;
|
||||
@ -160,7 +161,7 @@ pub(crate) fn to_digits<C: Curve>(x: &C::ScalarField, w: usize) -> Vec<usize> {
|
||||
|
||||
// Convert x to a bool array.
|
||||
let x_canonical: Vec<_> = x
|
||||
.to_biguint()
|
||||
.to_canonical_biguint()
|
||||
.to_u64_digits()
|
||||
.iter()
|
||||
.cloned()
|
||||
@ -187,6 +188,7 @@ pub(crate) fn to_digits<C: Curve>(x: &C::ScalarField, w: usize) -> Vec<usize> {
|
||||
mod tests {
|
||||
use num::BigUint;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
use plonky2_field::secp256k1_scalar::Secp256K1Scalar;
|
||||
|
||||
use crate::curve::curve_msm::{msm_execute, msm_precompute, to_digits};
|
||||
@ -206,7 +208,7 @@ mod tests {
|
||||
0b11111111111111111111111111111111,
|
||||
];
|
||||
let x = Secp256K1Scalar::from_biguint(BigUint::from_slice(&x_canonical));
|
||||
assert_eq!(x.to_biguint().to_u32_digits(), x_canonical);
|
||||
assert_eq!(x.to_canonical_biguint().to_u32_digits(), x_canonical);
|
||||
assert_eq!(
|
||||
to_digits::<Secp256K1>(&x, 17),
|
||||
vec![
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
use std::ops::Mul;
|
||||
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
|
||||
use crate::curve::curve_types::{Curve, CurveScalar, ProjectivePoint};
|
||||
|
||||
@ -88,7 +89,7 @@ fn to_digits<C: Curve>(x: &C::ScalarField) -> Vec<u64> {
|
||||
);
|
||||
let digits_per_u64 = 64 / WINDOW_BITS;
|
||||
let mut digits = Vec::with_capacity(digits_per_scalar::<C>());
|
||||
for limb in x.to_biguint().to_u64_digits() {
|
||||
for limb in x.to_canonical_biguint().to_u64_digits() {
|
||||
for j in 0..digits_per_u64 {
|
||||
digits.push((limb >> (j * WINDOW_BITS) as u64) % BASE as u64);
|
||||
}
|
||||
|
||||
@ -1,8 +1,10 @@
|
||||
use std::fmt::Debug;
|
||||
use std::hash::Hash;
|
||||
use std::ops::Neg;
|
||||
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::ops::Square;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
// To avoid implementation conflicts from associated types,
|
||||
// see https://github.com/rust-lang/rust/issues/20400
|
||||
@ -10,8 +12,8 @@ pub struct CurveScalar<C: Curve>(pub <C as Curve>::ScalarField);
|
||||
|
||||
/// A short Weierstrass curve.
|
||||
pub trait Curve: 'static + Sync + Sized + Copy + Debug {
|
||||
type BaseField: Field;
|
||||
type ScalarField: Field;
|
||||
type BaseField: PrimeField;
|
||||
type ScalarField: PrimeField;
|
||||
|
||||
const A: Self::BaseField;
|
||||
const B: Self::BaseField;
|
||||
@ -36,7 +38,7 @@ pub trait Curve: 'static + Sync + Sized + Copy + Debug {
|
||||
}
|
||||
|
||||
/// A point on a short Weierstrass curve, represented in affine coordinates.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
|
||||
pub struct AffinePoint<C: Curve> {
|
||||
pub x: C::BaseField,
|
||||
pub y: C::BaseField,
|
||||
@ -119,6 +121,17 @@ impl<C: Curve> PartialEq for AffinePoint<C> {
|
||||
|
||||
impl<C: Curve> Eq for AffinePoint<C> {}
|
||||
|
||||
impl<C: Curve> Hash for AffinePoint<C> {
|
||||
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
|
||||
if self.zero {
|
||||
self.zero.hash(state);
|
||||
} else {
|
||||
self.x.hash(state);
|
||||
self.y.hash(state);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A point on a short Weierstrass curve, represented in projective coordinates.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct ProjectivePoint<C: Curve> {
|
||||
@ -259,3 +272,11 @@ impl<C: Curve> Neg for ProjectivePoint<C> {
|
||||
ProjectivePoint { x, y: -y, z }
|
||||
}
|
||||
}
|
||||
|
||||
pub fn base_to_scalar<C: Curve>(x: C::BaseField) -> C::ScalarField {
|
||||
C::ScalarField::from_biguint(x.to_canonical_biguint())
|
||||
}
|
||||
|
||||
pub fn scalar_to_base<C: Curve>(x: C::ScalarField) -> C::BaseField {
|
||||
C::BaseField::from_biguint(x.to_canonical_biguint())
|
||||
}
|
||||
|
||||
78
plonky2/src/curve/ecdsa.rs
Normal file
78
plonky2/src/curve/ecdsa.rs
Normal file
@ -0,0 +1,78 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::curve::curve_msm::msm_parallel;
|
||||
use crate::curve::curve_types::{base_to_scalar, AffinePoint, Curve, CurveScalar};
|
||||
use crate::field::field_types::Field;
|
||||
|
||||
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
|
||||
pub struct ECDSASignature<C: Curve> {
|
||||
pub r: C::ScalarField,
|
||||
pub s: C::ScalarField,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
|
||||
pub struct ECDSASecretKey<C: Curve>(pub C::ScalarField);
|
||||
|
||||
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
|
||||
pub struct ECDSAPublicKey<C: Curve>(pub AffinePoint<C>);
|
||||
|
||||
pub fn sign_message<C: Curve>(msg: C::ScalarField, sk: ECDSASecretKey<C>) -> ECDSASignature<C> {
|
||||
let (k, rr) = {
|
||||
let mut k = C::ScalarField::rand();
|
||||
let mut rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine();
|
||||
while rr.x == C::BaseField::ZERO {
|
||||
k = C::ScalarField::rand();
|
||||
rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine();
|
||||
}
|
||||
(k, rr)
|
||||
};
|
||||
let r = base_to_scalar::<C>(rr.x);
|
||||
|
||||
let s = k.inverse() * (msg + r * sk.0);
|
||||
|
||||
ECDSASignature { r, s }
|
||||
}
|
||||
|
||||
pub fn verify_message<C: Curve>(
|
||||
msg: C::ScalarField,
|
||||
sig: ECDSASignature<C>,
|
||||
pk: ECDSAPublicKey<C>,
|
||||
) -> bool {
|
||||
let ECDSASignature { r, s } = sig;
|
||||
|
||||
assert!(pk.0.is_valid());
|
||||
|
||||
let c = s.inverse();
|
||||
let u1 = msg * c;
|
||||
let u2 = r * c;
|
||||
|
||||
let g = C::GENERATOR_PROJECTIVE;
|
||||
let w = 5; // Experimentally fastest
|
||||
let point_proj = msm_parallel(&[u1, u2], &[g, pk.0.to_projective()], w);
|
||||
let point = point_proj.to_affine();
|
||||
|
||||
let x = base_to_scalar::<C>(point.x);
|
||||
r == x
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::curve::curve_types::{Curve, CurveScalar};
|
||||
use crate::curve::ecdsa::{sign_message, verify_message, ECDSAPublicKey, ECDSASecretKey};
|
||||
use crate::curve::secp256k1::Secp256K1;
|
||||
use crate::field::field_types::Field;
|
||||
use crate::field::secp256k1_scalar::Secp256K1Scalar;
|
||||
|
||||
#[test]
|
||||
fn test_ecdsa_native() {
|
||||
type C = Secp256K1;
|
||||
|
||||
let msg = Secp256K1Scalar::rand();
|
||||
let sk = ECDSASecretKey(Secp256K1Scalar::rand());
|
||||
let pk = ECDSAPublicKey((CurveScalar(sk.0) * C::GENERATOR_PROJECTIVE).to_affine());
|
||||
|
||||
let sig = sign_message(msg, sk);
|
||||
let result = verify_message(msg, sig, pk);
|
||||
assert!(result);
|
||||
}
|
||||
}
|
||||
@ -3,4 +3,5 @@ pub mod curve_msm;
|
||||
pub mod curve_multiplication;
|
||||
pub mod curve_summation;
|
||||
pub mod curve_types;
|
||||
pub mod ecdsa;
|
||||
pub mod secp256k1;
|
||||
|
||||
@ -1,10 +1,11 @@
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::secp256k1_base::Secp256K1Base;
|
||||
use plonky2_field::secp256k1_scalar::Secp256K1Scalar;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::curve::curve_types::{AffinePoint, Curve};
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
#[derive(Debug, Copy, Clone, Deserialize, Eq, Hash, PartialEq, Serialize)]
|
||||
pub struct Secp256K1;
|
||||
|
||||
impl Curve for Secp256K1 {
|
||||
@ -40,6 +41,7 @@ const SECP256K1_GENERATOR_Y: Secp256K1Base = Secp256K1Base([
|
||||
mod tests {
|
||||
use num::BigUint;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
use plonky2_field::secp256k1_scalar::Secp256K1Scalar;
|
||||
|
||||
use crate::curve::curve_types::{AffinePoint, Curve, ProjectivePoint};
|
||||
@ -86,7 +88,7 @@ mod tests {
|
||||
) -> ProjectivePoint<Secp256K1> {
|
||||
let mut g = rhs;
|
||||
let mut sum = ProjectivePoint::ZERO;
|
||||
for limb in lhs.to_biguint().to_u64_digits().iter() {
|
||||
for limb in lhs.to_canonical_biguint().to_u64_digits().iter() {
|
||||
for j in 0..64 {
|
||||
if (limb >> j & 1u64) != 0u64 {
|
||||
sum = sum + g;
|
||||
|
||||
131
plonky2/src/fri/challenges.rs
Normal file
131
plonky2/src/fri/challenges.rs
Normal file
@ -0,0 +1,131 @@
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
use plonky2_field::polynomial::PolynomialCoeffs;
|
||||
|
||||
use crate::fri::proof::{FriChallenges, FriChallengesTarget};
|
||||
use crate::fri::structure::{FriOpenings, FriOpeningsTarget};
|
||||
use crate::fri::FriConfig;
|
||||
use crate::gadgets::polynomial::PolynomialCoeffsExtTarget;
|
||||
use crate::hash::hash_types::{MerkleCapTarget, RichField};
|
||||
use crate::hash::merkle_tree::MerkleCap;
|
||||
use crate::iop::challenger::{Challenger, RecursiveChallenger};
|
||||
use crate::iop::target::Target;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::circuit_data::CommonCircuitData;
|
||||
use crate::plonk::config::{AlgebraicHasher, GenericConfig, Hasher};
|
||||
|
||||
impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
|
||||
pub fn observe_openings<const D: usize>(&mut self, openings: &FriOpenings<F, D>)
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
{
|
||||
for v in &openings.batches {
|
||||
self.observe_extension_elements(&v.values);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn fri_challenges<C: GenericConfig<D, F = F>, const D: usize>(
|
||||
&mut self,
|
||||
commit_phase_merkle_caps: &[MerkleCap<F, C::Hasher>],
|
||||
final_poly: &PolynomialCoeffs<F::Extension>,
|
||||
pow_witness: F,
|
||||
degree_bits: usize,
|
||||
config: &FriConfig,
|
||||
) -> FriChallenges<F, D>
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
{
|
||||
let num_fri_queries = config.num_query_rounds;
|
||||
let lde_size = 1 << (degree_bits + config.rate_bits);
|
||||
// Scaling factor to combine polynomials.
|
||||
let fri_alpha = self.get_extension_challenge::<D>();
|
||||
|
||||
// Recover the random betas used in the FRI reductions.
|
||||
let fri_betas = commit_phase_merkle_caps
|
||||
.iter()
|
||||
.map(|cap| {
|
||||
self.observe_cap(cap);
|
||||
self.get_extension_challenge::<D>()
|
||||
})
|
||||
.collect();
|
||||
|
||||
self.observe_extension_elements(&final_poly.coeffs);
|
||||
|
||||
let fri_pow_response = C::InnerHasher::hash_no_pad(
|
||||
&self
|
||||
.get_hash()
|
||||
.elements
|
||||
.iter()
|
||||
.copied()
|
||||
.chain(Some(pow_witness))
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
.elements[0];
|
||||
|
||||
let fri_query_indices = (0..num_fri_queries)
|
||||
.map(|_| self.get_challenge().to_canonical_u64() as usize % lde_size)
|
||||
.collect();
|
||||
|
||||
FriChallenges {
|
||||
fri_alpha,
|
||||
fri_betas,
|
||||
fri_pow_response,
|
||||
fri_query_indices,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, H: AlgebraicHasher<F>, const D: usize>
|
||||
RecursiveChallenger<F, H, D>
|
||||
{
|
||||
pub fn observe_openings(&mut self, openings: &FriOpeningsTarget<D>) {
|
||||
for v in &openings.batches {
|
||||
self.observe_extension_elements(&v.values);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn fri_challenges<C: GenericConfig<D, F = F>>(
|
||||
&mut self,
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
commit_phase_merkle_caps: &[MerkleCapTarget],
|
||||
final_poly: &PolynomialCoeffsExtTarget<D>,
|
||||
pow_witness: Target,
|
||||
inner_common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> FriChallengesTarget<D> {
|
||||
let num_fri_queries = inner_common_data.config.fri_config.num_query_rounds;
|
||||
// Scaling factor to combine polynomials.
|
||||
let fri_alpha = self.get_extension_challenge(builder);
|
||||
|
||||
// Recover the random betas used in the FRI reductions.
|
||||
let fri_betas = commit_phase_merkle_caps
|
||||
.iter()
|
||||
.map(|cap| {
|
||||
self.observe_cap(cap);
|
||||
self.get_extension_challenge(builder)
|
||||
})
|
||||
.collect();
|
||||
|
||||
self.observe_extension_elements(&final_poly.0);
|
||||
|
||||
let pow_inputs = self
|
||||
.get_hash(builder)
|
||||
.elements
|
||||
.iter()
|
||||
.copied()
|
||||
.chain(Some(pow_witness))
|
||||
.collect();
|
||||
let fri_pow_response = builder
|
||||
.hash_n_to_hash_no_pad::<C::InnerHasher>(pow_inputs)
|
||||
.elements[0];
|
||||
|
||||
let fri_query_indices = (0..num_fri_queries)
|
||||
.map(|_| self.get_challenge(builder))
|
||||
.collect();
|
||||
|
||||
FriChallengesTarget {
|
||||
fri_alpha,
|
||||
fri_betas,
|
||||
fri_pow_response,
|
||||
fri_query_indices,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,11 +1,14 @@
|
||||
use crate::fri::reduction_strategies::FriReductionStrategy;
|
||||
|
||||
pub mod commitment;
|
||||
mod challenges;
|
||||
pub mod oracle;
|
||||
pub mod proof;
|
||||
pub mod prover;
|
||||
pub mod recursive_verifier;
|
||||
pub mod reduction_strategies;
|
||||
pub mod structure;
|
||||
pub mod verifier;
|
||||
pub mod witness_util;
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct FriConfig {
|
||||
@ -23,6 +26,12 @@ pub struct FriConfig {
|
||||
pub num_query_rounds: usize,
|
||||
}
|
||||
|
||||
impl FriConfig {
|
||||
pub fn rate(&self) -> f64 {
|
||||
1.0 / ((1 << self.rate_bits) as f64)
|
||||
}
|
||||
}
|
||||
|
||||
/// FRI parameters, including generated parameters which are specific to an instance size, in
|
||||
/// contrast to `FriConfig` which is user-specified and independent of instance size.
|
||||
#[derive(Debug)]
|
||||
@ -30,6 +39,9 @@ pub struct FriParams {
|
||||
/// User-specified FRI configuration.
|
||||
pub config: FriConfig,
|
||||
|
||||
/// Whether to use a hiding variant of Merkle trees (where random salts are added to leaves).
|
||||
pub hiding: bool,
|
||||
|
||||
/// The degree of the purported codeword, measured in bits.
|
||||
pub degree_bits: usize,
|
||||
|
||||
|
||||
@ -7,13 +7,12 @@ use rayon::prelude::*;
|
||||
|
||||
use crate::fri::proof::FriProof;
|
||||
use crate::fri::prover::fri_proof;
|
||||
use crate::fri::structure::{FriBatchInfo, FriInstanceInfo};
|
||||
use crate::fri::FriParams;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::hash::merkle_tree::MerkleTree;
|
||||
use crate::iop::challenger::Challenger;
|
||||
use crate::plonk::circuit_data::CommonCircuitData;
|
||||
use crate::plonk::config::GenericConfig;
|
||||
use crate::plonk::plonk_common::PlonkPolynomials;
|
||||
use crate::plonk::proof::OpeningSet;
|
||||
use crate::plonk::config::{GenericConfig, Hasher};
|
||||
use crate::timed;
|
||||
use crate::util::reducing::ReducingFactor;
|
||||
use crate::util::reverse_bits;
|
||||
@ -23,12 +22,9 @@ use crate::util::transpose;
|
||||
/// Four (~64 bit) field elements gives ~128 bit security.
|
||||
pub const SALT_SIZE: usize = 4;
|
||||
|
||||
/// Represents a batch FRI based commitment to a list of polynomials.
|
||||
pub struct PolynomialBatchCommitment<
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
const D: usize,
|
||||
> {
|
||||
/// Represents a FRI oracle, i.e. a batch of polynomials which have been Merklized.
|
||||
pub struct PolynomialBatch<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
{
|
||||
pub polynomials: Vec<PolynomialCoeffs<F>>,
|
||||
pub merkle_tree: MerkleTree<F, C::Hasher>,
|
||||
pub degree_log: usize,
|
||||
@ -37,21 +33,24 @@ pub struct PolynomialBatchCommitment<
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
PolynomialBatchCommitment<F, C, D>
|
||||
PolynomialBatch<F, C, D>
|
||||
{
|
||||
/// Creates a list polynomial commitment for the polynomials interpolating the values in `values`.
|
||||
pub(crate) fn from_values(
|
||||
pub fn from_values(
|
||||
values: Vec<PolynomialValues<F>>,
|
||||
rate_bits: usize,
|
||||
blinding: bool,
|
||||
cap_height: usize,
|
||||
timing: &mut TimingTree,
|
||||
fft_root_table: Option<&FftRootTable<F>>,
|
||||
) -> Self {
|
||||
) -> Self
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let coeffs = timed!(
|
||||
timing,
|
||||
"IFFT",
|
||||
values.par_iter().map(|v| v.ifft()).collect::<Vec<_>>()
|
||||
values.into_par_iter().map(|v| v.ifft()).collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
Self::from_coeffs(
|
||||
@ -65,14 +64,17 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
}
|
||||
|
||||
/// Creates a list polynomial commitment for the polynomials `polynomials`.
|
||||
pub(crate) fn from_coeffs(
|
||||
pub fn from_coeffs(
|
||||
polynomials: Vec<PolynomialCoeffs<F>>,
|
||||
rate_bits: usize,
|
||||
blinding: bool,
|
||||
cap_height: usize,
|
||||
timing: &mut TimingTree,
|
||||
fft_root_table: Option<&FftRootTable<F>>,
|
||||
) -> Self {
|
||||
) -> Self
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let degree = polynomials[0].len();
|
||||
let lde_values = timed!(
|
||||
timing,
|
||||
@ -130,78 +132,42 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
&slice[..slice.len() - if self.blinding { SALT_SIZE } else { 0 }]
|
||||
}
|
||||
|
||||
/// Takes the commitments to the constants - sigmas - wires - zs - quotient — polynomials,
|
||||
/// and an opening point `zeta` and produces a batched opening proof + opening set.
|
||||
pub(crate) fn open_plonk(
|
||||
commitments: &[&Self; 4],
|
||||
zeta: F::Extension,
|
||||
/// Produces a batch opening proof.
|
||||
pub fn prove_openings(
|
||||
instance: &FriInstanceInfo<F, D>,
|
||||
oracles: &[&Self],
|
||||
challenger: &mut Challenger<F, C::Hasher>,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
fri_params: &FriParams,
|
||||
timing: &mut TimingTree,
|
||||
) -> (FriProof<F, C::Hasher, D>, OpeningSet<F, D>) {
|
||||
let config = &common_data.config;
|
||||
) -> FriProof<F, C::Hasher, D>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
assert!(D > 1, "Not implemented for D=1.");
|
||||
let degree_log = commitments[0].degree_log;
|
||||
let g = F::Extension::primitive_root_of_unity(degree_log);
|
||||
for p in &[zeta, g * zeta] {
|
||||
assert_ne!(
|
||||
p.exp_u64(1 << degree_log as u64),
|
||||
F::Extension::ONE,
|
||||
"Opening point is in the subgroup."
|
||||
);
|
||||
}
|
||||
|
||||
let os = timed!(
|
||||
timing,
|
||||
"construct the opening set",
|
||||
OpeningSet::new(
|
||||
zeta,
|
||||
g,
|
||||
commitments[0],
|
||||
commitments[1],
|
||||
commitments[2],
|
||||
commitments[3],
|
||||
common_data,
|
||||
)
|
||||
);
|
||||
challenger.observe_opening_set(&os);
|
||||
|
||||
let alpha = challenger.get_extension_challenge::<D>();
|
||||
let mut alpha = ReducingFactor::new(alpha);
|
||||
|
||||
// Final low-degree polynomial that goes into FRI.
|
||||
let mut final_poly = PolynomialCoeffs::empty();
|
||||
|
||||
// All polynomials are opened at `zeta`.
|
||||
let single_polys = [
|
||||
PlonkPolynomials::CONSTANTS_SIGMAS,
|
||||
PlonkPolynomials::WIRES,
|
||||
PlonkPolynomials::ZS_PARTIAL_PRODUCTS,
|
||||
PlonkPolynomials::QUOTIENT,
|
||||
]
|
||||
.iter()
|
||||
.flat_map(|&p| &commitments[p.index].polynomials);
|
||||
let single_composition_poly = timed!(
|
||||
timing,
|
||||
"reduce single polys",
|
||||
alpha.reduce_polys_base(single_polys)
|
||||
);
|
||||
for FriBatchInfo { point, polynomials } in &instance.batches {
|
||||
let polys_coeff = polynomials.iter().map(|fri_poly| {
|
||||
&oracles[fri_poly.oracle_index].polynomials[fri_poly.polynomial_index]
|
||||
});
|
||||
let composition_poly = timed!(
|
||||
timing,
|
||||
&format!("reduce batch of {} polynomials", polynomials.len()),
|
||||
alpha.reduce_polys_base(polys_coeff)
|
||||
);
|
||||
let quotient = composition_poly.divide_by_linear(*point);
|
||||
alpha.shift_poly(&mut final_poly);
|
||||
final_poly += quotient;
|
||||
}
|
||||
// Multiply the final polynomial by `X`, so that `final_poly` has the maximum degree for
|
||||
// which the LDT will pass. See github.com/mir-protocol/plonky2/pull/436 for details.
|
||||
final_poly.coeffs.insert(0, F::Extension::ZERO);
|
||||
|
||||
let single_quotient = Self::compute_quotient([zeta], single_composition_poly);
|
||||
final_poly += single_quotient;
|
||||
alpha.reset();
|
||||
|
||||
// Z polynomials have an additional opening at `g zeta`.
|
||||
let zs_polys = &commitments[PlonkPolynomials::ZS_PARTIAL_PRODUCTS.index].polynomials
|
||||
[common_data.zs_range()];
|
||||
let zs_composition_poly =
|
||||
timed!(timing, "reduce Z polys", alpha.reduce_polys_base(zs_polys));
|
||||
|
||||
let zs_quotient = Self::compute_quotient([g * zeta], zs_composition_poly);
|
||||
alpha.shift_poly(&mut final_poly);
|
||||
final_poly += zs_quotient;
|
||||
|
||||
let lde_final_poly = final_poly.lde(config.fri_config.rate_bits);
|
||||
let lde_final_poly = final_poly.lde(fri_params.config.rate_bits);
|
||||
let lde_final_values = timed!(
|
||||
timing,
|
||||
&format!("perform final FFT {}", lde_final_poly.len()),
|
||||
@ -209,41 +175,17 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
);
|
||||
|
||||
let fri_proof = fri_proof::<F, C, D>(
|
||||
&commitments
|
||||
&oracles
|
||||
.par_iter()
|
||||
.map(|c| &c.merkle_tree)
|
||||
.collect::<Vec<_>>(),
|
||||
lde_final_poly,
|
||||
lde_final_values,
|
||||
challenger,
|
||||
&common_data.fri_params,
|
||||
fri_params,
|
||||
timing,
|
||||
);
|
||||
|
||||
(fri_proof, os)
|
||||
}
|
||||
|
||||
/// Given `points=(x_i)`, `evals=(y_i)` and `poly=P` with `P(x_i)=y_i`, computes the polynomial
|
||||
/// `Q=(P-I)/Z` where `I` interpolates `(x_i, y_i)` and `Z` is the vanishing polynomial on `(x_i)`.
|
||||
fn compute_quotient<const N: usize>(
|
||||
points: [F::Extension; N],
|
||||
poly: PolynomialCoeffs<F::Extension>,
|
||||
) -> PolynomialCoeffs<F::Extension> {
|
||||
let quotient = if N == 1 {
|
||||
poly.divide_by_linear(points[0]).0
|
||||
} else if N == 2 {
|
||||
// The denominator is `(X - p0)(X - p1) = p0 p1 - (p0 + p1) X + X^2`.
|
||||
let denominator = vec![
|
||||
points[0] * points[1],
|
||||
-points[0] - points[1],
|
||||
F::Extension::ONE,
|
||||
]
|
||||
.into();
|
||||
poly.div_rem_long_division(&denominator).0 // Could also use `divide_by_linear` twice.
|
||||
} else {
|
||||
unreachable!("This shouldn't happen. Plonk should open polynomials at 1 or 2 points.")
|
||||
};
|
||||
|
||||
quotient.padded(quotient.degree_plus_one().next_power_of_two())
|
||||
fri_proof
|
||||
}
|
||||
}
|
||||
@ -5,6 +5,7 @@ use plonky2_field::extension_field::{flatten, unflatten, Extendable};
|
||||
use plonky2_field::polynomial::PolynomialCoeffs;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::fri::FriParams;
|
||||
use crate::gadgets::polynomial::PolynomialCoeffsExtTarget;
|
||||
use crate::hash::hash_types::MerkleCapTarget;
|
||||
use crate::hash::hash_types::RichField;
|
||||
@ -13,9 +14,8 @@ use crate::hash::merkle_tree::MerkleCap;
|
||||
use crate::hash::path_compression::{compress_merkle_proofs, decompress_merkle_proofs};
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::target::Target;
|
||||
use crate::plonk::circuit_data::CommonCircuitData;
|
||||
use crate::plonk::config::{GenericConfig, Hasher};
|
||||
use crate::plonk::plonk_common::PolynomialsIndexBlinding;
|
||||
use crate::plonk::plonk_common::salt_size;
|
||||
use crate::plonk::proof::{FriInferredElements, ProofChallenges};
|
||||
|
||||
/// Evaluations and Merkle proof produced by the prover in a FRI query step.
|
||||
@ -26,7 +26,7 @@ pub struct FriQueryStep<F: RichField + Extendable<D>, H: Hasher<F>, const D: usi
|
||||
pub merkle_proof: MerkleProof<F, H>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct FriQueryStepTarget<const D: usize> {
|
||||
pub evals: Vec<ExtensionTarget<D>>,
|
||||
pub merkle_proof: MerkleProofTarget,
|
||||
@ -41,29 +41,34 @@ pub struct FriInitialTreeProof<F: RichField, H: Hasher<F>> {
|
||||
}
|
||||
|
||||
impl<F: RichField, H: Hasher<F>> FriInitialTreeProof<F, H> {
|
||||
pub(crate) fn unsalted_evals(
|
||||
&self,
|
||||
polynomials: PolynomialsIndexBlinding,
|
||||
zero_knowledge: bool,
|
||||
) -> &[F] {
|
||||
let evals = &self.evals_proofs[polynomials.index].0;
|
||||
&evals[..evals.len() - polynomials.salt_size(zero_knowledge)]
|
||||
pub(crate) fn unsalted_eval(&self, oracle_index: usize, poly_index: usize, salted: bool) -> F {
|
||||
self.unsalted_evals(oracle_index, salted)[poly_index]
|
||||
}
|
||||
|
||||
fn unsalted_evals(&self, oracle_index: usize, salted: bool) -> &[F] {
|
||||
let evals = &self.evals_proofs[oracle_index].0;
|
||||
&evals[..evals.len() - salt_size(salted)]
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct FriInitialTreeProofTarget {
|
||||
pub evals_proofs: Vec<(Vec<Target>, MerkleProofTarget)>,
|
||||
}
|
||||
|
||||
impl FriInitialTreeProofTarget {
|
||||
pub(crate) fn unsalted_evals(
|
||||
pub(crate) fn unsalted_eval(
|
||||
&self,
|
||||
polynomials: PolynomialsIndexBlinding,
|
||||
zero_knowledge: bool,
|
||||
) -> &[Target] {
|
||||
let evals = &self.evals_proofs[polynomials.index].0;
|
||||
&evals[..evals.len() - polynomials.salt_size(zero_knowledge)]
|
||||
oracle_index: usize,
|
||||
poly_index: usize,
|
||||
salted: bool,
|
||||
) -> Target {
|
||||
self.unsalted_evals(oracle_index, salted)[poly_index]
|
||||
}
|
||||
|
||||
fn unsalted_evals(&self, oracle_index: usize, salted: bool) -> &[Target] {
|
||||
let evals = &self.evals_proofs[oracle_index].0;
|
||||
&evals[..evals.len() - salt_size(salted)]
|
||||
}
|
||||
}
|
||||
|
||||
@ -75,7 +80,7 @@ pub struct FriQueryRound<F: RichField + Extendable<D>, H: Hasher<F>, const D: us
|
||||
pub steps: Vec<FriQueryStep<F, H, D>>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct FriQueryRoundTarget<const D: usize> {
|
||||
pub initial_trees_proof: FriInitialTreeProofTarget,
|
||||
pub steps: Vec<FriQueryStepTarget<D>>,
|
||||
@ -106,6 +111,7 @@ pub struct FriProof<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize>
|
||||
pub pow_witness: F,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct FriProofTarget<const D: usize> {
|
||||
pub commit_phase_merkle_caps: Vec<MerkleCapTarget>,
|
||||
pub query_round_proofs: Vec<FriQueryRoundTarget<D>>,
|
||||
@ -131,7 +137,7 @@ impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> FriProof<F, H,
|
||||
pub fn compress<C: GenericConfig<D, F = F, Hasher = H>>(
|
||||
self,
|
||||
indices: &[usize],
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
params: &FriParams,
|
||||
) -> CompressedFriProof<F, H, D> {
|
||||
let FriProof {
|
||||
commit_phase_merkle_caps,
|
||||
@ -140,8 +146,8 @@ impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> FriProof<F, H,
|
||||
pow_witness,
|
||||
..
|
||||
} = self;
|
||||
let cap_height = common_data.config.fri_config.cap_height;
|
||||
let reduction_arity_bits = &common_data.fri_params.reduction_arity_bits;
|
||||
let cap_height = params.config.cap_height;
|
||||
let reduction_arity_bits = ¶ms.reduction_arity_bits;
|
||||
let num_reductions = reduction_arity_bits.len();
|
||||
let num_initial_trees = query_round_proofs[0].initial_trees_proof.evals_proofs.len();
|
||||
|
||||
@ -238,8 +244,11 @@ impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> CompressedFriPr
|
||||
self,
|
||||
challenges: &ProofChallenges<F, D>,
|
||||
fri_inferred_elements: FriInferredElements<F, D>,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> FriProof<F, H, D> {
|
||||
params: &FriParams,
|
||||
) -> FriProof<F, H, D>
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
let CompressedFriProof {
|
||||
commit_phase_merkle_caps,
|
||||
query_round_proofs,
|
||||
@ -247,13 +256,13 @@ impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> CompressedFriPr
|
||||
pow_witness,
|
||||
..
|
||||
} = self;
|
||||
let ProofChallenges {
|
||||
let FriChallenges {
|
||||
fri_query_indices: indices,
|
||||
..
|
||||
} = challenges;
|
||||
} = &challenges.fri_challenges;
|
||||
let mut fri_inferred_elements = fri_inferred_elements.0.into_iter();
|
||||
let cap_height = common_data.config.fri_config.cap_height;
|
||||
let reduction_arity_bits = &common_data.fri_params.reduction_arity_bits;
|
||||
let cap_height = params.config.cap_height;
|
||||
let reduction_arity_bits = ¶ms.reduction_arity_bits;
|
||||
let num_reductions = reduction_arity_bits.len();
|
||||
let num_initial_trees = query_round_proofs
|
||||
.initial_trees_proofs
|
||||
@ -270,7 +279,7 @@ impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> CompressedFriPr
|
||||
let mut steps_indices = vec![vec![]; num_reductions];
|
||||
let mut steps_evals = vec![vec![]; num_reductions];
|
||||
let mut steps_proofs = vec![vec![]; num_reductions];
|
||||
let height = common_data.degree_bits + common_data.config.fri_config.rate_bits;
|
||||
let height = params.degree_bits + params.config.rate_bits;
|
||||
let heights = reduction_arity_bits
|
||||
.iter()
|
||||
.scan(height, |acc, &bits| {
|
||||
@ -280,10 +289,8 @@ impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> CompressedFriPr
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Holds the `evals` vectors that have already been reconstructed at each reduction depth.
|
||||
let mut evals_by_depth = vec![
|
||||
HashMap::<usize, Vec<_>>::new();
|
||||
common_data.fri_params.reduction_arity_bits.len()
|
||||
];
|
||||
let mut evals_by_depth =
|
||||
vec![HashMap::<usize, Vec<_>>::new(); params.reduction_arity_bits.len()];
|
||||
for &(mut index) in indices {
|
||||
let initial_trees_proof = query_round_proofs.initial_trees_proofs[&index].clone();
|
||||
for (i, (leaves_data, proof)) in
|
||||
@ -358,3 +365,23 @@ impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> CompressedFriPr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct FriChallenges<F: RichField + Extendable<D>, const D: usize> {
|
||||
// Scaling factor to combine polynomials.
|
||||
pub fri_alpha: F::Extension,
|
||||
|
||||
// Betas used in the FRI commit phase reductions.
|
||||
pub fri_betas: Vec<F::Extension>,
|
||||
|
||||
pub fri_pow_response: F,
|
||||
|
||||
// Indices at which the oracle is queried in FRI.
|
||||
pub fri_query_indices: Vec<usize>,
|
||||
}
|
||||
|
||||
pub struct FriChallengesTarget<const D: usize> {
|
||||
pub fri_alpha: ExtensionTarget<D>,
|
||||
pub fri_betas: Vec<ExtensionTarget<D>>,
|
||||
pub fri_pow_response: Target,
|
||||
pub fri_query_indices: Vec<Target>,
|
||||
}
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
use itertools::Itertools;
|
||||
use plonky2_field::extension_field::{flatten, unflatten, Extendable};
|
||||
use plonky2_field::polynomial::{PolynomialCoeffs, PolynomialValues};
|
||||
use plonky2_util::reverse_index_bits_in_place;
|
||||
@ -23,9 +24,12 @@ pub fn fri_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const
|
||||
challenger: &mut Challenger<F, C::Hasher>,
|
||||
fri_params: &FriParams,
|
||||
timing: &mut TimingTree,
|
||||
) -> FriProof<F, C::Hasher, D> {
|
||||
let n = lde_polynomial_values.values.len();
|
||||
assert_eq!(lde_polynomial_coeffs.coeffs.len(), n);
|
||||
) -> FriProof<F, C::Hasher, D>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let n = lde_polynomial_values.len();
|
||||
assert_eq!(lde_polynomial_coeffs.len(), n);
|
||||
|
||||
// Commit phase
|
||||
let (trees, final_coeffs) = timed!(
|
||||
@ -67,13 +71,15 @@ fn fri_committed_trees<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>,
|
||||
) -> (
|
||||
Vec<MerkleTree<F, C::Hasher>>,
|
||||
PolynomialCoeffs<F::Extension>,
|
||||
) {
|
||||
)
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let mut trees = Vec::new();
|
||||
|
||||
let mut shift = F::MULTIPLICATIVE_GROUP_GENERATOR;
|
||||
let num_reductions = fri_params.reduction_arity_bits.len();
|
||||
for i in 0..num_reductions {
|
||||
let arity = 1 << fri_params.reduction_arity_bits[i];
|
||||
for arity_bits in &fri_params.reduction_arity_bits {
|
||||
let arity = 1 << arity_bits;
|
||||
|
||||
reverse_index_bits_in_place(&mut values.values);
|
||||
let chunked_values = values
|
||||
@ -115,14 +121,13 @@ fn fri_proof_of_work<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, c
|
||||
(0..=F::NEG_ONE.to_canonical_u64())
|
||||
.into_par_iter()
|
||||
.find_any(|&i| {
|
||||
C::InnerHasher::hash(
|
||||
current_hash
|
||||
C::InnerHasher::hash_no_pad(
|
||||
¤t_hash
|
||||
.elements
|
||||
.iter()
|
||||
.copied()
|
||||
.chain(Some(F::from_canonical_u64(i)))
|
||||
.collect(),
|
||||
false,
|
||||
.collect_vec(),
|
||||
)
|
||||
.elements[0]
|
||||
.to_canonical_u64()
|
||||
|
||||
@ -1,9 +1,13 @@
|
||||
use itertools::Itertools;
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_util::{log2_strict, reverse_index_bits_in_place};
|
||||
|
||||
use crate::fri::proof::{FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget};
|
||||
use crate::fri::FriConfig;
|
||||
use crate::fri::proof::{
|
||||
FriChallengesTarget, FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget,
|
||||
FriQueryStepTarget,
|
||||
};
|
||||
use crate::fri::structure::{FriBatchInfoTarget, FriInstanceInfoTarget, FriOpeningsTarget};
|
||||
use crate::fri::{FriConfig, FriParams};
|
||||
use crate::gadgets::interpolation::InterpolationGate;
|
||||
use crate::gates::gate::Gate;
|
||||
use crate::gates::interpolation::HighDegreeInterpolationGate;
|
||||
@ -11,14 +15,10 @@ use crate::gates::low_degree_interpolation::LowDegreeInterpolationGate;
|
||||
use crate::gates::random_access::RandomAccessGate;
|
||||
use crate::hash::hash_types::MerkleCapTarget;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::challenger::RecursiveChallenger;
|
||||
use crate::iop::ext_target::{flatten_target, ExtensionTarget};
|
||||
use crate::iop::target::{BoolTarget, Target};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::circuit_data::{CircuitConfig, CommonCircuitData};
|
||||
use crate::plonk::config::{AlgebraicConfig, AlgebraicHasher, GenericConfig};
|
||||
use crate::plonk::plonk_common::PlonkPolynomials;
|
||||
use crate::plonk::proof::OpeningSetTarget;
|
||||
use crate::plonk::config::{AlgebraicHasher, GenericConfig};
|
||||
use crate::util::reducing::ReducingFactorTarget;
|
||||
use crate::with_context;
|
||||
|
||||
@ -32,7 +32,6 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
arity_bits: usize,
|
||||
evals: &[ExtensionTarget<D>],
|
||||
beta: ExtensionTarget<D>,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> ExtensionTarget<D> {
|
||||
let arity = 1 << arity_bits;
|
||||
debug_assert_eq!(evals.len(), arity);
|
||||
@ -51,7 +50,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
// The answer is gotten by interpolating {(x*g^i, P(x*g^i))} and evaluating at beta.
|
||||
// `HighDegreeInterpolationGate` has degree `arity`, so we use the low-degree gate if
|
||||
// the arity is too large.
|
||||
if arity > common_data.quotient_degree_factor {
|
||||
if arity > self.config.max_quotient_degree_factor {
|
||||
self.interpolate_coset::<LowDegreeInterpolationGate<F, D>>(
|
||||
arity_bits,
|
||||
coset_start,
|
||||
@ -71,17 +70,13 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
/// Make sure we have enough wires and routed wires to do the FRI checks efficiently. This check
|
||||
/// isn't required -- without it we'd get errors elsewhere in the stack -- but just gives more
|
||||
/// helpful errors.
|
||||
fn check_recursion_config<C: GenericConfig<D, F = F>>(
|
||||
&self,
|
||||
max_fri_arity_bits: usize,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) {
|
||||
fn check_recursion_config<C: GenericConfig<D, F = F>>(&self, max_fri_arity_bits: usize) {
|
||||
let random_access = RandomAccessGate::<F, D>::new_from_config(
|
||||
&self.config,
|
||||
max_fri_arity_bits.max(self.config.fri_config.cap_height),
|
||||
);
|
||||
let (interpolation_wires, interpolation_routed_wires) =
|
||||
if 1 << max_fri_arity_bits > common_data.quotient_degree_factor {
|
||||
if 1 << max_fri_arity_bits > self.config.max_quotient_degree_factor {
|
||||
let gate = LowDegreeInterpolationGate::<F, D>::new(max_fri_arity_bits);
|
||||
(gate.num_wires(), gate.num_routed_wires())
|
||||
} else {
|
||||
@ -111,74 +106,48 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
|
||||
fn fri_verify_proof_of_work<H: AlgebraicHasher<F>>(
|
||||
&mut self,
|
||||
proof: &FriProofTarget<D>,
|
||||
challenger: &mut RecursiveChallenger<F, H, D>,
|
||||
fri_pow_response: Target,
|
||||
config: &FriConfig,
|
||||
) {
|
||||
let mut inputs = challenger.get_hash(self).elements.to_vec();
|
||||
inputs.push(proof.pow_witness);
|
||||
|
||||
let hash = self.hash_n_to_m::<H>(inputs, 1, false)[0];
|
||||
self.assert_leading_zeros(
|
||||
hash,
|
||||
fri_pow_response,
|
||||
config.proof_of_work_bits + (64 - F::order().bits()) as u32,
|
||||
);
|
||||
}
|
||||
|
||||
pub fn verify_fri_proof<C: AlgebraicConfig<D, F = F>>(
|
||||
pub fn verify_fri_proof<C: GenericConfig<D, F = F>>(
|
||||
&mut self,
|
||||
// Openings of the PLONK polynomials.
|
||||
os: &OpeningSetTarget<D>,
|
||||
// Point at which the PLONK polynomials are opened.
|
||||
zeta: ExtensionTarget<D>,
|
||||
instance: &FriInstanceInfoTarget<D>,
|
||||
openings: &FriOpeningsTarget<D>,
|
||||
challenges: &FriChallengesTarget<D>,
|
||||
initial_merkle_caps: &[MerkleCapTarget],
|
||||
proof: &FriProofTarget<D>,
|
||||
challenger: &mut RecursiveChallenger<F, C::Hasher, D>,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) {
|
||||
let config = &common_data.config;
|
||||
|
||||
if let Some(max_arity_bits) = common_data.fri_params.max_arity_bits() {
|
||||
self.check_recursion_config(max_arity_bits, common_data);
|
||||
params: &FriParams,
|
||||
) where
|
||||
C::Hasher: AlgebraicHasher<F>,
|
||||
{
|
||||
if let Some(max_arity_bits) = params.max_arity_bits() {
|
||||
self.check_recursion_config::<C>(max_arity_bits);
|
||||
}
|
||||
|
||||
debug_assert_eq!(
|
||||
common_data.fri_params.final_poly_len(),
|
||||
params.final_poly_len(),
|
||||
proof.final_poly.len(),
|
||||
"Final polynomial has wrong degree."
|
||||
);
|
||||
|
||||
// Size of the LDE domain.
|
||||
let n = common_data.lde_size();
|
||||
|
||||
challenger.observe_opening_set(os);
|
||||
|
||||
// Scaling factor to combine polynomials.
|
||||
let alpha = challenger.get_extension_challenge(self);
|
||||
|
||||
let betas = with_context!(
|
||||
self,
|
||||
"recover the random betas used in the FRI reductions.",
|
||||
proof
|
||||
.commit_phase_merkle_caps
|
||||
.iter()
|
||||
.map(|cap| {
|
||||
challenger.observe_cap(cap);
|
||||
challenger.get_extension_challenge(self)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
challenger.observe_extension_elements(&proof.final_poly.0);
|
||||
let n = params.lde_size();
|
||||
|
||||
with_context!(
|
||||
self,
|
||||
"check PoW",
|
||||
self.fri_verify_proof_of_work::<C::Hasher>(proof, challenger, &config.fri_config)
|
||||
self.fri_verify_proof_of_work::<C::Hasher>(challenges.fri_pow_response, ¶ms.config)
|
||||
);
|
||||
|
||||
// Check that parameters are coherent.
|
||||
debug_assert_eq!(
|
||||
config.fri_config.num_query_rounds,
|
||||
params.config.num_query_rounds,
|
||||
proof.query_round_proofs.len(),
|
||||
"Number of query rounds does not match config."
|
||||
);
|
||||
@ -186,11 +155,9 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
let precomputed_reduced_evals = with_context!(
|
||||
self,
|
||||
"precompute reduced evaluations",
|
||||
PrecomputedReducedEvalsTarget::from_os_and_alpha(
|
||||
os,
|
||||
alpha,
|
||||
common_data.degree_bits,
|
||||
zeta,
|
||||
PrecomputedReducedOpeningsTarget::from_os_and_alpha(
|
||||
openings,
|
||||
challenges.fri_alpha,
|
||||
self
|
||||
)
|
||||
);
|
||||
@ -210,17 +177,16 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
self,
|
||||
level,
|
||||
&format!("verify one (of {}) query rounds", num_queries),
|
||||
self.fri_verifier_query_round(
|
||||
zeta,
|
||||
alpha,
|
||||
precomputed_reduced_evals,
|
||||
self.fri_verifier_query_round::<C>(
|
||||
instance,
|
||||
challenges,
|
||||
&precomputed_reduced_evals,
|
||||
initial_merkle_caps,
|
||||
proof,
|
||||
challenger,
|
||||
challenges.fri_query_indices[i],
|
||||
n,
|
||||
&betas,
|
||||
round_proof,
|
||||
common_data,
|
||||
params,
|
||||
)
|
||||
);
|
||||
}
|
||||
@ -255,85 +221,73 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
|
||||
fn fri_combine_initial<C: GenericConfig<D, F = F>>(
|
||||
&mut self,
|
||||
instance: &FriInstanceInfoTarget<D>,
|
||||
proof: &FriInitialTreeProofTarget,
|
||||
alpha: ExtensionTarget<D>,
|
||||
subgroup_x: Target,
|
||||
vanish_zeta: ExtensionTarget<D>,
|
||||
precomputed_reduced_evals: PrecomputedReducedEvalsTarget<D>,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
precomputed_reduced_evals: &PrecomputedReducedOpeningsTarget<D>,
|
||||
params: &FriParams,
|
||||
) -> ExtensionTarget<D> {
|
||||
assert!(D > 1, "Not implemented for D=1.");
|
||||
let config = &common_data.config;
|
||||
let degree_log = common_data.degree_bits;
|
||||
let degree_log = params.degree_bits;
|
||||
debug_assert_eq!(
|
||||
degree_log,
|
||||
common_data.config.fri_config.cap_height + proof.evals_proofs[0].1.siblings.len()
|
||||
- config.fri_config.rate_bits
|
||||
params.config.cap_height + proof.evals_proofs[0].1.siblings.len()
|
||||
- params.config.rate_bits
|
||||
);
|
||||
let subgroup_x = self.convert_to_ext(subgroup_x);
|
||||
let mut alpha = ReducingFactorTarget::new(alpha);
|
||||
let mut sum = self.zero_extension();
|
||||
|
||||
// We will add two terms to `sum`: one for openings at `x`, and one for openings at `g x`.
|
||||
// All polynomials are opened at `x`.
|
||||
let single_evals = [
|
||||
PlonkPolynomials::CONSTANTS_SIGMAS,
|
||||
PlonkPolynomials::WIRES,
|
||||
PlonkPolynomials::ZS_PARTIAL_PRODUCTS,
|
||||
PlonkPolynomials::QUOTIENT,
|
||||
]
|
||||
.iter()
|
||||
.flat_map(|&p| proof.unsalted_evals(p, config.zero_knowledge))
|
||||
.copied()
|
||||
.collect::<Vec<_>>();
|
||||
let single_composition_eval = alpha.reduce_base(&single_evals, self);
|
||||
let single_numerator =
|
||||
self.sub_extension(single_composition_eval, precomputed_reduced_evals.single);
|
||||
sum = self.div_add_extension(single_numerator, vanish_zeta, sum);
|
||||
alpha.reset();
|
||||
|
||||
// Polynomials opened at `x` and `g x`, i.e., the Zs polynomials.
|
||||
let zs_evals = proof
|
||||
.unsalted_evals(PlonkPolynomials::ZS_PARTIAL_PRODUCTS, config.zero_knowledge)
|
||||
for (batch, reduced_openings) in instance
|
||||
.batches
|
||||
.iter()
|
||||
.take(common_data.zs_range().end)
|
||||
.copied()
|
||||
.collect::<Vec<_>>();
|
||||
let zs_composition_eval = alpha.reduce_base(&zs_evals, self);
|
||||
.zip(&precomputed_reduced_evals.reduced_openings_at_point)
|
||||
{
|
||||
let FriBatchInfoTarget { point, polynomials } = batch;
|
||||
let evals = polynomials
|
||||
.iter()
|
||||
.map(|p| {
|
||||
let poly_blinding = instance.oracles[p.oracle_index].blinding;
|
||||
let salted = params.hiding && poly_blinding;
|
||||
proof.unsalted_eval(p.oracle_index, p.polynomial_index, salted)
|
||||
})
|
||||
.collect_vec();
|
||||
let reduced_evals = alpha.reduce_base(&evals, self);
|
||||
let numerator = self.sub_extension(reduced_evals, *reduced_openings);
|
||||
let denominator = self.sub_extension(subgroup_x, *point);
|
||||
sum = alpha.shift(sum, self);
|
||||
sum = self.div_add_extension(numerator, denominator, sum);
|
||||
}
|
||||
|
||||
let zs_numerator =
|
||||
self.sub_extension(zs_composition_eval, precomputed_reduced_evals.zs_right);
|
||||
let zs_denominator = self.sub_extension(subgroup_x, precomputed_reduced_evals.zeta_right);
|
||||
sum = alpha.shift(sum, self); // TODO: alpha^count could be precomputed.
|
||||
sum = self.div_add_extension(zs_numerator, zs_denominator, sum);
|
||||
|
||||
sum
|
||||
// Multiply the final polynomial by `X`, so that `final_poly` has the maximum degree for
|
||||
// which the LDT will pass. See github.com/mir-protocol/plonky2/pull/436 for details.
|
||||
self.mul_extension(sum, subgroup_x)
|
||||
}
|
||||
|
||||
fn fri_verifier_query_round<C: AlgebraicConfig<D, F = F>>(
|
||||
fn fri_verifier_query_round<C: GenericConfig<D, F = F>>(
|
||||
&mut self,
|
||||
zeta: ExtensionTarget<D>,
|
||||
alpha: ExtensionTarget<D>,
|
||||
precomputed_reduced_evals: PrecomputedReducedEvalsTarget<D>,
|
||||
instance: &FriInstanceInfoTarget<D>,
|
||||
challenges: &FriChallengesTarget<D>,
|
||||
precomputed_reduced_evals: &PrecomputedReducedOpeningsTarget<D>,
|
||||
initial_merkle_caps: &[MerkleCapTarget],
|
||||
proof: &FriProofTarget<D>,
|
||||
challenger: &mut RecursiveChallenger<F, C::Hasher, D>,
|
||||
x_index: Target,
|
||||
n: usize,
|
||||
betas: &[ExtensionTarget<D>],
|
||||
round_proof: &FriQueryRoundTarget<D>,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) {
|
||||
params: &FriParams,
|
||||
) where
|
||||
C::Hasher: AlgebraicHasher<F>,
|
||||
{
|
||||
let n_log = log2_strict(n);
|
||||
|
||||
// Note that this `low_bits` decomposition permits non-canonical binary encodings. Here we
|
||||
// verify that this has a negligible impact on soundness error.
|
||||
Self::assert_noncanonical_indices_ok(&common_data.config);
|
||||
let x_index = challenger.get_challenge(self);
|
||||
Self::assert_noncanonical_indices_ok(¶ms.config);
|
||||
let mut x_index_bits = self.low_bits(x_index, n_log, F::BITS);
|
||||
|
||||
let cap_index = self.le_sum(
|
||||
x_index_bits[x_index_bits.len() - common_data.config.fri_config.cap_height..].iter(),
|
||||
);
|
||||
let cap_index =
|
||||
self.le_sum(x_index_bits[x_index_bits.len() - params.config.cap_height..].iter());
|
||||
with_context!(
|
||||
self,
|
||||
"check FRI initial proof",
|
||||
@ -346,16 +300,12 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
);
|
||||
|
||||
// `subgroup_x` is `subgroup[x_index]`, i.e., the actual field element in the domain.
|
||||
let (mut subgroup_x, vanish_zeta) = with_context!(self, "compute x from its index", {
|
||||
let mut subgroup_x = with_context!(self, "compute x from its index", {
|
||||
let g = self.constant(F::coset_shift());
|
||||
let phi = F::primitive_root_of_unity(n_log);
|
||||
let phi = self.exp_from_bits_const_base(phi, x_index_bits.iter().rev());
|
||||
let g_ext = self.convert_to_ext(g);
|
||||
let phi_ext = self.convert_to_ext(phi);
|
||||
// `subgroup_x = g*phi, vanish_zeta = g*phi - zeta`
|
||||
let subgroup_x = self.mul(g, phi);
|
||||
let vanish_zeta = self.mul_sub_extension(g_ext, phi_ext, zeta);
|
||||
(subgroup_x, vanish_zeta)
|
||||
// subgroup_x = g * phi
|
||||
self.mul(g, phi)
|
||||
});
|
||||
|
||||
// old_eval is the last derived evaluation; it will be checked for consistency with its
|
||||
@ -363,22 +313,17 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
let mut old_eval = with_context!(
|
||||
self,
|
||||
"combine initial oracles",
|
||||
self.fri_combine_initial(
|
||||
self.fri_combine_initial::<C>(
|
||||
instance,
|
||||
&round_proof.initial_trees_proof,
|
||||
alpha,
|
||||
challenges.fri_alpha,
|
||||
subgroup_x,
|
||||
vanish_zeta,
|
||||
precomputed_reduced_evals,
|
||||
common_data,
|
||||
params,
|
||||
)
|
||||
);
|
||||
|
||||
for (i, &arity_bits) in common_data
|
||||
.fri_params
|
||||
.reduction_arity_bits
|
||||
.iter()
|
||||
.enumerate()
|
||||
{
|
||||
for (i, &arity_bits) in params.reduction_arity_bits.iter().enumerate() {
|
||||
let evals = &round_proof.steps[i].evals;
|
||||
|
||||
// Split x_index into the index of the coset x is in, and the index of x within that coset.
|
||||
@ -393,13 +338,12 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
old_eval = with_context!(
|
||||
self,
|
||||
"infer evaluation using interpolation",
|
||||
self.compute_evaluation(
|
||||
self.compute_evaluation::<C>(
|
||||
subgroup_x,
|
||||
x_index_within_coset_bits,
|
||||
arity_bits,
|
||||
evals,
|
||||
betas[i],
|
||||
common_data
|
||||
challenges.fri_betas[i],
|
||||
)
|
||||
);
|
||||
|
||||
@ -446,52 +390,110 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
/// Thus ambiguous elements contribute a negligible amount to soundness error.
|
||||
///
|
||||
/// Here we compare the probabilities as a sanity check, to verify the claim above.
|
||||
fn assert_noncanonical_indices_ok(config: &CircuitConfig) {
|
||||
fn assert_noncanonical_indices_ok(config: &FriConfig) {
|
||||
let num_ambiguous_elems = u64::MAX - F::ORDER + 1;
|
||||
let query_error = config.rate();
|
||||
let p_ambiguous = (num_ambiguous_elems as f64) / (F::ORDER as f64);
|
||||
assert!(p_ambiguous < query_error * 1e-5,
|
||||
"A non-negligible portion of field elements are in the range that permits non-canonical encodings. Need to do more analysis or enforce canonical encodings.");
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
struct PrecomputedReducedEvalsTarget<const D: usize> {
|
||||
pub single: ExtensionTarget<D>,
|
||||
pub zs_right: ExtensionTarget<D>,
|
||||
pub zeta_right: ExtensionTarget<D>,
|
||||
}
|
||||
pub(crate) fn add_virtual_fri_proof(
|
||||
&mut self,
|
||||
num_leaves_per_oracle: &[usize],
|
||||
params: &FriParams,
|
||||
) -> FriProofTarget<D> {
|
||||
let cap_height = params.config.cap_height;
|
||||
let num_queries = params.config.num_query_rounds;
|
||||
let commit_phase_merkle_caps = (0..params.reduction_arity_bits.len())
|
||||
.map(|_| self.add_virtual_cap(cap_height))
|
||||
.collect();
|
||||
let query_round_proofs = (0..num_queries)
|
||||
.map(|_| self.add_virtual_fri_query(num_leaves_per_oracle, params))
|
||||
.collect();
|
||||
let final_poly = self.add_virtual_poly_coeff_ext(params.final_poly_len());
|
||||
let pow_witness = self.add_virtual_target();
|
||||
FriProofTarget {
|
||||
commit_phase_merkle_caps,
|
||||
query_round_proofs,
|
||||
final_poly,
|
||||
pow_witness,
|
||||
}
|
||||
}
|
||||
|
||||
impl<const D: usize> PrecomputedReducedEvalsTarget<D> {
|
||||
fn from_os_and_alpha<F: RichField + Extendable<D>>(
|
||||
os: &OpeningSetTarget<D>,
|
||||
alpha: ExtensionTarget<D>,
|
||||
degree_log: usize,
|
||||
zeta: ExtensionTarget<D>,
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
) -> Self {
|
||||
let mut alpha = ReducingFactorTarget::new(alpha);
|
||||
let single = alpha.reduce(
|
||||
&os.constants
|
||||
.iter()
|
||||
.chain(&os.plonk_sigmas)
|
||||
.chain(&os.wires)
|
||||
.chain(&os.plonk_zs)
|
||||
.chain(&os.partial_products)
|
||||
.chain(&os.quotient_polys)
|
||||
.copied()
|
||||
.collect::<Vec<_>>(),
|
||||
builder,
|
||||
);
|
||||
let zs_right = alpha.reduce(&os.plonk_zs_right, builder);
|
||||
fn add_virtual_fri_query(
|
||||
&mut self,
|
||||
num_leaves_per_oracle: &[usize],
|
||||
params: &FriParams,
|
||||
) -> FriQueryRoundTarget<D> {
|
||||
let cap_height = params.config.cap_height;
|
||||
assert!(params.lde_bits() >= cap_height);
|
||||
let mut merkle_proof_len = params.lde_bits() - cap_height;
|
||||
|
||||
let g = builder.constant_extension(F::Extension::primitive_root_of_unity(degree_log));
|
||||
let zeta_right = builder.mul_extension(g, zeta);
|
||||
let initial_trees_proof =
|
||||
self.add_virtual_fri_initial_trees_proof(num_leaves_per_oracle, merkle_proof_len);
|
||||
|
||||
Self {
|
||||
single,
|
||||
zs_right,
|
||||
zeta_right,
|
||||
let mut steps = vec![];
|
||||
for &arity_bits in ¶ms.reduction_arity_bits {
|
||||
assert!(merkle_proof_len >= arity_bits);
|
||||
merkle_proof_len -= arity_bits;
|
||||
steps.push(self.add_virtual_fri_query_step(arity_bits, merkle_proof_len));
|
||||
}
|
||||
|
||||
FriQueryRoundTarget {
|
||||
initial_trees_proof,
|
||||
steps,
|
||||
}
|
||||
}
|
||||
|
||||
fn add_virtual_fri_initial_trees_proof(
|
||||
&mut self,
|
||||
num_leaves_per_oracle: &[usize],
|
||||
initial_merkle_proof_len: usize,
|
||||
) -> FriInitialTreeProofTarget {
|
||||
let evals_proofs = num_leaves_per_oracle
|
||||
.iter()
|
||||
.map(|&num_oracle_leaves| {
|
||||
let leaves = self.add_virtual_targets(num_oracle_leaves);
|
||||
let merkle_proof = self.add_virtual_merkle_proof(initial_merkle_proof_len);
|
||||
(leaves, merkle_proof)
|
||||
})
|
||||
.collect();
|
||||
FriInitialTreeProofTarget { evals_proofs }
|
||||
}
|
||||
|
||||
fn add_virtual_fri_query_step(
|
||||
&mut self,
|
||||
arity_bits: usize,
|
||||
merkle_proof_len: usize,
|
||||
) -> FriQueryStepTarget<D> {
|
||||
FriQueryStepTarget {
|
||||
evals: self.add_virtual_extension_targets(1 << arity_bits),
|
||||
merkle_proof: self.add_virtual_merkle_proof(merkle_proof_len),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// For each opening point, holds the reduced (by `alpha`) evaluations of each polynomial that's
|
||||
/// opened at that point.
|
||||
#[derive(Clone)]
|
||||
struct PrecomputedReducedOpeningsTarget<const D: usize> {
|
||||
reduced_openings_at_point: Vec<ExtensionTarget<D>>,
|
||||
}
|
||||
|
||||
impl<const D: usize> PrecomputedReducedOpeningsTarget<D> {
|
||||
fn from_os_and_alpha<F: RichField + Extendable<D>>(
|
||||
openings: &FriOpeningsTarget<D>,
|
||||
alpha: ExtensionTarget<D>,
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
) -> Self {
|
||||
let reduced_openings_at_point = openings
|
||||
.batches
|
||||
.iter()
|
||||
.map(|batch| ReducingFactorTarget::new(alpha).reduce(&batch.values, builder))
|
||||
.collect();
|
||||
Self {
|
||||
reduced_openings_at_point,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -22,7 +22,7 @@ pub enum FriReductionStrategy {
|
||||
|
||||
impl FriReductionStrategy {
|
||||
/// The arity of each FRI reduction step, expressed as the log2 of the actual arity.
|
||||
pub(crate) fn reduction_arity_bits(
|
||||
pub fn reduction_arity_bits(
|
||||
&self,
|
||||
mut degree_bits: usize,
|
||||
rate_bits: usize,
|
||||
|
||||
83
plonky2/src/fri/structure.rs
Normal file
83
plonky2/src/fri/structure.rs
Normal file
@ -0,0 +1,83 @@
|
||||
//! Information about the structure of a FRI instance, in terms of the oracles and polynomials
|
||||
//! involved, and the points they are opened at.
|
||||
|
||||
use std::ops::Range;
|
||||
|
||||
use crate::field::extension_field::Extendable;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
|
||||
/// Describes an instance of a FRI-based batch opening.
|
||||
pub struct FriInstanceInfo<F: RichField + Extendable<D>, const D: usize> {
|
||||
/// The oracles involved, not counting oracles created during the commit phase.
|
||||
pub oracles: Vec<FriOracleInfo>,
|
||||
/// Batches of openings, where each batch is associated with a particular point.
|
||||
pub batches: Vec<FriBatchInfo<F, D>>,
|
||||
}
|
||||
|
||||
/// Describes an instance of a FRI-based batch opening.
|
||||
pub struct FriInstanceInfoTarget<const D: usize> {
|
||||
/// The oracles involved, not counting oracles created during the commit phase.
|
||||
pub oracles: Vec<FriOracleInfo>,
|
||||
/// Batches of openings, where each batch is associated with a particular point.
|
||||
pub batches: Vec<FriBatchInfoTarget<D>>,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub struct FriOracleInfo {
|
||||
pub blinding: bool,
|
||||
}
|
||||
|
||||
/// A batch of openings at a particular point.
|
||||
pub struct FriBatchInfo<F: RichField + Extendable<D>, const D: usize> {
|
||||
pub point: F::Extension,
|
||||
pub polynomials: Vec<FriPolynomialInfo>,
|
||||
}
|
||||
|
||||
/// A batch of openings at a particular point.
|
||||
pub struct FriBatchInfoTarget<const D: usize> {
|
||||
pub point: ExtensionTarget<D>,
|
||||
pub polynomials: Vec<FriPolynomialInfo>,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct FriPolynomialInfo {
|
||||
/// Index into `FriInstanceInfoTarget`'s `oracles` list.
|
||||
pub oracle_index: usize,
|
||||
/// Index of the polynomial within the oracle.
|
||||
pub polynomial_index: usize,
|
||||
}
|
||||
|
||||
impl FriPolynomialInfo {
|
||||
pub fn from_range(
|
||||
oracle_index: usize,
|
||||
polynomial_indices: Range<usize>,
|
||||
) -> Vec<FriPolynomialInfo> {
|
||||
polynomial_indices
|
||||
.map(|polynomial_index| FriPolynomialInfo {
|
||||
oracle_index,
|
||||
polynomial_index,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// Opened values of each polynomial.
|
||||
pub struct FriOpenings<F: RichField + Extendable<D>, const D: usize> {
|
||||
pub batches: Vec<FriOpeningBatch<F, D>>,
|
||||
}
|
||||
|
||||
/// Opened values of each polynomial that's opened at a particular point.
|
||||
pub struct FriOpeningBatch<F: RichField + Extendable<D>, const D: usize> {
|
||||
pub values: Vec<F::Extension>,
|
||||
}
|
||||
|
||||
/// Opened values of each polynomial.
|
||||
pub struct FriOpeningsTarget<const D: usize> {
|
||||
pub batches: Vec<FriOpeningBatchTarget<D>>,
|
||||
}
|
||||
|
||||
/// Opened values of each polynomial that's opened at a particular point.
|
||||
pub struct FriOpeningBatchTarget<const D: usize> {
|
||||
pub values: Vec<ExtensionTarget<D>>,
|
||||
}
|
||||
@ -4,15 +4,13 @@ use plonky2_field::field_types::Field;
|
||||
use plonky2_field::interpolation::{barycentric_weights, interpolate};
|
||||
use plonky2_util::{log2_strict, reverse_index_bits_in_place};
|
||||
|
||||
use crate::fri::proof::{FriInitialTreeProof, FriProof, FriQueryRound};
|
||||
use crate::fri::FriConfig;
|
||||
use crate::fri::proof::{FriChallenges, FriInitialTreeProof, FriProof, FriQueryRound};
|
||||
use crate::fri::structure::{FriBatchInfo, FriInstanceInfo, FriOpenings};
|
||||
use crate::fri::{FriConfig, FriParams};
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::hash::merkle_proofs::verify_merkle_proof;
|
||||
use crate::hash::merkle_tree::MerkleCap;
|
||||
use crate::plonk::circuit_data::CommonCircuitData;
|
||||
use crate::plonk::config::{GenericConfig, Hasher};
|
||||
use crate::plonk::plonk_common::PlonkPolynomials;
|
||||
use crate::plonk::proof::{OpeningSet, ProofChallenges};
|
||||
use crate::util::reducing::ReducingFactor;
|
||||
use crate::util::reverse_bits;
|
||||
|
||||
@ -58,52 +56,51 @@ pub(crate) fn fri_verify_proof_of_work<F: RichField + Extendable<D>, const D: us
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn verify_fri_proof<
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
const D: usize,
|
||||
>(
|
||||
// Openings of the PLONK polynomials.
|
||||
os: &OpeningSet<F, D>,
|
||||
challenges: &ProofChallenges<F, D>,
|
||||
pub fn verify_fri_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
|
||||
instance: &FriInstanceInfo<F, D>,
|
||||
openings: &FriOpenings<F, D>,
|
||||
challenges: &FriChallenges<F, D>,
|
||||
initial_merkle_caps: &[MerkleCap<F, C::Hasher>],
|
||||
proof: &FriProof<F, C::Hasher, D>,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> Result<()> {
|
||||
let config = &common_data.config;
|
||||
params: &FriParams,
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
ensure!(
|
||||
common_data.fri_params.final_poly_len() == proof.final_poly.len(),
|
||||
params.final_poly_len() == proof.final_poly.len(),
|
||||
"Final polynomial has wrong degree."
|
||||
);
|
||||
|
||||
// Size of the LDE domain.
|
||||
let n = common_data.lde_size();
|
||||
let n = params.lde_size();
|
||||
|
||||
// Check PoW.
|
||||
fri_verify_proof_of_work(challenges.fri_pow_response, &config.fri_config)?;
|
||||
fri_verify_proof_of_work(challenges.fri_pow_response, ¶ms.config)?;
|
||||
|
||||
// Check that parameters are coherent.
|
||||
ensure!(
|
||||
config.fri_config.num_query_rounds == proof.query_round_proofs.len(),
|
||||
params.config.num_query_rounds == proof.query_round_proofs.len(),
|
||||
"Number of query rounds does not match config."
|
||||
);
|
||||
|
||||
let precomputed_reduced_evals =
|
||||
PrecomputedReducedEvals::from_os_and_alpha(os, challenges.fri_alpha);
|
||||
PrecomputedReducedOpenings::from_os_and_alpha(openings, challenges.fri_alpha);
|
||||
for (&x_index, round_proof) in challenges
|
||||
.fri_query_indices
|
||||
.iter()
|
||||
.zip(&proof.query_round_proofs)
|
||||
{
|
||||
fri_verifier_query_round::<F, C, D>(
|
||||
instance,
|
||||
challenges,
|
||||
precomputed_reduced_evals,
|
||||
&precomputed_reduced_evals,
|
||||
initial_merkle_caps,
|
||||
proof,
|
||||
x_index,
|
||||
n,
|
||||
round_proof,
|
||||
common_data,
|
||||
params,
|
||||
)?;
|
||||
}
|
||||
|
||||
@ -114,7 +111,10 @@ fn fri_verify_initial_proof<F: RichField, H: Hasher<F>>(
|
||||
x_index: usize,
|
||||
proof: &FriInitialTreeProof<F, H>,
|
||||
initial_merkle_caps: &[MerkleCap<F, H>],
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
for ((evals, merkle_proof), cap) in proof.evals_proofs.iter().zip(initial_merkle_caps) {
|
||||
verify_merkle_proof::<F, H>(evals.clone(), x_index, cap, merkle_proof)?;
|
||||
}
|
||||
@ -127,51 +127,42 @@ pub(crate) fn fri_combine_initial<
|
||||
C: GenericConfig<D, F = F>,
|
||||
const D: usize,
|
||||
>(
|
||||
instance: &FriInstanceInfo<F, D>,
|
||||
proof: &FriInitialTreeProof<F, C::Hasher>,
|
||||
alpha: F::Extension,
|
||||
zeta: F::Extension,
|
||||
subgroup_x: F,
|
||||
precomputed_reduced_evals: PrecomputedReducedEvals<F, D>,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
precomputed_reduced_evals: &PrecomputedReducedOpenings<F, D>,
|
||||
params: &FriParams,
|
||||
) -> F::Extension {
|
||||
let config = &common_data.config;
|
||||
assert!(D > 1, "Not implemented for D=1.");
|
||||
let degree_log = common_data.degree_bits;
|
||||
let subgroup_x = F::Extension::from_basefield(subgroup_x);
|
||||
let mut alpha = ReducingFactor::new(alpha);
|
||||
let mut sum = F::Extension::ZERO;
|
||||
|
||||
// We will add two terms to `sum`: one for openings at `x`, and one for openings at `g x`.
|
||||
// All polynomials are opened at `x`.
|
||||
let single_evals = [
|
||||
PlonkPolynomials::CONSTANTS_SIGMAS,
|
||||
PlonkPolynomials::WIRES,
|
||||
PlonkPolynomials::ZS_PARTIAL_PRODUCTS,
|
||||
PlonkPolynomials::QUOTIENT,
|
||||
]
|
||||
.iter()
|
||||
.flat_map(|&p| proof.unsalted_evals(p, config.zero_knowledge))
|
||||
.map(|&e| F::Extension::from_basefield(e));
|
||||
let single_composition_eval = alpha.reduce(single_evals);
|
||||
let single_numerator = single_composition_eval - precomputed_reduced_evals.single;
|
||||
let single_denominator = subgroup_x - zeta;
|
||||
sum += single_numerator / single_denominator;
|
||||
alpha.reset();
|
||||
|
||||
// Z polynomials have an additional opening at `g x`.
|
||||
let zs_evals = proof
|
||||
.unsalted_evals(PlonkPolynomials::ZS_PARTIAL_PRODUCTS, config.zero_knowledge)
|
||||
for (batch, reduced_openings) in instance
|
||||
.batches
|
||||
.iter()
|
||||
.map(|&e| F::Extension::from_basefield(e))
|
||||
.take(common_data.zs_range().end);
|
||||
let zs_composition_eval = alpha.reduce(zs_evals);
|
||||
let zeta_right = F::Extension::primitive_root_of_unity(degree_log) * zeta;
|
||||
let zs_numerator = zs_composition_eval - precomputed_reduced_evals.zs_right;
|
||||
let zs_denominator = subgroup_x - zeta_right;
|
||||
sum = alpha.shift(sum);
|
||||
sum += zs_numerator / zs_denominator;
|
||||
.zip(&precomputed_reduced_evals.reduced_openings_at_point)
|
||||
{
|
||||
let FriBatchInfo { point, polynomials } = batch;
|
||||
let evals = polynomials
|
||||
.iter()
|
||||
.map(|p| {
|
||||
let poly_blinding = instance.oracles[p.oracle_index].blinding;
|
||||
let salted = params.hiding && poly_blinding;
|
||||
proof.unsalted_eval(p.oracle_index, p.polynomial_index, salted)
|
||||
})
|
||||
.map(F::Extension::from_basefield);
|
||||
let reduced_evals = alpha.reduce(evals);
|
||||
let numerator = reduced_evals - *reduced_openings;
|
||||
let denominator = subgroup_x - *point;
|
||||
sum = alpha.shift(sum);
|
||||
sum += numerator / denominator;
|
||||
}
|
||||
|
||||
sum
|
||||
// Multiply the final polynomial by `X`, so that `final_poly` has the maximum degree for
|
||||
// which the LDT will pass. See github.com/mir-protocol/plonky2/pull/436 for details.
|
||||
sum * subgroup_x
|
||||
}
|
||||
|
||||
fn fri_verifier_query_round<
|
||||
@ -179,15 +170,19 @@ fn fri_verifier_query_round<
|
||||
C: GenericConfig<D, F = F>,
|
||||
const D: usize,
|
||||
>(
|
||||
challenges: &ProofChallenges<F, D>,
|
||||
precomputed_reduced_evals: PrecomputedReducedEvals<F, D>,
|
||||
instance: &FriInstanceInfo<F, D>,
|
||||
challenges: &FriChallenges<F, D>,
|
||||
precomputed_reduced_evals: &PrecomputedReducedOpenings<F, D>,
|
||||
initial_merkle_caps: &[MerkleCap<F, C::Hasher>],
|
||||
proof: &FriProof<F, C::Hasher, D>,
|
||||
mut x_index: usize,
|
||||
n: usize,
|
||||
round_proof: &FriQueryRound<F, C::Hasher, D>,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> Result<()> {
|
||||
params: &FriParams,
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
fri_verify_initial_proof::<F, C::Hasher>(
|
||||
x_index,
|
||||
&round_proof.initial_trees_proof,
|
||||
@ -200,21 +195,16 @@ fn fri_verifier_query_round<
|
||||
|
||||
// old_eval is the last derived evaluation; it will be checked for consistency with its
|
||||
// committed "parent" value in the next iteration.
|
||||
let mut old_eval = fri_combine_initial(
|
||||
let mut old_eval = fri_combine_initial::<F, C, D>(
|
||||
instance,
|
||||
&round_proof.initial_trees_proof,
|
||||
challenges.fri_alpha,
|
||||
challenges.plonk_zeta,
|
||||
subgroup_x,
|
||||
precomputed_reduced_evals,
|
||||
common_data,
|
||||
params,
|
||||
);
|
||||
|
||||
for (i, &arity_bits) in common_data
|
||||
.fri_params
|
||||
.reduction_arity_bits
|
||||
.iter()
|
||||
.enumerate()
|
||||
{
|
||||
for (i, &arity_bits) in params.reduction_arity_bits.iter().enumerate() {
|
||||
let arity = 1 << arity_bits;
|
||||
let evals = &round_proof.steps[i].evals;
|
||||
|
||||
@ -257,28 +247,22 @@ fn fri_verifier_query_round<
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Holds the reduced (by `alpha`) evaluations at `zeta` for the polynomial opened just at
|
||||
/// zeta, for `Z` at zeta and for `Z` at `g*zeta`.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub(crate) struct PrecomputedReducedEvals<F: RichField + Extendable<D>, const D: usize> {
|
||||
pub single: F::Extension,
|
||||
pub zs_right: F::Extension,
|
||||
/// For each opening point, holds the reduced (by `alpha`) evaluations of each polynomial that's
|
||||
/// opened at that point.
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct PrecomputedReducedOpenings<F: RichField + Extendable<D>, const D: usize> {
|
||||
pub reduced_openings_at_point: Vec<F::Extension>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> PrecomputedReducedEvals<F, D> {
|
||||
pub(crate) fn from_os_and_alpha(os: &OpeningSet<F, D>, alpha: F::Extension) -> Self {
|
||||
let mut alpha = ReducingFactor::new(alpha);
|
||||
let single = alpha.reduce(
|
||||
os.constants
|
||||
.iter()
|
||||
.chain(&os.plonk_sigmas)
|
||||
.chain(&os.wires)
|
||||
.chain(&os.plonk_zs)
|
||||
.chain(&os.partial_products)
|
||||
.chain(&os.quotient_polys),
|
||||
);
|
||||
let zs_right = alpha.reduce(os.plonk_zs_right.iter());
|
||||
|
||||
Self { single, zs_right }
|
||||
impl<F: RichField + Extendable<D>, const D: usize> PrecomputedReducedOpenings<F, D> {
|
||||
pub(crate) fn from_os_and_alpha(openings: &FriOpenings<F, D>, alpha: F::Extension) -> Self {
|
||||
let reduced_openings_at_point = openings
|
||||
.batches
|
||||
.iter()
|
||||
.map(|batch| ReducingFactor::new(alpha).reduce(batch.values.iter()))
|
||||
.collect();
|
||||
Self {
|
||||
reduced_openings_at_point,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
71
plonky2/src/fri/witness_util.rs
Normal file
71
plonky2/src/fri/witness_util.rs
Normal file
@ -0,0 +1,71 @@
|
||||
use itertools::Itertools;
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
|
||||
use crate::fri::proof::{FriProof, FriProofTarget};
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::witness::Witness;
|
||||
use crate::plonk::config::AlgebraicHasher;
|
||||
|
||||
/// Set the targets in a `FriProofTarget` to their corresponding values in a `FriProof`.
|
||||
pub fn set_fri_proof_target<F, W, H, const D: usize>(
|
||||
witness: &mut W,
|
||||
fri_proof_target: &FriProofTarget<D>,
|
||||
fri_proof: &FriProof<F, H, D>,
|
||||
) where
|
||||
F: RichField + Extendable<D>,
|
||||
W: Witness<F> + ?Sized,
|
||||
H: AlgebraicHasher<F>,
|
||||
{
|
||||
witness.set_target(fri_proof_target.pow_witness, fri_proof.pow_witness);
|
||||
|
||||
for (&t, &x) in fri_proof_target
|
||||
.final_poly
|
||||
.0
|
||||
.iter()
|
||||
.zip_eq(&fri_proof.final_poly.coeffs)
|
||||
{
|
||||
witness.set_extension_target(t, x);
|
||||
}
|
||||
|
||||
for (t, x) in fri_proof_target
|
||||
.commit_phase_merkle_caps
|
||||
.iter()
|
||||
.zip_eq(&fri_proof.commit_phase_merkle_caps)
|
||||
{
|
||||
witness.set_cap_target(t, x);
|
||||
}
|
||||
|
||||
for (qt, q) in fri_proof_target
|
||||
.query_round_proofs
|
||||
.iter()
|
||||
.zip_eq(&fri_proof.query_round_proofs)
|
||||
{
|
||||
for (at, a) in qt
|
||||
.initial_trees_proof
|
||||
.evals_proofs
|
||||
.iter()
|
||||
.zip_eq(&q.initial_trees_proof.evals_proofs)
|
||||
{
|
||||
for (&t, &x) in at.0.iter().zip_eq(&a.0) {
|
||||
witness.set_target(t, x);
|
||||
}
|
||||
for (&t, &x) in at.1.siblings.iter().zip_eq(&a.1.siblings) {
|
||||
witness.set_hash_target(t, x);
|
||||
}
|
||||
}
|
||||
|
||||
for (st, s) in qt.steps.iter().zip_eq(&q.steps) {
|
||||
for (&t, &x) in st.evals.iter().zip_eq(&s.evals) {
|
||||
witness.set_extension_target(t, x);
|
||||
}
|
||||
for (&t, &x) in st
|
||||
.merkle_proof
|
||||
.siblings
|
||||
.iter()
|
||||
.zip_eq(&s.merkle_proof.siblings)
|
||||
{
|
||||
witness.set_hash_target(t, x);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,7 +1,7 @@
|
||||
use std::borrow::Borrow;
|
||||
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
use plonky2_field::field_types::Field64;
|
||||
|
||||
use crate::gates::arithmetic_base::ArithmeticGate;
|
||||
use crate::gates::exponentiation::ExponentiationGate;
|
||||
@ -317,11 +317,17 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
let x_ext = self.convert_to_ext(x);
|
||||
self.inverse_extension(x_ext).0[0]
|
||||
}
|
||||
|
||||
pub fn not(&mut self, b: BoolTarget) -> BoolTarget {
|
||||
let one = self.one();
|
||||
let res = self.sub(one, b.target);
|
||||
BoolTarget::new_unsafe(res)
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents a base arithmetic operation in the circuit. Used to memoize results.
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
|
||||
pub(crate) struct BaseArithmeticOperation<F: PrimeField> {
|
||||
pub(crate) struct BaseArithmeticOperation<F: Field64> {
|
||||
const_0: F,
|
||||
const_1: F,
|
||||
multiplicand_0: Target,
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
use plonky2_field::extension_field::FieldExtension;
|
||||
use plonky2_field::extension_field::{Extendable, OEF};
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::field_types::{Field, Field64};
|
||||
use plonky2_util::bits_u64;
|
||||
|
||||
use crate::gates::arithmetic_extension::ArithmeticExtensionGate;
|
||||
@ -548,7 +548,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
|
||||
/// Represents an extension arithmetic operation in the circuit. Used to memoize results.
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
|
||||
pub(crate) struct ExtensionArithmeticOperation<F: PrimeField + Extendable<D>, const D: usize> {
|
||||
pub(crate) struct ExtensionArithmeticOperation<F: Field64 + Extendable<D>, const D: usize> {
|
||||
const_0: F,
|
||||
const_1: F,
|
||||
multiplicand_0: ExtensionTarget<D>,
|
||||
|
||||
@ -1,9 +1,14 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
|
||||
use crate::gates::add_many_u32::U32AddManyGate;
|
||||
use crate::gates::arithmetic_u32::U32ArithmeticGate;
|
||||
use crate::gates::subtraction_u32::U32SubtractionGate;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::witness::{PartitionWitness, Witness};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
@ -113,18 +118,57 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
1 => (to_add[0], self.zero_u32()),
|
||||
2 => self.add_u32(to_add[0], to_add[1]),
|
||||
_ => {
|
||||
let (mut low, mut carry) = self.add_u32(to_add[0], to_add[1]);
|
||||
for i in 2..to_add.len() {
|
||||
let (new_low, new_carry) = self.add_u32(to_add[i], low);
|
||||
let (combined_carry, _zero) = self.add_u32(carry, new_carry);
|
||||
low = new_low;
|
||||
carry = combined_carry;
|
||||
let num_addends = to_add.len();
|
||||
let gate = U32AddManyGate::<F, D>::new_from_config(&self.config, num_addends);
|
||||
let (gate_index, copy) = self.find_u32_add_many_gate(num_addends);
|
||||
|
||||
for j in 0..num_addends {
|
||||
self.connect(
|
||||
Target::wire(gate_index, gate.wire_ith_op_jth_addend(copy, j)),
|
||||
to_add[j].0,
|
||||
);
|
||||
}
|
||||
(low, carry)
|
||||
let zero = self.zero();
|
||||
self.connect(Target::wire(gate_index, gate.wire_ith_carry(copy)), zero);
|
||||
|
||||
let output_low =
|
||||
U32Target(Target::wire(gate_index, gate.wire_ith_output_result(copy)));
|
||||
let output_high =
|
||||
U32Target(Target::wire(gate_index, gate.wire_ith_output_carry(copy)));
|
||||
|
||||
(output_low, output_high)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_u32s_with_carry(
|
||||
&mut self,
|
||||
to_add: &[U32Target],
|
||||
carry: U32Target,
|
||||
) -> (U32Target, U32Target) {
|
||||
if to_add.len() == 1 {
|
||||
return self.add_u32(to_add[0], carry);
|
||||
}
|
||||
|
||||
let num_addends = to_add.len();
|
||||
|
||||
let gate = U32AddManyGate::<F, D>::new_from_config(&self.config, num_addends);
|
||||
let (gate_index, copy) = self.find_u32_add_many_gate(num_addends);
|
||||
|
||||
for j in 0..num_addends {
|
||||
self.connect(
|
||||
Target::wire(gate_index, gate.wire_ith_op_jth_addend(copy, j)),
|
||||
to_add[j].0,
|
||||
);
|
||||
}
|
||||
self.connect(Target::wire(gate_index, gate.wire_ith_carry(copy)), carry.0);
|
||||
|
||||
let output = U32Target(Target::wire(gate_index, gate.wire_ith_output_result(copy)));
|
||||
let output_carry = U32Target(Target::wire(gate_index, gate.wire_ith_output_carry(copy)));
|
||||
|
||||
(output, output_carry)
|
||||
}
|
||||
|
||||
pub fn mul_u32(&mut self, a: U32Target, b: U32Target) -> (U32Target, U32Target) {
|
||||
let zero = self.zero_u32();
|
||||
self.mul_add_u32(a, b, zero)
|
||||
@ -153,3 +197,75 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
(output_result, output_borrow)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct SplitToU32Generator<F: RichField + Extendable<D>, const D: usize> {
|
||||
x: Target,
|
||||
low: U32Target,
|
||||
high: U32Target,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
for SplitToU32Generator<F, D>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
vec![self.x]
|
||||
}
|
||||
|
||||
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
|
||||
let x = witness.get_target(self.x);
|
||||
let x_u64 = x.to_canonical_u64();
|
||||
let low = x_u64 as u32;
|
||||
let high = (x_u64 >> 32) as u32;
|
||||
|
||||
out_buffer.set_u32_target(self.low, low);
|
||||
out_buffer.set_u32_target(self.high, high);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use anyhow::Result;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
||||
use crate::iop::witness::PartialWitness;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::circuit_data::CircuitConfig;
|
||||
use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
|
||||
use crate::plonk::verifier::verify;
|
||||
|
||||
#[test]
|
||||
pub fn test_add_many_u32s() -> Result<()> {
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
const NUM_ADDENDS: usize = 15;
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
|
||||
let mut rng = thread_rng();
|
||||
let mut to_add = Vec::new();
|
||||
let mut sum = 0u64;
|
||||
for _ in 0..NUM_ADDENDS {
|
||||
let x: u32 = rng.gen();
|
||||
sum += x as u64;
|
||||
to_add.push(builder.constant_u32(x));
|
||||
}
|
||||
let carry = builder.zero_u32();
|
||||
let (result_low, result_high) = builder.add_u32s_with_carry(&to_add, carry);
|
||||
let expected_low = builder.constant_u32((sum % (1 << 32)) as u32);
|
||||
let expected_high = builder.constant_u32((sum >> 32) as u32);
|
||||
|
||||
builder.connect_u32(result_low, expected_low);
|
||||
builder.connect_u32(result_high, expected_high);
|
||||
|
||||
let data = builder.build::<C>();
|
||||
let proof = data.prove(pw).unwrap();
|
||||
verify(proof, &data.verifier_only, &data.common)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use num::{BigUint, Integer};
|
||||
use num::{BigUint, Integer, Zero};
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
|
||||
use crate::gadgets::arithmetic_u32::U32Target;
|
||||
@ -33,6 +33,10 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
BigUintTarget { limbs }
|
||||
}
|
||||
|
||||
pub fn zero_biguint(&mut self) -> BigUintTarget {
|
||||
self.constant_biguint(&BigUint::zero())
|
||||
}
|
||||
|
||||
pub fn connect_biguint(&mut self, lhs: &BigUintTarget, rhs: &BigUintTarget) {
|
||||
let min_limbs = lhs.num_limbs().min(rhs.num_limbs());
|
||||
for i in 0..min_limbs {
|
||||
@ -76,9 +80,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
pub fn add_virtual_biguint_target(&mut self, num_limbs: usize) -> BigUintTarget {
|
||||
let limbs = (0..num_limbs)
|
||||
.map(|_| self.add_virtual_u32_target())
|
||||
.collect();
|
||||
let limbs = self.add_virtual_u32_targets(num_limbs);
|
||||
|
||||
BigUintTarget { limbs }
|
||||
}
|
||||
@ -143,8 +145,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
let mut combined_limbs = vec![];
|
||||
let mut carry = self.zero_u32();
|
||||
for summands in &mut to_add {
|
||||
summands.push(carry);
|
||||
let (new_result, new_carry) = self.add_many_u32(summands);
|
||||
let (new_result, new_carry) = self.add_u32s_with_carry(summands, carry);
|
||||
combined_limbs.push(new_result);
|
||||
carry = new_carry;
|
||||
}
|
||||
@ -155,6 +156,18 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn mul_biguint_by_bool(&mut self, a: &BigUintTarget, b: BoolTarget) -> BigUintTarget {
|
||||
let t = b.target;
|
||||
|
||||
BigUintTarget {
|
||||
limbs: a
|
||||
.limbs
|
||||
.iter()
|
||||
.map(|&l| U32Target(self.mul(l.0, t)))
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
// Returns x * y + z. This is no more efficient than mul-then-add; it's purely for convenience (only need to call one CircuitBuilder function).
|
||||
pub fn mul_add_biguint(
|
||||
&mut self,
|
||||
|
||||
@ -104,29 +104,17 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
let AffinePointTarget { x: x2, y: y2 } = p2;
|
||||
|
||||
let u = self.sub_nonnative(y2, y1);
|
||||
let uu = self.mul_nonnative(&u, &u);
|
||||
let v = self.sub_nonnative(x2, x1);
|
||||
let vv = self.mul_nonnative(&v, &v);
|
||||
let vvv = self.mul_nonnative(&v, &vv);
|
||||
let r = self.mul_nonnative(&vv, x1);
|
||||
let diff = self.sub_nonnative(&uu, &vvv);
|
||||
let r2 = self.add_nonnative(&r, &r);
|
||||
let a = self.sub_nonnative(&diff, &r2);
|
||||
let x3 = self.mul_nonnative(&v, &a);
|
||||
let v_inv = self.inv_nonnative(&v);
|
||||
let s = self.mul_nonnative(&u, &v_inv);
|
||||
let s_squared = self.mul_nonnative(&s, &s);
|
||||
let x_sum = self.add_nonnative(x2, x1);
|
||||
let x3 = self.sub_nonnative(&s_squared, &x_sum);
|
||||
let x_diff = self.sub_nonnative(x1, &x3);
|
||||
let prod = self.mul_nonnative(&s, &x_diff);
|
||||
let y3 = self.sub_nonnative(&prod, y1);
|
||||
|
||||
let r_a = self.sub_nonnative(&r, &a);
|
||||
let y3_first = self.mul_nonnative(&u, &r_a);
|
||||
let y3_second = self.mul_nonnative(&vvv, y1);
|
||||
let y3 = self.sub_nonnative(&y3_first, &y3_second);
|
||||
|
||||
let z3_inv = self.inv_nonnative(&vvv);
|
||||
let x3_norm = self.mul_nonnative(&x3, &z3_inv);
|
||||
let y3_norm = self.mul_nonnative(&y3, &z3_inv);
|
||||
|
||||
AffinePointTarget {
|
||||
x: x3_norm,
|
||||
y: y3_norm,
|
||||
}
|
||||
AffinePointTarget { x: x3, y: y3 }
|
||||
}
|
||||
|
||||
pub fn curve_scalar_mul<C: Curve>(
|
||||
@ -134,11 +122,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
p: &AffinePointTarget<C>,
|
||||
n: &NonNativeTarget<C::ScalarField>,
|
||||
) -> AffinePointTarget<C> {
|
||||
let one = self.constant_nonnative(C::BaseField::ONE);
|
||||
|
||||
let bits = self.split_nonnative_to_bits(n);
|
||||
let bits_as_base: Vec<NonNativeTarget<C::BaseField>> =
|
||||
bits.iter().map(|b| self.bool_to_nonnative(b)).collect();
|
||||
|
||||
let rando = (CurveScalar(C::ScalarField::rand()) * C::GENERATOR_PROJECTIVE).to_affine();
|
||||
let randot = self.constant_affine_point(rando);
|
||||
@ -149,15 +133,15 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
let mut two_i_times_p = self.add_virtual_affine_point_target();
|
||||
self.connect_affine_point(p, &two_i_times_p);
|
||||
|
||||
for bit in bits_as_base.iter() {
|
||||
let not_bit = self.sub_nonnative(&one, bit);
|
||||
for &bit in bits.iter() {
|
||||
let not_bit = self.not(bit);
|
||||
|
||||
let result_plus_2_i_p = self.curve_add(&result, &two_i_times_p);
|
||||
|
||||
let new_x_if_bit = self.mul_nonnative(bit, &result_plus_2_i_p.x);
|
||||
let new_x_if_not_bit = self.mul_nonnative(¬_bit, &result.x);
|
||||
let new_y_if_bit = self.mul_nonnative(bit, &result_plus_2_i_p.y);
|
||||
let new_y_if_not_bit = self.mul_nonnative(¬_bit, &result.y);
|
||||
let new_x_if_bit = self.mul_nonnative_by_bool(&result_plus_2_i_p.x, bit);
|
||||
let new_x_if_not_bit = self.mul_nonnative_by_bool(&result.x, not_bit);
|
||||
let new_y_if_bit = self.mul_nonnative_by_bool(&result_plus_2_i_p.y, bit);
|
||||
let new_y_if_not_bit = self.mul_nonnative_by_bool(&result.y, not_bit);
|
||||
|
||||
let new_x = self.add_nonnative(&new_x_if_bit, &new_x_if_not_bit);
|
||||
let new_y = self.add_nonnative(&new_y_if_bit, &new_y_if_not_bit);
|
||||
@ -177,6 +161,8 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::ops::Neg;
|
||||
|
||||
use anyhow::Result;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::secp256k1_base::Secp256K1Base;
|
||||
@ -196,7 +182,7 @@ mod tests {
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
@ -221,7 +207,7 @@ mod tests {
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
@ -248,7 +234,7 @@ mod tests {
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
@ -285,7 +271,7 @@ mod tests {
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
@ -316,27 +302,25 @@ mod tests {
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let config = CircuitConfig {
|
||||
num_routed_wires: 33,
|
||||
..CircuitConfig::standard_recursion_config()
|
||||
};
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
|
||||
let g = Secp256K1::GENERATOR_AFFINE;
|
||||
let five = Secp256K1Scalar::from_canonical_usize(5);
|
||||
let five_scalar = CurveScalar::<Secp256K1>(five);
|
||||
let five_g = (five_scalar * g.to_projective()).to_affine();
|
||||
let five_g_expected = builder.constant_affine_point(five_g);
|
||||
builder.curve_assert_valid(&five_g_expected);
|
||||
let neg_five = five.neg();
|
||||
let neg_five_scalar = CurveScalar::<Secp256K1>(neg_five);
|
||||
let neg_five_g = (neg_five_scalar * g.to_projective()).to_affine();
|
||||
let neg_five_g_expected = builder.constant_affine_point(neg_five_g);
|
||||
builder.curve_assert_valid(&neg_five_g_expected);
|
||||
|
||||
let g_target = builder.constant_affine_point(g);
|
||||
let five_target = builder.constant_nonnative(five);
|
||||
let five_g_actual = builder.curve_scalar_mul(&g_target, &five_target);
|
||||
builder.curve_assert_valid(&five_g_actual);
|
||||
let neg_five_target = builder.constant_nonnative(neg_five);
|
||||
let neg_five_g_actual = builder.curve_scalar_mul(&g_target, &neg_five_target);
|
||||
builder.curve_assert_valid(&neg_five_g_actual);
|
||||
|
||||
builder.connect_affine_point(&five_g_expected, &five_g_actual);
|
||||
builder.connect_affine_point(&neg_five_g_expected, &neg_five_g_actual);
|
||||
|
||||
let data = builder.build::<C>();
|
||||
let proof = data.prove(pw).unwrap();
|
||||
@ -345,16 +329,12 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_curve_random() -> Result<()> {
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let config = CircuitConfig {
|
||||
num_routed_wires: 33,
|
||||
..CircuitConfig::standard_recursion_config()
|
||||
};
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
|
||||
104
plonky2/src/gadgets/ecdsa.rs
Normal file
104
plonky2/src/gadgets/ecdsa.rs
Normal file
@ -0,0 +1,104 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use crate::curve::curve_types::Curve;
|
||||
use crate::field::extension_field::Extendable;
|
||||
use crate::gadgets::curve::AffinePointTarget;
|
||||
use crate::gadgets::nonnative::NonNativeTarget;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ECDSASecretKeyTarget<C: Curve>(NonNativeTarget<C::ScalarField>);
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ECDSAPublicKeyTarget<C: Curve>(AffinePointTarget<C>);
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ECDSASignatureTarget<C: Curve> {
|
||||
pub r: NonNativeTarget<C::ScalarField>,
|
||||
pub s: NonNativeTarget<C::ScalarField>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
pub fn verify_message<C: Curve>(
|
||||
&mut self,
|
||||
msg: NonNativeTarget<C::ScalarField>,
|
||||
sig: ECDSASignatureTarget<C>,
|
||||
pk: ECDSAPublicKeyTarget<C>,
|
||||
) {
|
||||
let ECDSASignatureTarget { r, s } = sig;
|
||||
|
||||
self.curve_assert_valid(&pk.0);
|
||||
|
||||
let c = self.inv_nonnative(&s);
|
||||
let u1 = self.mul_nonnative(&msg, &c);
|
||||
let u2 = self.mul_nonnative(&r, &c);
|
||||
|
||||
let g = self.constant_affine_point(C::GENERATOR_AFFINE);
|
||||
let point1 = self.curve_scalar_mul(&g, &u1);
|
||||
let point2 = self.curve_scalar_mul(&pk.0, &u2);
|
||||
let point = self.curve_add(&point1, &point2);
|
||||
|
||||
let x = NonNativeTarget::<C::ScalarField> {
|
||||
value: point.x.value,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
self.connect_nonnative(&r, &x);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use anyhow::Result;
|
||||
|
||||
use crate::curve::curve_types::{Curve, CurveScalar};
|
||||
use crate::curve::ecdsa::{sign_message, ECDSAPublicKey, ECDSASecretKey, ECDSASignature};
|
||||
use crate::curve::secp256k1::Secp256K1;
|
||||
use crate::field::field_types::Field;
|
||||
use crate::field::secp256k1_scalar::Secp256K1Scalar;
|
||||
use crate::gadgets::ecdsa::{ECDSAPublicKeyTarget, ECDSASignatureTarget};
|
||||
use crate::iop::witness::PartialWitness;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::circuit_data::CircuitConfig;
|
||||
use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
|
||||
use crate::plonk::verifier::verify;
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_ecdsa_circuit() -> Result<()> {
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
type Curve = Secp256K1;
|
||||
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
|
||||
let msg = Secp256K1Scalar::rand();
|
||||
let msg_target = builder.constant_nonnative(msg);
|
||||
|
||||
let sk = ECDSASecretKey::<Curve>(Secp256K1Scalar::rand());
|
||||
let pk = ECDSAPublicKey((CurveScalar(sk.0) * Curve::GENERATOR_PROJECTIVE).to_affine());
|
||||
|
||||
let pk_target = ECDSAPublicKeyTarget(builder.constant_affine_point(pk.0));
|
||||
|
||||
let sig = sign_message(msg, sk);
|
||||
|
||||
let ECDSASignature { r, s } = sig;
|
||||
let r_target = builder.constant_nonnative(r);
|
||||
let s_target = builder.constant_nonnative(s);
|
||||
let sig_target = ECDSASignatureTarget {
|
||||
r: r_target,
|
||||
s: s_target,
|
||||
};
|
||||
|
||||
builder.verify_message(msg_target, sig_target, pk_target);
|
||||
|
||||
let data = builder.build::<C>();
|
||||
let proof = data.prove(pw).unwrap();
|
||||
verify(proof, &data.verifier_only, &data.common)
|
||||
}
|
||||
}
|
||||
@ -3,6 +3,7 @@ pub mod arithmetic_extension;
|
||||
pub mod arithmetic_u32;
|
||||
pub mod biguint;
|
||||
pub mod curve;
|
||||
pub mod ecdsa;
|
||||
pub mod hash;
|
||||
pub mod interpolation;
|
||||
pub mod multiple_comparison;
|
||||
|
||||
@ -60,8 +60,9 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
|
||||
/// Helper function for comparing, specifically, lists of `U32Target`s.
|
||||
pub fn list_le_u32(&mut self, a: Vec<U32Target>, b: Vec<U32Target>) -> BoolTarget {
|
||||
let a_targets = a.iter().map(|&t| t.0).collect();
|
||||
let b_targets = b.iter().map(|&t| t.0).collect();
|
||||
let a_targets: Vec<Target> = a.iter().map(|&t| t.0).collect();
|
||||
let b_targets: Vec<Target> = b.iter().map(|&t| t.0).collect();
|
||||
|
||||
self.list_le(a_targets, b_targets, 32)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use num::{BigUint, Zero};
|
||||
use num::{BigUint, Integer, One, Zero};
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
use plonky2_field::{extension_field::Extendable, field_types::Field};
|
||||
use plonky2_util::ceil_div_usize;
|
||||
|
||||
@ -15,7 +16,7 @@ use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct NonNativeTarget<FF: Field> {
|
||||
pub(crate) value: BigUintTarget,
|
||||
_phantom: PhantomData<FF>,
|
||||
pub(crate) _phantom: PhantomData<FF>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
@ -34,11 +35,15 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
x.value.clone()
|
||||
}
|
||||
|
||||
pub fn constant_nonnative<FF: Field>(&mut self, x: FF) -> NonNativeTarget<FF> {
|
||||
let x_biguint = self.constant_biguint(&x.to_biguint());
|
||||
pub fn constant_nonnative<FF: PrimeField>(&mut self, x: FF) -> NonNativeTarget<FF> {
|
||||
let x_biguint = self.constant_biguint(&x.to_canonical_biguint());
|
||||
self.biguint_to_nonnative(&x_biguint)
|
||||
}
|
||||
|
||||
pub fn zero_nonnative<FF: PrimeField>(&mut self) -> NonNativeTarget<FF> {
|
||||
self.constant_nonnative(FF::ZERO)
|
||||
}
|
||||
|
||||
// Assert that two NonNativeTarget's, both assumed to be in reduced form, are equal.
|
||||
pub fn connect_nonnative<FF: Field>(
|
||||
&mut self,
|
||||
@ -58,82 +63,204 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
// Add two `NonNativeTarget`s.
|
||||
pub fn add_nonnative<FF: Field>(
|
||||
pub fn add_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
a: &NonNativeTarget<FF>,
|
||||
b: &NonNativeTarget<FF>,
|
||||
) -> NonNativeTarget<FF> {
|
||||
let result = self.add_biguint(&a.value, &b.value);
|
||||
let sum = self.add_virtual_nonnative_target::<FF>();
|
||||
let overflow = self.add_virtual_bool_target();
|
||||
|
||||
// TODO: reduce add result with only one conditional subtraction
|
||||
self.reduce(&result)
|
||||
self.add_simple_generator(NonNativeAdditionGenerator::<F, D, FF> {
|
||||
a: a.clone(),
|
||||
b: b.clone(),
|
||||
sum: sum.clone(),
|
||||
overflow,
|
||||
_phantom: PhantomData,
|
||||
});
|
||||
|
||||
let sum_expected = self.add_biguint(&a.value, &b.value);
|
||||
|
||||
let modulus = self.constant_biguint(&FF::order());
|
||||
let mod_times_overflow = self.mul_biguint_by_bool(&modulus, overflow);
|
||||
let sum_actual = self.add_biguint(&sum.value, &mod_times_overflow);
|
||||
self.connect_biguint(&sum_expected, &sum_actual);
|
||||
|
||||
// Range-check result.
|
||||
// TODO: can potentially leave unreduced until necessary (e.g. when connecting values).
|
||||
let cmp = self.cmp_biguint(&sum.value, &modulus);
|
||||
let one = self.one();
|
||||
self.connect(cmp.target, one);
|
||||
|
||||
sum
|
||||
}
|
||||
|
||||
pub fn mul_nonnative_by_bool<FF: Field>(
|
||||
&mut self,
|
||||
a: &NonNativeTarget<FF>,
|
||||
b: BoolTarget,
|
||||
) -> NonNativeTarget<FF> {
|
||||
NonNativeTarget {
|
||||
value: self.mul_biguint_by_bool(&a.value, b),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_many_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
to_add: &[NonNativeTarget<FF>],
|
||||
) -> NonNativeTarget<FF> {
|
||||
if to_add.len() == 1 {
|
||||
return to_add[0].clone();
|
||||
}
|
||||
|
||||
let sum = self.add_virtual_nonnative_target::<FF>();
|
||||
let overflow = self.add_virtual_u32_target();
|
||||
let summands = to_add.to_vec();
|
||||
|
||||
self.add_simple_generator(NonNativeMultipleAddsGenerator::<F, D, FF> {
|
||||
summands: summands.clone(),
|
||||
sum: sum.clone(),
|
||||
overflow,
|
||||
_phantom: PhantomData,
|
||||
});
|
||||
|
||||
self.range_check_u32(sum.value.limbs.clone());
|
||||
self.range_check_u32(vec![overflow]);
|
||||
|
||||
let sum_expected = summands
|
||||
.iter()
|
||||
.fold(self.zero_biguint(), |a, b| self.add_biguint(&a, &b.value));
|
||||
|
||||
let modulus = self.constant_biguint(&FF::order());
|
||||
let overflow_biguint = BigUintTarget {
|
||||
limbs: vec![overflow],
|
||||
};
|
||||
let mod_times_overflow = self.mul_biguint(&modulus, &overflow_biguint);
|
||||
let sum_actual = self.add_biguint(&sum.value, &mod_times_overflow);
|
||||
self.connect_biguint(&sum_expected, &sum_actual);
|
||||
|
||||
// Range-check result.
|
||||
// TODO: can potentially leave unreduced until necessary (e.g. when connecting values).
|
||||
let cmp = self.cmp_biguint(&sum.value, &modulus);
|
||||
let one = self.one();
|
||||
self.connect(cmp.target, one);
|
||||
|
||||
sum
|
||||
}
|
||||
|
||||
// Subtract two `NonNativeTarget`s.
|
||||
pub fn sub_nonnative<FF: Field>(
|
||||
pub fn sub_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
a: &NonNativeTarget<FF>,
|
||||
b: &NonNativeTarget<FF>,
|
||||
) -> NonNativeTarget<FF> {
|
||||
let order = self.constant_biguint(&FF::order());
|
||||
let a_plus_order = self.add_biguint(&order, &a.value);
|
||||
let result = self.sub_biguint(&a_plus_order, &b.value);
|
||||
let diff = self.add_virtual_nonnative_target::<FF>();
|
||||
let overflow = self.add_virtual_bool_target();
|
||||
|
||||
// TODO: reduce sub result with only one conditional addition?
|
||||
self.reduce(&result)
|
||||
self.add_simple_generator(NonNativeSubtractionGenerator::<F, D, FF> {
|
||||
a: a.clone(),
|
||||
b: b.clone(),
|
||||
diff: diff.clone(),
|
||||
overflow,
|
||||
_phantom: PhantomData,
|
||||
});
|
||||
|
||||
self.range_check_u32(diff.value.limbs.clone());
|
||||
self.assert_bool(overflow);
|
||||
|
||||
let diff_plus_b = self.add_biguint(&diff.value, &b.value);
|
||||
let modulus = self.constant_biguint(&FF::order());
|
||||
let mod_times_overflow = self.mul_biguint_by_bool(&modulus, overflow);
|
||||
let diff_plus_b_reduced = self.sub_biguint(&diff_plus_b, &mod_times_overflow);
|
||||
self.connect_biguint(&a.value, &diff_plus_b_reduced);
|
||||
|
||||
diff
|
||||
}
|
||||
|
||||
pub fn mul_nonnative<FF: Field>(
|
||||
pub fn mul_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
a: &NonNativeTarget<FF>,
|
||||
b: &NonNativeTarget<FF>,
|
||||
) -> NonNativeTarget<FF> {
|
||||
let result = self.mul_biguint(&a.value, &b.value);
|
||||
let prod = self.add_virtual_nonnative_target::<FF>();
|
||||
let modulus = self.constant_biguint(&FF::order());
|
||||
let overflow = self.add_virtual_biguint_target(
|
||||
a.value.num_limbs() + b.value.num_limbs() - modulus.num_limbs(),
|
||||
);
|
||||
|
||||
self.reduce(&result)
|
||||
self.add_simple_generator(NonNativeMultiplicationGenerator::<F, D, FF> {
|
||||
a: a.clone(),
|
||||
b: b.clone(),
|
||||
prod: prod.clone(),
|
||||
overflow: overflow.clone(),
|
||||
_phantom: PhantomData,
|
||||
});
|
||||
|
||||
self.range_check_u32(prod.value.limbs.clone());
|
||||
self.range_check_u32(overflow.limbs.clone());
|
||||
|
||||
let prod_expected = self.mul_biguint(&a.value, &b.value);
|
||||
|
||||
let mod_times_overflow = self.mul_biguint(&modulus, &overflow);
|
||||
let prod_actual = self.add_biguint(&prod.value, &mod_times_overflow);
|
||||
self.connect_biguint(&prod_expected, &prod_actual);
|
||||
|
||||
prod
|
||||
}
|
||||
|
||||
pub fn neg_nonnative<FF: Field>(&mut self, x: &NonNativeTarget<FF>) -> NonNativeTarget<FF> {
|
||||
pub fn mul_many_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
to_mul: &[NonNativeTarget<FF>],
|
||||
) -> NonNativeTarget<FF> {
|
||||
if to_mul.len() == 1 {
|
||||
return to_mul[0].clone();
|
||||
}
|
||||
|
||||
let mut accumulator = self.mul_nonnative(&to_mul[0], &to_mul[1]);
|
||||
for i in 2..to_mul.len() {
|
||||
accumulator = self.mul_nonnative(&accumulator, &to_mul[i]);
|
||||
}
|
||||
accumulator
|
||||
}
|
||||
|
||||
pub fn neg_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
x: &NonNativeTarget<FF>,
|
||||
) -> NonNativeTarget<FF> {
|
||||
let zero_target = self.constant_biguint(&BigUint::zero());
|
||||
let zero_ff = self.biguint_to_nonnative(&zero_target);
|
||||
|
||||
self.sub_nonnative(&zero_ff, x)
|
||||
}
|
||||
|
||||
pub fn inv_nonnative<FF: Field>(&mut self, x: &NonNativeTarget<FF>) -> NonNativeTarget<FF> {
|
||||
pub fn inv_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
x: &NonNativeTarget<FF>,
|
||||
) -> NonNativeTarget<FF> {
|
||||
let num_limbs = x.value.num_limbs();
|
||||
let inv_biguint = self.add_virtual_biguint_target(num_limbs);
|
||||
let inv = NonNativeTarget::<FF> {
|
||||
value: inv_biguint,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
let div = self.add_virtual_biguint_target(num_limbs);
|
||||
|
||||
self.add_simple_generator(NonNativeInverseGenerator::<F, D, FF> {
|
||||
x: x.clone(),
|
||||
inv: inv.clone(),
|
||||
inv: inv_biguint.clone(),
|
||||
div: div.clone(),
|
||||
_phantom: PhantomData,
|
||||
});
|
||||
|
||||
let product = self.mul_nonnative(x, &inv);
|
||||
let one = self.constant_nonnative(FF::ONE);
|
||||
self.connect_nonnative(&product, &one);
|
||||
let product = self.mul_biguint(&x.value, &inv_biguint);
|
||||
|
||||
inv
|
||||
}
|
||||
let modulus = self.constant_biguint(&FF::order());
|
||||
let mod_times_div = self.mul_biguint(&modulus, &div);
|
||||
let one = self.constant_biguint(&BigUint::one());
|
||||
let expected_product = self.add_biguint(&mod_times_div, &one);
|
||||
self.connect_biguint(&product, &expected_product);
|
||||
|
||||
pub fn div_rem_nonnative<FF: Field>(
|
||||
&mut self,
|
||||
x: &NonNativeTarget<FF>,
|
||||
y: &NonNativeTarget<FF>,
|
||||
) -> (NonNativeTarget<FF>, NonNativeTarget<FF>) {
|
||||
let x_biguint = self.nonnative_to_biguint(x);
|
||||
let y_biguint = self.nonnative_to_biguint(y);
|
||||
|
||||
let (div_biguint, rem_biguint) = self.div_rem_biguint(&x_biguint, &y_biguint);
|
||||
let div = self.biguint_to_nonnative(&div_biguint);
|
||||
let rem = self.biguint_to_nonnative(&rem_biguint);
|
||||
(div, rem)
|
||||
NonNativeTarget::<FF> {
|
||||
value: inv_biguint,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `x % |FF|` as a `NonNativeTarget`.
|
||||
@ -148,8 +275,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn reduce_nonnative<FF: Field>(&mut self, x: &NonNativeTarget<FF>) -> NonNativeTarget<FF> {
|
||||
pub fn reduce_nonnative<FF: Field>(&mut self, x: &NonNativeTarget<FF>) -> NonNativeTarget<FF> {
|
||||
let x_biguint = self.nonnative_to_biguint(x);
|
||||
self.reduce(&x_biguint)
|
||||
}
|
||||
@ -188,13 +314,178 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct NonNativeInverseGenerator<F: RichField + Extendable<D>, const D: usize, FF: Field> {
|
||||
x: NonNativeTarget<FF>,
|
||||
inv: NonNativeTarget<FF>,
|
||||
struct NonNativeAdditionGenerator<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> {
|
||||
a: NonNativeTarget<FF>,
|
||||
b: NonNativeTarget<FF>,
|
||||
sum: NonNativeTarget<FF>,
|
||||
overflow: BoolTarget,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerator<F>
|
||||
for NonNativeAdditionGenerator<F, D, FF>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
self.a
|
||||
.value
|
||||
.limbs
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(self.b.value.limbs.clone())
|
||||
.map(|l| l.0)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
|
||||
let a = witness.get_nonnative_target(self.a.clone());
|
||||
let b = witness.get_nonnative_target(self.b.clone());
|
||||
let a_biguint = a.to_canonical_biguint();
|
||||
let b_biguint = b.to_canonical_biguint();
|
||||
let sum_biguint = a_biguint + b_biguint;
|
||||
let modulus = FF::order();
|
||||
let (overflow, sum_reduced) = if sum_biguint > modulus {
|
||||
(true, sum_biguint - modulus)
|
||||
} else {
|
||||
(false, sum_biguint)
|
||||
};
|
||||
|
||||
out_buffer.set_biguint_target(self.sum.value.clone(), sum_reduced);
|
||||
out_buffer.set_bool_target(self.overflow, overflow);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct NonNativeMultipleAddsGenerator<F: RichField + Extendable<D>, const D: usize, FF: PrimeField>
|
||||
{
|
||||
summands: Vec<NonNativeTarget<FF>>,
|
||||
sum: NonNativeTarget<FF>,
|
||||
overflow: U32Target,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerator<F>
|
||||
for NonNativeMultipleAddsGenerator<F, D, FF>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
self.summands
|
||||
.iter()
|
||||
.flat_map(|summand| summand.value.limbs.iter().map(|limb| limb.0))
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
|
||||
let summands: Vec<_> = self
|
||||
.summands
|
||||
.iter()
|
||||
.map(|summand| witness.get_nonnative_target(summand.clone()))
|
||||
.collect();
|
||||
let summand_biguints: Vec<_> = summands
|
||||
.iter()
|
||||
.map(|summand| summand.to_canonical_biguint())
|
||||
.collect();
|
||||
|
||||
let sum_biguint = summand_biguints
|
||||
.iter()
|
||||
.fold(BigUint::zero(), |a, b| a + b.clone());
|
||||
|
||||
let modulus = FF::order();
|
||||
let (overflow_biguint, sum_reduced) = sum_biguint.div_rem(&modulus);
|
||||
let overflow = overflow_biguint.to_u64_digits()[0] as u32;
|
||||
|
||||
out_buffer.set_biguint_target(self.sum.value.clone(), sum_reduced);
|
||||
out_buffer.set_u32_target(self.overflow, overflow);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct NonNativeSubtractionGenerator<F: RichField + Extendable<D>, const D: usize, FF: Field> {
|
||||
a: NonNativeTarget<FF>,
|
||||
b: NonNativeTarget<FF>,
|
||||
diff: NonNativeTarget<FF>,
|
||||
overflow: BoolTarget,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerator<F>
|
||||
for NonNativeSubtractionGenerator<F, D, FF>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
self.a
|
||||
.value
|
||||
.limbs
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(self.b.value.limbs.clone())
|
||||
.map(|l| l.0)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
|
||||
let a = witness.get_nonnative_target(self.a.clone());
|
||||
let b = witness.get_nonnative_target(self.b.clone());
|
||||
let a_biguint = a.to_canonical_biguint();
|
||||
let b_biguint = b.to_canonical_biguint();
|
||||
|
||||
let modulus = FF::order();
|
||||
let (diff_biguint, overflow) = if a_biguint > b_biguint {
|
||||
(a_biguint - b_biguint, false)
|
||||
} else {
|
||||
(modulus + a_biguint - b_biguint, true)
|
||||
};
|
||||
|
||||
out_buffer.set_biguint_target(self.diff.value.clone(), diff_biguint);
|
||||
out_buffer.set_bool_target(self.overflow, overflow);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct NonNativeMultiplicationGenerator<F: RichField + Extendable<D>, const D: usize, FF: Field> {
|
||||
a: NonNativeTarget<FF>,
|
||||
b: NonNativeTarget<FF>,
|
||||
prod: NonNativeTarget<FF>,
|
||||
overflow: BigUintTarget,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerator<F>
|
||||
for NonNativeMultiplicationGenerator<F, D, FF>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
self.a
|
||||
.value
|
||||
.limbs
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(self.b.value.limbs.clone())
|
||||
.map(|l| l.0)
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
|
||||
let a = witness.get_nonnative_target(self.a.clone());
|
||||
let b = witness.get_nonnative_target(self.b.clone());
|
||||
let a_biguint = a.to_canonical_biguint();
|
||||
let b_biguint = b.to_canonical_biguint();
|
||||
|
||||
let prod_biguint = a_biguint * b_biguint;
|
||||
|
||||
let modulus = FF::order();
|
||||
let (overflow_biguint, prod_reduced) = prod_biguint.div_rem(&modulus);
|
||||
|
||||
out_buffer.set_biguint_target(self.prod.value.clone(), prod_reduced);
|
||||
out_buffer.set_biguint_target(self.overflow.clone(), overflow_biguint);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct NonNativeInverseGenerator<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> {
|
||||
x: NonNativeTarget<FF>,
|
||||
inv: BigUintTarget,
|
||||
div: BigUintTarget,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerator<F>
|
||||
for NonNativeInverseGenerator<F, D, FF>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
@ -205,14 +496,21 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
let x = witness.get_nonnative_target(self.x.clone());
|
||||
let inv = x.inverse();
|
||||
|
||||
out_buffer.set_nonnative_target(self.inv.clone(), inv);
|
||||
let x_biguint = x.to_canonical_biguint();
|
||||
let inv_biguint = inv.to_canonical_biguint();
|
||||
let prod = x_biguint * &inv_biguint;
|
||||
let modulus = FF::order();
|
||||
let (div, _rem) = prod.div_rem(&modulus);
|
||||
|
||||
out_buffer.set_biguint_target(self.div.clone(), div);
|
||||
out_buffer.set_biguint_target(self.inv.clone(), inv_biguint);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use anyhow::Result;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::secp256k1_base::Secp256K1Base;
|
||||
|
||||
use crate::iop::witness::PartialWitness;
|
||||
@ -227,11 +525,12 @@ mod tests {
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let x_ff = FF::rand();
|
||||
let y_ff = FF::rand();
|
||||
let sum_ff = x_ff + y_ff;
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
|
||||
@ -247,20 +546,61 @@ mod tests {
|
||||
verify(proof, &data.verifier_only, &data.common)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_nonnative_many_adds() -> Result<()> {
|
||||
type FF = Secp256K1Base;
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let a_ff = FF::rand();
|
||||
let b_ff = FF::rand();
|
||||
let c_ff = FF::rand();
|
||||
let d_ff = FF::rand();
|
||||
let e_ff = FF::rand();
|
||||
let f_ff = FF::rand();
|
||||
let g_ff = FF::rand();
|
||||
let h_ff = FF::rand();
|
||||
let sum_ff = a_ff + b_ff + c_ff + d_ff + e_ff + f_ff + g_ff + h_ff;
|
||||
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
|
||||
let a = builder.constant_nonnative(a_ff);
|
||||
let b = builder.constant_nonnative(b_ff);
|
||||
let c = builder.constant_nonnative(c_ff);
|
||||
let d = builder.constant_nonnative(d_ff);
|
||||
let e = builder.constant_nonnative(e_ff);
|
||||
let f = builder.constant_nonnative(f_ff);
|
||||
let g = builder.constant_nonnative(g_ff);
|
||||
let h = builder.constant_nonnative(h_ff);
|
||||
let all = [a, b, c, d, e, f, g, h];
|
||||
let sum = builder.add_many_nonnative(&all);
|
||||
|
||||
let sum_expected = builder.constant_nonnative(sum_ff);
|
||||
builder.connect_nonnative(&sum, &sum_expected);
|
||||
|
||||
let data = builder.build::<C>();
|
||||
let proof = data.prove(pw).unwrap();
|
||||
verify(proof, &data.verifier_only, &data.common)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_nonnative_sub() -> Result<()> {
|
||||
type FF = Secp256K1Base;
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let x_ff = FF::rand();
|
||||
let mut y_ff = FF::rand();
|
||||
while y_ff.to_biguint() > x_ff.to_biguint() {
|
||||
while y_ff.to_canonical_biguint() > x_ff.to_canonical_biguint() {
|
||||
y_ff = FF::rand();
|
||||
}
|
||||
let diff_ff = x_ff - y_ff;
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
|
||||
@ -286,7 +626,7 @@ mod tests {
|
||||
let y_ff = FF::rand();
|
||||
let product_ff = x_ff * y_ff;
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
|
||||
@ -311,7 +651,7 @@ mod tests {
|
||||
let x_ff = FF::rand();
|
||||
let neg_x_ff = -x_ff;
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
|
||||
@ -335,7 +675,7 @@ mod tests {
|
||||
let x_ff = FF::rand();
|
||||
let inv_x_ff = x_ff.inverse();
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
let config = CircuitConfig::standard_ecc_config();
|
||||
let pw = PartialWitness::new();
|
||||
let mut builder = CircuitBuilder::<F, D>::new(config);
|
||||
|
||||
|
||||
@ -6,6 +6,7 @@ use crate::iop::target::Target;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::util::reducing::ReducingFactorTarget;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PolynomialCoeffsExtTarget<const D: usize>(pub Vec<ExtensionTarget<D>>);
|
||||
|
||||
impl<const D: usize> PolynomialCoeffsExtTarget<D> {
|
||||
|
||||
@ -1,5 +1,7 @@
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
|
||||
use crate::gadgets::arithmetic_u32::U32Target;
|
||||
use crate::gates::range_check_u32::U32RangeCheckGate;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator};
|
||||
use crate::iop::target::{BoolTarget, Target};
|
||||
@ -41,6 +43,25 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
|
||||
(low, high)
|
||||
}
|
||||
|
||||
pub fn range_check_u32(&mut self, vals: Vec<U32Target>) {
|
||||
let num_input_limbs = vals.len();
|
||||
let gate = U32RangeCheckGate::<F, D>::new(num_input_limbs);
|
||||
let gate_index = self.add_gate(gate, vec![]);
|
||||
|
||||
for i in 0..num_input_limbs {
|
||||
self.connect(
|
||||
Target::wire(gate_index, gate.wire_ith_input_limb(i)),
|
||||
vals[i].0,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn assert_bool(&mut self, b: BoolTarget) {
|
||||
let z = self.mul_sub(b.target, b.target, b.target);
|
||||
let zero = self.zero();
|
||||
self.connect(z, zero);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
461
plonky2/src/gates/add_many_u32.rs
Normal file
461
plonky2/src/gates/add_many_u32.rs
Normal file
@ -0,0 +1,461 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use itertools::unfold;
|
||||
use plonky2_util::ceil_div_usize;
|
||||
|
||||
use crate::field::extension_field::Extendable;
|
||||
use crate::field::field_types::Field;
|
||||
use crate::gates::gate::Gate;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::wire::Wire;
|
||||
use crate::iop::witness::{PartitionWitness, Witness};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::circuit_data::CircuitConfig;
|
||||
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
|
||||
|
||||
const LOG2_MAX_NUM_ADDENDS: usize = 4;
|
||||
const MAX_NUM_ADDENDS: usize = 16;
|
||||
|
||||
/// A gate to perform addition on `num_addends` different 32-bit values, plus a small carry
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct U32AddManyGate<F: RichField + Extendable<D>, const D: usize> {
|
||||
pub num_addends: usize,
|
||||
pub num_ops: usize,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> U32AddManyGate<F, D> {
|
||||
pub fn new_from_config(config: &CircuitConfig, num_addends: usize) -> Self {
|
||||
Self {
|
||||
num_addends,
|
||||
num_ops: Self::num_ops(num_addends, config),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn num_ops(num_addends: usize, config: &CircuitConfig) -> usize {
|
||||
debug_assert!(num_addends <= MAX_NUM_ADDENDS);
|
||||
let wires_per_op = (num_addends + 3) + Self::num_limbs();
|
||||
let routed_wires_per_op = num_addends + 3;
|
||||
(config.num_wires / wires_per_op).min(config.num_routed_wires / routed_wires_per_op)
|
||||
}
|
||||
|
||||
pub fn wire_ith_op_jth_addend(&self, i: usize, j: usize) -> usize {
|
||||
debug_assert!(i < self.num_ops);
|
||||
debug_assert!(j < self.num_addends);
|
||||
(self.num_addends + 3) * i + j
|
||||
}
|
||||
pub fn wire_ith_carry(&self, i: usize) -> usize {
|
||||
debug_assert!(i < self.num_ops);
|
||||
(self.num_addends + 3) * i + self.num_addends
|
||||
}
|
||||
|
||||
pub fn wire_ith_output_result(&self, i: usize) -> usize {
|
||||
debug_assert!(i < self.num_ops);
|
||||
(self.num_addends + 3) * i + self.num_addends + 1
|
||||
}
|
||||
pub fn wire_ith_output_carry(&self, i: usize) -> usize {
|
||||
debug_assert!(i < self.num_ops);
|
||||
(self.num_addends + 3) * i + self.num_addends + 2
|
||||
}
|
||||
|
||||
pub fn limb_bits() -> usize {
|
||||
2
|
||||
}
|
||||
pub fn num_result_limbs() -> usize {
|
||||
ceil_div_usize(32, Self::limb_bits())
|
||||
}
|
||||
pub fn num_carry_limbs() -> usize {
|
||||
ceil_div_usize(LOG2_MAX_NUM_ADDENDS, Self::limb_bits())
|
||||
}
|
||||
pub fn num_limbs() -> usize {
|
||||
Self::num_result_limbs() + Self::num_carry_limbs()
|
||||
}
|
||||
|
||||
pub fn wire_ith_output_jth_limb(&self, i: usize, j: usize) -> usize {
|
||||
debug_assert!(i < self.num_ops);
|
||||
debug_assert!(j < Self::num_limbs());
|
||||
(self.num_addends + 3) * self.num_ops + Self::num_limbs() * i + j
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for U32AddManyGate<F, D> {
|
||||
fn id(&self) -> String {
|
||||
format!("{:?}", self)
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let mut constraints = Vec::with_capacity(self.num_constraints());
|
||||
for i in 0..self.num_ops {
|
||||
let addends: Vec<F::Extension> = (0..self.num_addends)
|
||||
.map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)])
|
||||
.collect();
|
||||
let carry = vars.local_wires[self.wire_ith_carry(i)];
|
||||
|
||||
let computed_output = addends.iter().fold(F::Extension::ZERO, |x, &y| x + y) + carry;
|
||||
|
||||
let output_result = vars.local_wires[self.wire_ith_output_result(i)];
|
||||
let output_carry = vars.local_wires[self.wire_ith_output_carry(i)];
|
||||
|
||||
let base = F::Extension::from_canonical_u64(1 << 32u64);
|
||||
let combined_output = output_carry * base + output_result;
|
||||
|
||||
constraints.push(combined_output - computed_output);
|
||||
|
||||
let mut combined_result_limbs = F::Extension::ZERO;
|
||||
let mut combined_carry_limbs = F::Extension::ZERO;
|
||||
let base = F::Extension::from_canonical_u64(1u64 << Self::limb_bits());
|
||||
for j in (0..Self::num_limbs()).rev() {
|
||||
let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)];
|
||||
let max_limb = 1 << Self::limb_bits();
|
||||
let product = (0..max_limb)
|
||||
.map(|x| this_limb - F::Extension::from_canonical_usize(x))
|
||||
.product();
|
||||
constraints.push(product);
|
||||
|
||||
if j < Self::num_result_limbs() {
|
||||
combined_result_limbs = base * combined_result_limbs + this_limb;
|
||||
} else {
|
||||
combined_carry_limbs = base * combined_carry_limbs + this_limb;
|
||||
}
|
||||
}
|
||||
constraints.push(combined_result_limbs - output_result);
|
||||
constraints.push(combined_carry_limbs - output_carry);
|
||||
}
|
||||
|
||||
constraints
|
||||
}
|
||||
|
||||
fn eval_unfiltered_base_one(
|
||||
&self,
|
||||
vars: EvaluationVarsBase<F>,
|
||||
mut yield_constr: StridedConstraintConsumer<F>,
|
||||
) {
|
||||
for i in 0..self.num_ops {
|
||||
let addends: Vec<F> = (0..self.num_addends)
|
||||
.map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)])
|
||||
.collect();
|
||||
let carry = vars.local_wires[self.wire_ith_carry(i)];
|
||||
|
||||
let computed_output = addends.iter().fold(F::ZERO, |x, &y| x + y) + carry;
|
||||
|
||||
let output_result = vars.local_wires[self.wire_ith_output_result(i)];
|
||||
let output_carry = vars.local_wires[self.wire_ith_output_carry(i)];
|
||||
|
||||
let base = F::from_canonical_u64(1 << 32u64);
|
||||
let combined_output = output_carry * base + output_result;
|
||||
|
||||
yield_constr.one(combined_output - computed_output);
|
||||
|
||||
let mut combined_result_limbs = F::ZERO;
|
||||
let mut combined_carry_limbs = F::ZERO;
|
||||
let base = F::from_canonical_u64(1u64 << Self::limb_bits());
|
||||
for j in (0..Self::num_limbs()).rev() {
|
||||
let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)];
|
||||
let max_limb = 1 << Self::limb_bits();
|
||||
let product = (0..max_limb)
|
||||
.map(|x| this_limb - F::from_canonical_usize(x))
|
||||
.product();
|
||||
yield_constr.one(product);
|
||||
|
||||
if j < Self::num_result_limbs() {
|
||||
combined_result_limbs = base * combined_result_limbs + this_limb;
|
||||
} else {
|
||||
combined_carry_limbs = base * combined_carry_limbs + this_limb;
|
||||
}
|
||||
}
|
||||
yield_constr.one(combined_result_limbs - output_result);
|
||||
yield_constr.one(combined_carry_limbs - output_carry);
|
||||
}
|
||||
}
|
||||
|
||||
fn eval_unfiltered_recursively(
|
||||
&self,
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
vars: EvaluationTargets<D>,
|
||||
) -> Vec<ExtensionTarget<D>> {
|
||||
let mut constraints = Vec::with_capacity(self.num_constraints());
|
||||
|
||||
for i in 0..self.num_ops {
|
||||
let addends: Vec<ExtensionTarget<D>> = (0..self.num_addends)
|
||||
.map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)])
|
||||
.collect();
|
||||
let carry = vars.local_wires[self.wire_ith_carry(i)];
|
||||
|
||||
let mut computed_output = carry;
|
||||
for addend in addends {
|
||||
computed_output = builder.add_extension(computed_output, addend);
|
||||
}
|
||||
|
||||
let output_result = vars.local_wires[self.wire_ith_output_result(i)];
|
||||
let output_carry = vars.local_wires[self.wire_ith_output_carry(i)];
|
||||
|
||||
let base: F::Extension = F::from_canonical_u64(1 << 32u64).into();
|
||||
let base_target = builder.constant_extension(base);
|
||||
let combined_output =
|
||||
builder.mul_add_extension(output_carry, base_target, output_result);
|
||||
|
||||
constraints.push(builder.sub_extension(combined_output, computed_output));
|
||||
|
||||
let mut combined_result_limbs = builder.zero_extension();
|
||||
let mut combined_carry_limbs = builder.zero_extension();
|
||||
let base = builder
|
||||
.constant_extension(F::Extension::from_canonical_u64(1u64 << Self::limb_bits()));
|
||||
for j in (0..Self::num_limbs()).rev() {
|
||||
let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)];
|
||||
let max_limb = 1 << Self::limb_bits();
|
||||
|
||||
let mut product = builder.one_extension();
|
||||
for x in 0..max_limb {
|
||||
let x_target =
|
||||
builder.constant_extension(F::Extension::from_canonical_usize(x));
|
||||
let diff = builder.sub_extension(this_limb, x_target);
|
||||
product = builder.mul_extension(product, diff);
|
||||
}
|
||||
constraints.push(product);
|
||||
|
||||
if j < Self::num_result_limbs() {
|
||||
combined_result_limbs =
|
||||
builder.mul_add_extension(base, combined_result_limbs, this_limb);
|
||||
} else {
|
||||
combined_carry_limbs =
|
||||
builder.mul_add_extension(base, combined_carry_limbs, this_limb);
|
||||
}
|
||||
}
|
||||
constraints.push(builder.sub_extension(combined_result_limbs, output_result));
|
||||
constraints.push(builder.sub_extension(combined_carry_limbs, output_carry));
|
||||
}
|
||||
|
||||
constraints
|
||||
}
|
||||
|
||||
fn generators(
|
||||
&self,
|
||||
gate_index: usize,
|
||||
_local_constants: &[F],
|
||||
) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
(0..self.num_ops)
|
||||
.map(|i| {
|
||||
let g: Box<dyn WitnessGenerator<F>> = Box::new(
|
||||
U32AddManyGenerator {
|
||||
gate: *self,
|
||||
gate_index,
|
||||
i,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
.adapter(),
|
||||
);
|
||||
g
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
(self.num_addends + 3) * self.num_ops + Self::num_limbs() * self.num_ops
|
||||
}
|
||||
|
||||
fn num_constants(&self) -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
fn degree(&self) -> usize {
|
||||
1 << Self::limb_bits()
|
||||
}
|
||||
|
||||
fn num_constraints(&self) -> usize {
|
||||
self.num_ops * (3 + Self::num_limbs())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct U32AddManyGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
gate: U32AddManyGate<F, D>,
|
||||
gate_index: usize,
|
||||
i: usize,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
for U32AddManyGenerator<F, D>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
let local_target = |input| Target::wire(self.gate_index, input);
|
||||
|
||||
(0..self.gate.num_addends)
|
||||
.map(|j| local_target(self.gate.wire_ith_op_jth_addend(self.i, j)))
|
||||
.chain([local_target(self.gate.wire_ith_carry(self.i))])
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
|
||||
let local_wire = |input| Wire {
|
||||
gate: self.gate_index,
|
||||
input,
|
||||
};
|
||||
|
||||
let get_local_wire = |input| witness.get_wire(local_wire(input));
|
||||
|
||||
let addends: Vec<_> = (0..self.gate.num_addends)
|
||||
.map(|j| get_local_wire(self.gate.wire_ith_op_jth_addend(self.i, j)))
|
||||
.collect();
|
||||
let carry = get_local_wire(self.gate.wire_ith_carry(self.i));
|
||||
|
||||
let output = addends.iter().fold(F::ZERO, |x, &y| x + y) + carry;
|
||||
let output_u64 = output.to_canonical_u64();
|
||||
|
||||
let output_carry_u64 = output_u64 >> 32;
|
||||
let output_result_u64 = output_u64 & ((1 << 32) - 1);
|
||||
|
||||
let output_carry = F::from_canonical_u64(output_carry_u64);
|
||||
let output_result = F::from_canonical_u64(output_result_u64);
|
||||
|
||||
let output_carry_wire = local_wire(self.gate.wire_ith_output_carry(self.i));
|
||||
let output_result_wire = local_wire(self.gate.wire_ith_output_result(self.i));
|
||||
|
||||
out_buffer.set_wire(output_carry_wire, output_carry);
|
||||
out_buffer.set_wire(output_result_wire, output_result);
|
||||
|
||||
let num_result_limbs = U32AddManyGate::<F, D>::num_result_limbs();
|
||||
let num_carry_limbs = U32AddManyGate::<F, D>::num_carry_limbs();
|
||||
let limb_base = 1 << U32AddManyGate::<F, D>::limb_bits();
|
||||
|
||||
let split_to_limbs = |mut val, num| {
|
||||
unfold((), move |_| {
|
||||
let ret = val % limb_base;
|
||||
val /= limb_base;
|
||||
Some(ret)
|
||||
})
|
||||
.take(num)
|
||||
.map(F::from_canonical_u64)
|
||||
};
|
||||
|
||||
let result_limbs = split_to_limbs(output_result_u64, num_result_limbs);
|
||||
let carry_limbs = split_to_limbs(output_carry_u64, num_carry_limbs);
|
||||
|
||||
for (j, limb) in result_limbs.chain(carry_limbs).enumerate() {
|
||||
let wire = local_wire(self.gate.wire_ith_output_jth_limb(self.i, j));
|
||||
out_buffer.set_wire(wire, limb);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use anyhow::Result;
|
||||
use itertools::unfold;
|
||||
use rand::Rng;
|
||||
|
||||
use crate::field::extension_field::quartic::QuarticExtension;
|
||||
use crate::field::field_types::Field;
|
||||
use crate::field::goldilocks_field::GoldilocksField;
|
||||
use crate::gates::add_many_u32::U32AddManyGate;
|
||||
use crate::gates::gate::Gate;
|
||||
use crate::gates::gate_testing::{test_eval_fns, test_low_degree};
|
||||
use crate::hash::hash_types::HashOut;
|
||||
use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
|
||||
use crate::plonk::vars::EvaluationVars;
|
||||
|
||||
#[test]
|
||||
fn low_degree() {
|
||||
test_low_degree::<GoldilocksField, _, 4>(U32AddManyGate::<GoldilocksField, 4> {
|
||||
num_addends: 4,
|
||||
num_ops: 3,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn eval_fns() -> Result<()> {
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
test_eval_fns::<F, C, _, D>(U32AddManyGate::<GoldilocksField, D> {
|
||||
num_addends: 4,
|
||||
num_ops: 3,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_gate_constraint() {
|
||||
type F = GoldilocksField;
|
||||
type FF = QuarticExtension<GoldilocksField>;
|
||||
const D: usize = 4;
|
||||
const NUM_ADDENDS: usize = 10;
|
||||
const NUM_U32_ADD_MANY_OPS: usize = 3;
|
||||
|
||||
fn get_wires(addends: Vec<Vec<u64>>, carries: Vec<u64>) -> Vec<FF> {
|
||||
let mut v0 = Vec::new();
|
||||
let mut v1 = Vec::new();
|
||||
|
||||
let num_result_limbs = U32AddManyGate::<F, D>::num_result_limbs();
|
||||
let num_carry_limbs = U32AddManyGate::<F, D>::num_carry_limbs();
|
||||
let limb_base = 1 << U32AddManyGate::<F, D>::limb_bits();
|
||||
for op in 0..NUM_U32_ADD_MANY_OPS {
|
||||
let adds = &addends[op];
|
||||
let ca = carries[op];
|
||||
|
||||
let output = adds.iter().sum::<u64>() + ca;
|
||||
let output_result = output & ((1 << 32) - 1);
|
||||
let output_carry = output >> 32;
|
||||
|
||||
let split_to_limbs = |mut val, num| {
|
||||
unfold((), move |_| {
|
||||
let ret = val % limb_base;
|
||||
val /= limb_base;
|
||||
Some(ret)
|
||||
})
|
||||
.take(num)
|
||||
.map(F::from_canonical_u64)
|
||||
};
|
||||
|
||||
let mut result_limbs: Vec<_> =
|
||||
split_to_limbs(output_result, num_result_limbs).collect();
|
||||
let mut carry_limbs: Vec<_> =
|
||||
split_to_limbs(output_carry, num_carry_limbs).collect();
|
||||
|
||||
for a in adds {
|
||||
v0.push(F::from_canonical_u64(*a));
|
||||
}
|
||||
v0.push(F::from_canonical_u64(ca));
|
||||
v0.push(F::from_canonical_u64(output_result));
|
||||
v0.push(F::from_canonical_u64(output_carry));
|
||||
v1.append(&mut result_limbs);
|
||||
v1.append(&mut carry_limbs);
|
||||
}
|
||||
|
||||
v0.iter().chain(v1.iter()).map(|&x| x.into()).collect()
|
||||
}
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
let addends: Vec<Vec<_>> = (0..NUM_U32_ADD_MANY_OPS)
|
||||
.map(|_| (0..NUM_ADDENDS).map(|_| rng.gen::<u32>() as u64).collect())
|
||||
.collect();
|
||||
let carries: Vec<_> = (0..NUM_U32_ADD_MANY_OPS)
|
||||
.map(|_| rng.gen::<u32>() as u64)
|
||||
.collect();
|
||||
|
||||
let gate = U32AddManyGate::<F, D> {
|
||||
num_addends: NUM_ADDENDS,
|
||||
num_ops: NUM_U32_ADD_MANY_OPS,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
|
||||
let vars = EvaluationVars {
|
||||
local_constants: &[],
|
||||
local_wires: &get_wires(addends, carries),
|
||||
public_inputs_hash: &HashOut::rand(),
|
||||
};
|
||||
|
||||
assert!(
|
||||
gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()),
|
||||
"Gate constraints are not satisfied."
|
||||
);
|
||||
}
|
||||
}
|
||||
@ -132,7 +132,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ArithmeticGate
|
||||
);
|
||||
g
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
|
||||
@ -139,7 +139,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ArithmeticExte
|
||||
);
|
||||
g
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
|
||||
@ -213,7 +213,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for U32ArithmeticG
|
||||
);
|
||||
g
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
@ -440,10 +440,7 @@ mod tests {
|
||||
v1.append(&mut output_limbs_f);
|
||||
}
|
||||
|
||||
v0.iter()
|
||||
.chain(v1.iter())
|
||||
.map(|&x| x.into())
|
||||
.collect::<Vec<_>>()
|
||||
v0.iter().chain(v1.iter()).map(|&x| x.into()).collect()
|
||||
}
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::field_types::{Field, Field64};
|
||||
use plonky2_field::packed_field::PackedField;
|
||||
use plonky2_util::{bits_u64, ceil_div_usize};
|
||||
|
||||
@ -26,7 +26,7 @@ use crate::plonk::vars::{
|
||||
|
||||
/// A gate for checking that one value is less than or equal to another.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct AssertLessThanGate<F: PrimeField + Extendable<D>, const D: usize> {
|
||||
pub struct AssertLessThanGate<F: Field64 + Extendable<D>, const D: usize> {
|
||||
pub(crate) num_bits: usize,
|
||||
pub(crate) num_chunks: usize,
|
||||
_phantom: PhantomData<F>,
|
||||
@ -466,7 +466,8 @@ mod tests {
|
||||
|
||||
use anyhow::Result;
|
||||
use plonky2_field::extension_field::quartic::QuarticExtension;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField64;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use rand::Rng;
|
||||
|
||||
@ -589,7 +590,7 @@ mod tests {
|
||||
v.append(&mut chunks_equal);
|
||||
v.append(&mut intermediate_values);
|
||||
|
||||
v.iter().map(|&x| x.into()).collect::<Vec<_>>()
|
||||
v.iter().map(|&x| x.into()).collect()
|
||||
};
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
use std::ops::Range;
|
||||
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::field_types::{Field, Field64};
|
||||
use plonky2_field::packed_field::PackedField;
|
||||
|
||||
use crate::gates::batchable::MultiOpsGate;
|
||||
@ -32,7 +32,7 @@ impl<const B: usize> BaseSumGate<B> {
|
||||
Self { num_limbs }
|
||||
}
|
||||
|
||||
pub fn new_from_config<F: PrimeField>(config: &CircuitConfig) -> Self {
|
||||
pub fn new_from_config<F: Field64>(config: &CircuitConfig) -> Self {
|
||||
let num_limbs = F::BITS.min(config.num_routed_wires - Self::START_LIMBS);
|
||||
Self::new(num_limbs)
|
||||
}
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::field_types::{Field, Field64};
|
||||
use plonky2_field::packed_field::PackedField;
|
||||
use plonky2_util::{bits_u64, ceil_div_usize};
|
||||
|
||||
@ -24,7 +24,7 @@ use crate::plonk::vars::{
|
||||
|
||||
/// A gate for checking that one value is less than or equal to another.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ComparisonGate<F: PrimeField + Extendable<D>, const D: usize> {
|
||||
pub struct ComparisonGate<F: Field64 + Extendable<D>, const D: usize> {
|
||||
pub(crate) num_bits: usize,
|
||||
pub(crate) num_chunks: usize,
|
||||
_phantom: PhantomData<F>,
|
||||
@ -541,7 +541,8 @@ mod tests {
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use anyhow::Result;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField64;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use rand::Rng;
|
||||
|
||||
@ -679,7 +680,7 @@ mod tests {
|
||||
v.append(&mut intermediate_values);
|
||||
v.append(&mut msd_bits);
|
||||
|
||||
v.iter().map(|&x| x.into()).collect::<Vec<_>>()
|
||||
v.iter().map(|&x| x.into()).collect()
|
||||
};
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
@ -112,7 +112,7 @@ pub trait Gate<F: RichField + Extendable<D>, const D: usize>: 'static + Send + S
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
mut vars: EvaluationTargets<D>,
|
||||
prefix: &[bool],
|
||||
combined_gate_constraints: &mut Vec<ExtensionTarget<D>>,
|
||||
combined_gate_constraints: &mut [ExtensionTarget<D>],
|
||||
) {
|
||||
let filter = compute_filter_recursively(builder, prefix, vars.local_constants);
|
||||
vars.remove_prefix(prefix);
|
||||
|
||||
@ -10,7 +10,7 @@ use crate::hash::hash_types::RichField;
|
||||
use crate::iop::witness::{PartialWitness, Witness};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::circuit_data::CircuitConfig;
|
||||
use crate::plonk::config::GenericConfig;
|
||||
use crate::plonk::config::{GenericConfig, Hasher};
|
||||
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBaseBatch};
|
||||
use crate::plonk::verifier::verify;
|
||||
use crate::util::transpose;
|
||||
@ -92,7 +92,10 @@ pub fn test_eval_fns<
|
||||
const D: usize,
|
||||
>(
|
||||
gate: G,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
// Test that `eval_unfiltered` and `eval_unfiltered_base` are coherent.
|
||||
let wires_base = F::rand_vec(gate.num_wires());
|
||||
let constants_base = F::rand_vec(gate.num_constants());
|
||||
|
||||
@ -228,9 +228,9 @@ mod tests {
|
||||
use crate::gates::arithmetic_extension::ArithmeticExtensionGate;
|
||||
use crate::gates::base_sum::BaseSumGate;
|
||||
use crate::gates::constant::ConstantGate;
|
||||
use crate::gates::gmimc::GMiMCGate;
|
||||
use crate::gates::interpolation::HighDegreeInterpolationGate;
|
||||
use crate::gates::noop::NoopGate;
|
||||
use crate::gates::poseidon::PoseidonGate;
|
||||
use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
|
||||
|
||||
#[test]
|
||||
@ -245,7 +245,7 @@ mod tests {
|
||||
GateRef::new(ConstantGate { num_consts: 4 }),
|
||||
GateRef::new(ArithmeticExtensionGate { num_ops: 4 }),
|
||||
GateRef::new(BaseSumGate::<4>::new(4)),
|
||||
GateRef::new(GMiMCGate::<F, D, 12>::new()),
|
||||
GateRef::new(PoseidonGate::<F, D>::new()),
|
||||
GateRef::new(HighDegreeInterpolationGate::new(2)),
|
||||
];
|
||||
|
||||
@ -276,7 +276,7 @@ mod tests {
|
||||
assert!(
|
||||
gates_with_prefix
|
||||
.iter()
|
||||
.all(|(g, p)| g.0.degree() + g.0.num_constants() + p.len() <= 8),
|
||||
.all(|(g, p)| g.0.degree() + g.0.num_constants() + p.len() <= 9),
|
||||
"Total degree is larger than 8."
|
||||
);
|
||||
|
||||
|
||||
@ -1,445 +1 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::packed_field::PackedField;
|
||||
|
||||
use crate::gates::batchable::MultiOpsGate;
|
||||
use crate::gates::gate::Gate;
|
||||
use crate::gates::packed_util::PackedEvaluableBase;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::gmimc;
|
||||
use crate::hash::gmimc::GMiMC;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::wire::Wire;
|
||||
use crate::iop::witness::{PartitionWitness, Witness};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::vars::{
|
||||
EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch,
|
||||
EvaluationVarsBasePacked,
|
||||
};
|
||||
|
||||
/// Evaluates a full GMiMC permutation with 12 state elements.
|
||||
///
|
||||
/// This also has some extra features to make it suitable for efficiently verifying Merkle proofs.
|
||||
/// It has a flag which can be used to swap the first four inputs with the next four, for ordering
|
||||
/// sibling digests.
|
||||
#[derive(Debug)]
|
||||
pub struct GMiMCGate<
|
||||
F: RichField + Extendable<D> + GMiMC<WIDTH>,
|
||||
const D: usize,
|
||||
const WIDTH: usize,
|
||||
> {
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D> + GMiMC<WIDTH>, const D: usize, const WIDTH: usize>
|
||||
GMiMCGate<F, D, WIDTH>
|
||||
{
|
||||
pub fn new() -> Self {
|
||||
GMiMCGate {
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// The wire index for the `i`th input to the permutation.
|
||||
pub fn wire_input(i: usize) -> usize {
|
||||
i
|
||||
}
|
||||
|
||||
/// The wire index for the `i`th output to the permutation.
|
||||
pub fn wire_output(i: usize) -> usize {
|
||||
WIDTH + i
|
||||
}
|
||||
|
||||
/// If this is set to 1, the first four inputs will be swapped with the next four inputs. This
|
||||
/// is useful for ordering hashes in Merkle proofs. Otherwise, this should be set to 0.
|
||||
pub const WIRE_SWAP: usize = 2 * WIDTH;
|
||||
|
||||
/// A wire which stores the input to the `i`th cubing.
|
||||
fn wire_cubing_input(i: usize) -> usize {
|
||||
2 * WIDTH + 1 + i
|
||||
}
|
||||
|
||||
/// End of wire indices, exclusive.
|
||||
fn end() -> usize {
|
||||
2 * WIDTH + 1 + gmimc::NUM_ROUNDS
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D> + GMiMC<WIDTH>, const D: usize, const WIDTH: usize> Gate<F, D>
|
||||
for GMiMCGate<F, D, WIDTH>
|
||||
{
|
||||
fn id(&self) -> String {
|
||||
format!("<WIDTH={}> {:?}", WIDTH, self)
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let mut constraints = Vec::with_capacity(self.num_constraints());
|
||||
|
||||
// Assert that `swap` is binary.
|
||||
let swap = vars.local_wires[Self::WIRE_SWAP];
|
||||
constraints.push(swap * (swap - F::Extension::ONE));
|
||||
|
||||
let mut state = Vec::with_capacity(12);
|
||||
for i in 0..4 {
|
||||
let a = vars.local_wires[i];
|
||||
let b = vars.local_wires[i + 4];
|
||||
state.push(a + swap * (b - a));
|
||||
}
|
||||
for i in 0..4 {
|
||||
let a = vars.local_wires[i + 4];
|
||||
let b = vars.local_wires[i];
|
||||
state.push(a + swap * (b - a));
|
||||
}
|
||||
for i in 8..12 {
|
||||
state.push(vars.local_wires[i]);
|
||||
}
|
||||
|
||||
// Value that is implicitly added to each element.
|
||||
// See https://affine.group/2020/02/starkware-challenge
|
||||
let mut addition_buffer = F::Extension::ZERO;
|
||||
|
||||
for r in 0..gmimc::NUM_ROUNDS {
|
||||
let active = r % WIDTH;
|
||||
let constant = F::from_canonical_u64(<F as GMiMC<WIDTH>>::ROUND_CONSTANTS[r]);
|
||||
let cubing_input = state[active] + addition_buffer + constant.into();
|
||||
let cubing_input_wire = vars.local_wires[Self::wire_cubing_input(r)];
|
||||
constraints.push(cubing_input - cubing_input_wire);
|
||||
let f = cubing_input_wire.cube();
|
||||
addition_buffer += f;
|
||||
state[active] -= f;
|
||||
}
|
||||
|
||||
for i in 0..WIDTH {
|
||||
state[i] += addition_buffer;
|
||||
constraints.push(state[i] - vars.local_wires[Self::wire_output(i)]);
|
||||
}
|
||||
|
||||
constraints
|
||||
}
|
||||
|
||||
fn eval_unfiltered_base_one(
|
||||
&self,
|
||||
_vars: EvaluationVarsBase<F>,
|
||||
_yield_constr: StridedConstraintConsumer<F>,
|
||||
) {
|
||||
panic!("use eval_unfiltered_base_packed instead");
|
||||
}
|
||||
|
||||
fn eval_unfiltered_base_batch(&self, vars_base: EvaluationVarsBaseBatch<F>) -> Vec<F> {
|
||||
self.eval_unfiltered_base_batch_packed(vars_base)
|
||||
}
|
||||
|
||||
fn eval_unfiltered_recursively(
|
||||
&self,
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
vars: EvaluationTargets<D>,
|
||||
) -> Vec<ExtensionTarget<D>> {
|
||||
let mut constraints = Vec::with_capacity(self.num_constraints());
|
||||
|
||||
let swap = vars.local_wires[Self::WIRE_SWAP];
|
||||
constraints.push(builder.mul_sub_extension(swap, swap, swap));
|
||||
|
||||
let mut state = Vec::with_capacity(12);
|
||||
for i in 0..4 {
|
||||
let a = vars.local_wires[i];
|
||||
let b = vars.local_wires[i + 4];
|
||||
let delta = builder.sub_extension(b, a);
|
||||
state.push(builder.mul_add_extension(swap, delta, a));
|
||||
}
|
||||
for i in 0..4 {
|
||||
let a = vars.local_wires[i + 4];
|
||||
let b = vars.local_wires[i];
|
||||
let delta = builder.sub_extension(b, a);
|
||||
state.push(builder.mul_add_extension(swap, delta, a));
|
||||
}
|
||||
for i in 8..12 {
|
||||
state.push(vars.local_wires[i]);
|
||||
}
|
||||
|
||||
// Value that is implicitly added to each element.
|
||||
// See https://affine.group/2020/02/starkware-challenge
|
||||
let mut addition_buffer = builder.zero_extension();
|
||||
|
||||
for r in 0..gmimc::NUM_ROUNDS {
|
||||
let active = r % WIDTH;
|
||||
|
||||
let constant = F::from_canonical_u64(<F as GMiMC<WIDTH>>::ROUND_CONSTANTS[r]);
|
||||
let constant = builder.constant_extension(constant.into());
|
||||
let cubing_input =
|
||||
builder.add_many_extension(&[state[active], addition_buffer, constant]);
|
||||
let cubing_input_wire = vars.local_wires[Self::wire_cubing_input(r)];
|
||||
constraints.push(builder.sub_extension(cubing_input, cubing_input_wire));
|
||||
let f = builder.cube_extension(cubing_input_wire);
|
||||
addition_buffer = builder.add_extension(addition_buffer, f);
|
||||
state[active] = builder.sub_extension(state[active], f);
|
||||
}
|
||||
|
||||
for i in 0..WIDTH {
|
||||
state[i] = builder.add_extension(state[i], addition_buffer);
|
||||
constraints
|
||||
.push(builder.sub_extension(state[i], vars.local_wires[Self::wire_output(i)]));
|
||||
}
|
||||
|
||||
constraints
|
||||
}
|
||||
|
||||
fn generators(
|
||||
&self,
|
||||
gate_index: usize,
|
||||
_local_constants: &[F],
|
||||
) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
let gen = GMiMCGenerator::<F, D, WIDTH> {
|
||||
gate_index,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
vec![Box::new(gen.adapter())]
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
Self::end()
|
||||
}
|
||||
|
||||
fn num_constants(&self) -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
fn degree(&self) -> usize {
|
||||
3
|
||||
}
|
||||
|
||||
fn num_constraints(&self) -> usize {
|
||||
gmimc::NUM_ROUNDS + WIDTH + 1
|
||||
}
|
||||
}
|
||||
impl<F: RichField + Extendable<D> + GMiMC<WIDTH>, const D: usize, const WIDTH: usize>
|
||||
MultiOpsGate<F, D> for GMiMCGate<F, D, WIDTH>
|
||||
{
|
||||
fn num_ops(&self) -> usize {
|
||||
1
|
||||
}
|
||||
|
||||
fn dependencies_ith_op(&self, _gate_index: usize, _i: usize) -> Vec<Target> {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D> + GMiMC<WIDTH>, const D: usize, const WIDTH: usize>
|
||||
PackedEvaluableBase<F, D> for GMiMCGate<F, D, WIDTH>
|
||||
{
|
||||
fn eval_unfiltered_base_packed<P: PackedField<Scalar = F>>(
|
||||
&self,
|
||||
vars: EvaluationVarsBasePacked<P>,
|
||||
mut yield_constr: StridedConstraintConsumer<P>,
|
||||
) {
|
||||
// Assert that `swap` is binary.
|
||||
let swap = vars.local_wires[Self::WIRE_SWAP];
|
||||
yield_constr.one(swap * (swap - F::ONE));
|
||||
|
||||
let mut state = Vec::with_capacity(12);
|
||||
for i in 0..4 {
|
||||
let a = vars.local_wires[i];
|
||||
let b = vars.local_wires[i + 4];
|
||||
state.push(a + swap * (b - a));
|
||||
}
|
||||
for i in 0..4 {
|
||||
let a = vars.local_wires[i + 4];
|
||||
let b = vars.local_wires[i];
|
||||
state.push(a + swap * (b - a));
|
||||
}
|
||||
for i in 8..12 {
|
||||
state.push(vars.local_wires[i]);
|
||||
}
|
||||
|
||||
// Value that is implicitly added to each element.
|
||||
// See https://affine.group/2020/02/starkware-challenge
|
||||
let mut addition_buffer = P::ZEROS;
|
||||
|
||||
for r in 0..gmimc::NUM_ROUNDS {
|
||||
let active = r % WIDTH;
|
||||
let constant = F::from_canonical_u64(<F as GMiMC<WIDTH>>::ROUND_CONSTANTS[r]);
|
||||
let cubing_input = state[active] + addition_buffer + constant;
|
||||
let cubing_input_wire = vars.local_wires[Self::wire_cubing_input(r)];
|
||||
yield_constr.one(cubing_input - cubing_input_wire);
|
||||
let f = cubing_input_wire.square() * cubing_input_wire;
|
||||
addition_buffer += f;
|
||||
state[active] -= f;
|
||||
}
|
||||
|
||||
for i in 0..WIDTH {
|
||||
state[i] += addition_buffer;
|
||||
yield_constr.one(state[i] - vars.local_wires[Self::wire_output(i)]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct GMiMCGenerator<
|
||||
F: RichField + Extendable<D> + GMiMC<WIDTH>,
|
||||
const D: usize,
|
||||
const WIDTH: usize,
|
||||
> {
|
||||
gate_index: usize,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D> + GMiMC<WIDTH>, const D: usize, const WIDTH: usize>
|
||||
SimpleGenerator<F> for GMiMCGenerator<F, D, WIDTH>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
let mut dep_input_indices = Vec::with_capacity(WIDTH + 1);
|
||||
for i in 0..WIDTH {
|
||||
dep_input_indices.push(GMiMCGate::<F, D, WIDTH>::wire_input(i));
|
||||
}
|
||||
dep_input_indices.push(GMiMCGate::<F, D, WIDTH>::WIRE_SWAP);
|
||||
|
||||
dep_input_indices
|
||||
.into_iter()
|
||||
.map(|input| {
|
||||
Target::Wire(Wire {
|
||||
gate: self.gate_index,
|
||||
input,
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
|
||||
let mut state = (0..WIDTH)
|
||||
.map(|i| {
|
||||
witness.get_wire(Wire {
|
||||
gate: self.gate_index,
|
||||
input: GMiMCGate::<F, D, WIDTH>::wire_input(i),
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let swap_value = witness.get_wire(Wire {
|
||||
gate: self.gate_index,
|
||||
input: GMiMCGate::<F, D, WIDTH>::WIRE_SWAP,
|
||||
});
|
||||
debug_assert!(swap_value == F::ZERO || swap_value == F::ONE);
|
||||
if swap_value == F::ONE {
|
||||
for i in 0..4 {
|
||||
state.swap(i, 4 + i);
|
||||
}
|
||||
}
|
||||
|
||||
// Value that is implicitly added to each element.
|
||||
// See https://affine.group/2020/02/starkware-challenge
|
||||
let mut addition_buffer = F::ZERO;
|
||||
|
||||
for r in 0..gmimc::NUM_ROUNDS {
|
||||
let active = r % WIDTH;
|
||||
let constant = F::from_canonical_u64(<F as GMiMC<WIDTH>>::ROUND_CONSTANTS[r]);
|
||||
let cubing_input = state[active] + addition_buffer + constant;
|
||||
out_buffer.set_wire(
|
||||
Wire {
|
||||
gate: self.gate_index,
|
||||
input: GMiMCGate::<F, D, WIDTH>::wire_cubing_input(r),
|
||||
},
|
||||
cubing_input,
|
||||
);
|
||||
let f = cubing_input.cube();
|
||||
addition_buffer += f;
|
||||
state[active] -= f;
|
||||
}
|
||||
|
||||
for i in 0..WIDTH {
|
||||
state[i] += addition_buffer;
|
||||
out_buffer.set_wire(
|
||||
Wire {
|
||||
gate: self.gate_index,
|
||||
input: GMiMCGate::<F, D, WIDTH>::wire_output(i),
|
||||
},
|
||||
state[i],
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use anyhow::Result;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
|
||||
use crate::gates::gate_testing::{test_eval_fns, test_low_degree};
|
||||
use crate::gates::gmimc::GMiMCGate;
|
||||
use crate::hash::gmimc::GMiMC;
|
||||
use crate::iop::generator::generate_partial_witness;
|
||||
use crate::iop::wire::Wire;
|
||||
use crate::iop::witness::{PartialWitness, Witness};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::circuit_data::CircuitConfig;
|
||||
use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
|
||||
|
||||
#[test]
|
||||
fn generated_output() {
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
const WIDTH: usize = 12;
|
||||
|
||||
let config = CircuitConfig::standard_recursion_config();
|
||||
let mut builder = CircuitBuilder::new(config);
|
||||
type Gate = GMiMCGate<F, D, WIDTH>;
|
||||
let gate = Gate::new();
|
||||
let gate_index = builder.add_gate(gate, vec![], vec![]);
|
||||
let circuit = builder.build_prover::<C>();
|
||||
|
||||
let permutation_inputs = (0..WIDTH).map(F::from_canonical_usize).collect::<Vec<_>>();
|
||||
|
||||
let mut inputs = PartialWitness::new();
|
||||
inputs.set_wire(
|
||||
Wire {
|
||||
gate: gate_index,
|
||||
input: Gate::WIRE_SWAP,
|
||||
},
|
||||
F::ZERO,
|
||||
);
|
||||
for i in 0..WIDTH {
|
||||
inputs.set_wire(
|
||||
Wire {
|
||||
gate: gate_index,
|
||||
input: Gate::wire_input(i),
|
||||
},
|
||||
permutation_inputs[i],
|
||||
);
|
||||
}
|
||||
|
||||
let witness = generate_partial_witness(inputs, &circuit.prover_only, &circuit.common);
|
||||
|
||||
let expected_outputs: [F; WIDTH] =
|
||||
F::gmimc_permute_naive(permutation_inputs.try_into().unwrap());
|
||||
for i in 0..WIDTH {
|
||||
let out = witness.get_wire(Wire {
|
||||
gate: 0,
|
||||
input: Gate::wire_output(i),
|
||||
});
|
||||
assert_eq!(out, expected_outputs[i]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn low_degree() {
|
||||
type F = GoldilocksField;
|
||||
const WIDTH: usize = 12;
|
||||
let gate = GMiMCGate::<F, 4, WIDTH>::new();
|
||||
test_low_degree(gate)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn eval_fns() -> Result<()> {
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
const WIDTH: usize = 12;
|
||||
let gate = GMiMCGate::<F, D, WIDTH>::new();
|
||||
test_eval_fns::<F, C, _, D>(gate)
|
||||
}
|
||||
}
|
||||
|
||||
@ -355,7 +355,7 @@ mod tests {
|
||||
for i in 0..coeffs.len() {
|
||||
v.extend(coeffs.coeffs[i].0);
|
||||
}
|
||||
v.iter().map(|&x| x.into()).collect::<Vec<_>>()
|
||||
v.iter().map(|&x| x.into()).collect()
|
||||
}
|
||||
|
||||
// Get a working row for InterpolationGate.
|
||||
|
||||
@ -456,7 +456,7 @@ mod tests {
|
||||
.take(gate.num_points() - 2)
|
||||
.flat_map(|ff| ff.0),
|
||||
);
|
||||
v.iter().map(|&x| x.into()).collect::<Vec<_>>()
|
||||
v.iter().map(|&x| x.into()).collect()
|
||||
}
|
||||
|
||||
// Get a working row for LowDegreeInterpolationGate.
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
// Gates have `new` methods that return `GateRef`s.
|
||||
#![allow(clippy::new_ret_no_self)]
|
||||
|
||||
pub mod add_many_u32;
|
||||
pub mod arithmetic_base;
|
||||
pub mod arithmetic_extension;
|
||||
pub mod arithmetic_u32;
|
||||
@ -12,7 +13,6 @@ pub mod constant;
|
||||
pub mod exponentiation;
|
||||
pub mod gate;
|
||||
pub mod gate_tree;
|
||||
pub mod gmimc;
|
||||
pub mod interpolation;
|
||||
pub mod low_degree_interpolation;
|
||||
pub mod multiplication_extension;
|
||||
@ -22,6 +22,7 @@ pub mod poseidon;
|
||||
pub(crate) mod poseidon_mds;
|
||||
pub(crate) mod public_input;
|
||||
pub mod random_access;
|
||||
pub mod range_check_u32;
|
||||
pub mod reducing;
|
||||
pub mod reducing_extension;
|
||||
pub mod subtraction_u32;
|
||||
|
||||
@ -126,7 +126,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for MulExtensionGa
|
||||
);
|
||||
g
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
|
||||
@ -210,7 +210,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for RandomAccessGa
|
||||
);
|
||||
g
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
|
||||
322
plonky2/src/gates/range_check_u32.rs
Normal file
322
plonky2/src/gates/range_check_u32.rs
Normal file
@ -0,0 +1,322 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use plonky2_util::ceil_div_usize;
|
||||
|
||||
use crate::field::extension_field::Extendable;
|
||||
use crate::field::field_types::Field;
|
||||
use crate::gates::gate::Gate;
|
||||
use crate::gates::util::StridedConstraintConsumer;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::witness::{PartitionWitness, Witness};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::plonk_common::{reduce_with_powers, reduce_with_powers_ext_recursive};
|
||||
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
|
||||
|
||||
/// A gate which can decompose a number into base B little-endian limbs.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct U32RangeCheckGate<F: RichField + Extendable<D>, const D: usize> {
|
||||
pub num_input_limbs: usize,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> U32RangeCheckGate<F, D> {
|
||||
pub fn new(num_input_limbs: usize) -> Self {
|
||||
Self {
|
||||
num_input_limbs,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub const AUX_LIMB_BITS: usize = 2;
|
||||
pub const BASE: usize = 1 << Self::AUX_LIMB_BITS;
|
||||
|
||||
fn aux_limbs_per_input_limb(&self) -> usize {
|
||||
ceil_div_usize(32, Self::AUX_LIMB_BITS)
|
||||
}
|
||||
pub fn wire_ith_input_limb(&self, i: usize) -> usize {
|
||||
debug_assert!(i < self.num_input_limbs);
|
||||
i
|
||||
}
|
||||
pub fn wire_ith_input_limb_jth_aux_limb(&self, i: usize, j: usize) -> usize {
|
||||
debug_assert!(i < self.num_input_limbs);
|
||||
debug_assert!(j < self.aux_limbs_per_input_limb());
|
||||
self.num_input_limbs + self.aux_limbs_per_input_limb() * i + j
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for U32RangeCheckGate<F, D> {
|
||||
fn id(&self) -> String {
|
||||
format!("{:?}", self)
|
||||
}
|
||||
|
||||
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
|
||||
let mut constraints = Vec::with_capacity(self.num_constraints());
|
||||
|
||||
let base = F::Extension::from_canonical_usize(Self::BASE);
|
||||
for i in 0..self.num_input_limbs {
|
||||
let input_limb = vars.local_wires[self.wire_ith_input_limb(i)];
|
||||
let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb())
|
||||
.map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)])
|
||||
.collect();
|
||||
let computed_sum = reduce_with_powers(&aux_limbs, base);
|
||||
|
||||
constraints.push(computed_sum - input_limb);
|
||||
for aux_limb in aux_limbs {
|
||||
constraints.push(
|
||||
(0..Self::BASE)
|
||||
.map(|i| aux_limb - F::Extension::from_canonical_usize(i))
|
||||
.product(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
constraints
|
||||
}
|
||||
|
||||
fn eval_unfiltered_base_one(
|
||||
&self,
|
||||
vars: EvaluationVarsBase<F>,
|
||||
mut yield_constr: StridedConstraintConsumer<F>,
|
||||
) {
|
||||
let base = F::from_canonical_usize(Self::BASE);
|
||||
for i in 0..self.num_input_limbs {
|
||||
let input_limb = vars.local_wires[self.wire_ith_input_limb(i)];
|
||||
let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb())
|
||||
.map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)])
|
||||
.collect();
|
||||
let computed_sum = reduce_with_powers(&aux_limbs, base);
|
||||
|
||||
yield_constr.one(computed_sum - input_limb);
|
||||
for aux_limb in aux_limbs {
|
||||
yield_constr.one(
|
||||
(0..Self::BASE)
|
||||
.map(|i| aux_limb - F::from_canonical_usize(i))
|
||||
.product(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn eval_unfiltered_recursively(
|
||||
&self,
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
vars: EvaluationTargets<D>,
|
||||
) -> Vec<ExtensionTarget<D>> {
|
||||
let mut constraints = Vec::with_capacity(self.num_constraints());
|
||||
|
||||
let base = builder.constant(F::from_canonical_usize(Self::BASE));
|
||||
for i in 0..self.num_input_limbs {
|
||||
let input_limb = vars.local_wires[self.wire_ith_input_limb(i)];
|
||||
let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb())
|
||||
.map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)])
|
||||
.collect();
|
||||
let computed_sum = reduce_with_powers_ext_recursive(builder, &aux_limbs, base);
|
||||
|
||||
constraints.push(builder.sub_extension(computed_sum, input_limb));
|
||||
for aux_limb in aux_limbs {
|
||||
constraints.push({
|
||||
let mut acc = builder.one_extension();
|
||||
(0..Self::BASE).for_each(|i| {
|
||||
// We update our accumulator as:
|
||||
// acc' = acc (x - i)
|
||||
// = acc x + (-i) acc
|
||||
// Since -i is constant, we can do this in one arithmetic_extension call.
|
||||
let neg_i = -F::from_canonical_usize(i);
|
||||
acc = builder.arithmetic_extension(F::ONE, neg_i, acc, aux_limb, acc)
|
||||
});
|
||||
acc
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
constraints
|
||||
}
|
||||
|
||||
fn generators(
|
||||
&self,
|
||||
gate_index: usize,
|
||||
_local_constants: &[F],
|
||||
) -> Vec<Box<dyn WitnessGenerator<F>>> {
|
||||
let gen = U32RangeCheckGenerator {
|
||||
gate: *self,
|
||||
gate_index,
|
||||
};
|
||||
vec![Box::new(gen.adapter())]
|
||||
}
|
||||
|
||||
fn num_wires(&self) -> usize {
|
||||
self.num_input_limbs * (1 + self.aux_limbs_per_input_limb())
|
||||
}
|
||||
|
||||
fn num_constants(&self) -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
// Bounded by the range-check (x-0)*(x-1)*...*(x-BASE+1).
|
||||
fn degree(&self) -> usize {
|
||||
Self::BASE
|
||||
}
|
||||
|
||||
// 1 for checking the each sum of aux limbs, plus a range check for each aux limb.
|
||||
fn num_constraints(&self) -> usize {
|
||||
self.num_input_limbs * (1 + self.aux_limbs_per_input_limb())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct U32RangeCheckGenerator<F: RichField + Extendable<D>, const D: usize> {
|
||||
gate: U32RangeCheckGate<F, D>,
|
||||
gate_index: usize,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
|
||||
for U32RangeCheckGenerator<F, D>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
let num_input_limbs = self.gate.num_input_limbs;
|
||||
(0..num_input_limbs)
|
||||
.map(|i| Target::wire(self.gate_index, self.gate.wire_ith_input_limb(i)))
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
|
||||
let num_input_limbs = self.gate.num_input_limbs;
|
||||
for i in 0..num_input_limbs {
|
||||
let sum_value = witness
|
||||
.get_target(Target::wire(
|
||||
self.gate_index,
|
||||
self.gate.wire_ith_input_limb(i),
|
||||
))
|
||||
.to_canonical_u64() as u32;
|
||||
|
||||
let base = U32RangeCheckGate::<F, D>::BASE as u32;
|
||||
let limbs = (0..self.gate.aux_limbs_per_input_limb()).map(|j| {
|
||||
Target::wire(
|
||||
self.gate_index,
|
||||
self.gate.wire_ith_input_limb_jth_aux_limb(i, j),
|
||||
)
|
||||
});
|
||||
let limbs_value = (0..self.gate.aux_limbs_per_input_limb())
|
||||
.scan(sum_value, |acc, _| {
|
||||
let tmp = *acc % base;
|
||||
*acc /= base;
|
||||
Some(F::from_canonical_u32(tmp))
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for (b, b_value) in limbs.zip(limbs_value) {
|
||||
out_buffer.set_target(b, b_value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use anyhow::Result;
|
||||
use itertools::unfold;
|
||||
use plonky2_util::ceil_div_usize;
|
||||
use rand::Rng;
|
||||
|
||||
use crate::field::extension_field::quartic::QuarticExtension;
|
||||
use crate::field::field_types::Field;
|
||||
use crate::field::goldilocks_field::GoldilocksField;
|
||||
use crate::gates::gate::Gate;
|
||||
use crate::gates::gate_testing::{test_eval_fns, test_low_degree};
|
||||
use crate::gates::range_check_u32::U32RangeCheckGate;
|
||||
use crate::hash::hash_types::HashOut;
|
||||
use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
|
||||
use crate::plonk::vars::EvaluationVars;
|
||||
|
||||
#[test]
|
||||
fn low_degree() {
|
||||
test_low_degree::<GoldilocksField, _, 4>(U32RangeCheckGate::new(8))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn eval_fns() -> Result<()> {
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
test_eval_fns::<F, C, _, D>(U32RangeCheckGate::new(8))
|
||||
}
|
||||
|
||||
fn test_gate_constraint(input_limbs: Vec<u64>) {
|
||||
type F = GoldilocksField;
|
||||
type FF = QuarticExtension<GoldilocksField>;
|
||||
const D: usize = 4;
|
||||
const AUX_LIMB_BITS: usize = 2;
|
||||
const BASE: usize = 1 << AUX_LIMB_BITS;
|
||||
const AUX_LIMBS_PER_INPUT_LIMB: usize = ceil_div_usize(32, AUX_LIMB_BITS);
|
||||
|
||||
fn get_wires(input_limbs: Vec<u64>) -> Vec<FF> {
|
||||
let num_input_limbs = input_limbs.len();
|
||||
let mut v = Vec::new();
|
||||
|
||||
for i in 0..num_input_limbs {
|
||||
let input_limb = input_limbs[i];
|
||||
|
||||
let split_to_limbs = |mut val, num| {
|
||||
unfold((), move |_| {
|
||||
let ret = val % (BASE as u64);
|
||||
val /= BASE as u64;
|
||||
Some(ret)
|
||||
})
|
||||
.take(num)
|
||||
.map(F::from_canonical_u64)
|
||||
};
|
||||
|
||||
let mut aux_limbs: Vec<_> =
|
||||
split_to_limbs(input_limb, AUX_LIMBS_PER_INPUT_LIMB).collect();
|
||||
|
||||
v.append(&mut aux_limbs);
|
||||
}
|
||||
|
||||
input_limbs
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(F::from_canonical_u64)
|
||||
.chain(v.iter().cloned())
|
||||
.map(|x| x.into())
|
||||
.collect()
|
||||
}
|
||||
|
||||
let gate = U32RangeCheckGate::<F, D> {
|
||||
num_input_limbs: 8,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
|
||||
let vars = EvaluationVars {
|
||||
local_constants: &[],
|
||||
local_wires: &get_wires(input_limbs),
|
||||
public_inputs_hash: &HashOut::rand(),
|
||||
};
|
||||
|
||||
assert!(
|
||||
gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()),
|
||||
"Gate constraints are not satisfied."
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_gate_constraint_good() {
|
||||
let mut rng = rand::thread_rng();
|
||||
let input_limbs: Vec<_> = (0..8).map(|_| rng.gen::<u32>() as u64).collect();
|
||||
|
||||
test_gate_constraint(input_limbs);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_gate_constraint_bad() {
|
||||
let mut rng = rand::thread_rng();
|
||||
let input_limbs: Vec<_> = (0..8).map(|_| rng.gen()).collect();
|
||||
|
||||
test_gate_constraint(input_limbs);
|
||||
}
|
||||
}
|
||||
@ -355,7 +355,8 @@ mod tests {
|
||||
|
||||
use anyhow::Result;
|
||||
use plonky2_field::extension_field::quartic::QuarticExtension;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField64;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use rand::Rng;
|
||||
|
||||
@ -433,10 +434,7 @@ mod tests {
|
||||
v1.append(&mut output_limbs);
|
||||
}
|
||||
|
||||
v0.iter()
|
||||
.chain(v1.iter())
|
||||
.map(|&x| x.into())
|
||||
.collect::<Vec<_>>()
|
||||
v0.iter().chain(v1.iter()).map(|&x| x.into()).collect()
|
||||
}
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
@ -448,7 +448,7 @@ mod tests {
|
||||
v.push(F::from_bool(switch));
|
||||
}
|
||||
|
||||
v.iter().map(|&x| x.into()).collect::<Vec<_>>()
|
||||
v.iter().map(|&x| x.into()).collect()
|
||||
}
|
||||
|
||||
let first_inputs: Vec<Vec<F>> = (0..num_copies).map(|_| F::rand_vec(CHUNK_SIZE)).collect();
|
||||
|
||||
@ -3,8 +3,9 @@
|
||||
use std::arch::aarch64::*;
|
||||
use std::arch::asm;
|
||||
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
use plonky2_field::field_types::Field64;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use plonky2_util::branch_hint;
|
||||
use static_assertions::const_assert;
|
||||
use unroll::unroll_for_loops;
|
||||
|
||||
@ -108,6 +109,8 @@ const_assert!(check_round_const_bounds_init());
|
||||
|
||||
// ====================================== SCALAR ARITHMETIC =======================================
|
||||
|
||||
const EPSILON: u64 = 0xffffffff;
|
||||
|
||||
/// Addition modulo ORDER accounting for wraparound. Correct only when a + b < 2**64 + ORDER.
|
||||
#[inline(always)]
|
||||
unsafe fn add_with_wraparound(a: u64, b: u64) -> u64 {
|
||||
@ -124,39 +127,36 @@ unsafe fn add_with_wraparound(a: u64, b: u64) -> u64 {
|
||||
adj = lateout(reg) adj,
|
||||
options(pure, nomem, nostack),
|
||||
);
|
||||
res.wrapping_add(adj) // adj is EPSILON if wraparound occured and 0 otherwise
|
||||
res + adj // adj is EPSILON if wraparound occured and 0 otherwise
|
||||
}
|
||||
|
||||
/// Addition of a and (b >> 32) modulo ORDER accounting for wraparound.
|
||||
/// Subtraction of a and (b >> 32) modulo ORDER accounting for wraparound.
|
||||
#[inline(always)]
|
||||
unsafe fn sub_with_wraparound_lsr32(a: u64, b: u64) -> u64 {
|
||||
let res: u64;
|
||||
let adj: u64;
|
||||
asm!(
|
||||
"subs {res}, {a}, {b}, lsr #32",
|
||||
// Set adj to 0xffffffff if subtraction underflowed and 0 otherwise.
|
||||
// 'cc' for 'carry clear'.
|
||||
// NB: The CF in ARM subtraction is the opposite of x86: CF set == underflow did not occur.
|
||||
"csetm {adj:w}, cc",
|
||||
a = in(reg) a,
|
||||
b = in(reg) b,
|
||||
res = lateout(reg) res,
|
||||
adj = lateout(reg) adj,
|
||||
options(pure, nomem, nostack),
|
||||
);
|
||||
res.wrapping_sub(adj) // adj is EPSILON if underflow occured and 0 otherwise.
|
||||
let b_hi = b >> 32;
|
||||
// This could be done with a.overflowing_add(b_hi), but `checked_sub` signals to the compiler
|
||||
// that overflow is unlikely (note: this is a standard library implementation detail, not part
|
||||
// of the spec).
|
||||
match a.checked_sub(b_hi) {
|
||||
Some(res) => res,
|
||||
None => {
|
||||
// Super rare. Better off branching.
|
||||
branch_hint();
|
||||
let res_wrapped = a.wrapping_sub(b_hi);
|
||||
res_wrapped - EPSILON
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Multiplication of the low word (i.e., x as u32) by EPSILON.
|
||||
#[inline(always)]
|
||||
unsafe fn mul_epsilon(x: u64) -> u64 {
|
||||
let res;
|
||||
let epsilon: u64 = 0xffffffff;
|
||||
asm!(
|
||||
// Use UMULL to save one instruction. The compiler emits two: extract the low word and then multiply.
|
||||
"umull {res}, {x:w}, {epsilon:w}",
|
||||
x = in(reg) x,
|
||||
epsilon = in(reg) epsilon,
|
||||
epsilon = in(reg) EPSILON,
|
||||
res = lateout(reg) res,
|
||||
options(pure, nomem, nostack, preserves_flags),
|
||||
);
|
||||
|
||||
@ -4,6 +4,7 @@ use std::mem::size_of;
|
||||
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use plonky2_util::branch_hint;
|
||||
use static_assertions::const_assert;
|
||||
|
||||
use crate::hash::poseidon::{
|
||||
@ -141,6 +142,16 @@ macro_rules! map3 {
|
||||
($f:ident::<$l:literal>, $v:ident) => {
|
||||
($f::<$l>($v.0), $f::<$l>($v.1), $f::<$l>($v.2))
|
||||
};
|
||||
($f:ident::<$l:literal>, $v1:ident, $v2:ident) => {
|
||||
(
|
||||
$f::<$l>($v1.0, $v2.0),
|
||||
$f::<$l>($v1.1, $v2.1),
|
||||
$f::<$l>($v1.2, $v2.2),
|
||||
)
|
||||
};
|
||||
($f:ident, $v:ident) => {
|
||||
($f($v.0), $f($v.1), $f($v.2))
|
||||
};
|
||||
($f:ident, $v0:ident, $v1:ident) => {
|
||||
($f($v0.0, $v1.0), $f($v0.1, $v1.1), $f($v0.2, $v1.2))
|
||||
};
|
||||
@ -188,19 +199,32 @@ unsafe fn const_layer(
|
||||
unsafe fn square3(
|
||||
x: (__m256i, __m256i, __m256i),
|
||||
) -> ((__m256i, __m256i, __m256i), (__m256i, __m256i, __m256i)) {
|
||||
let sign_bit = _mm256_set1_epi64x(i64::MIN);
|
||||
let x_hi = map3!(_mm256_srli_epi64::<32>, x);
|
||||
let x_hi = {
|
||||
// Move high bits to low position. The high bits of x_hi are ignored. Swizzle is faster than
|
||||
// bitshift. This instruction only has a floating-point flavor, so we cast to/from float.
|
||||
// This is safe and free.
|
||||
let x_ps = map3!(_mm256_castsi256_ps, x);
|
||||
let x_hi_ps = map3!(_mm256_movehdup_ps, x_ps);
|
||||
map3!(_mm256_castps_si256, x_hi_ps)
|
||||
};
|
||||
|
||||
// All pairwise multiplications.
|
||||
let mul_ll = map3!(_mm256_mul_epu32, x, x);
|
||||
let mul_lh = map3!(_mm256_mul_epu32, x, x_hi);
|
||||
let mul_hh = map3!(_mm256_mul_epu32, x_hi, x_hi);
|
||||
let res_lo0_s = map3!(_mm256_xor_si256, mul_ll, rep sign_bit);
|
||||
|
||||
// Bignum addition, but mul_lh is shifted by 33 bits (not 32).
|
||||
let mul_ll_hi = map3!(_mm256_srli_epi64::<33>, mul_ll);
|
||||
let t0 = map3!(_mm256_add_epi64, mul_lh, mul_ll_hi);
|
||||
let t0_hi = map3!(_mm256_srli_epi64::<31>, t0);
|
||||
let res_hi = map3!(_mm256_add_epi64, mul_hh, t0_hi);
|
||||
|
||||
// Form low result by adding the mul_ll and the low 31 bits of mul_lh (shifted to the high
|
||||
// position).
|
||||
let mul_lh_lo = map3!(_mm256_slli_epi64::<33>, mul_lh);
|
||||
let res_lo1_s = map3!(_mm256_add_epi64, res_lo0_s, mul_lh_lo);
|
||||
let carry = map3!(_mm256_cmpgt_epi64, res_lo0_s, res_lo1_s);
|
||||
let mul_lh_hi = map3!(_mm256_srli_epi64::<31>, mul_lh);
|
||||
let res_hi0 = map3!(_mm256_add_epi64, mul_hh, mul_lh_hi);
|
||||
let res_hi1 = map3!(_mm256_sub_epi64, res_hi0, carry);
|
||||
(res_lo1_s, res_hi1)
|
||||
let res_lo = map3!(_mm256_add_epi64, mul_ll, mul_lh_lo);
|
||||
|
||||
(res_lo, res_hi)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
@ -208,49 +232,110 @@ unsafe fn mul3(
|
||||
x: (__m256i, __m256i, __m256i),
|
||||
y: (__m256i, __m256i, __m256i),
|
||||
) -> ((__m256i, __m256i, __m256i), (__m256i, __m256i, __m256i)) {
|
||||
let sign_bit = _mm256_set1_epi64x(i64::MIN);
|
||||
let y_hi = map3!(_mm256_srli_epi64::<32>, y);
|
||||
let x_hi = map3!(_mm256_srli_epi64::<32>, x);
|
||||
let epsilon = _mm256_set1_epi64x(0xffffffff);
|
||||
let x_hi = {
|
||||
// Move high bits to low position. The high bits of x_hi are ignored. Swizzle is faster than
|
||||
// bitshift. This instruction only has a floating-point flavor, so we cast to/from float.
|
||||
// This is safe and free.
|
||||
let x_ps = map3!(_mm256_castsi256_ps, x);
|
||||
let x_hi_ps = map3!(_mm256_movehdup_ps, x_ps);
|
||||
map3!(_mm256_castps_si256, x_hi_ps)
|
||||
};
|
||||
let y_hi = {
|
||||
let y_ps = map3!(_mm256_castsi256_ps, y);
|
||||
let y_hi_ps = map3!(_mm256_movehdup_ps, y_ps);
|
||||
map3!(_mm256_castps_si256, y_hi_ps)
|
||||
};
|
||||
|
||||
// All four pairwise multiplications
|
||||
let mul_ll = map3!(_mm256_mul_epu32, x, y);
|
||||
let mul_lh = map3!(_mm256_mul_epu32, x, y_hi);
|
||||
let mul_hl = map3!(_mm256_mul_epu32, x_hi, y);
|
||||
let mul_hh = map3!(_mm256_mul_epu32, x_hi, y_hi);
|
||||
let mul_lh_lo = map3!(_mm256_slli_epi64::<32>, mul_lh);
|
||||
let res_lo0_s = map3!(_mm256_xor_si256, mul_ll, rep sign_bit);
|
||||
let mul_hl_lo = map3!(_mm256_slli_epi64::<32>, mul_hl);
|
||||
let res_lo1_s = map3!(_mm256_add_epi64, res_lo0_s, mul_lh_lo);
|
||||
let carry0 = map3!(_mm256_cmpgt_epi64, res_lo0_s, res_lo1_s);
|
||||
let mul_lh_hi = map3!(_mm256_srli_epi64::<32>, mul_lh);
|
||||
let res_lo2_s = map3!(_mm256_add_epi64, res_lo1_s, mul_hl_lo);
|
||||
let carry1 = map3!(_mm256_cmpgt_epi64, res_lo1_s, res_lo2_s);
|
||||
let mul_hl_hi = map3!(_mm256_srli_epi64::<32>, mul_hl);
|
||||
let res_hi0 = map3!(_mm256_add_epi64, mul_hh, mul_lh_hi);
|
||||
let res_hi1 = map3!(_mm256_add_epi64, res_hi0, mul_hl_hi);
|
||||
let res_hi2 = map3!(_mm256_sub_epi64, res_hi1, carry0);
|
||||
let res_hi3 = map3!(_mm256_sub_epi64, res_hi2, carry1);
|
||||
(res_lo2_s, res_hi3)
|
||||
|
||||
// Bignum addition
|
||||
// Extract high 32 bits of mul_ll and add to mul_hl. This cannot overflow.
|
||||
let mul_ll_hi = map3!(_mm256_srli_epi64::<32>, mul_ll);
|
||||
let t0 = map3!(_mm256_add_epi64, mul_hl, mul_ll_hi);
|
||||
// Extract low 32 bits of t0 and add to mul_lh. Again, this cannot overflow.
|
||||
// Also, extract high 32 bits of t0 and add to mul_hh.
|
||||
let t0_lo = map3!(_mm256_and_si256, t0, rep epsilon);
|
||||
let t0_hi = map3!(_mm256_srli_epi64::<32>, t0);
|
||||
let t1 = map3!(_mm256_add_epi64, mul_lh, t0_lo);
|
||||
let t2 = map3!(_mm256_add_epi64, mul_hh, t0_hi);
|
||||
// Lastly, extract the high 32 bits of t1 and add to t2.
|
||||
let t1_hi = map3!(_mm256_srli_epi64::<32>, t1);
|
||||
let res_hi = map3!(_mm256_add_epi64, t2, t1_hi);
|
||||
|
||||
// Form res_lo by combining the low half of mul_ll with the low half of t1 (shifted into high
|
||||
// position).
|
||||
let t1_lo = {
|
||||
let t1_ps = map3!(_mm256_castsi256_ps, t1);
|
||||
let t1_lo_ps = map3!(_mm256_moveldup_ps, t1_ps);
|
||||
map3!(_mm256_castps_si256, t1_lo_ps)
|
||||
};
|
||||
let res_lo = map3!(_mm256_blend_epi32::<0xaa>, mul_ll, t1_lo);
|
||||
|
||||
(res_lo, res_hi)
|
||||
}
|
||||
|
||||
/// Addition, where the second operand is `0 <= y < 0xffffffff00000001`.
|
||||
#[inline(always)]
|
||||
unsafe fn add_small(
|
||||
x_s: (__m256i, __m256i, __m256i),
|
||||
y: (__m256i, __m256i, __m256i),
|
||||
) -> (__m256i, __m256i, __m256i) {
|
||||
let res_wrapped_s = map3!(_mm256_add_epi64, x_s, y);
|
||||
let mask = map3!(_mm256_cmpgt_epi32, x_s, res_wrapped_s);
|
||||
let wrapback_amt = map3!(_mm256_srli_epi64::<32>, mask); // EPSILON if overflowed else 0.
|
||||
let res_s = map3!(_mm256_add_epi64, res_wrapped_s, wrapback_amt);
|
||||
res_s
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
unsafe fn maybe_adj_sub(res_wrapped_s: __m256i, mask: __m256i) -> __m256i {
|
||||
// The subtraction is very unlikely to overflow so we're best off branching.
|
||||
// The even u32s in `mask` are meaningless, so we want to ignore them. `_mm256_testz_pd`
|
||||
// branches depending on the sign bit of double-precision (64-bit) floats. Bit cast `mask` to
|
||||
// floating-point (this is free).
|
||||
let mask_pd = _mm256_castsi256_pd(mask);
|
||||
// `_mm256_testz_pd(mask_pd, mask_pd) == 1` iff all sign bits are 0, meaning that underflow
|
||||
// did not occur for any of the vector elements.
|
||||
if _mm256_testz_pd(mask_pd, mask_pd) == 1 {
|
||||
res_wrapped_s
|
||||
} else {
|
||||
branch_hint();
|
||||
// Highly unlikely: underflow did occur. Find adjustment per element and apply it.
|
||||
let adj_amount = _mm256_srli_epi64::<32>(mask); // EPSILON if underflow.
|
||||
_mm256_sub_epi64(res_wrapped_s, adj_amount)
|
||||
}
|
||||
}
|
||||
|
||||
/// Addition, where the second operand is much smaller than `0xffffffff00000001`.
|
||||
#[inline(always)]
|
||||
unsafe fn sub_tiny(
|
||||
x_s: (__m256i, __m256i, __m256i),
|
||||
y: (__m256i, __m256i, __m256i),
|
||||
) -> (__m256i, __m256i, __m256i) {
|
||||
let res_wrapped_s = map3!(_mm256_sub_epi64, x_s, y);
|
||||
let mask = map3!(_mm256_cmpgt_epi32, res_wrapped_s, x_s);
|
||||
let res_s = map3!(maybe_adj_sub, res_wrapped_s, mask);
|
||||
res_s
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
unsafe fn reduce3(
|
||||
(x_lo_s, x_hi): ((__m256i, __m256i, __m256i), (__m256i, __m256i, __m256i)),
|
||||
(lo0, hi0): ((__m256i, __m256i, __m256i), (__m256i, __m256i, __m256i)),
|
||||
) -> (__m256i, __m256i, __m256i) {
|
||||
let epsilon = _mm256_set1_epi64x(0xffffffff);
|
||||
let sign_bit = _mm256_set1_epi64x(i64::MIN);
|
||||
let x_hi_hi = map3!(_mm256_srli_epi64::<32>, x_hi);
|
||||
let res0_s = map3!(_mm256_sub_epi64, x_lo_s, x_hi_hi);
|
||||
let wraparound_mask0 = map3!(_mm256_cmpgt_epi32, res0_s, x_lo_s);
|
||||
let wraparound_adj0 = map3!(_mm256_srli_epi64::<32>, wraparound_mask0);
|
||||
let x_hi_lo = map3!(_mm256_and_si256, x_hi, rep epsilon);
|
||||
let x_hi_lo_shifted = map3!(_mm256_slli_epi64::<32>, x_hi);
|
||||
let res1_s = map3!(_mm256_sub_epi64, res0_s, wraparound_adj0);
|
||||
let x_hi_lo_mul_epsilon = map3!(_mm256_sub_epi64, x_hi_lo_shifted, x_hi_lo);
|
||||
let res2_s = map3!(_mm256_add_epi64, res1_s, x_hi_lo_mul_epsilon);
|
||||
let wraparound_mask2 = map3!(_mm256_cmpgt_epi32, res1_s, res2_s);
|
||||
let wraparound_adj2 = map3!(_mm256_srli_epi64::<32>, wraparound_mask2);
|
||||
let res3_s = map3!(_mm256_add_epi64, res2_s, wraparound_adj2);
|
||||
let res3 = map3!(_mm256_xor_si256, res3_s, rep sign_bit);
|
||||
res3
|
||||
let epsilon = _mm256_set1_epi64x(0xffffffff);
|
||||
let lo0_s = map3!(_mm256_xor_si256, lo0, rep sign_bit);
|
||||
let hi_hi0 = map3!(_mm256_srli_epi64::<32>, hi0);
|
||||
let lo1_s = sub_tiny(lo0_s, hi_hi0);
|
||||
let t1 = map3!(_mm256_mul_epu32, hi0, rep epsilon);
|
||||
let lo2_s = add_small(lo1_s, t1);
|
||||
let lo2 = map3!(_mm256_xor_si256, lo2_s, rep sign_bit);
|
||||
lo2
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
@ -757,9 +842,9 @@ unsafe fn partial_round(
|
||||
// multiplication where we've set the first element to 0.) Add the remaining bits now.
|
||||
// TODO: This is a bit of an afterthought, which is why these constants are loaded 22
|
||||
// times... There's likely a better way of merging those results.
|
||||
"vmovdqu ymm6, {mds_matrix}[rip]",
|
||||
"vmovdqu ymm7, {mds_matrix}[rip + 32]",
|
||||
"vmovdqu ymm8, {mds_matrix}[rip + 64]",
|
||||
"vmovdqu ymm6, [{mds_matrix}]",
|
||||
"vmovdqu ymm7, [{mds_matrix} + 32]",
|
||||
"vmovdqu ymm8, [{mds_matrix} + 64]",
|
||||
"vpsllvq ymm9, ymm13, ymm6",
|
||||
"vpsllvq ymm10, ymm13, ymm7",
|
||||
"vpsllvq ymm11, ymm13, ymm8",
|
||||
@ -775,7 +860,7 @@ unsafe fn partial_round(
|
||||
// Reduction required.
|
||||
|
||||
state0a = in(reg) state0a,
|
||||
mds_matrix = sym TOP_ROW_EXPS,
|
||||
mds_matrix = in(reg) &TOP_ROW_EXPS,
|
||||
inout("ymm0") unreduced_lo0_s,
|
||||
inout("ymm1") unreduced_lo1_s,
|
||||
inout("ymm2") unreduced_lo2_s,
|
||||
|
||||
@ -1,168 +1 @@
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use unroll::unroll_for_loops;
|
||||
|
||||
use crate::gates::gmimc::GMiMCGate;
|
||||
use crate::hash::hash_types::{HashOut, RichField};
|
||||
use crate::hash::hashing::{compress, hash_n_to_hash, PlonkyPermutation, SPONGE_WIDTH};
|
||||
use crate::iop::target::{BoolTarget, Target};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::config::{AlgebraicHasher, Hasher};
|
||||
|
||||
pub(crate) const NUM_ROUNDS: usize = 101;
|
||||
|
||||
pub trait GMiMC<const WIDTH: usize>: Field
|
||||
where
|
||||
[u64; NUM_ROUNDS]: Sized,
|
||||
{
|
||||
const ROUND_CONSTANTS: [u64; NUM_ROUNDS];
|
||||
|
||||
#[unroll_for_loops]
|
||||
fn gmimc_permute(mut xs: [Self; WIDTH]) -> [Self; WIDTH] {
|
||||
// Value that is implicitly added to each element.
|
||||
// See https://affine.group/2020/02/starkware-challenge
|
||||
let mut addition_buffer = Self::ZERO;
|
||||
|
||||
for (r, &constant) in Self::ROUND_CONSTANTS.iter().enumerate() {
|
||||
let active = r % WIDTH;
|
||||
let f = (xs[active] + addition_buffer + Self::from_canonical_u64(constant)).cube();
|
||||
addition_buffer += f;
|
||||
xs[active] -= f;
|
||||
}
|
||||
|
||||
for i in 0..WIDTH {
|
||||
xs[i] += addition_buffer;
|
||||
}
|
||||
|
||||
xs
|
||||
}
|
||||
|
||||
#[unroll_for_loops]
|
||||
fn gmimc_permute_naive(mut xs: [Self; WIDTH]) -> [Self; WIDTH] {
|
||||
for (r, &constant) in Self::ROUND_CONSTANTS.iter().enumerate() {
|
||||
let active = r % WIDTH;
|
||||
let f = (xs[active] + Self::from_canonical_u64(constant)).cube();
|
||||
for i in 0..WIDTH {
|
||||
if i != active {
|
||||
xs[i] += f;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
xs
|
||||
}
|
||||
}
|
||||
|
||||
/// See `generate_constants` about how these were generated.
|
||||
#[rustfmt::skip]
|
||||
const GOLDILOCKS_ROUND_CONSTANTS: [u64; NUM_ROUNDS] = [
|
||||
0xb585f767417ee042, 0x7746a55f77c10331, 0xb2fb0d321d356f7a, 0x0f6760a486f1621f,
|
||||
0xe10d6666b36abcdf, 0x8cae14cb455cc50b, 0xd438539cf2cee334, 0xef781c7d4c1fd8b4,
|
||||
0xcdc4a23a0aca4b1f, 0x277fa208d07b52e3, 0xe17653a300493d38, 0xc54302f27c287dc1,
|
||||
0x8628782231d47d10, 0x59cd1a8a690b49f2, 0xc3b919ad9efec0b0, 0xa484c4c637641d97,
|
||||
0x308bbd23f191398b, 0x6e4a40c1bf713cf1, 0x9a2eedb7510414fb, 0xe360c6e111c2c63b,
|
||||
0xd5c771901d4d89aa, 0xc35eae076e7d6b2f, 0x849c2656d0a09cad, 0xc0572c8c5cf1df2b,
|
||||
0xe9fa634a883b8bf3, 0xf56f6d4900fb1fdd, 0xf7d713e872a72a1b, 0x8297132b6ba47612,
|
||||
0xad6805e12ee8af1c, 0xac51d9f6485c22b9, 0x502ad7dc3bd56bf8, 0x57a1550c3761c577,
|
||||
0x66bbd30e99d311da, 0x0da2abef5e948f87, 0xf0612750443f8e94, 0x28b8ec3afb937d8c,
|
||||
0x92a756e6be54ca18, 0x70e741ec304e925d, 0x019d5ee2b037c59f, 0x6f6f2ed7a30707d1,
|
||||
0x7cf416d01e8c169c, 0x61df517bb17617df, 0x85dc499b4c67dbaa, 0x4b959b48dad27b23,
|
||||
0xe8be3e5e0dd779a0, 0xf5c0bc1e525ed8e6, 0x40b12cbf263cf853, 0xa637093f13e2ea3c,
|
||||
0x3cc3f89232e3b0c8, 0x2e479dc16bfe86c0, 0x6f49de07d6d39469, 0x213ce7beecc232de,
|
||||
0x5b043134851fc00a, 0xa2de45784a861506, 0x7103aaf97bed8dd5, 0x5326fc0dbb88a147,
|
||||
0xa9ceb750364cb77a, 0x27f8ec88cc9e991f, 0xfceb4fda8c93fb83, 0xfac6ff13b45b260e,
|
||||
0x7131aa455813380b, 0x93510360d5d68119, 0xad535b24fb96e3db, 0x4627f5c6b7efc045,
|
||||
0x645cf794e4da78a9, 0x241c70ed1ac2877f, 0xacb8e076b009e825, 0x3737e9db6477bd9d,
|
||||
0xe7ea5e344cd688ed, 0x90dee4a009214640, 0xd1b1edf7c77e74af, 0x0b65481bab42158e,
|
||||
0x99ad1aab4b4fe3e7, 0x438a7c91f1a360cd, 0xb60de3bd159088bf, 0xc99cab6b47a3e3bb,
|
||||
0x69a5ed92d5677cef, 0x5e7b329c482a9396, 0x5fc0ac0829f893c9, 0x32db82924fb757ea,
|
||||
0x0ade699c5cf24145, 0x7cc5583b46d7b5bb, 0x85df9ed31bf8abcb, 0x6604df501ad4de64,
|
||||
0xeb84f60941611aec, 0xda60883523989bd4, 0x8f97fe40bf3470bf, 0xa93f485ce0ff2b32,
|
||||
0x6704e8eebc2afb4b, 0xcee3e9ac788ad755, 0x510d0e66062a270d, 0xf6323f48d74634a0,
|
||||
0x0b508cdf04990c90, 0xf241708a4ef7ddf9, 0x60e75c28bb368f82, 0xa6217d8c3f0f9989,
|
||||
0x7159cd30f5435b53, 0x839b4e8fe97ec79f, 0x0d3f3e5e885db625, 0x8f7d83be1daea54b,
|
||||
0x780f22441e8dbc04,
|
||||
];
|
||||
|
||||
impl GMiMC<8> for GoldilocksField {
|
||||
const ROUND_CONSTANTS: [u64; NUM_ROUNDS] = GOLDILOCKS_ROUND_CONSTANTS;
|
||||
}
|
||||
|
||||
impl GMiMC<12> for GoldilocksField {
|
||||
const ROUND_CONSTANTS: [u64; NUM_ROUNDS] = GOLDILOCKS_ROUND_CONSTANTS;
|
||||
}
|
||||
|
||||
pub struct GMiMCPermutation;
|
||||
impl<F: RichField> PlonkyPermutation<F> for GMiMCPermutation {
|
||||
fn permute(input: [F; SPONGE_WIDTH]) -> [F; SPONGE_WIDTH] {
|
||||
F::gmimc_permute(input)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
|
||||
pub struct GMiMCHash;
|
||||
impl<F: RichField> Hasher<F> for GMiMCHash {
|
||||
const HASH_SIZE: usize = 4 * 8;
|
||||
type Hash = HashOut<F>;
|
||||
type Permutation = GMiMCPermutation;
|
||||
|
||||
fn hash(input: Vec<F>, pad: bool) -> Self::Hash {
|
||||
hash_n_to_hash::<F, Self::Permutation>(input, pad)
|
||||
}
|
||||
|
||||
fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash {
|
||||
compress::<F, Self::Permutation>(left, right)
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: RichField> AlgebraicHasher<F> for GMiMCHash {
|
||||
fn permute_swapped<const D: usize>(
|
||||
inputs: [Target; SPONGE_WIDTH],
|
||||
swap: BoolTarget,
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
) -> [Target; SPONGE_WIDTH]
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
{
|
||||
let gate_type = GMiMCGate::<F, D, SPONGE_WIDTH>::new();
|
||||
let gate = builder.add_gate(gate_type, vec![], vec![]);
|
||||
|
||||
let swap_wire = GMiMCGate::<F, D, SPONGE_WIDTH>::WIRE_SWAP;
|
||||
let swap_wire = Target::wire(gate, swap_wire);
|
||||
builder.connect(swap.target, swap_wire);
|
||||
|
||||
// Route input wires.
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let in_wire = GMiMCGate::<F, D, SPONGE_WIDTH>::wire_input(i);
|
||||
let in_wire = Target::wire(gate, in_wire);
|
||||
builder.connect(inputs[i], in_wire);
|
||||
}
|
||||
|
||||
// Collect output wires.
|
||||
(0..SPONGE_WIDTH)
|
||||
.map(|i| Target::wire(gate, GMiMCGate::<F, D, SPONGE_WIDTH>::wire_output(i)))
|
||||
.collect::<Vec<_>>()
|
||||
.try_into()
|
||||
.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
|
||||
use crate::hash::gmimc::GMiMC;
|
||||
|
||||
fn check_consistency<F: GMiMC<WIDTH>, const WIDTH: usize>() {
|
||||
let xs = F::rand_arr::<WIDTH>();
|
||||
let out = F::gmimc_permute(xs);
|
||||
let out_naive = F::gmimc_permute_naive(xs);
|
||||
assert_eq!(out, out_naive);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn consistency() {
|
||||
check_consistency::<GoldilocksField, 12>();
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,15 +1,14 @@
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::field_types::{Field, PrimeField64};
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
use crate::hash::gmimc::GMiMC;
|
||||
use crate::hash::poseidon::Poseidon;
|
||||
use crate::iop::target::Target;
|
||||
use crate::plonk::config::GenericHashOut;
|
||||
|
||||
/// A prime order field with the features we need to use it as a base field in our argument system.
|
||||
pub trait RichField: PrimeField + GMiMC<12> + Poseidon {}
|
||||
pub trait RichField: PrimeField64 + Poseidon {}
|
||||
|
||||
impl RichField for GoldilocksField {}
|
||||
|
||||
@ -32,14 +31,10 @@ impl<F: Field> HashOut<F> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_partial(mut elements: Vec<F>) -> Self {
|
||||
debug_assert!(elements.len() <= 4);
|
||||
while elements.len() < 4 {
|
||||
elements.push(F::ZERO);
|
||||
}
|
||||
Self {
|
||||
elements: [elements[0], elements[1], elements[2], elements[3]],
|
||||
}
|
||||
pub fn from_partial(elements_in: &[F]) -> Self {
|
||||
let mut elements = [F::ZERO; 4];
|
||||
elements[0..elements_in.len()].copy_from_slice(elements_in);
|
||||
Self { elements }
|
||||
}
|
||||
|
||||
pub fn rand_from_rng<R: Rng>(rng: &mut R) -> Self {
|
||||
@ -94,25 +89,21 @@ impl<F: Field> Default for HashOut<F> {
|
||||
/// Represents a ~256 bit hash output.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct HashOutTarget {
|
||||
pub(crate) elements: [Target; 4],
|
||||
pub elements: [Target; 4],
|
||||
}
|
||||
|
||||
impl HashOutTarget {
|
||||
pub(crate) fn from_vec(elements: Vec<Target>) -> Self {
|
||||
pub fn from_vec(elements: Vec<Target>) -> Self {
|
||||
debug_assert!(elements.len() == 4);
|
||||
Self {
|
||||
elements: elements.try_into().unwrap(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn from_partial(mut elements: Vec<Target>, zero: Target) -> Self {
|
||||
debug_assert!(elements.len() <= 4);
|
||||
while elements.len() < 4 {
|
||||
elements.push(zero);
|
||||
}
|
||||
Self {
|
||||
elements: [elements[0], elements[1], elements[2], elements[3]],
|
||||
}
|
||||
pub fn from_partial(elements_in: &[Target], zero: Target) -> Self {
|
||||
let mut elements = [zero; 4];
|
||||
elements[0..elements_in.len()].copy_from_slice(elements_in);
|
||||
Self { elements }
|
||||
}
|
||||
}
|
||||
|
||||
@ -123,6 +114,18 @@ pub struct MerkleCapTarget(pub Vec<HashOutTarget>);
|
||||
#[derive(Eq, PartialEq, Copy, Clone, Debug)]
|
||||
pub struct BytesHash<const N: usize>(pub [u8; N]);
|
||||
|
||||
impl<const N: usize> BytesHash<N> {
|
||||
pub fn rand_from_rng<R: Rng>(rng: &mut R) -> Self {
|
||||
let mut buf = [0; N];
|
||||
rng.fill_bytes(&mut buf);
|
||||
Self(buf)
|
||||
}
|
||||
|
||||
pub fn rand() -> Self {
|
||||
Self::rand_from_rng(&mut rand::thread_rng())
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: RichField, const N: usize> GenericHashOut<F> for BytesHash<N> {
|
||||
fn to_bytes(&self) -> Vec<u8> {
|
||||
self.0.to_vec()
|
||||
|
||||
@ -12,50 +12,29 @@ pub(crate) const SPONGE_RATE: usize = 8;
|
||||
pub(crate) const SPONGE_CAPACITY: usize = 4;
|
||||
pub const SPONGE_WIDTH: usize = SPONGE_RATE + SPONGE_CAPACITY;
|
||||
|
||||
/// Hash the vector if necessary to reduce its length to ~256 bits. If it already fits, this is a
|
||||
/// no-op.
|
||||
pub fn hash_or_noop<F: RichField, P: PlonkyPermutation<F>>(inputs: Vec<F>) -> HashOut<F> {
|
||||
if inputs.len() <= 4 {
|
||||
HashOut::from_partial(inputs)
|
||||
} else {
|
||||
hash_n_to_hash::<F, P>(inputs, false)
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
pub fn hash_or_noop<H: AlgebraicHasher<F>>(&mut self, inputs: Vec<Target>) -> HashOutTarget {
|
||||
let zero = self.zero();
|
||||
if inputs.len() <= 4 {
|
||||
HashOutTarget::from_partial(inputs, zero)
|
||||
HashOutTarget::from_partial(&inputs, zero)
|
||||
} else {
|
||||
self.hash_n_to_hash::<H>(inputs, false)
|
||||
self.hash_n_to_hash_no_pad::<H>(inputs)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hash_n_to_hash<H: AlgebraicHasher<F>>(
|
||||
pub fn hash_n_to_hash_no_pad<H: AlgebraicHasher<F>>(
|
||||
&mut self,
|
||||
inputs: Vec<Target>,
|
||||
pad: bool,
|
||||
) -> HashOutTarget {
|
||||
HashOutTarget::from_vec(self.hash_n_to_m::<H>(inputs, 4, pad))
|
||||
HashOutTarget::from_vec(self.hash_n_to_m_no_pad::<H>(inputs, 4))
|
||||
}
|
||||
|
||||
pub fn hash_n_to_m<H: AlgebraicHasher<F>>(
|
||||
pub fn hash_n_to_m_no_pad<H: AlgebraicHasher<F>>(
|
||||
&mut self,
|
||||
mut inputs: Vec<Target>,
|
||||
inputs: Vec<Target>,
|
||||
num_outputs: usize,
|
||||
pad: bool,
|
||||
) -> Vec<Target> {
|
||||
let zero = self.zero();
|
||||
let one = self.one();
|
||||
|
||||
if pad {
|
||||
inputs.push(zero);
|
||||
while (inputs.len() + 1) % SPONGE_WIDTH != 0 {
|
||||
inputs.push(one);
|
||||
}
|
||||
inputs.push(zero);
|
||||
}
|
||||
|
||||
let mut state = [zero; SPONGE_WIDTH];
|
||||
|
||||
@ -69,7 +48,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
// Squeeze until we have the desired number of outputs.
|
||||
let mut outputs = Vec::new();
|
||||
let mut outputs = Vec::with_capacity(num_outputs);
|
||||
loop {
|
||||
for i in 0..SPONGE_RATE {
|
||||
outputs.push(state[i]);
|
||||
@ -97,22 +76,12 @@ pub trait PlonkyPermutation<F: RichField> {
|
||||
fn permute(input: [F; SPONGE_WIDTH]) -> [F; SPONGE_WIDTH];
|
||||
}
|
||||
|
||||
/// If `pad` is enabled, the message is padded using the pad10*1 rule. In general this is required
|
||||
/// for the hash to be secure, but it can safely be disabled in certain cases, like if the input
|
||||
/// length is fixed.
|
||||
pub fn hash_n_to_m<F: RichField, P: PlonkyPermutation<F>>(
|
||||
mut inputs: Vec<F>,
|
||||
/// Hash a message without any padding step. Note that this can enable length-extension attacks.
|
||||
/// However, it is still collision-resistant in cases where the input has a fixed length.
|
||||
pub fn hash_n_to_m_no_pad<F: RichField, P: PlonkyPermutation<F>>(
|
||||
inputs: &[F],
|
||||
num_outputs: usize,
|
||||
pad: bool,
|
||||
) -> Vec<F> {
|
||||
if pad {
|
||||
inputs.push(F::ZERO);
|
||||
while (inputs.len() + 1) % SPONGE_WIDTH != 0 {
|
||||
inputs.push(F::ONE);
|
||||
}
|
||||
inputs.push(F::ZERO);
|
||||
}
|
||||
|
||||
let mut state = [F::ZERO; SPONGE_WIDTH];
|
||||
|
||||
// Absorb all input chunks.
|
||||
@ -134,9 +103,6 @@ pub fn hash_n_to_m<F: RichField, P: PlonkyPermutation<F>>(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hash_n_to_hash<F: RichField, P: PlonkyPermutation<F>>(
|
||||
inputs: Vec<F>,
|
||||
pad: bool,
|
||||
) -> HashOut<F> {
|
||||
HashOut::from_vec(hash_n_to_m::<F, P>(inputs, 4, pad))
|
||||
pub fn hash_n_to_hash_no_pad<F: RichField, P: PlonkyPermutation<F>>(inputs: &[F]) -> HashOut<F> {
|
||||
HashOut::from_vec(hash_n_to_m_no_pad::<F, P>(inputs, 4))
|
||||
}
|
||||
|
||||
@ -56,9 +56,9 @@ impl<F: RichField, const N: usize> Hasher<F> for KeccakHash<N> {
|
||||
type Hash = BytesHash<N>;
|
||||
type Permutation = KeccakPermutation;
|
||||
|
||||
fn hash(input: Vec<F>, _pad: bool) -> Self::Hash {
|
||||
fn hash_no_pad(input: &[F]) -> Self::Hash {
|
||||
let mut buffer = Buffer::new(Vec::new());
|
||||
buffer.write_field_vec(&input).unwrap();
|
||||
buffer.write_field_vec(input).unwrap();
|
||||
let mut arr = [0; N];
|
||||
let hash_bytes = keccak(buffer.bytes()).0;
|
||||
arr.copy_from_slice(&hash_bytes[..N]);
|
||||
|
||||
@ -17,7 +17,7 @@ pub struct MerkleProof<F: RichField, H: Hasher<F>> {
|
||||
pub siblings: Vec<H::Hash>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct MerkleProofTarget {
|
||||
/// The Merkle digest of each sibling subtree, staying from the bottommost layer.
|
||||
pub siblings: Vec<HashOutTarget>,
|
||||
@ -30,9 +30,12 @@ pub(crate) fn verify_merkle_proof<F: RichField, H: Hasher<F>>(
|
||||
leaf_index: usize,
|
||||
merkle_cap: &MerkleCap<F, H>,
|
||||
proof: &MerkleProof<F, H>,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
let mut index = leaf_index;
|
||||
let mut current_digest = H::hash(leaf_data, false);
|
||||
let mut current_digest = H::hash_or_noop(&leaf_data);
|
||||
for &sibling_digest in proof.siblings.iter() {
|
||||
let bit = index & 1;
|
||||
index >>= 1;
|
||||
|
||||
@ -1,3 +1,7 @@
|
||||
use std::mem::MaybeUninit;
|
||||
use std::slice;
|
||||
|
||||
use plonky2_util::log2_strict;
|
||||
use rayon::prelude::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@ -27,33 +31,131 @@ pub struct MerkleTree<F: RichField, H: Hasher<F>> {
|
||||
/// The data in the leaves of the Merkle tree.
|
||||
pub leaves: Vec<Vec<F>>,
|
||||
|
||||
/// The layers of hashes in the tree. The first layer is the one at the bottom.
|
||||
pub layers: Vec<Vec<H::Hash>>,
|
||||
/// The digests in the tree. Consists of `cap.len()` sub-trees, each corresponding to one
|
||||
/// element in `cap`. Each subtree is contiguous and located at
|
||||
/// `digests[digests.len() / cap.len() * i..digests.len() / cap.len() * (i + 1)]`.
|
||||
/// Within each subtree, siblings are stored next to each other. The layout is,
|
||||
/// left_child_subtree || left_child_digest || right_child_digest || right_child_subtree, where
|
||||
/// left_child_digest and right_child_digest are H::Hash and left_child_subtree and
|
||||
/// right_child_subtree recurse. Observe that the digest of a node is stored by its _parent_.
|
||||
/// Consequently, the digests of the roots are not stored here (they can be found in `cap`).
|
||||
pub digests: Vec<H::Hash>,
|
||||
|
||||
/// The Merkle cap.
|
||||
pub cap: MerkleCap<F, H>,
|
||||
}
|
||||
|
||||
fn capacity_up_to_mut<T>(v: &mut Vec<T>, len: usize) -> &mut [MaybeUninit<T>] {
|
||||
assert!(v.capacity() >= len);
|
||||
let v_ptr = v.as_mut_ptr().cast::<MaybeUninit<T>>();
|
||||
unsafe {
|
||||
// SAFETY: `v_ptr` is a valid pointer to a buffer of length at least `len`. Upon return, the
|
||||
// lifetime will be bound to that of `v`. The underlying memory will not be deallocated as
|
||||
// we hold the sole mutable reference to `v`. The contents of the slice may be
|
||||
// uninitialized, but the `MaybeUninit` makes it safe.
|
||||
slice::from_raw_parts_mut(v_ptr, len)
|
||||
}
|
||||
}
|
||||
|
||||
fn fill_subtree<F: RichField, H: Hasher<F>>(
|
||||
digests_buf: &mut [MaybeUninit<H::Hash>],
|
||||
leaves: &[Vec<F>],
|
||||
) -> H::Hash
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
assert_eq!(leaves.len(), digests_buf.len() / 2 + 1);
|
||||
if digests_buf.is_empty() {
|
||||
H::hash_or_noop(&leaves[0])
|
||||
} else {
|
||||
// Layout is: left recursive output || left child digest
|
||||
// || right child digest || right recursive output.
|
||||
// Split `digests_buf` into the two recursive outputs (slices) and two child digests
|
||||
// (references).
|
||||
let (left_digests_buf, right_digests_buf) = digests_buf.split_at_mut(digests_buf.len() / 2);
|
||||
let (left_digest_mem, left_digests_buf) = left_digests_buf.split_last_mut().unwrap();
|
||||
let (right_digest_mem, right_digests_buf) = right_digests_buf.split_first_mut().unwrap();
|
||||
// Split `leaves` between both children.
|
||||
let (left_leaves, right_leaves) = leaves.split_at(leaves.len() / 2);
|
||||
let (left_digest, right_digest) = rayon::join(
|
||||
|| fill_subtree::<F, H>(left_digests_buf, left_leaves),
|
||||
|| fill_subtree::<F, H>(right_digests_buf, right_leaves),
|
||||
);
|
||||
left_digest_mem.write(left_digest);
|
||||
right_digest_mem.write(right_digest);
|
||||
H::two_to_one(left_digest, right_digest)
|
||||
}
|
||||
}
|
||||
|
||||
fn fill_digests_buf<F: RichField, H: Hasher<F>>(
|
||||
digests_buf: &mut [MaybeUninit<H::Hash>],
|
||||
cap_buf: &mut [MaybeUninit<H::Hash>],
|
||||
leaves: &[Vec<F>],
|
||||
cap_height: usize,
|
||||
) where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
// Special case of a tree that's all cap. The usual case will panic because we'll try to split
|
||||
// an empty slice into chunks of `0`. (We would not need this if there was a way to split into
|
||||
// `blah` chunks as opposed to chunks _of_ `blah`.)
|
||||
if digests_buf.is_empty() {
|
||||
debug_assert_eq!(cap_buf.len(), leaves.len());
|
||||
cap_buf
|
||||
.par_iter_mut()
|
||||
.zip(leaves)
|
||||
.for_each(|(cap_buf, leaf)| {
|
||||
cap_buf.write(H::hash_or_noop(leaf));
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
let subtree_digests_len = digests_buf.len() >> cap_height;
|
||||
let subtree_leaves_len = leaves.len() >> cap_height;
|
||||
let digests_chunks = digests_buf.par_chunks_exact_mut(subtree_digests_len);
|
||||
let leaves_chunks = leaves.par_chunks_exact(subtree_leaves_len);
|
||||
assert_eq!(digests_chunks.len(), cap_buf.len());
|
||||
assert_eq!(digests_chunks.len(), leaves_chunks.len());
|
||||
digests_chunks.zip(cap_buf).zip(leaves_chunks).for_each(
|
||||
|((subtree_digests, subtree_cap), subtree_leaves)| {
|
||||
// We have `1 << cap_height` sub-trees, one for each entry in `cap`. They are totally
|
||||
// independent, so we schedule one task for each. `digests_buf` and `leaves` are split
|
||||
// into `1 << cap_height` slices, one for each sub-tree.
|
||||
subtree_cap.write(fill_subtree::<F, H>(subtree_digests, subtree_leaves));
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
impl<F: RichField, H: Hasher<F>> MerkleTree<F, H> {
|
||||
pub fn new(leaves: Vec<Vec<F>>, cap_height: usize) -> Self {
|
||||
let mut layers = vec![leaves
|
||||
.par_iter()
|
||||
.map(|l| H::hash(l.clone(), false))
|
||||
.collect::<Vec<_>>()];
|
||||
while let Some(l) = layers.last() {
|
||||
if l.len() == 1 << cap_height {
|
||||
break;
|
||||
}
|
||||
let next_layer = l
|
||||
.par_chunks(2)
|
||||
.map(|chunk| H::two_to_one(chunk[0], chunk[1]))
|
||||
.collect::<Vec<_>>();
|
||||
layers.push(next_layer);
|
||||
pub fn new(leaves: Vec<Vec<F>>, cap_height: usize) -> Self
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
let log2_leaves_len = log2_strict(leaves.len());
|
||||
assert!(
|
||||
cap_height <= log2_leaves_len,
|
||||
"cap height should be at most log2(leaves.len())"
|
||||
);
|
||||
|
||||
let num_digests = 2 * (leaves.len() - (1 << cap_height));
|
||||
let mut digests = Vec::with_capacity(num_digests);
|
||||
|
||||
let len_cap = 1 << cap_height;
|
||||
let mut cap = Vec::with_capacity(len_cap);
|
||||
|
||||
let digests_buf = capacity_up_to_mut(&mut digests, num_digests);
|
||||
let cap_buf = capacity_up_to_mut(&mut cap, len_cap);
|
||||
fill_digests_buf::<F, H>(digests_buf, cap_buf, &leaves[..], cap_height);
|
||||
|
||||
unsafe {
|
||||
// SAFETY: `fill_digests_buf` and `cap` initialized the spare capacity up to
|
||||
// `num_digests` and `len_cap`, resp.
|
||||
digests.set_len(num_digests);
|
||||
cap.set_len(len_cap);
|
||||
}
|
||||
let cap = layers.pop().unwrap();
|
||||
|
||||
Self {
|
||||
leaves,
|
||||
layers,
|
||||
digests,
|
||||
cap: MerkleCap(cap),
|
||||
}
|
||||
}
|
||||
@ -64,17 +166,40 @@ impl<F: RichField, H: Hasher<F>> MerkleTree<F, H> {
|
||||
|
||||
/// Create a Merkle proof from a leaf index.
|
||||
pub fn prove(&self, leaf_index: usize) -> MerkleProof<F, H> {
|
||||
MerkleProof {
|
||||
siblings: self
|
||||
.layers
|
||||
.iter()
|
||||
.scan(leaf_index, |acc, layer| {
|
||||
let index = *acc ^ 1;
|
||||
*acc >>= 1;
|
||||
Some(layer[index])
|
||||
})
|
||||
.collect(),
|
||||
}
|
||||
let cap_height = log2_strict(self.cap.len());
|
||||
let num_layers = log2_strict(self.leaves.len()) - cap_height;
|
||||
debug_assert_eq!(leaf_index >> (cap_height + num_layers), 0);
|
||||
|
||||
let digest_tree = {
|
||||
let tree_index = leaf_index >> num_layers;
|
||||
let tree_len = self.digests.len() >> cap_height;
|
||||
&self.digests[tree_len * tree_index..tree_len * (tree_index + 1)]
|
||||
};
|
||||
|
||||
// Mask out high bits to get the index within the sub-tree.
|
||||
let mut pair_index = leaf_index & ((1 << num_layers) - 1);
|
||||
let siblings = (0..num_layers)
|
||||
.into_iter()
|
||||
.map(|i| {
|
||||
let parity = pair_index & 1;
|
||||
pair_index >>= 1;
|
||||
|
||||
// The layers' data is interleaved as follows:
|
||||
// [layer 0, layer 1, layer 0, layer 2, layer 0, layer 1, layer 0, layer 3, ...].
|
||||
// Each of the above is a pair of siblings.
|
||||
// `pair_index` is the index of the pair within layer `i`.
|
||||
// The index of that the pair within `digests` is
|
||||
// `pair_index * 2 ** (i + 1) + (2 ** i - 1)`.
|
||||
let siblings_index = (pair_index << (i + 1)) + (1 << i) - 1;
|
||||
// We have an index for the _pair_, but we want the index of the _sibling_.
|
||||
// Double the pair index to get the index of the left sibling. Conditionally add `1`
|
||||
// if we are to retrieve the right sibling.
|
||||
let sibling_index = 2 * siblings_index + (1 - parity);
|
||||
digest_tree[sibling_index]
|
||||
})
|
||||
.collect();
|
||||
|
||||
MerkleProof { siblings }
|
||||
}
|
||||
}
|
||||
|
||||
@ -91,22 +216,50 @@ mod tests {
|
||||
(0..n).map(|_| F::rand_vec(k)).collect()
|
||||
}
|
||||
|
||||
fn verify_all_leaves<
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
const D: usize,
|
||||
>(
|
||||
fn verify_all_leaves<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
|
||||
leaves: Vec<Vec<F>>,
|
||||
n: usize,
|
||||
) -> Result<()> {
|
||||
let tree = MerkleTree::<F, C::Hasher>::new(leaves.clone(), 1);
|
||||
for i in 0..n {
|
||||
cap_height: usize,
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let tree = MerkleTree::<F, C::Hasher>::new(leaves.clone(), cap_height);
|
||||
for (i, leaf) in leaves.into_iter().enumerate() {
|
||||
let proof = tree.prove(i);
|
||||
verify_merkle_proof(leaves[i].clone(), i, &tree.cap, &proof)?;
|
||||
verify_merkle_proof(leaf, i, &tree.cap, &proof)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_cap_height_too_big() {
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let log_n = 8;
|
||||
let cap_height = log_n + 1; // Should panic if `cap_height > len_n`.
|
||||
|
||||
let leaves = random_data::<F>(1 << log_n, 7);
|
||||
let _ = MerkleTree::<F, <C as GenericConfig<D>>::Hasher>::new(leaves, cap_height);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cap_height_eq_log2_len() -> Result<()> {
|
||||
const D: usize = 2;
|
||||
type C = PoseidonGoldilocksConfig;
|
||||
type F = <C as GenericConfig<D>>::F;
|
||||
|
||||
let log_n = 8;
|
||||
let n = 1 << log_n;
|
||||
let leaves = random_data::<F>(n, 7);
|
||||
|
||||
verify_all_leaves::<F, C, D>(leaves, log_n)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_merkle_trees() -> Result<()> {
|
||||
const D: usize = 2;
|
||||
@ -117,7 +270,7 @@ mod tests {
|
||||
let n = 1 << log_n;
|
||||
let leaves = random_data::<F>(n, 7);
|
||||
|
||||
verify_all_leaves::<F, C, D>(leaves, n)?;
|
||||
verify_all_leaves::<F, C, D>(leaves, 1)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
mod arch;
|
||||
pub mod gmimc;
|
||||
pub mod hash_types;
|
||||
pub mod hashing;
|
||||
pub mod keccak;
|
||||
@ -8,4 +7,3 @@ pub mod merkle_tree;
|
||||
pub mod path_compression;
|
||||
pub mod poseidon;
|
||||
pub mod poseidon_goldilocks;
|
||||
pub mod rescue;
|
||||
|
||||
@ -57,7 +57,10 @@ pub(crate) fn decompress_merkle_proofs<F: RichField, H: Hasher<F>>(
|
||||
compressed_proofs: &[MerkleProof<F, H>],
|
||||
height: usize,
|
||||
cap_height: usize,
|
||||
) -> Vec<MerkleProof<F, H>> {
|
||||
) -> Vec<MerkleProof<F, H>>
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
let num_leaves = 1 << height;
|
||||
let compressed_proofs = compressed_proofs.to_vec();
|
||||
let mut decompressed_proofs = Vec::with_capacity(compressed_proofs.len());
|
||||
@ -66,7 +69,7 @@ pub(crate) fn decompress_merkle_proofs<F: RichField, H: Hasher<F>>(
|
||||
|
||||
for (&i, v) in leaves_indices.iter().zip(leaves_data) {
|
||||
// Observe the leaves.
|
||||
seen.insert(i + num_leaves, H::hash(v.to_vec(), false));
|
||||
seen.insert(i + num_leaves, H::hash_or_noop(v));
|
||||
}
|
||||
|
||||
// Iterators over the siblings.
|
||||
|
||||
@ -2,14 +2,14 @@
|
||||
//! https://eprint.iacr.org/2019/458.pdf
|
||||
|
||||
use plonky2_field::extension_field::{Extendable, FieldExtension};
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::field_types::{Field, PrimeField64};
|
||||
use unroll::unroll_for_loops;
|
||||
|
||||
use crate::gates::gate::Gate;
|
||||
use crate::gates::poseidon::PoseidonGate;
|
||||
use crate::gates::poseidon_mds::PoseidonMdsGate;
|
||||
use crate::hash::hash_types::{HashOut, RichField};
|
||||
use crate::hash::hashing::{compress, hash_n_to_hash, PlonkyPermutation, SPONGE_WIDTH};
|
||||
use crate::hash::hashing::{compress, hash_n_to_hash_no_pad, PlonkyPermutation, SPONGE_WIDTH};
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::target::{BoolTarget, Target};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
@ -21,10 +21,10 @@ use crate::plonk::config::{AlgebraicHasher, Hasher};
|
||||
//
|
||||
// NB: Changing any of these values will require regenerating all of
|
||||
// the precomputed constant arrays in this file.
|
||||
pub(crate) const HALF_N_FULL_ROUNDS: usize = 4;
|
||||
pub const HALF_N_FULL_ROUNDS: usize = 4;
|
||||
pub(crate) const N_FULL_ROUNDS_TOTAL: usize = 2 * HALF_N_FULL_ROUNDS;
|
||||
pub(crate) const N_PARTIAL_ROUNDS: usize = 22;
|
||||
pub(crate) const N_ROUNDS: usize = N_FULL_ROUNDS_TOTAL + N_PARTIAL_ROUNDS;
|
||||
pub const N_PARTIAL_ROUNDS: usize = 22;
|
||||
pub const N_ROUNDS: usize = N_FULL_ROUNDS_TOTAL + N_PARTIAL_ROUNDS;
|
||||
const MAX_WIDTH: usize = 12; // we only have width 8 and 12, and 12 is bigger. :)
|
||||
|
||||
#[inline(always)]
|
||||
@ -35,7 +35,7 @@ fn add_u160_u128((x_lo, x_hi): (u128, u32), y: u128) -> (u128, u32) {
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn reduce_u160<F: PrimeField>((n_lo, n_hi): (u128, u32)) -> F {
|
||||
fn reduce_u160<F: PrimeField64>((n_lo, n_hi): (u128, u32)) -> F {
|
||||
let n_lo_hi = (n_lo >> 64) as u64;
|
||||
let n_lo_lo = n_lo as u64;
|
||||
let reduced_hi: u64 = F::from_noncanonical_u96((n_lo_hi, n_hi)).to_noncanonical_u64();
|
||||
@ -148,7 +148,7 @@ pub const ALL_ROUND_CONSTANTS: [u64; MAX_WIDTH * N_ROUNDS] = [
|
||||
];
|
||||
|
||||
const WIDTH: usize = SPONGE_WIDTH;
|
||||
pub trait Poseidon: PrimeField {
|
||||
pub trait Poseidon: PrimeField64 {
|
||||
// Total number of round constants required: width of the input
|
||||
// times number of rounds.
|
||||
const N_ROUND_CONSTANTS: usize = WIDTH * N_ROUNDS;
|
||||
@ -633,8 +633,8 @@ impl<F: RichField> Hasher<F> for PoseidonHash {
|
||||
type Hash = HashOut<F>;
|
||||
type Permutation = PoseidonPermutation;
|
||||
|
||||
fn hash(input: Vec<F>, pad: bool) -> Self::Hash {
|
||||
hash_n_to_hash::<F, Self::Permutation>(input, pad)
|
||||
fn hash_no_pad(input: &[F]) -> Self::Hash {
|
||||
hash_n_to_hash_no_pad::<F, Self::Permutation>(input)
|
||||
}
|
||||
|
||||
fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash {
|
||||
|
||||
@ -270,7 +270,8 @@ impl Poseidon for GoldilocksField {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField64;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField as F;
|
||||
|
||||
use crate::hash::poseidon::test_helpers::{check_consistency, check_test_vectors};
|
||||
|
||||
@ -1,457 +0,0 @@
|
||||
//! Implements Rescue Prime.
|
||||
|
||||
use plonky2_field::field_types::Field;
|
||||
use unroll::unroll_for_loops;
|
||||
|
||||
const ROUNDS: usize = 8;
|
||||
|
||||
const W: usize = 12;
|
||||
|
||||
const MDS: [[u64; W]; W] = [
|
||||
[
|
||||
10760600708254618966,
|
||||
16769767337539665921,
|
||||
5534023221388089754,
|
||||
2049638230143736946,
|
||||
16140901062381928449,
|
||||
2635249153041947502,
|
||||
3074457345215605419,
|
||||
11068046442776179508,
|
||||
13835058053470224385,
|
||||
6148914690431210838,
|
||||
9223372035646816257,
|
||||
1,
|
||||
],
|
||||
[
|
||||
5675921252705733081,
|
||||
10760600708254618966,
|
||||
16769767337539665921,
|
||||
5534023221388089754,
|
||||
2049638230143736946,
|
||||
16140901062381928449,
|
||||
2635249153041947502,
|
||||
3074457345215605419,
|
||||
11068046442776179508,
|
||||
13835058053470224385,
|
||||
6148914690431210838,
|
||||
9223372035646816257,
|
||||
],
|
||||
[
|
||||
1317624576520973751,
|
||||
5675921252705733081,
|
||||
10760600708254618966,
|
||||
16769767337539665921,
|
||||
5534023221388089754,
|
||||
2049638230143736946,
|
||||
16140901062381928449,
|
||||
2635249153041947502,
|
||||
3074457345215605419,
|
||||
11068046442776179508,
|
||||
13835058053470224385,
|
||||
6148914690431210838,
|
||||
],
|
||||
[
|
||||
15987178195121148178,
|
||||
1317624576520973751,
|
||||
5675921252705733081,
|
||||
10760600708254618966,
|
||||
16769767337539665921,
|
||||
5534023221388089754,
|
||||
2049638230143736946,
|
||||
16140901062381928449,
|
||||
2635249153041947502,
|
||||
3074457345215605419,
|
||||
11068046442776179508,
|
||||
13835058053470224385,
|
||||
],
|
||||
[
|
||||
17293822566837780481,
|
||||
15987178195121148178,
|
||||
1317624576520973751,
|
||||
5675921252705733081,
|
||||
10760600708254618966,
|
||||
16769767337539665921,
|
||||
5534023221388089754,
|
||||
2049638230143736946,
|
||||
16140901062381928449,
|
||||
2635249153041947502,
|
||||
3074457345215605419,
|
||||
11068046442776179508,
|
||||
],
|
||||
[
|
||||
3255307777287111620,
|
||||
17293822566837780481,
|
||||
15987178195121148178,
|
||||
1317624576520973751,
|
||||
5675921252705733081,
|
||||
10760600708254618966,
|
||||
16769767337539665921,
|
||||
5534023221388089754,
|
||||
2049638230143736946,
|
||||
16140901062381928449,
|
||||
2635249153041947502,
|
||||
3074457345215605419,
|
||||
],
|
||||
[
|
||||
1024819115071868473,
|
||||
3255307777287111620,
|
||||
17293822566837780481,
|
||||
15987178195121148178,
|
||||
1317624576520973751,
|
||||
5675921252705733081,
|
||||
10760600708254618966,
|
||||
16769767337539665921,
|
||||
5534023221388089754,
|
||||
2049638230143736946,
|
||||
16140901062381928449,
|
||||
2635249153041947502,
|
||||
],
|
||||
[
|
||||
9708812669101911849,
|
||||
1024819115071868473,
|
||||
3255307777287111620,
|
||||
17293822566837780481,
|
||||
15987178195121148178,
|
||||
1317624576520973751,
|
||||
5675921252705733081,
|
||||
10760600708254618966,
|
||||
16769767337539665921,
|
||||
5534023221388089754,
|
||||
2049638230143736946,
|
||||
16140901062381928449,
|
||||
],
|
||||
[
|
||||
2767011610694044877,
|
||||
9708812669101911849,
|
||||
1024819115071868473,
|
||||
3255307777287111620,
|
||||
17293822566837780481,
|
||||
15987178195121148178,
|
||||
1317624576520973751,
|
||||
5675921252705733081,
|
||||
10760600708254618966,
|
||||
16769767337539665921,
|
||||
5534023221388089754,
|
||||
2049638230143736946,
|
||||
],
|
||||
[
|
||||
878416384347315834,
|
||||
2767011610694044877,
|
||||
9708812669101911849,
|
||||
1024819115071868473,
|
||||
3255307777287111620,
|
||||
17293822566837780481,
|
||||
15987178195121148178,
|
||||
1317624576520973751,
|
||||
5675921252705733081,
|
||||
10760600708254618966,
|
||||
16769767337539665921,
|
||||
5534023221388089754,
|
||||
],
|
||||
[
|
||||
17608255704416649217,
|
||||
878416384347315834,
|
||||
2767011610694044877,
|
||||
9708812669101911849,
|
||||
1024819115071868473,
|
||||
3255307777287111620,
|
||||
17293822566837780481,
|
||||
15987178195121148178,
|
||||
1317624576520973751,
|
||||
5675921252705733081,
|
||||
10760600708254618966,
|
||||
16769767337539665921,
|
||||
],
|
||||
[
|
||||
15238614667590392076,
|
||||
17608255704416649217,
|
||||
878416384347315834,
|
||||
2767011610694044877,
|
||||
9708812669101911849,
|
||||
1024819115071868473,
|
||||
3255307777287111620,
|
||||
17293822566837780481,
|
||||
15987178195121148178,
|
||||
1317624576520973751,
|
||||
5675921252705733081,
|
||||
10760600708254618966,
|
||||
],
|
||||
];
|
||||
|
||||
/// Per-round additive constants for the Rescue permutation.
///
/// There are `ROUNDS * 2` rows of `W` constants each: round `r` uses row
/// `2 * r` after the forward S-box half-round and row `2 * r + 1` after the
/// inverse S-box half-round (see `rescue`). Values are canonical `u64`
/// representatives of field elements; they are converted with
/// `F::from_canonical_u64` in `constant_layer`.
const RESCUE_CONSTANTS: [[u64; W]; ROUNDS * 2] = [
    [
        12050887499329086906,
        1748247961703512657,
        315780861775001585,
        2827656358919812970,
        13335864861236723579,
        3010729529365640897,
        8463534053828271146,
        2528500966106598845,
        8969871077123422281,
        1002624930202741107,
        599979829006456404,
        4386170815218774254,
    ],
    [
        5771413917591851532,
        11946802620311685142,
        4759792267858670262,
        6879094914431255667,
        3985911073214909073,
        1542850118294175816,
        5393560436452023029,
        8331250756632997735,
        3395511836281190608,
        17601255793194446503,
        12848459944475727152,
        11995465655754698601,
    ],
    [
        14063960046551560130,
        14790209580166185143,
        5509023472758717841,
        1274395897760495573,
        16719545989415697758,
        17865948122414223407,
        3919263713959798649,
        5633741078654387163,
        15665612362287352054,
        3418834727998553015,
        5324019631954832682,
        17962066557010997431,
    ],
    [
        3282193104189649752,
        18423507935939999211,
        9035104445528866459,
        30842260240043277,
        3896337933354935129,
        6615548113269323045,
        6625827707190475694,
        6677757329269550670,
        11419013193186889337,
        17111888851716383760,
        12075517898615128691,
        8139844272075088233,
    ],
    [
        8872892112814161072,
        17529364346566228604,
        7526576514327158912,
        850359069964902700,
        9679332912197531902,
        10591229741059812071,
        12759208863825924546,
        14552519355635838750,
        16066249893409806278,
        11283035366525176262,
        1047378652379935387,
        17032498397644511356,
    ],
    [
        2938626421478254042,
        10375267398354586672,
        13728514869380643947,
        16707318479225743731,
        9785828188762698567,
        8610686976269299752,
        5478372191917042178,
        12716344455538470365,
        9968276048553747246,
        14746805727771473956,
        4822070620124107028,
        9901161649549513416,
    ],
    [
        13458162407040644078,
        4045792126424269312,
        9709263167782315020,
        2163173014916005515,
        17079206331095671215,
        2556388076102629669,
        6582772486087242347,
        1239959540200663058,
        18268236910639895687,
        12499012548657350745,
        17213068585339946119,
        7641451088868756688,
    ],
    [
        14674555473338434116,
        14624532976317185113,
        13625541984298615970,
        7612892294159054770,
        12294028208969561574,
        6067206081581804358,
        5778082506883496792,
        7389487446513884800,
        12929525660730020877,
        18244350162788654296,
        15285920877034454694,
        3640669683987215349,
    ],
    [
        6737585134029996281,
        1826890539455248546,
        289376081355380231,
        10782622161517803787,
        12978425540147835172,
        9828233103297278473,
        16384075371934678711,
        3187492301890791304,
        12985433735185968457,
        9470935291631377473,
        16328323199113140151,
        16218490552434224203,
    ],
    [
        6188809977565251499,
        18437718710937437067,
        4530469469895539008,
        9596355277372723349,
        13602518824447658705,
        8759976068576854281,
        10504320064094929535,
        3980760429843656150,
        14609448298151012462,
        5839843841558860609,
        10283805260656050418,
        7239168159249274821,
    ],
    [
        3604243611640027441,
        5237321927316578323,
        5071861664926666316,
        13025405632646149705,
        3285281651566464074,
        12121596060272825779,
        1900602777802961569,
        8122527981264852045,
        6731303887159752901,
        9197659817406857040,
        844741616904786364,
        14249777686667858094,
    ],
    [
        8602844218963499297,
        10133401373828451640,
        11618292280328565166,
        8828272598402499582,
        4252246265076774689,
        9760449011955070998,
        10233981507028897480,
        10427510555228840014,
        1007817664531124790,
        4465396600980659145,
        7727267420665314215,
        7904022788946844554,
    ],
    [
        11418297156527169222,
        15865399053509010196,
        1727198235391450850,
        16557095577717348672,
        1524052121709169653,
        14531367160053894310,
        4071756280138432327,
        10333204220115446291,
        16584144375833061215,
        12237566480526488368,
        11090440024401607208,
        18281335018830792766,
    ],
    [
        16152169547074248135,
        18338155611216027761,
        15842640128213925612,
        14687926435880145351,
        13259626900273707210,
        6187877366876303234,
        10312881470701795438,
        1924945292721719446,
        2278209355262975917,
        3250749056007953206,
        11589006946114672195,
        241829012299953928,
    ],
    [
        11244459446597052449,
        7319043416418482137,
        8148526814449636806,
        9054933038587901070,
        550333919248348827,
        5513167392062632770,
        12644459803778263764,
        9903621375535446226,
        16390581784506871871,
        14586524717888286021,
        6975796306584548762,
        5200407948555191573,
    ],
    [
        2855794043288846965,
        1259443213892506318,
        6145351706926586935,
        3853784494234324998,
        5871277378086513850,
        9414363368707862566,
        11946957446931890832,
        308083693687568600,
        12712587722369770461,
        6792392698104204991,
        16465224002344550280,
        10282380383506806095,
    ],
];
|
||||
|
||||
pub fn rescue<F: Field>(mut xs: [F; W]) -> [F; W] {
|
||||
for r in 0..8 {
|
||||
xs = sbox_layer_a(xs);
|
||||
xs = mds_layer(xs);
|
||||
xs = constant_layer(xs, &RESCUE_CONSTANTS[r * 2]);
|
||||
|
||||
xs = sbox_layer_b(xs);
|
||||
xs = mds_layer(xs);
|
||||
xs = constant_layer(xs, &RESCUE_CONSTANTS[r * 2 + 1]);
|
||||
}
|
||||
xs
|
||||
}
|
||||
|
||||
#[unroll_for_loops]
|
||||
fn sbox_layer_a<F: Field>(x: [F; W]) -> [F; W] {
|
||||
let mut result = [F::ZERO; W];
|
||||
for i in 0..W {
|
||||
result[i] = x[i].cube();
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
#[unroll_for_loops]
|
||||
fn sbox_layer_b<F: Field>(x: [F; W]) -> [F; W] {
|
||||
let mut result = [F::ZERO; W];
|
||||
for i in 0..W {
|
||||
result[i] = x[i].cube_root();
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
#[unroll_for_loops]
|
||||
fn mds_layer<F: Field>(x: [F; W]) -> [F; W] {
|
||||
let mut result = [F::ZERO; W];
|
||||
for r in 0..W {
|
||||
for c in 0..W {
|
||||
result[r] += F::from_canonical_u64(MDS[r][c]) * x[c];
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
#[unroll_for_loops]
|
||||
fn constant_layer<F: Field>(xs: [F; W], con: &[u64; W]) -> [F; W] {
|
||||
let mut result = [F::ZERO; W];
|
||||
for i in 0..W {
|
||||
result[i] = xs[i] + F::from_canonical_u64(con[i]);
|
||||
}
|
||||
result
|
||||
}
|
||||
@ -11,7 +11,6 @@ use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::target::Target;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::config::{AlgebraicHasher, GenericHashOut, Hasher};
|
||||
use crate::plonk::proof::{OpeningSet, OpeningSetTarget};
|
||||
|
||||
/// Observes prover messages, and generates challenges by hashing the transcript, a la Fiat-Shamir.
|
||||
#[derive(Clone)]
|
||||
@ -69,32 +68,6 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn observe_opening_set<const D: usize>(&mut self, os: &OpeningSet<F, D>)
|
||||
where
|
||||
F: RichField + Extendable<D>,
|
||||
{
|
||||
let OpeningSet {
|
||||
constants,
|
||||
plonk_sigmas,
|
||||
wires,
|
||||
plonk_zs,
|
||||
plonk_zs_right,
|
||||
partial_products,
|
||||
quotient_polys,
|
||||
} = os;
|
||||
for v in &[
|
||||
constants,
|
||||
plonk_sigmas,
|
||||
wires,
|
||||
plonk_zs,
|
||||
plonk_zs_right,
|
||||
partial_products,
|
||||
quotient_polys,
|
||||
] {
|
||||
self.observe_extension_elements(v);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn observe_hash<OH: Hasher<F>>(&mut self, hash: OH::Hash) {
|
||||
self.observe_elements(&hash.to_vec())
|
||||
}
|
||||
@ -215,29 +188,6 @@ impl<F: RichField + Extendable<D>, H: AlgebraicHasher<F>, const D: usize>
|
||||
}
|
||||
}
|
||||
|
||||
pub fn observe_opening_set(&mut self, os: &OpeningSetTarget<D>) {
|
||||
let OpeningSetTarget {
|
||||
constants,
|
||||
plonk_sigmas,
|
||||
wires,
|
||||
plonk_zs,
|
||||
plonk_zs_right,
|
||||
partial_products,
|
||||
quotient_polys,
|
||||
} = os;
|
||||
for v in &[
|
||||
constants,
|
||||
plonk_sigmas,
|
||||
wires,
|
||||
plonk_zs,
|
||||
plonk_zs_right,
|
||||
partial_products,
|
||||
quotient_polys,
|
||||
] {
|
||||
self.observe_extension_elements(v);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn observe_hash(&mut self, hash: &HashOutTarget) {
|
||||
self.observe_elements(&hash.elements)
|
||||
}
|
||||
|
||||
@ -3,14 +3,14 @@ use std::marker::PhantomData;
|
||||
|
||||
use num::BigUint;
|
||||
use plonky2_field::extension_field::{Extendable, FieldExtension};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
|
||||
use crate::gadgets::arithmetic_u32::U32Target;
|
||||
use crate::gadgets::biguint::BigUintTarget;
|
||||
use crate::gadgets::nonnative::NonNativeTarget;
|
||||
use crate::hash::hash_types::{HashOut, HashOutTarget, RichField};
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::target::{BoolTarget, Target};
|
||||
use crate::iop::wire::Wire;
|
||||
use crate::iop::witness::{PartialWitness, PartitionWitness, Witness};
|
||||
use crate::plonk::circuit_data::{CommonCircuitData, ProverOnlyCircuitData};
|
||||
@ -166,12 +166,17 @@ impl<F: Field> GeneratedValues<F> {
|
||||
self.target_values.push((target, value))
|
||||
}
|
||||
|
||||
fn set_u32_target(&mut self, target: U32Target, value: u32) {
|
||||
pub fn set_bool_target(&mut self, target: BoolTarget, value: bool) {
|
||||
self.set_target(target.target, F::from_bool(value))
|
||||
}
|
||||
|
||||
pub fn set_u32_target(&mut self, target: U32Target, value: u32) {
|
||||
self.set_target(target.0, F::from_canonical_u32(value))
|
||||
}
|
||||
|
||||
pub fn set_biguint_target(&mut self, target: BigUintTarget, value: BigUint) {
|
||||
let mut limbs = value.to_u32_digits();
|
||||
|
||||
assert!(target.num_limbs() >= limbs.len());
|
||||
|
||||
limbs.resize(target.num_limbs(), 0);
|
||||
@ -180,8 +185,8 @@ impl<F: Field> GeneratedValues<F> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_nonnative_target<FF: Field>(&mut self, target: NonNativeTarget<FF>, value: FF) {
|
||||
self.set_biguint_target(target.value, value.to_biguint())
|
||||
pub fn set_nonnative_target<FF: PrimeField>(&mut self, target: NonNativeTarget<FF>, value: FF) {
|
||||
self.set_biguint_target(target.value, value.to_canonical_biguint())
|
||||
}
|
||||
|
||||
pub fn set_hash_target(&mut self, ht: HashOutTarget, value: HashOut<F>) {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
//! Logic common to multiple IOPs.
|
||||
pub(crate) mod challenger;
|
||||
pub mod challenger;
|
||||
pub mod ext_target;
|
||||
pub mod generator;
|
||||
pub mod target;
|
||||
|
||||
@ -1,9 +1,11 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use itertools::Itertools;
|
||||
use num::{BigUint, FromPrimitive, Zero};
|
||||
use plonky2_field::extension_field::{Extendable, FieldExtension};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
|
||||
use crate::fri::witness_util::set_fri_proof_target;
|
||||
use crate::gadgets::arithmetic_u32::U32Target;
|
||||
use crate::gadgets::biguint::BigUintTarget;
|
||||
use crate::gadgets::nonnative::NonNativeTarget;
|
||||
@ -14,7 +16,8 @@ use crate::hash::merkle_tree::MerkleCap;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::target::{BoolTarget, Target};
|
||||
use crate::iop::wire::Wire;
|
||||
use crate::plonk::config::AlgebraicHasher;
|
||||
use crate::plonk::config::{AlgebraicHasher, GenericConfig};
|
||||
use crate::plonk::proof::{Proof, ProofTarget, ProofWithPublicInputs, ProofWithPublicInputsTarget};
|
||||
|
||||
/// A witness holds information on the values of targets in a circuit.
|
||||
pub trait Witness<F: Field> {
|
||||
@ -59,20 +62,26 @@ pub trait Witness<F: Field> {
|
||||
panic!("not a bool")
|
||||
}
|
||||
|
||||
fn get_biguint_target(&self, target: BigUintTarget) -> BigUint {
|
||||
fn get_biguint_target(&self, target: BigUintTarget) -> BigUint
|
||||
where
|
||||
F: PrimeField,
|
||||
{
|
||||
let mut result = BigUint::zero();
|
||||
|
||||
let limb_base = BigUint::from_u64(1 << 32u64).unwrap();
|
||||
for i in (0..target.num_limbs()).rev() {
|
||||
let limb = target.get_limb(i);
|
||||
result *= &limb_base;
|
||||
result += self.get_target(limb.0).to_biguint();
|
||||
result += self.get_target(limb.0).to_canonical_biguint();
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
fn get_nonnative_target<FF: Field>(&self, target: NonNativeTarget<FF>) -> FF {
|
||||
fn get_nonnative_target<FF: PrimeField>(&self, target: NonNativeTarget<FF>) -> FF
|
||||
where
|
||||
F: PrimeField,
|
||||
{
|
||||
let val = self.get_biguint_target(target.value);
|
||||
FF::from_biguint(val)
|
||||
}
|
||||
@ -155,6 +164,109 @@ pub trait Witness<F: Field> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the targets in a `ProofWithPublicInputsTarget` to their corresponding values in a
|
||||
/// `ProofWithPublicInputs`.
|
||||
fn set_proof_with_pis_target<C: GenericConfig<D, F = F>, const D: usize>(
|
||||
&mut self,
|
||||
proof_with_pis_target: &ProofWithPublicInputsTarget<D>,
|
||||
proof_with_pis: &ProofWithPublicInputs<F, C, D>,
|
||||
) where
|
||||
F: RichField + Extendable<D>,
|
||||
C::Hasher: AlgebraicHasher<F>,
|
||||
{
|
||||
let ProofWithPublicInputs {
|
||||
proof,
|
||||
public_inputs,
|
||||
} = proof_with_pis;
|
||||
let ProofWithPublicInputsTarget {
|
||||
proof: pt,
|
||||
public_inputs: pi_targets,
|
||||
} = proof_with_pis_target;
|
||||
|
||||
// Set public inputs.
|
||||
for (&pi_t, &pi) in pi_targets.iter().zip_eq(public_inputs) {
|
||||
self.set_target(pi_t, pi);
|
||||
}
|
||||
|
||||
self.set_proof_target(pt, proof);
|
||||
}
|
||||
|
||||
/// Set the targets in a `ProofTarget` to their corresponding values in a `Proof`.
|
||||
fn set_proof_target<C: GenericConfig<D, F = F>, const D: usize>(
|
||||
&mut self,
|
||||
proof_target: &ProofTarget<D>,
|
||||
proof: &Proof<F, C, D>,
|
||||
) where
|
||||
F: RichField + Extendable<D>,
|
||||
C::Hasher: AlgebraicHasher<F>,
|
||||
{
|
||||
self.set_cap_target(&proof_target.wires_cap, &proof.wires_cap);
|
||||
self.set_cap_target(
|
||||
&proof_target.plonk_zs_partial_products_cap,
|
||||
&proof.plonk_zs_partial_products_cap,
|
||||
);
|
||||
self.set_cap_target(&proof_target.quotient_polys_cap, &proof.quotient_polys_cap);
|
||||
|
||||
for (&t, &x) in proof_target
|
||||
.openings
|
||||
.wires
|
||||
.iter()
|
||||
.zip_eq(&proof.openings.wires)
|
||||
{
|
||||
self.set_extension_target(t, x);
|
||||
}
|
||||
for (&t, &x) in proof_target
|
||||
.openings
|
||||
.constants
|
||||
.iter()
|
||||
.zip_eq(&proof.openings.constants)
|
||||
{
|
||||
self.set_extension_target(t, x);
|
||||
}
|
||||
for (&t, &x) in proof_target
|
||||
.openings
|
||||
.plonk_sigmas
|
||||
.iter()
|
||||
.zip_eq(&proof.openings.plonk_sigmas)
|
||||
{
|
||||
self.set_extension_target(t, x);
|
||||
}
|
||||
for (&t, &x) in proof_target
|
||||
.openings
|
||||
.plonk_zs
|
||||
.iter()
|
||||
.zip_eq(&proof.openings.plonk_zs)
|
||||
{
|
||||
self.set_extension_target(t, x);
|
||||
}
|
||||
for (&t, &x) in proof_target
|
||||
.openings
|
||||
.plonk_zs_right
|
||||
.iter()
|
||||
.zip_eq(&proof.openings.plonk_zs_right)
|
||||
{
|
||||
self.set_extension_target(t, x);
|
||||
}
|
||||
for (&t, &x) in proof_target
|
||||
.openings
|
||||
.partial_products
|
||||
.iter()
|
||||
.zip_eq(&proof.openings.partial_products)
|
||||
{
|
||||
self.set_extension_target(t, x);
|
||||
}
|
||||
for (&t, &x) in proof_target
|
||||
.openings
|
||||
.quotient_polys
|
||||
.iter()
|
||||
.zip_eq(&proof.openings.quotient_polys)
|
||||
{
|
||||
self.set_extension_target(t, x);
|
||||
}
|
||||
|
||||
set_fri_proof_target(self, &proof_target.opening_proof, &proof.opening_proof);
|
||||
}
|
||||
|
||||
fn set_wire(&mut self, wire: Wire, value: F) {
|
||||
self.set_target(Target::Wire(wire), value)
|
||||
}
|
||||
|
||||
@ -6,7 +6,6 @@
|
||||
#![allow(clippy::len_without_is_empty)]
|
||||
#![allow(clippy::needless_range_loop)]
|
||||
#![allow(clippy::return_self_not_must_use)]
|
||||
#![feature(asm_sym)]
|
||||
#![feature(generic_const_exprs)]
|
||||
#![feature(specialization)]
|
||||
#![feature(stdsimd)]
|
||||
|
||||
@ -10,11 +10,13 @@ use plonky2_field::field_types::Field;
|
||||
use plonky2_field::polynomial::PolynomialValues;
|
||||
use plonky2_util::{log2_ceil, log2_strict};
|
||||
|
||||
use crate::fri::commitment::PolynomialBatchCommitment;
|
||||
use crate::fri::oracle::PolynomialBatch;
|
||||
use crate::fri::{FriConfig, FriParams};
|
||||
use crate::gadgets::arithmetic::BaseArithmeticOperation;
|
||||
use crate::gadgets::arithmetic_extension::ExtensionArithmeticOperation;
|
||||
use crate::gadgets::arithmetic_u32::U32Target;
|
||||
use crate::gadgets::polynomial::PolynomialCoeffsExtTarget;
|
||||
use crate::gates::add_many_u32::U32AddManyGate;
|
||||
use crate::gates::arithmetic_base::ArithmeticGate;
|
||||
use crate::gates::arithmetic_extension::ArithmeticExtensionGate;
|
||||
use crate::gates::batchable::{BatchableGate, CurrentSlot, GateRef};
|
||||
@ -24,6 +26,7 @@ use crate::gates::gate_tree::Tree;
|
||||
use crate::gates::noop::NoopGate;
|
||||
use crate::gates::public_input::PublicInputGate;
|
||||
use crate::hash::hash_types::{HashOutTarget, MerkleCapTarget, RichField};
|
||||
use crate::hash::merkle_proofs::MerkleProofTarget;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::{
|
||||
CopyGenerator, RandomValueGenerator, SimpleGenerator, WitnessGenerator,
|
||||
@ -37,7 +40,8 @@ use crate::plonk::circuit_data::{
|
||||
use crate::plonk::config::{GenericConfig, Hasher};
|
||||
use crate::plonk::copy_constraint::CopyConstraint;
|
||||
use crate::plonk::permutation_argument::Forest;
|
||||
use crate::plonk::plonk_common::PlonkPolynomials;
|
||||
use crate::plonk::plonk_common::PlonkOracle;
|
||||
use crate::timed;
|
||||
use crate::util::context_tree::ContextTree;
|
||||
use crate::util::marking::{Markable, MarkedTargets};
|
||||
use crate::util::partial_products::num_partial_products;
|
||||
@ -167,6 +171,12 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
(0..n).map(|_i| self.add_virtual_hash()).collect()
|
||||
}
|
||||
|
||||
pub(crate) fn add_virtual_merkle_proof(&mut self, len: usize) -> MerkleProofTarget {
|
||||
MerkleProofTarget {
|
||||
siblings: self.add_virtual_hashes(len),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_virtual_extension_target(&mut self) -> ExtensionTarget<D> {
|
||||
ExtensionTarget(self.add_virtual_targets(D).try_into().unwrap())
|
||||
}
|
||||
@ -177,11 +187,25 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(crate) fn add_virtual_poly_coeff_ext(
|
||||
&mut self,
|
||||
num_coeffs: usize,
|
||||
) -> PolynomialCoeffsExtTarget<D> {
|
||||
let coeffs = self.add_virtual_extension_targets(num_coeffs);
|
||||
PolynomialCoeffsExtTarget(coeffs)
|
||||
}
|
||||
|
||||
// TODO: Unsafe
|
||||
pub fn add_virtual_bool_target(&mut self) -> BoolTarget {
|
||||
BoolTarget::new_unsafe(self.add_virtual_target())
|
||||
}
|
||||
|
||||
pub fn add_virtual_bool_target_safe(&mut self) -> BoolTarget {
|
||||
let b = BoolTarget::new_unsafe(self.add_virtual_target());
|
||||
self.assert_bool(b);
|
||||
b
|
||||
}
|
||||
|
||||
/// Adds a gate to the circuit, and returns its index.
|
||||
pub fn add_gate<G: BatchableGate<F, D>>(
|
||||
&mut self,
|
||||
@ -219,7 +243,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
fn check_gate_compatibility<G: Gate<F, D>>(&self, gate: &G) {
|
||||
assert!(
|
||||
gate.num_wires() <= self.config.num_wires,
|
||||
"{:?} requires {} wires, but our GateConfig has only {}",
|
||||
"{:?} requires {} wires, but our CircuitConfig has only {}",
|
||||
gate.id(),
|
||||
gate.num_wires(),
|
||||
self.config.num_wires
|
||||
@ -418,11 +442,12 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
let fri_config = &self.config.fri_config;
|
||||
let reduction_arity_bits = fri_config.reduction_strategy.reduction_arity_bits(
|
||||
degree_bits,
|
||||
self.config.fri_config.rate_bits,
|
||||
fri_config.rate_bits,
|
||||
fri_config.num_query_rounds,
|
||||
);
|
||||
FriParams {
|
||||
config: fri_config.clone(),
|
||||
hiding: self.config.zero_knowledge,
|
||||
degree_bits,
|
||||
reduction_arity_bits,
|
||||
}
|
||||
@ -631,16 +656,21 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
/// Builds a "full circuit", with both prover and verifier data.
|
||||
pub fn build<C: GenericConfig<D, F = F>>(mut self) -> CircuitData<F, C, D> {
|
||||
pub fn build<C: GenericConfig<D, F = F>>(mut self) -> CircuitData<F, C, D>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let mut timing = TimingTree::new("preprocess", Level::Trace);
|
||||
let start = Instant::now();
|
||||
let rate_bits = self.config.fri_config.rate_bits;
|
||||
|
||||
self.fill_batched_gates();
|
||||
|
||||
// Hash the public inputs, and route them to a `PublicInputGate` which will enforce that
|
||||
// those hash wires match the claimed public inputs.
|
||||
let num_public_inputs = self.public_inputs.len();
|
||||
let public_inputs_hash =
|
||||
self.hash_n_to_hash::<C::InnerHasher>(self.public_inputs.clone(), true);
|
||||
self.hash_n_to_hash_no_pad::<C::InnerHasher>(self.public_inputs.clone());
|
||||
let pi_gate = self.add_gate(PublicInputGate, vec![], vec![]);
|
||||
for (&hash_part, wire) in public_inputs_hash
|
||||
.elements
|
||||
@ -666,31 +696,41 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
|
||||
let gates = self.gates.iter().cloned().collect();
|
||||
let (gate_tree, max_filtered_constraint_degree, num_constants) = Tree::from_gates(gates);
|
||||
let prefixed_gates = PrefixedGate::from_tree(gate_tree);
|
||||
|
||||
// `quotient_degree_factor` has to be between `max_filtered_constraint_degree-1` and `1<<rate_bits`.
|
||||
// We find the value that minimizes `num_partial_product + quotient_degree_factor`.
|
||||
let rate_bits = self.config.fri_config.rate_bits;
|
||||
let quotient_degree_factor = (max_filtered_constraint_degree - 1..=1 << rate_bits)
|
||||
.min_by_key(|&q| num_partial_products(self.config.num_routed_wires, q).0 + q)
|
||||
let min_quotient_degree_factor = (max_filtered_constraint_degree - 1).max(2);
|
||||
let max_quotient_degree_factor = self.config.max_quotient_degree_factor.min(1 << rate_bits);
|
||||
let quotient_degree_factor = (min_quotient_degree_factor..=max_quotient_degree_factor)
|
||||
.min_by_key(|&q| num_partial_products(self.config.num_routed_wires, q) + q)
|
||||
.unwrap();
|
||||
debug!("Quotient degree factor set to: {}.", quotient_degree_factor);
|
||||
let prefixed_gates = PrefixedGate::from_tree(gate_tree);
|
||||
|
||||
let subgroup = F::two_adic_subgroup(degree_bits);
|
||||
|
||||
let constant_vecs = self.constant_polys(&prefixed_gates, num_constants);
|
||||
let constant_vecs = timed!(
|
||||
timing,
|
||||
"generate constant polynomials",
|
||||
self.constant_polys(&prefixed_gates, num_constants)
|
||||
);
|
||||
|
||||
let k_is = get_unique_coset_shifts(degree, self.config.num_routed_wires);
|
||||
let (sigma_vecs, forest) = self.sigma_vecs(&k_is, &subgroup);
|
||||
let (sigma_vecs, forest) = timed!(
|
||||
timing,
|
||||
"generate sigma polynomials",
|
||||
self.sigma_vecs(&k_is, &subgroup)
|
||||
);
|
||||
|
||||
// Precompute FFT roots.
|
||||
let max_fft_points = 1 << (degree_bits + max(rate_bits, log2_ceil(quotient_degree_factor)));
|
||||
let fft_root_table = fft_root_table(max_fft_points);
|
||||
|
||||
let constants_sigmas_vecs = [constant_vecs, sigma_vecs.clone()].concat();
|
||||
let constants_sigmas_commitment = PolynomialBatchCommitment::from_values(
|
||||
let constants_sigmas_commitment = PolynomialBatch::from_values(
|
||||
constants_sigmas_vecs,
|
||||
rate_bits,
|
||||
self.config.zero_knowledge & PlonkPolynomials::CONSTANTS_SIGMAS.blinding,
|
||||
PlonkOracle::CONSTANTS_SIGMAS.blinding,
|
||||
self.config.fri_config.cap_height,
|
||||
&mut timing,
|
||||
Some(&fft_root_table),
|
||||
@ -763,7 +803,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
constants_sigmas_cap.flatten(),
|
||||
vec![/* Add other circuit data here */],
|
||||
];
|
||||
let circuit_digest = C::Hasher::hash(circuit_digest_parts.concat(), false);
|
||||
let circuit_digest = C::Hasher::hash_no_pad(&circuit_digest_parts.concat());
|
||||
|
||||
let common = CommonCircuitData {
|
||||
config: self.config,
|
||||
@ -774,11 +814,13 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
num_gate_constraints,
|
||||
num_constants,
|
||||
num_virtual_targets: self.virtual_target_index,
|
||||
num_public_inputs,
|
||||
k_is,
|
||||
num_partial_products,
|
||||
circuit_digest,
|
||||
};
|
||||
|
||||
timing.print();
|
||||
debug!("Building circuit took {}s", start.elapsed().as_secs_f32());
|
||||
CircuitData {
|
||||
prover_only,
|
||||
@ -788,7 +830,10 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
/// Builds a "prover circuit", with data needed to generate proofs but not verify them.
|
||||
pub fn build_prover<C: GenericConfig<D, F = F>>(self) -> ProverCircuitData<F, C, D> {
|
||||
pub fn build_prover<C: GenericConfig<D, F = F>>(self) -> ProverCircuitData<F, C, D>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
// TODO: Can skip parts of this.
|
||||
let CircuitData {
|
||||
prover_only,
|
||||
@ -802,7 +847,10 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
/// Builds a "verifier circuit", with data needed to verify proofs but not generate them.
|
||||
pub fn build_verifier<C: GenericConfig<D, F = F>>(self) -> VerifierCircuitData<F, C, D> {
|
||||
pub fn build_verifier<C: GenericConfig<D, F = F>>(self) -> VerifierCircuitData<F, C, D>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
// TODO: Can skip parts of this.
|
||||
let CircuitData {
|
||||
verifier_only,
|
||||
@ -817,332 +865,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
// /// Finds the last available arithmetic gate with the given constants or add one if there aren't any.
|
||||
// /// Returns `(g,i)` such that there is an arithmetic gate with the given constants at index
|
||||
// /// `g` and the gate's `i`-th operation is available.
|
||||
// pub(crate) fn find_base_arithmetic_gate(&mut self, const_0: F, const_1: F) -> (usize, usize) {
|
||||
// let (gate, i) = self
|
||||
// .batched_gates
|
||||
// .free_base_arithmetic
|
||||
// .get(&(const_0, const_1))
|
||||
// .copied()
|
||||
// .unwrap_or_else(|| {
|
||||
// let gate = self.add_gate(
|
||||
// ArithmeticGate::new_from_config(&self.config),
|
||||
// vec![const_0, const_1],
|
||||
// );
|
||||
// (gate, 0)
|
||||
// });
|
||||
//
|
||||
// // Update `free_arithmetic` with new values.
|
||||
// if i < ArithmeticGate::num_ops(&self.config) - 1 {
|
||||
// self.batched_gates
|
||||
// .free_base_arithmetic
|
||||
// .insert((const_0, const_1), (gate, i + 1));
|
||||
// } else {
|
||||
// self.batched_gates
|
||||
// .free_base_arithmetic
|
||||
// .remove(&(const_0, const_1));
|
||||
// }
|
||||
//
|
||||
// (gate, i)
|
||||
// }
|
||||
//
|
||||
// /// Finds the last available arithmetic gate with the given constants or add one if there aren't any.
|
||||
// /// Returns `(g,i)` such that there is an arithmetic gate with the given constants at index
|
||||
// /// `g` and the gate's `i`-th operation is available.
|
||||
// pub(crate) fn find_arithmetic_gate(&mut self, const_0: F, const_1: F) -> (usize, usize) {
|
||||
// let (gate, i) = self
|
||||
// .batched_gates
|
||||
// .free_arithmetic
|
||||
// .get(&(const_0, const_1))
|
||||
// .copied()
|
||||
// .unwrap_or_else(|| {
|
||||
// let gate = self.add_gate(
|
||||
// ArithmeticExtensionGate::new_from_config(&self.config),
|
||||
// vec![const_0, const_1],
|
||||
// );
|
||||
// (gate, 0)
|
||||
// });
|
||||
//
|
||||
// // Update `free_arithmetic` with new values.
|
||||
// if i < ArithmeticExtensionGate::<D>::num_ops(&self.config) - 1 {
|
||||
// self.batched_gates
|
||||
// .free_arithmetic
|
||||
// .insert((const_0, const_1), (gate, i + 1));
|
||||
// } else {
|
||||
// self.batched_gates
|
||||
// .free_arithmetic
|
||||
// .remove(&(const_0, const_1));
|
||||
// }
|
||||
//
|
||||
// (gate, i)
|
||||
// }
|
||||
//
|
||||
// /// Finds the last available arithmetic gate with the given constants or add one if there aren't any.
|
||||
// /// Returns `(g,i)` such that there is an arithmetic gate with the given constants at index
|
||||
// /// `g` and the gate's `i`-th operation is available.
|
||||
// pub(crate) fn find_mul_gate(&mut self, const_0: F) -> (usize, usize) {
|
||||
// let (gate, i) = self
|
||||
// .batched_gates
|
||||
// .free_mul
|
||||
// .get(&const_0)
|
||||
// .copied()
|
||||
// .unwrap_or_else(|| {
|
||||
// let gate = self.add_gate(
|
||||
// MulExtensionGate::new_from_config(&self.config),
|
||||
// vec![const_0],
|
||||
// );
|
||||
// (gate, 0)
|
||||
// });
|
||||
//
|
||||
// // Update `free_arithmetic` with new values.
|
||||
// if i < MulExtensionGate::<D>::num_ops(&self.config) - 1 {
|
||||
// self.batched_gates.free_mul.insert(const_0, (gate, i + 1));
|
||||
// } else {
|
||||
// self.batched_gates.free_mul.remove(&const_0);
|
||||
// }
|
||||
//
|
||||
// (gate, i)
|
||||
// }
|
||||
//
|
||||
// /// Finds the last available random access gate with the given `vec_size` or add one if there aren't any.
|
||||
// /// Returns `(g,i)` such that there is a random access gate with the given `vec_size` at index
|
||||
// /// `g` and the gate's `i`-th random access is available.
|
||||
// pub(crate) fn find_random_access_gate(&mut self, bits: usize) -> (usize, usize) {
|
||||
// let (gate, i) = self
|
||||
// .batched_gates
|
||||
// .free_random_access
|
||||
// .get(&bits)
|
||||
// .copied()
|
||||
// .unwrap_or_else(|| {
|
||||
// let gate = self.add_gate(
|
||||
// RandomAccessGate::new_from_config(&self.config, bits),
|
||||
// vec![],
|
||||
// );
|
||||
// (gate, 0)
|
||||
// });
|
||||
//
|
||||
// // Update `free_random_access` with new values.
|
||||
// if i + 1 < RandomAccessGate::<F, D>::new_from_config(&self.config, bits).num_copies {
|
||||
// self.batched_gates
|
||||
// .free_random_access
|
||||
// .insert(bits, (gate, i + 1));
|
||||
// } else {
|
||||
// self.batched_gates.free_random_access.remove(&bits);
|
||||
// }
|
||||
//
|
||||
// (gate, i)
|
||||
// }
|
||||
//
|
||||
// pub fn find_switch_gate(&mut self, chunk_size: usize) -> (SwitchGate<F, D>, usize, usize) {
|
||||
// if self.batched_gates.current_switch_gates.len() < chunk_size {
|
||||
// self.batched_gates.current_switch_gates.extend(vec![
|
||||
// None;
|
||||
// chunk_size
|
||||
// - self
|
||||
// .batched_gates
|
||||
// .current_switch_gates
|
||||
// .len()
|
||||
// ]);
|
||||
// }
|
||||
//
|
||||
// let (gate, gate_index, next_copy) =
|
||||
// match self.batched_gates.current_switch_gates[chunk_size - 1].clone() {
|
||||
// None => {
|
||||
// let gate = SwitchGate::<F, D>::new_from_config(&self.config, chunk_size);
|
||||
// let gate_index = self.add_gate(gate.clone(), vec![]);
|
||||
// (gate, gate_index, 0)
|
||||
// }
|
||||
// Some((gate, idx, next_copy)) => (gate, idx, next_copy),
|
||||
// };
|
||||
//
|
||||
// let num_copies = gate.num_copies;
|
||||
//
|
||||
// if next_copy == num_copies - 1 {
|
||||
// self.batched_gates.current_switch_gates[chunk_size - 1] = None;
|
||||
// } else {
|
||||
// self.batched_gates.current_switch_gates[chunk_size - 1] =
|
||||
// Some((gate.clone(), gate_index, next_copy + 1));
|
||||
// }
|
||||
//
|
||||
// (gate, gate_index, next_copy)
|
||||
// }
|
||||
//
|
||||
// pub(crate) fn find_u32_arithmetic_gate(&mut self) -> (usize, usize) {
|
||||
// let (gate_index, copy) = match self.batched_gates.current_u32_arithmetic_gate {
|
||||
// None => {
|
||||
// let gate = U32ArithmeticGate::new_from_config(&self.config);
|
||||
// let gate_index = self.add_gate(gate, vec![]);
|
||||
// (gate_index, 0)
|
||||
// }
|
||||
// Some((gate_index, copy)) => (gate_index, copy),
|
||||
// };
|
||||
//
|
||||
// if copy == U32ArithmeticGate::<F, D>::num_ops(&self.config) - 1 {
|
||||
// self.batched_gates.current_u32_arithmetic_gate = None;
|
||||
// } else {
|
||||
// self.batched_gates.current_u32_arithmetic_gate = Some((gate_index, copy + 1));
|
||||
// }
|
||||
//
|
||||
// (gate_index, copy)
|
||||
// }
|
||||
//
|
||||
// pub(crate) fn find_u32_subtraction_gate(&mut self) -> (usize, usize) {
|
||||
// let (gate_index, copy) = match self.batched_gates.current_u32_subtraction_gate {
|
||||
// None => {
|
||||
// let gate = U32SubtractionGate::new_from_config(&self.config);
|
||||
// let gate_index = self.add_gate(gate, vec![]);
|
||||
// (gate_index, 0)
|
||||
// }
|
||||
// Some((gate_index, copy)) => (gate_index, copy),
|
||||
// };
|
||||
//
|
||||
// if copy == U32SubtractionGate::<F, D>::num_ops(&self.config) - 1 {
|
||||
// self.batched_gates.current_u32_subtraction_gate = None;
|
||||
// } else {
|
||||
// self.batched_gates.current_u32_subtraction_gate = Some((gate_index, copy + 1));
|
||||
// }
|
||||
//
|
||||
// (gate_index, copy)
|
||||
// }
|
||||
//
|
||||
// /// Returns the gate index and copy index of a free `ConstantGate` slot, potentially adding a
|
||||
// /// new `ConstantGate` if needed.
|
||||
// fn constant_gate_instance(&mut self) -> (usize, usize) {
|
||||
// if self.batched_gates.free_constant.is_none() {
|
||||
// let num_consts = self.config.constant_gate_size;
|
||||
// // We will fill this `ConstantGate` with zero constants initially.
|
||||
// // These will be overwritten by `constant` as the gate instances are filled.
|
||||
// let gate = self.add_gate(ConstantGate { num_consts }, vec![F::ZERO; num_consts]);
|
||||
// self.batched_gates.free_constant = Some((gate, 0));
|
||||
// }
|
||||
//
|
||||
// let (gate, instance) = self.batched_gates.free_constant.unwrap();
|
||||
// if instance + 1 < self.config.constant_gate_size {
|
||||
// self.batched_gates.free_constant = Some((gate, instance + 1));
|
||||
// } else {
|
||||
// self.batched_gates.free_constant = None;
|
||||
// }
|
||||
// (gate, instance)
|
||||
// }
|
||||
//
|
||||
// /// Fill the remaining unused arithmetic operations with zeros, so that all
|
||||
// /// `ArithmeticGate` are run.
|
||||
// fn fill_base_arithmetic_gates(&mut self) {
|
||||
// let zero = self.zero();
|
||||
// for ((c0, c1), (_gate, i)) in self.batched_gates.free_base_arithmetic.clone() {
|
||||
// for _ in i..ArithmeticGate::num_ops(&self.config) {
|
||||
// // If we directly wire in zero, an optimization will skip doing anything and return
|
||||
// // zero. So we pass in a virtual target and connect it to zero afterward.
|
||||
// let dummy = self.add_virtual_target();
|
||||
// self.arithmetic(c0, c1, dummy, dummy, dummy);
|
||||
// self.connect(dummy, zero);
|
||||
// }
|
||||
// }
|
||||
// assert!(self.batched_gates.free_base_arithmetic.is_empty());
|
||||
// }
|
||||
//
|
||||
// /// Fill the remaining unused arithmetic operations with zeros, so that all
|
||||
// /// `ArithmeticExtensionGenerator`s are run.
|
||||
// fn fill_arithmetic_gates(&mut self) {
|
||||
// let zero = self.zero_extension();
|
||||
// for ((c0, c1), (_gate, i)) in self.batched_gates.free_arithmetic.clone() {
|
||||
// for _ in i..ArithmeticExtensionGate::<D>::num_ops(&self.config) {
|
||||
// // If we directly wire in zero, an optimization will skip doing anything and return
|
||||
// // zero. So we pass in a virtual target and connect it to zero afterward.
|
||||
// let dummy = self.add_virtual_extension_target();
|
||||
// self.arithmetic_extension(c0, c1, dummy, dummy, dummy);
|
||||
// self.connect_extension(dummy, zero);
|
||||
// }
|
||||
// }
|
||||
// assert!(self.batched_gates.free_arithmetic.is_empty());
|
||||
// }
|
||||
//
|
||||
// /// Fill the remaining unused arithmetic operations with zeros, so that all
|
||||
// /// `ArithmeticExtensionGenerator`s are run.
|
||||
// fn fill_mul_gates(&mut self) {
|
||||
// let zero = self.zero_extension();
|
||||
// for (c0, (_gate, i)) in self.batched_gates.free_mul.clone() {
|
||||
// for _ in i..MulExtensionGate::<D>::num_ops(&self.config) {
|
||||
// // If we directly wire in zero, an optimization will skip doing anything and return
|
||||
// // zero. So we pass in a virtual target and connect it to zero afterward.
|
||||
// let dummy = self.add_virtual_extension_target();
|
||||
// self.arithmetic_extension(c0, F::ZERO, dummy, dummy, zero);
|
||||
// self.connect_extension(dummy, zero);
|
||||
// }
|
||||
// }
|
||||
// assert!(self.batched_gates.free_mul.is_empty());
|
||||
// }
|
||||
//
|
||||
// /// Fill the remaining unused random access operations with zeros, so that all
|
||||
// /// `RandomAccessGenerator`s are run.
|
||||
// fn fill_random_access_gates(&mut self) {
|
||||
// let zero = self.zero();
|
||||
// for (bits, (_, i)) in self.batched_gates.free_random_access.clone() {
|
||||
// let max_copies =
|
||||
// RandomAccessGate::<F, D>::new_from_config(&self.config, bits).num_copies;
|
||||
// for _ in i..max_copies {
|
||||
// self.random_access(zero, zero, vec![zero; 1 << bits]);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// /// Fill the remaining unused switch gates with dummy values, so that all
|
||||
// /// `SwitchGenerator`s are run.
|
||||
// fn fill_switch_gates(&mut self) {
|
||||
// let zero = self.zero();
|
||||
//
|
||||
// for chunk_size in 1..=self.batched_gates.current_switch_gates.len() {
|
||||
// if let Some((gate, gate_index, mut copy)) =
|
||||
// self.batched_gates.current_switch_gates[chunk_size - 1].clone()
|
||||
// {
|
||||
// while copy < gate.num_copies {
|
||||
// for element in 0..chunk_size {
|
||||
// let wire_first_input =
|
||||
// Target::wire(gate_index, gate.wire_first_input(copy, element));
|
||||
// let wire_second_input =
|
||||
// Target::wire(gate_index, gate.wire_second_input(copy, element));
|
||||
// let wire_switch_bool =
|
||||
// Target::wire(gate_index, gate.wire_switch_bool(copy));
|
||||
// self.connect(zero, wire_first_input);
|
||||
// self.connect(zero, wire_second_input);
|
||||
// self.connect(zero, wire_switch_bool);
|
||||
// }
|
||||
// copy += 1;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// /// Fill the remaining unused U32 arithmetic operations with zeros, so that all
|
||||
// /// `U32ArithmeticGenerator`s are run.
|
||||
// fn fill_u32_arithmetic_gates(&mut self) {
|
||||
// let zero = self.zero_u32();
|
||||
// if let Some((_gate_index, copy)) = self.batched_gates.current_u32_arithmetic_gate {
|
||||
// for _ in copy..U32ArithmeticGate::<F, D>::num_ops(&self.config) {
|
||||
// let dummy = self.add_virtual_u32_target();
|
||||
// self.mul_add_u32(dummy, dummy, dummy);
|
||||
// self.connect_u32(dummy, zero);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// /// Fill the remaining unused U32 subtraction operations with zeros, so that all
|
||||
// /// `U32SubtractionGenerator`s are run.
|
||||
// fn fill_u32_subtraction_gates(&mut self) {
|
||||
// let zero = self.zero_u32();
|
||||
// if let Some((_gate_index, copy)) = self.batched_gates.current_u32_subtraction_gate {
|
||||
// for _i in copy..U32SubtractionGate::<F, D>::num_ops(&self.config) {
|
||||
// let dummy = self.add_virtual_u32_target();
|
||||
// self.sub_u32(dummy, dummy, dummy);
|
||||
// self.connect_u32(dummy, zero);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
fn fill_batched_gates(&mut self) {
|
||||
dbg!(&self.current_slots);
|
||||
let instances = self.gate_instances.clone();
|
||||
for gate in instances {
|
||||
if let Some(slot) = self.current_slots.get(&gate.gate_ref) {
|
||||
|
||||
@ -5,16 +5,23 @@ use anyhow::Result;
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
use plonky2_field::fft::FftRootTable;
|
||||
|
||||
use crate::fri::commitment::PolynomialBatchCommitment;
|
||||
use crate::field::field_types::Field;
|
||||
use crate::fri::oracle::PolynomialBatch;
|
||||
use crate::fri::reduction_strategies::FriReductionStrategy;
|
||||
use crate::fri::structure::{
|
||||
FriBatchInfo, FriBatchInfoTarget, FriInstanceInfo, FriInstanceInfoTarget, FriPolynomialInfo,
|
||||
};
|
||||
use crate::fri::{FriConfig, FriParams};
|
||||
use crate::gates::gate::PrefixedGate;
|
||||
use crate::hash::hash_types::{MerkleCapTarget, RichField};
|
||||
use crate::hash::merkle_tree::MerkleCap;
|
||||
use crate::iop::ext_target::ExtensionTarget;
|
||||
use crate::iop::generator::WitnessGenerator;
|
||||
use crate::iop::target::Target;
|
||||
use crate::iop::witness::PartialWitness;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::config::{GenericConfig, Hasher};
|
||||
use crate::plonk::plonk_common::{PlonkOracle, FRI_ORACLES};
|
||||
use crate::plonk::proof::{CompressedProofWithPublicInputs, ProofWithPublicInputs};
|
||||
use crate::plonk::prover::prove;
|
||||
use crate::plonk::verifier::verify;
|
||||
@ -34,21 +41,19 @@ pub struct CircuitConfig {
|
||||
/// `degree / |F|`.
|
||||
pub num_challenges: usize,
|
||||
pub zero_knowledge: bool,
|
||||
|
||||
/// A cap on the quotient polynomial's degree factor. The actual degree factor is derived
|
||||
/// systematically, but will never exceed this value.
|
||||
pub max_quotient_degree_factor: usize,
|
||||
pub fri_config: FriConfig,
|
||||
}
|
||||
|
||||
impl Default for CircuitConfig {
|
||||
fn default() -> Self {
|
||||
CircuitConfig::standard_recursion_config()
|
||||
Self::standard_recursion_config()
|
||||
}
|
||||
}
|
||||
|
||||
impl CircuitConfig {
|
||||
pub fn rate(&self) -> f64 {
|
||||
1.0 / ((1 << self.fri_config.rate_bits) as f64)
|
||||
}
|
||||
|
||||
pub fn num_advice_wires(&self) -> usize {
|
||||
self.num_wires - self.num_routed_wires
|
||||
}
|
||||
@ -63,6 +68,7 @@ impl CircuitConfig {
|
||||
security_bits: 100,
|
||||
num_challenges: 2,
|
||||
zero_knowledge: false,
|
||||
max_quotient_degree_factor: 8,
|
||||
fri_config: FriConfig {
|
||||
rate_bits: 3,
|
||||
cap_height: 4,
|
||||
@ -73,6 +79,13 @@ impl CircuitConfig {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn standard_ecc_config() -> Self {
|
||||
Self {
|
||||
num_wires: 136,
|
||||
..Self::standard_recursion_config()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn standard_recursion_zk_config() -> Self {
|
||||
CircuitConfig {
|
||||
zero_knowledge: true,
|
||||
@ -91,7 +104,10 @@ pub struct CircuitData<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>,
|
||||
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
CircuitData<F, C, D>
|
||||
{
|
||||
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>> {
|
||||
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
prove(
|
||||
&self.prover_only,
|
||||
&self.common,
|
||||
@ -100,14 +116,20 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
)
|
||||
}
|
||||
|
||||
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()> {
|
||||
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
verify(proof_with_pis, &self.verifier_only, &self.common)
|
||||
}
|
||||
|
||||
pub fn verify_compressed(
|
||||
&self,
|
||||
compressed_proof_with_pis: CompressedProofWithPublicInputs<F, C, D>,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
compressed_proof_with_pis.verify(&self.verifier_only, &self.common)
|
||||
}
|
||||
}
|
||||
@ -131,7 +153,10 @@ pub struct ProverCircuitData<
|
||||
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
ProverCircuitData<F, C, D>
|
||||
{
|
||||
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>> {
|
||||
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
prove(
|
||||
&self.prover_only,
|
||||
&self.common,
|
||||
@ -155,14 +180,20 @@ pub struct VerifierCircuitData<
|
||||
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
VerifierCircuitData<F, C, D>
|
||||
{
|
||||
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()> {
|
||||
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
verify(proof_with_pis, &self.verifier_only, &self.common)
|
||||
}
|
||||
|
||||
pub fn verify_compressed(
|
||||
&self,
|
||||
compressed_proof_with_pis: CompressedProofWithPublicInputs<F, C, D>,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
compressed_proof_with_pis.verify(&self.verifier_only, &self.common)
|
||||
}
|
||||
}
|
||||
@ -178,7 +209,7 @@ pub(crate) struct ProverOnlyCircuitData<
|
||||
/// they watch.
|
||||
pub generator_indices_by_watches: BTreeMap<usize, Vec<usize>>,
|
||||
/// Commitments to the constants polynomials and sigma polynomials.
|
||||
pub constants_sigmas_commitment: PolynomialBatchCommitment<F, C, D>,
|
||||
pub constants_sigmas_commitment: PolynomialBatch<F, C, D>,
|
||||
/// The transpose of the list of sigma polynomials.
|
||||
pub sigmas: Vec<Vec<F>>,
|
||||
/// Subgroup of order `degree`.
|
||||
@ -228,12 +259,13 @@ pub struct CommonCircuitData<
|
||||
|
||||
pub(crate) num_virtual_targets: usize,
|
||||
|
||||
pub(crate) num_public_inputs: usize,
|
||||
|
||||
/// The `{k_i}` valued used in `S_ID_i` in Plonk's permutation argument.
|
||||
pub(crate) k_is: Vec<F>,
|
||||
|
||||
/// The number of partial products needed to compute the `Z` polynomials and
|
||||
/// the number of original elements consumed in `partial_products()`.
|
||||
pub(crate) num_partial_products: (usize, usize),
|
||||
/// The number of partial products needed to compute the `Z` polynomials.
|
||||
pub(crate) num_partial_products: usize,
|
||||
|
||||
/// A digest of the "circuit" (i.e. the instance, minus public inputs), which can be used to
|
||||
/// seed Fiat-Shamir.
|
||||
@ -286,6 +318,103 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
pub fn partial_products_range(&self) -> RangeFrom<usize> {
|
||||
self.config.num_challenges..
|
||||
}
|
||||
|
||||
pub(crate) fn get_fri_instance(&self, zeta: F::Extension) -> FriInstanceInfo<F, D> {
|
||||
// All polynomials are opened at zeta.
|
||||
let zeta_batch = FriBatchInfo {
|
||||
point: zeta,
|
||||
polynomials: self.fri_all_polys(),
|
||||
};
|
||||
|
||||
// The Z polynomials are also opened at g * zeta.
|
||||
let g = F::Extension::primitive_root_of_unity(self.degree_bits);
|
||||
let zeta_right = g * zeta;
|
||||
let zeta_right_batch = FriBatchInfo {
|
||||
point: zeta_right,
|
||||
polynomials: self.fri_zs_polys(),
|
||||
};
|
||||
|
||||
let openings = vec![zeta_batch, zeta_right_batch];
|
||||
FriInstanceInfo {
|
||||
oracles: FRI_ORACLES.to_vec(),
|
||||
batches: openings,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn get_fri_instance_target(
|
||||
&self,
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
zeta: ExtensionTarget<D>,
|
||||
) -> FriInstanceInfoTarget<D> {
|
||||
// All polynomials are opened at zeta.
|
||||
let zeta_batch = FriBatchInfoTarget {
|
||||
point: zeta,
|
||||
polynomials: self.fri_all_polys(),
|
||||
};
|
||||
|
||||
// The Z polynomials are also opened at g * zeta.
|
||||
let g = F::primitive_root_of_unity(self.degree_bits);
|
||||
let zeta_right = builder.mul_const_extension(g, zeta);
|
||||
let zeta_right_batch = FriBatchInfoTarget {
|
||||
point: zeta_right,
|
||||
polynomials: self.fri_zs_polys(),
|
||||
};
|
||||
|
||||
let openings = vec![zeta_batch, zeta_right_batch];
|
||||
FriInstanceInfoTarget {
|
||||
oracles: FRI_ORACLES.to_vec(),
|
||||
batches: openings,
|
||||
}
|
||||
}
|
||||
|
||||
fn fri_preprocessed_polys(&self) -> Vec<FriPolynomialInfo> {
|
||||
FriPolynomialInfo::from_range(
|
||||
PlonkOracle::CONSTANTS_SIGMAS.index,
|
||||
0..self.num_preprocessed_polys(),
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn num_preprocessed_polys(&self) -> usize {
|
||||
self.sigmas_range().end
|
||||
}
|
||||
|
||||
fn fri_wire_polys(&self) -> Vec<FriPolynomialInfo> {
|
||||
let num_wire_polys = self.config.num_wires;
|
||||
FriPolynomialInfo::from_range(PlonkOracle::WIRES.index, 0..num_wire_polys)
|
||||
}
|
||||
|
||||
fn fri_zs_partial_products_polys(&self) -> Vec<FriPolynomialInfo> {
|
||||
FriPolynomialInfo::from_range(
|
||||
PlonkOracle::ZS_PARTIAL_PRODUCTS.index,
|
||||
0..self.num_zs_partial_products_polys(),
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn num_zs_partial_products_polys(&self) -> usize {
|
||||
self.config.num_challenges * (1 + self.num_partial_products)
|
||||
}
|
||||
|
||||
fn fri_zs_polys(&self) -> Vec<FriPolynomialInfo> {
|
||||
FriPolynomialInfo::from_range(PlonkOracle::ZS_PARTIAL_PRODUCTS.index, self.zs_range())
|
||||
}
|
||||
|
||||
fn fri_quotient_polys(&self) -> Vec<FriPolynomialInfo> {
|
||||
FriPolynomialInfo::from_range(PlonkOracle::QUOTIENT.index, 0..self.num_quotient_polys())
|
||||
}
|
||||
|
||||
pub(crate) fn num_quotient_polys(&self) -> usize {
|
||||
self.config.num_challenges * self.quotient_degree_factor
|
||||
}
|
||||
|
||||
fn fri_all_polys(&self) -> Vec<FriPolynomialInfo> {
|
||||
[
|
||||
self.fri_preprocessed_polys(),
|
||||
self.fri_wire_polys(),
|
||||
self.fri_zs_partial_products_polys(),
|
||||
self.fri_quotient_polys(),
|
||||
]
|
||||
.concat()
|
||||
}
|
||||
}
|
||||
|
||||
/// The `Target` version of `VerifierCircuitData`, for use inside recursive circuits. Note that this
|
||||
|
||||
@ -5,7 +5,6 @@ use plonky2_field::extension_field::{Extendable, FieldExtension};
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
|
||||
use crate::hash::gmimc::GMiMCHash;
|
||||
use crate::hash::hash_types::HashOut;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::hash::hashing::{PlonkyPermutation, SPONGE_WIDTH};
|
||||
@ -32,7 +31,39 @@ pub trait Hasher<F: RichField>: Sized + Clone + Debug + Eq + PartialEq {
|
||||
/// Permutation used in the sponge construction.
|
||||
type Permutation: PlonkyPermutation<F>;
|
||||
|
||||
fn hash(input: Vec<F>, pad: bool) -> Self::Hash;
|
||||
/// Hash a message without any padding step. Note that this can enable length-extension attacks.
|
||||
/// However, it is still collision-resistant in cases where the input has a fixed length.
|
||||
fn hash_no_pad(input: &[F]) -> Self::Hash;
|
||||
|
||||
/// Pad the message using the `pad10*1` rule, then hash it.
|
||||
fn hash_pad(input: &[F]) -> Self::Hash {
|
||||
let mut padded_input = input.to_vec();
|
||||
padded_input.push(F::ONE);
|
||||
while (padded_input.len() + 1) % SPONGE_WIDTH != 0 {
|
||||
padded_input.push(F::ZERO);
|
||||
}
|
||||
padded_input.push(F::ONE);
|
||||
Self::hash_no_pad(&padded_input)
|
||||
}
|
||||
|
||||
/// Hash the slice if necessary to reduce its length to ~256 bits. If it already fits, this is a
|
||||
/// no-op.
|
||||
fn hash_or_noop(inputs: &[F]) -> Self::Hash
|
||||
where
|
||||
[(); Self::HASH_SIZE]:,
|
||||
{
|
||||
if inputs.len() <= 4 {
|
||||
let mut inputs_bytes = [0u8; Self::HASH_SIZE];
|
||||
for i in 0..inputs.len() {
|
||||
inputs_bytes[i * 8..(i + 1) * 8]
|
||||
.copy_from_slice(&inputs[i].to_canonical_u64().to_le_bytes());
|
||||
}
|
||||
Self::Hash::from_bytes(&inputs_bytes)
|
||||
} else {
|
||||
Self::hash_no_pad(inputs)
|
||||
}
|
||||
}
|
||||
|
||||
fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash;
|
||||
}
|
||||
|
||||
@ -66,45 +97,16 @@ pub trait GenericConfig<const D: usize>:
|
||||
type InnerHasher: AlgebraicHasher<Self::F>;
|
||||
}
|
||||
|
||||
/// Configuration trait for "algebraic" configurations, i.e., those using an algebraic hash function
|
||||
/// in Merkle trees.
|
||||
/// Same as `GenericConfig` trait but with `InnerHasher: AlgebraicHasher<F>`.
|
||||
pub trait AlgebraicConfig<const D: usize>:
|
||||
Debug + Clone + Sync + Sized + Send + Eq + PartialEq
|
||||
{
|
||||
type F: RichField + Extendable<D, Extension = Self::FE>;
|
||||
type FE: FieldExtension<D, BaseField = Self::F>;
|
||||
type Hasher: AlgebraicHasher<Self::F>;
|
||||
type InnerHasher: AlgebraicHasher<Self::F>;
|
||||
}
|
||||
|
||||
impl<A: AlgebraicConfig<D>, const D: usize> GenericConfig<D> for A {
|
||||
type F = <Self as AlgebraicConfig<D>>::F;
|
||||
type FE = <Self as AlgebraicConfig<D>>::FE;
|
||||
type Hasher = <Self as AlgebraicConfig<D>>::Hasher;
|
||||
type InnerHasher = <Self as AlgebraicConfig<D>>::InnerHasher;
|
||||
}
|
||||
|
||||
/// Configuration using Poseidon over the Goldilocks field.
|
||||
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||
pub struct PoseidonGoldilocksConfig;
|
||||
impl AlgebraicConfig<2> for PoseidonGoldilocksConfig {
|
||||
impl GenericConfig<2> for PoseidonGoldilocksConfig {
|
||||
type F = GoldilocksField;
|
||||
type FE = QuadraticExtension<Self::F>;
|
||||
type Hasher = PoseidonHash;
|
||||
type InnerHasher = PoseidonHash;
|
||||
}
|
||||
|
||||
/// Configuration using GMiMC over the Goldilocks field.
|
||||
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||
pub struct GMiMCGoldilocksConfig;
|
||||
impl AlgebraicConfig<2> for GMiMCGoldilocksConfig {
|
||||
type F = GoldilocksField;
|
||||
type FE = QuadraticExtension<Self::F>;
|
||||
type Hasher = GMiMCHash;
|
||||
type InnerHasher = GMiMCHash;
|
||||
}
|
||||
|
||||
/// Configuration using truncated Keccak over the Goldilocks field.
|
||||
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||
pub struct KeccakGoldilocksConfig;
|
||||
|
||||
@ -3,16 +3,20 @@ use std::collections::HashSet;
|
||||
use plonky2_field::extension_field::Extendable;
|
||||
use plonky2_field::polynomial::PolynomialCoeffs;
|
||||
|
||||
use crate::fri::proof::{CompressedFriProof, FriProof};
|
||||
use crate::fri::verifier::{compute_evaluation, fri_combine_initial, PrecomputedReducedEvals};
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::fri::proof::{CompressedFriProof, FriChallenges, FriProof, FriProofTarget};
|
||||
use crate::fri::verifier::{compute_evaluation, fri_combine_initial, PrecomputedReducedOpenings};
|
||||
use crate::gadgets::polynomial::PolynomialCoeffsExtTarget;
|
||||
use crate::hash::hash_types::{HashOutTarget, MerkleCapTarget, RichField};
|
||||
use crate::hash::merkle_tree::MerkleCap;
|
||||
use crate::iop::challenger::Challenger;
|
||||
use crate::iop::challenger::{Challenger, RecursiveChallenger};
|
||||
use crate::iop::target::Target;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::circuit_data::CommonCircuitData;
|
||||
use crate::plonk::config::{GenericConfig, Hasher};
|
||||
use crate::plonk::config::{AlgebraicHasher, GenericConfig, Hasher};
|
||||
use crate::plonk::proof::{
|
||||
CompressedProof, CompressedProofWithPublicInputs, FriInferredElements, OpeningSet, Proof,
|
||||
ProofChallenges, ProofWithPublicInputs,
|
||||
CompressedProof, CompressedProofWithPublicInputs, FriInferredElements, OpeningSet,
|
||||
OpeningSetTarget, Proof, ProofChallenges, ProofChallengesTarget, ProofTarget,
|
||||
ProofWithPublicInputs, ProofWithPublicInputsTarget,
|
||||
};
|
||||
use crate::util::reverse_bits;
|
||||
|
||||
@ -29,8 +33,6 @@ fn get_challenges<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, cons
|
||||
) -> anyhow::Result<ProofChallenges<F, D>> {
|
||||
let config = &common_data.config;
|
||||
let num_challenges = config.num_challenges;
|
||||
let num_fri_queries = config.fri_config.num_query_rounds;
|
||||
let lde_size = common_data.lde_size();
|
||||
|
||||
let mut challenger = Challenger::<F, C::Hasher>::new();
|
||||
|
||||
@ -48,47 +50,20 @@ fn get_challenges<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, cons
|
||||
challenger.observe_cap(quotient_polys_cap);
|
||||
let plonk_zeta = challenger.get_extension_challenge::<D>();
|
||||
|
||||
challenger.observe_opening_set(openings);
|
||||
|
||||
// Scaling factor to combine polynomials.
|
||||
let fri_alpha = challenger.get_extension_challenge::<D>();
|
||||
|
||||
// Recover the random betas used in the FRI reductions.
|
||||
let fri_betas = commit_phase_merkle_caps
|
||||
.iter()
|
||||
.map(|cap| {
|
||||
challenger.observe_cap(cap);
|
||||
challenger.get_extension_challenge::<D>()
|
||||
})
|
||||
.collect();
|
||||
|
||||
challenger.observe_extension_elements(&final_poly.coeffs);
|
||||
|
||||
let fri_pow_response = C::InnerHasher::hash(
|
||||
challenger
|
||||
.get_hash()
|
||||
.elements
|
||||
.iter()
|
||||
.copied()
|
||||
.chain(Some(pow_witness))
|
||||
.collect(),
|
||||
false,
|
||||
)
|
||||
.elements[0];
|
||||
|
||||
let fri_query_indices = (0..num_fri_queries)
|
||||
.map(|_| challenger.get_challenge().to_canonical_u64() as usize % lde_size)
|
||||
.collect();
|
||||
challenger.observe_openings(&openings.to_fri_openings());
|
||||
|
||||
Ok(ProofChallenges {
|
||||
plonk_betas,
|
||||
plonk_gammas,
|
||||
plonk_alphas,
|
||||
plonk_zeta,
|
||||
fri_alpha,
|
||||
fri_betas,
|
||||
fri_pow_response,
|
||||
fri_query_indices,
|
||||
fri_challenges: challenger.fri_challenges::<C, D>(
|
||||
commit_phase_merkle_caps,
|
||||
final_poly,
|
||||
pow_witness,
|
||||
common_data.degree_bits,
|
||||
&config.fri_config,
|
||||
),
|
||||
})
|
||||
}
|
||||
|
||||
@ -99,12 +74,16 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
&self,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> anyhow::Result<Vec<usize>> {
|
||||
Ok(self.get_challenges(common_data)?.fri_query_indices)
|
||||
Ok(self
|
||||
.get_challenges(self.get_public_inputs_hash(), common_data)?
|
||||
.fri_challenges
|
||||
.fri_query_indices)
|
||||
}
|
||||
|
||||
/// Computes all Fiat-Shamir challenges used in the Plonk proof.
|
||||
pub(crate) fn get_challenges(
|
||||
&self,
|
||||
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> anyhow::Result<ProofChallenges<F, D>> {
|
||||
let Proof {
|
||||
@ -122,7 +101,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
} = &self.proof;
|
||||
|
||||
get_challenges(
|
||||
self.get_public_inputs_hash(),
|
||||
public_inputs_hash,
|
||||
wires_cap,
|
||||
plonk_zs_partial_products_cap,
|
||||
quotient_polys_cap,
|
||||
@ -141,6 +120,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
/// Computes all Fiat-Shamir challenges used in the Plonk proof.
|
||||
pub(crate) fn get_challenges(
|
||||
&self,
|
||||
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> anyhow::Result<ProofChallenges<F, D>> {
|
||||
let CompressedProof {
|
||||
@ -158,7 +138,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
} = &self.proof;
|
||||
|
||||
get_challenges(
|
||||
self.get_public_inputs_hash(),
|
||||
public_inputs_hash,
|
||||
wires_cap,
|
||||
plonk_zs_partial_products_cap,
|
||||
quotient_polys_cap,
|
||||
@ -178,34 +158,40 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
) -> FriInferredElements<F, D> {
|
||||
let ProofChallenges {
|
||||
plonk_zeta,
|
||||
fri_alpha,
|
||||
fri_betas,
|
||||
fri_query_indices,
|
||||
fri_challenges:
|
||||
FriChallenges {
|
||||
fri_alpha,
|
||||
fri_betas,
|
||||
fri_query_indices,
|
||||
..
|
||||
},
|
||||
..
|
||||
} = challenges;
|
||||
let mut fri_inferred_elements = Vec::new();
|
||||
// Holds the indices that have already been seen at each reduction depth.
|
||||
let mut seen_indices_by_depth =
|
||||
vec![HashSet::new(); common_data.fri_params.reduction_arity_bits.len()];
|
||||
let precomputed_reduced_evals =
|
||||
PrecomputedReducedEvals::from_os_and_alpha(&self.proof.openings, *fri_alpha);
|
||||
let precomputed_reduced_evals = PrecomputedReducedOpenings::from_os_and_alpha(
|
||||
&self.proof.openings.to_fri_openings(),
|
||||
*fri_alpha,
|
||||
);
|
||||
let log_n = common_data.degree_bits + common_data.config.fri_config.rate_bits;
|
||||
// Simulate the proof verification and collect the inferred elements.
|
||||
// The content of the loop is basically the same as the `fri_verifier_query_round` function.
|
||||
for &(mut x_index) in fri_query_indices {
|
||||
let mut subgroup_x = F::MULTIPLICATIVE_GROUP_GENERATOR
|
||||
* F::primitive_root_of_unity(log_n).exp_u64(reverse_bits(x_index, log_n) as u64);
|
||||
let mut old_eval = fri_combine_initial(
|
||||
let mut old_eval = fri_combine_initial::<F, C, D>(
|
||||
&common_data.get_fri_instance(*plonk_zeta),
|
||||
&self
|
||||
.proof
|
||||
.opening_proof
|
||||
.query_round_proofs
|
||||
.initial_trees_proofs[&x_index],
|
||||
*fri_alpha,
|
||||
*plonk_zeta,
|
||||
subgroup_x,
|
||||
precomputed_reduced_evals,
|
||||
common_data,
|
||||
&precomputed_reduced_evals,
|
||||
&common_data.fri_params,
|
||||
);
|
||||
for (i, &arity_bits) in common_data
|
||||
.fri_params
|
||||
@ -239,3 +225,96 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
FriInferredElements(fri_inferred_elements)
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
fn get_challenges<C: GenericConfig<D, F = F>>(
|
||||
&mut self,
|
||||
public_inputs_hash: HashOutTarget,
|
||||
wires_cap: &MerkleCapTarget,
|
||||
plonk_zs_partial_products_cap: &MerkleCapTarget,
|
||||
quotient_polys_cap: &MerkleCapTarget,
|
||||
openings: &OpeningSetTarget<D>,
|
||||
commit_phase_merkle_caps: &[MerkleCapTarget],
|
||||
final_poly: &PolynomialCoeffsExtTarget<D>,
|
||||
pow_witness: Target,
|
||||
inner_common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> ProofChallengesTarget<D>
|
||||
where
|
||||
C::Hasher: AlgebraicHasher<F>,
|
||||
{
|
||||
let config = &inner_common_data.config;
|
||||
let num_challenges = config.num_challenges;
|
||||
|
||||
let mut challenger = RecursiveChallenger::<F, C::Hasher, D>::new(self);
|
||||
|
||||
// Observe the instance.
|
||||
let digest =
|
||||
HashOutTarget::from_vec(self.constants(&inner_common_data.circuit_digest.elements));
|
||||
challenger.observe_hash(&digest);
|
||||
challenger.observe_hash(&public_inputs_hash);
|
||||
|
||||
challenger.observe_cap(wires_cap);
|
||||
let plonk_betas = challenger.get_n_challenges(self, num_challenges);
|
||||
let plonk_gammas = challenger.get_n_challenges(self, num_challenges);
|
||||
|
||||
challenger.observe_cap(plonk_zs_partial_products_cap);
|
||||
let plonk_alphas = challenger.get_n_challenges(self, num_challenges);
|
||||
|
||||
challenger.observe_cap(quotient_polys_cap);
|
||||
let plonk_zeta = challenger.get_extension_challenge(self);
|
||||
|
||||
challenger.observe_openings(&openings.to_fri_openings());
|
||||
|
||||
ProofChallengesTarget {
|
||||
plonk_betas,
|
||||
plonk_gammas,
|
||||
plonk_alphas,
|
||||
plonk_zeta,
|
||||
fri_challenges: challenger.fri_challenges::<C>(
|
||||
self,
|
||||
commit_phase_merkle_caps,
|
||||
final_poly,
|
||||
pow_witness,
|
||||
inner_common_data,
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<const D: usize> ProofWithPublicInputsTarget<D> {
|
||||
pub(crate) fn get_challenges<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>>(
|
||||
&self,
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
public_inputs_hash: HashOutTarget,
|
||||
inner_common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> ProofChallengesTarget<D>
|
||||
where
|
||||
C::Hasher: AlgebraicHasher<F>,
|
||||
{
|
||||
let ProofTarget {
|
||||
wires_cap,
|
||||
plonk_zs_partial_products_cap,
|
||||
quotient_polys_cap,
|
||||
openings,
|
||||
opening_proof:
|
||||
FriProofTarget {
|
||||
commit_phase_merkle_caps,
|
||||
final_poly,
|
||||
pow_witness,
|
||||
..
|
||||
},
|
||||
} = &self.proof;
|
||||
|
||||
builder.get_challenges(
|
||||
public_inputs_hash,
|
||||
wires_cap,
|
||||
plonk_zs_partial_products_cap,
|
||||
quotient_polys_cap,
|
||||
openings,
|
||||
commit_phase_merkle_caps,
|
||||
final_poly,
|
||||
*pow_witness,
|
||||
inner_common_data,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@ -4,7 +4,7 @@ pub mod config;
|
||||
pub(crate) mod copy_constraint;
|
||||
mod get_challenges;
|
||||
pub(crate) mod permutation_argument;
|
||||
pub(crate) mod plonk_common;
|
||||
pub mod plonk_common;
|
||||
pub mod proof;
|
||||
pub mod prover;
|
||||
pub mod recursive_verifier;
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user