From 58258938452b1ac80540bb3423c3359ec643a885 Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Thu, 6 Jan 2022 08:37:34 -0800 Subject: [PATCH 001/143] Remove feature(asm_sym) (#418) --- .../src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs | 8 ++++---- plonky2/src/lib.rs | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs b/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs index 0fddeba7..934583d6 100644 --- a/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs +++ b/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs @@ -757,9 +757,9 @@ unsafe fn partial_round( // multiplication where we've set the first element to 0.) Add the remaining bits now. // TODO: This is a bit of an afterthought, which is why these constants are loaded 22 // times... There's likely a better way of merging those results. - "vmovdqu ymm6, {mds_matrix}[rip]", - "vmovdqu ymm7, {mds_matrix}[rip + 32]", - "vmovdqu ymm8, {mds_matrix}[rip + 64]", + "vmovdqu ymm6, [{mds_matrix}]", + "vmovdqu ymm7, [{mds_matrix} + 32]", + "vmovdqu ymm8, [{mds_matrix} + 64]", "vpsllvq ymm9, ymm13, ymm6", "vpsllvq ymm10, ymm13, ymm7", "vpsllvq ymm11, ymm13, ymm8", @@ -775,7 +775,7 @@ unsafe fn partial_round( // Reduction required. state0a = in(reg) state0a, - mds_matrix = sym TOP_ROW_EXPS, + mds_matrix = in(reg) &TOP_ROW_EXPS, inout("ymm0") unreduced_lo0_s, inout("ymm1") unreduced_lo1_s, inout("ymm2") unreduced_lo2_s, diff --git a/plonky2/src/lib.rs b/plonky2/src/lib.rs index 3bddec82..e5e77bb9 100644 --- a/plonky2/src/lib.rs +++ b/plonky2/src/lib.rs @@ -6,7 +6,6 @@ #![allow(clippy::len_without_is_empty)] #![allow(clippy::needless_range_loop)] #![allow(clippy::return_self_not_must_use)] -#![feature(asm_sym)] #![feature(generic_const_exprs)] #![feature(specialization)] #![feature(stdsimd)] From a6e1f7ccad8677f5f432db2534eb26aa760b6afa Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Thu, 6 Jan 2022 09:16:54 -0800 Subject: [PATCH 002/143] Aarch64: Minor optimization to Poseidon full layers (#420) * Aarch64: Minor optimization to Poseidon full layers * Daniel PR comment --- .../arch/aarch64/poseidon_goldilocks_neon.rs | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs b/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs index a7f61bf5..f903cd96 100644 --- a/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs +++ b/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs @@ -5,6 +5,7 @@ use std::arch::asm; use plonky2_field::field_types::PrimeField; use plonky2_field::goldilocks_field::GoldilocksField; +use plonky2_util::branch_hint; use static_assertions::const_assert; use unroll::unroll_for_loops; @@ -108,6 +109,8 @@ const_assert!(check_round_const_bounds_init()); // ====================================== SCALAR ARITHMETIC ======================================= +const EPSILON: u64 = 0xffffffff; + /// Addition modulo ORDER accounting for wraparound. Correct only when a + b < 2**64 + ORDER. #[inline(always)] unsafe fn add_with_wraparound(a: u64, b: u64) -> u64 { @@ -124,39 +127,36 @@ unsafe fn add_with_wraparound(a: u64, b: u64) -> u64 { adj = lateout(reg) adj, options(pure, nomem, nostack), ); - res.wrapping_add(adj) // adj is EPSILON if wraparound occured and 0 otherwise + res + adj // adj is EPSILON if wraparound occured and 0 otherwise } -/// Addition of a and (b >> 32) modulo ORDER accounting for wraparound. 
+/// Subtraction of a and (b >> 32) modulo ORDER accounting for wraparound. #[inline(always)] unsafe fn sub_with_wraparound_lsr32(a: u64, b: u64) -> u64 { - let res: u64; - let adj: u64; - asm!( - "subs {res}, {a}, {b}, lsr #32", - // Set adj to 0xffffffff if subtraction underflowed and 0 otherwise. - // 'cc' for 'carry clear'. - // NB: The CF in ARM subtraction is the opposite of x86: CF set == underflow did not occur. - "csetm {adj:w}, cc", - a = in(reg) a, - b = in(reg) b, - res = lateout(reg) res, - adj = lateout(reg) adj, - options(pure, nomem, nostack), - ); - res.wrapping_sub(adj) // adj is EPSILON if underflow occured and 0 otherwise. + let b_hi = b >> 32; + // This could be done with a.overflowing_add(b_hi), but `checked_sub` signals to the compiler + // that overflow is unlikely (note: this is a standard library implementation detail, not part + // of the spec). + match a.checked_sub(b_hi) { + Some(res) => res, + None => { + // Super rare. Better off branching. + branch_hint(); + let res_wrapped = a.wrapping_sub(b_hi); + res_wrapped - EPSILON + } + } } /// Multiplication of the low word (i.e., x as u32) by EPSILON. #[inline(always)] unsafe fn mul_epsilon(x: u64) -> u64 { let res; - let epsilon: u64 = 0xffffffff; asm!( // Use UMULL to save one instruction. The compiler emits two: extract the low word and then multiply. "umull {res}, {x:w}, {epsilon:w}", x = in(reg) x, - epsilon = in(reg) epsilon, + epsilon = in(reg) EPSILON, res = lateout(reg) res, options(pure, nomem, nostack, preserves_flags), ); From f072d09ae43a525e72cff8e1dfbc3f8276393fc0 Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Thu, 6 Jan 2022 09:19:32 -0800 Subject: [PATCH 003/143] AVX-512 packed Goldilocks (#400) * WIP AVX-512 Goldilocks * Fix tests * fmt * Hamish PR comment --- .../arch/x86_64/avx512_goldilocks_field.rs | 656 ++++++++++++++++++ field/src/arch/x86_64/mod.rs | 20 +- field/src/lib.rs | 1 + field/src/packable.rs | 24 +- 4 files changed, 699 insertions(+), 2 deletions(-) create mode 100644 field/src/arch/x86_64/avx512_goldilocks_field.rs diff --git a/field/src/arch/x86_64/avx512_goldilocks_field.rs b/field/src/arch/x86_64/avx512_goldilocks_field.rs new file mode 100644 index 00000000..ede87626 --- /dev/null +++ b/field/src/arch/x86_64/avx512_goldilocks_field.rs @@ -0,0 +1,656 @@ +use core::arch::x86_64::*; +use std::fmt; +use std::fmt::{Debug, Formatter}; +use std::iter::{Product, Sum}; +use std::mem::transmute; +use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}; + +use crate::field_types::{Field, PrimeField}; +use crate::goldilocks_field::GoldilocksField; +use crate::ops::Square; +use crate::packed_field::PackedField; + +// Ideally `Avx512GoldilocksField` would wrap `__m512i`. Unfortunately, `__m512i` has an alignment +// of 64B, which would preclude us from casting `[GoldilocksField; 8]` (alignment 8B) to +// `Avx512GoldilocksField`. We need to ensure that `Avx512GoldilocksField` has the same alignment as +// `GoldilocksField`. Thus we wrap `[GoldilocksField; 8]` and use the `new` and `get` methods to +// convert to and from `__m512i`. 
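+// Illustrative sketch (hypothetical `values` and `i`, not named in this file): matching
+// `GoldilocksField`'s alignment is what keeps the pointer casts in `from_slice` /
+// `from_slice_mut` below sound, e.g.
+//
+//     let chunk: &[GoldilocksField] = &values[i..i + 8]; // only 8B-aligned
+//     let packed = Avx512GoldilocksField::from_slice(chunk);
+//
+// With a `__m512i` payload (64B alignment) the same cast could yield an under-aligned reference.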
+#[derive(Copy, Clone)] +#[repr(transparent)] +pub struct Avx512GoldilocksField(pub [GoldilocksField; 8]); + +impl Avx512GoldilocksField { + #[inline] + fn new(x: __m512i) -> Self { + unsafe { transmute(x) } + } + #[inline] + fn get(&self) -> __m512i { + unsafe { transmute(*self) } + } +} + +unsafe impl PackedField for Avx512GoldilocksField { + const WIDTH: usize = 8; + + type Scalar = GoldilocksField; + + const ZEROS: Self = Self([GoldilocksField::ZERO; 8]); + const ONES: Self = Self([GoldilocksField::ONE; 8]); + + #[inline] + fn from_arr(arr: [Self::Scalar; Self::WIDTH]) -> Self { + Self(arr) + } + + #[inline] + fn as_arr(&self) -> [Self::Scalar; Self::WIDTH] { + self.0 + } + + #[inline] + fn from_slice(slice: &[Self::Scalar]) -> &Self { + assert_eq!(slice.len(), Self::WIDTH); + unsafe { &*slice.as_ptr().cast() } + } + #[inline] + fn from_slice_mut(slice: &mut [Self::Scalar]) -> &mut Self { + assert_eq!(slice.len(), Self::WIDTH); + unsafe { &mut *slice.as_mut_ptr().cast() } + } + #[inline] + fn as_slice(&self) -> &[Self::Scalar] { + &self.0[..] + } + #[inline] + fn as_slice_mut(&mut self) -> &mut [Self::Scalar] { + &mut self.0[..] + } + + #[inline] + fn interleave(&self, other: Self, block_len: usize) -> (Self, Self) { + let (v0, v1) = (self.get(), other.get()); + let (res0, res1) = match block_len { + 1 => unsafe { interleave1(v0, v1) }, + 2 => unsafe { interleave2(v0, v1) }, + 4 => unsafe { interleave4(v0, v1) }, + 8 => (v0, v1), + _ => panic!("unsupported block_len"), + }; + (Self::new(res0), Self::new(res1)) + } +} + +impl Add for Avx512GoldilocksField { + type Output = Self; + #[inline] + fn add(self, rhs: Self) -> Self { + Self::new(unsafe { add(self.get(), rhs.get()) }) + } +} +impl Add for Avx512GoldilocksField { + type Output = Self; + #[inline] + fn add(self, rhs: GoldilocksField) -> Self { + self + Self::from(rhs) + } +} +impl Add for GoldilocksField { + type Output = Avx512GoldilocksField; + #[inline] + fn add(self, rhs: Self::Output) -> Self::Output { + Self::Output::from(self) + rhs + } +} +impl AddAssign for Avx512GoldilocksField { + #[inline] + fn add_assign(&mut self, rhs: Self) { + *self = *self + rhs; + } +} +impl AddAssign for Avx512GoldilocksField { + #[inline] + fn add_assign(&mut self, rhs: GoldilocksField) { + *self = *self + rhs; + } +} + +impl Debug for Avx512GoldilocksField { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "({:?})", self.get()) + } +} + +impl Default for Avx512GoldilocksField { + #[inline] + fn default() -> Self { + Self::ZEROS + } +} + +impl Div for Avx512GoldilocksField { + type Output = Self; + #[inline] + fn div(self, rhs: GoldilocksField) -> Self { + self * rhs.inverse() + } +} +impl DivAssign for Avx512GoldilocksField { + #[inline] + fn div_assign(&mut self, rhs: GoldilocksField) { + *self *= rhs.inverse(); + } +} + +impl From for Avx512GoldilocksField { + fn from(x: GoldilocksField) -> Self { + Self([x; 8]) + } +} + +impl Mul for Avx512GoldilocksField { + type Output = Self; + #[inline] + fn mul(self, rhs: Self) -> Self { + Self::new(unsafe { mul(self.get(), rhs.get()) }) + } +} +impl Mul for Avx512GoldilocksField { + type Output = Self; + #[inline] + fn mul(self, rhs: GoldilocksField) -> Self { + self * Self::from(rhs) + } +} +impl Mul for GoldilocksField { + type Output = Avx512GoldilocksField; + #[inline] + fn mul(self, rhs: Avx512GoldilocksField) -> Self::Output { + Self::Output::from(self) * rhs + } +} +impl MulAssign for Avx512GoldilocksField { + #[inline] + fn mul_assign(&mut self, rhs: Self) { + 
*self = *self * rhs; + } +} +impl MulAssign for Avx512GoldilocksField { + #[inline] + fn mul_assign(&mut self, rhs: GoldilocksField) { + *self = *self * rhs; + } +} + +impl Neg for Avx512GoldilocksField { + type Output = Self; + #[inline] + fn neg(self) -> Self { + Self::new(unsafe { neg(self.get()) }) + } +} + +impl Product for Avx512GoldilocksField { + #[inline] + fn product>(iter: I) -> Self { + iter.reduce(|x, y| x * y).unwrap_or(Self::ONES) + } +} + +impl Square for Avx512GoldilocksField { + #[inline] + fn square(&self) -> Self { + Self::new(unsafe { square(self.get()) }) + } +} + +impl Sub for Avx512GoldilocksField { + type Output = Self; + #[inline] + fn sub(self, rhs: Self) -> Self { + Self::new(unsafe { sub(self.get(), rhs.get()) }) + } +} +impl Sub for Avx512GoldilocksField { + type Output = Self; + #[inline] + fn sub(self, rhs: GoldilocksField) -> Self { + self - Self::from(rhs) + } +} +impl Sub for GoldilocksField { + type Output = Avx512GoldilocksField; + #[inline] + fn sub(self, rhs: Avx512GoldilocksField) -> Self::Output { + Self::Output::from(self) - rhs + } +} +impl SubAssign for Avx512GoldilocksField { + #[inline] + fn sub_assign(&mut self, rhs: Self) { + *self = *self - rhs; + } +} +impl SubAssign for Avx512GoldilocksField { + #[inline] + fn sub_assign(&mut self, rhs: GoldilocksField) { + *self = *self - rhs; + } +} + +impl Sum for Avx512GoldilocksField { + #[inline] + fn sum>(iter: I) -> Self { + iter.reduce(|x, y| x + y).unwrap_or(Self::ZEROS) + } +} + +const FIELD_ORDER: __m512i = unsafe { transmute([GoldilocksField::ORDER; 8]) }; +const EPSILON: __m512i = unsafe { transmute([GoldilocksField::ORDER.wrapping_neg(); 8]) }; + +#[inline] +unsafe fn canonicalize(x: __m512i) -> __m512i { + let mask = _mm512_cmpge_epu64_mask(x, FIELD_ORDER); + _mm512_mask_sub_epi64(x, mask, x, FIELD_ORDER) +} + +#[inline] +unsafe fn add_no_double_overflow_64_64(x: __m512i, y: __m512i) -> __m512i { + let res_wrapped = _mm512_add_epi64(x, y); + let mask = _mm512_cmplt_epu64_mask(res_wrapped, y); // mask set if add overflowed + let res = _mm512_mask_sub_epi64(res_wrapped, mask, res_wrapped, FIELD_ORDER); + res +} + +#[inline] +unsafe fn sub_no_double_overflow_64_64(x: __m512i, y: __m512i) -> __m512i { + let mask = _mm512_cmplt_epu64_mask(x, y); // mask set if sub will underflow (x < y) + let res_wrapped = _mm512_sub_epi64(x, y); + let res = _mm512_mask_add_epi64(res_wrapped, mask, res_wrapped, FIELD_ORDER); + res +} + +#[inline] +unsafe fn add(x: __m512i, y: __m512i) -> __m512i { + add_no_double_overflow_64_64(x, canonicalize(y)) +} + +#[inline] +unsafe fn sub(x: __m512i, y: __m512i) -> __m512i { + sub_no_double_overflow_64_64(x, canonicalize(y)) +} + +#[inline] +unsafe fn neg(y: __m512i) -> __m512i { + _mm512_sub_epi64(FIELD_ORDER, canonicalize(y)) +} + +const LO_32_BITS_MASK: __mmask16 = unsafe { transmute(0b0101010101010101u16) }; + +#[inline] +unsafe fn mul64_64(x: __m512i, y: __m512i) -> (__m512i, __m512i) { + // We want to move the high 32 bits to the low position. The multiplication instruction ignores + // the high 32 bits, so it's ok to just duplicate it into the low position. This duplication can + // be done on port 5; bitshifts run on port 0, competing with multiplication. + // This instruction is only provided for 32-bit floats, not integers. Idk why Intel makes the + // distinction; the casts are free and it guarantees that the exact bit pattern is preserved. + // Using a swizzle instruction of the wrong domain (float vs int) does not increase latency + // since Haswell. 
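+    // Illustrative lane view (hypothetical values): for a 64-bit lane x = 0xAAAAAAAA_BBBBBBBB,
+    // the movehdup below yields x_hi = 0xAAAAAAAA_AAAAAAAA. Since `_mm512_mul_epu32` multiplies
+    // only the low 32 bits of each 64-bit lane, the {x, x_hi} x {y, y_hi} products below cover
+    // all four 32x32 -> 64-bit partial products of the 64x64 multiplication.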
+ let x_hi = _mm512_castps_si512(_mm512_movehdup_ps(_mm512_castsi512_ps(x))); + let y_hi = _mm512_castps_si512(_mm512_movehdup_ps(_mm512_castsi512_ps(y))); + + // All four pairwise multiplications + let mul_ll = _mm512_mul_epu32(x, y); + let mul_lh = _mm512_mul_epu32(x, y_hi); + let mul_hl = _mm512_mul_epu32(x_hi, y); + let mul_hh = _mm512_mul_epu32(x_hi, y_hi); + + // Bignum addition + // Extract high 32 bits of mul_ll and add to mul_hl. This cannot overflow. + let mul_ll_hi = _mm512_srli_epi64::<32>(mul_ll); + let t0 = _mm512_add_epi64(mul_hl, mul_ll_hi); + // Extract low 32 bits of t0 and add to mul_lh. Again, this cannot overflow. + // Also, extract high 32 bits of t0 and add to mul_hh. + let t0_lo = _mm512_and_si512(t0, EPSILON); + let t0_hi = _mm512_srli_epi64::<32>(t0); + let t1 = _mm512_add_epi64(mul_lh, t0_lo); + let t2 = _mm512_add_epi64(mul_hh, t0_hi); + // Lastly, extract the high 32 bits of t1 and add to t2. + let t1_hi = _mm512_srli_epi64::<32>(t1); + let res_hi = _mm512_add_epi64(t2, t1_hi); + + // Form res_lo by combining the low half of mul_ll with the low half of t1 (shifted into high + // position). + let t1_lo = _mm512_castps_si512(_mm512_moveldup_ps(_mm512_castsi512_ps(t1))); + let res_lo = _mm512_mask_blend_epi32(LO_32_BITS_MASK, t1_lo, mul_ll); + + (res_hi, res_lo) +} + +#[inline] +unsafe fn square64(x: __m512i) -> (__m512i, __m512i) { + // Get high 32 bits of x. See comment in mul64_64_s. + let x_hi = _mm512_castps_si512(_mm512_movehdup_ps(_mm512_castsi512_ps(x))); + + // All pairwise multiplications. + let mul_ll = _mm512_mul_epu32(x, x); + let mul_lh = _mm512_mul_epu32(x, x_hi); + let mul_hh = _mm512_mul_epu32(x_hi, x_hi); + + // Bignum addition, but mul_lh is shifted by 33 bits (not 32). + let mul_ll_hi = _mm512_srli_epi64::<33>(mul_ll); + let t0 = _mm512_add_epi64(mul_lh, mul_ll_hi); + let t0_hi = _mm512_srli_epi64::<31>(t0); + let res_hi = _mm512_add_epi64(mul_hh, t0_hi); + + // Form low result by adding the mul_ll and the low 31 bits of mul_lh (shifted to the high + // position). 
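+    // Why 33-bit shifts: with x = x_hi * 2^32 + x_lo, the square is
+    //     x^2 = mul_hh * 2^64 + 2 * mul_lh * 2^32 + mul_ll,
+    // so the cross term mul_lh appears twice and effectively carries weight 2^33.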
+ let mul_lh_lo = _mm512_slli_epi64::<33>(mul_lh); + let res_lo = _mm512_add_epi64(mul_ll, mul_lh_lo); + + (res_hi, res_lo) +} + +#[inline] +unsafe fn reduce128(x: (__m512i, __m512i)) -> __m512i { + let (hi0, lo0) = x; + let hi_hi0 = _mm512_srli_epi64::<32>(hi0); + let lo1 = sub_no_double_overflow_64_64(lo0, hi_hi0); + let t1 = _mm512_mul_epu32(hi0, EPSILON); + let lo2 = add_no_double_overflow_64_64(lo1, t1); + lo2 +} + +#[inline] +unsafe fn mul(x: __m512i, y: __m512i) -> __m512i { + reduce128(mul64_64(x, y)) +} + +#[inline] +unsafe fn square(x: __m512i) -> __m512i { + reduce128(square64(x)) +} + +#[inline] +unsafe fn interleave1(x: __m512i, y: __m512i) -> (__m512i, __m512i) { + let a = _mm512_unpacklo_epi64(x, y); + let b = _mm512_unpackhi_epi64(x, y); + (a, b) +} + +const INTERLEAVE2_IDX_A: __m512i = unsafe { + transmute([ + 0o00u64, 0o01u64, 0o10u64, 0o11u64, 0o04u64, 0o05u64, 0o14u64, 0o15u64, + ]) +}; +const INTERLEAVE2_IDX_B: __m512i = unsafe { + transmute([ + 0o02u64, 0o03u64, 0o12u64, 0o13u64, 0o06u64, 0o07u64, 0o16u64, 0o17u64, + ]) +}; + +#[inline] +unsafe fn interleave2(x: __m512i, y: __m512i) -> (__m512i, __m512i) { + let a = _mm512_permutex2var_epi64(x, INTERLEAVE2_IDX_A, y); + let b = _mm512_permutex2var_epi64(x, INTERLEAVE2_IDX_B, y); + (a, b) +} + +#[inline] +unsafe fn interleave4(x: __m512i, y: __m512i) -> (__m512i, __m512i) { + let a = _mm512_shuffle_i64x2::<0x44>(x, y); + let b = _mm512_shuffle_i64x2::<0xee>(x, y); + (a, b) +} + +#[cfg(test)] +mod tests { + use crate::arch::x86_64::avx512_goldilocks_field::Avx512GoldilocksField; + use crate::field_types::PrimeField; + use crate::goldilocks_field::GoldilocksField; + use crate::ops::Square; + use crate::packed_field::PackedField; + + fn test_vals_a() -> [GoldilocksField; 8] { + [ + GoldilocksField::from_noncanonical_u64(14479013849828404771), + GoldilocksField::from_noncanonical_u64(9087029921428221768), + GoldilocksField::from_noncanonical_u64(2441288194761790662), + GoldilocksField::from_noncanonical_u64(5646033492608483824), + GoldilocksField::from_noncanonical_u64(2779181197214900072), + GoldilocksField::from_noncanonical_u64(2989742820063487116), + GoldilocksField::from_noncanonical_u64(727880025589250743), + GoldilocksField::from_noncanonical_u64(3803926346107752679), + ] + } + fn test_vals_b() -> [GoldilocksField; 8] { + [ + GoldilocksField::from_noncanonical_u64(17891926589593242302), + GoldilocksField::from_noncanonical_u64(11009798273260028228), + GoldilocksField::from_noncanonical_u64(2028722748960791447), + GoldilocksField::from_noncanonical_u64(7929433601095175579), + GoldilocksField::from_noncanonical_u64(6632528436085461172), + GoldilocksField::from_noncanonical_u64(2145438710786785567), + GoldilocksField::from_noncanonical_u64(11821483668392863016), + GoldilocksField::from_noncanonical_u64(15638272883309521929), + ] + } + + #[test] + fn test_add() { + let a_arr = test_vals_a(); + let b_arr = test_vals_b(); + + let packed_a = Avx512GoldilocksField::from_arr(a_arr); + let packed_b = Avx512GoldilocksField::from_arr(b_arr); + let packed_res = packed_a + packed_b; + let arr_res = packed_res.as_arr(); + + let expected = a_arr.iter().zip(b_arr).map(|(&a, b)| a + b); + for (exp, res) in expected.zip(arr_res) { + assert_eq!(res, exp); + } + } + + #[test] + fn test_mul() { + let a_arr = test_vals_a(); + let b_arr = test_vals_b(); + + let packed_a = Avx512GoldilocksField::from_arr(a_arr); + let packed_b = Avx512GoldilocksField::from_arr(b_arr); + let packed_res = packed_a * packed_b; + let arr_res = 
packed_res.as_arr(); + + let expected = a_arr.iter().zip(b_arr).map(|(&a, b)| a * b); + for (exp, res) in expected.zip(arr_res) { + assert_eq!(res, exp); + } + } + + #[test] + fn test_square() { + let a_arr = test_vals_a(); + + let packed_a = Avx512GoldilocksField::from_arr(a_arr); + let packed_res = packed_a.square(); + let arr_res = packed_res.as_arr(); + + let expected = a_arr.iter().map(|&a| a.square()); + for (exp, res) in expected.zip(arr_res) { + assert_eq!(res, exp); + } + } + + #[test] + fn test_neg() { + let a_arr = test_vals_a(); + + let packed_a = Avx512GoldilocksField::from_arr(a_arr); + let packed_res = -packed_a; + let arr_res = packed_res.as_arr(); + + let expected = a_arr.iter().map(|&a| -a); + for (exp, res) in expected.zip(arr_res) { + assert_eq!(res, exp); + } + } + + #[test] + fn test_sub() { + let a_arr = test_vals_a(); + let b_arr = test_vals_b(); + + let packed_a = Avx512GoldilocksField::from_arr(a_arr); + let packed_b = Avx512GoldilocksField::from_arr(b_arr); + let packed_res = packed_a - packed_b; + let arr_res = packed_res.as_arr(); + + let expected = a_arr.iter().zip(b_arr).map(|(&a, b)| a - b); + for (exp, res) in expected.zip(arr_res) { + assert_eq!(res, exp); + } + } + + #[test] + fn test_interleave_is_involution() { + let a_arr = test_vals_a(); + let b_arr = test_vals_b(); + + let packed_a = Avx512GoldilocksField::from_arr(a_arr); + let packed_b = Avx512GoldilocksField::from_arr(b_arr); + { + // Interleave, then deinterleave. + let (x, y) = packed_a.interleave(packed_b, 1); + let (res_a, res_b) = x.interleave(y, 1); + assert_eq!(res_a.as_arr(), a_arr); + assert_eq!(res_b.as_arr(), b_arr); + } + { + let (x, y) = packed_a.interleave(packed_b, 2); + let (res_a, res_b) = x.interleave(y, 2); + assert_eq!(res_a.as_arr(), a_arr); + assert_eq!(res_b.as_arr(), b_arr); + } + { + let (x, y) = packed_a.interleave(packed_b, 4); + let (res_a, res_b) = x.interleave(y, 4); + assert_eq!(res_a.as_arr(), a_arr); + assert_eq!(res_b.as_arr(), b_arr); + } + { + let (x, y) = packed_a.interleave(packed_b, 8); + let (res_a, res_b) = x.interleave(y, 8); + assert_eq!(res_a.as_arr(), a_arr); + assert_eq!(res_b.as_arr(), b_arr); + } + } + + #[test] + fn test_interleave() { + let in_a: [GoldilocksField; 8] = [ + GoldilocksField::from_noncanonical_u64(00), + GoldilocksField::from_noncanonical_u64(01), + GoldilocksField::from_noncanonical_u64(02), + GoldilocksField::from_noncanonical_u64(03), + GoldilocksField::from_noncanonical_u64(04), + GoldilocksField::from_noncanonical_u64(05), + GoldilocksField::from_noncanonical_u64(06), + GoldilocksField::from_noncanonical_u64(07), + ]; + let in_b: [GoldilocksField; 8] = [ + GoldilocksField::from_noncanonical_u64(10), + GoldilocksField::from_noncanonical_u64(11), + GoldilocksField::from_noncanonical_u64(12), + GoldilocksField::from_noncanonical_u64(13), + GoldilocksField::from_noncanonical_u64(14), + GoldilocksField::from_noncanonical_u64(15), + GoldilocksField::from_noncanonical_u64(16), + GoldilocksField::from_noncanonical_u64(17), + ]; + let int1_a: [GoldilocksField; 8] = [ + GoldilocksField::from_noncanonical_u64(00), + GoldilocksField::from_noncanonical_u64(10), + GoldilocksField::from_noncanonical_u64(02), + GoldilocksField::from_noncanonical_u64(12), + GoldilocksField::from_noncanonical_u64(04), + GoldilocksField::from_noncanonical_u64(14), + GoldilocksField::from_noncanonical_u64(06), + GoldilocksField::from_noncanonical_u64(16), + ]; + let int1_b: [GoldilocksField; 8] = [ + GoldilocksField::from_noncanonical_u64(01), + 
GoldilocksField::from_noncanonical_u64(11), + GoldilocksField::from_noncanonical_u64(03), + GoldilocksField::from_noncanonical_u64(13), + GoldilocksField::from_noncanonical_u64(05), + GoldilocksField::from_noncanonical_u64(15), + GoldilocksField::from_noncanonical_u64(07), + GoldilocksField::from_noncanonical_u64(17), + ]; + let int2_a: [GoldilocksField; 8] = [ + GoldilocksField::from_noncanonical_u64(00), + GoldilocksField::from_noncanonical_u64(01), + GoldilocksField::from_noncanonical_u64(10), + GoldilocksField::from_noncanonical_u64(11), + GoldilocksField::from_noncanonical_u64(04), + GoldilocksField::from_noncanonical_u64(05), + GoldilocksField::from_noncanonical_u64(14), + GoldilocksField::from_noncanonical_u64(15), + ]; + let int2_b: [GoldilocksField; 8] = [ + GoldilocksField::from_noncanonical_u64(02), + GoldilocksField::from_noncanonical_u64(03), + GoldilocksField::from_noncanonical_u64(12), + GoldilocksField::from_noncanonical_u64(13), + GoldilocksField::from_noncanonical_u64(06), + GoldilocksField::from_noncanonical_u64(07), + GoldilocksField::from_noncanonical_u64(16), + GoldilocksField::from_noncanonical_u64(17), + ]; + let int4_a: [GoldilocksField; 8] = [ + GoldilocksField::from_noncanonical_u64(00), + GoldilocksField::from_noncanonical_u64(01), + GoldilocksField::from_noncanonical_u64(02), + GoldilocksField::from_noncanonical_u64(03), + GoldilocksField::from_noncanonical_u64(10), + GoldilocksField::from_noncanonical_u64(11), + GoldilocksField::from_noncanonical_u64(12), + GoldilocksField::from_noncanonical_u64(13), + ]; + let int4_b: [GoldilocksField; 8] = [ + GoldilocksField::from_noncanonical_u64(04), + GoldilocksField::from_noncanonical_u64(05), + GoldilocksField::from_noncanonical_u64(06), + GoldilocksField::from_noncanonical_u64(07), + GoldilocksField::from_noncanonical_u64(14), + GoldilocksField::from_noncanonical_u64(15), + GoldilocksField::from_noncanonical_u64(16), + GoldilocksField::from_noncanonical_u64(17), + ]; + + let packed_a = Avx512GoldilocksField::from_arr(in_a); + let packed_b = Avx512GoldilocksField::from_arr(in_b); + { + let (x1, y1) = packed_a.interleave(packed_b, 1); + assert_eq!(x1.as_arr(), int1_a); + assert_eq!(y1.as_arr(), int1_b); + } + { + let (x2, y2) = packed_a.interleave(packed_b, 2); + assert_eq!(x2.as_arr(), int2_a); + assert_eq!(y2.as_arr(), int2_b); + } + { + let (x4, y4) = packed_a.interleave(packed_b, 4); + assert_eq!(x4.as_arr(), int4_a); + assert_eq!(y4.as_arr(), int4_b); + } + { + let (x8, y8) = packed_a.interleave(packed_b, 8); + assert_eq!(x8.as_arr(), in_a); + assert_eq!(y8.as_arr(), in_b); + } + } +} diff --git a/field/src/arch/x86_64/mod.rs b/field/src/arch/x86_64/mod.rs index bd9dccae..326deb78 100644 --- a/field/src/arch/x86_64/mod.rs +++ b/field/src/arch/x86_64/mod.rs @@ -1,2 +1,20 @@ -#[cfg(target_feature = "avx2")] +#[cfg(all( + target_feature = "avx2", + not(all( + target_feature = "avx512bw", + target_feature = "avx512cd", + target_feature = "avx512dq", + target_feature = "avx512f", + target_feature = "avx512vl" + )) +))] pub mod avx2_goldilocks_field; + +#[cfg(all( + target_feature = "avx512bw", + target_feature = "avx512cd", + target_feature = "avx512dq", + target_feature = "avx512f", + target_feature = "avx512vl" +))] +pub mod avx512_goldilocks_field; diff --git a/field/src/lib.rs b/field/src/lib.rs index 47dd9ccb..f190bcbc 100644 --- a/field/src/lib.rs +++ b/field/src/lib.rs @@ -7,6 +7,7 @@ #![allow(clippy::return_self_not_must_use)] #![feature(generic_const_exprs)] #![feature(specialization)] +#![feature(stdsimd)] 
pub(crate) mod arch; pub mod batch_util; diff --git a/field/src/packable.rs b/field/src/packable.rs index 754a7fb6..18fe07f7 100644 --- a/field/src/packable.rs +++ b/field/src/packable.rs @@ -12,7 +12,29 @@ impl Packable for F { default type Packing = Self; } -#[cfg(all(target_arch = "x86_64", target_feature = "avx2"))] +#[cfg(all( + target_arch = "x86_64", + target_feature = "avx2", + not(all( + target_feature = "avx512bw", + target_feature = "avx512cd", + target_feature = "avx512dq", + target_feature = "avx512f", + target_feature = "avx512vl" + )) +))] impl Packable for crate::goldilocks_field::GoldilocksField { type Packing = crate::arch::x86_64::avx2_goldilocks_field::Avx2GoldilocksField; } + +#[cfg(all( + target_arch = "x86_64", + target_feature = "avx512bw", + target_feature = "avx512cd", + target_feature = "avx512dq", + target_feature = "avx512f", + target_feature = "avx512vl" +))] +impl Packable for crate::goldilocks_field::GoldilocksField { + type Packing = crate::arch::x86_64::avx512_goldilocks_field::Avx512GoldilocksField; +} From bf30fed70158ab0c3a5586c93d39ffa38f54eb95 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Thu, 6 Jan 2022 11:40:08 -0800 Subject: [PATCH 004/143] Make FRI more generic (#419) * Make FRI more generic * PR feedback --- plonky2/src/fri/mod.rs | 3 +- plonky2/src/fri/{commitment.rs => oracle.rs} | 98 ++++---------- plonky2/src/fri/proof.rs | 33 +++-- plonky2/src/fri/recursive_verifier.rs | 132 +++++++------------ plonky2/src/fri/structure.rs | 83 ++++++++++++ plonky2/src/fri/verifier.rs | 101 +++++++------- plonky2/src/plonk/circuit_builder.rs | 8 +- plonky2/src/plonk/circuit_data.rs | 100 +++++++++++++- plonky2/src/plonk/get_challenges.rs | 12 +- plonky2/src/plonk/plonk_common.rs | 66 ++++++---- plonky2/src/plonk/proof.rs | 69 ++++++++-- plonky2/src/plonk/prover.rs | 58 +++++--- plonky2/src/plonk/recursive_verifier.rs | 3 +- plonky2/src/plonk/verifier.rs | 1 + plonky2/src/util/reducing.rs | 9 +- 15 files changed, 476 insertions(+), 300 deletions(-) rename plonky2/src/fri/{commitment.rs => oracle.rs} (69%) create mode 100644 plonky2/src/fri/structure.rs diff --git a/plonky2/src/fri/mod.rs b/plonky2/src/fri/mod.rs index c50d1ff7..d59310de 100644 --- a/plonky2/src/fri/mod.rs +++ b/plonky2/src/fri/mod.rs @@ -1,10 +1,11 @@ use crate::fri::reduction_strategies::FriReductionStrategy; -pub mod commitment; +pub mod oracle; pub mod proof; pub mod prover; pub mod recursive_verifier; pub mod reduction_strategies; +pub mod structure; pub mod verifier; #[derive(Debug, Clone, Eq, PartialEq)] diff --git a/plonky2/src/fri/commitment.rs b/plonky2/src/fri/oracle.rs similarity index 69% rename from plonky2/src/fri/commitment.rs rename to plonky2/src/fri/oracle.rs index 9d7ecf43..ee391953 100644 --- a/plonky2/src/fri/commitment.rs +++ b/plonky2/src/fri/oracle.rs @@ -7,13 +7,12 @@ use rayon::prelude::*; use crate::fri::proof::FriProof; use crate::fri::prover::fri_proof; +use crate::fri::structure::{FriBatchInfo, FriInstanceInfo}; use crate::hash::hash_types::RichField; use crate::hash::merkle_tree::MerkleTree; use crate::iop::challenger::Challenger; use crate::plonk::circuit_data::CommonCircuitData; use crate::plonk::config::GenericConfig; -use crate::plonk::plonk_common::PlonkPolynomials; -use crate::plonk::proof::OpeningSet; use crate::timed; use crate::util::reducing::ReducingFactor; use crate::util::reverse_bits; @@ -23,12 +22,9 @@ use crate::util::transpose; /// Four (~64 bit) field elements gives ~128 bit security. 
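/// (The salt is a vector of random field elements appended to each Merkle leaf when an oracle
/// is blinded, hiding the committed values in zero-knowledge mode.)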
pub const SALT_SIZE: usize = 4; -/// Represents a batch FRI based commitment to a list of polynomials. -pub struct PolynomialBatchCommitment< - F: RichField + Extendable, - C: GenericConfig, - const D: usize, -> { +/// Represents a FRI oracle, i.e. a batch of polynomials which have been Merklized. +pub struct PolynomialBatch, C: GenericConfig, const D: usize> +{ pub polynomials: Vec>, pub merkle_tree: MerkleTree, pub degree_log: usize, @@ -37,7 +33,7 @@ pub struct PolynomialBatchCommitment< } impl, C: GenericConfig, const D: usize> - PolynomialBatchCommitment + PolynomialBatch { /// Creates a list polynomial commitment for the polynomials interpolating the values in `values`. pub(crate) fn from_values( @@ -130,78 +126,36 @@ impl, C: GenericConfig, const D: usize> &slice[..slice.len() - if self.blinding { SALT_SIZE } else { 0 }] } - /// Takes the commitments to the constants - sigmas - wires - zs - quotient — polynomials, - /// and an opening point `zeta` and produces a batched opening proof + opening set. - pub(crate) fn open_plonk( - commitments: &[&Self; 4], - zeta: F::Extension, + /// Produces a batch opening proof. + pub(crate) fn prove_openings( + instance: &FriInstanceInfo, + oracles: &[&Self], challenger: &mut Challenger, common_data: &CommonCircuitData, timing: &mut TimingTree, - ) -> (FriProof, OpeningSet) { - let config = &common_data.config; + ) -> FriProof { assert!(D > 1, "Not implemented for D=1."); - let degree_log = commitments[0].degree_log; - let g = F::Extension::primitive_root_of_unity(degree_log); - for p in &[zeta, g * zeta] { - assert_ne!( - p.exp_u64(1 << degree_log as u64), - F::Extension::ONE, - "Opening point is in the subgroup." - ); - } - - let os = timed!( - timing, - "construct the opening set", - OpeningSet::new( - zeta, - g, - commitments[0], - commitments[1], - commitments[2], - commitments[3], - common_data, - ) - ); - challenger.observe_opening_set(&os); - let alpha = challenger.get_extension_challenge::(); let mut alpha = ReducingFactor::new(alpha); // Final low-degree polynomial that goes into FRI. let mut final_poly = PolynomialCoeffs::empty(); - // All polynomials are opened at `zeta`. - let single_polys = [ - PlonkPolynomials::CONSTANTS_SIGMAS, - PlonkPolynomials::WIRES, - PlonkPolynomials::ZS_PARTIAL_PRODUCTS, - PlonkPolynomials::QUOTIENT, - ] - .iter() - .flat_map(|&p| &commitments[p.index].polynomials); - let single_composition_poly = timed!( - timing, - "reduce single polys", - alpha.reduce_polys_base(single_polys) - ); + for FriBatchInfo { point, polynomials } in &instance.batches { + let polys_coeff = polynomials.iter().map(|fri_poly| { + &oracles[fri_poly.oracle_index].polynomials[fri_poly.polynomial_index] + }); + let composition_poly = timed!( + timing, + &format!("reduce batch of {} polynomials", polynomials.len()), + alpha.reduce_polys_base(polys_coeff) + ); + let quotient = Self::compute_quotient([*point], composition_poly); + alpha.shift_poly(&mut final_poly); + final_poly += quotient; + } - let single_quotient = Self::compute_quotient([zeta], single_composition_poly); - final_poly += single_quotient; - alpha.reset(); - - // Z polynomials have an additional opening at `g zeta`. 
- let zs_polys = &commitments[PlonkPolynomials::ZS_PARTIAL_PRODUCTS.index].polynomials - [common_data.zs_range()]; - let zs_composition_poly = - timed!(timing, "reduce Z polys", alpha.reduce_polys_base(zs_polys)); - - let zs_quotient = Self::compute_quotient([g * zeta], zs_composition_poly); - alpha.shift_poly(&mut final_poly); - final_poly += zs_quotient; - - let lde_final_poly = final_poly.lde(config.fri_config.rate_bits); + let lde_final_poly = final_poly.lde(common_data.config.fri_config.rate_bits); let lde_final_values = timed!( timing, &format!("perform final FFT {}", lde_final_poly.len()), @@ -209,7 +163,7 @@ impl, C: GenericConfig, const D: usize> ); let fri_proof = fri_proof::( - &commitments + &oracles .par_iter() .map(|c| &c.merkle_tree) .collect::>(), @@ -220,7 +174,7 @@ impl, C: GenericConfig, const D: usize> timing, ); - (fri_proof, os) + fri_proof } /// Given `points=(x_i)`, `evals=(y_i)` and `poly=P` with `P(x_i)=y_i`, computes the polynomial diff --git a/plonky2/src/fri/proof.rs b/plonky2/src/fri/proof.rs index 784f3286..720ae378 100644 --- a/plonky2/src/fri/proof.rs +++ b/plonky2/src/fri/proof.rs @@ -15,7 +15,7 @@ use crate::iop::ext_target::ExtensionTarget; use crate::iop::target::Target; use crate::plonk::circuit_data::CommonCircuitData; use crate::plonk::config::{GenericConfig, Hasher}; -use crate::plonk::plonk_common::PolynomialsIndexBlinding; +use crate::plonk::plonk_common::salt_size; use crate::plonk::proof::{FriInferredElements, ProofChallenges}; /// Evaluations and Merkle proof produced by the prover in a FRI query step. @@ -41,13 +41,13 @@ pub struct FriInitialTreeProof> { } impl> FriInitialTreeProof { - pub(crate) fn unsalted_evals( - &self, - polynomials: PolynomialsIndexBlinding, - zero_knowledge: bool, - ) -> &[F] { - let evals = &self.evals_proofs[polynomials.index].0; - &evals[..evals.len() - polynomials.salt_size(zero_knowledge)] + pub(crate) fn unsalted_eval(&self, oracle_index: usize, poly_index: usize, salted: bool) -> F { + self.unsalted_evals(oracle_index, salted)[poly_index] + } + + fn unsalted_evals(&self, oracle_index: usize, salted: bool) -> &[F] { + let evals = &self.evals_proofs[oracle_index].0; + &evals[..evals.len() - salt_size(salted)] } } @@ -57,13 +57,18 @@ pub struct FriInitialTreeProofTarget { } impl FriInitialTreeProofTarget { - pub(crate) fn unsalted_evals( + pub(crate) fn unsalted_eval( &self, - polynomials: PolynomialsIndexBlinding, - zero_knowledge: bool, - ) -> &[Target] { - let evals = &self.evals_proofs[polynomials.index].0; - &evals[..evals.len() - polynomials.salt_size(zero_knowledge)] + oracle_index: usize, + poly_index: usize, + salted: bool, + ) -> Target { + self.unsalted_evals(oracle_index, salted)[poly_index] + } + + fn unsalted_evals(&self, oracle_index: usize, salted: bool) -> &[Target] { + let evals = &self.evals_proofs[oracle_index].0; + &evals[..evals.len() - salt_size(salted)] } } diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index 276adc2c..447b045e 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -1,8 +1,9 @@ +use itertools::Itertools; use plonky2_field::extension_field::Extendable; -use plonky2_field::field_types::Field; use plonky2_util::{log2_strict, reverse_index_bits_in_place}; use crate::fri::proof::{FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget}; +use crate::fri::structure::{FriBatchInfoTarget, FriInstanceInfoTarget, FriOpeningsTarget}; use crate::fri::FriConfig; use 
crate::gadgets::interpolation::InterpolationGate; use crate::gates::gate::Gate; @@ -17,7 +18,6 @@ use crate::iop::target::{BoolTarget, Target}; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::{CircuitConfig, CommonCircuitData}; use crate::plonk::config::{AlgebraicConfig, AlgebraicHasher, GenericConfig}; -use crate::plonk::plonk_common::PlonkPolynomials; use crate::plonk::proof::OpeningSetTarget; use crate::util::reducing::ReducingFactorTarget; use crate::with_context; @@ -127,10 +127,9 @@ impl, const D: usize> CircuitBuilder { pub fn verify_fri_proof>( &mut self, + instance: &FriInstanceInfoTarget, // Openings of the PLONK polynomials. os: &OpeningSetTarget, - // Point at which the PLONK polynomials are opened. - zeta: ExtensionTarget, initial_merkle_caps: &[MerkleCapTarget], proof: &FriProofTarget, challenger: &mut RecursiveChallenger, @@ -186,13 +185,7 @@ impl, const D: usize> CircuitBuilder { let precomputed_reduced_evals = with_context!( self, "precompute reduced evaluations", - PrecomputedReducedEvalsTarget::from_os_and_alpha( - os, - alpha, - common_data.degree_bits, - zeta, - self - ) + PrecomputedReducedOpeningsTarget::from_os_and_alpha(&os.to_fri_openings(), alpha, self) ); for (i, round_proof) in proof.query_round_proofs.iter().enumerate() { @@ -211,9 +204,9 @@ impl, const D: usize> CircuitBuilder { level, &format!("verify one (of {}) query rounds", num_queries), self.fri_verifier_query_round( - zeta, + instance, alpha, - precomputed_reduced_evals, + &precomputed_reduced_evals, initial_merkle_caps, proof, challenger, @@ -255,11 +248,11 @@ impl, const D: usize> CircuitBuilder { fn fri_combine_initial>( &mut self, + instance: &FriInstanceInfoTarget, proof: &FriInitialTreeProofTarget, alpha: ExtensionTarget, subgroup_x: Target, - vanish_zeta: ExtensionTarget, - precomputed_reduced_evals: PrecomputedReducedEvalsTarget, + precomputed_reduced_evals: &PrecomputedReducedOpeningsTarget, common_data: &CommonCircuitData, ) -> ExtensionTarget { assert!(D > 1, "Not implemented for D=1."); @@ -274,47 +267,35 @@ impl, const D: usize> CircuitBuilder { let mut alpha = ReducingFactorTarget::new(alpha); let mut sum = self.zero_extension(); - // We will add two terms to `sum`: one for openings at `x`, and one for openings at `g x`. - // All polynomials are opened at `x`. - let single_evals = [ - PlonkPolynomials::CONSTANTS_SIGMAS, - PlonkPolynomials::WIRES, - PlonkPolynomials::ZS_PARTIAL_PRODUCTS, - PlonkPolynomials::QUOTIENT, - ] - .iter() - .flat_map(|&p| proof.unsalted_evals(p, config.zero_knowledge)) - .copied() - .collect::>(); - let single_composition_eval = alpha.reduce_base(&single_evals, self); - let single_numerator = - self.sub_extension(single_composition_eval, precomputed_reduced_evals.single); - sum = self.div_add_extension(single_numerator, vanish_zeta, sum); - alpha.reset(); - - // Polynomials opened at `x` and `g x`, i.e., the Zs polynomials. - let zs_evals = proof - .unsalted_evals(PlonkPolynomials::ZS_PARTIAL_PRODUCTS, config.zero_knowledge) + for (batch, reduced_openings) in instance + .batches .iter() - .take(common_data.zs_range().end) - .copied() - .collect::>(); - let zs_composition_eval = alpha.reduce_base(&zs_evals, self); - - let zs_numerator = - self.sub_extension(zs_composition_eval, precomputed_reduced_evals.zs_right); - let zs_denominator = self.sub_extension(subgroup_x, precomputed_reduced_evals.zeta_right); - sum = alpha.shift(sum, self); // TODO: alpha^count could be precomputed. 
- sum = self.div_add_extension(zs_numerator, zs_denominator, sum); + .zip(&precomputed_reduced_evals.reduced_openings_at_point) + { + let FriBatchInfoTarget { point, polynomials } = batch; + let evals = polynomials + .iter() + .map(|p| { + let poly_blinding = instance.oracles[p.oracle_index].blinding; + let salted = config.zero_knowledge && poly_blinding; + proof.unsalted_eval(p.oracle_index, p.polynomial_index, salted) + }) + .collect_vec(); + let reduced_evals = alpha.reduce_base(&evals, self); + let numerator = self.sub_extension(reduced_evals, *reduced_openings); + let denominator = self.sub_extension(subgroup_x, *point); + sum = alpha.shift(sum, self); + sum = self.div_add_extension(numerator, denominator, sum); + } sum } fn fri_verifier_query_round>( &mut self, - zeta: ExtensionTarget, + instance: &FriInstanceInfoTarget, alpha: ExtensionTarget, - precomputed_reduced_evals: PrecomputedReducedEvalsTarget, + precomputed_reduced_evals: &PrecomputedReducedOpeningsTarget, initial_merkle_caps: &[MerkleCapTarget], proof: &FriProofTarget, challenger: &mut RecursiveChallenger, @@ -346,16 +327,12 @@ impl, const D: usize> CircuitBuilder { ); // `subgroup_x` is `subgroup[x_index]`, i.e., the actual field element in the domain. - let (mut subgroup_x, vanish_zeta) = with_context!(self, "compute x from its index", { + let mut subgroup_x = with_context!(self, "compute x from its index", { let g = self.constant(F::coset_shift()); let phi = F::primitive_root_of_unity(n_log); let phi = self.exp_from_bits_const_base(phi, x_index_bits.iter().rev()); - let g_ext = self.convert_to_ext(g); - let phi_ext = self.convert_to_ext(phi); - // `subgroup_x = g*phi, vanish_zeta = g*phi - zeta` - let subgroup_x = self.mul(g, phi); - let vanish_zeta = self.mul_sub_extension(g_ext, phi_ext, zeta); - (subgroup_x, vanish_zeta) + // subgroup_x = g * phi + self.mul(g, phi) }); // old_eval is the last derived evaluation; it will be checked for consistency with its @@ -364,10 +341,10 @@ impl, const D: usize> CircuitBuilder { self, "combine initial oracles", self.fri_combine_initial( + instance, &round_proof.initial_trees_proof, alpha, subgroup_x, - vanish_zeta, precomputed_reduced_evals, common_data, ) @@ -455,43 +432,26 @@ impl, const D: usize> CircuitBuilder { } } -#[derive(Copy, Clone)] -struct PrecomputedReducedEvalsTarget { - pub single: ExtensionTarget, - pub zs_right: ExtensionTarget, - pub zeta_right: ExtensionTarget, +/// For each opening point, holds the reduced (by `alpha`) evaluations of each polynomial that's +/// opened at that point. 
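+/// Each entry folds one batch's claimed evaluations into a single extension target using
+/// powers of `alpha`, mirroring the combination the prover applies to the corresponding
+/// polynomials.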
+#[derive(Clone)] +struct PrecomputedReducedOpeningsTarget { + reduced_openings_at_point: Vec>, } -impl PrecomputedReducedEvalsTarget { +impl PrecomputedReducedOpeningsTarget { fn from_os_and_alpha>( - os: &OpeningSetTarget, + openings: &FriOpeningsTarget, alpha: ExtensionTarget, - degree_log: usize, - zeta: ExtensionTarget, builder: &mut CircuitBuilder, ) -> Self { - let mut alpha = ReducingFactorTarget::new(alpha); - let single = alpha.reduce( - &os.constants - .iter() - .chain(&os.plonk_sigmas) - .chain(&os.wires) - .chain(&os.plonk_zs) - .chain(&os.partial_products) - .chain(&os.quotient_polys) - .copied() - .collect::>(), - builder, - ); - let zs_right = alpha.reduce(&os.plonk_zs_right, builder); - - let g = builder.constant_extension(F::Extension::primitive_root_of_unity(degree_log)); - let zeta_right = builder.mul_extension(g, zeta); - + let reduced_openings_at_point = openings + .batches + .iter() + .map(|batch| ReducingFactorTarget::new(alpha).reduce(&batch.values, builder)) + .collect(); Self { - single, - zs_right, - zeta_right, + reduced_openings_at_point, } } } diff --git a/plonky2/src/fri/structure.rs b/plonky2/src/fri/structure.rs new file mode 100644 index 00000000..240abd5d --- /dev/null +++ b/plonky2/src/fri/structure.rs @@ -0,0 +1,83 @@ +//! Information about the structure of a FRI instance, in terms of the oracles and polynomials +//! involved, and the points they are opened at. + +use std::ops::Range; + +use crate::field::extension_field::Extendable; +use crate::hash::hash_types::RichField; +use crate::iop::ext_target::ExtensionTarget; + +/// Describes an instance of a FRI-based batch opening. +pub struct FriInstanceInfo, const D: usize> { + /// The oracles involved, not counting oracles created during the commit phase. + pub oracles: Vec, + /// Batches of openings, where each batch is associated with a particular point. + pub batches: Vec>, +} + +/// Describes an instance of a FRI-based batch opening. +pub struct FriInstanceInfoTarget { + /// The oracles involved, not counting oracles created during the commit phase. + pub oracles: Vec, + /// Batches of openings, where each batch is associated with a particular point. + pub batches: Vec>, +} + +#[derive(Copy, Clone)] +pub struct FriOracleInfo { + pub blinding: bool, +} + +/// A batch of openings at a particular point. +pub struct FriBatchInfo, const D: usize> { + pub point: F::Extension, + pub polynomials: Vec, +} + +/// A batch of openings at a particular point. +pub struct FriBatchInfoTarget { + pub point: ExtensionTarget, + pub polynomials: Vec, +} + +#[derive(Copy, Clone, Debug)] +pub struct FriPolynomialInfo { + /// Index into `FriInstanceInfoTarget`'s `oracles` list. + pub oracle_index: usize, + /// Index of the polynomial within the oracle. + pub polynomial_index: usize, +} + +impl FriPolynomialInfo { + pub fn from_range( + oracle_index: usize, + polynomial_indices: Range, + ) -> Vec { + polynomial_indices + .map(|polynomial_index| FriPolynomialInfo { + oracle_index, + polynomial_index, + }) + .collect() + } +} + +/// Opened values of each polynomial. +pub struct FriOpenings, const D: usize> { + pub batches: Vec>, +} + +/// Opened values of each polynomial that's opened at a particular point. +pub struct FriOpeningBatch, const D: usize> { + pub values: Vec, +} + +/// Opened values of each polynomial. +pub struct FriOpeningsTarget { + pub batches: Vec>, +} + +/// Opened values of each polynomial that's opened at a particular point. 
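+/// The values are listed in the same order as the `polynomials` of the matching
+/// `FriBatchInfoTarget`, so both sides can be reduced with the same powers of `alpha`.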
+pub struct FriOpeningBatchTarget { + pub values: Vec>, +} diff --git a/plonky2/src/fri/verifier.rs b/plonky2/src/fri/verifier.rs index 4c14f32a..34f7a3dd 100644 --- a/plonky2/src/fri/verifier.rs +++ b/plonky2/src/fri/verifier.rs @@ -5,13 +5,13 @@ use plonky2_field::interpolation::{barycentric_weights, interpolate}; use plonky2_util::{log2_strict, reverse_index_bits_in_place}; use crate::fri::proof::{FriInitialTreeProof, FriProof, FriQueryRound}; +use crate::fri::structure::{FriBatchInfo, FriInstanceInfo, FriOpenings}; use crate::fri::FriConfig; use crate::hash::hash_types::RichField; use crate::hash::merkle_proofs::verify_merkle_proof; use crate::hash::merkle_tree::MerkleCap; use crate::plonk::circuit_data::CommonCircuitData; use crate::plonk::config::{GenericConfig, Hasher}; -use crate::plonk::plonk_common::PlonkPolynomials; use crate::plonk::proof::{OpeningSet, ProofChallenges}; use crate::util::reducing::ReducingFactor; use crate::util::reverse_bits; @@ -63,6 +63,7 @@ pub(crate) fn verify_fri_proof< C: GenericConfig, const D: usize, >( + instance: &FriInstanceInfo, // Openings of the PLONK polynomials. os: &OpeningSet, challenges: &ProofChallenges, @@ -89,15 +90,16 @@ pub(crate) fn verify_fri_proof< ); let precomputed_reduced_evals = - PrecomputedReducedEvals::from_os_and_alpha(os, challenges.fri_alpha); + PrecomputedReducedOpenings::from_os_and_alpha(&os.to_fri_openings(), challenges.fri_alpha); for (&x_index, round_proof) in challenges .fri_query_indices .iter() .zip(&proof.query_round_proofs) { fri_verifier_query_round::( + instance, challenges, - precomputed_reduced_evals, + &precomputed_reduced_evals, initial_merkle_caps, proof, x_index, @@ -127,49 +129,39 @@ pub(crate) fn fri_combine_initial< C: GenericConfig, const D: usize, >( + instance: &FriInstanceInfo, proof: &FriInitialTreeProof, alpha: F::Extension, - zeta: F::Extension, subgroup_x: F, - precomputed_reduced_evals: PrecomputedReducedEvals, + precomputed_reduced_evals: &PrecomputedReducedOpenings, common_data: &CommonCircuitData, ) -> F::Extension { let config = &common_data.config; assert!(D > 1, "Not implemented for D=1."); - let degree_log = common_data.degree_bits; let subgroup_x = F::Extension::from_basefield(subgroup_x); let mut alpha = ReducingFactor::new(alpha); let mut sum = F::Extension::ZERO; - // We will add two terms to `sum`: one for openings at `x`, and one for openings at `g x`. - // All polynomials are opened at `x`. - let single_evals = [ - PlonkPolynomials::CONSTANTS_SIGMAS, - PlonkPolynomials::WIRES, - PlonkPolynomials::ZS_PARTIAL_PRODUCTS, - PlonkPolynomials::QUOTIENT, - ] - .iter() - .flat_map(|&p| proof.unsalted_evals(p, config.zero_knowledge)) - .map(|&e| F::Extension::from_basefield(e)); - let single_composition_eval = alpha.reduce(single_evals); - let single_numerator = single_composition_eval - precomputed_reduced_evals.single; - let single_denominator = subgroup_x - zeta; - sum += single_numerator / single_denominator; - alpha.reset(); - - // Z polynomials have an additional opening at `g x`. 
- let zs_evals = proof - .unsalted_evals(PlonkPolynomials::ZS_PARTIAL_PRODUCTS, config.zero_knowledge) + for (batch, reduced_openings) in instance + .batches .iter() - .map(|&e| F::Extension::from_basefield(e)) - .take(common_data.zs_range().end); - let zs_composition_eval = alpha.reduce(zs_evals); - let zeta_right = F::Extension::primitive_root_of_unity(degree_log) * zeta; - let zs_numerator = zs_composition_eval - precomputed_reduced_evals.zs_right; - let zs_denominator = subgroup_x - zeta_right; - sum = alpha.shift(sum); - sum += zs_numerator / zs_denominator; + .zip(&precomputed_reduced_evals.reduced_openings_at_point) + { + let FriBatchInfo { point, polynomials } = batch; + let evals = polynomials + .iter() + .map(|p| { + let poly_blinding = instance.oracles[p.oracle_index].blinding; + let salted = config.zero_knowledge && poly_blinding; + proof.unsalted_eval(p.oracle_index, p.polynomial_index, salted) + }) + .map(F::Extension::from_basefield); + let reduced_evals = alpha.reduce(evals); + let numerator = reduced_evals - *reduced_openings; + let denominator = subgroup_x - *point; + sum = alpha.shift(sum); + sum += numerator / denominator; + } sum } @@ -179,8 +171,9 @@ fn fri_verifier_query_round< C: GenericConfig, const D: usize, >( + instance: &FriInstanceInfo, challenges: &ProofChallenges, - precomputed_reduced_evals: PrecomputedReducedEvals, + precomputed_reduced_evals: &PrecomputedReducedOpenings, initial_merkle_caps: &[MerkleCap], proof: &FriProof, mut x_index: usize, @@ -201,9 +194,9 @@ fn fri_verifier_query_round< // old_eval is the last derived evaluation; it will be checked for consistency with its // committed "parent" value in the next iteration. let mut old_eval = fri_combine_initial( + instance, &round_proof.initial_trees_proof, challenges.fri_alpha, - challenges.plonk_zeta, subgroup_x, precomputed_reduced_evals, common_data, @@ -257,28 +250,22 @@ fn fri_verifier_query_round< Ok(()) } -/// Holds the reduced (by `alpha`) evaluations at `zeta` for the polynomial opened just at -/// zeta, for `Z` at zeta and for `Z` at `g*zeta`. -#[derive(Copy, Clone, Debug)] -pub(crate) struct PrecomputedReducedEvals, const D: usize> { - pub single: F::Extension, - pub zs_right: F::Extension, +/// For each opening point, holds the reduced (by `alpha`) evaluations of each polynomial that's +/// opened at that point. 
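+/// Computed once per proof from the claimed openings and then shared by every FRI query round,
+/// rather than being re-derived inside each round.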
+#[derive(Clone, Debug)] +pub(crate) struct PrecomputedReducedOpenings, const D: usize> { + pub reduced_openings_at_point: Vec, } -impl, const D: usize> PrecomputedReducedEvals { - pub(crate) fn from_os_and_alpha(os: &OpeningSet, alpha: F::Extension) -> Self { - let mut alpha = ReducingFactor::new(alpha); - let single = alpha.reduce( - os.constants - .iter() - .chain(&os.plonk_sigmas) - .chain(&os.wires) - .chain(&os.plonk_zs) - .chain(&os.partial_products) - .chain(&os.quotient_polys), - ); - let zs_right = alpha.reduce(os.plonk_zs_right.iter()); - - Self { single, zs_right } +impl, const D: usize> PrecomputedReducedOpenings { + pub(crate) fn from_os_and_alpha(openings: &FriOpenings, alpha: F::Extension) -> Self { + let reduced_openings_at_point = openings + .batches + .iter() + .map(|batch| ReducingFactor::new(alpha).reduce(batch.values.iter())) + .collect(); + Self { + reduced_openings_at_point, + } } } diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index 2456d6fd..ca9f5a6b 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -10,7 +10,7 @@ use plonky2_field::field_types::Field; use plonky2_field::polynomial::PolynomialValues; use plonky2_util::{log2_ceil, log2_strict}; -use crate::fri::commitment::PolynomialBatchCommitment; +use crate::fri::oracle::PolynomialBatch; use crate::fri::{FriConfig, FriParams}; use crate::gadgets::arithmetic::BaseArithmeticOperation; use crate::gadgets::arithmetic_extension::ExtensionArithmeticOperation; @@ -41,7 +41,7 @@ use crate::plonk::circuit_data::{ use crate::plonk::config::{GenericConfig, Hasher}; use crate::plonk::copy_constraint::CopyConstraint; use crate::plonk::permutation_argument::Forest; -use crate::plonk::plonk_common::PlonkPolynomials; +use crate::plonk::plonk_common::PlonkOracle; use crate::util::context_tree::ContextTree; use crate::util::marking::{Markable, MarkedTargets}; use crate::util::partial_products::num_partial_products; @@ -641,10 +641,10 @@ impl, const D: usize> CircuitBuilder { let fft_root_table = fft_root_table(max_fft_points); let constants_sigmas_vecs = [constant_vecs, sigma_vecs.clone()].concat(); - let constants_sigmas_commitment = PolynomialBatchCommitment::from_values( + let constants_sigmas_commitment = PolynomialBatch::from_values( constants_sigmas_vecs, rate_bits, - self.config.zero_knowledge & PlonkPolynomials::CONSTANTS_SIGMAS.blinding, + PlonkOracle::CONSTANTS_SIGMAS.blinding, self.config.fri_config.cap_height, &mut timing, Some(&fft_root_table), diff --git a/plonky2/src/plonk/circuit_data.rs b/plonky2/src/plonk/circuit_data.rs index 74308fc3..a0ef65e4 100644 --- a/plonky2/src/plonk/circuit_data.rs +++ b/plonky2/src/plonk/circuit_data.rs @@ -5,16 +5,23 @@ use anyhow::Result; use plonky2_field::extension_field::Extendable; use plonky2_field::fft::FftRootTable; -use crate::fri::commitment::PolynomialBatchCommitment; +use crate::field::field_types::Field; +use crate::fri::oracle::PolynomialBatch; use crate::fri::reduction_strategies::FriReductionStrategy; +use crate::fri::structure::{ + FriBatchInfo, FriBatchInfoTarget, FriInstanceInfo, FriInstanceInfoTarget, FriPolynomialInfo, +}; use crate::fri::{FriConfig, FriParams}; use crate::gates::gate::PrefixedGate; use crate::hash::hash_types::{MerkleCapTarget, RichField}; use crate::hash::merkle_tree::MerkleCap; +use crate::iop::ext_target::ExtensionTarget; use crate::iop::generator::WitnessGenerator; use crate::iop::target::Target; use crate::iop::witness::PartialWitness; +use 
crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::config::{GenericConfig, Hasher}; +use crate::plonk::plonk_common::{PlonkOracle, FRI_ORACLES}; use crate::plonk::proof::{CompressedProofWithPublicInputs, ProofWithPublicInputs}; use crate::plonk::prover::prove; use crate::plonk::verifier::verify; @@ -178,7 +185,7 @@ pub(crate) struct ProverOnlyCircuitData< /// they watch. pub generator_indices_by_watches: BTreeMap>, /// Commitments to the constants polynomials and sigma polynomials. - pub constants_sigmas_commitment: PolynomialBatchCommitment, + pub constants_sigmas_commitment: PolynomialBatch, /// The transpose of the list of sigma polynomials. pub sigmas: Vec>, /// Subgroup of order `degree`. @@ -286,6 +293,95 @@ impl, C: GenericConfig, const D: usize> pub fn partial_products_range(&self) -> RangeFrom { self.config.num_challenges.. } + + pub(crate) fn get_fri_instance(&self, zeta: F::Extension) -> FriInstanceInfo { + // All polynomials are opened at zeta. + let zeta_batch = FriBatchInfo { + point: zeta, + polynomials: self.fri_all_polys(), + }; + + // The Z polynomials are also opened at g * zeta. + let g = F::Extension::primitive_root_of_unity(self.degree_bits); + let zeta_right = g * zeta; + let zeta_right_batch = FriBatchInfo { + point: zeta_right, + polynomials: self.fri_zs_polys(), + }; + + let openings = vec![zeta_batch, zeta_right_batch]; + FriInstanceInfo { + oracles: FRI_ORACLES.to_vec(), + batches: openings, + } + } + + pub(crate) fn get_fri_instance_target( + &self, + builder: &mut CircuitBuilder, + zeta: ExtensionTarget, + ) -> FriInstanceInfoTarget { + // All polynomials are opened at zeta. + let zeta_batch = FriBatchInfoTarget { + point: zeta, + polynomials: self.fri_all_polys(), + }; + + // The Z polynomials are also opened at g * zeta. + let g = F::primitive_root_of_unity(self.degree_bits); + let zeta_right = builder.mul_const_extension(g, zeta); + let zeta_right_batch = FriBatchInfoTarget { + point: zeta_right, + polynomials: self.fri_zs_polys(), + }; + + let openings = vec![zeta_batch, zeta_right_batch]; + FriInstanceInfoTarget { + oracles: FRI_ORACLES.to_vec(), + batches: openings, + } + } + + fn fri_preprocessed_polys(&self) -> Vec { + let num_preprocessed_polys = self.sigmas_range().end; + FriPolynomialInfo::from_range( + PlonkOracle::CONSTANTS_SIGMAS.index, + 0..num_preprocessed_polys, + ) + } + + fn fri_wire_polys(&self) -> Vec { + let num_wire_polys = self.config.num_wires; + FriPolynomialInfo::from_range(PlonkOracle::WIRES.index, 0..num_wire_polys) + } + + fn fri_zs_partial_products_polys(&self) -> Vec { + let num_zs_partial_products_polys = + self.config.num_challenges * (1 + self.num_partial_products.0); + FriPolynomialInfo::from_range( + PlonkOracle::ZS_PARTIAL_PRODUCTS.index, + 0..num_zs_partial_products_polys, + ) + } + + fn fri_zs_polys(&self) -> Vec { + FriPolynomialInfo::from_range(PlonkOracle::ZS_PARTIAL_PRODUCTS.index, self.zs_range()) + } + + fn fri_quotient_polys(&self) -> Vec { + let num_quotient_polys = self.config.num_challenges * self.quotient_degree_factor; + FriPolynomialInfo::from_range(PlonkOracle::QUOTIENT.index, 0..num_quotient_polys) + } + + fn fri_all_polys(&self) -> Vec { + [ + self.fri_preprocessed_polys(), + self.fri_wire_polys(), + self.fri_zs_partial_products_polys(), + self.fri_quotient_polys(), + ] + .concat() + } } /// The `Target` version of `VerifierCircuitData`, for use inside recursive circuits. 
Note that this diff --git a/plonky2/src/plonk/get_challenges.rs b/plonky2/src/plonk/get_challenges.rs index 23e7f454..bbb91d79 100644 --- a/plonky2/src/plonk/get_challenges.rs +++ b/plonky2/src/plonk/get_challenges.rs @@ -4,7 +4,7 @@ use plonky2_field::extension_field::Extendable; use plonky2_field::polynomial::PolynomialCoeffs; use crate::fri::proof::{CompressedFriProof, FriProof}; -use crate::fri::verifier::{compute_evaluation, fri_combine_initial, PrecomputedReducedEvals}; +use crate::fri::verifier::{compute_evaluation, fri_combine_initial, PrecomputedReducedOpenings}; use crate::hash::hash_types::RichField; use crate::hash::merkle_tree::MerkleCap; use crate::iop::challenger::Challenger; @@ -187,8 +187,10 @@ impl, C: GenericConfig, const D: usize> // Holds the indices that have already been seen at each reduction depth. let mut seen_indices_by_depth = vec![HashSet::new(); common_data.fri_params.reduction_arity_bits.len()]; - let precomputed_reduced_evals = - PrecomputedReducedEvals::from_os_and_alpha(&self.proof.openings, *fri_alpha); + let precomputed_reduced_evals = PrecomputedReducedOpenings::from_os_and_alpha( + &self.proof.openings.to_fri_openings(), + *fri_alpha, + ); let log_n = common_data.degree_bits + common_data.config.fri_config.rate_bits; // Simulate the proof verification and collect the inferred elements. // The content of the loop is basically the same as the `fri_verifier_query_round` function. @@ -196,15 +198,15 @@ impl, C: GenericConfig, const D: usize> let mut subgroup_x = F::MULTIPLICATIVE_GROUP_GENERATOR * F::primitive_root_of_unity(log_n).exp_u64(reverse_bits(x_index, log_n) as u64); let mut old_eval = fri_combine_initial( + &common_data.get_fri_instance(*plonk_zeta), &self .proof .opening_proof .query_round_proofs .initial_trees_proofs[&x_index], *fri_alpha, - *plonk_zeta, subgroup_x, - precomputed_reduced_evals, + &precomputed_reduced_evals, common_data, ); for (i, &arity_bits) in common_data diff --git a/plonky2/src/plonk/plonk_common.rs b/plonky2/src/plonk/plonk_common.rs index 5b8119aa..92c4168d 100644 --- a/plonky2/src/plonk/plonk_common.rs +++ b/plonky2/src/plonk/plonk_common.rs @@ -2,49 +2,59 @@ use plonky2_field::extension_field::Extendable; use plonky2_field::field_types::Field; use plonky2_field::packed_field::PackedField; -use crate::fri::commitment::SALT_SIZE; +use crate::fri::oracle::SALT_SIZE; +use crate::fri::structure::FriOracleInfo; use crate::hash::hash_types::RichField; use crate::iop::ext_target::ExtensionTarget; use crate::iop::target::Target; use crate::plonk::circuit_builder::CircuitBuilder; use crate::util::reducing::ReducingFactorTarget; +pub(crate) const FRI_ORACLES: [FriOracleInfo; 4] = [ + PlonkOracle::CONSTANTS_SIGMAS.as_fri_oracle(), + PlonkOracle::WIRES.as_fri_oracle(), + PlonkOracle::ZS_PARTIAL_PRODUCTS.as_fri_oracle(), + PlonkOracle::QUOTIENT.as_fri_oracle(), +]; + /// Holds the Merkle tree index and blinding flag of a set of polynomials used in FRI. 
#[derive(Debug, Copy, Clone)] -pub struct PolynomialsIndexBlinding { +pub struct PlonkOracle { pub(crate) index: usize, pub(crate) blinding: bool, } -impl PolynomialsIndexBlinding { - pub fn salt_size(&self, zero_knowledge: bool) -> usize { - if zero_knowledge & self.blinding { - SALT_SIZE - } else { - 0 + +impl PlonkOracle { + pub const CONSTANTS_SIGMAS: PlonkOracle = PlonkOracle { + index: 0, + blinding: false, + }; + pub const WIRES: PlonkOracle = PlonkOracle { + index: 1, + blinding: true, + }; + pub const ZS_PARTIAL_PRODUCTS: PlonkOracle = PlonkOracle { + index: 2, + blinding: true, + }; + pub const QUOTIENT: PlonkOracle = PlonkOracle { + index: 3, + blinding: true, + }; + + pub(crate) const fn as_fri_oracle(&self) -> FriOracleInfo { + FriOracleInfo { + blinding: self.blinding, } } } -/// Holds the indices and blinding flags of the Plonk polynomials. -pub struct PlonkPolynomials; - -impl PlonkPolynomials { - pub const CONSTANTS_SIGMAS: PolynomialsIndexBlinding = PolynomialsIndexBlinding { - index: 0, - blinding: false, - }; - pub const WIRES: PolynomialsIndexBlinding = PolynomialsIndexBlinding { - index: 1, - blinding: true, - }; - pub const ZS_PARTIAL_PRODUCTS: PolynomialsIndexBlinding = PolynomialsIndexBlinding { - index: 2, - blinding: true, - }; - pub const QUOTIENT: PolynomialsIndexBlinding = PolynomialsIndexBlinding { - index: 3, - blinding: true, - }; +pub fn salt_size(salted: bool) -> usize { + if salted { + SALT_SIZE + } else { + 0 + } } /// Evaluate the polynomial which vanishes on any multiplicative subgroup of a given order `n`. diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index 91db2b25..c2dd47f3 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -2,8 +2,11 @@ use plonky2_field::extension_field::Extendable; use rayon::prelude::*; use serde::{Deserialize, Serialize}; -use crate::fri::commitment::PolynomialBatchCommitment; +use crate::fri::oracle::PolynomialBatch; use crate::fri::proof::{CompressedFriProof, FriProof, FriProofTarget}; +use crate::fri::structure::{ + FriOpeningBatch, FriOpeningBatchTarget, FriOpenings, FriOpeningsTarget, +}; use crate::hash::hash_types::{MerkleCapTarget, RichField}; use crate::hash::merkle_tree::MerkleCap; use crate::iop::ext_target::ExtensionTarget; @@ -274,33 +277,53 @@ pub struct OpeningSet, const D: usize> { impl, const D: usize> OpeningSet { pub fn new>( - z: F::Extension, + zeta: F::Extension, g: F::Extension, - constants_sigmas_commitment: &PolynomialBatchCommitment, - wires_commitment: &PolynomialBatchCommitment, - zs_partial_products_commitment: &PolynomialBatchCommitment, - quotient_polys_commitment: &PolynomialBatchCommitment, + constants_sigmas_commitment: &PolynomialBatch, + wires_commitment: &PolynomialBatch, + zs_partial_products_commitment: &PolynomialBatch, + quotient_polys_commitment: &PolynomialBatch, common_data: &CommonCircuitData, ) -> Self { - let eval_commitment = |z: F::Extension, c: &PolynomialBatchCommitment| { + let eval_commitment = |z: F::Extension, c: &PolynomialBatch| { c.polynomials .par_iter() .map(|p| p.to_extension().eval(z)) .collect::>() }; - let constants_sigmas_eval = eval_commitment(z, constants_sigmas_commitment); - let zs_partial_products_eval = eval_commitment(z, zs_partial_products_commitment); + let constants_sigmas_eval = eval_commitment(zeta, constants_sigmas_commitment); + let zs_partial_products_eval = eval_commitment(zeta, zs_partial_products_commitment); Self { constants: constants_sigmas_eval[common_data.constants_range()].to_vec(), 
plonk_sigmas: constants_sigmas_eval[common_data.sigmas_range()].to_vec(), - wires: eval_commitment(z, wires_commitment), + wires: eval_commitment(zeta, wires_commitment), plonk_zs: zs_partial_products_eval[common_data.zs_range()].to_vec(), - plonk_zs_right: eval_commitment(g * z, zs_partial_products_commitment) + plonk_zs_right: eval_commitment(g * zeta, zs_partial_products_commitment) [common_data.zs_range()] .to_vec(), partial_products: zs_partial_products_eval[common_data.partial_products_range()] .to_vec(), - quotient_polys: eval_commitment(z, quotient_polys_commitment), + quotient_polys: eval_commitment(zeta, quotient_polys_commitment), + } + } + + pub(crate) fn to_fri_openings(&self) -> FriOpenings { + let zeta_batch = FriOpeningBatch { + values: [ + self.constants.as_slice(), + self.plonk_sigmas.as_slice(), + self.wires.as_slice(), + self.plonk_zs.as_slice(), + self.partial_products.as_slice(), + self.quotient_polys.as_slice(), + ] + .concat(), + }; + let zeta_right_batch = FriOpeningBatch { + values: self.plonk_zs_right.clone(), + }; + FriOpenings { + batches: vec![zeta_batch, zeta_right_batch], } } } @@ -317,6 +340,28 @@ pub struct OpeningSetTarget { pub quotient_polys: Vec>, } +impl OpeningSetTarget { + pub(crate) fn to_fri_openings(&self) -> FriOpeningsTarget { + let zeta_batch = FriOpeningBatchTarget { + values: [ + self.constants.as_slice(), + self.plonk_sigmas.as_slice(), + self.wires.as_slice(), + self.plonk_zs.as_slice(), + self.partial_products.as_slice(), + self.quotient_polys.as_slice(), + ] + .concat(), + }; + let zeta_right_batch = FriOpeningBatchTarget { + values: self.plonk_zs_right.clone(), + }; + FriOpeningsTarget { + batches: vec![zeta_batch, zeta_right_batch], + } + } +} + #[cfg(test)] mod tests { use anyhow::Result; diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index f7196270..abd2c552 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -1,20 +1,22 @@ use std::mem::swap; +use anyhow::ensure; use anyhow::Result; use plonky2_field::extension_field::Extendable; use plonky2_field::polynomial::{PolynomialCoeffs, PolynomialValues}; use plonky2_util::log2_ceil; use rayon::prelude::*; -use crate::fri::commitment::PolynomialBatchCommitment; +use crate::field::field_types::Field; +use crate::fri::oracle::PolynomialBatch; use crate::hash::hash_types::RichField; use crate::iop::challenger::Challenger; use crate::iop::generator::generate_partial_witness; use crate::iop::witness::{MatrixWitness, PartialWitness, Witness}; use crate::plonk::circuit_data::{CommonCircuitData, ProverOnlyCircuitData}; use crate::plonk::config::{GenericConfig, Hasher}; -use crate::plonk::plonk_common::PlonkPolynomials; -use crate::plonk::plonk_common::ZeroPolyOnCoset; +use crate::plonk::plonk_common::{PlonkOracle, ZeroPolyOnCoset}; +use crate::plonk::proof::OpeningSet; use crate::plonk::proof::{Proof, ProofWithPublicInputs}; use crate::plonk::vanishing_poly::eval_vanishing_poly_base_batch; use crate::plonk::vars::EvaluationVarsBaseBatch; @@ -69,10 +71,10 @@ pub(crate) fn prove, C: GenericConfig, co let wires_commitment = timed!( timing, "compute wires commitment", - PolynomialBatchCommitment::from_values( + PolynomialBatch::from_values( wires_values, config.fri_config.rate_bits, - config.zero_knowledge & PlonkPolynomials::WIRES.blinding, + config.zero_knowledge && PlonkOracle::WIRES.blinding, config.fri_config.cap_height, timing, prover_data.fft_root_table.as_ref(), @@ -109,10 +111,10 @@ pub(crate) fn prove, C: GenericConfig, co let 
partial_products_and_zs_commitment = timed!( timing, "commit to partial products and Z's", - PolynomialBatchCommitment::from_values( + PolynomialBatch::from_values( zs_partial_products, config.fri_config.rate_bits, - config.zero_knowledge & PlonkPolynomials::ZS_PARTIAL_PRODUCTS.blinding, + config.zero_knowledge && PlonkOracle::ZS_PARTIAL_PRODUCTS.blinding, config.fri_config.cap_height, timing, prover_data.fft_root_table.as_ref(), @@ -158,10 +160,10 @@ pub(crate) fn prove, C: GenericConfig, co let quotient_polys_commitment = timed!( timing, "commit to quotient polys", - PolynomialBatchCommitment::from_coeffs( + PolynomialBatch::from_coeffs( all_quotient_poly_chunks, config.fri_config.rate_bits, - config.zero_knowledge & PlonkPolynomials::QUOTIENT.blinding, + config.zero_knowledge && PlonkOracle::QUOTIENT.blinding, config.fri_config.cap_height, timing, prover_data.fft_root_table.as_ref(), @@ -171,18 +173,41 @@ pub(crate) fn prove, C: GenericConfig, co challenger.observe_cap("ient_polys_commitment.merkle_tree.cap); let zeta = challenger.get_extension_challenge::(); + // To avoid leaking witness data, we want to ensure that our opening locations, `zeta` and + // `g * zeta`, are not in our subgroup `H`. It suffices to check `zeta` only, since + // `(g * zeta)^n = zeta^n`, where `n` is the order of `g`. + let g = F::Extension::primitive_root_of_unity(common_data.degree_bits); + ensure!( + zeta.exp_power_of_2(common_data.degree_bits) != F::Extension::ONE, + "Opening point is in the subgroup." + ); - let (opening_proof, openings) = timed!( + let openings = timed!( + timing, + "construct the opening set", + OpeningSet::new( + zeta, + g, + &prover_data.constants_sigmas_commitment, + &wires_commitment, + &partial_products_and_zs_commitment, + "ient_polys_commitment, + common_data, + ) + ); + challenger.observe_opening_set(&openings); + + let opening_proof = timed!( timing, "compute opening proofs", - PolynomialBatchCommitment::open_plonk( + PolynomialBatch::prove_openings( + &common_data.get_fri_instance(zeta), &[ &prover_data.constants_sigmas_commitment, &wires_commitment, &partial_products_and_zs_commitment, "ient_polys_commitment, ], - zeta, &mut challenger, common_data, timing, @@ -300,8 +325,8 @@ fn compute_quotient_polys< common_data: &CommonCircuitData, prover_data: &'a ProverOnlyCircuitData, public_inputs_hash: &<>::InnerHasher as Hasher>::Hash, - wires_commitment: &'a PolynomialBatchCommitment, - zs_partial_products_commitment: &'a PolynomialBatchCommitment, + wires_commitment: &'a PolynomialBatch, + zs_partial_products_commitment: &'a PolynomialBatch, betas: &[F], gammas: &[F], alphas: &[F], @@ -325,9 +350,8 @@ fn compute_quotient_polys< let lde_size = points.len(); // Retrieve the LDE values at index `i`. 
- let get_at_index = |comm: &'a PolynomialBatchCommitment, i: usize| -> &'a [F] { - comm.get_lde_values(i * step) - }; + let get_at_index = + |comm: &'a PolynomialBatch, i: usize| -> &'a [F] { comm.get_lde_values(i * step) }; let z_h_on_coset = ZeroPolyOnCoset::new(common_data.degree_bits, max_degree_bits); diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index dc6f5039..ba653e22 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -107,12 +107,13 @@ impl, const D: usize> CircuitBuilder { proof.quotient_polys_cap, ]; + let fri_instance = inner_common_data.get_fri_instance_target(self, zeta); with_context!( self, "verify FRI proof", self.verify_fri_proof( + &fri_instance, &proof.openings, - zeta, merkle_caps, &proof.opening_proof, &mut challenger, diff --git a/plonky2/src/plonk/verifier.rs b/plonky2/src/plonk/verifier.rs index c3cf4988..3dca02a7 100644 --- a/plonky2/src/plonk/verifier.rs +++ b/plonky2/src/plonk/verifier.rs @@ -86,6 +86,7 @@ pub(crate) fn verify_with_challenges< ]; verify_fri_proof( + &common_data.get_fri_instance(challenges.plonk_zeta), &proof.openings, &challenges, merkle_caps, diff --git a/plonky2/src/util/reducing.rs b/plonky2/src/util/reducing.rs index 8bfe45d1..b4e8d8cf 100644 --- a/plonky2/src/util/reducing.rs +++ b/plonky2/src/util/reducing.rs @@ -238,7 +238,14 @@ impl ReducingFactorTarget { where F: RichField + Extendable, { - let exp = builder.exp_u64_extension(self.base, self.count); + let zero_ext = builder.zero_extension(); + let exp = if x == zero_ext { + // The result will get zeroed out, so don't actually compute the exponentiation. + zero_ext + } else { + builder.exp_u64_extension(self.base, self.count) + }; + self.count = 0; builder.mul_extension(exp, x) } From 4e532f04faf665a7dbc2c14965dd950856ec979a Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Thu, 6 Jan 2022 15:50:56 -0800 Subject: [PATCH 005/143] AVX2 Poseidon S-box optimizations (#421) --- .../x86_64/poseidon_goldilocks_avx2_bmi2.rs | 169 +++++++++++++----- 1 file changed, 127 insertions(+), 42 deletions(-) diff --git a/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs b/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs index 934583d6..804524ee 100644 --- a/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs +++ b/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs @@ -4,6 +4,7 @@ use std::mem::size_of; use plonky2_field::field_types::Field; use plonky2_field::goldilocks_field::GoldilocksField; +use plonky2_util::branch_hint; use static_assertions::const_assert; use crate::hash::poseidon::{ @@ -141,6 +142,16 @@ macro_rules! map3 { ($f:ident::<$l:literal>, $v:ident) => { ($f::<$l>($v.0), $f::<$l>($v.1), $f::<$l>($v.2)) }; + ($f:ident::<$l:literal>, $v1:ident, $v2:ident) => { + ( + $f::<$l>($v1.0, $v2.0), + $f::<$l>($v1.1, $v2.1), + $f::<$l>($v1.2, $v2.2), + ) + }; + ($f:ident, $v:ident) => { + ($f($v.0), $f($v.1), $f($v.2)) + }; ($f:ident, $v0:ident, $v1:ident) => { ($f($v0.0, $v1.0), $f($v0.1, $v1.1), $f($v0.2, $v1.2)) }; @@ -188,19 +199,32 @@ unsafe fn const_layer( unsafe fn square3( x: (__m256i, __m256i, __m256i), ) -> ((__m256i, __m256i, __m256i), (__m256i, __m256i, __m256i)) { - let sign_bit = _mm256_set1_epi64x(i64::MIN); - let x_hi = map3!(_mm256_srli_epi64::<32>, x); + let x_hi = { + // Move high bits to low position. The high bits of x_hi are ignored. Swizzle is faster than + // bitshift. 
This instruction only has a floating-point flavor, so we cast to/from float. + // This is safe and free. + let x_ps = map3!(_mm256_castsi256_ps, x); + let x_hi_ps = map3!(_mm256_movehdup_ps, x_ps); + map3!(_mm256_castps_si256, x_hi_ps) + }; + + // All pairwise multiplications. let mul_ll = map3!(_mm256_mul_epu32, x, x); let mul_lh = map3!(_mm256_mul_epu32, x, x_hi); let mul_hh = map3!(_mm256_mul_epu32, x_hi, x_hi); - let res_lo0_s = map3!(_mm256_xor_si256, mul_ll, rep sign_bit); + + // Bignum addition, but mul_lh is shifted by 33 bits (not 32). + let mul_ll_hi = map3!(_mm256_srli_epi64::<33>, mul_ll); + let t0 = map3!(_mm256_add_epi64, mul_lh, mul_ll_hi); + let t0_hi = map3!(_mm256_srli_epi64::<31>, t0); + let res_hi = map3!(_mm256_add_epi64, mul_hh, t0_hi); + + // Form low result by adding the mul_ll and the low 31 bits of mul_lh (shifted to the high + // position). let mul_lh_lo = map3!(_mm256_slli_epi64::<33>, mul_lh); - let res_lo1_s = map3!(_mm256_add_epi64, res_lo0_s, mul_lh_lo); - let carry = map3!(_mm256_cmpgt_epi64, res_lo0_s, res_lo1_s); - let mul_lh_hi = map3!(_mm256_srli_epi64::<31>, mul_lh); - let res_hi0 = map3!(_mm256_add_epi64, mul_hh, mul_lh_hi); - let res_hi1 = map3!(_mm256_sub_epi64, res_hi0, carry); - (res_lo1_s, res_hi1) + let res_lo = map3!(_mm256_add_epi64, mul_ll, mul_lh_lo); + + (res_lo, res_hi) } #[inline(always)] @@ -208,49 +232,110 @@ unsafe fn mul3( x: (__m256i, __m256i, __m256i), y: (__m256i, __m256i, __m256i), ) -> ((__m256i, __m256i, __m256i), (__m256i, __m256i, __m256i)) { - let sign_bit = _mm256_set1_epi64x(i64::MIN); - let y_hi = map3!(_mm256_srli_epi64::<32>, y); - let x_hi = map3!(_mm256_srli_epi64::<32>, x); + let epsilon = _mm256_set1_epi64x(0xffffffff); + let x_hi = { + // Move high bits to low position. The high bits of x_hi are ignored. Swizzle is faster than + // bitshift. This instruction only has a floating-point flavor, so we cast to/from float. + // This is safe and free. + let x_ps = map3!(_mm256_castsi256_ps, x); + let x_hi_ps = map3!(_mm256_movehdup_ps, x_ps); + map3!(_mm256_castps_si256, x_hi_ps) + }; + let y_hi = { + let y_ps = map3!(_mm256_castsi256_ps, y); + let y_hi_ps = map3!(_mm256_movehdup_ps, y_ps); + map3!(_mm256_castps_si256, y_hi_ps) + }; + + // All four pairwise multiplications let mul_ll = map3!(_mm256_mul_epu32, x, y); let mul_lh = map3!(_mm256_mul_epu32, x, y_hi); let mul_hl = map3!(_mm256_mul_epu32, x_hi, y); let mul_hh = map3!(_mm256_mul_epu32, x_hi, y_hi); - let mul_lh_lo = map3!(_mm256_slli_epi64::<32>, mul_lh); - let res_lo0_s = map3!(_mm256_xor_si256, mul_ll, rep sign_bit); - let mul_hl_lo = map3!(_mm256_slli_epi64::<32>, mul_hl); - let res_lo1_s = map3!(_mm256_add_epi64, res_lo0_s, mul_lh_lo); - let carry0 = map3!(_mm256_cmpgt_epi64, res_lo0_s, res_lo1_s); - let mul_lh_hi = map3!(_mm256_srli_epi64::<32>, mul_lh); - let res_lo2_s = map3!(_mm256_add_epi64, res_lo1_s, mul_hl_lo); - let carry1 = map3!(_mm256_cmpgt_epi64, res_lo1_s, res_lo2_s); - let mul_hl_hi = map3!(_mm256_srli_epi64::<32>, mul_hl); - let res_hi0 = map3!(_mm256_add_epi64, mul_hh, mul_lh_hi); - let res_hi1 = map3!(_mm256_add_epi64, res_hi0, mul_hl_hi); - let res_hi2 = map3!(_mm256_sub_epi64, res_hi1, carry0); - let res_hi3 = map3!(_mm256_sub_epi64, res_hi2, carry1); - (res_lo2_s, res_hi3) + + // Bignum addition + // Extract high 32 bits of mul_ll and add to mul_hl. This cannot overflow. + let mul_ll_hi = map3!(_mm256_srli_epi64::<32>, mul_ll); + let t0 = map3!(_mm256_add_epi64, mul_hl, mul_ll_hi); + // Extract low 32 bits of t0 and add to mul_lh. 
Again, this cannot overflow. + // Also, extract high 32 bits of t0 and add to mul_hh. + let t0_lo = map3!(_mm256_and_si256, t0, rep epsilon); + let t0_hi = map3!(_mm256_srli_epi64::<32>, t0); + let t1 = map3!(_mm256_add_epi64, mul_lh, t0_lo); + let t2 = map3!(_mm256_add_epi64, mul_hh, t0_hi); + // Lastly, extract the high 32 bits of t1 and add to t2. + let t1_hi = map3!(_mm256_srli_epi64::<32>, t1); + let res_hi = map3!(_mm256_add_epi64, t2, t1_hi); + + // Form res_lo by combining the low half of mul_ll with the low half of t1 (shifted into high + // position). + let t1_lo = { + let t1_ps = map3!(_mm256_castsi256_ps, t1); + let t1_lo_ps = map3!(_mm256_moveldup_ps, t1_ps); + map3!(_mm256_castps_si256, t1_lo_ps) + }; + let res_lo = map3!(_mm256_blend_epi32::<0xaa>, mul_ll, t1_lo); + + (res_lo, res_hi) +} + +/// Addition, where the second operand is `0 <= y < 0xffffffff00000001`. +#[inline(always)] +unsafe fn add_small( + x_s: (__m256i, __m256i, __m256i), + y: (__m256i, __m256i, __m256i), +) -> (__m256i, __m256i, __m256i) { + let res_wrapped_s = map3!(_mm256_add_epi64, x_s, y); + let mask = map3!(_mm256_cmpgt_epi32, x_s, res_wrapped_s); + let wrapback_amt = map3!(_mm256_srli_epi64::<32>, mask); // EPSILON if overflowed else 0. + let res_s = map3!(_mm256_add_epi64, res_wrapped_s, wrapback_amt); + res_s +} + +#[inline(always)] +unsafe fn maybe_adj_sub(res_wrapped_s: __m256i, mask: __m256i) -> __m256i { + // The subtraction is very unlikely to overflow so we're best off branching. + // The even u32s in `mask` are meaningless, so we want to ignore them. `_mm256_testz_pd` + // branches depending on the sign bit of double-precision (64-bit) floats. Bit cast `mask` to + // floating-point (this is free). + let mask_pd = _mm256_castsi256_pd(mask); + // `_mm256_testz_pd(mask_pd, mask_pd) == 1` iff all sign bits are 0, meaning that underflow + // did not occur for any of the vector elements. + if _mm256_testz_pd(mask_pd, mask_pd) == 1 { + res_wrapped_s + } else { + branch_hint(); + // Highly unlikely: underflow did occur. Find adjustment per element and apply it. + let adj_amount = _mm256_srli_epi64::<32>(mask); // EPSILON if underflow. + _mm256_sub_epi64(res_wrapped_s, adj_amount) + } +} + +/// Addition, where the second operand is much smaller than `0xffffffff00000001`. 
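/// Concretely this is a subtraction: it computes `x_s - y`, keeping the result in the sign-shifted
/// domain, and `maybe_adj_sub` subtracts EPSILON from the wrapped result in the (rare) case that
/// the 64-bit subtraction underflows; `y` is assumed to be far smaller than the field order.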
+#[inline(always)] +unsafe fn sub_tiny( + x_s: (__m256i, __m256i, __m256i), + y: (__m256i, __m256i, __m256i), +) -> (__m256i, __m256i, __m256i) { + let res_wrapped_s = map3!(_mm256_sub_epi64, x_s, y); + let mask = map3!(_mm256_cmpgt_epi32, res_wrapped_s, x_s); + let res_s = map3!(maybe_adj_sub, res_wrapped_s, mask); + res_s } #[inline(always)] unsafe fn reduce3( - (x_lo_s, x_hi): ((__m256i, __m256i, __m256i), (__m256i, __m256i, __m256i)), + (lo0, hi0): ((__m256i, __m256i, __m256i), (__m256i, __m256i, __m256i)), ) -> (__m256i, __m256i, __m256i) { - let epsilon = _mm256_set1_epi64x(0xffffffff); let sign_bit = _mm256_set1_epi64x(i64::MIN); - let x_hi_hi = map3!(_mm256_srli_epi64::<32>, x_hi); - let res0_s = map3!(_mm256_sub_epi64, x_lo_s, x_hi_hi); - let wraparound_mask0 = map3!(_mm256_cmpgt_epi32, res0_s, x_lo_s); - let wraparound_adj0 = map3!(_mm256_srli_epi64::<32>, wraparound_mask0); - let x_hi_lo = map3!(_mm256_and_si256, x_hi, rep epsilon); - let x_hi_lo_shifted = map3!(_mm256_slli_epi64::<32>, x_hi); - let res1_s = map3!(_mm256_sub_epi64, res0_s, wraparound_adj0); - let x_hi_lo_mul_epsilon = map3!(_mm256_sub_epi64, x_hi_lo_shifted, x_hi_lo); - let res2_s = map3!(_mm256_add_epi64, res1_s, x_hi_lo_mul_epsilon); - let wraparound_mask2 = map3!(_mm256_cmpgt_epi32, res1_s, res2_s); - let wraparound_adj2 = map3!(_mm256_srli_epi64::<32>, wraparound_mask2); - let res3_s = map3!(_mm256_add_epi64, res2_s, wraparound_adj2); - let res3 = map3!(_mm256_xor_si256, res3_s, rep sign_bit); - res3 + let epsilon = _mm256_set1_epi64x(0xffffffff); + let lo0_s = map3!(_mm256_xor_si256, lo0, rep sign_bit); + let hi_hi0 = map3!(_mm256_srli_epi64::<32>, hi0); + let lo1_s = sub_tiny(lo0_s, hi_hi0); + let t1 = map3!(_mm256_mul_epu32, hi0, rep epsilon); + let lo2_s = add_small(lo1_s, t1); + let lo2 = map3!(_mm256_xor_si256, lo2_s, rep sign_bit); + lo2 } #[inline(always)] From f48d8c92bd3fd83e5bd0c24ba68e17031e3e15e4 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Thu, 6 Jan 2022 23:04:33 -0800 Subject: [PATCH 006/143] Finish making FRI generic (#422) * Finish making FRI generic * fix quotient poly factor * Bound quotient degree factor --- plonky2/src/bin/bench_recursion.rs | 19 +------ plonky2/src/fri/mod.rs | 9 ++++ plonky2/src/fri/oracle.rs | 8 +-- plonky2/src/fri/proof.rs | 22 ++++---- plonky2/src/fri/recursive_verifier.rs | 70 ++++++++++--------------- plonky2/src/fri/verifier.rs | 34 +++++------- plonky2/src/plonk/circuit_builder.rs | 10 ++-- plonky2/src/plonk/circuit_data.rs | 9 ++-- plonky2/src/plonk/get_challenges.rs | 4 +- plonky2/src/plonk/proof.rs | 19 +++---- plonky2/src/plonk/prover.rs | 2 +- plonky2/src/plonk/recursive_verifier.rs | 6 +-- plonky2/src/plonk/verifier.rs | 4 +- 13 files changed, 91 insertions(+), 125 deletions(-) diff --git a/plonky2/src/bin/bench_recursion.rs b/plonky2/src/bin/bench_recursion.rs index cb8eaca9..f8566b1b 100644 --- a/plonky2/src/bin/bench_recursion.rs +++ b/plonky2/src/bin/bench_recursion.rs @@ -1,8 +1,6 @@ use anyhow::Result; use env_logger::Env; use log::info; -use plonky2::fri::reduction_strategies::FriReductionStrategy; -use plonky2::fri::FriConfig; use plonky2::hash::hashing::SPONGE_WIDTH; use plonky2::iop::witness::PartialWitness; use plonky2::plonk::circuit_builder::CircuitBuilder; @@ -20,22 +18,7 @@ fn main() -> Result<()> { } fn bench_prove, const D: usize>() -> Result<()> { - let config = CircuitConfig { - num_wires: 126, - num_routed_wires: 33, - constant_gate_size: 6, - use_base_arithmetic_gate: false, - security_bits: 128, - num_challenges: 3, - 
zero_knowledge: false, - fri_config: FriConfig { - rate_bits: 3, - cap_height: 1, - proof_of_work_bits: 15, - reduction_strategy: FriReductionStrategy::ConstantArityBits(3, 5), - num_query_rounds: 35, - }, - }; + let config = CircuitConfig::standard_recursion_config(); let inputs = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); diff --git a/plonky2/src/fri/mod.rs b/plonky2/src/fri/mod.rs index d59310de..573a189b 100644 --- a/plonky2/src/fri/mod.rs +++ b/plonky2/src/fri/mod.rs @@ -24,6 +24,12 @@ pub struct FriConfig { pub num_query_rounds: usize, } +impl FriConfig { + pub fn rate(&self) -> f64 { + 1.0 / ((1 << self.rate_bits) as f64) + } +} + /// FRI parameters, including generated parameters which are specific to an instance size, in /// contrast to `FriConfig` which is user-specified and independent of instance size. #[derive(Debug)] @@ -31,6 +37,9 @@ pub struct FriParams { /// User-specified FRI configuration. pub config: FriConfig, + /// Whether to use a hiding variant of Merkle trees (where random salts are added to leaves). + pub hiding: bool, + /// The degree of the purported codeword, measured in bits. pub degree_bits: usize, diff --git a/plonky2/src/fri/oracle.rs b/plonky2/src/fri/oracle.rs index ee391953..2d42e899 100644 --- a/plonky2/src/fri/oracle.rs +++ b/plonky2/src/fri/oracle.rs @@ -8,10 +8,10 @@ use rayon::prelude::*; use crate::fri::proof::FriProof; use crate::fri::prover::fri_proof; use crate::fri::structure::{FriBatchInfo, FriInstanceInfo}; +use crate::fri::FriParams; use crate::hash::hash_types::RichField; use crate::hash::merkle_tree::MerkleTree; use crate::iop::challenger::Challenger; -use crate::plonk::circuit_data::CommonCircuitData; use crate::plonk::config::GenericConfig; use crate::timed; use crate::util::reducing::ReducingFactor; @@ -131,7 +131,7 @@ impl, C: GenericConfig, const D: usize> instance: &FriInstanceInfo, oracles: &[&Self], challenger: &mut Challenger, - common_data: &CommonCircuitData, + fri_params: &FriParams, timing: &mut TimingTree, ) -> FriProof { assert!(D > 1, "Not implemented for D=1."); @@ -155,7 +155,7 @@ impl, C: GenericConfig, const D: usize> final_poly += quotient; } - let lde_final_poly = final_poly.lde(common_data.config.fri_config.rate_bits); + let lde_final_poly = final_poly.lde(fri_params.config.rate_bits); let lde_final_values = timed!( timing, &format!("perform final FFT {}", lde_final_poly.len()), @@ -170,7 +170,7 @@ impl, C: GenericConfig, const D: usize> lde_final_poly, lde_final_values, challenger, - &common_data.fri_params, + fri_params, timing, ); diff --git a/plonky2/src/fri/proof.rs b/plonky2/src/fri/proof.rs index 720ae378..ff81c2c5 100644 --- a/plonky2/src/fri/proof.rs +++ b/plonky2/src/fri/proof.rs @@ -5,6 +5,7 @@ use plonky2_field::extension_field::{flatten, unflatten, Extendable}; use plonky2_field::polynomial::PolynomialCoeffs; use serde::{Deserialize, Serialize}; +use crate::fri::FriParams; use crate::gadgets::polynomial::PolynomialCoeffsExtTarget; use crate::hash::hash_types::MerkleCapTarget; use crate::hash::hash_types::RichField; @@ -13,7 +14,6 @@ use crate::hash::merkle_tree::MerkleCap; use crate::hash::path_compression::{compress_merkle_proofs, decompress_merkle_proofs}; use crate::iop::ext_target::ExtensionTarget; use crate::iop::target::Target; -use crate::plonk::circuit_data::CommonCircuitData; use crate::plonk::config::{GenericConfig, Hasher}; use crate::plonk::plonk_common::salt_size; use crate::plonk::proof::{FriInferredElements, ProofChallenges}; @@ -136,7 +136,7 @@ impl, H: 
Hasher, const D: usize> FriProof>( self, indices: &[usize], - common_data: &CommonCircuitData, + params: &FriParams, ) -> CompressedFriProof { let FriProof { commit_phase_merkle_caps, @@ -145,8 +145,8 @@ impl, H: Hasher, const D: usize> FriProof, H: Hasher, const D: usize> CompressedFriPr self, challenges: &ProofChallenges, fri_inferred_elements: FriInferredElements, - common_data: &CommonCircuitData, + params: &FriParams, ) -> FriProof { let CompressedFriProof { commit_phase_merkle_caps, @@ -257,8 +257,8 @@ impl, H: Hasher, const D: usize> CompressedFriPr .. } = challenges; let mut fri_inferred_elements = fri_inferred_elements.0.into_iter(); - let cap_height = common_data.config.fri_config.cap_height; - let reduction_arity_bits = &common_data.fri_params.reduction_arity_bits; + let cap_height = params.config.cap_height; + let reduction_arity_bits = ¶ms.reduction_arity_bits; let num_reductions = reduction_arity_bits.len(); let num_initial_trees = query_round_proofs .initial_trees_proofs @@ -275,7 +275,7 @@ impl, H: Hasher, const D: usize> CompressedFriPr let mut steps_indices = vec![vec![]; num_reductions]; let mut steps_evals = vec![vec![]; num_reductions]; let mut steps_proofs = vec![vec![]; num_reductions]; - let height = common_data.degree_bits + common_data.config.fri_config.rate_bits; + let height = params.degree_bits + params.config.rate_bits; let heights = reduction_arity_bits .iter() .scan(height, |acc, &bits| { @@ -285,10 +285,8 @@ impl, H: Hasher, const D: usize> CompressedFriPr .collect::>(); // Holds the `evals` vectors that have already been reconstructed at each reduction depth. - let mut evals_by_depth = vec![ - HashMap::>::new(); - common_data.fri_params.reduction_arity_bits.len() - ]; + let mut evals_by_depth = + vec![HashMap::>::new(); params.reduction_arity_bits.len()]; for &(mut index) in indices { let initial_trees_proof = query_round_proofs.initial_trees_proofs[&index].clone(); for (i, (leaves_data, proof)) in diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index 447b045e..ba6ebcba 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -4,7 +4,7 @@ use plonky2_util::{log2_strict, reverse_index_bits_in_place}; use crate::fri::proof::{FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget}; use crate::fri::structure::{FriBatchInfoTarget, FriInstanceInfoTarget, FriOpeningsTarget}; -use crate::fri::FriConfig; +use crate::fri::{FriConfig, FriParams}; use crate::gadgets::interpolation::InterpolationGate; use crate::gates::gate::Gate; use crate::gates::interpolation::HighDegreeInterpolationGate; @@ -16,7 +16,6 @@ use crate::iop::challenger::RecursiveChallenger; use crate::iop::ext_target::{flatten_target, ExtensionTarget}; use crate::iop::target::{BoolTarget, Target}; use crate::plonk::circuit_builder::CircuitBuilder; -use crate::plonk::circuit_data::{CircuitConfig, CommonCircuitData}; use crate::plonk::config::{AlgebraicConfig, AlgebraicHasher, GenericConfig}; use crate::plonk::proof::OpeningSetTarget; use crate::util::reducing::ReducingFactorTarget; @@ -32,7 +31,6 @@ impl, const D: usize> CircuitBuilder { arity_bits: usize, evals: &[ExtensionTarget], beta: ExtensionTarget, - common_data: &CommonCircuitData, ) -> ExtensionTarget { let arity = 1 << arity_bits; debug_assert_eq!(evals.len(), arity); @@ -51,7 +49,7 @@ impl, const D: usize> CircuitBuilder { // The answer is gotten by interpolating {(x*g^i, P(x*g^i))} and evaluating at beta. 
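// (Written out via Lagrange interpolation: the returned value is
//  sum_i P(x*g^i) * prod_{j != i} (beta - x*g^j) / (x*g^i - x*g^j), with i, j ranging over 0..arity.)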
// `HighDegreeInterpolationGate` has degree `arity`, so we use the low-degree gate if // the arity is too large. - if arity > common_data.quotient_degree_factor { + if arity > self.config.max_quotient_degree_factor { self.interpolate_coset::>( arity_bits, coset_start, @@ -71,17 +69,13 @@ impl, const D: usize> CircuitBuilder { /// Make sure we have enough wires and routed wires to do the FRI checks efficiently. This check /// isn't required -- without it we'd get errors elsewhere in the stack -- but just gives more /// helpful errors. - fn check_recursion_config>( - &self, - max_fri_arity_bits: usize, - common_data: &CommonCircuitData, - ) { + fn check_recursion_config>(&self, max_fri_arity_bits: usize) { let random_access = RandomAccessGate::::new_from_config( &self.config, max_fri_arity_bits.max(self.config.fri_config.cap_height), ); let (interpolation_wires, interpolation_routed_wires) = - if 1 << max_fri_arity_bits > common_data.quotient_degree_factor { + if 1 << max_fri_arity_bits > self.config.max_quotient_degree_factor { let gate = LowDegreeInterpolationGate::::new(max_fri_arity_bits); (gate.num_wires(), gate.num_routed_wires()) } else { @@ -133,22 +127,20 @@ impl, const D: usize> CircuitBuilder { initial_merkle_caps: &[MerkleCapTarget], proof: &FriProofTarget, challenger: &mut RecursiveChallenger, - common_data: &CommonCircuitData, + params: &FriParams, ) { - let config = &common_data.config; - - if let Some(max_arity_bits) = common_data.fri_params.max_arity_bits() { - self.check_recursion_config(max_arity_bits, common_data); + if let Some(max_arity_bits) = params.max_arity_bits() { + self.check_recursion_config::(max_arity_bits); } debug_assert_eq!( - common_data.fri_params.final_poly_len(), + params.final_poly_len(), proof.final_poly.len(), "Final polynomial has wrong degree." ); // Size of the LDE domain. - let n = common_data.lde_size(); + let n = params.lde_size(); challenger.observe_opening_set(os); @@ -172,12 +164,12 @@ impl, const D: usize> CircuitBuilder { with_context!( self, "check PoW", - self.fri_verify_proof_of_work::(proof, challenger, &config.fri_config) + self.fri_verify_proof_of_work::(proof, challenger, ¶ms.config) ); // Check that parameters are coherent. debug_assert_eq!( - config.fri_config.num_query_rounds, + params.config.num_query_rounds, proof.query_round_proofs.len(), "Number of query rounds does not match config." 
); @@ -203,7 +195,7 @@ impl, const D: usize> CircuitBuilder { self, level, &format!("verify one (of {}) query rounds", num_queries), - self.fri_verifier_query_round( + self.fri_verifier_query_round::( instance, alpha, &precomputed_reduced_evals, @@ -213,7 +205,7 @@ impl, const D: usize> CircuitBuilder { n, &betas, round_proof, - common_data, + params, ) ); } @@ -253,15 +245,14 @@ impl, const D: usize> CircuitBuilder { alpha: ExtensionTarget, subgroup_x: Target, precomputed_reduced_evals: &PrecomputedReducedOpeningsTarget, - common_data: &CommonCircuitData, + params: &FriParams, ) -> ExtensionTarget { assert!(D > 1, "Not implemented for D=1."); - let config = &common_data.config; - let degree_log = common_data.degree_bits; + let degree_log = params.degree_bits; debug_assert_eq!( degree_log, - common_data.config.fri_config.cap_height + proof.evals_proofs[0].1.siblings.len() - - config.fri_config.rate_bits + params.config.cap_height + proof.evals_proofs[0].1.siblings.len() + - params.config.rate_bits ); let subgroup_x = self.convert_to_ext(subgroup_x); let mut alpha = ReducingFactorTarget::new(alpha); @@ -277,7 +268,7 @@ impl, const D: usize> CircuitBuilder { .iter() .map(|p| { let poly_blinding = instance.oracles[p.oracle_index].blinding; - let salted = config.zero_knowledge && poly_blinding; + let salted = params.hiding && poly_blinding; proof.unsalted_eval(p.oracle_index, p.polynomial_index, salted) }) .collect_vec(); @@ -302,19 +293,18 @@ impl, const D: usize> CircuitBuilder { n: usize, betas: &[ExtensionTarget], round_proof: &FriQueryRoundTarget, - common_data: &CommonCircuitData, + params: &FriParams, ) { let n_log = log2_strict(n); // Note that this `low_bits` decomposition permits non-canonical binary encodings. Here we // verify that this has a negligible impact on soundness error. - Self::assert_noncanonical_indices_ok(&common_data.config); + Self::assert_noncanonical_indices_ok(¶ms.config); let x_index = challenger.get_challenge(self); let mut x_index_bits = self.low_bits(x_index, n_log, F::BITS); - let cap_index = self.le_sum( - x_index_bits[x_index_bits.len() - common_data.config.fri_config.cap_height..].iter(), - ); + let cap_index = + self.le_sum(x_index_bits[x_index_bits.len() - params.config.cap_height..].iter()); with_context!( self, "check FRI initial proof", @@ -340,22 +330,17 @@ impl, const D: usize> CircuitBuilder { let mut old_eval = with_context!( self, "combine initial oracles", - self.fri_combine_initial( + self.fri_combine_initial::( instance, &round_proof.initial_trees_proof, alpha, subgroup_x, precomputed_reduced_evals, - common_data, + params, ) ); - for (i, &arity_bits) in common_data - .fri_params - .reduction_arity_bits - .iter() - .enumerate() - { + for (i, &arity_bits) in params.reduction_arity_bits.iter().enumerate() { let evals = &round_proof.steps[i].evals; // Split x_index into the index of the coset x is in, and the index of x within that coset. @@ -370,13 +355,12 @@ impl, const D: usize> CircuitBuilder { old_eval = with_context!( self, "infer evaluation using interpolation", - self.compute_evaluation( + self.compute_evaluation::( subgroup_x, x_index_within_coset_bits, arity_bits, evals, betas[i], - common_data ) ); @@ -423,7 +407,7 @@ impl, const D: usize> CircuitBuilder { /// Thus ambiguous elements contribute a negligible amount to soundness error. /// /// Here we compare the probabilities as a sanity check, to verify the claim above. 
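/// (Worked numbers, assuming the Goldilocks field used throughout this crate: ORDER = 2^64 - 2^32 + 1,
/// so `num_ambiguous_elems` = 2^32 - 1 and `p_ambiguous` is roughly 2^-32, while `query_error` is the
/// rate, e.g. 2^-3 for `rate_bits = 3`; the ambiguity contribution is negligible by comparison.)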
- fn assert_noncanonical_indices_ok(config: &CircuitConfig) { + fn assert_noncanonical_indices_ok(config: &FriConfig) { let num_ambiguous_elems = u64::MAX - F::ORDER + 1; let query_error = config.rate(); let p_ambiguous = (num_ambiguous_elems as f64) / (F::ORDER as f64); diff --git a/plonky2/src/fri/verifier.rs b/plonky2/src/fri/verifier.rs index 34f7a3dd..e95cb80a 100644 --- a/plonky2/src/fri/verifier.rs +++ b/plonky2/src/fri/verifier.rs @@ -6,11 +6,10 @@ use plonky2_util::{log2_strict, reverse_index_bits_in_place}; use crate::fri::proof::{FriInitialTreeProof, FriProof, FriQueryRound}; use crate::fri::structure::{FriBatchInfo, FriInstanceInfo, FriOpenings}; -use crate::fri::FriConfig; +use crate::fri::{FriConfig, FriParams}; use crate::hash::hash_types::RichField; use crate::hash::merkle_proofs::verify_merkle_proof; use crate::hash::merkle_tree::MerkleCap; -use crate::plonk::circuit_data::CommonCircuitData; use crate::plonk::config::{GenericConfig, Hasher}; use crate::plonk::proof::{OpeningSet, ProofChallenges}; use crate::util::reducing::ReducingFactor; @@ -69,23 +68,22 @@ pub(crate) fn verify_fri_proof< challenges: &ProofChallenges, initial_merkle_caps: &[MerkleCap], proof: &FriProof, - common_data: &CommonCircuitData, + params: &FriParams, ) -> Result<()> { - let config = &common_data.config; ensure!( - common_data.fri_params.final_poly_len() == proof.final_poly.len(), + params.final_poly_len() == proof.final_poly.len(), "Final polynomial has wrong degree." ); // Size of the LDE domain. - let n = common_data.lde_size(); + let n = params.lde_size(); // Check PoW. - fri_verify_proof_of_work(challenges.fri_pow_response, &config.fri_config)?; + fri_verify_proof_of_work(challenges.fri_pow_response, ¶ms.config)?; // Check that parameters are coherent. ensure!( - config.fri_config.num_query_rounds == proof.query_round_proofs.len(), + params.config.num_query_rounds == proof.query_round_proofs.len(), "Number of query rounds does not match config." ); @@ -105,7 +103,7 @@ pub(crate) fn verify_fri_proof< x_index, n, round_proof, - common_data, + params, )?; } @@ -134,9 +132,8 @@ pub(crate) fn fri_combine_initial< alpha: F::Extension, subgroup_x: F, precomputed_reduced_evals: &PrecomputedReducedOpenings, - common_data: &CommonCircuitData, + params: &FriParams, ) -> F::Extension { - let config = &common_data.config; assert!(D > 1, "Not implemented for D=1."); let subgroup_x = F::Extension::from_basefield(subgroup_x); let mut alpha = ReducingFactor::new(alpha); @@ -152,7 +149,7 @@ pub(crate) fn fri_combine_initial< .iter() .map(|p| { let poly_blinding = instance.oracles[p.oracle_index].blinding; - let salted = config.zero_knowledge && poly_blinding; + let salted = params.hiding && poly_blinding; proof.unsalted_eval(p.oracle_index, p.polynomial_index, salted) }) .map(F::Extension::from_basefield); @@ -179,7 +176,7 @@ fn fri_verifier_query_round< mut x_index: usize, n: usize, round_proof: &FriQueryRound, - common_data: &CommonCircuitData, + params: &FriParams, ) -> Result<()> { fri_verify_initial_proof::( x_index, @@ -193,21 +190,16 @@ fn fri_verifier_query_round< // old_eval is the last derived evaluation; it will be checked for consistency with its // committed "parent" value in the next iteration. 
- let mut old_eval = fri_combine_initial( + let mut old_eval = fri_combine_initial::( instance, &round_proof.initial_trees_proof, challenges.fri_alpha, subgroup_x, precomputed_reduced_evals, - common_data, + params, ); - for (i, &arity_bits) in common_data - .fri_params - .reduction_arity_bits - .iter() - .enumerate() - { + for (i, &arity_bits) in params.reduction_arity_bits.iter().enumerate() { let arity = 1 << arity_bits; let evals = &round_proof.steps[i].evals; diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index ca9f5a6b..d83e2b9f 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -377,6 +377,7 @@ impl, const D: usize> CircuitBuilder { ); FriParams { config: fri_config.clone(), + hiding: self.config.zero_knowledge, degree_bits, reduction_arity_bits, } @@ -588,6 +589,7 @@ impl, const D: usize> CircuitBuilder { pub fn build>(mut self) -> CircuitData { let mut timing = TimingTree::new("preprocess", Level::Trace); let start = Instant::now(); + let rate_bits = self.config.fri_config.rate_bits; self.fill_batched_gates(); @@ -620,14 +622,16 @@ impl, const D: usize> CircuitBuilder { let gates = self.gates.iter().cloned().collect(); let (gate_tree, max_filtered_constraint_degree, num_constants) = Tree::from_gates(gates); + let prefixed_gates = PrefixedGate::from_tree(gate_tree); + // `quotient_degree_factor` has to be between `max_filtered_constraint_degree-1` and `1< f64 { - 1.0 / ((1 << self.fri_config.rate_bits) as f64) - } - pub fn num_advice_wires(&self) -> usize { self.num_wires - self.num_routed_wires } @@ -70,6 +68,7 @@ impl CircuitConfig { security_bits: 100, num_challenges: 2, zero_knowledge: false, + max_quotient_degree_factor: 8, fri_config: FriConfig { rate_bits: 3, cap_height: 4, diff --git a/plonky2/src/plonk/get_challenges.rs b/plonky2/src/plonk/get_challenges.rs index bbb91d79..c340cef9 100644 --- a/plonky2/src/plonk/get_challenges.rs +++ b/plonky2/src/plonk/get_challenges.rs @@ -197,7 +197,7 @@ impl, C: GenericConfig, const D: usize> for &(mut x_index) in fri_query_indices { let mut subgroup_x = F::MULTIPLICATIVE_GROUP_GENERATOR * F::primitive_root_of_unity(log_n).exp_u64(reverse_bits(x_index, log_n) as u64); - let mut old_eval = fri_combine_initial( + let mut old_eval = fri_combine_initial::( &common_data.get_fri_instance(*plonk_zeta), &self .proof @@ -207,7 +207,7 @@ impl, C: GenericConfig, const D: usize> *fri_alpha, subgroup_x, &precomputed_reduced_evals, - common_data, + &common_data.fri_params, ); for (i, &arity_bits) in common_data .fri_params diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index c2dd47f3..cd49de89 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -7,6 +7,7 @@ use crate::fri::proof::{CompressedFriProof, FriProof, FriProofTarget}; use crate::fri::structure::{ FriOpeningBatch, FriOpeningBatchTarget, FriOpenings, FriOpeningsTarget, }; +use crate::fri::FriParams; use crate::hash::hash_types::{MerkleCapTarget, RichField}; use crate::hash::merkle_tree::MerkleCap; use crate::iop::ext_target::ExtensionTarget; @@ -41,11 +42,7 @@ pub struct ProofTarget { impl, C: GenericConfig, const D: usize> Proof { /// Compress the proof. 
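/// (This defers to `FriProof::compress` for the FRI opening proof, which, via `compress_merkle_proofs`
/// from `hash::path_compression`, path-compresses the Merkle proofs for the queried positions.)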
- pub fn compress( - self, - indices: &[usize], - common_data: &CommonCircuitData, - ) -> CompressedProof { + pub fn compress(self, indices: &[usize], params: &FriParams) -> CompressedProof { let Proof { wires_cap, plonk_zs_partial_products_cap, @@ -59,7 +56,7 @@ impl, C: GenericConfig, const D: usize> P plonk_zs_partial_products_cap, quotient_polys_cap, openings, - opening_proof: opening_proof.compress(indices, common_data), + opening_proof: opening_proof.compress::(indices, params), } } } @@ -83,7 +80,7 @@ impl, C: GenericConfig, const D: usize> common_data: &CommonCircuitData, ) -> anyhow::Result> { let indices = self.fri_query_indices(common_data)?; - let compressed_proof = self.proof.compress(&indices, common_data); + let compressed_proof = self.proof.compress(&indices, &common_data.fri_params); Ok(CompressedProofWithPublicInputs { public_inputs: self.public_inputs, proof: compressed_proof, @@ -136,7 +133,7 @@ impl, C: GenericConfig, const D: usize> self, challenges: &ProofChallenges, fri_inferred_elements: FriInferredElements, - common_data: &CommonCircuitData, + params: &FriParams, ) -> Proof { let CompressedProof { wires_cap, @@ -151,7 +148,7 @@ impl, C: GenericConfig, const D: usize> plonk_zs_partial_products_cap, quotient_polys_cap, openings, - opening_proof: opening_proof.decompress(challenges, fri_inferred_elements, common_data), + opening_proof: opening_proof.decompress::(challenges, fri_inferred_elements, params), } } } @@ -178,7 +175,7 @@ impl, C: GenericConfig, const D: usize> let fri_inferred_elements = self.get_inferred_elements(&challenges, common_data); let decompressed_proof = self.proof - .decompress(&challenges, fri_inferred_elements, common_data); + .decompress(&challenges, fri_inferred_elements, &common_data.fri_params); Ok(ProofWithPublicInputs { public_inputs: self.public_inputs, proof: decompressed_proof, @@ -194,7 +191,7 @@ impl, C: GenericConfig, const D: usize> let fri_inferred_elements = self.get_inferred_elements(&challenges, common_data); let decompressed_proof = self.proof - .decompress(&challenges, fri_inferred_elements, common_data); + .decompress(&challenges, fri_inferred_elements, &common_data.fri_params); verify_with_challenges( ProofWithPublicInputs { public_inputs: self.public_inputs, diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index abd2c552..d371d7b7 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -209,7 +209,7 @@ pub(crate) fn prove, C: GenericConfig, co "ient_polys_commitment, ], &mut challenger, - common_data, + &common_data.fri_params, timing, ) ); diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index ba653e22..4e8583d3 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -30,7 +30,7 @@ impl, const D: usize> CircuitBuilder { let public_inputs_hash = &self.hash_n_to_hash::(public_inputs, true); - let mut challenger = RecursiveChallenger::new(self); + let mut challenger = RecursiveChallenger::::new(self); let (betas, gammas, alphas, zeta) = with_context!(self, "observe proof and generates challenges", { @@ -111,13 +111,13 @@ impl, const D: usize> CircuitBuilder { with_context!( self, "verify FRI proof", - self.verify_fri_proof( + self.verify_fri_proof::( &fri_instance, &proof.openings, merkle_caps, &proof.opening_proof, &mut challenger, - inner_common_data, + &inner_common_data.fri_params, ) ); } diff --git a/plonky2/src/plonk/verifier.rs b/plonky2/src/plonk/verifier.rs index 3dca02a7..e612a1c9 
100644 --- a/plonky2/src/plonk/verifier.rs +++ b/plonky2/src/plonk/verifier.rs @@ -85,13 +85,13 @@ pub(crate) fn verify_with_challenges< proof.quotient_polys_cap, ]; - verify_fri_proof( + verify_fri_proof::( &common_data.get_fri_instance(challenges.plonk_zeta), &proof.openings, &challenges, merkle_caps, &proof.opening_proof, - common_data, + &common_data.fri_params, )?; Ok(()) From 3fc5ff4fff3ce1ba395853ef6e6d5e8fa36443fb Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Fri, 7 Jan 2022 10:24:54 -0800 Subject: [PATCH 007/143] Remove old binaries (#423) FFTs became proper benches, while recursion became tests. We might consider having either bins or benches for recursion in the future, but the code in this old recursion bin won't be useful, so might as well delete it for now. --- plonky2/Cargo.toml | 2 +- plonky2/src/bin/bench_ldes.rs | 34 ----------------------- plonky2/src/bin/bench_recursion.rs | 43 ------------------------------ 3 files changed, 1 insertion(+), 78 deletions(-) delete mode 100644 plonky2/src/bin/bench_ldes.rs delete mode 100644 plonky2/src/bin/bench_recursion.rs diff --git a/plonky2/Cargo.toml b/plonky2/Cargo.toml index 0dbfa2d7..54cf5c1f 100644 --- a/plonky2/Cargo.toml +++ b/plonky2/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/mir-protocol/plonky2" keywords = ["cryptography", "SNARK", "FRI"] categories = ["cryptography"] edition = "2021" -default-run = "bench_recursion" +default-run = "generate_constants" [dependencies] plonky2_field = { path = "../field" } diff --git a/plonky2/src/bin/bench_ldes.rs b/plonky2/src/bin/bench_ldes.rs deleted file mode 100644 index 57f31290..00000000 --- a/plonky2/src/bin/bench_ldes.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::time::Instant; - -use plonky2_field::field_types::Field; -use plonky2_field::goldilocks_field::GoldilocksField; -use plonky2_field::polynomial::PolynomialValues; -use rayon::prelude::*; - -type F = GoldilocksField; - -// This is an estimate of how many LDEs the prover will compute. The biggest component, 86, comes -// from wire polynomials which "store" the outputs of S-boxes in our Poseidon gate. -const NUM_LDES: usize = 8 + 8 + 3 + 86 + 3 + 8; - -const DEGREE: usize = 1 << 14; - -const RATE_BITS: usize = 3; - -fn main() { - // We start with random polynomials. - let all_poly_values = (0..NUM_LDES) - .map(|_| PolynomialValues::new(F::rand_vec(DEGREE))) - .collect::>(); - - let start = Instant::now(); - - all_poly_values.into_par_iter().for_each(|poly_values| { - let start = Instant::now(); - let lde = poly_values.lde(RATE_BITS); - let duration = start.elapsed(); - println!("LDE took {:?}", duration); - println!("LDE result: {:?}", lde.values[0]); - }); - println!("All LDEs took {:?}", start.elapsed()); -} diff --git a/plonky2/src/bin/bench_recursion.rs b/plonky2/src/bin/bench_recursion.rs deleted file mode 100644 index f8566b1b..00000000 --- a/plonky2/src/bin/bench_recursion.rs +++ /dev/null @@ -1,43 +0,0 @@ -use anyhow::Result; -use env_logger::Env; -use log::info; -use plonky2::hash::hashing::SPONGE_WIDTH; -use plonky2::iop::witness::PartialWitness; -use plonky2::plonk::circuit_builder::CircuitBuilder; -use plonky2::plonk::circuit_data::CircuitConfig; -use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; - -fn main() -> Result<()> { - // Set the default log filter. This can be overridden using the `RUST_LOG` environment variable, - // e.g. `RUST_LOG=debug`. 
- // We default to debug for now, since there aren't many logs anyway, but we should probably - // change this to info or warn later. - env_logger::Builder::from_env(Env::default().default_filter_or("debug")).init(); - - bench_prove::() -} - -fn bench_prove, const D: usize>() -> Result<()> { - let config = CircuitConfig::standard_recursion_config(); - - let inputs = PartialWitness::new(); - let mut builder = CircuitBuilder::::new(config); - - let zero = builder.zero(); - let zero_ext = builder.zero_extension(); - - let mut state = [zero; SPONGE_WIDTH]; - for _ in 0..10000 { - state = builder.permute::<>::InnerHasher>(state); - } - - // Random other gates. - builder.add(zero, zero); - builder.add_extension(zero_ext, zero_ext); - - let circuit = builder.build::(); - let proof_with_pis = circuit.prove(inputs)?; - let proof_bytes = serde_cbor::to_vec(&proof_with_pis).unwrap(); - info!("Proof length: {} bytes", proof_bytes.len()); - circuit.verify(proof_with_pis) -} From 8ec78fc0c15c1ab3b42d151789085d917f126b51 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Sat, 8 Jan 2022 23:44:12 -0800 Subject: [PATCH 008/143] tweak len --- field/src/polynomial/mod.rs | 2 +- plonky2/src/fri/prover.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/field/src/polynomial/mod.rs b/field/src/polynomial/mod.rs index 1f777ca3..624e8212 100644 --- a/field/src/polynomial/mod.rs +++ b/field/src/polynomial/mod.rs @@ -27,7 +27,7 @@ impl PolynomialValues { } /// The number of values stored. - pub(crate) fn len(&self) -> usize { + pub fn len(&self) -> usize { self.values.len() } diff --git a/plonky2/src/fri/prover.rs b/plonky2/src/fri/prover.rs index 05135c91..43674ad1 100644 --- a/plonky2/src/fri/prover.rs +++ b/plonky2/src/fri/prover.rs @@ -24,8 +24,8 @@ pub fn fri_proof, C: GenericConfig, const fri_params: &FriParams, timing: &mut TimingTree, ) -> FriProof { - let n = lde_polynomial_values.values.len(); - assert_eq!(lde_polynomial_coeffs.coeffs.len(), n); + let n = lde_polynomial_values.len(); + assert_eq!(lde_polynomial_coeffs.len(), n); // Commit phase let (trees, final_coeffs) = timed!( From bde6114428b3107d304fb54f6e7afb3bba4acc94 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Sun, 9 Jan 2022 08:33:12 -0800 Subject: [PATCH 009/143] Replace `AlgebraicConfig` with `GenericConfig` (#425) It works fine if we bound recursion methods with `C::Hasher: AlgebraicHasher`. This bound feels natural to me - it's like saying "the recursion methods assume the inner hasher has a circuit implementation". 
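For reference, a minimal sketch of the resulting pattern (illustrative only: the function name and
body are placeholders, the real signatures are in the diff below, and the imports assume the
`plonky2` and `plonky2_field` crates):

    use plonky2::hash::hash_types::RichField;
    use plonky2::plonk::config::{AlgebraicHasher, GenericConfig};
    use plonky2_field::extension_field::Extendable;

    fn requires_algebraic_hasher<F, C, const D: usize>()
    where
        F: RichField + Extendable<D>,
        C: GenericConfig<D, F = F>,
        C::Hasher: AlgebraicHasher<F>,
    {
        // Anything in here may assume C::Hasher has an in-circuit (algebraic) implementation,
        // e.g. it can drive a RecursiveChallenger or verify Merkle caps inside the circuit.
    }

Callers that only prove or verify natively keep using plain `GenericConfig` with no extra bound.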
--- plonky2/src/fri/recursive_verifier.rs | 14 +++++++++----- plonky2/src/plonk/config.rs | 23 ++--------------------- plonky2/src/plonk/recursive_verifier.rs | 25 ++++++++++++++----------- 3 files changed, 25 insertions(+), 37 deletions(-) diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index ba6ebcba..526456b6 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -16,7 +16,7 @@ use crate::iop::challenger::RecursiveChallenger; use crate::iop::ext_target::{flatten_target, ExtensionTarget}; use crate::iop::target::{BoolTarget, Target}; use crate::plonk::circuit_builder::CircuitBuilder; -use crate::plonk::config::{AlgebraicConfig, AlgebraicHasher, GenericConfig}; +use crate::plonk::config::{AlgebraicHasher, GenericConfig}; use crate::plonk::proof::OpeningSetTarget; use crate::util::reducing::ReducingFactorTarget; use crate::with_context; @@ -119,7 +119,7 @@ impl, const D: usize> CircuitBuilder { ); } - pub fn verify_fri_proof>( + pub fn verify_fri_proof>( &mut self, instance: &FriInstanceInfoTarget, // Openings of the PLONK polynomials. @@ -128,7 +128,9 @@ impl, const D: usize> CircuitBuilder { proof: &FriProofTarget, challenger: &mut RecursiveChallenger, params: &FriParams, - ) { + ) where + C::Hasher: AlgebraicHasher, + { if let Some(max_arity_bits) = params.max_arity_bits() { self.check_recursion_config::(max_arity_bits); } @@ -282,7 +284,7 @@ impl, const D: usize> CircuitBuilder { sum } - fn fri_verifier_query_round>( + fn fri_verifier_query_round>( &mut self, instance: &FriInstanceInfoTarget, alpha: ExtensionTarget, @@ -294,7 +296,9 @@ impl, const D: usize> CircuitBuilder { betas: &[ExtensionTarget], round_proof: &FriQueryRoundTarget, params: &FriParams, - ) { + ) where + C::Hasher: AlgebraicHasher, + { let n_log = log2_strict(n); // Note that this `low_bits` decomposition permits non-canonical binary encodings. Here we diff --git a/plonky2/src/plonk/config.rs b/plonky2/src/plonk/config.rs index 34f92f58..461a9573 100644 --- a/plonky2/src/plonk/config.rs +++ b/plonky2/src/plonk/config.rs @@ -66,29 +66,10 @@ pub trait GenericConfig: type InnerHasher: AlgebraicHasher; } -/// Configuration trait for "algebraic" configurations, i.e., those using an algebraic hash function -/// in Merkle trees. -/// Same as `GenericConfig` trait but with `InnerHasher: AlgebraicHasher`. -pub trait AlgebraicConfig: - Debug + Clone + Sync + Sized + Send + Eq + PartialEq -{ - type F: RichField + Extendable; - type FE: FieldExtension; - type Hasher: AlgebraicHasher; - type InnerHasher: AlgebraicHasher; -} - -impl, const D: usize> GenericConfig for A { - type F = >::F; - type FE = >::FE; - type Hasher = >::Hasher; - type InnerHasher = >::InnerHasher; -} - /// Configuration using Poseidon over the Goldilocks field. #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub struct PoseidonGoldilocksConfig; -impl AlgebraicConfig<2> for PoseidonGoldilocksConfig { +impl GenericConfig<2> for PoseidonGoldilocksConfig { type F = GoldilocksField; type FE = QuadraticExtension; type Hasher = PoseidonHash; @@ -98,7 +79,7 @@ impl AlgebraicConfig<2> for PoseidonGoldilocksConfig { /// Configuration using GMiMC over the Goldilocks field. 
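/// (Typical selection in downstream code, sketched: `const D: usize = 2; type C = GMiMCGoldilocksConfig;`,
/// after which `<C as GenericConfig<D>>::F` is the Goldilocks base field.)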
 #[derive(Debug, Copy, Clone, Eq, PartialEq)]
 pub struct GMiMCGoldilocksConfig;
-impl AlgebraicConfig<2> for GMiMCGoldilocksConfig {
+impl GenericConfig<2> for GMiMCGoldilocksConfig {
     type F = GoldilocksField;
     type FE = QuadraticExtension;
     type Hasher = GMiMCHash;
diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs
index 4e8583d3..38024eea 100644
--- a/plonky2/src/plonk/recursive_verifier.rs
+++ b/plonky2/src/plonk/recursive_verifier.rs
@@ -4,7 +4,7 @@ use crate::hash::hash_types::{HashOutTarget, RichField};
 use crate::iop::challenger::RecursiveChallenger;
 use crate::plonk::circuit_builder::CircuitBuilder;
 use crate::plonk::circuit_data::{CircuitConfig, CommonCircuitData, VerifierCircuitTarget};
-use crate::plonk::config::AlgebraicConfig;
+use crate::plonk::config::{AlgebraicHasher, GenericConfig};
 use crate::plonk::proof::ProofWithPublicInputsTarget;
 use crate::plonk::vanishing_poly::eval_vanishing_poly_recursively;
 use crate::plonk::vars::EvaluationTargets;
@@ -13,13 +13,15 @@ use crate::with_context;
 
 impl, const D: usize> CircuitBuilder {
     /// Recursively verifies an inner proof.
-    pub fn add_recursive_verifier>(
+    pub fn add_recursive_verifier>(
         &mut self,
         proof_with_pis: ProofWithPublicInputsTarget,
         inner_config: &CircuitConfig,
         inner_verifier_data: &VerifierCircuitTarget,
         inner_common_data: &CommonCircuitData,
-    ) {
+    ) where
+        C::Hasher: AlgebraicHasher,
+    {
         let ProofWithPublicInputsTarget {
             proof,
             public_inputs,
@@ -253,15 +255,13 @@ mod tests {
     }
 
     // Set the targets in a `ProofTarget` to their corresponding values in a `Proof`.
-    fn set_proof_target<
-        F: RichField + Extendable,
-        C: AlgebraicConfig,
-        const D: usize,
-    >(
+    fn set_proof_target, C: GenericConfig, const D: usize>(
         proof: &ProofWithPublicInputs,
         pt: &ProofWithPublicInputsTarget,
         pw: &mut PartialWitness,
-    ) {
+    ) where
+        C::Hasher: AlgebraicHasher,
+    {
         let ProofWithPublicInputs {
             proof,
             public_inputs,
@@ -561,7 +561,7 @@ mod tests {
     fn recursive_proof<
         F: RichField + Extendable,
         C: GenericConfig,
-        InnerC: AlgebraicConfig,
+        InnerC: GenericConfig,
         const D: usize,
     >(
         inner_proof: ProofWithPublicInputs,
@@ -576,7 +576,10 @@ mod tests {
         ProofWithPublicInputs,
         VerifierOnlyCircuitData,
         CommonCircuitData,
-    )> {
+    )>
+    where
+        InnerC::Hasher: AlgebraicHasher,
+    {
         let mut builder = CircuitBuilder::::new(config.clone());
         let mut pw = PartialWitness::new();
         let pt = proof_to_proof_target(&inner_proof, &mut builder);

From 3ab0a37af39c883f30ff479b794064985922d899 Mon Sep 17 00:00:00 2001
From: Daniel Lubarov
Date: Sun, 9 Jan 2022 09:44:13 -0800
Subject: [PATCH 010/143] No longer need to store number of PP polynomials (#424)

* No longer need to store number of PP polynomials

It's unused after the refactoring we did.

* PR feedback
---
 plonky2/src/plonk/circuit_builder.rs | 2 +-
 plonky2/src/plonk/circuit_data.rs | 7 +++----
 plonky2/src/plonk/prover.rs | 2 +-
 plonky2/src/plonk/vanishing_poly.rs | 6 +++---
 plonky2/src/util/partial_products.rs | 12 +++++-------
 plonky2/src/util/serialization.rs | 7 +++----
 6 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs
index d83e2b9f..3d3c7197 100644
--- a/plonky2/src/plonk/circuit_builder.rs
+++ b/plonky2/src/plonk/circuit_builder.rs
@@ -629,7 +629,7 @@ impl, const D: usize> CircuitBuilder {
         let min_quotient_degree_factor = max_filtered_constraint_degree - 1;
         let max_quotient_degree_factor = self.config.max_quotient_degree_factor.min(1 << rate_bits);
         let quotient_degree_factor = (min_quotient_degree_factor..=max_quotient_degree_factor)
-            .min_by_key(|&q| num_partial_products(self.config.num_routed_wires, q).0 + q)
+            .min_by_key(|&q| num_partial_products(self.config.num_routed_wires, q) + q)
             .unwrap();
 
         debug!("Quotient degree factor set to: {}.", quotient_degree_factor);
diff --git a/plonky2/src/plonk/circuit_data.rs b/plonky2/src/plonk/circuit_data.rs
index 599d5461..35c37991 100644
--- a/plonky2/src/plonk/circuit_data.rs
+++ b/plonky2/src/plonk/circuit_data.rs
@@ -237,9 +237,8 @@ pub struct CommonCircuitData<
     /// The `{k_i}` valued used in `S_ID_i` in Plonk's permutation argument.
     pub(crate) k_is: Vec,
 
-    /// The number of partial products needed to compute the `Z` polynomials and
-    /// the number of original elements consumed in `partial_products()`.
-    pub(crate) num_partial_products: (usize, usize),
+    /// The number of partial products needed to compute the `Z` polynomials.
+    pub(crate) num_partial_products: usize,
 
     /// A digest of the "circuit" (i.e. the instance, minus public inputs), which can be used to
     /// seed Fiat-Shamir.
@@ -356,7 +355,7 @@ impl, C: GenericConfig, const D: usize>
 
     fn fri_zs_partial_products_polys(&self) -> Vec {
         let num_zs_partial_products_polys =
-            self.config.num_challenges * (1 + self.num_partial_products.0);
+            self.config.num_challenges * (1 + self.num_partial_products);
         FriPolynomialInfo::from_range(
             PlonkOracle::ZS_PARTIAL_PRODUCTS.index,
             0..num_zs_partial_products_polys,
diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs
index d371d7b7..64730ea3 100644
--- a/plonky2/src/plonk/prover.rs
+++ b/plonky2/src/plonk/prover.rs
@@ -269,7 +269,7 @@ fn wires_permutation_partial_products_and_zs<
     let degree = common_data.quotient_degree_factor;
     let subgroup = &prover_data.subgroup;
     let k_is = &common_data.k_is;
-    let (num_prods, _final_num_prod) = common_data.num_partial_products;
+    let num_prods = common_data.num_partial_products;
     let all_quotient_chunk_products = subgroup
         .par_iter()
         .enumerate()
diff --git a/plonky2/src/plonk/vanishing_poly.rs b/plonky2/src/plonk/vanishing_poly.rs
index d4c227de..74e0fab3 100644
--- a/plonky2/src/plonk/vanishing_poly.rs
+++ b/plonky2/src/plonk/vanishing_poly.rs
@@ -37,7 +37,7 @@ pub(crate) fn eval_vanishing_poly<
     alphas: &[F],
 ) -> Vec {
     let max_degree = common_data.quotient_degree_factor;
-    let (num_prods, _final_num_prod) = common_data.num_partial_products;
+    let num_prods = common_data.num_partial_products;
 
     let constraint_terms =
         evaluate_gate_constraints(&common_data.gates, common_data.num_gate_constraints, vars);
@@ -123,7 +123,7 @@ pub(crate) fn eval_vanishing_poly_base_batch<
     assert_eq!(s_sigmas_batch.len(), n);
 
     let max_degree = common_data.quotient_degree_factor;
-    let (num_prods, _final_num_prod) = common_data.num_partial_products;
+    let num_prods = common_data.num_partial_products;
 
     let num_gate_constraints = common_data.num_gate_constraints;
 
@@ -302,7 +302,7 @@ pub(crate) fn eval_vanishing_poly_recursively<
     alphas: &[Target],
 ) -> Vec> {
     let max_degree = common_data.quotient_degree_factor;
-    let (num_prods, _final_num_prod) = common_data.num_partial_products;
+    let num_prods = common_data.num_partial_products;
 
     let constraint_terms = with_context!(
         builder,
diff --git a/plonky2/src/util/partial_products.rs b/plonky2/src/util/partial_products.rs
index cc9012ed..56e9d6ed 100644
--- a/plonky2/src/util/partial_products.rs
+++ b/plonky2/src/util/partial_products.rs
@@ -35,16 +35,14 @@ pub(crate) fn partial_products_and_z_gx(z_x: F, quotient_chunk_product
     res
 }
 
-/// Returns a tuple `(a,b)`, where `a` is the length of the output of `partial_products()` on a
-/// vector of length `n`, and `b` is the number of original elements consumed in `partial_products()`.
-pub(crate) fn num_partial_products(n: usize, max_degree: usize) -> (usize, usize) {
+/// Returns the length of the output of `partial_products()` on a vector of length `n`.
+pub(crate) fn num_partial_products(n: usize, max_degree: usize) -> usize {
     debug_assert!(max_degree > 1);
     let chunk_size = max_degree;
     // We'll split the product into `ceil_div_usize(n, chunk_size)` chunks, but the last chunk will
     // be associated with Z(gx) itself. Thus we subtract one to get the chunks associated with
     // partial products.
-    let num_chunks = ceil_div_usize(n, chunk_size) - 1;
-    (num_chunks, num_chunks * chunk_size)
+    ceil_div_usize(n, chunk_size) - 1
 }
 
 /// Checks the relationship between each pair of partial product accumulators. In particular, this
@@ -127,7 +125,7 @@ mod tests {
         assert_eq!(pps_and_z_gx, field_vec(&[2, 24, 720]));
 
         let nums = num_partial_products(v.len(), 2);
-        assert_eq!(pps.len(), nums.0);
+        assert_eq!(pps.len(), nums);
         assert!(check_partial_products(&v, &denominators, pps, z_x, z_gx, 2)
             .iter()
             .all(|x| x.is_zero()));
@@ -138,7 +136,7 @@ mod tests {
         let pps = &pps_and_z_gx[..pps_and_z_gx.len() - 1];
         assert_eq!(pps_and_z_gx, field_vec(&[6, 720]));
         let nums = num_partial_products(v.len(), 3);
-        assert_eq!(pps.len(), nums.0);
+        assert_eq!(pps.len(), nums);
         assert!(check_partial_products(&v, &denominators, pps, z_x, z_gx, 3)
             .iter()
             .all(|x| x.is_zero()));
diff --git a/plonky2/src/util/serialization.rs b/plonky2/src/util/serialization.rs
index a9284bf4..45a463a8 100644
--- a/plonky2/src/util/serialization.rs
+++ b/plonky2/src/util/serialization.rs
@@ -172,9 +172,8 @@ impl Buffer {
         let wires = self.read_field_ext_vec::(config.num_wires)?;
         let plonk_zs = self.read_field_ext_vec::(config.num_challenges)?;
         let plonk_zs_right = self.read_field_ext_vec::(config.num_challenges)?;
-        let partial_products = self.read_field_ext_vec::(
-            common_data.num_partial_products.0 * config.num_challenges,
-        )?;
+        let partial_products = self
+            .read_field_ext_vec::(common_data.num_partial_products * config.num_challenges)?;
         let quotient_polys = self.read_field_ext_vec::(
             common_data.quotient_degree_factor * config.num_challenges,
         )?;
@@ -248,7 +247,7 @@ impl Buffer {
         evals_proofs.push((wires_v, wires_p));
 
         let zs_partial_v =
-            self.read_field_vec(config.num_challenges * (1 + common_data.num_partial_products.0))?;
+            self.read_field_vec(config.num_challenges * (1 + common_data.num_partial_products))?;
         let zs_partial_p = self.read_merkle_proof()?;
         evals_proofs.push((zs_partial_v, zs_partial_p));
 

From ac59f2bc45d9cabb5ea70d93874e063815e3f406 Mon Sep 17 00:00:00 2001
From: Daniel Lubarov
Date: Sun, 9 Jan 2022 09:52:19 -0800
Subject: [PATCH 011/143] readme updates

---
 README.md | 11 ++++++++---
 plonky2.pdf | Bin 0 -> 215152 bytes
 2 files changed, 8 insertions(+), 3 deletions(-)
 create mode 100644 plonky2.pdf

diff --git a/README.md b/README.md
index f677a7c6..cf9a90c1 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,13 @@
 # Plonky2
 
-Plonky2 is an implementation of recursive arguments based on Plonk and FRI. It uses FRI to check systems of polynomial constraints, similar to the DEEP-ALI method described in the [DEEP-FRI](https://arxiv.org/abs/1903.12243) paper. It is the successor of [plonky](https://github.com/mir-protocol/plonky), which was based on Plonk and Halo.
+Plonky2 is a SNARK implementation based on techniques from PLONK and FRI. It is the successor of [Plonky](https://github.com/mir-protocol/plonky), which was based on PLONK and Halo.
 
-Plonky2 is largely focused on recursion performance. We use custom gates to mitigate the bottlenecks of FRI verification, such as hashing and interpolation. We also encode witness data in a ~64 bit field, so field operations take just a few cycles. To achieve 128-bit security, we repeat certain checks, and run certain parts of the argument in an extension field.
+Plonky2 is built for speed, particularly fast recursion. On a Macbook Pro, recursive proofs can be generated in about 170 ms.
+
+
+## Documentation
+
+For more details about the Plonky2 argument system, see this [writeup](plonky2.pdf).
 
 ## Running
 
@@ -21,5 +26,5 @@ Plonky2 was developed by Polygon Zero (formerly Mir). While we plan to adopt an
 
 ## Disclaimer
 
-This code has not been thoroughly reviewed or tested, and should not be used in any production systems.
+This code has not yet been audited, and should not be used in any production systems.
 
diff --git a/plonky2.pdf b/plonky2.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..7aba5eb70423d2fd9e24206dcef989a107c38c98
GIT binary patch
literal 215152
zX{Fu-_?Sb*WxD&S9#)Q+;ucU-71CX5&0T6i-d*k{VPU;TO+=MhPO{TRJr$ozVwuRz zMX53`{p$OW7%T9Js!G>FL0!1@t}2TfxY1C7VUZ-YXRP49;d=To7IyY`T~*cIISp<}_|pA)z8N5GEf zJh{`BcX;#1h_D#rI8q&Qf;y_krzNQHd7Ef^1b6lPjEnYR>Dy;~3jz?SmeR1{omEQR znYTG%J)ofl76tWQgC_&U$m~5w07n@%mQ!&*ipfJ#Fu7Y1w4EH#G0lO|A^Zid!23bW zS~yH49Uw1p^Hpa+q#`8%N5epvHe$&5?&Q?rq24Hf@=gEUs~%((x#!BqLG@XDV2_4@ z&kJ*(Id#Ynf%TxS^4u93$K)aa@wyt{*(@2{KUiD&dF9C>7JmbgY9{Zy$LkYkV7St<+#CB2Ncg_n%rw^U=A3TKB+>+T!m)&XQxz*6#g+1}T)MConT;-M`lC@x(frD3FTCs)# zEIxwac$*Uf$H<+BQlg+;ODdFiz$T8{D=T0Kky3s(K5Ysp?Y63{5C-6wl75E` zNqV9DElGZtn<}^%G}c#e*V?}08Wh=6nH7*tzRb9v2@i$#*5Q0QOvzVwol!XRy`t-p(PJ>^WK++hWj=cFJqS%B(qM^tt; zIUXWb;?T?gmbE&(0J2D?0-=JK71OabCs`mItc|bs(U|p#p{Bv6Tc?sS{;0hdlGMe`0-4{3o27;aBQ#E)wmsOEy2=uAZF<$A?M9GL?ZRm>Q*wUWa6eA~`JQ*DZs5aA1e@(vYbSZ06 zQNp8E_lDX@QbDFpMe4c*L_;L{35qt6v^Pieu)v21!^nmGlUaZ*d;!8p2c4|)mnV>c z7zCH=0OmboO$*(^A}-_urwD~&@?0mpoHI-vNWeJwW~gv9U&eW)cpPhkIH5XIg(C5}GSfw(NL}!K{jdd&_MY!&*FB zpejjOPCRo4)5&#m)nvw|Y*|e(#$?;tDLY7mYx;)n`EVc=Soe32XtQXf}k7Cs9w>KHb@e3HNX|=el)4ASjZE&R#JO6%LJf zA=3Hv?%(t4+=H@19!=7K$Y(?xiMICqClxwmfJ+Ybra7y7&&@^MFk5yHp+Rit;2V8O z{{fqtfSaqM|NW4f2vwkAW7OB#<10AwYVpATzPf2rUrWcY`}0NQYSh=uFZyLO}8`@9?rR&bH#0%_3zK}skxhb3pFnTfK%yf?tI9j^ZCsB}vlzEOP znYX6WVTMi0llejrJ7@)CI_kInKvE!3>v?qR6(u%1v}g{D_@XhLD2@n4T*P1|U5RhP zc%g_^WDX@*<12+2=*O;6V(}#iUAyI+Czqt|G$(yd5}3(D=bJnWI6}~&WqzrpQoboxg@A+>+(%!U(RL|ZQD52P1FU93gdYXO2}H;4^OBnyD{5)A@<8o<}pIx zL``1IY38h#1&BE@IaJI>mI0jeFP@os8>%2H12X_64x-Nv8ie(0)|li7ln(%{rSWyU zWE;VJBOrzhj7kZlC(~F_mD|x5*k6{igaFa+>row?jJvsHtatQ{Hq{q-NA6mH2o?wrPCY| zsnYCY)hwRbD(N5->mD`zer zKDT7ax!g!PSgK}<8G4IXs(Jl~2J;Z1mTH!Cm8_*pABV*r)_F6ZeR2I3+TAgp3RxS3S|W)K>VyK?S4iUrWjeqoWfymVaUo8~

2H+EnAw0B&r3l{T_7z5?uyaZ~Se6oE&S4Pm=e1?)sRMgaau~;;z?gG?)aeyYz?ZP8~QSD%RJ#}fNN_{s-uaSOPE_XM1lJ?OLP1BnmBmKjZDHw3PJpZ8+3Tch zx0?$)`7xWQs&1Q}6t&YPDk?mLCEH7<1>ozDWQudS?5ZF|BuuH zD^U88_&w^^E_~r_Lgu~(@QNa6eCA$21{IYB! zT;gBS{tn*skJ$wgtAu_y_^Tfk&cBGjnjF>h{iWJV56O?01)p#7R zH8OE}E`}mrO$HZxx`qC6Kpm-^1apj%hMO!rl7^wp4o1OjcRMm(FY%mNAZ0+i&)x() zfbzZUSX|X$h6Ur5f)5#3!jkF^ZaI}%8)1R|s8I;>0%Gax)4Wdvg2fl)XMv+4V43m0 zvMt@r;(b9_{n)aJ>}FCSf`cpsBBOC}F{xc%|65{p35WTCABw^LthwFZ0V0fBFB4A? z_EBW!cuM-+o^HRRE%0wfJPSjN8{5=>6M@{v5+qFHxw!`wKpo3pFQa!6j1{u^1006$ z{j0N){I*(~TsQg87=r<_eD&oO=awzOv62LB_-R+0p9`tbVEb%joC2eRpiii#S{=~8 zsyCls=Ol*eF$k@kIj#<1qE)%s`t1dIuN|Ntz?+iG=E}m0iEU4sbaepvHgB=oerbvx zI%RmitsBRb2pvVs1-+#t1=Y%p7U@JW11X#{qR<)&RJJWZQ_>?Wy&CZ*`}jH06doTf zQ&njQ1aUT}iRsk&Muw2;u?^DQqx1mFjokw_VR{S@ZOhFehsel?ltKssI~~JqbafT< zrJM-OYiN=MJIV%Q&}|veQ`BBfevz@QVb7gF9ZZu;dK*kansJ1?$-X`ONsdDanJ3Hh z4-ZiwJSZpyJ@(E@p`nU22XZ11b;w{k140IJ%5Aq_G^N8x;o4_-nt}R)T#yj*baUn! zhAh!0+ukeG;VB0o^2TI@V6xD_UBsb)I$k+1yV+9=h_HO$XNlW_@hz3{V`zxRBCqKw zM4XhAq2|(3{+~>6M5aoih+|U@@l>k@&Hx=4(X7b%QJMP*VA=$6%`e~_we=-+1yW}k zeLrVwf4@Ru6cMB*k$Nq!E#)cP>-)v@RmTy-L(me| z@4|%fWiNQ~)(YK@#MC^stErJH8pYXin(*8j&O>0EEx^HA_r>+m^Pz{MsPk*^K?3E) z537#B?Oqsk@v0d0^LM>tw;4E;-jWgGQlMyRsctzz3{EL6v|N@|kt5Ml4CTL-F8lk~ zb7Wn7^1=te`##kc_FoUTX~8{uFhm~uh$Yaf?1c-x_vH55tt8Hx_XNj|B4I=kkQ!-J zZAO7Or22r6C!Owgl!0W%__N^3e+b6`VJ*GGW9L)N3c&~8jtOq$N(phg+W9zASA8>$ z+NkezD@tk`S{Ge>KY}ND=vFM9>-6u(8BD>PDdM(iV#WSY+!=qd_3M2wu+4Vyp#djI zEJv4)6*|y_HBo~5l-2fRzc)jwC3RJ6UJ@H(2!bL%SjR2%A&;8)ip7loK8JCs9}dO3 zw0kSqakj^(#pp7l-rJ@g#!chwxZwhhee~t2qcY~7@_H3ci#ZTUlgWlc6hiyaR1OikdzFxG7ySShl=P?dx-Tf zY-CKy>4PMD>DVW+fi3zMe#h6z~Lk)Wzs2441x$oXpH>@Cd{<>*dk>`$T2T(Gf3 zo|;&uo|s--4p;USry!JObvl9vVFF8wXK=QcaLW5N(C$OTL?y9}C3$RdG?JK$UJ9Gh zz|;A~@ib&k#_1tr-rE+y1)(8(9nLG8{^5r~3rqhue$V#b`8_ks|M5M^wQlE3?_yeQM_?1%*ns`Dv~9lrnL5p&(?~|7P;bHZTlFxqN*1#uUZ>8 zxDJra#|yN{QG)!yWF*7VBHddLvrZ||JSln~G8rHk%EZ$!Gt4Yjb4a_X^A(d-Z_KIE z;F<_y;KfhLH)(BBqQEemR>iYcOtPLB`=6_O*yah6`J6W8JQDP|qSJE8a=%uWPvlbI zugN0&^QLynj5Mq@pIBwJY#wyWsOs`TtS=(So9JhAe`mKboDKc(JM@hmpYo0!S=OAq z1qKR7o-cT5Z=rw*k(}Wti6`Nuu9wkCo=f);Em7a|_P7gE;A1bmwS?W9tRuhZ8=7Gb zEQc4@rL?&4EU!O-=Q1*>t$jB4ZdvDt9kf#7&7E2*$J<%$Y7kQ`LtCMoblI)W z+tvK-WrwU;mu2?n9X0%_+Ck(7cjf0=@iC_MPkaABK+0Q|HaeqtsWcE!615N*jsGFDiDpA;O2%L`E>!3x6s2zr4Ri#Za44GgPqn1k&5?!Or z;E6(Kn10p1C_(Z`of0rY0JJVP;PBH+Je9Qh(b?nxayGBer&?t( z2K%Eh#f?Q~LtJ$BHZ9{#&n?}DB>G;sY%Vo-sSW3lU%M@R#f11#c+>?p3{XN0I6w#3 zt1vQE_G#_Xq#Dj#@$Gd~R~de zkcTP~D_Ml*-=!VL0shUtzVvwm$QS3lmSFuW5 zadc}Pv0CApoUz4KMNbrvS%iu(2Y#~+&zuURIlzBF4ua&I6k+`ha-;&9cU12u>eb< z-h_b3E84TSfl^`CZ{pN-^_Q`sU?T{tC(aOB!bdcCGH^;Mwbl1M*M$L2u}zm=mW67+ z*U0T{5U^{-I(R`1eC7mtH-tG8&UrrHuxhDn)RvbiDlRK)kNHRDwP)Rmt~UIBbeKAh<|pKRJ}Kr-_{P6`}> zIrkez6E6>#e1djl_A?{8og%sZO#~?2 z>(`u8c^m`AJ?PGOY(WQ#)};%V)BihD0kUP@9(oai1kT{h9ocRojrr7p*VB`u7?Pw3 z2g?=ZzO>!3rQpqy6g(Qcw*xi50%;oj7V9&f>wna7IsoclMW>T^_`FXsz=x}Kmo6y^ zN%FnwS(vl+u$_yxkMwk-svR>_X5>rmn(KZ@068B-ti3tWv6*s96~Z`IUe3LM=)-7`ziug_=@It zmT6{*x1<6lEu9;%^RYq_%L9oo7xje$@l$*L!3pq1ysFcib<_ZCP8=hT%)=veHM>V_ zEJq{xE<5qqM(cscW(igRXCbcu_A2IeAA-YUa#(|HfcC}!>peuIx)xULLl6ujZ^8x& zMTiHY2r2W&hX{nh-hxV$?I1G%1j(N^2|&P@p&(`Dy>swok~8!$Hk>`-T0*SjS8;VM zIOfddJ%Prz_GFi-h3+rGTrh`zhdRab_5u}%F1{_i+5u#)39OzL`4a)%tGTLdZ6738 zx%PrQ@a}qxZ(HA1W#i(wkU_h5g;DYCP26>+vS4bt(d+Y9{s)}?B4;F4JD-G;)wD* ze5A+vIs$}Opv=ymWyH>*-_0sQf0ar*`Kvz~EJ5sjqZh`vpP;w$d1WK$Zlx z=I8vNl#C?77IF_nYlDh)KP59vFhMvd2;%NWt;KPV#m`QO-!EDLQNo6E?PP+=?iPuha3~DPH)E$rwc|_bksN@2a z2zitQ-1P1{NU%69qk>$vg%N+kK+VBX;#5ULI%)^m`NGrC}KMkV$i!# zuX0w%NfX0Uz1Y-{5Fz-0`z&K41YZ^E7>Ut9^@xB3doS1GdUBLP0Ma=XJu#CzTwC`=Z^kydpWi% 
z`Fucv{xOEiK1K5f>oWUO)~(KKvbp1$*I^y_w|^>~rh4*XcN^IoeWr`P|4W)ZWU24; zgw5XP{Dg1!>*^lWCsl@i(mZo$t_#C5AHQhOY2lH#(lMcQ6?25P{N=QHIc_rIqs!bo z`>Jws>XvuKGpF~&z5}I^X0iGqHZ=SNNX8ZOV3~LAEa@J2f9GS*J1mknjZ^$n=hJdM ze{%*5kRP_=2ocXa?3@XD%M{N3Q8|gVu^q}Z1#E`-x3(B6$AGkDNRkaJ6`QtFf=k^W^QPHM*ktF?BOB?2574#G|Y`yBuueTzLp? zGx>7vkS?i}rsoQ65Pzlo@3;{2=OD&EO>K$hirkLxRPyqAc#3I|rCQ~up?kYudehsA$!E8o zZPkh!vBJ6GzR73h=5p6d_olGvJJe^EpdhJEuh#hqH$$)oLFe?TC`lnG6^Eras;9v+Ii~b>p!d76j-6FJ3#17X;!`d6g=xqPnc^tVX-s z_iA=)o9*4^Zkw;g*ZS+;>fp;kTW_j(4Ug4{B6Yl$=kjE+8eYp|MQXXWXJ=}8-$a+V zY*XCCvy*#|E#F_OhHs8c{>oLh)s8~JuAIJ%e=c8N(J62;8~9fBQ<bTuifS~J*w3V)PPJa|wR5@3#d6Vt zW(Oiz3J1fna>LCjj70_%?%o*<57{Hpdn#m1tZ9t}NjBp-!(lcq%l8nLLU{I?bJhgS z3;BBr{!cxFi3ae8iGMBtK>Fc^_&ZS7nCA*4H3#Hq48s5z4$iFI=6%;hBCdaarM5YJe&?LdUwP^gkS}~AHgn%n>qk+de>j4Pt6=?o( zxC<6G6JXlS4-By&Hdxv%*rQHixC<2&oDF0oN;A&*B`)I(;SCuJu%hBZY8N1c&Wtv# zl5b|IqLCf+1Mh{amLpo6NmVTcG#Crz%humE3S+^CH{Q`K$na4tM}2J)OSXz8n?>Wz zqKTH#G)rijWwebk-Z~8>AUlesuHE`YLJnG;{ql;$MvfvK#|!S+D?SZ1$2Pik<|#@( zF`B6wF!cy)GG&1#IA;jo0UQ$c75J0iS3a7yqeF$Pm8IkG6@~`i8fau2JcMCapvj+s zJ{h!E4+i~44ZJ{LRO)`+x26_&^+YdCh4{mjzdYFRAV{($bc0ye5;H@_af2Z^Kol`o z;tzH`81Ny%;rHC3TOMY<8-t@$*SF&AASV+X)l*Y zKkMd${~GErHk_4KIBR!U@ezp~fvwsqfBNku*SCau{od_gZW#Rb2Df(?-W{krJW+>v zsSZ0`5FB;icO$MgBQF2fG^Rcq;|W`&;Cy!l0mo*Xk;aX%Q$P?$joN|xKGfB z2OdvRk?~)xiIJV{|L~^&{Y8O^f#ZMdMK@Yk&PT0?f46-?ds4DQAo55OXME4DmYXv8 zoO3RlB={DQX&i;3;qlIO{=V*kr6Q9Fq;D?0U72y_G5}h1Xj40TbZGlcFEMky9Z2{2 z|LK=%jD@r^Q{p6RvT5(?{y2>=-Vf$k+DwMkeRKu#UHop&GCgFh{w|@CH~cyLq#XN)_XR%Qdxi;^k+CRq zrBWnJW#p8qF;%%Hh%vJ2LlDY7cKzDbiY%&os`rklG3r-Llf22o!X{RKnywXX>-VTJ zlpD-q%%u!fCq=Tr;_N-Yy*-6aCv}R#0b%(D!)DV=nQf%&V(c6b%XTrc6QVM!0jtl| zM9!RNIC)2P-bJbIe<-ktPMt#6m^`-K{eGtN|5d{B7l^P?W_5A$e8Qz^*aypaGNVs) z_EEPwCuEvX-wi_7gV2*uXh@ziDKJP{BqTbo$hB3S?wQ9^`GfKWX@|?mf?3C zFxq8d7K0V}kFA%`gpTILLWqRTG0kJ6lO!`>-7LVKibu1XOl)KXB|MPa>&+d+LOwN%!13N56d!Pvxh?YMp8X%WHAkH;JKjrB{ zptnZ57^YZ7EC$e5`z%p(C@bZ*MF|*H)0w4MHXh+0IK9Ha78r=$x?3GsY@TPYZdiYl9^sKaKQ&~mT+0qEqzyEV z%cTW>9x=6E`(=gN$<8N$=<&9tx{xO8r;JJy(8DMLEvaE2Wbl`voe9Hy-27YSVKEu^ zuD}A|3G9uJuzNT>U?`BKJ|p*MeA$E(CNtJ1gs(d&cJt@NGX@>iWbp~@tUz^_p8!c~(_8i6j~P2=TJ z$F=7pekjgI;juqWDsZ_Q@*bMya)%-L*h>?V)l;+c9dU}c{F&UG(=7`|AIHJAIeE?{ z(=09h_4Z+iv;R0OFhv&jsRByopjd1#Huj_RNDeFcC>|ile(+FD&hU!wC-(7^(<+;t zpVm42<>yjQGib#GovMd+$$giPolYBL2S<|KDJht80Sjopq`UquT?9d<@)1<38gTG22UDiQ)N3zE2LGO5Fg>2E` zt8?g6m8OW9M8@Kgfx~id`EmQkTrU%rev4ZTulBEBV-+>H5NHhQ)Qa2OWpGu>ACrff!|FBkADq3*ZJY9Ncn2>nXxDxLv!p^#|%F*dvV=Tiuk7hMx`V9x&Nl zUDR=e6AC!d31d(S4OE(3l}V+LE9#w(yH)67b`Bw%@|bZFxEizLJn{M06eqKi-N%B{ z-tkhWO+6dx(ov^za5AeQ`W!w5B3!zZ{%~3`p1}jSf$b)MqJ9-XlV_@x?2yLT3AK8yr^xLEO}k)F4s+NJu#_xz z&j`;8``7;h?E(*AYt3ZUJ9__4q4eg+(1#87$$dyeaSCtl7=TCKp6>6(PYOSX(K{>D z5WrX>qBoyhw4tKbE#IUE?G{*Eg>Mju&xgxs2};C*m@JZo+?h+@e=VZo($9y^1yQ($ zsQ!e*75NhHLu3=fy|p!T*L> zo83KCJ(~_F8u(|z)ePWcTQy!~H(>vPAQi@O!_;lxG9&QGthg1nRSq$i<6ni1tAU#E zrRvlD%D9Pq2X%OV8dRLzsJx^wZJ*5`^cy}M*m}DI!}3XU#03TDC&n)yO~JA5Ok9mO z^$#L4YJJoHX_o^3H{QX)_}_R3>;L?z>_kg5dAk+S zZ%?1m{y>F=!7jk*f>5m}1xcMoTRU~=0oXmPYbZmwb^;Uk_vHet|9~;WxCeWJGQCm+ z;d)@<{I~OKXmMjFX7<--WZ$35nAuEuuq4+p4asiWrlaHYA_~LGf(tr+)jtR%&{ zvNkv=62-8v5S|wO}~0az++%SquK_o%2+(Pw&1MIs3cBV^`5&nA9%% zR*SvqZv$19PY0~7s+ylo^lINq+waHemHqGQ+Ft0aH$PrAGTxVKCw{o7Q?|{74`HV4 z?(MDOE}3h#ci{~eG53C6k%Kg<`BoN|*%uDbMmr~-;hB70$`KgFi))s}6~MDY}Q z7(||%@h>@>wUmn$SEO6pu&|bUQz4;fY8?kmxBj)Rje%^1Lb^q$e3&ANN+Q-|ry@xN zq@lbGQ11{V4VY0yUxuj0BK_q8nBxLtURzl7>kf843oxfB03@6_b}T7`{KznbSTKZ9 z4UU$cjebzZv%=VZYZTQmd>Y7A6j|QX0k;uf0-IBieX%LI~E=N>$<#iQ=cfwY0eh`h| z%j$!Rgl9*(3@F~kon_h(Qx7X|#xnr~UC+laiV%%O;M-l8Vq<1N&uP87hXoAAEZ8t* 
zM;f-AcpMy@88-y>R{B|jErxaCln-0%wS{)Fe3IIK@}@dtNazX?3h-M~iKb=$ax&IR zsk`U(=5SY@))^3x0D1)vpaGk-m}gx70YDf<@- z42qBWOz2cwC#K>X7#V2+i%|g@5Y7-;@$m2Pz$E+fQl8X-{l(|hctL(gAG`n)5W?x< zK3Y`WYyj&PCPHB3$V3)V+^whU+kRO+uq1H2uzd7{9%2VI6*3$pPI|Cx zRRruBU-Eqc_rg2@_O15dO^F2dxbsiO7-pCN;^s~)3|njgnwF7L@dg@AZv=1-@Z`kk zqq{Frt`=w;iL3{3X9rpcx35Ek&bV&|>gBrb`R3{Nk{NVModH_0+?GF2=%Cfhx|To; z#n*-fkXxrUKT8TccX8a3H+w!iBS_Tj#YX$3tgpvIT4kV z9Q7oazWgVkAD$Cs3U3&1EmGLz`yP-Lwfq|?X5dm{;_5hr(2+VUSn#^F{*qY#L;qk| z!kz6%Luv94`L1f1BaY@2TofT=q-~&XW;k%m6^y+dog^y$^b$i83AdJm32pPKTW%OV zmO@Q?Mk6EV)IC+K8vs;Nbw6f~ku8BPBA+9F&88pEhq^<* z-4#?%4e3mKwoH4p?61c0p)49;2{Aa_=K$cE!~UYcUX=>=~D$(@Qz!^^q&Fn5-1uA0hJC@duDC%KZ_-^}W>( z(YQ|*I6~|r-%1RF)BAp*;a{ogEnQk2lvMih7_9U7dezP4c4|T`AFlHLQ7juKs^#(? z-aj+hXgAtW{>F#i=`GTr2Akb4yqW&Z9nLV9S2W}u{Id5kubR&?O7rYHRYsVhP*nBZ z!2EDp96++;@^B?UuAeygoBT3w?~Cqq#uY;l?vxt*N|>Z^&k4W+8j%P~rHRfRx#7a8tlvFk`DktpV}9wcXRa94-yp zshQIMmf%MBr?_>*c2P1Ik7MtwpP2MJyKp@wzK=K>V3Cn!!(DqLI&oQ)9ka&6_G!yS zUIl~Jxp$fCC?h9ga_)gQ0K@O5H@TYhaKVc?`k7LZU?VYdpfp4?4o$zgqM;uMF5iT4 zYFfAiGvdxA;VfB)V(3!Mr!~KUob!_Y0#tDzX~{1^{tz`$FnGxLa2f$54#Ob-ECvxd zzlbWSNo18{Rhm&x`-jiZ(}V{m;V5S!JKQKY^k~8mfhCa4KNbR)MJ-oC$$zIP#A^YZ zFjC+K1#a+I(Ck`b`&Q7T-l?_MxcX7fskQ!|ClUuIeYBh5hzUqFc=^@#EIC0xnYDNo zzj#!t%9B@?p?@&wjWi?{mz*U(3)(r-OMr7AAWYH0;{ zXZkh0q0s6CGjIQya&V`QyRH=!^?|)Ch*A^{1de@Z;bsK7-Y^U5e-ACL>7K5TTUeX; z7X8DbIneC>4L&aGo8p1We}uG7A?I{+AR(sXa~rn2jIdcj-Z0kk8ec&#*sYFXLWX)B zh zDsV<;!aA5vfPgKECGJ$FX#Ol`;!@Nf{N_?sbx46W zj-?1d*z?{xx~KD1UJ|SBJNOn20N{RFF>_&2A|xzA8Vd`VuzgApIGO*45Xt?Y zLL|rkX5s(8OT8srz1WT6yHB6s*;Ez3<~2pe^j>84b=fVfY7=555(gr;`JBNtG|YtY zxBCm;Mz$iUbjQmAMr|0B)Xv66w#Enf_EpBi{SAD`Z^wKrx{2V1HIYhP%Ny*A*C zep#jmNWq^A8k^Ypx&$3QA`!aej$T=JwSU2X7NYBKZh1LdPl@R4vDIR^A`{O#v}%3C zecU|}+ufuHZhsz0u&()byyG10^woxTdHH||+vO*XW-mx7T?gwIrDLTxi$$aUOgoaH zy-L7-ax_y@Y1MOI?Vs%|s#Xl0B5nGC_OVnQZ(@TMvmjDTo8Pekxtc`3-lQhIiESaW zu!tD@vRAjLHJ|)MYGYZ^W>C|lcux888Q_Z=FBcKQ;=&F-cvfNA(sirE`Hxc`>Ih_; za8{n1EJrkMm)R@pG%K;HmHC^v@4G>&GaA~^uYFvYr@FrISq~cJ%*Q9Y?EK_uN;)y4 z;@+aGb(S!KI%V4~_)`&LCCG%227~ZN3gOt?u?hFDi(ppVS=1R{PHJl$x(m;^noa1E zVm_2}lQ|4^%!<)Qd40Y)Cu}Cj-44f2QP%LIWS8{W^i0R?13$uYKGmIMH!T3{>p9hB z?jAnLRvgf4i5*xb32|P}N=33nYyRNJ_K($U4rI z&Wq%}Uup*b`6z761zd-dE`%qqc%%5!p0|7$??0sqSLcs0kGJMLXy%w{Lm;`(Ey7+P zpGrz7^d2v_r+9gt`?l=&ZC0|V;w7Z_CNNqMRjPvO7gZDyiQP$@n~|x9zexpj*g+M& z0!zD3LFsgQSt)ntCbwALR0w8rUvFHs!>)Rvhd-B092s(@aCC>BLmu}OLE7A!4BcKI zh=k>i46Q|(T<`?cOMw%|MPRS|7;QZAXEvrXZ;tsCH@3S#*d4Q{4jM&vHe~tL<(qcV zuQpfyfnfACwPo!o(s8kzzwk!rtYIn4)(bFD!^c8ih9kof7)hbzB?oSA=S~L6)^JHj zdv50fc$C~9Jz|+)rLPqg$TvBF^^(@W*L@Y$+>ZTjxGV}oKl>JfGXr1G-+gRKWL*=( zIkR%}k+K0^81t*tE+~B9*@P9$4NhsAfyx7VP2zqMxmTs4jNC!DeAbHGDBAc_?2pgq# zLHC&&@9F2!>HVO2>(~V3fP;>EZ7?;b#hyT7pN*gyMN;%5QJ^TKYQ~g^TKBkYI z_)cKU`VDD2pG#iy@z6B(v9bQBW!&(6V%(5RQBr#%rDJuQffx9;s_|#H3Cc_F(NTWR zCF|Kq)Pwmh5U?W9thf4mxEFV{ZcmqN$n@O)`}N&``*W9P%p@CNQ!)%T_!&WmF|DQl zr^^F*UZ~Gj;D#zE{#r9VFmE$d4{{WZ3r$Ti)WcvH4w>rW9Z55G3&pMTL=9zxA#V2Z zz04)^&o7#a>72rh8PBQSIqp=}q^|kf)Z##~3Z7sreAszP`|$yuS-cX(McjZ?Iw##J zt0S=*HuF^5BZzn;H^pr_la;5hRiUB#PVNr=QMz}oH6@$e0SWYcstkX zz(Nx^3r#SQY0v~s;ToK@qgB!SJk8Pk>HF+rrE91M>?7Y#qejGBCA@mfOacHfTI_n9 z8Gr$NULm5NWLf4C)I_-6w2_iD+7uF3x^cKn;BWA(=(yKJ<#DZkH!TL9&2GN=lsqND zhU*o_^L{*ZwT}V5jTMM-qug16B9H`6IVxp0HUiDVvhwHfRM(^lpz4^^TYMYTd!q0T z>*VM7V)e|C!KAw*63E;w+}My3i4(G8`r`xu>oLpaCj>c4Htb7fv_s%KH_>>6m1iNxXK5H+ z(AB+iT>$1%J?UX#5K3ZK9vcxIj(U3`_)zZmSb_0ZfR$)y{vmEX@y%wb?%v8)jQ-iV z5zy0961<}RkD{GH&tdv6_`bG{E8HQf!8)kIk%Ze$c56W*0aNV3!q@R*RAs2JS{6a#Y}zDp~ivbVGE)tb5LYJb+XKLO*!h2Lr~c8 z->I8QZ*Iy@p7H0xkT-&v3;EC%u8|hr1%#!r^z(feFWqo`=({MK`#)8 
z2`+~LQA)`d5rG|zyi1v^lKTSEMj7vumO@=qe?>Cp=#j&gc2L?ycx!D2Lcugotk-3w z3EKdH2x^|bC`)3F)_pRvV&TgVw_rl;e#Q)|7G5Lc76peZ1ljEiSZo&b^e3w+*zj0# zUDVbL=kg+i#UAW%^pIQkCRq2efQX;jMShHtsHCX{r*xgs2fOR_^zXy%C%^|=UFl(M za<6oO*e59xvC2;a4XU8eBxr+)L>+bBFtD)U*}t3@1&pKNS$5AcK{w2tnbIEd|Gc-;m1>lE=3ZL1$k5Q?3})os{_ z%qJp}+42=9LT&L6;7GtWwgoy8QhW_+n4lIfC#mg#gO5h73n9jw-OF_x@fmS0L~CV4 z-ZaS?(No?M3KnYw2dv@HyPc^6#nXd1I#-H}?{NKxyg2JF?6Pw56;N5}Er|x!$>adO za~WZ2-rut!GPzMidYYh7cIy+n+Z^NY|IV(g%>U@QQ&KNUFYtEIr6`|iNSpC^Gsq$7L`^sB@@&g`KC%KA9Wm7Ufdl^6f6 z;3NBIeBXy_I=}Bv`8rsqW61dJGLi?i47-SfrBr&RCso0X(GfkQJJ&%>bleTtoe2!P znB0f28vfqw?AwFSCFB+5Hs$!#bHM86jsB>Eft>)i;7<3S?j4`*@0DaG`A%QrkmzvHf%LzPk;QWVK$FjH0dbLb;>3Wjqf$bI`JPY~Cu*D9e zWE!{%r66#-yXpeS=0bK}N>Xn;nPX%MX}NF*4sH$mpbS^Ti>@&+eS>%QWzf3spBdsE z<9lu*Vt{Kn298D+q?vWSeIA7FwAhuQos@%F`N?EoY#6v#{4#9(*CuoxEMnP+Bnil-AzKxJrSfeYT=ASxv_ri4J8J_WYpqIfFfPzfU(%{yh&3>NLrjuwhJPGB)zssRR_P1UwK^A;5$V;&KJ7{X}pu}E?AzU>s-Y~*o@z|9Bj zb6CDI+EZdF+8x(GKnkquKPccQs_dgA54dXJ(p$f^bfcg91V_pl^3n6*xf`~u=;~z$ zO;|tVwSEb1MsoKB5~I9(sA_{3z&N4k_M@W@XM!XV4WjM^fN4q@*NGjQ8I2Z*>u|!T zU4DS+z>Vr%;*z)fJh{bMoH5e3Q)!&>?()d&MVW)@~h8${dc%AxifSbrI@!B*4=AA+al8cnFEKtGnIH zP2&r}3O3eNuGvxf^%ywn6z2z$lhp~=4!>r%-f9@Ut2VL253_z0SatFNih$GE{g>{` zk}?ATWimhThTU0Dt7Ku_N(PJ!TwAh|a$TI(a9HyTx=5dd#qmoss38HS-5IYAmhXCA z@=PZH)iGg*QgH5I6YNF?|7Q4C)D8_a=4x^2<>+2Z9&%2 zy3A2THyDqkP9srAEaknNZp_{95CKG)I3x$D!E9`x^c$EB0e>3HU(E{txkA4#^=Ecu z@whl$jiQuY-cnI?i1mQg;*umch#?lKE1^I|$sj`$w;Oh%D@leVxZIsIxtk1(g2#0@ z-n?R6z zyMInJc&65Zea@R_%w>o``3+}B9+KqYvSQPeKW46gRkB~ z8FTu#AI&T_e;MvXs#9gs4b#Z>#Zkk!2aa1>=xpN!F>#g*WyOhdc}`;>6wpfvrW%%e zZnc7?ks@wt&MYvYAY_tOWk{pCAoBdJ;hEaX7&nT2>qKVO{2Go2fS+wIJ>REIx zg2AUFj%N|QriO#H#;xjPK}#wlx^i;CJqIrLEJMzj)*uq87rW>Sn6eQY*iENb4ltw9 z&GRurjIfNjMWV6fw!Pgv-|-VCJZ7;1m3J4K{Xw^q&mHnor{HIlmqYCt56&clA*LgM zx}-FSL+OvT{K$&V3dsa ztflp!F1UWsUugAm1DS;EF@fCY+lC!AB^Gdekj{~$6V%DObO3{< zCpViz>K%WtPq4c`&F^CiNl3zlkBpFuRjebVCqX&Tp={a{H-DdL)Ygs;t&um;1)jf? 
zu^BaWl=0*Zy3MP?+yLC!L?H{2xNwwDoN2+TUH0$+K*zlHYoO0OAreX1+pT`fKAKQl1C1Djv3rkh9CBWz*>vS6E`o^Ol9bJ-LYH_Y5 zV4{?-w~}EXliZ3ha1QmEj1j?r=fy`1M{>emUYp0CDw-J}R_WzzK~olW=58U(pbDv( zKeHz_Fx-L{A!d&}&L~^r1$vD+bvSzIKc%wv4(6+>ysPoU|3HkFxq~A3p@>Sm&Rn7K zNa`s{;Uqk}gRgLLj_)vU&#a(>$-%!BNr}|J0W!^4k!6 z3FK~&Nf=%D8VZzUe=ONXjXEpeQ?)u|rUG{2Iv9jQNH4CjdE`{#if_$d@`}z@7nU!I zKpCY9F^q^De7sd#(8ktIppRdK6&Hx_SNH3gGg zax4$DuUlUppyYk2mJ~+t9=lp$l z5-a=E^$bO98P*A-<8jKJ+|;mEZ2k?sa$OGkAHpWfe{hDfa+1={evSGAJjB zMPR%sqgHOoRQYts?Dl9V0B;z8yj&J(J``W244$rc-EULFg>9KZ) zNb9gBP)3z%`MuR-h-*5am+^yLsW^V^W}`;%|NiFnTb?GvVzG`tEij~L3 zbmloF#QTZ13|_SCI^ct8yKC@{;jd8W&wRAvtI8HqkF~!!mry>rHN8%(((E0$RYehP zN|%O)XB6>Pj~>+tWz{#C&q`}}SYr!iuHBO919T_TaFwGBDIpr}A@|gVfTrs$^9z^k z-(myeb&!nQNitW)Qu7Dh?@}71OyLO3 zDmXk3f=>o}gU9@=YpTU?NKwS!VY10G*8XaUUOF2-jUBHO3^=0=>2nV-ng28FCHhrd z6v381^BF1f8_QV~#u50$waV`jQXUihb7vR}c}pU@1Cga~7Z!bR*Uvmnn&Db}%HH9t zQB%?KWq>@?sbmYAc)tBhyy~m!E0(+aL*}z~%;@@gDJ`N-Hb&eCxdV9~CPp1Z%`QE9 z77QYQYEVIUFTO2#L^{DIv+b$bq5%WJ@xhWgPW+DB0$@SMt&kqMxZdBonV(9Jn5 zICR87ALolQ{#I{`OA|_t(hq`IkGDfoY^*Y`^ZVQ3c!~mBH;yi**k)-N>LI(;;o-|_ znQqC;fRoatY2lDloZDy$7iNx@8H`h18tm84!d>gFP$} zqlFp_8(cJuju}M>efEbppPSeeyoo8Rs%b!&8oBY5^P@F#c14Y{O(D6b>=H^I%RUN;p2#fpzNdV+U~3J?vH zUAlFjd4C3sskDN-fhn{m&GmZpXMu3j5{-dLbKnEP)1hsHIfGfXNKKS=2}DoqL}mEI8%S$Z1?vguuPhvN}fPnx!f=tcENf==%}i zpeG?Iq6py^vbT_aCIVL5fiO`*<#?!wtcBtOnJ^4Lb^1|+cG`!XTWSQ_heGorWb;a= zne=#iR0-Y}4=?);ncUUKU)9f2O=6J^_Ke179!SD2Y&;|bR{azkjP$c^Ck?>6@cT23 zb5u?z7Fe1~9>-r+Cz66_hL}2-2ChgxN&$z$@R9Su7?%}c*p;n=R+b;mYRF#Fl zjq!n3o#zKnDvXJ6#vWg#j=131z4C16YPDyu=ID+_`3wqEO{(n(e-atMA=qw$?-Z>4 zm>!+rJGgj3aZjUX>ho1w^iKFZnfOuqa1AeQxLZnDe^Tjzw5Z6of$M=B%_6j;_8`Vy z^-kQGM|_|?Jn+`ZMV>xY8u)yT7`TNu@EZ#abBa_CTgJR4gYo>n%f#YnN9irExv(AW zmLkeNx#wwLkB*~W41&VQyM(`M4#_^v2oah=@Z>#!Du|1fK{$Y>^42UxAv-5Mg+Fm^ zbj=B!{+&>ouHHy!*qVUdFbH=fZV+wX1u2-KSHbL2rDCzJyHGe`>4cM=d$19DDF(VR zhYUT76fY5d6E0+URd?iZ++NHij%5i9=0iN&*||$K!oL?^Ow79N$ZlCemzW|3S9E&C4$2E(=dJjZoo)il`p#__ ztgijH!2K>2PJa%4Y2HHT&9@ozkDNV>7#QY##)J0<*RdPEP(n!7w_VvUI$U;AknH~C z>JG;;k2p>IKEbwGnB0S!{WCSNSXETFpvR@cP*PtM*fY{4W0M$+RTBev@WzKh*#?1@nN+dqR+e-VKjP9xv$z@2t06B^0Ww6YA|o(o z$?)*X-;%dzKT%wcK_1Iy{G9siok0XSxcu_}Jcbq5Is7tY!^dGb{AYN5=)a9k;OcJm z(M?!e6+ujS#q%d(!T@7o-K>AWyKnarz&`)!!-Ay*#)AYC-I70vsF8gO3Z0Mz4}QwY z*oM#qLrEq(GwQE(ocoXmzO6YIm7edgWEUIShshI)ei+ht+x1Iy2R043s@0o-62t{I zM%w^qbIxD$4pT=&}-N-zyb09O5ygo8xBgy&~RW=nIWFz!KVI;<8SLjgp61_uS%Vib!WEA^-bEb>! 
zmcMoTBFN|^MqA>J_SBa@Xqo%lx)p2uhpz@lP;GI61D@|x zO#C|X&(FCpCEi7$ghD8|d7%W5bpG>$ZO)kbJsG+RcP|(uq{9Q<71GkS6QQ`4uk|KR z`-_-Sb7jt|xZ9X`n~@y@-?ts#@6SM-jB*7;d2F*P;r+q;cQI>;@dR;H4RH~Hlo9um zJx2#+D~ZS79FZxI=!0@r(E007+zP*$Yu2kh&|rXEBZ4IE^JHr{lU0|GQz?_gDH4=q z1j%kj>=Vl9yZmdTwmkOQyeJLUHK|netFuPeJ$+gT>m-#_6NSnjmXcgIADoK?v=h>i zK~Pe=YDVCu>va}nEqmfGW)`6}FS$c^VKI$Hi&)d8;B)gi4HeV=wVd>`UWaakIIt>51o>EncjIi(>+THoG!U zF%H=C_-@{wt$5<|^PqHeh&bUZSa2#YM4TWO%VkS+TH1md+7O3DZMJcKk?#9fCoQQd zqMsW?%O`H754+=9>V8If>Zh!@1H|b;jF{Fi*a13-`Q$7IPD?KQT0pay7{puuEli0N z*e=DSH`YEs@pcHp7Ne|$(p~V)9$jM}N8LE>k7ix9= zStPif_?(?wTJD7X)_+$k2B)bNi2`+&niL`ejEFl<>Q+hTk;rx!!yF(1y3aY%dL8fD*t7+R!_=92M<&2ciy96!~Z5 zFel@*u92`9!>`e~A0%Na&C1aH4ICkGn1ejLO#4*758g#LS6nN#TurC0Se>}F!DBZ`lmm1{4W}Q_V@2Y->3OmF`)C?WhS6@U5 zTV6N;HcX0;6m0aO$+s?P2?Y%S<(E0=Yg|e(4m+~6r_2>5=Rn0-OaX_76zei`QF++IwGk>+Bk_uS%~TL- zx!yR+vRm8HFUYm|DR>G;>(yRVQR4Pj3&WqJE{BLBhC7dr`glR+E)5)9p?r3Tut;K} z#!HwOVZ*Si`K4KB83_Z{yzbyrRZBCRBX<#-TB-VX0~!Z(b}l4cT8*CzFwSb0&JjHI z%EMx%3{sEK7LnQY`BVqN$kwukJ7zhQx$G)05=5-6KI>86ifxT7#|C+8=lJnSA<55M zZdFF~0c#1hd|yO(4jOPAF*$ka7?lcB047CzMVvY^pD#&G14p<88mPubVt(a;i+C?S z=786qel#=8&&(cX&P$y}fZ}jil_bpud)GuHcC`hhaE}Xt1(&a@nGL4NUr#j$weC?5Of$_yd}ungWMBtb><; zplG0vVtNYx*lF6>vEK?d}^Hd>zHFsGxRq`b%{JdGz?KZk+9-w8SX(hS;bd%ueW z+gSUWGT;wI2UcUsNc4#&n_|nC910EbkY8pZoXSVoy1l64<*x1=})d(tybf zw1an*qkpBQ_G}||;?XNXp0jRIqNloJ^M1_G4YC>;S zlirwKUpZG2wq4#H!audIdt@^5wU8y&z{PGpm`0f}WL9FJK>mpY=!Pafa?Q)PU;-Z? z8~a8JH${(T$TcetYpn9%rl%@5KP%$lp!>8A&a_C+u_%dH;Cn}3PAEDCr@|}ye1!#r zS3!ELHjpNdOuh`s4*oy`Ei@pTHF`~5i7?BcsGnYP-%pej#corcV#+bU6DL`itAMT>`-b%;e{D zW%-a%}V1A}l1sv93zPUwp72vZJ~D6iJs`X6bw?~DQI<;1wqWLQyIY`58a^FJ(XjlwllbRAmT+(yrI zEcvi$Y>T*uEi0b9SfQo&2ZH7yPzuSyCkt7kk1j0WMH20+PzZ{3A~>p1XiT>ZfEY0~ zmr5QlVU+~^k*?UuHS-&H2LMir*4{sK_t5s`wQ4A^zL#2AnCM!Mq{cAMkleR4Y-{mF*sQIMI=3L)V{>(Q)h#pd5g;X=BjI!pz^aEc&UaS8 zH%nM80W%|0OfVGKM{jhml6Pxn43jIK1vxVo#rNqoDWYqdFbnTmyukme(K~4iKYxwzLuWs;rUoq79j)-=~P;+uV{`mktL+Mc2gfwa@LBOcG z4hk2%f1V$8?JUK-w`6)66_%%EZ|ufIp8KTGHqq#^*3n$zPXHLkTUOOmkmM<550^7> z7sC!c$rRnJf!coo# zgzQm_gn=#ktlzzEoNRebEjBKEv+dob3(kgz6e@c?%Q5Mhwgz_N(D&%Qp5Pw5hE$J^ zzd(Ec5Pv?`eZ{Wka2A^|6)Yv_Bb6XY*5CJ)f>Ts`Wd(< zES?hnG|WFDU9-E(S z9*LOQZF-Y$op#(T<~U~zTac20q>yFpc8@UU*$12vO0J8rq@v~YA6No*chjw60zY)% z-b2A?cLHGSL2dXtV1&pP#det|pdZnBWQex7;%EcF$lbD%P4tn^qDZn)i{gvToQFx~ z1Uo*lTyTb_+}Z7wU&qlQ)e;E=bD=N&3rN)O74W-H8|oopQ77%ov;g=w18K!Xx@1m7 z^5Wh%JMmT%8b0=g#2r|4Q$ay$c=bsR(=YqYe!qvJ_>W`RrGYv>5xMPu>YL(3Dngv7 z!ETmG^-?Q39s0M99KD3zo1#$fHO>2zYYGJRn&_LFP5k{9J;)u5gLgg9*1)duRCbOV z#^aABS-F2Ol>zd{k|G(vZ3-eOZj?MV;7uRru|(;uXD$<=UnMTyxoN4Sree^E6q+vA8j-dM&$(Vl>o)k>d);292IqB*5iP&8V$KN%5> zKThzf3Zw{*fQ~sY^$tqvY*|`tRQ-aAUce2z{Ts!TONQ=k3fF|3LOCzif4Eln_pVIn z7oG5*Rf0r6=K+i{uZ`-wc);IUwTN=o88#Mn)B>%;ZX0k+u}lBjC1Kfa%(R5GTWpOP zCfgPEW<{fh+azfHT2iehIf`qFuKwG34*ZpSzgB^UT$T~RiD#p<;N_k{xd>fYAU~JA z&TeNlKAm6hP5aTfw}mj&zTR127hL3f|4H0xiCYcHQYACtnuno#iJt6UA-W*yk+Wyd zRc3|O)Pj5bvSDWLxDZjPalpl>FnVeiOnZ6Fw6k(b#0 z)LW->CW<0PS`39rs2w-kVJOJ;>OWfVpqB}JI zy3%04+3gY#2!EDguH#Dx&-@djRy+8>>KpW}2K}ByzD>#s&3j81g>2o`1;x zfjy#{vCe4Ego%e`8wBQG7D9bvX=Vk!7khN(J8(8V{v_rN=wLMu=8s`@5G{Eug*)T#oDl!Fh@vqKJV#mpjIv_>}&3+gm8Ou@HZjWwx5>o$g zC%7xRHom{vIo4)a6miNXA_&ZJ@lpv~aSmi;>=gjg@E~uooHB?vp={uAJ65=9_5B5{ z+UR5SK=4&p;8KQ)M%9A12ZRZsK+STu)U2A?Z{bPV#X*-(gpVHCNB`UO#H}&+V+KnO z56rK=1FrtUcPQpQ^Gj1<;W)KukXl;|W?Fy9Z7h8N(GEo6hP$GXeP=y-hrf4K z!ohwqfN<@Q{#JsNWKl0%tpm&ysbJzkmkj%9u52UgFl=%D67jn#VLaj%gqVh#SEczI zjlr6DkQ&7D{0%YyZP5E4UJN<@QzT?%{;%uoX03lRzZNu~m)hAZYRlmwAgSA3=Kdge z%$8U(U+F?gHjllBv62YX7?6Dl5*Fk^)5R}X{MIWltF+C3|IaKU29C?dhA;a-BuLsoFuOzL>Q 
z*Hal|8W-5~avi7@O%0*yd3wQWowZvl2ca>LmMBqNt!)58W+_Dlh1&5C5}@$}{ASgh z`7ya$SFzaqtLkJ6_PIKUBU~y{GeRn{I8)*T$|^lrV~fiSYaI=1DlWdFE>XR?CMCK; z<8_Ff?(}=Ykuh_mL;Si>k0%rGW>=XOm5ITw-#$e!T&iX3YOP(!d_@nhFD>Es0X!)# zdO(x#Y;-ab^o~R&x(Z}4^1-n;`L)#2+>I0McvL!N*=aYE9?EP6`VeT^IY`RaX4)NQ z0v)#sO2GpgM-n!suNCO!X`@wNtvr%d&a( zFAfEiJJ$Bk+ppt(INTyrd4H;%CaSh4S}`>m>px&~eWEgUn6GWR=G%Yf(E2BW+cs3GAGvvq@BSSjHMZraQHPkhzV(L-ZWF!xyx%_>7Yq5hI!Bo8IS&u zzrm|4i8cN6_ziJc4(`R^M!xYV?r0uG-w+ZyicAMR#s@-XL>ZpJadSk{{2(2M^r0eh z!4SqL5PENK7nscrZJYe_HvPz~H5z%N7VaLQ5E8^p?FiHC^o(cyjS$SrENruGbns6U ztD|`dLs$wBbRs_k252qFaUX1HL?M0h!Hv&G0;9;>2smZk{P{uxbXUCL9 zV?&9^)BYqDMVdpTOT7I{*& z{D4LSh*{h%P9r(~oojz$^P1@FJ<2*=7q{zn1L(n_S={_!Hax{zK7k4LD-1o26op{G z34fgxf1q3XN`=;J*M3!Ryd)cvetE2^AHX}qx-83RCn^U_opWS*TC&`i{~~&HI#B}6d_Q+(w)s;t4&9T(lmi(66P%kB)5`TsdjgG{Jy$5yd(Vh|io*E<_lt^R zU9hj6l!D#q+D!nuI4WKFR5rv+ovI-Bj-lL{;lT7}=a*hSe1DuzGQ5~v{;s$;cH5Vt z2-zNVxfOu!9x7_>CoLfTG|Lm_9hC-2Tc@yad3iFgd1;kZTQVna?g#9W&-No)Kvcv+ z3wF0#!X*yxuXoN>S`m=$aT)uPrXOhmGO8Bw_11%Vs_gOMi!U*}za3UCI)qRNUA>zk z`Xn)xmhHM$DAoqk(*~_F8M#0Z-FH^?WQHbNb5$ICD2nfWEKD;r$^d&|^|Ms*G57l* zh;2;8Tv~&d^E>gP;1LCLMwhpRU!CmP0!^ZbTuhvLJ~m)*;hgjv@qmhEqhy_Tu6kK& zjwu__aK22cMsq9OaFqjviglNYn8IE^D3spMKIwLNiDsHcvsMYy#3rkDMc$tlZ(Vft z7ybJ%KJloujfyUHDjsH%k@D%r>CS9&V_eLjE^(-F-204Wf2OIJm6v}H@7j-y z$F?KeM!?w9zZT3zCX)mv6>w%#Rz_e|6EVqJP}ffOHO@Lh0gD$3FklUG~e=N%(rF1&SH=P1@za>_Ik3@?k*a_VUgQcK6z!9 z5!fHQajGMxKD+E3jk@qtZj4W<6=HevA6{_UQ*AFj{w*-UE>%_kUS0 z6XycS(^-GPsT@1=6TrlbYil32uHxKGdtz2%`*!eoAvo>%0+p88f@^?=oZaGirQHau>^p1>i%N;G-Agn-uM!fPz; zJtqFUD!HWwh!*BYOJ*maa}#k+Bn}YP}@4#RRHI6|+t}9STg-NjYL&^u`!>m>q2>rLG3#zYU z=LHYr?L#^kPSG45wLb6}v&%dOfn0`BUDYOBkaxdXzkGAfJjR8>o@9gS-jaId2+_2o zlN#*cH&RR!ajwXa%919t<}Mr*LP>gG7AH6~c1bCy@h{#KcXK0|8W}wSXlns0(I%%S~>g?)URoSdg24*^^NpwZ~7n*1+XPh?cO@Q zylNwiCbtyW3BHsf6zZPt8G=|k+KBz|5t32p{cUsClc)4_Jr&1c+-HeZAgdO_kZD+9 zJ_dqCY$a#6scs@H(TgdVz?&f%WEUp7D4wzl6}p54#}H-|Gi4mAy$q=mrYKTiIhWiu zu#n!JK?YYg+-F9U6D_Af0pC>W6V!7NY@>T)aoL z0wf~~5&BcDV2=iAfhsKH5m5kBrKY#eTC&`W2MnCgvvlErs=kkITeKGjj+i+)vTd$B zi%maBQgh{1swfL5S1sS|7B|Dlm^>z_)iQ_~I*EbDH6RA(UYKq##XT%C7@-j-K1*_P zT57Q#VZBI}&3uB6brORhEWdl#kosRU5x;G^S(by7s~t`%XP6eeoHY+;r$$_5??j2iI+&LeK-pNcTQ8!FH;GGFHM%cnnkzx4_*e&k@8cE=zoTk- z=(T0V3aK|y_)C_dxp;mRjVC9GtrHih6!{qsnu zeawW5-hrZ703#{SG6ppxK%d(WG*yV8YfNb_tnhF5DK!f>R8+*;Vbl%64=idPML_Wr zJrLGaZ6AS2Be;)OCMpi6XTAdZoPl!u^xjeJ({vlXD7}*#pN{sJTv}7xNDGF&7DZP4SS+Rh#F77 z0x1sBJf(n)PG|_)sI}-maMwv)@kuD#3vjM7eWr|y)~E3<&ZUdo8o8&MuNKyq$v^MT z`UeBAB}D3i0dpQL+Qx2+dxhune{8O?R-PJ$(w~=hF2FK-;Hf_S%!c8k zAjjL-w;r+D?^nebJj!5ZS+l4V@($VW-UoyyajrPy(?#!wA#oJ?RZlGT#(SndB zVogOd6(zQw+G3aQYhp7;m*hDI=IV`r~3ld%UK0 z+m$AF_MIqu#uSvoV)4CCX>-m~t*dBcve7JFHCpL8Ixg2DsJiW+Xt4=I(?Hb~abQ1r zJ@nnptbwwjgl{4P3n+dptZi~JegOJ@uDVcFr)&G^GKgd%*3jB#?E{q|YORuWlu~b6 z7(^a5iXuToqUo~Sx1$uIDZIbGZ~F_r+w^}ic1}T>1dW<*+qR~yY1=lYZQHgvZJX1! zZQHhOyT9$-*gs+;&WSjCml<_Y7gbT275Ti+J2W(a^t_SEi-cQ@avzRttm(X zqYF&m@`C3hiuQ(&p3!xEva;n9NnZMgqe=-dAp24rqeTwkx|B%SQ3GUbu99DOgwt36puEy1KQXd{Mo|e2IrQuX#@B&8 zew*Wo2Zuf0efB2an5DpV=|=FP43TE@s2s||3=`gLmIh|(La<(>40fV=f3lkhQBQ#p za@91%2FC}N_osJn?>t|yPm7Nsw!8`G-JqtS5KB_tKJl*iPVHz+#0lvumG{0qdK1_% zb!Mq^;j;wUSZ9ilBx0~gxpK&&SOm!ri+(Q#WA&QZ2Cp0&+6^23W@0BuY`0!}$ouIi zN3>^!n9skIB^}7)k*;GGG`UTLLVUCW-o$1b&D<4{XV;owf;k+xBC1d>E~HGFe?HM8TS+bT9KQODj~WVnIE&Be3c z1jlvj8#aSyqeLj;($?&xYxjsC7c0d=^qV-U)ylX0zS(kj6oU*ix@aaYa9pX34jpbU z%(OL}U}j$GZI*(uhunhw$B$%BAtJ4SY@u>69$!z*7sYsuZ$#V06)t|N&qD8G3P&kX zAL&E+*9K-j=nvQ(d={VF*&HUnM zDFb~r=T+n-ar|wW#Nmo;^z}kbjn#H*q6quh$_m2I_u8rqx+5HHXWX#ZN!u=4=QHk8w-dB%I0ox5>Fz)|$! 
z5qW*(B>jYQHeu47Gjel1`Z8jfac< zsS2>s0y%F9cjWD(n0r1$=I}gB>ldJvzH_Es;GixnAtbmE!{e`Q4wv8;QP3{GF@gRVl#C;A2s{wqL>C$O zm)HH>Ta;|ZMG?UrZN_n;4F>xXUO;DM9G+?7ond|D@XE@Qw~9QyuLXu)N^WLbjR22b z%E3nD($>DwL+-Qt=rl!c=m&8BkZ08blOtj|Fff?YTZ2>+RXZ4K(0AM6wtA_uK-l7KBHRTxJjx3-B1uJFGh$x)2b|Lfk4iJkes zo^G-*{NJq`(f_Ql3HhTV-_L;Nvr}X4eU7rxq|6-VDV28iz2f!7)PsIzLBX;>YM$r?>N# z`IW0~J%L6c#2`+qm-$I-aHNcMZaCXNlBGdQ9sF&7R2IFV^&LSb4nSU278^iH?`nFIsVa zC}V-`RHUzYty#Iw@{RQ?N=#;<`dVo7>>!wJLhjDla47j)dovBXvdBw_ZbD_@62VPT z`G^ikz-Mbm^u4-Y?Yr?LT{UCL#`2Mvni?e%^=w42d{dIRC%9p`)9@74W1`>7qIyT58SxcdxH8 z@B)9=_8P)Y>=SJAy3c~8?qblo+Xr@7`IELD#cz7?e*nr~EGt}3oO7?wayWUL;8<|h zt^cSZ!N&0nLJtoQO;f?eDJHV*?t|Go-q$)gIvO!BocbFeiadY{JQ~ zz#iUf&}9BKGo&>zK!$~dL2}Q|CF6rMA)A?kyZak71JT9NK%FDhgM8Xz=OH+QeQn`z zLx2j5U|hbZ3ga5WKL7~{`cnjfuA&?szN76Q8-oXe_*>Vtp! zZ~)P}R{IrwaKF2e)Gp-e=M|9+&S9S(g1goOs|Vo=_^aKQUoyyJ(EyzqKV?EW|A5Lv zpj!d@kad-7)j{+&tWK(C8?J6~;@LVeQ=YOZRv12+Vmz9k-9rkoTs*x?1gMxl=* z?mGB17|@L0oUe89{Pe23PzMLbP6&BH; zAY4G--abnXdXOP7F?(UGp+VLCt$XFL0Mp;b5E_7x+-pLZXD}=WzOh)?-nWm3n&h#> z{z@V_1YJNepv@3LRv4X5qbb#|<@0w*Bg-J_?!fQ}qW;100hqnR-2(_%4u&rvTO1VX zyCVh=uHsl93KaZJ{c@J|O||xA2hZ@uOEl>Zm@Y!__(KoX`)HSs>vZp+$>-zn#Xs>S zKMKel_cMFzf%)>INpN$re?2gJkGuukf#UZe==w3|BwavwWq`1bF3@s({Z=7-Tsf5z z#;g2ZDFFlJn+i-yj2`HTLB6C$dWZ?mH6=fxo4W7RwIq+{WAi;op8t~}cqwa&g+Ve&ippQPp zMgZeo$|0>PLG6}t^82%bMcjRey}be9{ppeQi;DyD2Yt08h1#usWk&(ASO11}aJU2E z$O8c9-nV{39zxppe)j^(m*?SOd@|PfJv}Xt2tQ8(fC6{~Q?Otgkfh_0I@aT!FL0<} z5Sz0Z>nuX?zno3ka=}k2TD0&Mm5E(zvte{GbsAolTF%Ors_Hvy5+%FyWs zjN||kQ`j@nRQYk#{EJm0pSZ}{P$Qr`8|+c1jUYus0g`vZ!3F35_MD3)S0NMy{Nj+h zJs+h3Ps+Y`3KT!tH37JDX+nN$rXxn)nOnmom5IxSEs>*cbGa7EO>5&iEJ~bkq`$&`EMJ zDzf^ZW^nm!XkmolOy{w!2;)TfXgp>aPKW^bg^3Q}hE5A}c!ZPAAW7yyZUz-aKQYU% z$ypdgmAtqqX81;1%nkUd-pM+ieR}Sfa>?UaCDfHzdd|GXa^}xz4IRxZf zp2#pIcDHkCuMi>eC-@|SlDBYoEV&|tNDdJ;rdxoxYpE_Q%djV zUTy%yK4p~qDOSBkxs(&kZp?gUmi$f%y$A!G%am-;VqypVIIcHl2f-t}R_0oS#4}HojEI7(2A^2 z@}-rjS>5bqrLBDMrK~&+>KtG<6QFre{3?_FQcmjn>a1su$E(`5ak`$>Ex>;2Q{X3i z&ht?mk&$4{w5MdNVEuiO(h{pfycCpIXXrJic95axeNYnOlj=RiEi*~<6k%w^ z9nV1n-3|u1p87d*q|D0()p5tij`>Q8xEwYuZ1Vk*Be`wv(s@wonahc&kCerV*a<}Q>=)&8(0dQ zPV2W!FiRxX__b3tN2~7{s1SVvK?mvupO(j`!A$wE_DI6rk`U8u4L0iVh>GyqCUENCF<%8IrsPF&`TfZ6))_FyL~WN4lAQL2AwT03L0TQIX^i(h&r>} zlcu&)XUCr#w?f)F(DhpM9C;)3$4E`Cr03iicD|S86h-TLq0dqrt^U}g2UoD=6;Ye0 zT5BeQIQCQh2Xu$mM4yB)t78q)(|+*!-N*&iyCxUlq0LS=$+x6xpq}uJDqNuzVSxfZV{wwgCpMdXFEQ1J3}BN^n4-NNQ|=oHnpOU zC5Yx44w|7?fYP+bUfaq^r{ZM?i?uS@uJzZr-d~4HMc*bM?b38V8Tbva2dzQt|GRrE zt8%yT^n+S6*@|P}u}awNS|VADgNOud=5ToWd3GKWB2># z1LFdmJ;wv**W}U5szs{LbZG}8cekE@mhkIwMkvf((oq?*Os7^;1E$(rgs0Tgvr_uR zsp+B5DQczoH=66l)08lhA8{Kjf$=om?IYtFr##qyPFs)-)xtg&iyQ0E22WQN2^LHS*#-?N_ z7~q4BS+iQ^?s5~RoK-Z&^VS)oE8~YM2~x6F-iB^5&2(JCp|`*!LujFK$2?g~^&IzR zm1bQQt!GcYM#u`V{$z$47i-7>t})DDnz(}eyoxxEVrp6+8a16Ft2(8`jWakN)*Xw( zNfDYOtAiC2O^zcN-GElvPE{FqtMfapq$$3}K?L?;MIh9VK59YUs3@z~1?Dm(#=1MJ zQ*Z&h4VK*mlaaZHKxk(b!8hT_R=?KJ<738~2Im8A*uLJsDUzUJ_pjZ#y^YC$*N&oUzHTIVx){ zR?y8bELr@RkVay8A}qdOz%tyz=k-pDB`}m^8MMg&3&RW72V?t}$KRzL9BztyHsnRfUx z=_sK(3)qroNv)ttdZPsfg|h@y*QO1z?%y_ArtcY`yWOjH=@V5KpIF)olQ65LdNZ$& z5sT&|Bo>pC>LhZ6&!Wx@Ny3F>7!?7oYLWHx4n-x@?2&Ef^+^oU^wU^er86N*yu9gP zk{lMUeTUBhXT>etXWBXsP-K9Un5T}$`oh?xvT&uwNhcEk)ax5|7*+s1&bzaN6;08}Y zboz7$qY3tJ?BI2goeNbUCbD6SrBb(vv9v6YE&lv9Y!XB?LK5iTDS=loM@ zAU}9J&4d(RZQk2M49rY-K2f1#Y%PM+uWW4i3cDM~rgj|bpunNuB2KUXzp;cSBkgIA zb*^#~-jtX$lzfKJ?C?ZFkWIuW=c=#}lURvqd_|CoKlmb>dk6 zKPPZI2#J&%3i?ORHc&V8V^VPmsR*D63hDK}aR+Vz`nzdWT{3 zD-A&#NalF?r6F7VQZ%^^uTwxZ_N`58iWN{y#P!_5rAP}Q4>?Q}VHmacAk~TLXR#q$=koR*$f{|3fPuPv<$o5N zpn#+V9TW6PAg#A(74Q+VI^^*{G8sF)jsiEOzw>*Jok5S|S0jxf>>Mr(Fl$}rB^5Br 
zN6I=g`|v*E$VMls(?`c^@xaYnd`E zN%B?gp^o({i@___pOY`)O5y%Y0)E6M84_;<4#LNm1N4B9tV@g{3oTccP%Vdqe{-T8 zt9JB+U0tU5-An+_yee?oY@O0W)F?Y}x2jVvp<*VvS&Y`mU0US%&ooe{=I74Dm5?!! zJAU14UD}Y*9fg7uLC@*;z&oiOn;V+aeNXrcT9{9yWG${O?@PxYnjy99cdLW9?6ZN;c6POBhjmWuCz3tEl zlWmKlX^{y6s*sYSznBW% z0F08;3+w^o+>T`0EWG%1B8muqDlbq?x+>T3(SrO1>JqS43SSYfw9tJ+D?sYg)I0=V z-xlB$7Az?(9Wniz)+I8IwLf2wZ5$H%yXcF|0mv@@^S~jkf-tgg;KU;t(RB!$pDtS# zSxd(P{FBJJ5(oH(4j+*eg)2c`W!f~Wj~ z9F|)w2o&ffO>wx)y+Ct$*wC{b{Oq`F(A z$R-f)7M7l{tiDV|1?-CjdXCmyFVa=m8j#T^X7Ya3kvL9|1d#MgxM)Z!QnwU>!}5G~ zlxe)x5XW0HTsdS)KQs=vs#~5xhVH0`SGmiJifi##HnbMU=Th^qywx%0$n{I7;=$3D zYTFCmm@y$Bz-RJIEe;ptU8aj+g2h$#hy!2dBGw9}{F znXxfQC(*Hogy~xH-@w@l8-NsEaf3###F@uJ>d0a(l&7-E)?fMgUd&ai@jh|svU)Xk zd?!@1u26cmgTFG)GX+5$nN#18kJSra=|2Bk*Hoc0CX+f1K)Sb}FALw~QIlO)!K0`FWDc{4@1H=uB_3dXZISXLV`!SkQ= zssRqlZ|y1~0w=r9=?}$6KIztKGa9lp&2_p1>VblO@#oGEe(2Pux$Sql@Z0ci9WFdu zBwiU%^NS3UDA5t&teLxWKaGWZNh#3+%uC-Edt#no_WaJv*<6!3FLZ!*6-qP5M7fWt zQK+fueB1~&j*81@F@rUO)0IvO;7=brG~LcREwm*`NnC3qgtNwYOoJq)t4J=Rg@P0H zXL9KAwDK>l2}Qr|QcJ}cLVF}$vnG%BJQb??!ZIR?gRj) zM&*gp-tYBdyH1X0Ok%}bQb`yj&c?*%N$`=!f}u-|A0>T15!b}B5762H@-f%}VszQvA#U*tW0_&kQ3kagu`>Hp`d0_)Ml`>X=yHsWIxp}- zJ_4xR%mAV>GLX2GZfZ(IDfXj5I+>r;o55Dq=Xj%|yC%aZZG z#~^D8IUj65qa1o1Iur+6x^s!~DG1amTOAL0M~=*if zFSALvSog=nk&4nr$9;)FGmTN&#i;QKJXQdvu}fT6p8`@@+SQ8DBmMjJ%7j}~xN6qxE ze(nG3-v!mY zWxxm_G??7EBRPH6&ndE$DtWm{oJ1d7J{@mK#0vtFgtQh#wKOkdhdN3|ytd0HA_>;g7t^ZXN#x!dG!|)E_Fnx`Kb3naxx0 zVWL_3PR&Y!}OYJ^99SiT&Z3^sfOAi4yZFF3WEE&hwWf z^+148t^(B1LV6B+>?EuG3x7@4lTR6*vl2Boy)HVDs?lv z^+p(PsnU?$&?)BMYX!X~T^lV13Iq{0!|EyW7=4fHgv>jd@R};3gMNQ@tFjofG2qPR zQ3)o35wA9|#5zXJ7#q-B3@R73&`R8d{oe$YAGEUT7R0DXU?1;_v{Sa?d1MW5;`mJI z#aOvr841y@?o4*7JCkSON%V|AR9B<~B>=w^B0Jin5Q6$Aykr!5>q0ZfZ^&lMZa_PA zNE_T%ctin8Wy;7frnxZI&+>SP%GdGO^kt=fV^`lk&6AWoMi~^Fyiq zZjR}LEFId1OKDh)nCsDeZ_Ys&JmMy;ev?Gfza2}baH}euPE7uIOnmNaOV7b(|M#5$ zIkXA6nx)~RPUQ>5<9suvW=_A=jqt|a3KEYZWLqbcA+qUWa7}W5D^o_i88#?{7D~P2 zHuJWtHPOrX7mIQy8^Ru6w%`h!2ZiC*r{n05W3c(Sc z(c>AyAYp79$oLe_rTOPw-GrjQd4}q3?ptjm-ii`IR_uZFGtc3v*Wu>JaZ1#_@t5mz zg9ZiXzB_ALbRYb~j%;3ds=KflpQ>c9-0>YOI@z{8QsSY zqHX8jJ~#wZL`&_lHa$Vt&6*H$eKR|%SJ-k=4eM^%CZYq*?7I}|Tlv)6$%Y` zZo6zmiv4>Uh?=;Bt0sQLKlGQJ*6a`Lnh7%+P-X;j7GcTKT6tX~nuNiqPfCFi%NB=B z6vq7~8q86aJ<_nfS+bjAhF|2CMGeao`x%B3v6}|E+6Fo$UAoDo(abindixoFTn#eI z*~eEhR^NiO5jO{msw7<_4vJ*48bWr}$r}rG=N}i0Xa(w81BpDNf{O2#(x?3%- zZ{F(Zax73#!*(hGuRg&96sjme4otE7#y?^GnMunJd&lq^o5{95JQN4H@t1vRt(zt= z^1x3mYLNu>01MhF;MB#Glp|0?{J3synvn0AeMEDZ&~w~Dced-C@W$Rs)c$F<91`zu=I;sq~PSjSWwYieyRH|i#Q`;13pkN zOFS_Qf$vGDVi8Vw2RW3EhpdZxU9;&h=?iE3xnF4tt~PG$J?6ZLqx@viF8hFzDog7D zwI8tOTzWBAae;!Qw&Ao6-gBYYfLpT-9WHfv!c~S(b{>?_s z?))XeK^8!d$>`J{la}>&``%HP(63Xd-AWS(i&S-BDC{V!!0Uvbs}rStSGbE{zzT(M z`)Z5a)Ep0Uq`}!Q84rV45`|=JXW^Wmu;Fv9hY`ofk!Yt0mN<=vy3fsQm-R0sjU$f< zp~ohTinL@X)<34~n9n$D3*2Ip18mWUpoVL(3l!7J+-hj4K48jKypcr~MN0RRRn*NS z#*y&=MD`Bh3bztn?6NcNTC(hVtZZ8@u?oHm$Ap13juU%9hpH<0&nr(Mnzk<TyFAn#N(R2j9%5s%+}L z{&sjR){>(jEJ)`j6yemxL!;WaT|rTlDD^-^nx$O!s0=Q*pJWeBtR5;6P}llM$3~}~ zRLv!%?o43}^TV$<9YOal{!K8CJ;i;$fJBVW+a@5ZRCH=BYvqsN->1J!x_8ebU{Q$B zMbWcP&qvJd*Vr+od=ILZf1!)S1K%j)F?c}R-3K|MW;Ooyj`QJ$o}d)&V-~#W;WoYW zFQWKis@gZIE{!|r>)gxavpy~)^kcaiYPS3paz%_!K%R@)7EOJf}Ix4VLD(PcucT_e6ITi&gaM7*z z&5~cukoP;o=xJmU8fHMAYcpETC8XChdnTxQTBh}cUweRo78^O_;-TL3^P(WXAzB&2 z$dZu}W!%J}^HZ=;wfjez&S%=))grdXpOlc)pn~F3;0u?({UvDDLBHTR@os|kJS0ki zMvg<34dOoWCf4#{B_TlCn>G5zyU-_g+@)U((IF3K6}l z2IM-B_c0Qx$bHxaPp76yL8H4RI~XcgsOT&b(uZ6*LxLW|y6#kEszpU_fA?&^Qsqk( z7CnPo)g8sQC=^^2nN}nRc|{%-!AV;7+J%*xFOGTQNfqoKp;8X2UL3Y=qwj)!z>%M* z9QR6W8XH_C)Z8t*hXXNz&Er?yf$eX^zv{$AbM@S-lnDTt3Rf{uEvw)`WBw9BwMWU# 
z0;*%PzDgO{!YAIu8!3mPV(Zfv}*UllFU2ZO<3s-cMsr3w8Bh=I=1>Y#`Xgg)$ zXKUMGV)@1%BCXf?W!4)|!x|0s@L2DWM2MZQn= zLuko-5g+)m%0%s07Wj-amn30=({+5AsNP$xyK^HXhBRF(dqA%_;Gq(Hy`qfMe8#Hh zMU!0Eaf`CK0@J||@u{=tzcQ*!L%+MqZ6J{?nAr&Eclt27PIG7$<0j7zQj%#=Q5Ne! z7qos|@HXDh2BgKm@N5$D>0V2e5cy+qH{@U&TAJt6!LXy^u1skv43^)?(GK=9bF!W7 zF8otc`-SGq3)LMSyLgTQslLD?kcwM~JKh`|7WL;B-7OFAX_9_POk{{7Jj1KFxhrvRzMp^na>8$-NCG6^7?L%y@Kl8Z?Bw-+TZ^q^VlJDIWP{erqBuWe!i zU*qm-YhBF@zyr2h1GZ$k2O@)v`|6200+{pyy}CI8RmxNX;9u-+dO$SR09SAC((4WVM&fuDBAhO$V-}LxUacGLbKs#99 znYoE&1<64TsELWmS9*Bq0GNkmQ)n+{#@cNB(d{+ppMJi`WKz(0fI{oYCgyQeX5(qG?pne52>gz4a;*u7)@{ryuzus}W_{+qIN=>Vy` z>ph@%t+Dsi*XW?wRM1sm{ckbQLn9Le&kun|N09bFpy?QTBElL!>TgWq`g)MbN||h6 zaw1q7uR;E~ok5}pzrB_Rw?;?E)4I-RLK^?0nianu>`vJD*Ea| zL4e&jKqVp~a0^&BD(ezRj7?4zh=zvy_m4gbeD|TpAt^u=P#ts>TNBmAiPig2xExc(aNh=!01 z6Q6>1*oWVFEpYdKNQnL#cZi-7d5?O74=I9g|9kij*=3^e5ySyBc@7ZmwDWEEo5h9P zC45YoIsCb7_OAD+_L4sJ4Xgn?ndjHr0x}8c?TK6R+uLyL5?tXw&hk6!qmKdH`Z3qN zLV8!M-S@;DREeh$Af(u}ToFdHr!VpE;v#4I^|F&U1N`ljFTVwr*{S`69J<@qdt?{* zAMG}K{px!-g8<@Rs$BzwPaE-^oi^03gi{7SGyuXw^&Lx>_r#muV2)pq*wgGzB`vU5 zu>)E%C;aCvj#+g0f`HQctqQ% z4%9&Aqz+B0#wcCs7<~HJQhUip73=_hd^^#EP_H?)7BX|&;8aQ7&KS+}y?#wp<@KD- z+~|Gf64wnROut><+(~|vGF&1dB4cID?FZkt!zyfj&@GTPGU@cMu@FcVPRxt#D!uV8 z+dO9zT4!H1lbQ%=gtIp1jJpQ0MqhzBT9cTu7EW&_bu%#%cdkh%x5l;14eZIvF*TGm z4o{KXgPBWk-F94SJ%VGNmGPyutafNhDCfJsd3FpV#igAhhzTZl7@ZM6HfZ4jn5n;T zCz-)qv;MgpmJ%H)U{c}~?BaKb(IA6TbIm0}MlveRl%ipXHcRzfcZ>}et&zog>e2T| z*~(lmdiJ~p)TYdML43X~4t$EHu+mqNEo2DY`EO7g9!RcnoxM#GaUVYZD{dL>eBVp* z9JcXOq$3(NBe172j`Pq%K7M;YK4}xg+%mVSYA2GyZ_vP>Kq*JRrDVnsqLp_-8S7&g z%EOWv)V=W72^A&hMO{X2y-rSPjZZ|nc(EX1Dm?fzL2!IN8wOw5O*`1{jO6ox;_gC? zhq33?9^7QF2vRepiK=C39-LQl%fL5>N^M{!Bs}d29ThmRV+!%ZKRtE`4XyO;O62>Q zrMcAU2;y8U6)3EcFF6TRUk*4VHm4ZX{;C>m&?K0c{6=4@!_~=gEBJjZ&j;nwDbT4_ z`Hm?PM55UO2Cr$Af|RSsxuJR5jP^(r%-9|@;XFE}k}K^e%S>*q0eqqdEm}(#Lu~s_ zyrP$KJF#?MvUH;4bs#uX7GKOmUA+C5G25oDaLQ>K@v*7RVyAMvWH9;TvTj8Z*?K&a zOFR+mCmk|u48tE$9y;N8-QGPa+c*(bs?H8sCCQl1b+m6xm>j&Me050+>jgZxChCVl zJBuA|Y5~#}L!6S$acfQdL%6OJ=1^sE1Xxm=mfQk44GpD9DPagZ{-|nOuK4U0Dgq9UN^&-R4h69tdSzsi#T7o* zyOnF`dmA?3NWQ+vHXg_JgN~s$H_0pDAESHs?S$0M+(r4KxcJhnG6<#7NUj)(G>TtTcV-X>Q-@<+9wMLW$^djB3CO~4~0_S`wT zolYaQK%Cagd$3m%C@NZD4RlE=SKuwda&QRwK>(CO`+_=)-N{Y zJ3jE3&=;RMz8vh6PJ!!oHgPH$TnY?}NzVFabuQ~Bu)52u)cR?1=dU3R&?`La&t>EN z_wmOQGOhOGwK_y^u3-2mpm0hU7Xd8x> zxBhTR(C2U@iPoZQ(aN@e780&jk$MM$;y%v8T*mO;Bq%#vQ-z!6DC}Jb8gG2;~F)?_-L%XIx7ZE*3pFf)de7fY%WJPafc8-VCwV_xYZ^C1^OQqb@ zutbWHHnGM|InODVVS|gc?v=pCXaxHmoi!D%P>@xoZ=yo6O}J4Y<7L9h@B8noou>?^ zrD+EejI0U_-$ORh<)s`~@SBaY@?d+KB*s2|Q=zNF=$K6T^XtvvnPB>P?ZDU3PWaJF z*Vu(|2ntb3-wuZG&sz!mKbdUP`=`RGB4g{*@@*BYI3RG;9y>2r{E1Y1cSENfHVx zg(A_sLi5XFo?VgF-#)EsUvq!CfYVqA44wP~WBmyG46xC=pYavLa?XbkNr!(|Jt^cs ztUpl2{*^1HbZHFi=mPW*b7_yr)dVS_oVx)Y9DrvQ`v9QKjVjb1iJ)*S>f;e)Z)Me8Lh`{l)jzLfjgI*Lm{ zL;IfUn&4nI?lMgzn7e(|qH`TwedkUI{I8S#x6wmagHE)?U$68*VQAujdVdcRO$JePioBu8YdcE?v^4gx7bb9e`z(9anc z-HR}1RQOtEe4JQ%QmC1{mroAiq)B#k9f#&%=h7=nhSo-sHxy|YNGo5O>?Erc+-trD zdxeun;6>hS#qK^XG*J@rnHw-nd;g6Xr|PNk`yzj;aEdoXWMN(<>~e^Nr?_{dE8xEJ zp03o}wh#Q7_o~nrL&~Ui?EI{djBBI3Mp=BifSa*>o9+*l0-MBHx0p_6Y!4j ze#UTg@wa@kEC%mQ98$a$CU$77)d}egHo?|c*o;W-{5M3Z?^FBM_`x88aGV|n1`aHK z1s?*{2uM)a9Qh$?DRw?T9FbhczJ8Ke)pniq8(OI_%B4~X-O1ppTOuMhjNp)r*^H7I zb1{3*gWii?a_5b+lERLGa8E!u2;_x76elxmqIl3Lm6qtpAu z0$GM0MUnYzy~*Pd4EP?%z;Uv|vhL~!&29eXiV4%1U7|{9gZY~OF;g?N;-g`-gRSKn zPR4Kp+rPfYvI!U^Rg?6l)hTZB4eMkLi3jSV&_|OvvDacT|lYIb}e1 z!q!?va{HY2$-}rtJwv-Z$tfb2xX#zRU?oN8=0?}f5KXtL<7F?RAr6xtPia_6XwL$o zhdxZ&2EL6Ayx*5a^)R)i-Ppso*V_)`nQ1~=~4(e>%LIdIZy1D~zE5g$e z6oHp%fs+$o_zC8a5hf=#)OQ%saTYu>%3R1>yNA7;qm1)1`VGjx5ZnJU 
zC1Hk|cTEB|Q}7&trS0#`qKQ;|Lbg4;D%ig)GBG24!Oe zh?%{OM=rNrPvp9HNdU7?Fr-}(XmC5^g#DhLf;e(Gu2j8|Ok*~ta<5cNDlJk7ajM-5 zKX4V>+Gb8cs|(s?AwB?WLi_bQ<|R^q3rkuV^lN1f#f?mf&1{UV+x(!p9FC}|+8+bNmleQrq8lY%~|=IYKUBtf%B0g137PEo#4H#+(%aPrjp4<90h8t{Fs_2+stM4 zw3+`(@Y^uu8D@#w6C*gA?I zZN-PG+nRFY74xJ0@JF?eJ<%^yDtFOjp6|TU#3=YV2!ANmmx1`x+j{f}doZ$q3+g*u z@k?8sijWn?FpvFyiyX@%e{MMBeUc9UtLVH!94Pp(CTZ6ttlN{#@G>RXmk_)C$&*L1 zPLen_)s-QYP-u<65Z~ir2xW_zHLyr)G0w zVe;ZDKFmETT6Gvz&5qgajLC;qa`bybH9O9Trig^jcXLU2(lvu8##6&71@W?WH+Y(% zL<2{K@!ef0TeD%48G(6KMN)8&q`9BMI=khRq$wq%us%&*iKp@_ywS|{hwhp0IR}*n zd0oO7JaiuABnqyIe2EhxH9mto^_F~b7kS-yV6gM?tgQTUa2jjvMl{rHRzSFxs;3mx zZB3^|Nt1u=`&OX47u$j>GU&Zsj(Hb`^Wp6S7zIgZtkqk$e=q#(vznfVt!@`uhH4jfR zsW0!O{}A)JY$OG7hfc~$$!U6y1vK! zD=RRn317ZAThCiT3+)c<{}*HT(4Gkwu4y>7ZKq<}wr!gg+qP}zjcrzJ+qUiOuLnKq zJ?NjYo^{;k6))*;z(%Rx_oR7hPvwY&?rzB7npNF%bJFG$<}#Dm{nWlywb|2A!2Q_i zq%vQ;8k-V3`@Iw+x!Xt;q)W~5#sy3sH7CsBZ~0b%#mdFr21`%qz z`7ed91ZJ~uX4+tM>@=!WV`apNammlM296CWGxRbQJ{+aNd_v`z*vRJzamJ5pUTH}n zTqGhi^9q~-{7rXJb$XTZ;I(+9tnvPM{MqRHabkg~Y<|Ccl5H~;Un7z`hAmXgHD<{Y zt{y{O|E%xoC7JBMw=oLd;^3v&PN-+TLvcZASQfNh_CC$zYf!G5^+wWI+u1ppvCS<2 z^6=lnjy5>v0Kx(lnh;+~ox+jR?+U&wEH87N`70^|Tg%x5o{=8g5_z!7Pj=Y2}WA6f(VP<+oMT1+jUxxfjT*4w}u32*750Y1%Rn8Ckp+zU{irt;aqqJ4G$>;W}XruF{K3`47ga$i_pErpx1FcE>u+4cQt)tICTN zjD6Pz2)5+TAf?_b*YxS7{hRHP9jVVwvA{;hiZcdh31KCyxg8#Zk692gSufg}-mHE} zhq|b0CgamZX-7H)ge^khWe5~UsVyhU#37rjia=6*`~x;-ghTZ3oAn~@b*W2blFW64 zbKMtxV02Wte%_=Od++|5mHZenB zL`SYCeUa=5C*B+yH>C>a-wQ_*b(^4?5eMvFehidlmCB1FY{SwFK0c+FhXbP>qmmct zRh!p6Pf7N5JXYA5h=+r0!i4W=`*6q74=fE z=u7Yw7+pL(BnB!)%+EYu_Gc~3kJpVf67ri)nCik!upezj)qD-^R8cWjgW6akAi9IQ zK{1>{OGmln7sofL!~=>7T&)upG8hA{xy@-BxUbDAJi)USkF!#t)W)t7* zV`w3IOJWzRN)SE)hBln>nYWCBcpnPo+x-gaJmGYlC1e!hBLT{X6{&Za1yBm^0IsQc z*g`wj=ZLxpy*W~ry14GrwCQ9KZ&4l37yQK(NvB?rs^S|e9eM&mu?JXm72Z6;7eusi zFY(a89`u^NZDCT@Coi{W^WZ%cy4oI#}7=$Id zuYdWc<_CVuej&J`A9!0;!?x*5JPXg?gBp>sZtABjy0%Qb8la*tl%XBYULv(!!{a=Y zQ-SgeUTgxH@Se{n!rlE6?D2x)G9I+;3+vQno$mV(e^P7XzVNeikXPAsYVw#G2(=MY z-I#H-ujV%W@fs|AiwZSJj76w>;}GQa+RON)Ir{yg9E7Ff4rDzcBgJX{8sjFE83HV# z7q0{e2p1r?{$2Y>fMl=U4tBQ{#X@piy%=^)%l^SpItL9FOTy&?qa2W!qj5WM~ zwo0pcSpe2XrmTu^*aLY2HZfxJ2d?#JEcMB=WnNTu0)dnn55HzP78%1eLf>+<1H`h_ z{jWFw7UFZCSIM!!#VG>*wk)$6rY=|=+$^Au> zj0l5s%MJpaKV`&NXLpS_$uKMFGR}p*j!5etZsf8>xlG*O*WcG+0P52yU7}YA%CQad zbN)|9Q1gKk?W$$e*95XSF_Ir1$DIizQDzcOObPO%1+Ididnq(++gGhM&3Uq|`#ZpA z3Nk3gn@_R2{h6LJnd;)$hEDEsr%OOvZzPsyRLqgnhl4b8UEEE&uy1nW@agrusQ~RO+=y}$@d>;bOsSi<5%TD zu7~5o9A@%SN}H2&-+vZ^)OB^5s2-d8IVF;?boNtY?eJt||3UequO>NpnRs0aS6j%3 zwmu~Y=8gsBn&gvk$oVZr9Ten2-PdK}Uc*VT#-%yjgpK*;u+0Vs3Y+y|f-!cy= zDYcf+I@eBrzd2iE_~t@J8RK~Xuuxe2A@S9ijy@G(XUI(>bm?p40Sybsz(Lh~d0b7@ zPn6=8vwBOto3NLDUkdf~ghXg_F&P{!T{x^71%|B+lu^a9-)HTcA2WR~nA^(T`2-Ds zfvr%rL~^zv=CkkxIWCvm(csj!Us{uJ5l21Fba^pQu}pHMth<`G>b>kuBKd*|_7|Vk zEYoTcCL!7cX7>_SJycDOO3Vt77`A zy^=nM<2)OLzgjOQm|d6pzu6PRusRVX_`>#nAGQ6cgXFO6YU&rAO!{ ztMEZ*q=xGb4x|aq?9BODv(@kH3PEATGkCmuW)!qvQ>Ss}pEWEH>b2WJolDmD-eU6!%qooPTL9M}~c96Z*0;#Nmt`eHNwx|i<65$|o=)Cq}eL_OsWhEGAz%wxf6cb_k-uL?0r|`K4sjS``MY zVFCU($N7ojTuZB$GP;~>nhhB*<9Xo~Z(E)WaA|nOX8A8-5wQO*PQDcWWxQx`hC&P< zC7u*|#0-)=yV0%OMQPJFIV-Ded8;d_YV}nBJU6&{DLpLFv%v0lURy^wCia?c(>_Zb zdB(UhC`~D+si80H4CkN{yK;*{z*wjV`Ar(7VUB-pkZ}uZ<-pi3?w6k+;JTYCfTdE0 zU;g*hVR%$3;NcH?t68pvC{DgS=JkVGs*B=0ChkWH`LGKew_8ywm@Ka9F2kjD4CO*91Piep=*?=V&g#2f8UWO72-Zua+W0o7wVTh8;fMiFhkZ zb(Ji64=;Q>ew5YBfB!i=BvW}^$o957o#1|(%>xZ6xv1KsN@1zHCrIfzPotW?Lzp*+ z7E6pJx7N7S{T8|1A!#`Jx16@bx)Y|A7?0S8qnP0`aI^&MoeiZ{)o}fDy8e#w4bXjt z*V3V%*I@=m*ZKgLP`l?ZeAfrk!F<^evZDIcj#+1b=tWTiAyK7R|Y_tpt5eIDzUj|2o0icHXSIG<;88BV#OR_-eP 
z3?Ww*pGfJ#4)h5^&vLn>yV9MpSP?-?_(Cq8-JyazhqwYdNme=rjgnOVd%c+Q9NSGH zA0ag9|1`_y3G+(Dj3hN`$KkgS9Oo^tj!dbd;DlP1q}c#>F{y0N&QOO=b4@3gmwKT4 zq>N!1M1~}tYH+3KAhxDaVGQ?4cs|tUBx(7a=eXX9vzX(wjuR_%><~evIya940$ep$ zNd!ku-n!lSRtK_p7_bwkXH_?ek(TGMFuh5d-CKDZkqjDu+<(O{Rm=ooLxqNYwq(pr2_TH8EcW-Eg0U7oaVP2pOjYR(2;yRyZO zp1Yx3juPvTI5G;Wu}~J;PK=(p5jmMV@&1=%Yigkw9w~6Fd>V3&OmudskA@|Hy)0OQ z`m~N}dlHLIUHvM2pSb@V<+m}p5MR2o8GbY_wgPjJU`>% z=11lXgoov&0YJ1_%ZDhA{?>y%e`TK9%SL8H*P)pDISeOR-1RPQw z74v$LJ$4m{*)<@Kd{T2L6BD^P6~=0+IJy=^SO5k)T{FGSTGbVfjxf*_^l6J+?Zm1G zR$#LD0)Oxb>A|LNq7v`9nSErS-QLrB3bI3o7PAgM>E{@B)$z0Y3w9QkfK-KSsxUdE zt$*Z6A4_pDm5OA`T^+cduOx$P=iyaYg=b7o8Ydz>vl^%Y}F$9 za-=Q+TWAuyNRMbL!xlZ-(=w;Nu;L&6_m=>&+X*j`8|O!OeRDlB>(uvoo5y%OFC-&WR#Tth z0@H)AJvL4jvgWa4zA{?wr?JWhMg8qN%Vu#Hwf!rs_TTPJH_5^w0<}^WmnRL&-w&~L zyLXzz2rc=}&*KcMy#cAHA$?Hb;TP!2$5Wm+a%NHHRKcPagA`-?r88-R80+UXbRabG zioDPDRFim2JKM*ywUKNhpeq!N1)rhy zhS97ME1c(UnPiDoAt3K3bXqnlP~bVIrwZM&88GlYpB5*!_;%;0(PK@#1#os|(7VL+ z+*fv?z$OzAf|uMe2z;tj*r#S;o&7Q$ncQ3eZhQECLmG(#@rn-;U52*AH%Cc;-Ph8-?qzDF_c&QU z&^QN9zqJ%?ThERzNOR{v+>}?HpEJ}i(cX!>*wYV?;@69*>9-z}{IT+(cU1Wh>Kcpe z${xg|&fhkieM~O{qweJ!gyt!qrn|N)DBxRuh?j`YSgyc{f?&SwOvZr;zcry`63!3k z&rS0R{I4R{Nr9+_MzPmH&P81wTAxlU>OjUl!NHOxRS-q^h8j$slrtd}|1yGGi2QX<1|v@Kv>zW@ovD)6?v@xEGXcFDXvJ0QH=jMV2-Pc- zP7QhP_oqX&41=O8WT>s2oYLR4EqUk@oi~YSlQ~|a1@l97agbv*r#L+wFggi|$!(=_TsA>flcDF8t_yR%;d%Gq%PE(P$^(w>1Vn zQx$#>o2T_>) z7muJ;A@a1PlODQHzKlN8tLR)rJ`l2 zFB5kOc(ByCs}nAS&>f+imLq4yT@?e|H?De~u4Xj|UvOX=pE;k{wuO=x4fHxT)60*! z7!~-WKTWKK*#b&;bSx!5k}qFA`XetMnMChZ;^SU35d=@>XYQMBnK7XHJP_J94Htuv zaH0ZlJ3<=&)8D;H8S zNLC4hnlKgM+cGaL9lNyg1|Gqb`$LP4%BEH!5|-*fwaw+ifuBC1E`5YvrO{VZI(2mB zR48)6F&*)>j+k-0{&$Xeimmqe7XL!}po42rPonsCX|U4137_{(E)wJM6OgdhUEA>p z1zgP$^U(lQs5>0ZH_C#MD(%e06*3pR-kCD1E|Zyef)a~QIfx6wkVrn+?|6j5BhC9# zMJl(k++>*iZ+s;pR!BzxjFqg+U)vF?CnnpsQg3`+pquZM8Utn@WsX+csSv4@p3Yw# zJQqssE`_hcXhnD}#oXcz%m?H)z539%5n+WF#8FyXWWB3&xL-vKZFq*9EwXDVEK`x9 zGAY^&r!{QF8_KHI-C%yt3nmZIcEWztC8b@(9I74}DG@PLugH4+e`(sC?m6CwY9_`0 zE$E0WD`dj@#|SS;q<0C@H?6x-nr40%MtzAw zwO(-JJj~n?F$0Lj*{U)8t{6y3@X+6%jm=C){!TygH;YyaktcSI<=3QlT4i$@he{6~ ziSX5-qK+QmEWys%vHN#9oZa1E38HYXqH4j6;2<4DIuua$!lhEiIF>L~lAm)xOFDd+~+3VjB?o~ZJftikiZ^%trR8k;&pL<#LmR?lf}7a5#k2zEZ?ZNv5F;oc`V zTiP+;*xqt+3IVvH-5|&8`ELJgQhc=rQ!#r3?T<1ci~gY0X72O$gEiC@JYN&9hL#om@ZvxOlID!7zHAoL$kwzjRra1U8L-o z>f?oK*|$FSq+`|0#Q-kBa7XC*V3_ySXfj)+~72_9unf`J%S$E#2@!@r7^{)nRL=mfF!~A^r_QQiaA@%9Nhw;TxN8!_aJ8GgPbH^+ z*OL`=EZxX9JjtfRf23LSGwIMTWL06t8oLg1KFNSx@g!aCp!KR!x9}X#G}5egIj->< zBOGCWwwTlTvjEeXEs1}O+vz+8@Ff}!RJ`v=YSFnFO8@%+Rk7UcvV2bh@K`~d9G4NX zDSzS%z>$(#I8T@8K(3b28WDnIY%?~M6hM^mFgX3w|EOqJ=ag|%48*7k*KI?6Y7H*e zu%4!5M-jk}L(&F6p}NJwDSrZh3BbfnMcvGdih=TI=&!SDR&wm(@vFH<&unjTwqlIcOI=%hzQ zc*xrd2`ynl#8q=T&+#nqvo-ngDYSaExl*mzP-um&F#Kb@s)x;lDe)6-yh=HvF>;D7 z@kGpO>X^;rNsSvQ;voT1NEvK4mV9#eyb*y94|0f2B2Zr|GEo%_*U?;wV@i&gCHX~6 zAj0k1=^qT}t`G|_8o8#M*jh}-3+2NDUJ9n3^HrsY*N+CRt0_N;eXw)s+NyiGF_i*>qPTVT|FAIYWkSKS=a&ozh~Ka4x$;V(}Sl z8?~}fe8*|I3oGcze3HFBv6QV2P0?>=Dj1-mFtq>rPnhd55>%HZ4HeA4eo z!55SdeB zU{@RrlV%SSm)QDLf|ovzmB)d~q!)vvlCL0ttFpHDe8@8jMro*7NPbV2eFV*!+az_H zz8-^If*m@3n>P^a3Tr_}$P3}j$KIz}i}D1Az|D})VGs%qF9a4#=eE6fa8hk_9| zg{2GG_L3(X?}D=E(|eaQy>`GklHkQsYvUzu=hZGRX_Uu&p6xin-RGJesdgqd_CNLQ29ev;GVt%4;7;rNJu@xfZz`hdrzlWMa|(33Xz z+?RWRQ~d%QE(-0YSjE0v1>t9|ff)2mJd3S4GhuYlFM2B(bz;hnF?z(;>$^ z*V-zioz6T%fK9*L+Xa8{gW?SCnCtOd*U11!aau*^Bgel2cwqC3k0lnY`4-R8 zt5aD$6!NWV7Pi@z`YhI}?3313V%_%j+znlYJ2?G~NtcDchI$y?en5phQnhst5 zxK~5-6#DpuM+F_MDMm*Y#Leyu(sL9+x`jsebLMZ>VRsPG`i?CQvDWY}fp?IR9xANr zk)Os$9dbSIdCbkadj@n(pr07w>eTvS(JtrtCaS^04s*@<5k07JZ?s*WzIY=RSG^aR 
ztZyvwIr3q}$+OiCZS-*@s;2}8sn-H${qi6*D%9W<^s~oO!rM9{d0=1*`bgFyCI;4+ zUT&trHsVMYIx_QBas?GE&Zl1!@gD8>0}mtC=G{Jb`{CX1JMGWfnXCT{vOp6X`SMS1 zi_NX^6@>tS9C)T?fxNt01z?n=2(9`a^MftI{Ak~31XxUoOJ0D+mi-2nhWX=g-yrm4 z?d$fb2=(C>M?o`^uWR?O1P8a54SGdhyoL9?Bj zblc6!`o=;`p4-LxtQ9e+wB^>@EtE|f!H2LT^_I$Ia&v2*;rv62$ZW&rw5h}J1G3|* zgL2w56MH{XC?u_~!8jrSz9`gt*z;zBeDL!wgMN-6Hfco(86!}{B(dE&25&EcsN!ci za<4G_Q|e_@ za#}>+?6k2ICe9JsNfS;GwXIS%!jrQfyIm(6_pM`#sO^!YwEC1XcfU{ zO;Aj0Wq<8U<3W4#e&#^E@K&0}W|~fYYY;avd0vV%A8EUa#dXNsC6Un|(c|l&h`@IH zQI+&hi3Sd}j+xF&G}a}~hPm#~-bP^$N!HiIZ|t1tJ6SdCRCWF2q$o|RRi{iJ&CPjq z2@3i;59JV(3I|#01%Cu@1KTWMO5o$CfIPm0H_xU_C*zr~w8Pph=bgQS$M~m4CQ>8Q z)rb`m`I?{ajX%RBl`nrnM@=E=i!{{cV#@R0UT z&V5amkLA9&Q@cj69n@&BPi3c2!Tmv_N?OTRE9jq9>_FCm`0|i7Cd8P8g3AG&+}^)= zR~va@)$3hH8lQ7kv4GR2NQJ6mR8V|~xEhi~xC<#QI80(&VRjL&eL$PG^}()UeQK7D zQ`MfWqH_n!xz}A#t`ZLdSLlR*K|Y62gU|IMbvAqbyRCI`u6!#?s=LUo)lS*!XV6HX z-iP&vNoVpa4Fle}jE|@ooUfl}Ey~~>+zZ2M)d2x7^ypwR5!pr%lu9tItoR9`N(G#E_>E7P z$q&APQpOO~?FXc|=rNPB99a723-ZeJivn$J3*coYy15j{^y6f(GKIn8{*_sFDHVOE zF0b#r=$NBWzGz)Lb-3n%KUUuk%F%QReFY zRqrQYmf$6@dFxA6hP|4Nw2amxn`D2~E6(!@0h>U?isT{y6YgPk%Z4HgsJ4z2$~@DV z3Nfy3aHmj_Q$caevp6mX=jRF9lpHvbVC3sI=sy*sK&Ta)BU)DAO5egt_wMX#5M~8} ze{2BWKDC7Ukgic%_;Fq}tvTsgF9|J$VrkZd3#rSM9XH&WbQ1ed-11tgAf@QK$*K~k ztz~?>-?N__29`q7WHrh0_-HNVc;RD~sb9`E6w7`jei~{`zj|_`Any4B{Cp595B#$F zvz?|nXVPp2O#A)QMs8k&)<2@DI~z@)XqA}L;QHKHJqV7YkCD0~rs^6gNo`E;)f|*s zg_Y@5eqR~6Avw$H)wp){mYWg7{C*Ub!g(}=>;v8FHz6Na zkrFBHJX8@O8Sw=?9e$Bw684ohcX3!)dmJUs)Wb)ZV2v@xa5dsc&S+>~#mLKLs#mC2+#GSe%0nR-tvO2LKGg#6T`?DXs# zSNR+tAOf%51DPYbXNUykm#^Ni%(_cEC@VAp|I=p@L+^#W zB_`qv`q}&0&G$eDBGYmO9r2QVBgt;ur4(S5c@+inpYPBei7;|lGHMQ16U0FDK3c6MpvH$ zx;J{8l++31rmT(SpvZYUewfPWM{BKDn2Huis@m@Emyvh7OWd-HaL%ju=_n-|tT4*@ zo@@*{S)q)`dD{VUcwfnE_YE?nq1Qizh|sVS|C=np_W$I3{!i?Mlk5Ld0?b70>@5E) zEWp9e%J~0VY>aINS4GptWS5mD#a9N6aQP1s*rtMG0fl1<9!wCCX_KVV3u09!AtEBD zgpzcZ2~Scc^y<0wz5V>P@!id8F+aIFUH`l~&9NY4H|G=47h~K)D1rzl_5^9= zs5B1(0Rjd#;s*vM_F-oyrCb1iaL3D^1`BcsA23q>9aa<<3>e$YfI;$DUL{>cyk zIs}cBl7e>no{OO95=vA+Lf`_~-|SPv-70SPVW>0JXF0b7D>K`ttPcXBcs z@$6y-Qi5~L@d2p2KvE(AcnCk}E+hcBj||i$pf*8Z;q7i6NIXvQwGT+7oo@j)A^^x9 zlwJ`D?8Gno02ovRDPVIOI4nyWNRI>RilK4E0F0nNe-Iez0s5zWEC0$Mw4i=>To4KB z<^m$*UC;;t$Qu9)R!~(o9R4K!02IJ@$3QYSjPI9?_yi`b7vRmx7<0^~ZQ< z&L3GQioM+ym%X-?P$149$IjFM0MOZs%-!$$ZJ)}|ukBCO#P9O??`C3Fc=Y7ooI{3z zUr5MKVLhK8M*Et}z;1j{ySHn`{-1Pfn0Fhu7Ggs{_~ma=mgL?wC*=U1?vHbVqB|Ra z1FRZSEC}z{Y0ALy(4grABvj~W?`F=R4mLnTQE<<{eiIm!Yqyu^rmgi0UdaCRbXTWx zK+xf?P6!nR9TW%|ssQolnr*jM6qxegW+a$Mw?8g+|14xM>0cF~wudj^7YDGPIv#Xn zpu0`t0HO%kA%Y4pSBE$7UvYcjwoOQo7ufpXA5_dQ)L-A0UwBE$9TF(cHP8?+k?n2W z0RCS=6d*r$M2*$m*)#v(-vy8d!L1dspO?*=xxUpt10<*+T#$i$obcchk=&h-)*LmO z1<(6h_z6EV9TvIMBQZH*Z9YGg!x9H|voKmBc{G&|{)DMofD;~#EQ?-YYxac$$qwJ@ z-`=~fW>0g*?v)q;F45)3QU{%e)@gp zvHTm z51q{H?nv>xrtTosPsAzJXPrn^`2{EO)s*htFMwe^L$-(q(zeBn7=}$kyP5zc1RiHc zS~N8Wd?5)aXbAe=`aXL^I&nit{t;zT)n$|M*S7yEV(urn-a7uS!_wcAuplbbrR#|{ z46E#0ARvyIOvEz0Qnss6%c4hjVq%KhV7p_hdyhUfc9+7gRU>Paz=TGhadO;qm3aO) ztVxX)>aqcT5g?2Tv6qp}E9OmJZjOJL1({95)nW#j=rf|Q^Xz$|HMqf#Z7)Ns-tE2F zSP>N=b@8=4@j;x$rpYt5ZPHOqP$uW0cgiapeMqL-nl?LT8olXdJ&w>AONu#9tzD;u z)`w2jNqCnLJj=zVuIt{3Bo95!C}>{2%1RmR8W-3cd+@~xw&~}CUupqUZrqD)N=)Bf=VV>iOzMkDl(ZCeZoae#oHxu4oe+EOv-@V=~ZCs%L{ zW8w2G@AJl>b<)SdMh7@BvD#R_pR)26*6Gt{HazAf@bDSk5xxACtM;jBFTUtZJ1Y)}Jy@Rosat909B7QAsjPcB0%nbw)%{BEzxbo6%?NnO2_85KLrU4s>Q{=>D>j2=$l6T_MS)A@c`M^pnd zj%t3rk3mHSbidWxU02`|`*UKN2)&S38icDk*CpX^OFOMp-4?OP)}buY?T@m(@NCwo zTMi>&j8@K3;M;1u7T3#V1G&Q}L%+OwBeRk@a~>u%v`?V8$XL>FLQ z6zQQ?iz5)538%>+)^l{%WL+yjPh&RFn6BqOiy^^irBf8ELOn(I?ps!Pgb@{#X*7=M 
zfes)H9E%|cQ=>=ejY<{B$PN$`l-;osCH<>+)++W|(WF#A{)LL?FNA%Hk_l?_wyA>? zMPZv$#~XQUd>IOJmYRC}%qiR0y>?$6?J@CCzbMBq9y#j?=AkHr6$FH`1<#9ZWi%pz zi)yRySH1CGS)^k1657q&V;$1QQ(HOMf6pS_{Ft{^60#3Tiw55gadAQqFbs545k~g> zcL04TRQbIK*fNuk>O{F!%fC+ER?7FaoI%9h4tmu*$VQvoh|ky*t>W5hv%E6}g^K zMF=%>kpE9);IO;G>#BD52)8O2d0hpY1M+b{IIv`%sB)@{cx-PE(%v3eAxf^97-O^? zIU97S5zDF*SWi;6R5IarKF6}QY)+?DeoIXbN^4rp{nT=&i!?mEF-o{1x6w~!>s`7% z&m=km%~n0W`CO;b%37gLKIbzly?Tc(oP~H*P+cO0yLCC=C4_boDW}1QwHriE?s#`o z#}9DsFKS#rVN9cwY>)&e96mTrdsD*hCgZTlTgC};oTW|fdYGJ3cO4#=T>AvP;w!%_ znVFYt;5YFMc@pE94g-(z0Pi&D;Yt*s+$ih7&o!wjz@*9E5X+vphNpVSLSUlcSh1hR z{`*6+lCYMZ$3yEu{CC~F*}%VFSr!Esaa9OB%-7UA-56h@*y(;m-Ahm2xvSKzr@&&m zFp!HyVI8THX+0C6jqt*?AkMAuJ|PJ9^rM6si*V{#0Z$XyxxRwCq``eHpFw;*or>11 z`z}Y9KEAiLz0!$fPRYyUyE)<@VD^;mX-&2L5kvtE9sBd19~P&Bb^9dbz0_5!+m4<^ z^$yAPy=;V}8OLq)7E;?25J?)iailkx=wW^zv9`5i5V|S(h!gJqKeLA25SRR%5QZU2jVT>E;sc-tD(PsKgHAz zQr@0bWR(OlM7BsnhLur-HTtgM zsln*VXCungCB=j017a{5A_-Ex+kmiLpaAF3`x|(Jj}#}oX)*Gty0=MT66gjO|KC9E znR>mi%e`Q?9xGw{m$)y>u4Id7VKzz;`XfRn{F=xxJFp$jjOC%^(>pKZ%_b$2tt*YHIsi*inK~;-r$D z4SIp+k(tbrh+4>*%{Jrg`2ft!_A2WcKg=rY0)arbg|J+C8OJkO>X80K)VcyEEYLee z@gsirMyzd7x)h+2QHQ4&+WT3nAAQpmjS5^Kl3(!2oD3L$eU*umv%Q zcI&xJ=86md1&u_QamkhNouGUVvZl6;V;}oOfM1fIK z&ivlTF6Tu`+uCkd`FC#bGV6$}hXv=&_pL+2Te|O=1AzI?dELm_Q=osT22aDO2g$4V zTcOvh%Ue>QJ_OOC{{^|yC4{;ktlY0NI zXvOvPpm*WU28lukJu)u#`fKsf_odNU{*8zpTWRUiR(6Sr40ayLFI~Nbh$98%E2Dd}>Pduk1g*Ju9zB5})|pmRa*$-VC-kqK zjf%+g28duVY2j#ueG8YzQEJ{ew(PHJk{ok-%7+95=aM3=L&^%R{Eh%`nm4&l)ol0C ze)=dsOwj2!?vQMN9fCy4eYSVHNCHQ5tyveZ_1E6AOW2tb1P1)Q_XA-L6pP%Kh`e&> z-KyT5>!t1$=h758#FCUh{nAyG>Z2>F5L~3={ElY&*iyGtQN4K(tibon*G&CRJ4}p<=s*a13N-5 z+r9Asz-G#5ZSe{hE(i8JUfu(Gwg2%qr;LK5v%{4+ae>b_?WYn3YAYo3_}xin&G4d_ zE2xfmF&gAPek?*_X*KkV(HJQx$>zek;VJFSNTcUQsV9muMPKQkd#yL)rm(o3TnbaA z$Pz^Su$zb&up4`NjDi_0<36LaJ%WQNzODScqfOqgz*x^_&Y=>7Hr{*U*J{s^@=UX} zf!sA3DdBTgOWkttlelVs``g5=hYA2qznVbZNM6ID-nXLnoPC7zWWTOug-q>&1QZ^5 z^z+L(kzLdMc8W+f@7T@D(edja)<_3%dTjdlk7d`B!w}J4{CqqczEKbB-21{tBcX>` zg&^+NRpgT+1n<9un>FBC+Nvp(YJOGOJCL*xCytBP9HpVzuU;yxrVxi-h{zd*Tdp z_s!TwQHOdgTxy|ASkUVsNacuApogcG_hGoAsR;w3PfapJ|D<@>L^8XhN2oZeAKYbJ zysW)w8hu)|gwLZaLG8^uGGTuq_#kk=G~Oe#q2-A3SS>+DkSxmH&pEKGIOfIHG?Y>EOn5!T!hU&@#9SLNaS58KiC$ zx(!?`?(%J#z6fVKaG4c^)>|{mZ1ecJXB25Dx-^U|-9wfqsYm9{m;ZOx>W%=`l3#zU=N3isV;W-j+1%SJhWQ&m7M3X0|@3pcca}y`CCoi1bH_ndXhQ{}C{HK=o zje@e-PW-WBMo0Jn9if(seA=um6Qo8sO|AdK*f|7?0xL-L z+O}=JYum=Vwr$(CZQHhO+qRAWHd9km^%t{CHmR&qNxJ)V_KICY^~#e7EjjzQJ6Ekw zeuepDT*L+TuD-&nl?rbm7gYcW7*M|B{4t9n`s?XedoEEME46eYa3hm!UEm3I^z{Z~ zb(OLd_qP2P=W%n^zAeLQjCyY0_S4HXmu1-Q8Qm4elCf zy^%^I&ww`!D(q#_mHZ1=vx(cAHEN&+$t>*o67Kf9GzZ7Gp|FG96o8NFaJPap*_h}O z79X@9B6Zp`7wAp^5L29DCgs*mr$uz1zK9su2t-W1S2C*J_)OQG<)zC|`N^W->Q9p) zZ-b7e?Z)j^;t}C&aE_X|K1cOZ*M@M0#bT2RA64>AeA$J2ahk+g&*R>BG3uAZc&8gO zxs^8nc9Y`Tzx(|)@73EV11y(FsZ|vfwJLc}c{rrLWz9)JPR|gl%-sA0YRT4_0YQ?T zbkN8Q>MgK&KJ^ok-L>)Wr#>&wCR+&*@Zgmo-vH>!sOdZy4}F)`TNNP#9es!(kSpfQ zDMm*+_4ML`7t@G3uQrderX4GW+x#EawvoU03oGv7v}D?ggK%z;jUB9MF?V@@i!+AE zu;PTK$JmvwQ@WTVzg=U8Y%GrX3)|8JHBC4C$?T*Dt*dLL#ljoH?eCW8AY6CVb*f3E3 zRwkMXxqz88iwpEmSMZ-FTL@wf73~|+9H6V!-kXm4bGI~yV<>1Bo$n_Et8l^MOOCHH z-9#H4u?~-UfWbZZnC>bm5Zl)!I_H18^hHr^mtCWbY~bGgEwK5>2kuL>>-l$I6y`MJX4Y>VouQLHdEUrQ%*Mn+i(+ zRI6G6V-;uJ;v0!Df0DG=5ffJlmick=uTfy$mu1@&U!0%W9|+siyRLZwg8sX=0(g)_qTC-J>ueTnlT!Z@-U#+ z03x+Yx9JJvf0|!w;OpU;)ojkjvADTrjXz~t+U4Lj(~BSwqgI!zQ338wvWM99U~kdV z7Yt;V!oaI{;m+34;HPW5gv=eVj;y_jK%Ign&wH#%Zb8akW`t~#eG!|AjBGN~w!AH+ zU4U{Jw+F2(Ta5%Wk4uZ@KC}1zj2gKlUfoyM&{+tTt6Xr>th~WNwXgD!{Cf)+W2a4` z`7{`GBHJa46|3KbeBjPoJ~>#pG?rc}7jA>cud=d*xm~FQ?YR=N9)qs*p18) 
zl|%C|A}6n0E3A5SJk_%nn!dnf4J))l$v0_9NXBNv(KgGYCq;<`mHZNw+j!}|#mil= zj_(P}1x37qje&No&mw~9NIfZ^f?<~|1^mVPhgHh*6`{Pxl)S<<{djmVbvdK&V$Q3S zhUUdXT*jip8~&01NOB@jYy3zFP`tJ?g-Ge>6KP~iK<{$pO7931MmZO?S2ib+v|;mU z+W{Y!%8FK+MU^7b@@^C*OjJUL>N17HGcNm(nK zNwcg?e$|=}NBotopUQ_^D&pjdk`0Aebwv&oIlkE7ncBV!ial2j@!iI5RJ6sy=$Z*n z=wIG(A7yaxF^wjeQ3UBhhG;wa5S+E4wxNnoQ+%^x^h1WCy%v7W7F=`1#`6ig);kKQ zWs>!ZE8O7WhD7dOOL&O6!CaZ@lz}rra5-WvN%J4qS6c=s$5aHauve%0`t5Cwsj1wM zw2_X0GGN-Tuj5JR@)eS4M(muj-gI1mOmDGvb~J!e>!i_y>5%DMqE9_#GHFt|+Zpnx zZ*c?WYi>}q!WE=2!B(itZR0VO(aD_KO*1c>a$7CM-A2s$?8bezr70k}6983q%SOxp z)U(fG&4ybYcm3a|qXHeV%YjU(eaapbuS<;I<&}=DSolASc)UDoYGMpj*VylkRK;0YvR0;s{QIJNjg3o;*6a_ zBGowv-N>J!L63WD{~i5zoEMzm=spp$8n{K$PLbv3Bk13RF@0O(sEYHEB>o%z&(&%s zA{-7?B8(6c%qi3TTOY9UP;lN$Mq|@vWnXIfRd$QG8REyEE9Pet8PC(BMmUj?bE98P z^@WY6M3V|*?f_8@4@NP@L?Gl|Tb5WB>C;NM4%U-^8xBE0c)W!-6l#3BtUw9$?w59D zA!}v6DytNfzkgtu^KIGGvdkSztXH+?=Jq*E)E%as!8(^RkKbJms&9)G4R-kGhB)vW{ zh`(umX)$pPZ-aLI`o+lrb5J!dz+@P8>=;RBvN02DIoT|}K%lw|LbxMR0hRfXzTE^! zN>y_dbCKv1xD;h3$imiB$%49?qK`MTs_I55qW`UyoQe0)2%=R9U+r%kgH z#I|N{Yj$#DPbF5LDISrnn;S@a5wBr80C_7$cYA&uvxCs&DuF#E403CnsIg(J3w0W% z{Y>1Y)Af?Q?8czj$5Ko;Y-Wg5v}xzdLW9>;HWB`Yr(>g(X4Bl@n-o`N-hO}g3$R1F z;P&72WVZjNCv*H^{9iqpfr*8I{ePzahg8qZ#=!XhCDlKH%4cn^fJPjk;RHw>-rT_T zYX@ioNB)Y(5a-rqGg0IjL3zyBbg?M=bQG1~*D1EZt^I>5c~qmxiU zf{V$S#ucN!U1$Az0hvvo0a$r?aYy-<01sXH%@8mld-@eXu0ff-x)h{`)ADck!+?Nz z{>VXU2vGl7i_Z-n-rmk$ADRhX?_XC;i$UCj1JVRs@}uMbhok4%gP#B(MdsSuT}q2D z1He)tNbSYBfJ4)BMK&eao7RJ71m5WEDsJ1z1kMJuEd^dOMg^FN;s5LnK>dNz1Nq{? z0l;S3^qu_9{uU#Q`_3IFGckd4i6MK*A@G#)106$ykWW$31M~F51SESo@de3P>tOFX z&TLEfvyzeC1^Tht1|gs=f%@Za_{PSE&SV-#ycWC;YV-*%mclpD6|Fw8qczY!17`;B zBJfejClSXqcV&H(L*x4oa?~Bz+5PGp2RgR1bpVfbQWHay@j^hZnpbtS%V0}ZgCTI;&;9Ez zeIQ$KI@j8!#}D$)DQY9d1XaX&1jCoid#|j$Jvh7{8!|YyUwC#TYTwAj4D6o!=iT=% zCly&<@2B23x$4T!z&`C^tBh;ZY|B)ku~nn740s`^w7xo)Wmx#?hz5}13*TlU!RGJ@AwIQ=LtTv zDRcvP1$&#{kDI|xgPQtp?5*|Wl+{iSVk(>U&oQIDyC%n7F8Vt-SP$*;?+c-^?TOyc z=u5q$J+O3EwhVXt4&5)TJz&@R&dL(b*?nmTAkEYZ++Hh6AGWEkUvMDP0zUfmG_YR$ z+u#%Vfj)Sh*Bkx8F=*Y{7y5lYkebtPy^&KigHH^3IT?i>BIHRSpibeGfqD|An@>M(N#_ebPB)+Or)bXUXv zhu&*b4$YK0)VF_JzlZP(<+!1CaMp4G}$pyuC7LN33n(Ou7%_w;Wm zx9{*?TsxP1ug`oLjE>GOKFHg-vHh8QKY_nqJO2Fed6)|i0fFiKVa`J0r?s36g03aq zkOFIwO}d;{(#Rqq5PN{-n1%Qf7whWO52y>1yK6L zt{Bbn+UI18Z+1EwXh3m@c&it!S?n^;>qhWaY4K!kTC~0HWTSkHDip!myYyax6%#yZ zKUU_>?yyGK4r=H99c%%B3@sH_FIr4FrKdGN`a!F`@BTU8o>tr4 zZXK^HhVnRqLD4RC4t=l;FiGKqlz;Ilza;1061ljt=#tlFS?imnR+57u>m8zOQLfSdlEVr^}aDn zc|ZrMJEqE6Dsq+jeo@rSDzq_%wN20&v^JzqKF1`Ub zs7tB~5;5r#>NkNgA!Ew$t%qZ&Zz7np$dfW)ZgM8;*GIm51=N-*xX7I@sP+f?Tn$KzC!~(VUd=J%pkI&S0guhd^2bGnNtrF~giuL_y|#_VL5+Edlc_ zyaWOQ6;Uk!S2JlCEtMBxEmG$%jXR2RLYvtVm%CC51sDfmKQS}QDDJ&V3w|gG(Y}5P zfx2mt`W-VB13g%JEb_c_PT`@Bp3O*OuQ4-<7V(c~% zJiRiU3JK)j0B|FQ@W{WVbc8IcsqVf#gCxdgSI>+Q#aPD1^;p})7oHl8D%gW$v7&N- zA9XC1l#Z4Q7X7o8#kAgO`n|k#NhCGi+Vylrks;}$K3FyNbowM?kVo6wmt0t0jFd&F z&$L!`Qh{JevQsXOviKQZV@2K5#V?fQz^Q`Wsl;R5zHne^b~*4$Zk(xWHC%kI$G;mm8Bfd48P!sU0<1imR= zXo1DpCFN!4%aEO7#XDbqoq(Bd<4(8_MYrC(o$yNc{+RD?xT0I0Qi)qOW0uU*#?s@h zK<0T2OXQuwA9ZqO(d{X7^JGO}oNQt}9K+Z*uq5%So$I;3%e#Y28^)=-(Ln<1Xdpyw zG07Qn%xJbiTQ=TNql8=Fg4wPysWg7hraBLQIBmoi-$#72jr+XA3F#|v%Eg9J)4pVP zEF+_Ui0n;K222^VWTcUvh?Shx6%#wOhY-x-aynp?$4d;O@BDF;oq5njF`WR=7p#gxx_ zX0^5IIP$275Xx3Lpx)d~odb_MusDV_GiQclb{p+T@eFHeGk@K!sQjwr_50o zw%T?2Rv`}f=k?>wI6pYn>ob+ufqj_1+**!>Ua@(YH%DemF38n5r#@;ti`~;ZtUqug z3;i$k*UB-K1YXBKC0RI80{&oA_rl}a1A$a)35>v-%{g}9cG^zrk(dF3jhSn)CI?N} zUh>c4vVx%RD@2dohMWN}6Ipd)^!2K= z=LWopZD^s)0_4h~Gu6ww!X7=Qq($Y7P4PV)9HT0^N8C3 zlc0*(lC0`rhgIi+Xw5j@>rI;Jp#RW~4e!cjrDuYht>4^u$7&h8poL!2+4Q|Lm|kty;5X*hgQ 
z#Ez=+4{E?9T?#H5iZ#PmzDLblL)=f6hL0?%`JhjEtdyD~9TNeb*Wh-W`!o?!;9A0? zUpofW7Q@SliF0Yc+lWwR&0GKb1@GW?B9{csqUk$P%@*uLdJ|3~K-1-LyK2v8ePL_V5;pT#1IB20ra@SG0Ga+1HVl)QG70`u{`HpMS+E`wYCi5v3^{s z+g$L5*j~43p4RSdK~)*imJEj8h=}-F3~@aDn+p2F*et#&0OO$9;`7P(-$6cK7FZdzc=+h^h9i)1l-oXz4)ueRpDtNUTcn{^?n%ciOi?vqw0Wks=mLGT+jOZU znu8SIF2b-0CGwZHpLDaF5x!tp?iz&l4npatGAm*gJ3??i~ z=t_a%kNc^wb^MnYHa$%SN&s>5-{%FgszZl}P?5KGU$68U!RsDRC>Suv%2-8~xEd#G zhexk3xJ-1UnnJ`tFT!xoEUBEA097#dI*&W zH9WmDOZuek<^EnV+ShASE0RaUO-F|hI^AOV-$x7k?S(#$2%Alj-VVO8sMzWW*!nv2 zBZ#Vqjt0&^-#euP)Zh6`YO(4qRV?4UO0|hZisiXwUOhDE=!k?AgYz@~7I||=oEWRN zqq{@KOQI6?9wq3J_ZfC9vXxoDI#s3gH(+FZbRM}EHA4b89{t1dU=NE`{l+Ofu$y}aSaRqcY7pI7asF8N@Q$!K{JYcHc!kvEiQz_&fyeGK3guaTy z3cJrDkjJm~WKodw@0o2RlBa`_qaW9|=H{rDtLPb}LaK{-)fwa(r~PY>TEnMn$KM4j ziWAwJ7?91=yOXHObP#l44NK>R4lt#)VE*_UX}S#|CX?qJ0a%?hWGc~+avp3$kdMSj zMYCmJs&yyvM(DRbz6_W~g~$R_i4QwG8XJC%ZTZcl_fh|N3yIR*EthHP`aHV(twy*l z$J|%SPAVvl_A-CagjuV~30lg`F=U1!w3R>ko5-;8(dP{G=8p13@NraLaEswjqPS_G zNZqo3B}2HOO~|4%#EXPLC%HJn)-ik z@FWSl$_`ZewbK!1yIWb9A5#loq;10NiB;6BAZnR7BckcM7!nnStE=xG-JBTofvx~& z9!^0jlZ81}42mI-t3Ld1=i-z^5&H9YbrKMZpubpes#B3(Vd&?R?P^~El1b2FZ@5&T z$FJ7wlCr2wPMzf%@HW-$*i~lh9iCpq>qZSOz$jG+wVAMSM4gj0)r)~ga}V0K@c;ZJ z=@Lb&_!6l{%MYE>y)Ba7`>nqy^O3(3W{~Phs@4R+n>B0FUE} zuWeI8`-DV{DR;h<9XHX7dx!CdFmIhSzKhpN`OhY;CG?WZ)>Ab<+ZCdvZWgRp*VMQf zO}{F#tPr;&U~ZgK`t!`e!fLCA+e~sKI(je+VHoc9J(*qDdAx5+lk81BRG)6kS4)CM z6O;3)f*#V0W$Z<)y+#+_#PnZtYz(bLCrJ2L{z(vncl{v!o2a2y4(6`GrcrN;QJUh` zbL6x)a}X(>p~CHo^h%+_bh|F(eB{?)CQH$~P)V=D&ez*&?lGN`%0lihVV}gxF+FNi zA@O%N57IHY@KbK@5Cxt>>FB?@8xM3G-FSP@g(GEa5j3bLDb~}SrJg*B^*&nD9PNI8 zBT;M%p^-|}h&0kaA};C>2#wx66WKo}f?$tj}7m1B_6Oa=oY~Vg&(X7Wao~*!^WZ?|XPTgDn)hhjWB+FD=-#nHZ&Y~r` zL}Y+E-1!pNdA-+ zVfD5k)uZ4EAG+k{v(nsLf1?&C4c~(g6cm`1$?#1Sn!G0O6>Ij-LYvKwI@(EV&6KYS za&R-TAVUgmP@!&Q!LWtV-C89RH30-{4b2O9mQZ;`3hpPmU;s(X$z)xzNL}w`dyb2U zf;ZcrLAjnB64clVGv`)7=kGQ2L@ilMx6Q<#^of;S&LFmw3U|5v)Lsr9NL%ebmMtn{cYO#+lvF`Dsrj5{owD@~f}< z0uY}`Efc@~a}6r^%p*c?`VD1(5XBfxht1xcVJm?TT}rmIV6BeQ9nrarEKyLT!8FLa z!O-3!mfBzy7pmb?E>hP9xa59#Opsvso!S%GG~U@LyiTzmf0rVIp2_L5jlL;~KoGr@ z2tY5tM_~sLJCnH6Qq+FUzfO(&6Brgoubf=nbt-F2ULE5lbCoTT8kb@hkpCO>kIg*C=HbcYPx7R-35iS1hR>yx4hyv)Kp_Aby46Crrb4a zmHzh-mPnso;|xZ!*D`HgI5&?yduq1}$C@^XU6b~;EeT$_DW-T@^)&wEF*<8b zz+f4+-?*p14(@qFqKhq<4z7N~wct!ut6ndnbPKJg^G86`9}wHGvE5|YT}tB{IEO6T zz!^+_!6P560XW*3wali}rbmz+Z=}XDepe4B6fnGP)RqMYG}}d4Zajf#!hLoFMJhVq zBWslnq0cPc!8wyJRUc})*CLqXH8<~NpKM=} zaIdkJYqayp2pA)jxel0>-ZtbOVqF~{O&-+u@jRzH)FH%z;QAM6`f~?g2e3~p7H9y< z>_!sjAO!g)g}w@z`V88fqzJJv_v8>SCt0c8x=Wls)3Dt#bCkut;n+$e6U}4KG>yy$ zhNt-40jIBgYvj2@t4sO$ahT>_4kA-=+Mj2AB%` z9eb4vcj{Z5x(Ib(T;fivn=POv2sgy$>$dltc1n5;d`~)8tA?Xv=ao@a-o{rC>7U;& zrCF^O6%7__)8K^!O}d7a{Ax@iPt{E#wuH|aRM_so^XV#!wb}DT-mYR?r(YO$X+Zr;KF_>pd9JkLZ>V%I1sK?Dvx5JSW{L@pd5AjRCs-Qsgs1Uobt? 
z58y3wFXGfKLuvcbGT1@?d6$=D2xYL$he!p9rMruo-J0|s4R-{^>Sy$|gybKaG8N$C z{^nYnm`;UHLDMAz5Nl89vgFKoMXZS!z0c3Px^KjNdK7kBMy%z3H2GHs$lCdCNg;}i zg3w?3esQ9pN0GAkMWHE*SYUeqqfjpzNk72BE4Om6JVy&z#!z00K=Fr}Q*nC1F_0O5tyqYihBPA2J>wkBmlGwcZBItUf*5Slx~mBt4C3w3+Dm6n!#_&P&| z8+%KJd?qFetqJSB(^QrQmmm#B`#EV^wmLavaq+c)Hcb?*Iv-UK9Y>N=kg4bv!OXTJ zYkpKG**xh(Ff9G}YV>ANSLD1j1pnrEPYQ0{%`I{}APc&Qz+DWbAEjayo?^F3zIc;$-59PrXH0 zFH;3HK3LSsMML+TZ4C3v;P6inXH=6AiC{xe5$qdDpLOO^z}<48HDfjZs4CIw8+)6H zGedaSw(_KUs)u~TRh~EV&0bFW*1|>yTgwSosBwA_uv4?e*z56Q_$WfIt_Q~x=9AkA z76<2BzLuv!XwJ#TnNZfd#2!N0j|FiBUd}XTD%C=uGGV@=kNoP=}IyG z5Xk#2R`G-+SW(`?D11s6+!0|+lqhTHE#;SGU-e{Cr@j5?PI8eCJkx*>?e0So&%+yI zN&CTbArLCob|^k+pvHLe!RIq6tGrXPe!Z^xH*6`(45W`;{_TGEr&_gKSh{bV9e&*3 zUOYfqq;Mpk9v^{OJ#8RERW!8?ezpNZ^HrZiiuFvxDx<7&VYAdtl^(r1oL1Bu0>R`- zu92fDZsVYV;%P{$CcbIjKg6BuBvHlVYIP2~6-+@u+{3nByj=r^wFQfnxm?;lkWV4C zcFC%TwjY%9jpfLn1ONqsV*aWF7+K+dfbsy=S{ILU=Pi{W1>JgTmUtcwxtpaGI`e7! z2a*b=C%vu9C5FyVKoN7*_EzZTQw`;-&8W=rP=hEH>Mr^pB9Ls~QMkaCa}-;IcVl#$ zi3;H$5eId5C%n}h?ogplTUM^Nxgb#df%0vGJfM>t^wsA7V2$IWjv+zk6Ab^@I=gCI z)E|-Fan^`PlgX(Uej{+DVVX=;rF`CB&)4r$i9Y6K)>@+8LynFC8nYQ%5Y8^c_mx9s z4czPF7I67gIK)syyyiF;_RQX-`js1&qQc8MNmjHG<9`JE)lnLczYu>27#~I&hDx}( zdlBe~Bd#hswJoe%Z&xf*3g{x#ORuCQt`l>^X zib*bLbGXhu0)O{k9k8#hg2N~)AeG2)ipe{xV}~UF<&iXRMcg=2ZvOEKm#1TYf z(}KrD$(MEy-7=-oXfu`7awC${hB$%t_}>5B(f%m!@+!5*3mDBkK){0sS)rMSE)Srk zGVS1@Ab@7q;uiMB4h;Zuon`{o7&J!oUIlY|VaPo`-R@pWaQMasJ#tVQIsDPla+!G}v0H+i<4#l@j#7~BV%he9b+bxMPV1?__!N*VfG0La2AtOlhy zuQOFAvF}pj8_2v?cs`#sV=a?+)uBd=7sg4uGeD<70q+Yex3uL`YT>HBu9lm1V60Dp zD^TiYmq5_x(^ABTu9U!2P$SB98;XwgK<7BTjXwOo=7YxvA!Rvgl-B3RYZzkJ{-}ZY z*+VtC<`7NgjJe$wr!2OMAEjF;ed9Q0BF`p7t)Zv^d!c(C!^|Yhwo!mgF9vZeC_U#B zU>UxKx$|zqJEsFigpJam?g>tK_0Sc&u0_TIKJvOLF}WrdweXE@Y+o+E zMBMe3DPjoY9np9C2<{TXqWQc-3o_!tEu#inB?RvXJ?*S-eE7tem|G~by>=pGEmCP; zN3#n7Zd$@`9OoH3A5kW{GVw$EJyvyQBgLA;(AXxwIJC)vv4W$z<6ys2077M(g+X4x@)}o$n7#ptp%u$56UEJ zW{~nq5gZ|ml>%jKV0DB_cwTeF8JGwhpQ7>DK69MO5C)^uM1r9=7m6kOYdxGNWfQWz zicF>4XCI_R%MBN9bHhNOP{u~yjvy7u7uvrLiU%H+Gm^hGEN9v@=i_O_{HxdNWi?bF zlE@pnaX?l07n{+n>NM|0>?LjfH%j&Zpj&YQ z3)B>r<+qBvY{tu)k)FhEy`>X9YYCM{HqVE1AmkcG45@ufV-b`j&VlRgj4`7A?oQ`Y<@6wl5NagT0{F-mMZ{0Zc zs&0`m+ik|@^c%8i%1OFo{=U7L^OBp*t%dOb`5&mZlq)|3SWkfK!p0O}xdyAOPbMGA zPkzG}s|1yu+VN#|f^7Y1KJvu|h%^T3Rz91T9Y7LRy&mB`0TY*{5W@ZQckj6pY;~JZ z{pfdQxtBeytw;FjANcs;CY@ZjJs}>h7zsh)3B_~BX{QbLrnKlW<~Ks00HBqf=X#b+ zV_d=i)Ju0Y&7hG%THE;d0i^a`IV>eyS!xSv{MLWBkmXC_hqt!-pg1^dudo#wl>#Sd zTD(+Y?sD*UohE0s8CKQKU7&xleq-P#abz)#E)Jo6NKmMk+T{gV`k(EeRssw=k}8*A zEZp!>OgT1~LN|`#ut)AxN@^Ln!-T{9@)ifAW9O;9jDDF7N|%ciRymHL5Gt?TU&Vdr zR$R?ybuFs^b9)ob&WF~xEU5h6Y)Tw}P~Pw&?jh*L_x!=Ak280{hiSj>8Q*yv+qAg@ zD)5nR>e8lF+dmmLzp`WFULKkmS}GMjTLvQvLn%;_!*G}Mm_~S*vUDw6_R7RspSa9> zv`>5=gGFt8$I-#=ju^GzV%fIpGW6-rHVg~<1R*#++W9F`e8<*8WK22b&jWT(h^X`n zxAr}`JYyrydP-OtNpj1FRBWf#scFnoz$CWziP>6w%}u7HuE?Iuj{aEfBrH3E9m-tu zoLy2K9C}6+PD5HABPW5Z-XIYA8+!)u;H@P;%&XYlO z0*)~RZZT$<2fvX*5Iey4+G5fL6m#wKMlC6{!KClJ95eLAK-(xooubOoFig04gbSI2 zH5mb8PpWD%zkfGUcOH@I!<|q&IM$&T&h+5ALt?WzDr)Zx4F3=JM&WVd<3oIZT@adA(}=g$&H*7GT~yfON|<|3v_VNz zulTo-w4bugrsj*OKfXt)M0YoYp#~a5(q`m+7;cd;^ZHrL2lsj{(T zHG#y)jWaZJ(9j^OYvC@IYo{qZguM_(%!wp*iJYAcBDM0WL=;Z(1Es>gcaSezJA8RH zx+KSV$=}S+@}VXJYzTb(P36o|5Z6WMrfa9J0!uSc@osB1w$i9bWlz*!@EeNQROC$i zG-DMM(0_}|5*2%ZYgbITdqrtY`B0&qJ(TeH=*>KQu{y@IOFM%@s-(w32FGjLj^%7_ zlR%!=av-_pJVflwd8b@ghQeTKX4u@M2j0qEo5mL954ifo65u_-v_M6t&rCDBX6OI$ z^I~S$P}X_zf{VL$ZCE*J=Ep}v! zRsa2|-Ye&Aa_?e<6R(6JlFebh6Y2i?Y}vgA_4344%9L*T29d7i$T4wNDqAfXNQUu? 
zqdsQZ_`TK6Y3x7;WCKNmwYB5!mVXgCMCB4Jf2jnES%e5q$Gk>OUha@>VP929Yh#=2 zJpPkZ#P5V@zyoTcbFOcOfg^D(WqcxG>;pouSRB`dYjXzVsf`g3%H1PbypUcr%2}CI zm$vEz8_#I_i-oN_-|obGWHCDlIZ^xRSwV^I1tafg%)|rC7PhjN86uAoN5V`^X4x>v;!_1qwq2FR%`jJNx99;~zm2lK?b?d2hhM$Qb%l)DKnrpgK zxqiTF9+(8fMqUCyZ|Y@m2f$IRZljlWEW{F4$@79;bH={|a}M6mY+-PG>OL@{m% zwg5-U8Lbc#JD+*`$Gdal3Q_!l8q;dmF)8mB!>DvhzHcy+-KVF{uPHuxJIOW(&HFA) zekc@ij`uQOJ!=Aa`T3F0G?d;1T!UA))VipZolOjAcaOlG`w9jly{?AUN|A&YoEs1u zkHr1!zCu$+3p^X}jmW9B3rc3a8e_>}3bz!}RfrcYicZOS)9cYkfXBqy-MQ(Br}HI7 znJCa09Du=%K^ScQrDRbb=8V;nq$6KHdr8r_Lt~Hsy_143pm+aTsB5lYU)OQqif!yx zjJK6Xu`N>wGR=&;2_03S-BTv*aIzdH|23(b$#mwDhg5}>F6vL#K29Mq@WyZ6_`;*{ zIll5?{_%@%bY!TOXJ~OqFRQeRYp0aWV5Z1+-u$^(0X49pt5fWLn}1t2x@_FRw5a~t z|06w5S$!Ah^tV!cU7Ye>L&eNGS}a<%_QE^koP0jbZXPp=B?Oj3)&I4tlHpZUvWAb1 zZRPJeM%KZf!y7K$1;~mC13jNf#HFmmB8*y@IA4xUdehR6O)YX?CF$> zt6&Ahu(4&Qw^+8!gNRuEZC-wC<#PQ{SC(77G%5k9no2$DShc_XxY$=%SL!ntk^lOl&pzz*yvcJ0qO{A(Fl&ir>z!4FQM1ZE*$FzQYibKkK}KG6;YI;$C_WK3dcqZI8hmpFA}+NK=6Hid1x%S&9Mr zQVxRsde02tt~J5kRO z8RnyvoEY9=!t(NwLcy*Z+16-1Q3wcyrLR&3eN;Rg^K?P=-Cd{);-sM`!UlU9=J(r} zc-JIK0%D*%@NQZZN*D|Xhh!c(u!U^v^e3`mA}oG!WPLHmVCwDrf>9rqWuV*%$p?V+ zYZhzz4XxbNp0*#hYJ=)TTc}%o2I(Sdwp}sxC*&qf&*=DabW~GL3?oflYsu8BKB;tl z1uRe4n)_ww$P4CKP2fL%-p*&71TAy=^1`wKQ%ZtwzVqLrW8K|1C(bk^i;T~}RHP~8 z4n3c;bBkInTo=KI2-58>s>zo;M(@0qLlqI9ek^0QW7YgA>p^z87PUOYI%EBEz&A^M z+`%hZ`1tNSQxxQUVHO6KyF|!Cj8AB>91A#>uBhfjwZcD7dW@pM?{RW!K{d@*WqQlX>rx?nFJ;MJWeYR!Jf5ZO@^w{H+HtwO>6EaLDC4<{DGxI4 zLve>f?Y;_J6%OhB7xsMetDb^%5%E1l$qRdx@7+6kcKGbzpgp}VEpKU#l^kcRPFL)u z2(+JG!kU^9WT3?ts*c%IksZJZc{(60<6^Yol!u`SVcANbSd)mAXdYxHRj^%``edZ! z)h5z-xr!H$%)^dEPXgiKbO4Gm%s@63aw}pve8qx=xy9dL1*r&={lzsMb<)s`u z(gK5rR{m{aYCSgY>PJ}xr8Yq_)&GwzD&&bdAuCuo^7GPOO08{bJCEm7k7@+G*3;a8 zXy3OH()-}hj>~A#O`L!oEp-Wx6(!5j66y4xd$q{Ks(_XRk z?v>d;IFaQO`a)wEuKHMt7MqEHD*bn);n{8@mYt^mHFoWbwEOJ0I%a5o5tYkFSAWH# z2qZG)cIdA7xt~*u(0h#mRp3W=n73G0VeKX z9!XhLPbV8X;X3Oa3jc`+Sw&t7M$UBi*f8el;vRmr!(eyh7Z!?oaWg9{k#tSIg68j$ z9*W)gziv|nCcRQPW)kPv)X`nCYh?}H?pJRQ7y8*~Lgdv}?_1cRqTB_7Ht;G4 zS0nXiFB@;>Tvvp8#WX=*YHK^Y-E#5727_-i(^%N(m!o*(Qtoz#KIS{bK@lZA#g~W9 zx8zIw&}mepAS$F%#Xy`ih~}@F zyN2Ucu7M1pl?+5tS=zoV3|oaG+L-$$kDf|W?+Sv!XXlQvi=sdj)ZEG(VF2_aY=>VL zh<_iEbm?2kz*dz&fx;n8w{P}ZWXU(mM{41z58I%#?+Gut6fSnw3*;zC!z8+?GhyZS zSzueNaZzQBe0oW{y+Uvjv~FoNOXgJ8Ca@(!K13Z>M6XPoW@K;8s%76p|nmDqH99L@F~hu45_YktG2M%N@OxW%Fz} z4+=uh$&kP4a2W(@4W*c=Ay6|NKXVVykyDMQ&W8DIJarP^thZEvGH~W^X=|V+wII-Y z{E|$WqnF$waE%=wemPadJAKl*rbwG}YMT4RTEF##Mgass1e?299UIV9p`<}!#8}~# zx=8axhdnmR?U%N~;?^4y(YQeRILbYevt>a|Cj%B`4sZWZM`}_crGkdC)Spt^wc5%+ zw*{iV=6D#p5w<6(2VHFA`k(pSU_wS5_;maPfT~Yw>yPqdRo?-=BQ$Z~<#YBrjX{!4f& z zfqyr}e)5+1%`3?yj2LecCj22VU2tbIG-P_NqCG_=r|GP9NUV+@Q0^_zrjLwB_(5mVfinq5>=!(MI!z3jdF*e8pt5; zkpMvQD-3IZi403?pW9FpJqYjx{<+)h-f}y;rOm>5s|dEvDa&pLRC-Q?Syh)>)s^&o zj13NJg~&@7x$+?|&p^yb=~LFK)&i=xm1{jCl({y{WRPVa>6uC0-Ze7P`X}u;az19l z9O^f_Je_47jQ0G8#HKA#s`~(8QF0h}tes*vEFDM9J&ZeS!ijMAag~(i3-lXx=P3a9 zHrTd)5-^z=9Qgf|>HRAg_ZDpc96$2vs_vj2t!Q!-<_=~mGZ^S;;vm_`&B{nb^coH$ zJ{>(hYtWrUo?Fn&LCjk01kE+?O?!YbXHK{__tu|dK2mcEnbpRUXoULH9kY#)NGAzk zl*@M+cfA9i1{3*Ib1yeviEnw^!Vrz^@WMJ*t-m69F^eh%Jfn3TvW^Ee3vK;NA*tR1 z=?TL)Lu=P|C-4nFkp$Qm8=t{n3Ta1hnJ>PA2_i5hF1-g!8ckcEkprDJ91xz z&`_I(b{-SV&tQEga0l}AJasO7O_RGG6UNUltyGyNA^3)V{*Fc-HuIJ;m@hHP0OE72 z=@UgH`s`%=F*+Lz2ethPV#5$ylhZ<1M(t26H7a@%h{a#wfBs~L6~lBKHeM&UA>rml zAsPwxx4t?AqO<_FFoMLXpQUZponX=Jbo4k(8p|6#YQ`n4Zhbdk42lJ`n%EB5#U>E|Ei;{a?b0seWI~V4aYr}e-Gnig|Wp5 zt;u<+ES%U8gCq$!WUN@ei6?fQJ$n^w8iC3C#D~PPO5J@h5MtKb?YwP5I)VplE|MaA zX4??%3RrqG5Le=ilrz8P>!M)PcQY{EH7F9r+0$36yI#wDV#N3B#$(QA 
zi(zNNAZLkzdpM^|%?L|V*BcN$gi26^Je7f7>uXy;SHf#@E7s&iU}ciru16XrK!EYl z1zj08?wA7=-X2Sm7zLUXLy_?lsJcMqCAb2msil1~2YLN$i;dGupZ2q!=^V~1{MI)$ ztlq6K_CPcXo+p>H5^Ve?YC$G{0Q>j+@T8@tc?FA2#;CaFP-GBx!Vb;o(GjcqV+$N( zv`DU;^*Z6XtV2Fi`*>WKte5!O~l?_Z3Ac#*+zHb35q#l=5Cg z9(SsF49(?$Q4Dxov`?PpBK`^u&kUeM5pk$QtJ_0cARWUSJT^{UVX*%(d4l#(m7W9?}1Xjv(sAmsAjj| z&SRMdN`1dycz#()PLc&mAOfiHPS_c9Eawv zv*qsfctp0HVn&ZiCMqm8_^vMMTWt7SzXL#@!iF1GGSw`kCq)Y1csRIt~0A%0b_1XV#d^fiqS(>Uytmsa?3ocu$xNQ8!w;oc*w-Y^5e?_Sq|gsk)YNZ8a`ly*oXUaLEdui_q8iZtOAqsfP|Y=a&c|e4;pA*qiG)*}cfV%D2$|0iO^|KGt`)FZw4l z95MD-xKkrPZjo2Li+ni^XhXk1F7|*z`wXyp|EU-pATk9BIw3#xIISQ9I)uA^AW}Gx zU?+EF)Z4fktc0zzwEG+r{FDe}umJTM0sBCtiSvFm(YA;Fp$qFu@-K_kL-eexSBS*tN~e+!(-bEFz@7fe}0eCQz+)A*P?i z?|HbEFIYj{p2le2J@`8XFCw73*V|p%L2(ZaA>tPTKt7;d&y^pX+eOUcq?hNXRZjaK zJ9}q%ya!U}Xb%cZgaie(=cDZoaKlki%Iuv4`k_=sFoXu@djKSTy2}D+)}QUbnZEe( zqx=D~C2&4T^jLj&gvq!-?ExpTgmZw#6+rM8K;p|S1&}!bXeF}1^!gOE4Hf(W#BMJ2 zkL(=U4HR1H?8Tzyf%#;CjsX;1`ut?7QPu`VhWGut9qkuKC6I#D`76@VhlU}JJ|=BJ z6I&c0waPa7rmF!9Q+lq|yN#xHpum;C3^;lTQ%9+Y?)&wDT90@Fn3eGv&DIT8Do*4LLn5AQDskvaQdpSbcd^j`%RAnF>0^X^8G z`iy3GfTYS;M{cc7j)CeXe>lE{zrKK6AN@#?;&A(H9}&ku^;^FokHG6Tzd;{>*RBDe zgv~<;?~vYGqxZXUhs^bdf-FI8Kvy#W|5)2Kzp>cQKIYs{H04XRypqFOB&LI!$dO!9+-ZR4vGVrm%cy{?+EveVi$6gQ8= z0UC(dOgB`;Q9Kv^83~4JH%`-<-MLjK=E3&aU1YJxagbK>CQn7=B0BWgTXU~n*`Qzs zyt%DYF6z9Q4c8Eu?QiU0O4?FIYN+OI{h@Z1{inYpZdje* zX~_+Sy2y8e9anoN$a_MgY1yx8k}kSn)uQF^4S|`z2r|gJ^aWM$HtR;N(oIZ7O1;et zi~`Tk*cgjpO)}+dt7ayP;}EmmW9R)wj;xS|x;{z3ZH6 z5_Ce8>~eyXBl2hyt;$y_#dyeZY_yH8$y-IE|Hun-p*}RXiw{{JIBk`w?9(x~QMATm-Sy+o}*e;MB9PQ_P}^ z4XGJ9ZtC&rtLtkDc9gbTZjagK>+;aAl$Tw-owWR<85Eizm2ok2P5I_&*5#l#vg-FY zsorg!BU4^aThme+f>H^yH=KV|O;Za`7xD6^M|z_htCrnbwmmpkXB3k_^9o0LDk%Ph zp6zxlLkb29^fQ;KM|*@eKP!<{yoDS`*UqjSW(rsy>YzAtGfcT3{$n%>M1wNJc7WjN z&3GLM)~OH*qV|&47j9$pfxvh%v{ypBqp5i>Lk^D7LtU2_eH$LMNxFPboRVa#kKNaz z3T{N546nacwh~*OX%N&~+DJ8R(}tR+Eg_<`3K=}yg)m{y(07w3-rwE;5@Vd#ds!8t z>Yo{DHq`L|oPeJU`$y|_$1Z_}K$tF#C^Mje0 z{u`%_)_332^h9_wXvWlKgi_T?`W5KTpCLvbciWEy7N*lfgKKP44)nT#S zQUb*wUKh4@s7%mwi_ob?Fa)#qGI6fu^lBd@ekAU8E1Fx*HZHV4NgU8iM*J2;RcF5S zKTg#x)Ha2~IdMl_*#vGrqq%S+J_pX+T??ng-y`K#$ep#c*yB4KSVjQHPAFmlpTBgW z^R5>n#BdsGJ!Kh9T;YW+OO|dd;KK?|r8;Ob;oFxok?Zjwe>5l`e^|NCtLOQ?LUN6f z--)JEmHMA^s4!AK_acIRJMgpH&LoxY;9GXtFu4d#-f1y7F6P9>yP>HV)V`lc35BnF zjsiaeeJBZka?4seiig-IupLRDz@A0kVF5w5tCd>jj%_t;PtW-{gT@3(#bVGeEKC~i zV%RDiIyfw%l^7*qGgh~)q%{fgt^AyzlcjIPvB;N_Sa7wf{$;ZV*wXxY6Ozcj!#?19 z0SlWSpnjZ45;GUrOZHSeP)>D^&$%sdkhM{CT^vofiqo%g9nYJY)S?jCMv3R8i>GCy z0R#U&sp~@m(^6m;_z=3Yg!L9_E$;WL)i6)rdmorCwF`IU<k6zY|ArjQ(Hx28&tU%(Ir#i z3uaxSrnQJvax+&nVm|TGa4-++qJ=GW=?TMStrM-i-K;d)HBN+K{?PAkBTwhj?!v5N zx)K`^82w>W@4OxDUi_0isxiZVJp?JqIH^eJ2f>L#Z}*Q4&dD`r9MbavBB4Jf@V(Tj zE}0BLpjC+kEk@cxf|}em*~LbkZ-``@b`j5T62x{EOzsuS!As7Z>%x4JKq?w!Z;Lh& zQuDlMw=V5R3Uyz^BhKZ7W?5+1X4wRVe6R67o_$hf7 zm~CdAZj?~oFQT7F_{cln!F={k$ENt!wI9M&ugXhNF%eqo$*PP^l$GQr*>4;b^E!S# zz63>B&tRJw%$6P|i+PZX0Y#~X0&x8RwZF0+bow4`@s%!Rx*b<8eN=8aD?bsZN~!F$ z^=GRd+qt^=9AY?*aQ1*cH;bpJ z@Iof_DOTtXcmY&xpo%0aS{YPKF7vG!^&>Yyh86vH=epr^7gkFLfvzN(Y(!uQt*zU_ z#XHeCE&Q$<2v{8q%jGU-ET7f+=Cu$ISS+CJBMUVow|!NzTymYmmJlAqQ^IWk@yrGB zfz?RM6+O*;Hl?>k&0dY0l#) zq(Ag(*fcbOJAHs7PIUWPT{M2D!_At$(F~GaRu++bnzdpIIvm^$`S2pgYSvPm9~lav zp0nxtph2y5uyfe;)y*?E-LB3r)@YWGu-FO^BcO?Yo-PT4|;7dLW zmOJqvX5};@zeM=^d|F~xVNH3ykZnct4An8?uQ9umMNWJ{i<)*XAk^l&8weNO!9!k3 zCx911BRJc4(_?xAgr0r?*2qHqon(2nc#IJ8YO+ow^?S8vp79x3Yv_Q*&qoPU+gT5X zipV{5j+d_BVLZ_b5&$D^5{OJ8we%`&GZ3%;dCpi^nL{y_W{6MzOE?Bxp_DRR9^|yL z#trtQD?o6=)gssEk9ilgpaxwKl9aEAV072vo=)B&Rq4ahk)M@TA=g&hFI4Tuf#|uM}gJ;W!ceWmT|c7xN_ZD 
zUN{U7Ve7QL$N>i9{UPId39HqRPl%Rae|JTu=1&4W%uV6?rugB(lW!ZsirG(D&U1WY=RgApmc zld@B@=K0_8)zCfSzfc&ObOa`Rw?U)fEWMO-l3G(PQ*prw9SnLB@EvW2l(2y=X2HWL zuiAolO@B2zJ!2<^5?8~>;#Q*dk^tb>4O9;ExsUFYn(D|^^hK1CVC`Z!br z+Xm9vJ(+0~Q%0$*qQMM6qN4*90JCZ8$e9RCDAgF#5W8uM-SSQP>58_gI`97iSWM+V zAvkQpNYv&;+3cDy$%PLkYYSG~c?BFpWj|~`$j3=F~} z1E|6ykvN(@3^eJ_UA=xSHCGEJjKn*Z1j>{8BYN^C|IwNyHvCw2&2X{B9~bZA*%!Hh zH-8dCO&zc&$Udrx+)c%PfL{_NGmM7nX3t``wyJ%f+H=@=QF>J$6k1iM$Oo%S2~IS( zL)-o-0o%mq7o`-8J5ttK^S?tdmy8z52uEKb5cWQ@Is0MlQ`v3^N6z`12&sg(IoYcf z?{6ZCdl~kj_786h z3LS;!o{7e?vFp*40$T^|eeUn9V8GD={>_I#V-o#ANU;mV5Ft4zKHDJbbA4*^raA|}ft6Ij6PHt-{C2nB7 zT|GRKTxBPn!5-dLc141VoI}V?wYZLpxx97c$rfYE@t3gs(NxCR`#YgG+9}>PwdW`q z4XE@*A>-I-X<_5<1#U>vW5yv8S*gUbp!J`uKMSgjXla^_C8E1bIl+!;I0y(dXy3spY!7sMvWWjJ2&|*OjCK zC=eW0bL=eNP46JCtOfSq{xU~Lj-xrb1Z3^{`C zyJ2U4WPfadyzPM!H4ao&oI+G3QmW4&8B2e6{Kn*LKu)|C>bPpft%gFP0SPWhp0-! zvx;YNFBdUulJKt0J+VuuL}uExljBTmvZNiFLiQF!wgtER@{4>nC&tkE z&q6&;PucIO9E_@Pm2(+|Q*N6`LP0qJq`#q1C&R@+Ac;Hk2leSBj`gk14L$dKE^IA- zYBuN2b`REmEX_VNp7E3B7ngWN*W4^PsfJ}w-225@LOQ>Ly&!0!$SkQSR!{W#ma+6= ze7BrLRg?8-a`o_mz6lJTjG1QgE%<^#aMm%XkKCg!7;8yC>K(xXS~xvS$QKcSBW??@ z8=Cp>uf-!Ak&PR_s-z=)@&pPzJ_`~iF&gkoRuFFAyYfiBmC`aG)l{V5;w9bgiK;!0 z7_8HEw$4ftfw?JY6rQ+)LwO8gGVjz|oLo#81%bm_GIr;!8l+WF zpRU)=AK9H?u1cXO$a=AE8|9YzqK)xvBMg<7sXNTFhDh>A_#bRZZ*-Ex>w}l>YEj>7 z+hI{}Oq5j0AL=UECJGk2bKy{r!da;YaHUk41}m8b98vq(x=LBm!8V|3Gt~^Mt}TOD z4~*Cpj%yiRlJC+`-hW47@b{(%35p&r%R)#or&Lf1Sd~hjcr0A5xF?`?IXpzL*@Ut} zzq0c4BcfZ^O&bWtY0n(7&t#XyzS7Bh}M=_)v z1ZC$Ge^c7|y^CG(Zy(MiTT{JaTnVsG8~ml@q6@L0g+^0q%izi?ioWi54W6tTfyf|% zf{k5s+U!}7&EiJms1lSQ;&{I#V7||7AgYk0yEj=h5{atNB%n_}Y85$uif5sX_U)W8 zBI9j(+2ON|J@yuphY)*N_Hp$7^cX&-YAAiTslk%DVuFlTjA>~4GY6biTCZK>;y4)Wz+)M`gut|k2d1sR(dy&@((IDGDt>F;?w}?0 zWPP4r{I0B{3W^w)Xq35)DY7F`1G_Ym>D?8auF5mxRQl;y+SY$1=xj6k3^f=9%1XmU z*km>oLT%bm?J!DhaZTGfU?-MSGskzCKFpy37HK0N(yXnQc8?1Vx3#dBbY>g9hFH)H><|8ChJ~Ym+>*_% zLpo*5m``g&GC^y4{A61AYAO(o88YJm|3C_Kg+hAr)o2Qcb?ka`G(7H7^gq=oTJ{C9 zc(|^Xh$0GjnrwvY1*zz=s3IyTu5xi0**Dt^Tm^}p6ao+}?7Ef)__DAHii>EgS-+X& zilybfhSH7X;W#0g4kp}dp=QG=qaAH2%;d_kRJwqr?U5p*QnrkSA2EE6VEsJvi7)dL zaeVNYvVn4AThdXKX$}t>@e?WTL_cz0FXKC82|)vO#9)oyt5wnKVM-|v?h-#3qv6ZG zPm6QTOTVL4N5Rk?8(^n)+A12lhwNurx^+}MH~uU{CaZJJg?dcM0jCKYA>Lq4@kl>C zkKq|4crOv5NrUO%RyShfDj;PVfjY}u2Sfqf`n|DTE3sTSt3x__j^Z(Bx2C6Bg1TCw z>pL13;byiVH7L@}!%(8Jc5|UaeWf0oGebWmFvZ9FKo8FiL2fSyhg6IU)k~Mv(^ONk z11uvMD~fBJ5vH=1|6&!k#xZG2Mi0eg#g}M}!epF6KX-LZF?1WznF_f<&u{=M@TyD{U~)o?#?B3hq{6wq~9!t;!pQs zNEo)u8YZW6;d~qA*5of6BCOZr$dI>e{-Q{Sqh+fn79^j?C=wGnWdcM36WR4{4WXX`g?_IR8(e36(W#f#ZC&V{*YB%f(GgrWkWK0| z&fB#@m3@QMuS>b5^R@|}OX1GL^6+)@rL|%qIt_Xz1{nEd^Y#@?gKRUm1rZwE39&HO&m8r5j=*FZlP9U6ZI%<c*mZg~kk=y)HRuSzd%y)r5f$k^Y#1`|UL6v9LrIG=n`=r4kd2u&;)}+t1 z;bbM(tc>T*y}V-UARx>mAp{j_p|n=2pEzJuYV8}d*tJw#z$8Yr9;p&<$Wq-B! 
z3;udSUaG1P=BIuFbs^Msg5fZgh8x1LvMK3XwJ2x#%Dc0xec<{M+bg0Fqs0go_T9Ts zS?h~jz8asqzTE!dw7g z(iBx=fCnvfQ)Px8^wyb3spLSeo-4dfh%2H%ZnqM4W1+Ki0k7xdiVUoZ81URbM0Wd1 zvs`MN`S{nKLM$%;973e;uDz^7nT1COF46QwIvT>ORwvo89ttN2mD=zCnXNKj_vErM zEGStYUWE{HbKv=)nUKc7a8_r@@l9fdS0m1kk=4qBqNwQ4*leF`jg+Rmx9Ru7BHFB_S>$yJ$1QAUA# zJ>nyXQ}(r*lls^b`jPHxy$rN?IurS`)jX>Dxpz8j8c`*whbmGDuv`&gNT3t127>Qw zP969=kx^`>xKly{ITiF@j$Kj?@y2&?&%9&==I?cB6Ij+4) zyPE+x%HH|JXq{bf+U#S*`JqjnSF_szk=wNm2()jUGS904GJ-D+^H9h{R5u_mM( zbUF5P2YLD%TrCX}0PQs~DQ~_8j}@Qch$u!}OB%i>N*U9p*od|!#b)!#cM{@K_Lrw& zgFBW6;=w|;o7L0-LvKeD{prqL#)#ua_B8xa&uG@`HbMc3{G10zn>?n_lwC(UhdEd72 z0McM$uF(E+qlVF6&YFc3#b58w{0~e6g8?F&t+#QQGlmX6ko-0~`!iE@r)QGMZcqw9 z(PqtPpgipzBCDxlXP4{1IPBWZ?{Mn7=NntzJ!45e2 z(7FsT;ZbHG8x&2#H(2_=4F7%laqIrvu23$w5sGFu|F@!vVgPgntu*8&xt!eXqKd9_1~$lS_>pM)Ymjhq1>@3b@3RTf}mwXAO26vq|4bVd%F>8ZU}1%+qxDy zgsVx&zdz9tX@vKp?|e}=10!lcZQl-!}nt4uk%$0ej; zDKZn_m$LI!B+WMmF&CP78v5?aF~~%})DbKLnL5nJ+EDakl17JJv7k9#iv1P~I=E&D zau3qQMIg3C+mc`(lwmQmAKu*wFs!*$m8Ye&0o~PVFz3MV=)( z2DAQLUXD+hSQLoauX2^TiOU#Gt-x%ylG9+O;k`-PIvVzQ3}LM_Vv6^pDBpOUf`5*oN>=ffgN))&2|ZF6NGQw?*2)Z8;xNM&>d4wU!*VcZ>J; zFj{5eu=2{gFD;rke%{7)&Fy!`rm=~{arrLyHcDVOC9(as#5oXEmS^iVg90>MK~;vMjg%IUh|;^h@^m51b|C`OQ*e zHUg7)%w|Jurl1{XttM3*!d^Hnv)-sB9hy7DGxi^;05DLwh;pH~30NBPd;D?8iX+ma z&JLb;-;X~AdKjI5?>o-fOtQ~uIsT3{SDK!V1Z(rb7Y|p&q~s3UJT2Y1FGOj{2^ z3%%q)A~uvpFK^h8p%wq`2SgmnpYy*%QLIeN{~;8`!ou|5F(?)yW-hk>tW{^`=49je zKepgUwSddxS$5DzK)N9=w1K<*8>J37Zf+pZ1Y6t0ZIJLcw*B;NyxK2U+==|B-4$Mu zG-uD_A76Ga{_-&PYh$c*)j8l zK+KJ=PY?_}B7!PV=G9MjN>8mFZ_DlwfWuS|ePWFZ?#Q2mpWldmU6H~%{Px$%i5EU-9E z*LtvxZgDpD7Vxr-Oif@NK0st(#_YoIaKPXA`F{oY{cAJn{X65o{7+E(wkL*B@<5y+ zc-n!df&9?n=DULpFX`o0J|{Ja2S#^4{RjrHZ_-Pu9RV~a4xz)Yz^ zM_Af?+5*5)!lr)MIGa41y`Q+9*jnp2nO(SFv`$9;!F4QsZ{2-aaHQ+w7X zpK9Sze)C;!8hFMOmUd>4VC`IkfaKh?WYoE*?(3W=Ke-hM<6DG>A5NxLko0tbN^p85 zXd%zo#5_6`{fk|q8}b)nI`|xT-^|d^(D*bA5D$nyK}pKLH7h-MfB?nDKT==10ky3` z-JpG6-B5Y8$=r9}h+VCX^|(lT2S+eZ&p)LHy~up5tbJ1xOW^t-O?4ps00Tcv&}qQP z?Vwx31JD^u_bmad|9NdLz=?j5 zU#r1E$^dlj1gN};=bwxxfKwGf>Lg${6fpeYr}Y5Hjtj1=zV2OG0xSUhmPTAvTXFy# z?s}SPsjr}Mt$u=_a{!KIupghM2O(BkwN5~FvMaN%Aecd1$hF^w`7xcrF)*7_gCleE zmzUO0N+rO>q(zk|FQL0}S#K7`|Hp?7Aoc32S;BEE_xJHr@<$5XyEhAfK#joC%=vja zq`t-tG$JcIl5=+};wydyWOwqa#Rls6Wfcw_1xMtS>W10p<A(VvMgBvi3RL(+IE0{|^c}lar zw_V7#Ad4U47vYqjiXnjVrStl8bwhCJww?D&p?B29z1Qy3&-td-5FY@lu?vl-bG>VK z-McyZrF$TV)Aywt`j!4!QTqA?>0P^aDK~s8tt(X8EAqV>pV7g?-R&p$eTnVUy9ek8 zd?#P&8|cwSo9i3(16RI8NLbGhxuAD-`_ z{MyNjl~wSJ#3LKuqjOJ1tNGD>GT)~qoB92b)Fl}^Vc(aaYg3E@Zh7&2!_l!c-j(+OXeO7zEbMRMSCfdu$m!$9cf`j3t#hj6o)k zjpCxzq}5J)c5Wo!dq)_PB=&7e3tU>7M|-Y&uMn*Cidz74z9$*}6eb(D(Hyqr=Io>2 zt{)G^jzTbDb{6)IPGdNlWuVMyakx|P^d}-;@uV7@V+tabImb@fkTopX6J>JPuAWgB zE)KInLh9v)K^b1`0^CDW(EgM)%Tl?bb-3Y_Ht`Bid_ESR4GbXy?C*=iZU(;fc1mg_ zmOlQ56P~bicks|MzWm6cN~4epl6!W zSi{c41co1Fl;aAWLbVJxC*{*zz;dY1!eFAG)cubDsmI&h9|vKl@S^ODIXNjl z5unf(V@^RJm7-H`OO&jm9BI=l`yKbG+zY-z`xt#2OG!#3js4F4V1mNTn*7mM(sSR8 zzmdJn$xmnch10+XNJeeY^}qt!oRe2dWZ()mBXb{4cMgu$=gI)B{;YnCm=p<*jrNGS zi`X?e(1o$a1`kgqZ7yD4l~_rBk9($;(LsaTpn>nyohbhIC>(iRK}|LPS)NS_`(?L% zPbXB{>=G*zKOHT~xqTg51$@vvhDY!t?_y3x1nnt59QaO@8ScNPsTq>I{S&HbksIqFL=N zdx}c;f>O;(@Es;aK)Y72=DwVJzY(v=!lp^=&o}>{VT5>mzPc7|(*~bMV8-h}K8cUm+QE1VfP8F=URW ziz=Ih*Rfx&Y3x|Na1nQ~KE1wt#0C+!D>Kcno+Ww2+2s-Oix5SkY(PepivlbT88E6G3@&&I2*$v1b%Qx!?8v|c_fzVpZIsG zRAyA^gFvqO_~pbfI+sl}F2Q$vj`B*t1h4X9 zZJ`~9nmEYjT%c;J6Wq&xEG6wYZTqxuQE&aAud z4Q~A>+#c&gf1PmQRlO-*N{-kn>AGJSsfV(!8>I=lOr(dKPr5?%6sV1kEyAa9fd$`t z)+R>}fUD9p8WNhD-!YuD#EMN3k>;yWvrw7ZsQXx+9?;}c!U){J!#0ndtRC(nktDCV zCndM3O|pAPhF>m0MYR;SG?!h>Xmh03e0BU@@RsLDW#cSpeSgrODw&E>V!eQ*I^A(x 
zqOxWnpa&b`#a+uf;?5K+4T|eN=r9meKDoubn?fBk@z_5XFU(HmCKkmwNe1tv4s-cE zF6HQ$C^f0ktw;gIsOh~0w(jU2_;<2z_zx9__QUwVH34-rI@NDj*h7TQ5b`%bM$2^J zmR;NGj!s#rCJyAHcFq=j$<>4rOcHq4fk82|X1@l#Oy@wl@&0SrfD`B>G)zyKDQ6(g_`We`IXeGTw0=!%1>0jU_(SfiQ87!sJ zso#~_HUHZ2ol7^RielA3x=wSyx7-%O>F)8r~D|ej@oCQ*42b| z{$?K`A)RetoY>tb2}OBM>V5W3%A?n6Rvhek^VH7+YGY@eO67!MfKn=!uT(NmK8`96 zEmo4q7F=Ny|fns){JXz|k{6QYai&{{%J^Xv*GOa)TIoaOED{B{%|#oc26 zOY$62(IO0_Ev{jXC+2A}7((KAN>5!V)fyby7~Am3J?e;;6#j;PE1zlUI^0&OQ2iPx zIXUj<%AbsE4D|s)O_~}R4J4IK2Bz1U%gT}tp|_qVbVg4*U)SSktDk85Pzm|8iTFA6 zr-nz5@ZSPL{Fq(EyPw=3+cuZdwm!`+jIs3l8QdXTgoERohbGjzj7)*d@=La zk;;=1TV6;W|Dr?N4@sokl1U>%%EY6D3x`=<#+DWzT#j@g3!l775?BK(=5wnH^a`2_ ztUlKKL76rxur!Kmh|q3i)ZxF~wv#-MpGmqL*kYxOF3m}6pxtsx<%X)WpJ)p2(q`vk z{gv~ZLsMQa)l;9^@0Tm`T=m8Qj#QlUjorUaPgUa6mD9eWDEFqvs)BmUzDQ?Wo~E1j zWxDW$OC@AxB4uRemVePU1+%oqyzFtMNd`S__~stBjzABLcVNM!C|x$EKJ%aE2}dbP`%J9hA)*>}DU6^Dqc&Pd_88fihscp{2M z{GHw@USH41jmp7Ao2h%i5a=f11cHOX+p)(FE1Ql%bUGq@NNSXxhhW)(0x?e0*4@*qo6z@(%bHNo9vJOw2dExJ6$DELf%wf#SH{_aYn?y-i^gQ zX%<9($t}SVsBc*|xT)$U9Rsa;fJlAJJ=HM$u*D_GRcz0irj#4oeN|q+=XYCoy>vi= z$`R5?uq76LT)%Ly0*o=Vaa>isT8$}iKcbN%xa&m)PdW(pu?lU?_@c#L|aeA+`zpV8ES00~`#%OeMjP&upx` z8I7)cS!&QI?w_o8PukP+2d7xb$6vYr%EGXu$N6Ogy2j0KCOdMHeAyJ)>SSm%GJ;jx z-Ud^eq)$fZKq1_6* zU%wUmZKX&z{zDRM@_uNr`Q8`)vLvC?C`k1Sy(sq}scc~mv?Ru7+`#4sOKwTGu^mQy z0C`R+8~81b)rn|%$=rE*TufqNQP!0hHlP=VNW`_YK1U!|#?lAl8YC;wCO6s4L;^8kN{=PH?Tm?E7E}pz>o}Ub(zySlbK*=B`cf^gh7E!a ziU>{4#-oQ%X;24Fqm9V%aYpVz!om2_71;P|vDo>E%5X_;3!}3yERLeP1>TLbUFa6FrVZmC=;{%O3^}mgHMUA7yyD%iHU)6F zHRe2}=yKcFu;yrnbx#4*Lq`hzQ)0{AsmK+TmrZ;QEH+iCjs{%G62CS(bwdWvUk(Oe zERlB|_b$$tc)mf`k8UTD^1=IwY5ujKaN9W^>ENy8-1|lnf^9u>>0d$f1^v#EMOXBU zUfj|FpM46rOEFD}|GWp#mF@O(9UK|SR$@Z48?v`1qf6`eY#lAA7sUB%)AVvvt0t@k zh%_N_R{mfHm~2C&9}R1AX>3~6BPQS?)hSx2TOEzVHFk9spKV(W_wK~zK9&u|BbrCN zHy9$&(GmR}T1W&6g!qtwkNo*X<%v5R)e7s!5tBuo(zxC}bm1Lihh0N0OiPrWhTEG> zy$dc~%4c^`x!12UXisKLfHv#8XFU*CA3PvV$8OBPzSn>XE{c&1y=bm7xlfC_sr&mR z=1HfE@v%2P%x-feYQVHoSU?Hj7{rAwDI7n@9;NkWeTtI+kJ-CJnK z`ah^g=~eMSePunt1*25dx6aq2aZc&_m79MyV?6>dsVhH*+D z@Qe&8zhiD_>l%AS)E4Br#Q^79x0kZg8O=ORHI){&?Uw1{n>V0Cw+JW1q|8TP`Pv~n z&$uElPS#L37QNA2p{NKY=kgb5)DKt*8>@q7E!eQLERQ;VO0$|vg!{7HgYOA z@?l(>qi;J-^-YTS3yEIG=}yRS@UT4xd|6JRjrj72LP^YiBNV zm%{!~R7BczhO)g`1$Nl2LVbza5$UvaBkyhxIFS1WR#ej0yC8v)(V6UAy7TH9Zrwdg zQx>&hXuEJn;Ab+s-x`FBSbz+~jz&9m)(+i2O;-;oA?cECG}93`8G9-udYv6 ze$1U3IL_MMGOL%@%!WV>Hm!~7~m$71@ z+(&1BgwJ9YJSUi9+-<-xW#*bZ4M&T3<(E6aC61s{59ww@GuQ!!vd=W?!N-tez`=vj zuI6539~cGh?f@z2nRMF)8N%ZUhwz@pF*Ub(hU|WYny`jIWF~Z0WN0=RoCl<$%~9MoSNw zt)#1D?G3@LaITf6aB>FUm2gWJ@~4F^s@biVv1sN%=(^s=cC5cxwTTeJETl6v74L2Y zzx3e8AAGoS9i%yFh!!A0$#CCwCv3^FwbLf733QibiKr}8)Kse|HV6mw1m_W6jH z*b^k5nqj2QcJHk~-;Mn7(;6EmXQpph3>bmq)E@dieks`?`NHd=`9qxFFW|;wVEZN# z$kz%a5vD51s@>t2`Yi=$W-A_Y8ZcJnzg|I6kNvE5qdrFT!gYJre3d%dlB& zp#usbgQfMyp^K|$A7&JBPbA9hl+9AT3=;(;Dr{i^oS1XC>ok7K2Mb@qP^5f&FD2CA zB4xznNC`=cJGIkb*l=#A>Y+RuwaoRUX?A>J*MBUMoON%tjh8qQDA#c3>j7nW7_&&F z58#$#TR`tpK-gt70H`LXMj^|wfh50rvPHAhVMXt^tj+fg%meXu2>`D1VnLzVCWm?ivTcG!OrY zx6~uZ*nHM8jl>9b42aIsS3pVwT0INQ5$RmGe{;mE7b$^GubDDzXoiJ;Fn(rLe?a#r zQx-8eYa_eW-1}JI@0@ zzGKHOP7RxV0fZvL)bR)2vH4A8|9P$e^Uw<2KhX18I=>Erv{z)TZ8CwEl9E9m!L5zf z7IWqe!K9S+OgQt6&c#-mrdJ6vvi5me=?#U&nG8rHTB+ji^9*DcXFz2-P|iR5S18K# z_q7M|7J`7ABq=m|;XA1C1##-QdgMxG>)0TXlS34hQB9Lw5Ue-$#P=d``t~WewfY*A zKTK+Nvg4t|G2wwzq}QiwXO}asX@l&;|HZ+UPk`jt z^t#ii@8#~fc*%Q~6F(I?7*fQP=b@+4qv56v&s21sj)*}DIwt@xzfT>G8!@+JHTRT6yk2N~{ z&;iQ-J|=@2zLcOJ+|3$$CuX}DKf}(19hdjVaVEj=JMz#`XRMF;i%RcMr%3oP=~zZW z1O~=`XFe&yY&G&h)!jV=FAHKYlBG0;kTf(V%$rBbn2tc8+FvRJzLVz-3zt{;MJ20$ 
z_55nmMfE3WW09`XO$$i5bnC1>sQw-67mZz^l(%118keT)RE_%z$nNAMDVO*7zh#fh zikKNK;c28g`Ag@>c0da^-6DG=@bZq1`9kAS`=Gh;*Cm=C<#@|I7#`IHa!Zrx)r(Q0 zJ4z$yv3odonXqfs*Z`y21irBMUNZ|pv7$-WrE#d6pp-&JjiQq`1emmO4V3n~GCX0) zDT|0$zC?+$U`8^z!-Ga}(I@i3mMA_ciqukMi{TQG#X|8O*}3|qHkGbD;#o{9(quN4 z$P07f`h7&u^cmfEKYzgz{acCI1bDGaY??vz`G}?R$mb8v`8Q9~PI`6DG-9 zd6M6)ae2gA561w}y|66#>l)${fpsfetO#R<*3WHQy)8;zs&8%V>%nRRNp!QJ=>_LE zo4?mX02f(I%g)R-Ikb6OUTw86V&q>$KCjzI?$BQF9=LR6$AdK1sIjAt)u(tpzq})P z87;NvOwuYugN7D4oYp&4wcgk@w^+Bs5EiSH&<-kHFL;geGk2;fCz%@`$@tbMyp(3G z3{cRZhG^};bbTa`oHh#584vy4Mo2{h{r{q(btB7{1IiDuMX%LUBNPP^sIZAC=n#rocEY%?t8rBCAo zlWOS^2*N?FbMeKKM34E0QP=&)Jt8?lP3){`TYquMS0QPE<)@QVobo&V%EGCdz^X~c z$v<#|YuJnfHmvSn z)U>j~g-?Om90y=nlk7{aPJsX{Lc}+&`j>sqxc`Bj@wk#q0T%%x_|3@92zH-?NO6}o zSH|IV3NumRmanqA`sS&vvlmRD+f8}KsLRtsSOfLm3J@gz6Bc(l1|iHzGpJXhq64ZJ z>Wn6w02}$c@0_uWG6@Xi83#u4G13jq_b&Bl)ECvAO>}1Vf4R)@13GlvanEGd;lm|9 zW+7Q`f)=mH2JfPwn3W?@9Zr_K6TWHsO~JdHuqw1!LJhKvV*kaS!H(?GJ)+-yeT4Et zfwz(iGm^39DJc6BBONt$v!`0laib8f?==NdkG zm=t5*-^(vvEYnu4x0Y$bl(3rS&*%#U9Jd9SMyRRx4xj>zFE>=~eRrOEgXKR(__Znx zs7<{mwNWAGC+0%@uW38U*;g421Aib2P;YdN=#_%=fq`$zP&jQu?<#ZGc!jsSy3I1? zXWl#YJxvQKhPja}WdoF?zWQJ)ss|kh$56mV8?Pt%E1lT7NGWBwuC>8P(+9Z>GGl9& zGwmXULTZ1^teaSK^52iuNvn}X(&%aQAoM?EsSz_gl2lt&ph{5%b^YN#lO^j!{iQt6 zCpnfMt)eNOtX1g5U+>h6wl$JWLfTXxSlJ;_>CM&y#*Ipb;z~Fl)6ofe2N%p*ndTl{ zAiaF8A8U4JPf9l=G2ou6o$O-vgom{@(|HuCRffIAT)m{TvvcLFjBro0|Io<_)G=)z)Y9>w5+ z5FdTz``Okd<(ev$s3WN|nOEZ@5~%QL>3bC@W+N;13;&o;w!1GZ;I;@5<|VcN)%K1m z@pHCKt0cC5=9`RaV6DvFGN{rZ;FQnwwY*s)a}X?D!DlL1t@ix0rL;iq^#SK2-k5bu zs?K@~VR}^h`R;1Sf#ID-k1GGQ^N;Z?0_@U|)EIzMLbs%brIH<-IlV>- z8X1eKPWu+vEI$|}jFe28rpsONcCQRurDzR{?|8oqJ$j#Muv>?-_7wDfVOU#KFdRRN zibGSbj4dWi811!YTo7C8Tt8GoJGcKbu(<>-zmj~FZv9U@c#mJTzTcE0Yxo18dDli4 zCGjOON_<2}-M*#poo5CpAh60Oe4{brg=%Tn*ch>qB=$K%?O)|8EUgy53oApWc-w9m zDs0&y5XnqfoP(QAqtpZUE84Xz5$IpN@M(@tLc@;JYr~32qg&9-#$X2=2D5G@I4#gK z7jijG_EFw-?YUBK&k?RW(NGX}vP1#0zW&4ckPPsapp|hW3I4)K41e}uq9Gyx8Y@=t zrmki@rr7A;lXWrHg}WQi$H*A5r}=J9+W@|l^HGfT zK9bljkla;}O;HQw;meZw8+XaA&PE3vU9>G+Ucq_`cw=fiocSLzfwqFq>rDIB*zNC$ zospgZEwXMETBz@R6(BfdafISEp;?lCe(^BfQI=eqxI(8^Q(m*_SmKO2$G(tNKy3bj z=G#~s4O>{saq|w3v_`p$nMsCeoV~!A58lW^sn%zgO>W!V{fE`sXgU&Kx!U%B&M4Le zogD zpIUp_J#GK3Kc!(CjC}5awJ>XrZ5AAT@=zp)MNEr4Rsf8;S(Nbq4b*_ zhJ4&cr^yP2C;7TLa`VOTrK_dMs$F0?k0jo7r27jhR>p^FsVSEQcf@JP@`;RsQ_H~y z4+x3j^}S40i$UE&@=}~A2#h*A?>)EN5kjMsVWJK5tqXRLB~FMIUd|ZsrBrCFCARH# zl!s`$1FaAZanLsBw~dWCQlSLh_wZvE47_V0YG`s+vUM4A^zAWzMX{{qL^ z2dD?fgaQ6r%(c2ac(zjX9o$PEKFK^c`p!06jxU{mL=oo$vx6}BiMNM`lIJe@oD!-P zzr)shZXI_}7Dc~R!>eAhdS|xyN}K(o?OkC2OD>U^^-0a?@TGMxVl1^F*;)JD)MBP!S(`zqWq($gCt!U<1B-2p! z6__rtFo{6f(YV4paAKC9n}OJ*DNk>u)0_Bv0T+Pc=b~PKVVX^S$e?chy|v;Q22Ns?WW>%MZFEo9#2JO)gE)YoUy{%%^g~tICjdjC`4B_;A}P1#RQ0*a zGgFu939Woi-@R3`99jhzaAd(W2{7HliC}CB5)F87aLwKuyXj0i&!0^kyH~=R8FH0w zJM3RzWhle;dhcg6vagaHVewrJmo2X^g%wH;L)RfA8B)9q>SHmdWM6@wbP14@=D5r< z>@r>clESzi!9{8TH-NzR{1y<@_aL!w7$e3P*}Ub&Ap6dF-bb@{=e<2ye?m%qq%eb? 
z9}~2!F=2s}5(}tGyMHMHXWW}E)5sYjJc{cZ_Y`(0OUa7xL5*lURy0&zOEdmOz*qN8 zKDf2eQ_URzGX$X>jv;2;%QE&|H75B4$Jy6c8d~H^4-~~z+&+#RMO2DE<#J*Qcq~d7 z%ShGVHg$J9`DaBbViI0tS@EvXh=(U^h^;iXz+VRGCccn^>BhiH5X(arbHt8~V~Jqu zy(OF0dc}}3UV)|GbW|b+Bss8*GL_^lVXt|ZE!;)A4i3SBmoQ)mi;ob=otv}!6FXV2 z{nMT#tNsf-wBJLj&O^G(Vhl{Mv#bfYC~#{T;;sRgz2@}n#JHYiznrxISFJ_Vkjp+6 z9C`|G?!B3!%Oba0T~Y7|LJ%TMH4s@7BoUEPibx1b@S2E_1RWFpxAbnPeV~ z_rCw)UafRh6=}ok5c<&KTpK##@u1}o(2NKLX^elTHri+a)X(Zq7##GviZpjGiv<5i zX$eUL4^qN!)9Z4By*?~FhM!sCNCZ)H3@~{!#-(O<>zpyLj#HP8BRfVcjP}>EcFkIF zH>`VKb^#n7ngTZ1MDFPgBs?g0ZH$7nfnCZUiOh4C#F$)(y+$`4tD~e=RN45J!gZ*5 zP>H#lPyVXVu9Ht{o?a0vtV|lFj(~j9E57IMebIDNvzQtd2pA618{OkQIGm=f<0 zh8+2t%&=??;xK{A3SZ*7!W#aK;oGBzPe*;RA!^ovrBEmJea{Rizy*q4UKG4xH*kI$Q$BtDduV@K)=pmq2Vxg!Y^gr&F`l`sZqWVjHvsrXiam(>ry51_l? z{h@Q>Ws0YbHVlZBvyhD8>6+L(raANNZDU-5VrId*lH)u|%>tE%roMD;FDGi8qXGQ~ z9m4FBBXF{fb2^5izv}ptnw93~5FP|5tDGU?8rnMS+(83k0O((!bM2ijA31zPhV}>B z-e}wyh4a|3UXX*Oq#~}1*8*15H(c?-Q*8TlI6UT*wk*EE1EjIuxcFTGKkNKGL(%{1By?xf@4Xh!t1Xt-2p|bzF_|0;6jGWrS@osnx8R?OtZi9MxCgB~u z#VrXh*`MhLyC+nmv3W=AF!lbk;bqIWA^ko&c#DpaE|3*Q5?jZWrXox7+fMVJy)Y7< z#4Ss4l%*JI&i^fnXe}}9&^1duxE2rjCLV23i#k*pbyO$gO;H_#yk`yw4Tp*nO#n?> z9u8?qf+Zc7uETXR8qQTHFx1W(AR0K(K@pR==oyTHP+AE+X7s%ld)X19G@3@&0z0}| z4%x=IKAgV?{EcxtErJKw*Q+zahReByc~;j6$0&HTzbf~Vlk9jzxtnA7zT<3a2z0|4 zcIut<&*fjaiD|-=nq_Y@fc;+Awr!`4(+uR`DZfBAHOU#b$*MALtKEiELN{{2pl%wvn^?8EhqQJsRtC%rp~As5d|s2vG!-_jN9+Py?Dq5+pRKk; z6xV>sDv{!PXs|t3HGZEU>qOK#@WXB8nj?QB6&Xp9CsmI-Tr^CBu1OmXF~Q2KrZ*Fx z!VgaGTOi`C|24&-d1Ip}W?muoJ5qld387gUNbcmmGq$2cI(m-CA>F|1bhW5lgS@79T7;?a4;2DZb7_7jUl3>TWAvOX z5)u@0IV^<0r`Nn=C^Kn;sYXVPiCQyh$a(+ z4a=LPZjRJb3qH@5I-RENvZ~tH0Cy0oZeOTK5(4kh!l|2oo*=QWU|o!rMiST0z0EE9 zH(m8$Z2Xk1`h{tv1YU0m<|~APrstMzmmB;q&9_C}(S=$?m%zou!+aGVj!|Xd`E5z% zUU;_OJ(CDF>61SqV^LU4sB>_#VP-3-zxPXvV9UfX>;f{1=RBw+fll*Y_7vm7gcQ-o zAdvN+Vqgn6(X~3JORToSC)0$j=)q!Ey<)11+4Cx{95U65cqEqX?Ybr^D|~be`{aEy z8q}!1cwDh#&v##66vKEhYq9V`ACwrG&uB!ku}`EO3?}kYD?vuvPF`T$)TrKUcA~oQY?SO%Dcp>0)k7d!@d+4k)n!<=?(Lr zAvDo&cP!c$0dplW9*KEnooVj+3Dbw|0vo#YNDCzbMw8i1N?nrbJmh(+5~f=1s35`3kTC5d9c}g}Apm zU_yBanC9Dc!xN2>3{vZpluC4o>>HkTv!^}`rAHm*p5{_BC9~sJ6tn==ywnp;o4PG|C$y( zigXoYi(OUGUnS(zVCjXja#~8NlWCC4Iv>m13qfWo@k*isFrdK%hyU^ZNwQI=4>n+< zuKLQ^=kwcPa|tqw3GvbhXKV+T-l}}n+x*w!3evcbDF{}S9Qa92Pe+F1pOQCb&vNjC z2I6+xIC1_KR8iXm@wfVDEsPH*{qxMJru0h4v`e^F(+Koi-UYUPORm(NA zIhWgfNTbDjK9LWuUo|;WR_rj|FBb?g6R&8o0>AskiP%ZDd+$$0;w01lkn{Xtwm!gd zRyx6b6e_CG^`9f2Lb0+^?VWUJFROVV?x7AkLRG134y5z3E3+$O;E;OQ3KyR?dKp(2 z_j<1bRSty!j&%Py1?F`vR@A)IF6~1^^VA&Bck@HBg?!*K!me^UAoDs4u{9gY<}G7L z=@(2Tl_cr1&eQ%idr-xQi%Sp8K0!9i+3d)FVtg)Fxp7!*QiWEbI2V5~t;0l!{^&$& zgn*Xrk+bIW77mnUk$THg*@u<2gh7aN|Je2yWF+mI2(O!55)elKR77RwiZA*o6aEji zphIW+thi9zhlTi_2wo>nrlA79g11s`MZDW_%@+)#lPn>2pb~S(W2l^B{XoQ}vp#n; z#Z1^-`2Bve9u*81k#-nemFIwNVM7dTmjVqZa^)6 zK$-%6H1dW*w`%{OTj(`QN{&tPr~sc7ZEv;% zDB4jID#a+CHAm(R>tQT)0vdfnAa%FB%V^!b?HqicxsuRR@mlAaeAX|ym&iBJ<1qqh zphRnHlUbD4GuS0Bp8K&qjk;l+Xo&MlLYa7avKd!EZP?~l>D=uqe?bC>K21hMbr(Xu zeZOqrB7TE$aTqFxuRCk;Muz^~=C1tYcbnt8=m!Y}AzP)qKrgOyr%>Rmy3H{kn6z$- zNsZu5#IY#7jn&oYM(%*%$A@vos(i!hq{P@cD2(%Cv=u=tJbjlIisNrDDg`TiMrAP+ z^(T&#@|9j*hdYlqkI*{3KeYrqs!HDnTJco`mKSenm+Vph;_Bq_Nq;YG<;*P-dmDrL(srp<3>3-t?FZ__Oz*baRd{>A$v~$PW`L1T6cl3lu%B$^j&`-luxT1 zB9z7^|Vmz$OP8P!!-({&Ew02|zhm{mgAO{P_AosQE9OJiP% zLISs53#n_K1@{+atiDEuR4Qq0JdtV~!N>16r0x2RM-*GoR)`5BzQ> zSF!87gMJN^!Q`}iNlw8#Ia7_y-_NQMMczcY%*TzE%M{2PMX6@6&GDD^2<9c7ENBr8 zh{=>8H0qm0&VRZ6NZ*q%R>|Eo2!ImD_h$VLypL(PxkkAC-ER)GhXy;zF%dQ{#$sdr z0)x$Q%PmmUhpJsmg>$V@$%ZFsi3mxZBt&tTeR}DW6jJ*Ws~3_^vzn)Lr)JGh!cW7e 
ze0PIQzy>FLc8q5c{07~$Or91oBOav`HjqD?8buX56s={Ph!VMl}F9cVTF>qCD4I>g=$Cd>*=x* zh&_A!>DbsXacc)cywK_8jeRM|#ZM8haU(~)1(nO?rHjFiKDO+RtTz#oI{I zBXwVKphE=SDP=P7bxs!TJ!Ynyl8`3c^Xya%M{6`|7b#jIl=y)JzTtK-kJ5x}4(-=H zx4_DIS%$^1`d7Na+nrVNOpX)J8lN{1gnb~Zyd5@WsGHr81$o%^cj41DWgQ3O_4Hdb z@}E~ED=@ba^LsA(i6k3mu~kzE_T&sK&h@eUfJ}eaaGM~+HvcRXYTQWsB!E)eKf9HL z3l^0HTThxk@?*qmy`5=>o;&B&mu5ARr>zy_38!-n2I%f~m5H_Q5Di12Ze#P!d$N`4 z5fNLM^D&<9dD{B9YgXmWtT+IHOgAZ>)k?yq{e;8tCH}`Bs4m`}Z%ySj!>wEr%DMUz z5FLjY2o(7Ox%197B0^kzVJ8n6=Z4Egt~hWsvQVkFyXLq8LiDycPAvC!hVz$UCJPss zb8$M9i$L`WJ_~6hYj}ue;5r`iL^rJAGP9PqD z{I!PXN~t?RIWNr?ghL$3TH0BJekn;ue{d>l;z+GO9zz1}y`JX(;Uo6qWbA8D2&<0t z1D$9stkWM_qn;BdOvV*Y#wLfnDwaZn$YP!IlsFjrNJ1Wzv5$&M_BPZse4>70?I+Yb z_af6J`1&|5u#b!%lgz>Oqc}D>H>~)XFyD4*>|Fj?v zp9Xo3uwjr|wF=+!kQlr7Rs3ZMt*Yt{86<2&-qJ9VH|po8-vn#z8v2WCC7Tah9hM3k znX~_|c;xzk)BJ>ldh%R75|_B8b7-~7^Q%pw>vkYU%a`!QnrbUNl~*Ds7FlU)E9494y3%#hJER5gGZ-2!8U5wN zB-EgVwZkax49I-9GNNQaV*t$GsTnS=VJ3dh*f!Se<#;69!)IN9$CZye^1Ry3d+@djbSzJ4z4L{lfmo50lOea zANBkibSK*(v;GBeo@l8Y{nVv9SsR}UUd!hFL!V^Q)zk#`OK%mxk0^O%{^iJ;kf&IWhj;%c-w$&`>{ zjpYaU@b@KGXc>0>g%pEElpZIgE9a7ev@L5^sq5$8a4)Ji537CJT&+8oENDCI5NOni zG^pUZaPSbq2JBLjGDU#wzPAQT_v6r}83cQj+II4jPtBpnKO`ph5Qu|p@p*`M%^_V5 zZ*A}4z3OsE4bxn!0!}K#zKaU0G15=gU1Ub6lf5~m{52|8deXRM6ca|)Ls~i1!VSDU z1>q{wVWDXMh~J%eBm|VEXEH<}G(sDJtnku>)Z^418XhZOg?DBoTsuPAg1 zIq2KND_YpRd^gp$NGF?gJgR*McUif0@!WLJbS-lE1Z;l|*CS;^Cdi%j4YAd3?E>f> zt*G9Mr3yR+#-np~1M(Yq`qi#ox+7bIh)cO~7#Ii2P+%=79R36CQ^P-m!k8u}fkKkX z0HSahNLw$i#n7>>?{0ohC=5l6Va)G^Mqh9>nSWDR%c4?cF^;JP`zhg74g}l;^g%Xz zz5l_R7X-=+i*E7o1%2|Uu(CZ>W8q#*m-u}csLiC&IU@s%n{N{r6lvPj?AyK3{)a6lXTd<)g~Zs%q+S}xiKTZ4+7`bMZq9Xq`*z8^F-*!|b; zIGx}sz6a&BL?bbn2C3n+Xp2LaNq{mrP1uN1j;qPI(YQIbyKs{n)4$*R-=`^%eU7jO z07HGz@sCDTsqU&=lEw`Eo}2IkT=`B5aYOTbpImSL1ik-xYZrZRW{5j)$G5+M>pP7= z|JY4-1N}m2?DevcZo$#4zgVN|t5;2IRoozye$pmyI3%5i_@&vENg;Rm8ez#i9{_4l zr)3#FB8tK zak#-Fn1`i`bpQO3=y;EM-eCy*7oUgZP#w7kr^>Xfh);_@fGCA z1ghHsHdFHP8kI#y#&QNr1W^;Hs`5@SQiyXakuqxOTxhd(P*gvt;d$*Mg5`ph514SL zGB^w%_N}BVNhi^xlAuPSCcE6 zn#4#*6`q?eCt((W+o4+nDa-Otw+q(y(V%U2>uDPchAw%5^JjnlUFJ=lHXGsECb5L` zQ+Y}fteSoNy;7hXVxMyVwAS$J^k8&Nv5XMGcEG=qV1nc%JZAHm50k{C#wMXbvh~l? 
z>HW0X24VYHRlDOy$`sYf3hh|!E#LWvT6zOJy>zkO?7FFb)tjW4Ar6pAt7LsG|LY-3 z7tB6auy%W*bQOY-dLd@mtFjWqLPx&7vvHc>K+h8SuY)t1(%8RMTgbd zVN&p^UWIJcn_@H+#+5gGbv24Td$vVR5l z_~Q4r%NcfQ-~DtnIM#qXM_}6v8|f&~_UAVRp4aepzrSOoQnN7ysyWm{nqliAr zRvt!6O#1}-3VD5YjM9S_B$CE*$>T!@@!&Pi{{>H#fQ%MS_K4Q4!vRYx2KN4&`&?5n zyY1lzJ-dFrXkr^2?qh4y<3BiBw%~}vD{Pi8EU&U>%1u=7n-LiQ0*e?Ub(zsP-QETe z{VK#k+D^YAX|{n0PIQJ1?^PFU%%#fymv0_al|P78C{VD z?p^!dt)x@>?u$exFGDwKc8Tm-N3J3V?s;Q$5F=IC12IOi2SFUr7PI3%0H6rKGG+0X zE-NwQKLhWzRo@A$eSi08(%7H_re>J(VFSA53VY#e2F2gwhf@@vf`{y8h}nfG()ely z5hIOpl!}VW%3Ppu?9vH-+{#-L;GGYzba|0elIfNL#VaM*P^j+O=v=A82wZQ;H!_lk zk$tz!dCvX^YK5Ubc`wQqO>tsX72SInNpw?Z8hgHLj!DzZjMF|Oo~qEd>T>U|EuM!I znFjq^nW2^53i36LTCj@0$RLMk={AI*KEMldeY00?aOs}Wmih*w@puoo)hHle&Wqlx)V02?59{Oc6u&NV*Vs%{4>q;`T|y@P^(IRxk#E8 zUjHvxl#A&6PZWF=GDS3VyBvk->8{X890Yzhe@+3=rijB$b>PQmC;Y}Lr zFKwFQ0r<{URP!Wdt`<2DBJun%%nKL>Ve1pB;3VOM;P6wRrprHeMMIN^zKV1YHt;t- z3pFWQXX88Ms$uKIsm6UgKl@7~u;6qtJ5`FLU~*^1j`vx=$ojjpZk6a>H6Cs>*xM+- zO7?`_qgpRregIS+Mu4Yxl$yalkGuhVnyqD}_4F|41+*OZKGtCkwM;C1gu4{vF) zVzd{H@+??b>u#l`nHCfKpINvh)5oWhMy`BGIz5)5HP_eWYrp-~@~CzB{=N_V-0$eavV98d~>=aMtv2^37W7uo z1AVqS=NH*ga`yPBp9$t{0~47WHHUmDg0Zb#)y^+w>a|Z{XO74$-7iwu$?cRM6y*dw zHuiIEhpfSU{1NC~&weG4Xx_Mn$+Klpx z8}jZ4+9-%f5%3=e-LzxAfd-=0VXEGK{D9=E*@)J-5LaApSF!r?{xAmyD9q0DOl%I< zVq7BUT#;7u$mmQkW!nnVg=g&2_wJ$wfij&yxSoYqKc82e17H?QRXP$I8#L*QWE(Ps1KdC|`LGV*9Tk8JelK*L(Qm zC~r9w;%mqWCT)Lp0H*eAC?7+Td|mnPG_i$ipBAx2 z?Vh0eI)X86@yk!Dov0Dc+AD$`Zdmtf{0M2;QnQ+{xvE0t#3JrvMJU`(`^1DkzR$(- z*a!KF%g`*|v$uycg+3Q{07)g)oaAz=6}54zyChU;DQ!h5dOQ zguR3dn30ec@KkmALuIrWBJ2t0J$R`0UZKtWcJ{19qA-!u>rX|mItClD6p^SZ#6eV< zVOQ|_(-cjTC|wwNdH!D?l|8s*1%j+NI62X$)IkTVGudTKTufWPF(f=j`fsgNK<9Kl zRPi3OdiqKp5xPMyM-sl6mhAEPDNQXf0hf2>sy}84X*+ zmA63Nq|TJRA3Q~AN2E(ko88V+gwo;?2pxVxlkl5weR zPwF|3-`8t?QgpLn6dEiYYf3M=i+dI%G9(-iQaLY^wOUTT9;Sym5u1(tX*(|Q8%cu| zJ94=#S%potLycadg_z_ZexGe;9@mOf&44#pGUkUAg^H}Kp8Oj%%o*ZUPLPY~!ZcH4 z8uJ0sh}5PxSfy;Lt%BC5ItyGh410USv{0%~nmmQaogVoA(1%m>p38)n1D_2@%?L|l8s^& zZgr~ZYqo^g{)c8WmVh-lK0Y>ko5CkDLUeB6V08qdN^b`MUkgW@QMdxJOR`$$Z+86# z=kK>U{r!7jZ1(c>WWv(mX6oeJfL3e@g;)h3SS4g3L2Fo|GsM8Sst8D= zy^{lkr^k1dd;Q@4Nf@R!j=!MkAoMl(NWPMg5dj!}#&dVN)jdF-botxH;Pqc0@2^R~ z7CqDW_*XmM9KY7R(;G$`50yAiei~nO3aU%VMF6=u(s?=iq!RC4`#Ib79(_(*@qmid&voMfBqHaF6`ae zhdjP;#0he2`QOzKqYEP=Ggz=53%V`esrBEKUmyBk8X+;WUlIi+CRRz_xzew=;Gn~_ z;hi7O{W^3bj$hC~s(?OH|45LZuz(bJG!P4Wy92A|Ft}tyi}{~q$Nv^%t`K-;cMF7b zTHfG=Au#-eV)f@f;?jw}aGe@|0}1+Ajlap?@UtJ&I*Pic2yKW!rv!Ix zPffs`--bZQK7;bWRXhI?BC%0y3KEsK_@zQzbbS++1bBY%vQUmM13PE=oe8(_{wP2k zUA+;qmt&RfS}8G6&f%`^3)1!ve8fHV!S*8@IDvHf?^OGxJAIQkxqk7RAt?RLzS`3L z-2;=(_Pa%<0yTG&<*6jYs0NgF9`f&d@KYj=dzosW>KxC&aw< z>z^Ot`2{ST;qlWDLu6m~-=tq%|IMSj)0YG){s7VWCI6gfUYF4YYhh&P*Ml$o`XKmL zSH~lik2wctXt^_gWm_(9Z1Fnli7cWx+EWL@n(U>8-5f0-%fNqwNL63a?7}^O%g{zr zVGL?f#f!qmIXG23D=>!tuO2yPf<`y4S5YFOQpHObDr(n3K;D-N!xD!gl+s8QbLZ8^ z^<^2ZG2{y9Hl>8l!Sz&FnPKqjoSW=;nN6pIg+QRMtWkfAJnT)C`yW;nauJ~_hb+J% zlQ&Ef;c!$6PBP5#CWOc9FKmQ>uH8_Ja4K~dKPhMVANk9y6_$wruBKwXq1 zoJSC|fcP<~XFNqz*Pe=A<33BSK-eN{gK6HNm7CCEPg(iL;_=-=e56xi90Xw;Hj37_+-CD!PoxeeUtN3f)cTPx$)ADSLIQs~(y$^g(QHzwK1E#BBZ$yGvt( zTAgynIxOJ!TqfmrWLztNqR#2s+B$J9Ju}~ML z)Gqf=;McmGAs%G`vHc^_3aoyH4kdo`Z3UT#!&%j=0h$3fNxhR^pdqN>?R*C=h(`Tb z=m^vHC0C8I>05}F*WI0DzoDjT8@s^5n)a7lr|~4G-L(?0I1*Q29eh-k!mhMt&=T9_ z!1saVo6P45t$gh?pgI)88wBL-+VEtJ&oBD4j6c{o4B+JZ5C*4!%MU8rjz~;$lmgQV z6Mdie<;X5OHCxLO5{75CqYX~X3&jyft9T&y`cs;g$Yc|?o0Qqo&we(kq`Xzs&Q5_y zA0};84)|BFDf@c$B#ILID)&OFQoHeK5@P*l;|~3&p*3A4&ioj=s-N?#n#Ddj`bB3| zz!^S9W(Ux@ig@$Y&%E6XC~ymc{(aBPR(1ag&$h4A5x*4wu18fI89g?>GGVq4!3pu= 
zd=G}ewT6GT@er<7Vz1aOfztK~aS~o2D$Y<7h`D6Xse$KXCv|RQ_@)b6%8T$!pXuQt zDozv*N%{xOT+GWRoAvV$kV`bFXBv)^lxA(M9Ew{y9&8?`#Rq8=nXQ^t@^PzY~AMlFz^^NexFyvK@*aW*!i4_w^*7-j!2Go!Fjw@w-F| zw;d9EV~}-y>jU)FfXA;>?M%zn-b(ub?5z zbVEci8_*d+tr#YS?Mb7g;*oMoerfG+2{ab zp=H~^yB?fVEe^XmcG9KrcK2>(YSa&1oqm*nx;QHbWx7?RzdNVS^H1(}xEZ`4YeZY& z{r~l8I|rUppjMZcvB(s!!Wr5ji0x}IU6CSWk`>xf0N<(fgH<^0G^4kuJU=1yXjF~` zx9YnW&3!sLS9rKOI~w&q84Rtwr4D9%f$>x##KM+|r`gu6mC-8@kx2`87oc0U*4%|; z&bx0BRHWw=5Kp)7I?)!f);XLmLEO3E>$MkLmY}uf|Kr?HA>JTghl_F(#m+M0jN45u z`wyhFwIY8FXZ!2qh%4E7(a5HUc{-MvA(Z~kAA@lP4k1ml3*EUKpROb|ePMz6U*wt! zts6<4z1ZHHJL(QWQu zwTvl+2vAjch_5}R*e!_zU*2{jnwP+|uiAZfgr6YjV0%VN*4Vnbaqr{W&Jde_Pf0MW zqkcoJdL1)DU^5Mt?>Ns&tWfFTC;t*p4|bl+r}WaoJZ^j^9P)i8e%lTG&a0QVt^$Jj*LnQ{uTfFTbT4m4`2QtT(GlJOS{5uvCi7z zF@jrT@lGg1Md1HnnTO3>lb(hQRO(de@tHXG~` zuZ_5{uc*`TkH4|68r#yTBel}hRv6?BqH=MnE;8!%(@4~uh{%$$yYfK;=|qZ-R*T1x zpmkp=ML+*y$FykXX49#(x!J=VL2zTe?_!M@1-^N#@Ca z>r8rv@}s^>l1X6(#Rsoq>ur_@i^GFyQmv535T=geS9;m>Pdjy(!(3Da6ern_3H*rf zFpl0hq25i_Lpl(IT-($$sqdyDg55;m9Z*%FNAT>;=6g2`=Sn$7^7f6-k{jaI+y|xLC9=ctN40CO^|sDDYr7fzSo0(ae3Gf zRHbA!DJwFPnbp)SXjq29>ybZ`@Q#iUmRiQ%t$n7J&s_1%S$c4Biu?@WzKN<=!5~@K zlQ~(BDk6AFc0goS3*AKlOi=GG?|uQUTJ5m7Ii~I6AVHETMg4>3#YlLip<5b`omNwmw@3c4Id&bw_Lg%_Aw11s*akvM-uflV^TCyUEBa~ioUN$(xjI9mpn z)iIN}d@l<|JFuLnQ>ILQ?**tfKk7~E^D3AXc{V#9&iLFpSy=R>W4=ZcO2?P?(=pcE zII^fnMBzc%G&Clh(cwd4#9^HqRFc7)mXfGXc9v9QGr;on8q|X{Q%-ZtDLN$yc+Ldm z1`Tzq2mCcVk6zv`LL*x6`&s;P3MaAbtWU?pX!PSW?Fw=5bKT3zkDBk3l$;7bkGPqM z8F$VVY_nv?4K)VV@^Dt`h|mmEA0|bz%4Q+bA`#SKrpyR@b`X+qii_JaaYiqS)~~xi zqNe>~#IZ#D(N9jNu#UdPd|uGdY+ds`*Cz}X3EfX_45QCjnE15S*0+$r0Dux{Q_W01 zrvrK^p*4>v=L_Bbr_gk-O@|&~(te3Y2yx|C!Oj981(wXs%86mS!&09KqE4kC_Rqp1 z+c%rFobRVcg7S$7t+FrSf7$OF$xuE4kLehGDf=@LGtGnNdSmj0Qt0pnt$nG|R zx19G~y$oKo6FcGJvS3dDu)w}*hsk1~C(3c_%8Qem6sKNW?8~s06A8`QxD8T|@$fOz z#K`1H73MF)!hn5l;ffxC!jH&6<`1DSY^-p(1^mqM%3HJv`XUh798lNA+q>sIDD#mF zs`OK<3p!9ru~%EsNhB(b14hHu!1S$UA~;DbqdedDm^)_d>gC#TCrp#YG&oegoo}pO z_aiBnqr1k5Fq(a-7gQ1w5cxqut!8L=DDywDki=HfM>4s&8ySs9mMEa=Y7{&+V2_B8 zaD1D>s9c9)z9Q9fX_F0EzHu<0jL=K=MBfb!a-az{Uy4h2oOL9Di^NpWOD+2LR7 zs)UD#Y#9sK3{JN8Rbu)s!Ru+N z2GQv{EDGzLur;4G+Dz7x#b$X9@nQZ6i>n-Mb7jI5*32b?wBvFmQHy{TB|wCH!$G@f z{DQRwyo)2uAu&jfxL-+(5l2y^VoQJ**OF7vEn%07-rSvKtRB znxA6+^}I}+_~+1+q>QNEs;m;yUb3JByfKJ%=scZobrgv>~t^Dvs7J9TB(OJyB{mqEd~Xmo~>egvd%gpia8_v6|F8DXBz{L={R z2mNi%Q*x5vBt3{UKLJRiTHz1 z)G^p-eGf{(QC+Te=WcdJKI!^tV{eBL9-YOWPquoF&+C#DdQ1>&4S$Gc>^{Up*2qag z&}0Txey^dC`K0vut-s$&IchulPjkW0=urZ=&nV+*_sFD{ogZPL!R<7j96d^&qx0#} zo5)oty#NQ%F4mYoMAQ``1s zi%XXL_BM7t!2vsvo$p4p#c$(vvIh=|o=j*5;yK1nQv% z{;RQ?s(z~u6iNfrw5Q@G?z31!qV3*0%qstNcj-5WG(0Giirh^A57Z528C()X%eM!~ zED9C`K8s0@lI+Cr$+017p3)9Ze&P)*KVTn@uixG5npk`Y2`tYMVHc9PjblbVzt^{?2JUo?6VXxy3wtZs_usQ;2}~ z^Wr(Gph}M7f|RZ`Y)JLST~4B9dbpE-Pxy=x>S!j*tn`E+&6j3(t)7_@Z{KY7@#Y;( z8HJrX3%ZTsP_6-N*m1M(wabS5={c&L6CRA7wRC$5sEhHD_g87G!J0*-NY)WTyG6mW zq84EV__@c#s-+OH);_z^Z<;x*5NX1|3kWTT{ksu(J~Gb7EgXgQOkfHBr13k+#(Rp| z$5GFLQAbor&k0qoX$UqxbDE7TH1>}1d7;kEiS$B>I>V;VN<#R8%YVx6wvfcDn6=bD zzF{XLOKzZ&9IeIr;X1A@v$!xnTib|tWpMW==$KeiZYE@## zkZE(x0<*Ncu9yS-Tz|fs$j$mev*O3g8ug=xrEk}DDI;@#HtI5<;yTiWJNnzZf?i|t>z-iE~0zYEBj7O zgbIcdb-nH@nBt{*lh8BjVVAh&k4tS|1?UTy4&97erWvRMO9zx`m#@B+<+;$!pl@+# zg*h!y6d)35_z*M3w3bf=i$)P%0rF$Q37nS;GK&yQjFrZHop++lGoTCCx4yC;5$0AGGwp47}3;xL4{m*j>^%b}zlc%m!H! 
zWZFrfo_Hp=wfE15(g(%_I9;1BH)-2p#fxjZlVTO+Ng;NSn6fEA0Bex-Y@r5a4?t!Pgb>|WgM26ZxUOfTuFl6rDc~YA2cNdk(nE@+j4$idJDCdSV7evZRLIgM%6jgA}F;JcH@fK@oTRp~u{w{UV|)Sz-ene{BP^ zS|h~h0EB?TIIo{JvEGU*`__X98@*zP`7!yn2bd%myu?kC-Wd~W;nx{mMa5uoLgJl) z@1$0E*fGVvq!{ON-vv3R-!pO2$Zb7f&j`}PhKJB|biJDsF2`Pn&W-yfI(Mq4L)zY* zasyvY%xL_C9+MiCl6WzZNLEtD=v#54s;S_@1#eD)ZWDV~~E_=57YQak#hN#adUYhj>{E$MYQURC1?N z9Qlc23+81OotycpSv=d%TP;1{%u~-c_F)p8{i5jW<>f-aT=sQQ2vd++77YC;t}qp9 zPjsUG(3C-6uBp~WAz6{)dWq8#w#ncrOOi!=w_W_2L)Q#~gm^4cvlHxrmqP>{LV%ys`6}K0{om{=zXx)uU40M}MSU#Zz3z-Mt6#FxRVj9`H%&s)yVasLu-5(&+~s=LO%{!G2`goORXs zh7CILoQKT|kiI&j3r%;2VYUfKNT2|}K?-hj<;ey4S+HGrnj{*S2nyOrj?YhzzL+V14%Zz*mLjY2eEg)e?E0tebm znbVeJeMCUOJzU(_KaPYI>;bXvc#HYQqi9~UZC#>2`w&NJxw-Vpl=8;#xz znC@=4@)%YtyJV>))M%g(DxHRY6v*l{cNQILx&&jy=A_|0gSVp#q*6D>0yq7!)p~vc zwZXY6petD_&jF=tz?KJwgV=4;HF|OF!45HQ>7*f_66@M zR%c0YeZsGD(l<`c53EGKWEhq*whb!Gs)pK#owPfUG+S6E}idqm)#{SZ?46R*9WOw%qqTc6`%UZSCKF`dn5)mweH<#UTX}3sCP;3MzCIm~( z5%6C=pET7RNyujA-f*YLc$m41mu5tmA$f2F8&6M^&tsauDn#gF9pti0-@>h@qWRs9Hs!V#P?M^|f2)|*LCs78Q) z-=2r{jSpd&{8Knr$YHRk>iZgdN%8>=X5!uRbI@0dQaICoor5VjL81?%(y7zyMKx{N zA9|_ZzUHyJaJT_ZmqIh_yL*(*j=K9h*qX+TXzsGRhaoSFHkUG>j=muHzk%o7SbLUy zm_R42?r1MBIW7sY3SdqnXBA!kGyv9^K4$iXVMlKr9TO|YL^ZN~RZZ9Fvu#&H{Hq@S zek#8)7x&$a5oQ+rddC5(r_J>H3fDkcTeD$LSYuO-Kx5fqw#2WlUAsE#?pdQ`m?oZ@ zi1Okxs4-~VWk^%{A10!3q2s=>8q81cjohx4*HsW%IZ!MYBNQGuuM3+AN|cI5$b6Wg zF)bv%eviLu)`Cv>*~&|aZdh1U0c8+c`!dHn)JyH-Hic(+{dMvr^(>jMpodA)Yw>DoS$0M=R6il>mva90#g z0-fHLnZ2nw6i1Z55k!>C9;-`(WPDQ5NglHC`cx)sJw_4@9CAy?dVxU_XHG4Nir!t0$TSKaUVQG>ZwuFYCbhcdPU;!6j$!8@u z8ewb|p-u#ZySbA`i=E}?&pOYC7 z!qZ@A;N>=CRzurRMg=2M_lzUf43!^m`bjDk2VGViP;y`B8qQL+0?~Gf<6QP2zZnQU zdBqb=gzb2VRZ9O@~X%LN_vXE}KXC{1n!dpyPrm!YVXluh-@bHglgJ7q$V3*!`$ zZ#ld1g}ulN1YLa@`${!khWbjU$10=j_}Tpe|M*4*BW=bOFYJ~n^QL~cFD74>#cF@a zM9Ux7>6i@WXrp_L3kUg?%gF62VGZ7DbnWtGRMVay3htpUnt zbsUXBZ^pejeyaCwbKtcays8$vL3?Wnj_V@LL8LVW7})6$fW7RzNvl>5&|xJEmX8i@ zM*s@FD;eu=Ar^UPAG}*i%(yr+3ea&{UOCiTw%r)ou8y0vL7i|}q9t;qw)%{XPc@24 z`cpQgYTP1g`GV2N>%{#5Xc0Wg!6P6v;QK>_N=38}Q`)FMt~CnA6`e_ISn8|N_vxU) z9|=1vC&3SSTW@Cr0Xf5(H{$q;2al8Cafb0F?Aj znAkMdOud_+crTO{KEX4o-BX816@3y4a;EQgpt7D;Mbn!ssRVp9W3r@fgq(fo{zNlP zf*~MLaYWWi5L1LA5Xym~H2s75Y?uLeASGPvJc~NScj1au{;VhMc}952U>;|eea|-q zx1#lcwA=8K>il)s1TG{v|XR zYYCawc5nE}qFMKxwWI6_ZqTM+qboLEJt-1TR90$YDAe8dlfrlt)k!UTaJ&s5a*UeH z4N1(t1Ve1TjFs*N|C~6ZMza?=Eob)%E~^T7*U$CwHk5XtOEH+>L@i`rNRVV+g6A`d z@%IiYj+5O6+gVIOaxIzn*c=1&ud~{o#DbjbjZOYQN29Vlf zJqi&hRwt*kME6&h37SDFHPS2(OG5bT-PsA=lBPz6hNYMpGo7-8qr^_pBKIis;@3Ew z=fR=GuXhPo&2~uS$kyyYO?egaT(laYKLo3Hc%SVnUq8p!UV!VRLy<7qVl}Fn_}$CwQHGt2W2V#e(g!$oCQnO>(%~fl*hP<{E(5l-d%Qls zm96~Pja7zlq)AZI;i;2rI$t0*dR%FG7j_jAGPazRYo3>lBbd%$`2_ibz%)ZRlrLn@ zRf2}_+(U@OU5ZPtXYJO?H**AkQ|v7R=zwtgp#~#_b1&B)JBI!vNd`RX%_OP>`dWyr zs%H4tB`2g&ZxqU$@Q87gAAYH;8zwJWYr(sgJAP{Pd2nd0VP{>`Z0rObtU{)f2C^}L z$z>&Q2iP{1Ze8Z2+;#$Q)5@YR`9_dY9<3Hk;3WJH*08R8Ut8av`fq5cHDqa&JxeT! 
z;yi&nw?tYJ;cOJI7^m){-U2L_`66`9nW0x+*Z2_>dE6T{{B)0*WwroXKj%M@>BD?G zI~jX4tkMTtTXa=!FB>SANn7ue3n&H_NZs=A8OdUhIl)+I{3EzFLBcGPqyMbJJ)f z5A$sOMm7jbz-xm@Bjm+!bjrOb?_HM%KicLmIQlO70VWjZcGcJc=t!^t@5L z4u{MrYRwGPl<^)BJ<_;syV{NYIg6+H(olj}Y)IvC&~M}rlH$vfR-GK#TsKpCdbteO z344;%Tk8$p_$UN!Dm~BO-l_Q(EccXe{X}}l7FCKE3|35Mb>IhXbGUQLDDzi`2uYz3 z#WgB84b;K?ky+p}4=i;jef0oOA5Cs#>+@g>A*o3Gw%`QKt%;Ay1$PkX%bj(;-;sHa zIxOHdpB6@$c+2WtYpuSi>|!Lz905lP{@dZ04p>4zo0S*~npXN?dm`?n=xG%u>_H)_ z)0yd93O{$NrxUxg$L5rqZKP7b8;rP4>0P-kuIzZ}Q2MdpY>L@=_YaU+z38f{vsUAGu2){}p@#c2r8ivciOAwh z5C=}C@uGNfr{Q+e`hZ7FTPv*FA%^DeQojD{ofI;Gmtn>yY;p6=Pzo-MxP+_7*&DqN zDn}hUUM4g`!gmaBb24Qa1L6;XSr@PjejY3D+c`08GU};TNjA-!k1o^FJg?;H5^Wz> z6j&ngLUzj@nfz2M9Y#gMed3kkqg(>3@bM4 zsXB0lqZQLK60dQ`#y01-jaeB2hr&LG_09Iz=F_zcm4~OpS?>dHi39zPx2~GGlA=u4IcDkuMnmQ?~JZznjww$~(}WDR4Qp9(;4&bAdyO zwi#_1o)6A)xo%dK*-P%^U}-|6>3(+c3=plFc7xF%P3VTlqetjn6rV=+6!?{p{WFF} zWo<_!P=jkqQJ^FHdcn$KqsVqmp{i{>A@JfNb11XE0%3V40c_NQ@$r+D)qtVwyV<(0 zgW3`{(FwW9#!x?Pn%-S(5ZKCukS9Z*ShbcBz*5@XE?}8G2j7v>LC+*wqJb~YpG4$g zCp4f-Tf4Z1%)#wZXxlh{&O53ibUSV9fXmL5z;yWJDv{E{@L^D*oS|YFoNQxeL+3X3 zkurkIIwQRk#twpBx%c%8KKqW26@c+8;Jr*WHlYu;v`Ll6i|$6V#mKk0w#+0UWN%TL zG$9YG{X;k*Z`b;?Ji|qQ6;prrr5Pefh;1Uz+hkp%k%d#D&jLYgTWu3_UwGx;Ls6@qip%euQnp*#EhQX8srx8iw_3F*5wEANl|>GsEU+<_SC*F9GJR0j7U`6 zzXsm6&8R!34SKlbyN8Hy)p7Z&{D4~@uF+2x5vOHF6=hTh5B{m$jz^XrNa_hShGjzM z@V#{+$qgrC{U|MSjPBSe?7(*I!((N+$_jZ|YPCsq;c6X=^YZN+G90G+!pMH#L;;Fy zBY~dz6l-xj7Pv!uCbx+_b{RIUyOh|}!X!A|DCK;ALi!LP|qfx(R#}U7u+}R4Tbb_-r|L>8=Rq z?ju@YkbeBAyNh=qXm^H)ErUNC{E)07*M^NJBRSCQpd-jmQov$5BmeL%?~;3&J76TNL4YynfQ~#6#6eOKgpD zq_lK_$5-1`HcV7D`4$f+?tlsuOXISzm8Jl7cCelO>$oc8D%XXDxn`=JMgJ2l0iR1l=)kvREQG39_+vb%TiI4M@yR%86-E>&>?NeO{=%t!sK_6?;W%a*=K zTXfAK5?pK;QXvSH^aR!3KCbSbg!xI77? z+IvF#`c%xs46-H%ig9&Hq98LPk6pT8S2u_#ACi;3PeRyOTZ(mMXr+Ds@KI>j5)}fj zF8C15$)Fbu$x3J-Uy*V_H20~wdtJ{qb%-d1+?{hL?* zDYoTC>KC|egC7^2jPE&eW*zjh7&Q2v0*MPY^5-0a(uFgBN~aaXa)nI~bUY;)>C>HZ zE5MD7q&`rq_`X3MewTSDlGy;J1EDp`UhDXRWXU2UgOA4gjax0f3&vf;F93bHtI2Cs zkYdiPYlU1lyi*9E{i&Nu490X?qhJNyfYOLF)GcLxqtwP3C7cKzuR1q}Ad+CK_*I<~^{hFoB82;M;y@Mg<`_nwiK}*R$YY@|L@hbQ z`U!gCVq`QrBJh;{4cAW+q(z9nTyP}9knUs;B@=EU;sZ=CC5*qCn>O5u61MAG`d z#6@=zXD#xCD^+l_R+IIG*{Akrzc7fhq}cC&3OJv0#&S2H;+yiIsdD8p;J^s~Jk|G* zi{P+y1F$=%9Y=%_q@Vksh67U@{`DN=@Sr3WAZc?pkvAsV&4E3rZtJeHN1IQ1jbDDOvKJupGh4?qi7u=o*8(zMzf4Wlp>T zOQ#j09?U{jKcfe_&@0D9D;oin^=Mj^kGLVD@ENX+e(dFzs#QmbH&J+@aIB!=Z#_7E z75N&0)L?LZmoqdsCw0agcph~~s(m4M$6MGe+C*|8XdNsdJWtuPregu8_k%Q%em# zyiC!w!jLbSNye=o^Bp$U+w2G-8$|Lrz|Oxmt~d6uI{Nd93I9OZEb`gWpIa`sP_HuW zduVtM$ARJX8UrtK`W?#eS`e&w{0(Nwg?z)umvU)JoaL`!SFFe-t??tvZ#K#cq(cSs z&SuyrXPFmnCz5k}D6Shz(2!!%jZ*ovG{yKMedPv`CwZv@oi)Fj?q_hwEAJ3l2|@5M;hRSA>o~8 z$!@s+h{OUu2V39#p1oi7leD$R-e^ii3UMO3mcc1Durk3p1kGOpJ@+Y)kB&FbpEjl> zea>#j+OaV~vkH9n7Rg*=i*5TX0*mE&6j1APm_i_!XA0CwB1bmd(I3F=W?#yI3N0V` z?u5ozzoA=M%yrmsU?D7en{^#njSjY%8^+_%4XhTTLky|1+vC*^+0#2i*B>Dr48D5q zOsAq1SSiYI#a)HmOcRM&GG@}}j4JhL0e=!!p_|rWw}?dG${3?r_{rQQ?&Zp|#&BdqYw)+D1@}Sxt?vv0)@!>h>LCOeB3yk@A7x zKYfc!6)aM&Wf9*>8QC8zzt2;#w*H(ePcGF)uNnP}S2Y|rI#Sz$2nW+U-IOiZ1}SME zPIImnSfO}awaIx|n6-RBo!I7=cI!EG8{xC3)eS{VQk~Cc8YNG zsw*z+vS~k$w@6;=pjM!HFIK7h+C2dMW?!XRZNJ*l!2LiCJcs3=)hf6z-tlYJt6l33 zK0ycC9+&Eny-{J8fGo6#qy3l^W)Tf560+JJn>A67aTYlIqp&^Uf}{~EBajobW?w~8 zBXj19O8Q2DC}ZVFCcpVW@(=knKq4w7E>Ps7ak$STKRr2R%M4w>4(`E`<-x!LtwWlRFI?TQ?a0+MDRWUA0ir`jK z#M?INgvy9eY}saWNbsdcLV|AZ%sbK?K^+mhsx~Hytj1N=?>HKhvfo$Ea^K|Togi3F zhGvP}5|DfB6ZNwy9KB>_2>BIkMUjeD_8I6H9lu*fs*0u)^-j3jzR10=yKXqj2^%im zB)A;OJ)WB3cvp)B%vb zE~YDwi41?Iei&L_7T~Q+Q&38vZC@Ai)_C+-Ja+vlSKWL_KA5%WF=P5rnudg55%Nl9 
zFBJXwnY9P^;z`={+zRLLlT$+()JV%M{{6W>Kiz!qcRTiZjS>5!r#C?l4@Wrm8?Z*- z1UR9{=KWbNx2!+%2uXFtQ6zuN|9Z#%jI@Dh1Dx%YQ4PXk&{Xyp%IyTdd9UQ=t~|qZ zv->k*;)`Nhd4e-f9(oJKPn=TTi=Jy$OE=ma*oK?o!%#2M;mfU8LGqj4Ja;;7KeekZ z2{yeSj8dT?=7%*D>D#OFh;VMSncP6G4 zYB|6S$J{Hki7%Y@A+UFIbh<~$xk|Gvi_gY!bn|Eh!c_F2_uzqrX_FxX-xM+(t5eWs zb$~fQ2^rg$erXj$TaDm0V+ZPss?=WxUIZY;& z)L^#n-gV%^P9gbzfK9(JZtn2*r9#{kXdG9_(((~maLN$hgv4of4ZJ~E3v3NBf}lRx zN`wenew|F!T38nA`*-%Humbg+4V_W(O$4$wAHFKxm=!@>O!5UOp1Xt)8**hPq_0e; zV&lG!;IG(KTSf2`T>nDS0pG<6H45m=(~4^ld}c z{X*Vfy+3>p7w;ejZ$vJBExQQ4oZH3Y`tSl75D!C4U_B5;vAO^eaNS8TxGF};nM1F1 znTM(4YF79Dh~fYdI$;q>(YIkmhdF-zS-&b&m(+aeB$Nqw$$XVmYmqyo{>Gfab~dTv z;b!#(7=aA>I`odUjjBgCAhbL!b=gR>efSnT*X?iJs$EEJFp2}`U ziu@VR+MSXvkkoL%RwBC$oFl~zx*ANpXhwdl zgRLu%*AIT_i%ZpW3p#P$Sw@qB*lisX8m!uuV@47xKwT}5whH|oh&NT|U~n?9yeEu9 z((WJ96V*fuLzj=Jq;Z9L&(Ydi%&Q(@PT9bxx^4ODtuBx<4{%b;d)D6aCirtE zndmQ|QK27ZNS@DQ*Xl#4ScG^%@pxDbIBr8s!!9~X&ra|zS}BwJ!@aff4PF$(d0)wx z*T(r%Z;X4XL_vtQcv~=pmp5ZlK$LVZVx3()!J@^xC6FsKzgHyl68#4hF}!!=N94B~ z2@mhLN4p2IL`R_TV_3;Y^%{PD31lTG`bY>>uqstilSTH3!2KQ3(b)lR-v=EEgD3*NSJGj|$Q0 zb&`nB-0pR3IoJi+n>M1WWkb{?4cFtC!&hL)h7l*_Y}umJ5r{PvJKj%w;*&%k@jr;<%Kmhn-xp$&>kU%d0v?99UAw#SC; z`nO^_gBj7?k zRtrcJ$M~WtU^GWFZ~=IzSaQ2-gQn>%shO&1OR)IZZ( zYpAx0n?*iw#^IA6LBu;TF`4To7Tq}V6naK0@PaZ#apC_GWJby@5r>m5^elvpvyfr_ zo?V-BmLoP$K)_e(aYu104PqVDpLfO`rP0~Ec8QVPoc(jy<#W0BE0i{2xs$QQ%C}T5 zA*?jW4tlXm@Mlrd{X2o(u4q8QLkdBjJoKd%afva3pQ`k;d%njfJa?tfcHfTWp(>wT zrN*g9ot2WotD?+&ZclXx^U9)+tHsAT#8TT-IX3k5GW=rz$WVw^|hB49SVmz=%siur~2@o77H-;p5$?pQ^ zdTS$AVNNGTo2j3Zu}knc^cW3&t8*gF<%bhDJv3;6B=9~<-1ioh6Suf4D;cI^98g#d z$_zNYD%(cu>&E&M&r>;*RX?@pvt3Z%jZO36@MC)g9GJ%<4`?nq%%w+>T;6ElYVVno zI|P(`wSaHR#vh@5u;)~i0aK?>YujvMh|1Hd!Tvu1xDrS0>p%UW;ziHg*Puq3*`?EM zr0tJ~=Cgx!Jdp2QW`p@I_){$1(0?|@Oj5mbOvE?VetwJp*!=Hb)mjjpjozM;m`ECMw1IAkM z^qz_YD?c_{(>r)lNU~;aI(L5cja3p66I)~yzHIefJ|iYo9akYT6bg~>@6+45#t>um zTmCa3#-}B6yYYX=-OHqgeYrGG15PqqI16XDJCM8U-J#AI6}_R6L;`!SemL{|^W33e686o=jH$oEj2yKR3 zNgfQSW)S4`WkURE$@3mT$0alnz7ipUpT|4e1$f+5 z@hoP_^`le&(V$bik|rMBDa*pUyI-__>Os}Ita7&OPLF70N3@|8`FYUcP5LWhroq^~ zUsWF$^~P2Yo~CY1fwb9#w7ZH@%~YUrrSD1l*Cp`5yBurD^`vuVqdzL!4r^WLn)4IE zYg?rrb&Go&3+`|r8YzRpfGT

From 6c25fb971793816e3534eac23b365f6053ccc1d6 Mon Sep 17 00:00:00 2001
From: Daniel Lubarov
Date: Sun, 9 Jan 2022 09:53:54 -0800
Subject: [PATCH 012/143] wording

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index cf9a90c1..8f1f1cfc 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 
 Plonky2 is a SNARK implementation based on techniques from PLONK and FRI. It is the successor of [Plonky](https://github.com/mir-protocol/plonky), which was based on PLONK and Halo.
 
-Plonky2 is built for speed, particularly fast recursion. On a Macbook Pro, recursive proofs can be generated in about 170 ms.
+Plonky2 is built for speed, and features a highly efficient recursive circuit. On a Macbook Pro, recursive proofs can be generated in about 170 ms.
## Documentation From 9ecdc4d30f5c8bd8363fc3704b5905d6faa70961 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Tue, 11 Jan 2022 19:36:32 -0800 Subject: [PATCH 013/143] note about toolchain --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 8f1f1cfc..d4bb04fd 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,17 @@ Plonky2 is built for speed, and features a highly efficient recursive circuit. O For more details about the Plonky2 argument system, see this [writeup](plonky2.pdf). +## Building + +Plonky2 requires a recent nightly toolchain, although we plan to transition to stable in the future. + +To use a nightly toolchain for Plonky2 by default, you can run +``` +rustup override set nightly +``` +in the Plonky2 directory. + + ## Running To see recursion performance, one can run this test, which generates a chain of three recursion proofs: From 9f09a2aace8f3895872567cb5d031d1aa8073882 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Wed, 12 Jan 2022 16:25:12 -0800 Subject: [PATCH 014/143] Add Merkle tree benchmark (#429) And one for a single Keccak hash --- plonky2/Cargo.toml | 4 ++++ plonky2/benches/hashing.rs | 15 +++++++++++++++ plonky2/benches/merkle.rs | 35 ++++++++++++++++++++++++++++++++++ plonky2/src/hash/hash_types.rs | 12 ++++++++++++ 4 files changed, 66 insertions(+) create mode 100644 plonky2/benches/merkle.rs diff --git a/plonky2/Cargo.toml b/plonky2/Cargo.toml index 54cf5c1f..bce9f400 100644 --- a/plonky2/Cargo.toml +++ b/plonky2/Cargo.toml @@ -48,6 +48,10 @@ harness = false name = "hashing" harness = false +[[bench]] +name = "merkle" +harness = false + [[bench]] name = "transpose" harness = false diff --git a/plonky2/benches/hashing.rs b/plonky2/benches/hashing.rs index b1193516..2d17dba1 100644 --- a/plonky2/benches/hashing.rs +++ b/plonky2/benches/hashing.rs @@ -1,10 +1,14 @@ +#![allow(incomplete_features)] #![feature(generic_const_exprs)] use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use plonky2::field::goldilocks_field::GoldilocksField; use plonky2::hash::gmimc::GMiMC; +use plonky2::hash::hash_types::{BytesHash, RichField}; use plonky2::hash::hashing::SPONGE_WIDTH; +use plonky2::hash::keccak::KeccakHash; use plonky2::hash::poseidon::Poseidon; +use plonky2::plonk::config::Hasher; use tynm::type_name; pub(crate) fn bench_gmimc, const WIDTH: usize>(c: &mut Criterion) { @@ -17,6 +21,16 @@ pub(crate) fn bench_gmimc, const WIDTH: usize>(c: &mut Criterion }); } +pub(crate) fn bench_keccak(c: &mut Criterion) { + c.bench_function("keccak256", |b| { + b.iter_batched( + || (BytesHash::<32>::rand(), BytesHash::<32>::rand()), + |(left, right)| as Hasher>::two_to_one(left, right), + BatchSize::SmallInput, + ) + }); +} + pub(crate) fn bench_poseidon(c: &mut Criterion) { c.bench_function( &format!("poseidon<{}, {}>", type_name::(), SPONGE_WIDTH), @@ -33,6 +47,7 @@ pub(crate) fn bench_poseidon(c: &mut Criterion) { fn criterion_benchmark(c: &mut Criterion) { bench_gmimc::(c); bench_poseidon::(c); + bench_keccak::(c); } criterion_group!(benches, criterion_benchmark); diff --git a/plonky2/benches/merkle.rs b/plonky2/benches/merkle.rs new file mode 100644 index 00000000..7445682b --- /dev/null +++ b/plonky2/benches/merkle.rs @@ -0,0 +1,35 @@ +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use plonky2::field::goldilocks_field::GoldilocksField; +use plonky2::hash::hash_types::RichField; +use plonky2::hash::keccak::KeccakHash; +use plonky2::hash::merkle_tree::MerkleTree; +use 
plonky2::hash::poseidon::PoseidonHash; +use plonky2::plonk::config::Hasher; +use tynm::type_name; + +const ELEMS_PER_LEAF: usize = 135; + +pub(crate) fn bench_merkle_tree>(c: &mut Criterion) { + let mut group = c.benchmark_group(&format!( + "merkle-tree<{}, {}>", + type_name::(), + type_name::() + )); + group.sample_size(10); + + for size_log in [13, 14, 15] { + let size = 1 << size_log; + group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| { + let leaves = vec![F::rand_vec(ELEMS_PER_LEAF); size]; + b.iter(|| MerkleTree::::new(leaves.clone(), 0)); + }); + } +} + +fn criterion_benchmark(c: &mut Criterion) { + bench_merkle_tree::(c); + bench_merkle_tree::>(c); +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); diff --git a/plonky2/src/hash/hash_types.rs b/plonky2/src/hash/hash_types.rs index 1cfa7de8..82b8ceea 100644 --- a/plonky2/src/hash/hash_types.rs +++ b/plonky2/src/hash/hash_types.rs @@ -123,6 +123,18 @@ pub struct MerkleCapTarget(pub Vec); #[derive(Eq, PartialEq, Copy, Clone, Debug)] pub struct BytesHash(pub [u8; N]); +impl BytesHash { + pub fn rand_from_rng(rng: &mut R) -> Self { + let mut buf = [0; N]; + rng.fill_bytes(&mut buf); + Self(buf) + } + + pub fn rand() -> Self { + Self::rand_from_rng(&mut rand::thread_rng()) + } +} + impl GenericHashOut for BytesHash { fn to_bytes(&self) -> Vec { self.0.to_vec() From 0ff836582772a28c5b48044afa50e15164640081 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Wed, 12 Jan 2022 19:07:14 -0800 Subject: [PATCH 015/143] timing --- plonky2/src/plonk/circuit_builder.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index 3d3c7197..d5a2192b 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -42,6 +42,7 @@ use crate::plonk::config::{GenericConfig, Hasher}; use crate::plonk::copy_constraint::CopyConstraint; use crate::plonk::permutation_argument::Forest; use crate::plonk::plonk_common::PlonkOracle; +use crate::timed; use crate::util::context_tree::ContextTree; use crate::util::marking::{Markable, MarkedTargets}; use crate::util::partial_products::num_partial_products; @@ -635,10 +636,18 @@ impl, const D: usize> CircuitBuilder { let subgroup = F::two_adic_subgroup(degree_bits); - let constant_vecs = self.constant_polys(&prefixed_gates, num_constants); + let constant_vecs = timed!( + &mut timing, + "generate constant polynomials", + self.constant_polys(&prefixed_gates, num_constants) + ); let k_is = get_unique_coset_shifts(degree, self.config.num_routed_wires); - let (sigma_vecs, forest) = self.sigma_vecs(&k_is, &subgroup); + let (sigma_vecs, forest) = timed!( + &mut timing, + "generate sigma polynomials", + self.sigma_vecs(&k_is, &subgroup) + ); // Precompute FFT roots. 
let max_fft_points = 1 << (degree_bits + max(rate_bits, log2_ceil(quotient_degree_factor))); @@ -732,6 +741,7 @@ impl, const D: usize> CircuitBuilder { circuit_digest, }; + timing.print(); debug!("Building circuit took {}s", start.elapsed().as_secs_f32()); CircuitData { prover_only, From fe5a30ede12ccd2b3f6b51db3580714fc73df88f Mon Sep 17 00:00:00 2001 From: Sebastien La Duca Date: Thu, 13 Jan 2022 15:12:59 -0500 Subject: [PATCH 016/143] make HashOutTarget internals public (#430) --- plonky2/src/hash/hash_types.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plonky2/src/hash/hash_types.rs b/plonky2/src/hash/hash_types.rs index 82b8ceea..674486c8 100644 --- a/plonky2/src/hash/hash_types.rs +++ b/plonky2/src/hash/hash_types.rs @@ -94,18 +94,18 @@ impl Default for HashOut { /// Represents a ~256 bit hash output. #[derive(Copy, Clone, Debug)] pub struct HashOutTarget { - pub(crate) elements: [Target; 4], + pub elements: [Target; 4], } impl HashOutTarget { - pub(crate) fn from_vec(elements: Vec) -> Self { + pub fn from_vec(elements: Vec) -> Self { debug_assert!(elements.len() == 4); Self { elements: elements.try_into().unwrap(), } } - pub(crate) fn from_partial(mut elements: Vec, zero: Target) -> Self { + pub fn from_partial(mut elements: Vec, zero: Target) -> Self { debug_assert!(elements.len() <= 4); while elements.len() < 4 { elements.push(zero); From fe0c232d6d63dccd4c037a148b4e63850fde01fc Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 14 Jan 2022 07:56:06 +0100 Subject: [PATCH 017/143] Working (not yet for recursion) --- plonky2/src/fri/oracle.rs | 6 ++++++ plonky2/src/fri/verifier.rs | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/plonky2/src/fri/oracle.rs b/plonky2/src/fri/oracle.rs index 2d42e899..9c51bc91 100644 --- a/plonky2/src/fri/oracle.rs +++ b/plonky2/src/fri/oracle.rs @@ -154,6 +154,12 @@ impl, C: GenericConfig, const D: usize> alpha.shift_poly(&mut final_poly); final_poly += quotient; } + final_poly.trim(); + let mut final_poly_coeffs = final_poly.coeffs; + final_poly_coeffs.insert(0, F::Extension::ZERO); + final_poly = PolynomialCoeffs { + coeffs: final_poly_coeffs, + }; let lde_final_poly = final_poly.lde(fri_params.config.rate_bits); let lde_final_values = timed!( diff --git a/plonky2/src/fri/verifier.rs b/plonky2/src/fri/verifier.rs index e95cb80a..2a51f6d9 100644 --- a/plonky2/src/fri/verifier.rs +++ b/plonky2/src/fri/verifier.rs @@ -160,7 +160,7 @@ pub(crate) fn fri_combine_initial< sum += numerator / denominator; } - sum + sum * subgroup_x } fn fri_verifier_query_round< From 2aa46e148c05aa3cc94024c5552baaa37f1df6af Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Sun, 16 Jan 2022 15:15:25 -0800 Subject: [PATCH 018/143] Optimize + test log2 functions (#434) * Speed up log2 functions * Move tests to `util` crate * Trick --- util/src/lib.rs | 64 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/util/src/lib.rs b/util/src/lib.rs index 8cc60a27..6dc32cb5 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -17,14 +17,16 @@ pub const fn ceil_div_usize(a: usize, b: usize) -> usize { } /// Computes `ceil(log_2(n))`. +#[must_use] pub fn log2_ceil(n: usize) -> usize { - n.next_power_of_two().trailing_zeros() as usize + (usize::BITS - n.saturating_sub(1).leading_zeros()) as usize } /// Computes `log_2(n)`, panicking if `n` is not a power of two. 
pub fn log2_strict(n: usize) -> usize { - assert!(n.is_power_of_two(), "Not a power of two: {}", n); - log2_ceil(n) + let res = n.trailing_zeros(); + assert!(n.wrapping_shr(res) == 1, "Not a power of two: {}", n); + res as usize } /// Permutes `arr` such that each index is mapped to its reverse in binary. @@ -171,3 +173,59 @@ pub fn branch_hint() { asm!("", options(nomem, nostack, preserves_flags)); } } + +#[cfg(test)] +mod tests { + use crate::{log2_ceil, log2_strict}; + + #[test] + fn test_log2_strict() { + assert_eq!(log2_strict(1), 0); + assert_eq!(log2_strict(2), 1); + assert_eq!(log2_strict(1 << 18), 18); + assert_eq!(log2_strict(1 << 31), 31); + assert_eq!( + log2_strict(1 << (usize::BITS - 1)), + usize::BITS as usize - 1 + ); + } + + #[test] + #[should_panic] + fn test_log2_strict_zero() { + log2_strict(0); + } + + #[test] + #[should_panic] + fn test_log2_strict_nonpower_2() { + log2_strict(0x78c341c65ae6d262); + } + + #[test] + #[should_panic] + fn test_log2_strict_usize_max() { + log2_strict(usize::MAX); + } + + #[test] + fn test_log2_ceil() { + // Powers of 2 + assert_eq!(log2_ceil(0), 0); + assert_eq!(log2_ceil(1), 0); + assert_eq!(log2_ceil(2), 1); + assert_eq!(log2_ceil(1 << 18), 18); + assert_eq!(log2_ceil(1 << 31), 31); + assert_eq!(log2_ceil(1 << (usize::BITS - 1)), usize::BITS as usize - 1); + + // Nonpowers; want to round up + assert_eq!(log2_ceil(3), 2); + assert_eq!(log2_ceil(0x14fe901b), 29); + assert_eq!( + log2_ceil((1 << (usize::BITS - 1)) + 1), + usize::BITS as usize + ); + assert_eq!(log2_ceil(usize::MAX - 1), usize::BITS as usize); + assert_eq!(log2_ceil(usize::MAX), usize::BITS as usize); + } +} From ec474efe12bd81ac170be605b772b6f99ee32366 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 17 Jan 2022 06:25:03 +0100 Subject: [PATCH 019/143] Minor --- plonky2/src/fri/oracle.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/plonky2/src/fri/oracle.rs b/plonky2/src/fri/oracle.rs index 9c51bc91..27d3e129 100644 --- a/plonky2/src/fri/oracle.rs +++ b/plonky2/src/fri/oracle.rs @@ -155,11 +155,7 @@ impl, C: GenericConfig, const D: usize> final_poly += quotient; } final_poly.trim(); - let mut final_poly_coeffs = final_poly.coeffs; - final_poly_coeffs.insert(0, F::Extension::ZERO); - final_poly = PolynomialCoeffs { - coeffs: final_poly_coeffs, - }; + final_poly.coeffs.insert(0, F::Extension::ZERO); let lde_final_poly = final_poly.lde(fri_params.config.rate_bits); let lde_final_values = timed!( From 6f65620ff2b467b8824ebd61a005501dd1138fa9 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 17 Jan 2022 06:33:23 +0100 Subject: [PATCH 020/143] Add fix for recursive verifier. --- plonky2/src/fri/oracle.rs | 2 ++ plonky2/src/fri/recursive_verifier.rs | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/plonky2/src/fri/oracle.rs b/plonky2/src/fri/oracle.rs index 27d3e129..88b0298b 100644 --- a/plonky2/src/fri/oracle.rs +++ b/plonky2/src/fri/oracle.rs @@ -155,6 +155,8 @@ impl, C: GenericConfig, const D: usize> final_poly += quotient; } final_poly.trim(); + // Multiply the final polynomial by `X`, so that `final_poly` has the maximum degree for + // which the LDT will pass. See github.com/mir-protocol/plonky2/pull/434 for details. 
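+        // (Inserting a zero constant term shifts every existing coefficient up by one degree,
+        // which is exactly multiplication by `X` in coefficient form.)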
final_poly.coeffs.insert(0, F::Extension::ZERO); let lde_final_poly = final_poly.lde(fri_params.config.rate_bits); diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index 526456b6..2a59d457 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -281,7 +281,7 @@ impl, const D: usize> CircuitBuilder { sum = self.div_add_extension(numerator, denominator, sum); } - sum + self.mul_extension(sum, subgroup_x) } fn fri_verifier_query_round>( From 2bb0c4f4e7dffc45a77111e7cc3545f34f6aa0a7 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 17 Jan 2022 06:44:05 +0100 Subject: [PATCH 021/143] Fix comment --- plonky2/src/fri/oracle.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plonky2/src/fri/oracle.rs b/plonky2/src/fri/oracle.rs index 88b0298b..f1dc84eb 100644 --- a/plonky2/src/fri/oracle.rs +++ b/plonky2/src/fri/oracle.rs @@ -156,7 +156,7 @@ impl, C: GenericConfig, const D: usize> } final_poly.trim(); // Multiply the final polynomial by `X`, so that `final_poly` has the maximum degree for - // which the LDT will pass. See github.com/mir-protocol/plonky2/pull/434 for details. + // which the LDT will pass. See github.com/mir-protocol/plonky2/pull/436 for details. final_poly.coeffs.insert(0, F::Extension::ZERO); let lde_final_poly = final_poly.lde(fri_params.config.rate_bits); From fcdcc86569c9ae1d64e32a7a4f0e817a51445dbb Mon Sep 17 00:00:00 2001 From: Hamish Ivey-Law <426294+unzvfu@users.noreply.github.com> Date: Tue, 18 Jan 2022 11:41:08 +1100 Subject: [PATCH 022/143] Move profile defns to root workspace toml. (#437) --- Cargo.toml | 8 ++++++++ plonky2/Cargo.toml | 8 -------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c36e3023..2bd67bb6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,2 +1,10 @@ [workspace] members = ["field", "insertion", "plonky2", "util", "waksman"] + +[profile.release] +opt-level = 3 +#lto = "fat" +#codegen-units = 1 + +[profile.bench] +opt-level = 3 diff --git a/plonky2/Cargo.toml b/plonky2/Cargo.toml index bce9f400..466d3df6 100644 --- a/plonky2/Cargo.toml +++ b/plonky2/Cargo.toml @@ -55,11 +55,3 @@ harness = false [[bench]] name = "transpose" harness = false - -[profile.release] -opt-level = 3 -#lto = "fat" -#codegen-units = 1 - -[profile.bench] -opt-level = 3 From dcf63f536eb31342bfb6812b07125c900b3e0fc3 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Tue, 18 Jan 2022 12:51:04 -0800 Subject: [PATCH 023/143] Have hash functions take references to avoid cloning (#438) And other tweaks to `MerkleTree::new` --- plonky2/src/fri/prover.rs | 5 +++-- plonky2/src/hash/gmimc.rs | 2 +- plonky2/src/hash/hashing.rs | 16 +++++++++------- plonky2/src/hash/keccak.rs | 4 ++-- plonky2/src/hash/merkle_proofs.rs | 2 +- plonky2/src/hash/merkle_tree.rs | 23 +++++++++++++---------- plonky2/src/hash/path_compression.rs | 2 +- plonky2/src/hash/poseidon.rs | 2 +- plonky2/src/plonk/circuit_builder.rs | 2 +- plonky2/src/plonk/config.rs | 2 +- plonky2/src/plonk/get_challenges.rs | 5 +++-- plonky2/src/plonk/proof.rs | 4 ++-- plonky2/src/plonk/prover.rs | 2 +- 13 files changed, 39 insertions(+), 32 deletions(-) diff --git a/plonky2/src/fri/prover.rs b/plonky2/src/fri/prover.rs index 43674ad1..d2731600 100644 --- a/plonky2/src/fri/prover.rs +++ b/plonky2/src/fri/prover.rs @@ -1,3 +1,4 @@ +use itertools::Itertools; use plonky2_field::extension_field::{flatten, unflatten, Extendable}; use plonky2_field::polynomial::{PolynomialCoeffs, PolynomialValues}; 
use plonky2_util::reverse_index_bits_in_place; @@ -116,12 +117,12 @@ fn fri_proof_of_work, C: GenericConfig, c .into_par_iter() .find_any(|&i| { C::InnerHasher::hash( - current_hash + ¤t_hash .elements .iter() .copied() .chain(Some(F::from_canonical_u64(i))) - .collect(), + .collect_vec(), false, ) .elements[0] diff --git a/plonky2/src/hash/gmimc.rs b/plonky2/src/hash/gmimc.rs index 3492e08f..050bdeec 100644 --- a/plonky2/src/hash/gmimc.rs +++ b/plonky2/src/hash/gmimc.rs @@ -107,7 +107,7 @@ impl Hasher for GMiMCHash { type Hash = HashOut; type Permutation = GMiMCPermutation; - fn hash(input: Vec, pad: bool) -> Self::Hash { + fn hash(input: &[F], pad: bool) -> Self::Hash { hash_n_to_hash::(input, pad) } diff --git a/plonky2/src/hash/hashing.rs b/plonky2/src/hash/hashing.rs index 2f6a725c..0867eaa8 100644 --- a/plonky2/src/hash/hashing.rs +++ b/plonky2/src/hash/hashing.rs @@ -18,7 +18,7 @@ pub fn hash_or_noop>(inputs: Vec) -> Ha if inputs.len() <= 4 { HashOut::from_partial(inputs) } else { - hash_n_to_hash::(inputs, false) + hash_n_to_hash::(&inputs, false) } } @@ -101,16 +101,18 @@ pub trait PlonkyPermutation { /// for the hash to be secure, but it can safely be disabled in certain cases, like if the input /// length is fixed. pub fn hash_n_to_m>( - mut inputs: Vec, + inputs: &[F], num_outputs: usize, pad: bool, ) -> Vec { if pad { - inputs.push(F::ZERO); - while (inputs.len() + 1) % SPONGE_WIDTH != 0 { - inputs.push(F::ONE); + let mut padded_inputs = inputs.to_vec(); + padded_inputs.push(F::ZERO); + while (padded_inputs.len() + 1) % SPONGE_WIDTH != 0 { + padded_inputs.push(F::ONE); } - inputs.push(F::ZERO); + padded_inputs.push(F::ZERO); + return hash_n_to_m::(&padded_inputs, num_outputs, false); } let mut state = [F::ZERO; SPONGE_WIDTH]; @@ -135,7 +137,7 @@ pub fn hash_n_to_m>( } pub fn hash_n_to_hash>( - inputs: Vec, + inputs: &[F], pad: bool, ) -> HashOut { HashOut::from_vec(hash_n_to_m::(inputs, 4, pad)) diff --git a/plonky2/src/hash/keccak.rs b/plonky2/src/hash/keccak.rs index 78cf5dc3..a537f5e3 100644 --- a/plonky2/src/hash/keccak.rs +++ b/plonky2/src/hash/keccak.rs @@ -56,9 +56,9 @@ impl Hasher for KeccakHash { type Hash = BytesHash; type Permutation = KeccakPermutation; - fn hash(input: Vec, _pad: bool) -> Self::Hash { + fn hash(input: &[F], _pad: bool) -> Self::Hash { let mut buffer = Buffer::new(Vec::new()); - buffer.write_field_vec(&input).unwrap(); + buffer.write_field_vec(input).unwrap(); let mut arr = [0; N]; let hash_bytes = keccak(buffer.bytes()).0; arr.copy_from_slice(&hash_bytes[..N]); diff --git a/plonky2/src/hash/merkle_proofs.rs b/plonky2/src/hash/merkle_proofs.rs index 543c06fd..60fe236a 100644 --- a/plonky2/src/hash/merkle_proofs.rs +++ b/plonky2/src/hash/merkle_proofs.rs @@ -32,7 +32,7 @@ pub(crate) fn verify_merkle_proof>( proof: &MerkleProof, ) -> Result<()> { let mut index = leaf_index; - let mut current_digest = H::hash(leaf_data, false); + let mut current_digest = H::hash(&leaf_data, false); for &sibling_digest in proof.siblings.iter() { let bit = index & 1; index >>= 1; diff --git a/plonky2/src/hash/merkle_tree.rs b/plonky2/src/hash/merkle_tree.rs index 88c1ebdc..8f191366 100644 --- a/plonky2/src/hash/merkle_tree.rs +++ b/plonky2/src/hash/merkle_tree.rs @@ -36,21 +36,24 @@ pub struct MerkleTree> { impl> MerkleTree { pub fn new(leaves: Vec>, cap_height: usize) -> Self { - let mut layers = vec![leaves + let mut current_layer = leaves .par_iter() - .map(|l| H::hash(l.clone(), false)) - .collect::>()]; - while let Some(l) = layers.last() { - if l.len() == 1 << 
cap_height { - break; + .map(|l| H::hash(l, false)) + .collect::>(); + + let mut layers = vec![]; + let cap = loop { + if current_layer.len() == 1 << cap_height { + break current_layer; } - let next_layer = l + let next_layer = current_layer .par_chunks(2) .map(|chunk| H::two_to_one(chunk[0], chunk[1])) .collect::>(); - layers.push(next_layer); - } - let cap = layers.pop().unwrap(); + layers.push(current_layer); + current_layer = next_layer; + }; + Self { leaves, layers, diff --git a/plonky2/src/hash/path_compression.rs b/plonky2/src/hash/path_compression.rs index 75c63331..c5c3f36e 100644 --- a/plonky2/src/hash/path_compression.rs +++ b/plonky2/src/hash/path_compression.rs @@ -66,7 +66,7 @@ pub(crate) fn decompress_merkle_proofs>( for (&i, v) in leaves_indices.iter().zip(leaves_data) { // Observe the leaves. - seen.insert(i + num_leaves, H::hash(v.to_vec(), false)); + seen.insert(i + num_leaves, H::hash(v, false)); } // Iterators over the siblings. diff --git a/plonky2/src/hash/poseidon.rs b/plonky2/src/hash/poseidon.rs index 606dfd13..81fc3937 100644 --- a/plonky2/src/hash/poseidon.rs +++ b/plonky2/src/hash/poseidon.rs @@ -633,7 +633,7 @@ impl Hasher for PoseidonHash { type Hash = HashOut; type Permutation = PoseidonPermutation; - fn hash(input: Vec, pad: bool) -> Self::Hash { + fn hash(input: &[F], pad: bool) -> Self::Hash { hash_n_to_hash::(input, pad) } diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index d5a2192b..33b44054 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -725,7 +725,7 @@ impl, const D: usize> CircuitBuilder { constants_sigmas_cap.flatten(), vec![/* Add other circuit data here */], ]; - let circuit_digest = C::Hasher::hash(circuit_digest_parts.concat(), false); + let circuit_digest = C::Hasher::hash(&circuit_digest_parts.concat(), false); let common = CommonCircuitData { config: self.config, diff --git a/plonky2/src/plonk/config.rs b/plonky2/src/plonk/config.rs index 461a9573..72d5487e 100644 --- a/plonky2/src/plonk/config.rs +++ b/plonky2/src/plonk/config.rs @@ -32,7 +32,7 @@ pub trait Hasher: Sized + Clone + Debug + Eq + PartialEq { /// Permutation used in the sponge construction. 
type Permutation: PlonkyPermutation; - fn hash(input: Vec, pad: bool) -> Self::Hash; + fn hash(input: &[F], pad: bool) -> Self::Hash; fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash; } diff --git a/plonky2/src/plonk/get_challenges.rs b/plonky2/src/plonk/get_challenges.rs index c340cef9..d28f29da 100644 --- a/plonky2/src/plonk/get_challenges.rs +++ b/plonky2/src/plonk/get_challenges.rs @@ -1,5 +1,6 @@ use std::collections::HashSet; +use itertools::Itertools; use plonky2_field::extension_field::Extendable; use plonky2_field::polynomial::PolynomialCoeffs; @@ -65,13 +66,13 @@ fn get_challenges, C: GenericConfig, cons challenger.observe_extension_elements(&final_poly.coeffs); let fri_pow_response = C::InnerHasher::hash( - challenger + &challenger .get_hash() .elements .iter() .copied() .chain(Some(pow_witness)) - .collect(), + .collect_vec(), false, ) .elements[0]; diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index cd49de89..07ca7c9e 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -90,7 +90,7 @@ impl, C: GenericConfig, const D: usize> pub(crate) fn get_public_inputs_hash( &self, ) -> <>::InnerHasher as Hasher>::Hash { - C::InnerHasher::hash(self.public_inputs.clone(), true) + C::InnerHasher::hash(&self.public_inputs, true) } pub fn to_bytes(&self) -> anyhow::Result> { @@ -206,7 +206,7 @@ impl, C: GenericConfig, const D: usize> pub(crate) fn get_public_inputs_hash( &self, ) -> <>::InnerHasher as Hasher>::Hash { - C::InnerHasher::hash(self.public_inputs.clone(), true) + C::InnerHasher::hash(&self.public_inputs, true) } pub fn to_bytes(&self) -> anyhow::Result> { diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index 64730ea3..0dd2aba2 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -43,7 +43,7 @@ pub(crate) fn prove, C: GenericConfig, co ); let public_inputs = partition_witness.get_targets(&prover_data.public_inputs); - let public_inputs_hash = C::InnerHasher::hash(public_inputs.clone(), true); + let public_inputs_hash = C::InnerHasher::hash(&public_inputs, true); if cfg!(debug_assertions) { // Display the marked targets for debugging purposes. From 27ebc21faff9f5287278d24a95fc23494d7d5233 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Wed, 19 Jan 2022 11:57:46 +0100 Subject: [PATCH 024/143] Add comments for LDT fix in verifier --- plonky2/src/fri/recursive_verifier.rs | 2 ++ plonky2/src/fri/verifier.rs | 2 ++ 2 files changed, 4 insertions(+) diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index 2a59d457..63d07035 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -281,6 +281,8 @@ impl, const D: usize> CircuitBuilder { sum = self.div_add_extension(numerator, denominator, sum); } + // Multiply the final polynomial by `X`, so that `final_poly` has the maximum degree for + // which the LDT will pass. See github.com/mir-protocol/plonky2/pull/436 for details. self.mul_extension(sum, subgroup_x) } diff --git a/plonky2/src/fri/verifier.rs b/plonky2/src/fri/verifier.rs index 2a51f6d9..40d1ab25 100644 --- a/plonky2/src/fri/verifier.rs +++ b/plonky2/src/fri/verifier.rs @@ -160,6 +160,8 @@ pub(crate) fn fri_combine_initial< sum += numerator / denominator; } + // Multiply the final polynomial by `X`, so that `final_poly` has the maximum degree for + // which the LDT will pass. See github.com/mir-protocol/plonky2/pull/436 for details. 
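+    // Multiplying the combined evaluation by the query point `subgroup_x` applies that same
+    // degree shift on the verifier side, so both sides evaluate the `X`-scaled polynomial.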
sum * subgroup_x } From 5255c04c700c4cc4a63abf47353d8c92389cbbcc Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Wed, 19 Jan 2022 12:31:20 +0100 Subject: [PATCH 025/143] Remove `compute_quotient` and update division tests --- field/src/polynomial/division.rs | 52 +++++++------------------------- plonky2/src/fri/oracle.rs | 27 +---------------- 2 files changed, 12 insertions(+), 67 deletions(-) diff --git a/field/src/polynomial/division.rs b/field/src/polynomial/division.rs index 4f3cafae..d761ab50 100644 --- a/field/src/polynomial/division.rs +++ b/field/src/polynomial/division.rs @@ -67,9 +67,9 @@ impl PolynomialCoeffs { } } - /// Let `self=p(X)`, this returns `(p(X)-p(z))/(X-z)` and `p(z)`. + /// Let `self=p(X)`, this returns `(p(X)-p(z))/(X-z)`. /// See https://en.wikipedia.org/wiki/Horner%27s_method - pub fn divide_by_linear(&self, z: F) -> (PolynomialCoeffs, F) { + pub fn divide_by_linear(&self, z: F) -> PolynomialCoeffs { let mut bs = self .coeffs .iter() @@ -79,9 +79,9 @@ impl PolynomialCoeffs { Some(*acc) }) .collect::>(); - let ev = bs.pop().unwrap_or(F::ZERO); + bs.pop(); bs.reverse(); - (Self { coeffs: bs }, ev) + Self { coeffs: bs } } /// Computes the inverse of `self` modulo `x^n`. @@ -125,7 +125,7 @@ impl PolynomialCoeffs { #[cfg(test)] mod tests { - use std::time::Instant; + use rand::{thread_rng, Rng}; use crate::extension_field::quartic::QuarticExtension; use crate::field_types::Field; @@ -133,47 +133,17 @@ mod tests { use crate::polynomial::PolynomialCoeffs; #[test] - #[ignore] fn test_division_by_linear() { type F = QuarticExtension; - let n = 1_000_000; + let n = thread_rng().gen_range(1..1000); let poly = PolynomialCoeffs::new(F::rand_vec(n)); let z = F::rand(); let ev = poly.eval(z); - let timer = Instant::now(); - let (_quotient, ev2) = poly.div_rem(&PolynomialCoeffs::new(vec![-z, F::ONE])); - println!("{:.3}s for usual", timer.elapsed().as_secs_f32()); - assert_eq!(ev2.trimmed().coeffs, vec![ev]); - - let timer = Instant::now(); - let (quotient, ev3) = poly.div_rem_long_division(&PolynomialCoeffs::new(vec![-z, F::ONE])); - println!("{:.3}s for long division", timer.elapsed().as_secs_f32()); - assert_eq!(ev3.trimmed().coeffs, vec![ev]); - - let timer = Instant::now(); - let horn = poly.divide_by_linear(z); - println!("{:.3}s for Horner", timer.elapsed().as_secs_f32()); - assert_eq!((quotient, ev), horn); - } - - #[test] - #[ignore] - fn test_division_by_quadratic() { - type F = QuarticExtension; - let n = 1_000_000; - let poly = PolynomialCoeffs::new(F::rand_vec(n)); - let quad = PolynomialCoeffs::new(F::rand_vec(2)); - - let timer = Instant::now(); - let (quotient0, rem0) = poly.div_rem(&quad); - println!("{:.3}s for usual", timer.elapsed().as_secs_f32()); - - let timer = Instant::now(); - let (quotient1, rem1) = poly.div_rem_long_division(&quad); - println!("{:.3}s for long division", timer.elapsed().as_secs_f32()); - - assert_eq!(quotient0.trimmed(), quotient1.trimmed()); - assert_eq!(rem0.trimmed(), rem1.trimmed()); + let quotient = poly.divide_by_linear(z); + assert_eq!( + poly, + &("ient * &vec![-z, F::ONE].into()) + &vec![ev].into() // `quotient * (X-z) + ev` + ); } } diff --git a/plonky2/src/fri/oracle.rs b/plonky2/src/fri/oracle.rs index f1dc84eb..c705e125 100644 --- a/plonky2/src/fri/oracle.rs +++ b/plonky2/src/fri/oracle.rs @@ -150,11 +150,10 @@ impl, C: GenericConfig, const D: usize> &format!("reduce batch of {} polynomials", polynomials.len()), alpha.reduce_polys_base(polys_coeff) ); - let quotient = Self::compute_quotient([*point], composition_poly); + 
let quotient = composition_poly.divide_by_linear(*point); alpha.shift_poly(&mut final_poly); final_poly += quotient; } - final_poly.trim(); // Multiply the final polynomial by `X`, so that `final_poly` has the maximum degree for // which the LDT will pass. See github.com/mir-protocol/plonky2/pull/436 for details. final_poly.coeffs.insert(0, F::Extension::ZERO); @@ -180,28 +179,4 @@ impl, C: GenericConfig, const D: usize> fri_proof } - - /// Given `points=(x_i)`, `evals=(y_i)` and `poly=P` with `P(x_i)=y_i`, computes the polynomial - /// `Q=(P-I)/Z` where `I` interpolates `(x_i, y_i)` and `Z` is the vanishing polynomial on `(x_i)`. - fn compute_quotient( - points: [F::Extension; N], - poly: PolynomialCoeffs, - ) -> PolynomialCoeffs { - let quotient = if N == 1 { - poly.divide_by_linear(points[0]).0 - } else if N == 2 { - // The denominator is `(X - p0)(X - p1) = p0 p1 - (p0 + p1) X + X^2`. - let denominator = vec![ - points[0] * points[1], - -points[0] - points[1], - F::Extension::ONE, - ] - .into(); - poly.div_rem_long_division(&denominator).0 // Could also use `divide_by_linear` twice. - } else { - unreachable!("This shouldn't happen. Plonk should open polynomials at 1 or 2 points.") - }; - - quotient.padded(quotient.degree_plus_one().next_power_of_two()) - } } From f98a6adfbf285e701e87a90a8cca572011f52805 Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Wed, 19 Jan 2022 17:51:20 -0800 Subject: [PATCH 026/143] Bit-order reversal benchmarks (#441) --- plonky2/Cargo.toml | 4 ++++ plonky2/benches/reverse_index_bits.rs | 30 +++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 plonky2/benches/reverse_index_bits.rs diff --git a/plonky2/Cargo.toml b/plonky2/Cargo.toml index 466d3df6..28d5b210 100644 --- a/plonky2/Cargo.toml +++ b/plonky2/Cargo.toml @@ -55,3 +55,7 @@ harness = false [[bench]] name = "transpose" harness = false + +[[bench]] +name = "reverse_index_bits" +harness = false diff --git a/plonky2/benches/reverse_index_bits.rs b/plonky2/benches/reverse_index_bits.rs new file mode 100644 index 00000000..90f1e285 --- /dev/null +++ b/plonky2/benches/reverse_index_bits.rs @@ -0,0 +1,30 @@ +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use plonky2::field::field_types::Field; +use plonky2::field::goldilocks_field::GoldilocksField; +use plonky2_util::{reverse_index_bits, reverse_index_bits_in_place}; + +type F = GoldilocksField; + +fn benchmark_in_place(c: &mut Criterion) { + let mut group = c.benchmark_group("reverse-index-bits-in-place"); + for width in [1 << 8, 1 << 16, 1 << 24] { + group.bench_with_input(BenchmarkId::from_parameter(width), &width, |b, _| { + let mut values = F::rand_vec(width); + b.iter(|| reverse_index_bits_in_place(&mut values)); + }); + } +} + +fn benchmark_out_of_place(c: &mut Criterion) { + let mut group = c.benchmark_group("reverse-index-bits"); + for width in [1 << 8, 1 << 16, 1 << 24] { + group.bench_with_input(BenchmarkId::from_parameter(width), &width, |b, _| { + let values = F::rand_vec(width); + b.iter(|| reverse_index_bits(&values)); + }); + } +} + +criterion_group!(benches_in_place, benchmark_in_place); +criterion_group!(benches_out_of_place, benchmark_out_of_place); +criterion_main!(benches_in_place, benches_out_of_place); From d69220e2a78561a8e70f8b59c3fec1c402b5d697 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Fri, 21 Jan 2022 10:06:40 -0800 Subject: [PATCH 027/143] metadata --- plonky2/Cargo.toml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/plonky2/Cargo.toml 
b/plonky2/Cargo.toml index 28d5b210..af82a622 100644 --- a/plonky2/Cargo.toml +++ b/plonky2/Cargo.toml @@ -1,12 +1,11 @@ [package] name = "plonky2" -description = "Recursive SNARKs based on Plonk and FRI" +description = "Recursive SNARKs based on PLONK and FRI" version = "0.1.0" -authors = ["Daniel Lubarov "] +authors = ["Polygon Zero "] readme = "README.md" -license = "MIT OR Apache-2.0" repository = "https://github.com/mir-protocol/plonky2" -keywords = ["cryptography", "SNARK", "FRI"] +keywords = ["cryptography", "SNARK", "PLONK", "FRI"] categories = ["cryptography"] edition = "2021" default-run = "generate_constants" From 2e3a682bdebd1bb02599b61d0a628b3ef6dfab9a Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Fri, 21 Jan 2022 10:14:44 -0800 Subject: [PATCH 028/143] metadata --- field/Cargo.toml | 1 + util/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/field/Cargo.toml b/field/Cargo.toml index 1a974852..6abffc5d 100644 --- a/field/Cargo.toml +++ b/field/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "plonky2_field" +description = "Finite field arithmetic" version = "0.1.0" edition = "2021" diff --git a/util/Cargo.toml b/util/Cargo.toml index 4d6e735c..a1ab402a 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "plonky2_util" +description = "Utilities used by Plonky2" version = "0.1.0" edition = "2021" From 86dc4c933ae28e4ba7a3da3c595051bc8b31de7a Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Fri, 21 Jan 2022 10:26:43 -0800 Subject: [PATCH 029/143] Make all FFTs in-place (#439) * Make all FFTs in-place * Delete leftover marker --- field/src/fft.rs | 59 +++++++++++++++++++------------------ field/src/interpolation.rs | 2 +- field/src/polynomial/mod.rs | 18 +++++------ plonky2/benches/ffts.rs | 2 +- plonky2/src/fri/oracle.rs | 2 +- util/src/lib.rs | 6 ++-- 6 files changed, 45 insertions(+), 44 deletions(-) diff --git a/field/src/fft.rs b/field/src/fft.rs index 8428d3fb..c548d51e 100644 --- a/field/src/fft.rs +++ b/field/src/fft.rs @@ -1,7 +1,7 @@ use std::cmp::{max, min}; use std::option::Option; -use plonky2_util::{log2_strict, reverse_index_bits}; +use plonky2_util::{log2_strict, reverse_index_bits_in_place}; use unroll::unroll_for_loops; use crate::field_types::Field; @@ -34,10 +34,10 @@ pub fn fft_root_table(n: usize) -> FftRootTable { #[inline] fn fft_dispatch( - input: &[F], + input: &mut [F], zero_factor: Option, root_table: Option<&FftRootTable>, -) -> Vec { +) { let computed_root_table = if root_table.is_some() { None } else { @@ -45,33 +45,32 @@ fn fft_dispatch( }; let used_root_table = root_table.or(computed_root_table.as_ref()).unwrap(); - fft_classic(input, zero_factor.unwrap_or(0), used_root_table) + fft_classic(input, zero_factor.unwrap_or(0), used_root_table); } #[inline] -pub fn fft(poly: &PolynomialCoeffs) -> PolynomialValues { +pub fn fft(poly: PolynomialCoeffs) -> PolynomialValues { fft_with_options(poly, None, None) } #[inline] pub fn fft_with_options( - poly: &PolynomialCoeffs, + poly: PolynomialCoeffs, zero_factor: Option, root_table: Option<&FftRootTable>, ) -> PolynomialValues { - let PolynomialCoeffs { coeffs } = poly; - PolynomialValues { - values: fft_dispatch(coeffs, zero_factor, root_table), - } + let PolynomialCoeffs { coeffs: mut buffer } = poly; + fft_dispatch(&mut buffer, zero_factor, root_table); + PolynomialValues { values: buffer } } #[inline] -pub fn ifft(poly: &PolynomialValues) -> PolynomialCoeffs { +pub fn ifft(poly: PolynomialValues) -> PolynomialCoeffs { ifft_with_options(poly, None, None) } 
pub fn ifft_with_options( - poly: &PolynomialValues, + poly: PolynomialValues, zero_factor: Option, root_table: Option<&FftRootTable>, ) -> PolynomialCoeffs { @@ -79,20 +78,20 @@ pub fn ifft_with_options( let lg_n = log2_strict(n); let n_inv = F::inverse_2exp(lg_n); - let PolynomialValues { values } = poly; - let mut coeffs = fft_dispatch(values, zero_factor, root_table); + let PolynomialValues { values: mut buffer } = poly; + fft_dispatch(&mut buffer, zero_factor, root_table); // We reverse all values except the first, and divide each by n. - coeffs[0] *= n_inv; - coeffs[n / 2] *= n_inv; + buffer[0] *= n_inv; + buffer[n / 2] *= n_inv; for i in 1..(n / 2) { let j = n - i; - let coeffs_i = coeffs[j] * n_inv; - let coeffs_j = coeffs[i] * n_inv; - coeffs[i] = coeffs_i; - coeffs[j] = coeffs_j; + let coeffs_i = buffer[j] * n_inv; + let coeffs_j = buffer[i] * n_inv; + buffer[i] = coeffs_i; + buffer[j] = coeffs_j; } - PolynomialCoeffs { coeffs } + PolynomialCoeffs { coeffs: buffer } } /// Generic FFT implementation that works with both scalar and packed inputs. @@ -167,8 +166,8 @@ fn fft_classic_simd( /// The parameter r signifies that the first 1/2^r of the entries of /// input may be non-zero, but the last 1 - 1/2^r entries are /// definitely zero. -pub(crate) fn fft_classic(input: &[F], r: usize, root_table: &FftRootTable) -> Vec { - let mut values = reverse_index_bits(input); +pub(crate) fn fft_classic(values: &mut [F], r: usize, root_table: &FftRootTable) { + reverse_index_bits_in_place(values); let n = values.len(); let lg_n = log2_strict(n); @@ -200,11 +199,10 @@ pub(crate) fn fft_classic(input: &[F], r: usize, root_table: &FftRootT if lg_n <= lg_packed_width { // Need the slice to be at least the width of two packed vectors for the vectorized version // to work. Do this tiny problem in scalar. 
- fft_classic_simd::(&mut values[..], r, lg_n, root_table); + fft_classic_simd::(values, r, lg_n, root_table); } else { - fft_classic_simd::<::Packing>(&mut values[..], r, lg_n, root_table); + fft_classic_simd::<::Packing>(values, r, lg_n, root_table); } - values } #[cfg(test)] @@ -231,10 +229,10 @@ mod tests { assert_eq!(coeffs.len(), degree_padded); let coefficients = PolynomialCoeffs { coeffs }; - let points = fft(&coefficients); + let points = fft(coefficients.clone()); assert_eq!(points, evaluate_naive(&coefficients)); - let interpolated_coefficients = ifft(&points); + let interpolated_coefficients = ifft(points); for i in 0..degree { assert_eq!(interpolated_coefficients.coeffs[i], coefficients.coeffs[i]); } @@ -245,7 +243,10 @@ mod tests { for r in 0..4 { // expand coefficients by factor 2^r by filling with zeros let zero_tail = coefficients.lde(r); - assert_eq!(fft(&zero_tail), fft_with_options(&zero_tail, Some(r), None)); + assert_eq!( + fft(zero_tail.clone()), + fft_with_options(zero_tail, Some(r), None) + ); } } diff --git a/field/src/interpolation.rs b/field/src/interpolation.rs index ac6f6437..1a2e37df 100644 --- a/field/src/interpolation.rs +++ b/field/src/interpolation.rs @@ -19,7 +19,7 @@ pub fn interpolant(points: &[(F, F)]) -> PolynomialCoeffs { .map(|x| interpolate(points, x, &barycentric_weights)) .collect(); - let mut coeffs = ifft(&PolynomialValues { + let mut coeffs = ifft(PolynomialValues { values: subgroup_evals, }); coeffs.trim(); diff --git a/field/src/polynomial/mod.rs b/field/src/polynomial/mod.rs index 624e8212..4264c914 100644 --- a/field/src/polynomial/mod.rs +++ b/field/src/polynomial/mod.rs @@ -31,12 +31,12 @@ impl PolynomialValues { self.values.len() } - pub fn ifft(&self) -> PolynomialCoeffs { + pub fn ifft(self) -> PolynomialCoeffs { ifft(self) } /// Returns the polynomial whose evaluation on the coset `shift*H` is `self`. 
- pub fn coset_ifft(&self, shift: F) -> PolynomialCoeffs { + pub fn coset_ifft(self, shift: F) -> PolynomialCoeffs { let mut shifted_coeffs = self.ifft(); shifted_coeffs .coeffs @@ -52,9 +52,9 @@ impl PolynomialValues { polys.into_iter().map(|p| p.lde(rate_bits)).collect() } - pub fn lde(&self, rate_bits: usize) -> Self { + pub fn lde(self, rate_bits: usize) -> Self { let coeffs = ifft(self).lde(rate_bits); - fft_with_options(&coeffs, Some(rate_bits), None) + fft_with_options(coeffs, Some(rate_bits), None) } pub fn degree(&self) -> usize { @@ -64,7 +64,7 @@ impl PolynomialValues { } pub fn degree_plus_one(&self) -> usize { - self.ifft().degree_plus_one() + self.clone().ifft().degree_plus_one() } } @@ -213,12 +213,12 @@ impl PolynomialCoeffs { Self::new(self.trimmed().coeffs.into_iter().rev().collect()) } - pub fn fft(&self) -> PolynomialValues { + pub fn fft(self) -> PolynomialValues { fft(self) } pub fn fft_with_options( - &self, + self, zero_factor: Option, root_table: Option<&FftRootTable>, ) -> PolynomialValues { @@ -386,7 +386,7 @@ impl Mul for &PolynomialCoeffs { .zip(b_evals.values) .map(|(pa, pb)| pa * pb) .collect(); - ifft(&mul_evals.into()) + ifft(mul_evals.into()) } } @@ -454,7 +454,7 @@ mod tests { let n = 1 << k; let evals = PolynomialValues::new(F::rand_vec(n)); let shift = F::rand(); - let coeffs = evals.coset_ifft(shift); + let coeffs = evals.clone().coset_ifft(shift); let generator = F::primitive_root_of_unity(k); let naive_coset_evals = F::cyclic_subgroup_coset_known_order(generator, shift, n) diff --git a/plonky2/benches/ffts.rs b/plonky2/benches/ffts.rs index cfa02a25..63ac9c85 100644 --- a/plonky2/benches/ffts.rs +++ b/plonky2/benches/ffts.rs @@ -11,7 +11,7 @@ pub(crate) fn bench_ffts(c: &mut Criterion) { let size = 1 << size_log; group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| { let coeffs = PolynomialCoeffs::new(F::rand_vec(size)); - b.iter(|| coeffs.fft_with_options(None, None)); + b.iter(|| coeffs.clone().fft_with_options(None, None)); }); } } diff --git a/plonky2/src/fri/oracle.rs b/plonky2/src/fri/oracle.rs index c705e125..02db3140 100644 --- a/plonky2/src/fri/oracle.rs +++ b/plonky2/src/fri/oracle.rs @@ -47,7 +47,7 @@ impl, C: GenericConfig, const D: usize> let coeffs = timed!( timing, "IFFT", - values.par_iter().map(|v| v.ifft()).collect::>() + values.into_par_iter().map(|v| v.ifft()).collect::>() ); Self::from_coeffs( diff --git a/util/src/lib.rs b/util/src/lib.rs index 6dc32cb5..5c683a50 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -80,7 +80,7 @@ fn reverse_index_bits_large(arr: &[T], n_power: usize) -> Vec { result } -pub fn reverse_index_bits_in_place(arr: &mut Vec) { +pub fn reverse_index_bits_in_place(arr: &mut [T]) { let n = arr.len(); let n_power = log2_strict(n); @@ -101,7 +101,7 @@ pub fn reverse_index_bits_in_place(arr: &mut Vec) { where reverse_bits(src, n_power) computes the n_power-bit reverse. */ -fn reverse_index_bits_in_place_small(arr: &mut Vec, n_power: usize) { +fn reverse_index_bits_in_place_small(arr: &mut [T], n_power: usize) { let n = arr.len(); // BIT_REVERSE_6BIT holds 6-bit reverses. This shift makes them n_power-bit reverses. 
let dst_shr_amt = 6 - n_power; @@ -113,7 +113,7 @@ fn reverse_index_bits_in_place_small(arr: &mut Vec, n_power: usize) { } } -fn reverse_index_bits_in_place_large(arr: &mut Vec, n_power: usize) { +fn reverse_index_bits_in_place_large(arr: &mut [T], n_power: usize) { let n = arr.len(); // LLVM does not know that it does not need to reverse src at each iteration (which is expensive // on x86). We take advantage of the fact that the low bits of dst change rarely and the high From 5f0eee1a9b31af40fde45be7f774cbfe0d548f25 Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Fri, 21 Jan 2022 10:44:46 -0800 Subject: [PATCH 030/143] Bit-order reversal optimizations (#442) * Bit-order in-place reversal optimizations * optimization/simplification * Done modulo documentation and testing on x86 * Minor type fixes on non-ARM * Minor x86 * Transpose docs * Docs * Make rustfmt happy * Bug fixes + tests * Minor docs + lints --- plonky2/src/util/mod.rs | 35 +++++++- util/src/lib.rs | 172 +++++++++++++++++++++++++++---------- util/src/transpose_util.rs | 112 ++++++++++++++++++++++++ 3 files changed, 272 insertions(+), 47 deletions(-) create mode 100644 util/src/transpose_util.rs diff --git a/plonky2/src/util/mod.rs b/plonky2/src/util/mod.rs index 13a72f78..4cf7119a 100644 --- a/plonky2/src/util/mod.rs +++ b/plonky2/src/util/mod.rs @@ -99,7 +99,18 @@ mod tests { } #[test] - fn test_reverse_index_bits_in_place() { + fn test_reverse_index_bits_in_place_trivial() { + let mut arr1: Vec = vec![10]; + reverse_index_bits_in_place(&mut arr1); + assert_eq!(arr1, vec![10]); + + let mut arr2: Vec = vec![10, 20]; + reverse_index_bits_in_place(&mut arr2); + assert_eq!(arr2, vec![10, 20]); + } + + #[test] + fn test_reverse_index_bits_in_place_small() { let mut arr4: Vec = vec![10, 20, 30, 40]; reverse_index_bits_in_place(&mut arr4); assert_eq!(arr4, vec![10, 30, 20, 40]); @@ -127,4 +138,26 @@ mod tests { reverse_index_bits_in_place(&mut arr256); assert_eq!(arr256, output256); } + + #[test] + fn test_reverse_index_bits_in_place_big_even() { + let mut arr: Vec = (0..1 << 16).collect(); + let target = reverse_index_bits(&arr); + reverse_index_bits_in_place(&mut arr); + assert_eq!(arr, target); + reverse_index_bits_in_place(&mut arr); + let range: Vec = (0..1 << 16).collect(); + assert_eq!(arr, range); + } + + #[test] + fn test_reverse_index_bits_in_place_big_odd() { + let mut arr: Vec = (0..1 << 17).collect(); + let target = reverse_index_bits(&arr); + reverse_index_bits_in_place(&mut arr); + assert_eq!(arr, target); + reverse_index_bits_in_place(&mut arr); + let range: Vec = (0..1 << 17).collect(); + assert_eq!(arr, range); + } } diff --git a/util/src/lib.rs b/util/src/lib.rs index 5c683a50..f760cfba 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -7,6 +7,11 @@ use std::arch::asm; use std::hint::unreachable_unchecked; +use std::mem::size_of; +use std::ptr::{swap, swap_nonoverlapping}; + +mod transpose_util; +use crate::transpose_util::transpose_in_place_square; pub fn bits_u64(n: u64) -> usize { (64 - n.leading_zeros()) as usize @@ -26,6 +31,9 @@ pub fn log2_ceil(n: usize) -> usize { pub fn log2_strict(n: usize) -> usize { let res = n.trailing_zeros(); assert!(n.wrapping_shr(res) == 1, "Not a power of two: {}", n); + // Tell the optimizer about the semantics of `log2_strict`. i.e. it can replace `n` with + // `1 << res` and vice versa. + assume(n == 1 << res); res as usize } @@ -80,57 +88,129 @@ fn reverse_index_bits_large(arr: &[T], n_power: usize) -> Vec { result } +/// Bit-reverse the order of elements in `arr`. 
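+/// For example, with `lb_n = 4` the element at index `0b0011` is swapped with the element at
+/// index `0b1100`.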
+/// SAFETY: ensure that `arr.len() == 1 << lb_n`. +#[cfg(not(target_arch = "aarch64"))] +unsafe fn reverse_index_bits_in_place_small(arr: &mut [T], lb_n: usize) { + if lb_n <= 6 { + // BIT_REVERSE_6BIT holds 6-bit reverses. This shift makes them lb_n-bit reverses. + let dst_shr_amt = 6 - lb_n; + for src in 0..arr.len() { + let dst = (BIT_REVERSE_6BIT[src] as usize) >> dst_shr_amt; + if src < dst { + swap(arr.get_unchecked_mut(src), arr.get_unchecked_mut(dst)); + } + } + } else { + // LLVM does not know that it does not need to reverse src at each iteration (which is + // expensive on x86). We take advantage of the fact that the low bits of dst change rarely and the high + // bits of dst are dependent only on the low bits of src. + let dst_lo_shr_amt = 64 - (lb_n - 6); + let dst_hi_shl_amt = lb_n - 6; + for src_chunk in 0..(arr.len() >> 6) { + let src_hi = src_chunk << 6; + let dst_lo = src_chunk.reverse_bits() >> dst_lo_shr_amt; + for src_lo in 0..(1 << 6) { + let dst_hi = (BIT_REVERSE_6BIT[src_lo] as usize) << dst_hi_shl_amt; + let src = src_hi + src_lo; + let dst = dst_hi + dst_lo; + if src < dst { + swap(arr.get_unchecked_mut(src), arr.get_unchecked_mut(dst)); + } + } + } + } +} + +/// Bit-reverse the order of elements in `arr`. +/// SAFETY: ensure that `arr.len() == 1 << lb_n`. +#[cfg(target_arch = "aarch64")] +unsafe fn reverse_index_bits_in_place_small(arr: &mut [T], lb_n: usize) { + // Aarch64 can reverse bits in one instruction, so the trivial version works best. + for src in 0..arr.len() { + // `wrapping_shr` handles the case when `arr.len() == 1`. In that case `src == 0`, so + // `src.reverse_bits() == 0`. `usize::wrapping_shr` by 64 is a no-op, but it gives the + // correct result. + let dst = src.reverse_bits().wrapping_shr(usize::BITS - lb_n as u32); + if src < dst { + swap(arr.get_unchecked_mut(src), arr.get_unchecked_mut(dst)); + } + } +} + +/// Split `arr` chunks and bit-reverse the order of the chunks. There are `1 << lb_num_chunks` +/// chunks, each of length `1 << lb_chunk_size`. +/// SAFETY: ensure that `arr.len() == 1 << lb_num_chunks + lb_chunk_size`. +unsafe fn reverse_index_bits_in_place_chunks( + arr: &mut [T], + lb_num_chunks: usize, + lb_chunk_size: usize, +) { + for i in 0..1usize << lb_num_chunks { + // `wrapping_shr` handles the silly case when `lb_num_chunks == 0`. + let j = i + .reverse_bits() + .wrapping_shr(usize::BITS - lb_num_chunks as u32); + if i < j { + swap_nonoverlapping( + arr.get_unchecked_mut(i << lb_chunk_size), + arr.get_unchecked_mut(j << lb_chunk_size), + 1 << lb_chunk_size, + ); + } + } +} + +// Ensure that SMALL_ARR_SIZE >= 4 * BIG_T_SIZE. +const BIG_T_SIZE: usize = 1 << 14; +const SMALL_ARR_SIZE: usize = 1 << 16; pub fn reverse_index_bits_in_place(arr: &mut [T]) { let n = arr.len(); - let n_power = log2_strict(n); - - if n_power <= 6 { - reverse_index_bits_in_place_small(arr, n_power); + let lb_n = log2_strict(n); + // If the whole array fits in fast cache, then the trivial algorithm is cache friendly. Also, if + // `T` is really big, then the trivial algorithm is cache-friendly, no matter the size of the + // array. + if size_of::() << lb_n <= SMALL_ARR_SIZE || size_of::() >= BIG_T_SIZE { + unsafe { + reverse_index_bits_in_place_small(arr, lb_n); + } } else { - reverse_index_bits_in_place_large(arr, n_power); - } -} + debug_assert!(n >= 4); // By our choice of `BIG_T_SIZE` and `SMALL_ARR_SIZE`. 
-/* Both functions below are semantically equivalent to: - for src in 0..n { - let dst = reverse_bits(src, n_power); - if src < dst { - arr.swap(src, dst); - } - } - where reverse_bits(src, n_power) computes the n_power-bit reverse. -*/ - -fn reverse_index_bits_in_place_small(arr: &mut [T], n_power: usize) { - let n = arr.len(); - // BIT_REVERSE_6BIT holds 6-bit reverses. This shift makes them n_power-bit reverses. - let dst_shr_amt = 6 - n_power; - for src in 0..n { - let dst = (BIT_REVERSE_6BIT[src] as usize) >> dst_shr_amt; - if src < dst { - arr.swap(src, dst); - } - } -} - -fn reverse_index_bits_in_place_large(arr: &mut [T], n_power: usize) { - let n = arr.len(); - // LLVM does not know that it does not need to reverse src at each iteration (which is expensive - // on x86). We take advantage of the fact that the low bits of dst change rarely and the high - // bits of dst are dependent only on the low bits of src. - let dst_lo_shr_amt = 64 - (n_power - 6); - let dst_hi_shl_amt = n_power - 6; - for src_chunk in 0..(n >> 6) { - let src_hi = src_chunk << 6; - let dst_lo = src_chunk.reverse_bits() >> dst_lo_shr_amt; - for src_lo in 0..(1 << 6) { - let dst_hi = (BIT_REVERSE_6BIT[src_lo] as usize) << dst_hi_shl_amt; - - let src = src_hi + src_lo; - let dst = dst_hi + dst_lo; - if src < dst { - arr.swap(src, dst); + // Algorithm: + // + // Treat `arr` as a `sqrt(n)` by `sqrt(n)` row-major matrix. (Assume for now that `lb_n` is + // even, i.e., `n` is a square number.) To perform bit-order reversal we: + // 1. Bit-reverse the order of the rows. (They are contiguous in memory, so this is + // basically a series of large `memcpy`s.) + // 2. Transpose the matrix. + // 3. Bit-reverse the order of the rows. + // This is equivalent to, for every index `0 <= i < n`: + // 1. bit-reversing `i[lb_n / 2..lb_n]`, + // 2. swapping `i[0..lb_n / 2]` and `i[lb_n / 2..lb_n]`, + // 3. bit-reversing `i[lb_n / 2..lb_n]`. + // + // If `lb_n` is odd, i.e., `n` is not a square number, then the above procedure requires + // slight modification. At steps 1 and 3 we bit-reverse bits `ceil(lb_n / 2)..lb_n`, of the + // index (shuffling `floor(lb_n / 2)` chunks of length `ceil(lb_n / 2)`). At step 2, we + // perform _two_ transposes. We treat `arr` as two matrices, one where the middle bit of the + // index is `0` and another, where the middle bit is `1`; we transpose each individually. + + let lb_num_chunks = lb_n >> 1; + let lb_chunk_size = lb_n - lb_num_chunks; + unsafe { + reverse_index_bits_in_place_chunks(arr, lb_num_chunks, lb_chunk_size); + transpose_in_place_square(arr, lb_chunk_size, lb_num_chunks, 0); + if lb_num_chunks != lb_chunk_size { + // `arr` cannot be interpreted as a square matrix. We instead interpret it as a + // `1 << lb_num_chunks` by `2` by `1 << lb_num_chunks` tensor, in row-major order. + // The above transpose acted on `tensor[..., 0, ...]` (all indices with middle bit + // `0`). We still need to transpose `tensor[..., 1, ...]`. To do so, we advance + // arr by `1 << lb_num_chunks` effectively, adding that to every index. 
+ let arr_with_offset = &mut arr[1 << lb_num_chunks..]; + transpose_in_place_square(arr_with_offset, lb_chunk_size, lb_num_chunks, 0); } + reverse_index_bits_in_place_chunks(arr, lb_num_chunks, lb_chunk_size); } } } diff --git a/util/src/transpose_util.rs b/util/src/transpose_util.rs new file mode 100644 index 00000000..1c8280a8 --- /dev/null +++ b/util/src/transpose_util.rs @@ -0,0 +1,112 @@ +use std::ptr::swap; + +const LB_BLOCK_SIZE: usize = 3; + +/// Transpose square matrix in-place +/// The matrix is of size `1 << lb_size` by `1 << lb_size`. It occupies +/// `M[i, j] == arr[(i + x << lb_stride) + j + x]` for `0 <= i, j < 1 << lb_size`. The transposition +/// swaps `M[i, j]` and `M[j, i]`. +/// +/// SAFETY: +/// Make sure that `(i + x << lb_stride) + j + x` is a valid index in `arr` for all +/// `0 <= i, j < 1 << lb_size`. Ensure also that `lb_size <= lb_stride` to prevent overlap. +unsafe fn transpose_in_place_square_small( + arr: &mut [T], + lb_stride: usize, + lb_size: usize, + x: usize, +) { + for i in x..x + (1 << lb_size) { + for j in x..i { + swap( + arr.get_unchecked_mut(i + (j << lb_stride)), + arr.get_unchecked_mut((i << lb_stride) + j), + ); + } + } +} + +/// Transpose square matrices and swap +/// The matrices are of of size `1 << lb_size` by `1 << lb_size`. They occupy +/// `M0[i, j] == arr[(i + x << lb_stride) + j + y]`, `M1[i, j] == arr[i + x + (j + y << lb_stride)]` +/// for `0 <= i, j < 1 << lb_size. The transposition swaps `M0[i, j]` and `M1[j, i]`. +/// +/// SAFETY: +/// Make sure that `(i + x << lb_stride) + j + y` and `i + x + (j + y << lb_stride)` are valid +/// indices in `arr` for all `0 <= i, j < 1 << lb_size`. Ensure also that `lb_size <= lb_stride` to +/// prevent overlap. +unsafe fn transpose_swap_square_small( + arr: &mut [T], + lb_stride: usize, + lb_size: usize, + x: usize, + y: usize, +) { + for i in x..x + (1 << lb_size) { + for j in y..y + (1 << lb_size) { + swap( + arr.get_unchecked_mut(i + (j << lb_stride)), + arr.get_unchecked_mut((i << lb_stride) + j), + ); + } + } +} + +/// Transpose square matrices and swap +/// The matrices are of of size `1 << lb_size` by `1 << lb_size`. They occupy +/// `M0[i, j] == arr[(i + x << lb_stride) + j + y]`, `M1[i, j] == arr[i + x + (j + y << lb_stride)]` +/// for `0 <= i, j < 1 << lb_size. The transposition swaps `M0[i, j]` and `M1[j, i]`. +/// +/// SAFETY: +/// Make sure that `(i + x << lb_stride) + j + y` and `i + x + (j + y << lb_stride)` are valid +/// indices in `arr` for all `0 <= i, j < 1 << lb_size`. Ensure also that `lb_size <= lb_stride` to +/// prevent overlap. +unsafe fn transpose_swap_square( + arr: &mut [T], + lb_stride: usize, + lb_size: usize, + x: usize, + y: usize, +) { + if lb_size <= LB_BLOCK_SIZE { + transpose_swap_square_small(arr, lb_stride, lb_size, x, y); + } else { + let lb_block_size = lb_size - 1; + let block_size = 1 << lb_block_size; + transpose_swap_square(arr, lb_stride, lb_block_size, x, y); + transpose_swap_square(arr, lb_stride, lb_block_size, x + block_size, y); + transpose_swap_square(arr, lb_stride, lb_block_size, x, y + block_size); + transpose_swap_square( + arr, + lb_stride, + lb_block_size, + x + block_size, + y + block_size, + ); + } +} + +/// Transpose square matrix in-place +/// The matrix is of size `1 << lb_size` by `1 << lb_size`. It occupies +/// `M[i, j] == arr[(i + x << lb_stride) + j + x]` for `0 <= i, j < 1 << lb_size`. The transposition +/// swaps `M[i, j]` and `M[j, i]`. 
+/// +/// SAFETY: +/// Make sure that `(i + x << lb_stride) + j + x` is a valid index in `arr` for all +/// `0 <= i, j < 1 << lb_size`. Ensure also that `lb_size <= lb_stride` to prevent overlap. +pub(crate) unsafe fn transpose_in_place_square( + arr: &mut [T], + lb_stride: usize, + lb_size: usize, + x: usize, +) { + if lb_size <= LB_BLOCK_SIZE { + transpose_in_place_square_small(arr, lb_stride, lb_size, x); + } else { + let lb_block_size = lb_size - 1; + let block_size = 1 << lb_block_size; + transpose_in_place_square(arr, lb_stride, lb_block_size, x); + transpose_swap_square(arr, lb_stride, lb_block_size, x, x + block_size); + transpose_in_place_square(arr, lb_stride, lb_block_size, x + block_size); + } +} From 04fbb05d7c04ec0a9f8da19dde8fa03c4dd520df Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Fri, 21 Jan 2022 15:55:24 -0800 Subject: [PATCH 031/143] Swap loops in `compute_quotient_polys` (#444) --- plonky2/src/plonk/prover.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index 0dd2aba2..2a11e4c0 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -403,17 +403,17 @@ fn compute_quotient_polys< // NB (JN): I'm not sure how (in)efficient the below is. It needs measuring. let mut local_constants_batch = vec![F::ZERO; xs_batch.len() * local_constants_batch_refs[0].len()]; - for (i, constants) in local_constants_batch_refs.iter().enumerate() { - for (j, &constant) in constants.iter().enumerate() { - local_constants_batch[i + j * xs_batch.len()] = constant; + for i in 0..local_constants_batch_refs[0].len() { + for (j, constants) in local_constants_batch_refs.iter().enumerate() { + local_constants_batch[i * xs_batch.len() + j] = constants[i]; } } let mut local_wires_batch = vec![F::ZERO; xs_batch.len() * local_wires_batch_refs[0].len()]; - for (i, wires) in local_wires_batch_refs.iter().enumerate() { - for (j, &wire) in wires.iter().enumerate() { - local_wires_batch[i + j * xs_batch.len()] = wire; + for i in 0..local_wires_batch_refs[0].len() { + for (j, wires) in local_wires_batch_refs.iter().enumerate() { + local_wires_batch[i * xs_batch.len() + j] = wires[i]; } } From a6e64d1c7eb2a0f75e03153188bb33a1d57b2d2b Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Sun, 23 Jan 2022 23:27:26 -0800 Subject: [PATCH 032/143] Replace `proof_to_proof_target` (#445) * Replace `proof_to_proof_target` With a `add_virtual_proof_with_pis` method that uses the inner circuit data, but does not require constructing a proof. Note that this doesn't support IVC yet. For that, I think we can add a variant of `add_virtual_proof_with_pis` that takes several parameters like FRI arities, but does not involve `CommonCircuitData` (since no circuit has been build yet). It might also be best to avoid large objects like `FriParams`, and pass just the data we need. Then there will be some nontrivial work to do recursion with "estimated" parameters (degree, arities, etc), check if the estimates were correct, and try again if not. 
* PR feedback --- plonky2/src/fri/proof.rs | 7 +- plonky2/src/fri/prover.rs | 5 +- plonky2/src/fri/recursive_verifier.rs | 79 ++++++- plonky2/src/gadgets/polynomial.rs | 1 + plonky2/src/hash/merkle_proofs.rs | 2 +- plonky2/src/plonk/circuit_builder.rs | 18 ++ plonky2/src/plonk/circuit_data.rs | 24 ++- plonky2/src/plonk/proof.rs | 2 + plonky2/src/plonk/recursive_verifier.rs | 267 +++++++++++------------- plonky2/src/plonk/verifier.rs | 4 + 10 files changed, 246 insertions(+), 163 deletions(-) diff --git a/plonky2/src/fri/proof.rs b/plonky2/src/fri/proof.rs index ff81c2c5..f96db781 100644 --- a/plonky2/src/fri/proof.rs +++ b/plonky2/src/fri/proof.rs @@ -26,7 +26,7 @@ pub struct FriQueryStep, H: Hasher, const D: usi pub merkle_proof: MerkleProof, } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct FriQueryStepTarget { pub evals: Vec>, pub merkle_proof: MerkleProofTarget, @@ -51,7 +51,7 @@ impl> FriInitialTreeProof { } } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct FriInitialTreeProofTarget { pub evals_proofs: Vec<(Vec, MerkleProofTarget)>, } @@ -80,7 +80,7 @@ pub struct FriQueryRound, H: Hasher, const D: us pub steps: Vec>, } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct FriQueryRoundTarget { pub initial_trees_proof: FriInitialTreeProofTarget, pub steps: Vec>, @@ -111,6 +111,7 @@ pub struct FriProof, H: Hasher, const D: usize> pub pow_witness: F, } +#[derive(Debug)] pub struct FriProofTarget { pub commit_phase_merkle_caps: Vec, pub query_round_proofs: Vec>, diff --git a/plonky2/src/fri/prover.rs b/plonky2/src/fri/prover.rs index d2731600..e814beae 100644 --- a/plonky2/src/fri/prover.rs +++ b/plonky2/src/fri/prover.rs @@ -72,9 +72,8 @@ fn fri_committed_trees, C: GenericConfig, let mut trees = Vec::new(); let mut shift = F::MULTIPLICATIVE_GROUP_GENERATOR; - let num_reductions = fri_params.reduction_arity_bits.len(); - for i in 0..num_reductions { - let arity = 1 << fri_params.reduction_arity_bits[i]; + for arity_bits in &fri_params.reduction_arity_bits { + let arity = 1 << arity_bits; reverse_index_bits_in_place(&mut values.values); let chunked_values = values diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index 63d07035..a9a224d1 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -2,7 +2,9 @@ use itertools::Itertools; use plonky2_field::extension_field::Extendable; use plonky2_util::{log2_strict, reverse_index_bits_in_place}; -use crate::fri::proof::{FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget}; +use crate::fri::proof::{ + FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget, FriQueryStepTarget, +}; use crate::fri::structure::{FriBatchInfoTarget, FriInstanceInfoTarget, FriOpeningsTarget}; use crate::fri::{FriConfig, FriParams}; use crate::gadgets::interpolation::InterpolationGate; @@ -420,6 +422,81 @@ impl, const D: usize> CircuitBuilder { assert!(p_ambiguous < query_error * 1e-5, "A non-negligible portion of field elements are in the range that permits non-canonical encodings. 
Need to do more analysis or enforce canonical encodings."); } + + pub(crate) fn add_virtual_fri_proof( + &mut self, + num_leaves_per_oracle: &[usize], + params: &FriParams, + ) -> FriProofTarget { + let cap_height = params.config.cap_height; + let num_queries = params.config.num_query_rounds; + let commit_phase_merkle_caps = (0..params.reduction_arity_bits.len()) + .map(|_| self.add_virtual_cap(cap_height)) + .collect(); + let query_round_proofs = (0..num_queries) + .map(|_| self.add_virtual_fri_query(num_leaves_per_oracle, params)) + .collect(); + let final_poly = self.add_virtual_poly_coeff_ext(params.final_poly_len()); + let pow_witness = self.add_virtual_target(); + FriProofTarget { + commit_phase_merkle_caps, + query_round_proofs, + final_poly, + pow_witness, + } + } + + fn add_virtual_fri_query( + &mut self, + num_leaves_per_oracle: &[usize], + params: &FriParams, + ) -> FriQueryRoundTarget { + let cap_height = params.config.cap_height; + assert!(params.lde_bits() >= cap_height); + let mut merkle_proof_len = params.lde_bits() - cap_height; + + let initial_trees_proof = + self.add_virtual_fri_initial_trees_proof(num_leaves_per_oracle, merkle_proof_len); + + let mut steps = vec![]; + for &arity_bits in ¶ms.reduction_arity_bits { + assert!(merkle_proof_len >= arity_bits); + merkle_proof_len -= arity_bits; + steps.push(self.add_virtual_fri_query_step(arity_bits, merkle_proof_len)); + } + + FriQueryRoundTarget { + initial_trees_proof, + steps, + } + } + + fn add_virtual_fri_initial_trees_proof( + &mut self, + num_leaves_per_oracle: &[usize], + initial_merkle_proof_len: usize, + ) -> FriInitialTreeProofTarget { + let evals_proofs = num_leaves_per_oracle + .iter() + .map(|&num_oracle_leaves| { + let leaves = self.add_virtual_targets(num_oracle_leaves); + let merkle_proof = self.add_virtual_merkle_proof(initial_merkle_proof_len); + (leaves, merkle_proof) + }) + .collect(); + FriInitialTreeProofTarget { evals_proofs } + } + + fn add_virtual_fri_query_step( + &mut self, + arity_bits: usize, + merkle_proof_len: usize, + ) -> FriQueryStepTarget { + FriQueryStepTarget { + evals: self.add_virtual_extension_targets(1 << arity_bits), + merkle_proof: self.add_virtual_merkle_proof(merkle_proof_len), + } + } } /// For each opening point, holds the reduced (by `alpha`) evaluations of each polynomial that's diff --git a/plonky2/src/gadgets/polynomial.rs b/plonky2/src/gadgets/polynomial.rs index 195eabd3..6e4a9bb4 100644 --- a/plonky2/src/gadgets/polynomial.rs +++ b/plonky2/src/gadgets/polynomial.rs @@ -6,6 +6,7 @@ use crate::iop::target::Target; use crate::plonk::circuit_builder::CircuitBuilder; use crate::util::reducing::ReducingFactorTarget; +#[derive(Debug)] pub struct PolynomialCoeffsExtTarget(pub Vec>); impl PolynomialCoeffsExtTarget { diff --git a/plonky2/src/hash/merkle_proofs.rs b/plonky2/src/hash/merkle_proofs.rs index 60fe236a..c2f3655d 100644 --- a/plonky2/src/hash/merkle_proofs.rs +++ b/plonky2/src/hash/merkle_proofs.rs @@ -17,7 +17,7 @@ pub struct MerkleProof> { pub siblings: Vec, } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct MerkleProofTarget { /// The Merkle digest of each sibling subtree, staying from the bottommost layer. 
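A note on `add_virtual_fri_query` above: each FRI reduction step of arity `2^a` shrinks the committed codeword, so the Merkle paths get `a` hashes shorter per step. A hypothetical helper mirroring that loop makes the arithmetic explicit (name and shape are illustrative only):

    /// Merkle-path length (in hashes) after each FRI reduction step, given the LDE
    /// height, the cap height and the per-step arities, all as log2 values.
    fn fri_step_path_lens(lde_bits: usize, cap_height: usize, arity_bits: &[usize]) -> Vec<usize> {
        let mut len = lde_bits - cap_height;
        arity_bits
            .iter()
            .map(|&a| {
                debug_assert!(len >= a);
                len -= a;
                len
            })
            .collect()
    }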
pub siblings: Vec, diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index 33b44054..e4abe611 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -15,6 +15,7 @@ use crate::fri::{FriConfig, FriParams}; use crate::gadgets::arithmetic::BaseArithmeticOperation; use crate::gadgets::arithmetic_extension::ExtensionArithmeticOperation; use crate::gadgets::arithmetic_u32::U32Target; +use crate::gadgets::polynomial::PolynomialCoeffsExtTarget; use crate::gates::arithmetic_base::ArithmeticGate; use crate::gates::arithmetic_extension::ArithmeticExtensionGate; use crate::gates::arithmetic_u32::U32ArithmeticGate; @@ -28,6 +29,7 @@ use crate::gates::random_access::RandomAccessGate; use crate::gates::subtraction_u32::U32SubtractionGate; use crate::gates::switch::SwitchGate; use crate::hash::hash_types::{HashOutTarget, MerkleCapTarget, RichField}; +use crate::hash::merkle_proofs::MerkleProofTarget; use crate::iop::ext_target::ExtensionTarget; use crate::iop::generator::{ CopyGenerator, RandomValueGenerator, SimpleGenerator, WitnessGenerator, @@ -172,6 +174,12 @@ impl, const D: usize> CircuitBuilder { (0..n).map(|_i| self.add_virtual_hash()).collect() } + pub(crate) fn add_virtual_merkle_proof(&mut self, len: usize) -> MerkleProofTarget { + MerkleProofTarget { + siblings: self.add_virtual_hashes(len), + } + } + pub fn add_virtual_extension_target(&mut self) -> ExtensionTarget { ExtensionTarget(self.add_virtual_targets(D).try_into().unwrap()) } @@ -182,6 +190,14 @@ impl, const D: usize> CircuitBuilder { .collect() } + pub(crate) fn add_virtual_poly_coeff_ext( + &mut self, + num_coeffs: usize, + ) -> PolynomialCoeffsExtTarget { + let coeffs = self.add_virtual_extension_targets(num_coeffs); + PolynomialCoeffsExtTarget(coeffs) + } + // TODO: Unsafe pub fn add_virtual_bool_target(&mut self) -> BoolTarget { BoolTarget::new_unsafe(self.add_virtual_target()) @@ -596,6 +612,7 @@ impl, const D: usize> CircuitBuilder { // Hash the public inputs, and route them to a `PublicInputGate` which will enforce that // those hash wires match the claimed public inputs. + let num_public_inputs = self.public_inputs.len(); let public_inputs_hash = self.hash_n_to_hash::(self.public_inputs.clone(), true); let pi_gate = self.add_gate(PublicInputGate, vec![]); @@ -736,6 +753,7 @@ impl, const D: usize> CircuitBuilder { num_gate_constraints, num_constants, num_virtual_targets: self.virtual_target_index, + num_public_inputs, k_is, num_partial_products, circuit_digest, diff --git a/plonky2/src/plonk/circuit_data.rs b/plonky2/src/plonk/circuit_data.rs index 35c37991..dd7ebc25 100644 --- a/plonky2/src/plonk/circuit_data.rs +++ b/plonky2/src/plonk/circuit_data.rs @@ -234,6 +234,8 @@ pub struct CommonCircuitData< pub(crate) num_virtual_targets: usize, + pub(crate) num_public_inputs: usize, + /// The `{k_i}` valued used in `S_ID_i` in Plonk's permutation argument. 
pub(crate) k_is: Vec, @@ -341,34 +343,42 @@ impl, C: GenericConfig, const D: usize> } fn fri_preprocessed_polys(&self) -> Vec { - let num_preprocessed_polys = self.sigmas_range().end; FriPolynomialInfo::from_range( PlonkOracle::CONSTANTS_SIGMAS.index, - 0..num_preprocessed_polys, + 0..self.num_preprocessed_polys(), ) } + pub(crate) fn num_preprocessed_polys(&self) -> usize { + self.sigmas_range().end + } + fn fri_wire_polys(&self) -> Vec { let num_wire_polys = self.config.num_wires; FriPolynomialInfo::from_range(PlonkOracle::WIRES.index, 0..num_wire_polys) } fn fri_zs_partial_products_polys(&self) -> Vec { - let num_zs_partial_products_polys = - self.config.num_challenges * (1 + self.num_partial_products); FriPolynomialInfo::from_range( PlonkOracle::ZS_PARTIAL_PRODUCTS.index, - 0..num_zs_partial_products_polys, + 0..self.num_zs_partial_products_polys(), ) } + pub(crate) fn num_zs_partial_products_polys(&self) -> usize { + self.config.num_challenges * (1 + self.num_partial_products) + } + fn fri_zs_polys(&self) -> Vec { FriPolynomialInfo::from_range(PlonkOracle::ZS_PARTIAL_PRODUCTS.index, self.zs_range()) } fn fri_quotient_polys(&self) -> Vec { - let num_quotient_polys = self.config.num_challenges * self.quotient_degree_factor; - FriPolynomialInfo::from_range(PlonkOracle::QUOTIENT.index, 0..num_quotient_polys) + FriPolynomialInfo::from_range(PlonkOracle::QUOTIENT.index, 0..self.num_quotient_polys()) + } + + pub(crate) fn num_quotient_polys(&self) -> usize { + self.config.num_challenges * self.quotient_degree_factor } fn fri_all_polys(&self) -> Vec { diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index 07ca7c9e..7fbdc671 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -32,6 +32,7 @@ pub struct Proof, C: GenericConfig, const pub opening_proof: FriProof, } +#[derive(Debug)] pub struct ProofTarget { pub wires_cap: MerkleCapTarget, pub plonk_zs_partial_products_cap: MerkleCapTarget, @@ -255,6 +256,7 @@ pub(crate) struct FriInferredElements, const D: usi pub Vec, ); +#[derive(Debug)] pub struct ProofWithPublicInputsTarget { pub proof: ProofTarget, pub public_inputs: Vec, diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index 38024eea..6b3b6b0c 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -5,7 +5,7 @@ use crate::iop::challenger::RecursiveChallenger; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::{CircuitConfig, CommonCircuitData, VerifierCircuitTarget}; use crate::plonk::config::{AlgebraicHasher, GenericConfig}; -use crate::plonk::proof::ProofWithPublicInputsTarget; +use crate::plonk::proof::{OpeningSetTarget, ProofTarget, ProofWithPublicInputsTarget}; use crate::plonk::vanishing_poly::eval_vanishing_poly_recursively; use crate::plonk::vars::EvaluationTargets; use crate::util::reducing::ReducingFactorTarget; @@ -13,7 +13,7 @@ use crate::with_context; impl, const D: usize> CircuitBuilder { /// Recursively verifies an inner proof. 
- pub fn add_recursive_verifier>( + pub fn verify_proof_with_pis>( &mut self, proof_with_pis: ProofWithPublicInputsTarget, inner_config: &CircuitConfig, @@ -26,12 +26,34 @@ impl, const D: usize> CircuitBuilder { proof, public_inputs, } = proof_with_pis; + + assert_eq!(public_inputs.len(), inner_common_data.num_public_inputs); + let public_inputs_hash = self.hash_n_to_hash::(public_inputs, true); + + self.verify_proof( + proof, + public_inputs_hash, + inner_config, + inner_verifier_data, + inner_common_data, + ); + } + + /// Recursively verifies an inner proof. + pub fn verify_proof>( + &mut self, + proof: ProofTarget, + public_inputs_hash: HashOutTarget, + inner_config: &CircuitConfig, + inner_verifier_data: &VerifierCircuitTarget, + inner_common_data: &CommonCircuitData, + ) where + C::Hasher: AlgebraicHasher, + { let one = self.one_extension(); let num_challenges = inner_config.num_challenges; - let public_inputs_hash = &self.hash_n_to_hash::(public_inputs, true); - let mut challenger = RecursiveChallenger::::new(self); let (betas, gammas, alphas, zeta) = @@ -41,7 +63,7 @@ impl, const D: usize> CircuitBuilder { self.constants(&inner_common_data.circuit_digest.elements), ); challenger.observe_hash(&digest); - challenger.observe_hash(public_inputs_hash); + challenger.observe_hash(&public_inputs_hash); challenger.observe_cap(&proof.wires_cap); let betas = challenger.get_n_challenges(self, num_challenges); @@ -61,7 +83,7 @@ impl, const D: usize> CircuitBuilder { let vars = EvaluationTargets { local_constants, local_wires, - public_inputs_hash, + public_inputs_hash: &public_inputs_hash, }; let local_zs = &proof.openings.plonk_zs; let next_zs = &proof.openings.plonk_zs_right; @@ -123,137 +145,81 @@ impl, const D: usize> CircuitBuilder { ) ); } -} -#[cfg(test)] -mod tests { - use anyhow::Result; - use log::{info, Level}; - use plonky2_util::log2_strict; - - use super::*; - use crate::fri::proof::{ - FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget, FriQueryStepTarget, - }; - use crate::fri::reduction_strategies::FriReductionStrategy; - use crate::fri::FriConfig; - use crate::gadgets::polynomial::PolynomialCoeffsExtTarget; - use crate::gates::noop::NoopGate; - use crate::hash::merkle_proofs::MerkleProofTarget; - use crate::iop::witness::{PartialWitness, Witness}; - use crate::plonk::circuit_data::VerifierOnlyCircuitData; - use crate::plonk::config::{ - GMiMCGoldilocksConfig, GenericConfig, KeccakGoldilocksConfig, PoseidonGoldilocksConfig, - }; - use crate::plonk::proof::{ - CompressedProofWithPublicInputs, OpeningSetTarget, Proof, ProofTarget, - ProofWithPublicInputs, - }; - use crate::plonk::prover::prove; - use crate::util::timing::TimingTree; - - // Construct a `FriQueryRoundTarget` with the same dimensions as the ones in `proof`. 
- fn get_fri_query_round< - F: RichField + Extendable, - C: GenericConfig, - const D: usize, - >( - proof: &Proof, - builder: &mut CircuitBuilder, - ) -> FriQueryRoundTarget { - let mut query_round = FriQueryRoundTarget { - initial_trees_proof: FriInitialTreeProofTarget { - evals_proofs: vec![], - }, - steps: vec![], - }; - for (v, merkle_proof) in &proof.opening_proof.query_round_proofs[0] - .initial_trees_proof - .evals_proofs - { - query_round.initial_trees_proof.evals_proofs.push(( - builder.add_virtual_targets(v.len()), - MerkleProofTarget { - siblings: builder.add_virtual_hashes(merkle_proof.siblings.len()), - }, - )); - } - for step in &proof.opening_proof.query_round_proofs[0].steps { - query_round.steps.push(FriQueryStepTarget { - evals: builder.add_virtual_extension_targets(step.evals.len()), - merkle_proof: MerkleProofTarget { - siblings: builder.add_virtual_hashes(step.merkle_proof.siblings.len()), - }, - }); - } - query_round - } - - // Construct a `ProofTarget` with the same dimensions as `proof`. - fn proof_to_proof_target< - F: RichField + Extendable, - C: GenericConfig, - const D: usize, - >( - proof_with_pis: &ProofWithPublicInputs, - builder: &mut CircuitBuilder, + pub fn add_virtual_proof_with_pis>( + &mut self, + common_data: &CommonCircuitData, ) -> ProofWithPublicInputsTarget { - let ProofWithPublicInputs { - proof, - public_inputs, - } = proof_with_pis; - - let wires_cap = builder.add_virtual_cap(log2_strict(proof.wires_cap.0.len())); - let plonk_zs_cap = - builder.add_virtual_cap(log2_strict(proof.plonk_zs_partial_products_cap.0.len())); - let quotient_polys_cap = - builder.add_virtual_cap(log2_strict(proof.quotient_polys_cap.0.len())); - - let openings = OpeningSetTarget { - constants: builder.add_virtual_extension_targets(proof.openings.constants.len()), - plonk_sigmas: builder.add_virtual_extension_targets(proof.openings.plonk_sigmas.len()), - wires: builder.add_virtual_extension_targets(proof.openings.wires.len()), - plonk_zs: builder.add_virtual_extension_targets(proof.openings.plonk_zs.len()), - plonk_zs_right: builder - .add_virtual_extension_targets(proof.openings.plonk_zs_right.len()), - partial_products: builder - .add_virtual_extension_targets(proof.openings.partial_products.len()), - quotient_polys: builder - .add_virtual_extension_targets(proof.openings.quotient_polys.len()), - }; - let query_round_proofs = (0..proof.opening_proof.query_round_proofs.len()) - .map(|_| get_fri_query_round(proof, builder)) - .collect(); - let commit_phase_merkle_caps = proof - .opening_proof - .commit_phase_merkle_caps - .iter() - .map(|r| builder.add_virtual_cap(log2_strict(r.0.len()))) - .collect(); - let opening_proof = FriProofTarget { - commit_phase_merkle_caps, - query_round_proofs, - final_poly: PolynomialCoeffsExtTarget( - builder.add_virtual_extension_targets(proof.opening_proof.final_poly.len()), - ), - pow_witness: builder.add_virtual_target(), - }; - - let proof = ProofTarget { - wires_cap, - plonk_zs_partial_products_cap: plonk_zs_cap, - quotient_polys_cap, - openings, - opening_proof, - }; - - let public_inputs = builder.add_virtual_targets(public_inputs.len()); + let proof = self.add_virtual_proof(common_data); + let public_inputs = self.add_virtual_targets(common_data.num_public_inputs); ProofWithPublicInputsTarget { proof, public_inputs, } } + fn add_virtual_proof>( + &mut self, + common_data: &CommonCircuitData, + ) -> ProofTarget { + let config = &common_data.config; + let fri_params = &common_data.fri_params; + let cap_height = 
fri_params.config.cap_height; + + let num_leaves_per_oracle = &[ + common_data.num_preprocessed_polys(), + config.num_wires, + common_data.num_zs_partial_products_polys(), + common_data.num_quotient_polys(), + ]; + + ProofTarget { + wires_cap: self.add_virtual_cap(cap_height), + plonk_zs_partial_products_cap: self.add_virtual_cap(cap_height), + quotient_polys_cap: self.add_virtual_cap(cap_height), + openings: self.add_opening_set(common_data), + opening_proof: self.add_virtual_fri_proof(num_leaves_per_oracle, fri_params), + } + } + + fn add_opening_set>( + &mut self, + common_data: &CommonCircuitData, + ) -> OpeningSetTarget { + let config = &common_data.config; + let num_challenges = config.num_challenges; + let total_partial_products = num_challenges * common_data.num_partial_products; + OpeningSetTarget { + constants: self.add_virtual_extension_targets(common_data.num_constants), + plonk_sigmas: self.add_virtual_extension_targets(config.num_routed_wires), + wires: self.add_virtual_extension_targets(config.num_wires), + plonk_zs: self.add_virtual_extension_targets(num_challenges), + plonk_zs_right: self.add_virtual_extension_targets(num_challenges), + partial_products: self.add_virtual_extension_targets(total_partial_products), + quotient_polys: self.add_virtual_extension_targets(common_data.num_quotient_polys()), + } + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use itertools::Itertools; + use log::{info, Level}; + + use super::*; + use crate::fri::reduction_strategies::FriReductionStrategy; + use crate::fri::FriConfig; + use crate::gates::noop::NoopGate; + use crate::iop::witness::{PartialWitness, Witness}; + use crate::plonk::circuit_data::VerifierOnlyCircuitData; + use crate::plonk::config::{ + GMiMCGoldilocksConfig, GenericConfig, KeccakGoldilocksConfig, PoseidonGoldilocksConfig, + }; + use crate::plonk::proof::{CompressedProofWithPublicInputs, ProofWithPublicInputs}; + use crate::plonk::prover::prove; + use crate::util::timing::TimingTree; + // Set the targets in a `ProofTarget` to their corresponding values in a `Proof`. fn set_proof_target, C: GenericConfig, const D: usize>( proof: &ProofWithPublicInputs, @@ -272,7 +238,7 @@ mod tests { } = pt; // Set public inputs. 
- for (&pi_t, &pi) in pi_targets.iter().zip(public_inputs) { + for (&pi_t, &pi) in pi_targets.iter().zip_eq(public_inputs) { pw.set_target(pi_t, pi); } @@ -283,28 +249,33 @@ mod tests { ); pw.set_cap_target(&pt.quotient_polys_cap, &proof.quotient_polys_cap); - for (&t, &x) in pt.openings.wires.iter().zip(&proof.openings.wires) { + for (&t, &x) in pt.openings.wires.iter().zip_eq(&proof.openings.wires) { pw.set_extension_target(t, x); } - for (&t, &x) in pt.openings.constants.iter().zip(&proof.openings.constants) { + for (&t, &x) in pt + .openings + .constants + .iter() + .zip_eq(&proof.openings.constants) + { pw.set_extension_target(t, x); } for (&t, &x) in pt .openings .plonk_sigmas .iter() - .zip(&proof.openings.plonk_sigmas) + .zip_eq(&proof.openings.plonk_sigmas) { pw.set_extension_target(t, x); } - for (&t, &x) in pt.openings.plonk_zs.iter().zip(&proof.openings.plonk_zs) { + for (&t, &x) in pt.openings.plonk_zs.iter().zip_eq(&proof.openings.plonk_zs) { pw.set_extension_target(t, x); } for (&t, &x) in pt .openings .plonk_zs_right .iter() - .zip(&proof.openings.plonk_zs_right) + .zip_eq(&proof.openings.plonk_zs_right) { pw.set_extension_target(t, x); } @@ -312,7 +283,7 @@ mod tests { .openings .partial_products .iter() - .zip(&proof.openings.partial_products) + .zip_eq(&proof.openings.partial_products) { pw.set_extension_target(t, x); } @@ -320,7 +291,7 @@ mod tests { .openings .quotient_polys .iter() - .zip(&proof.openings.quotient_polys) + .zip_eq(&proof.openings.quotient_polys) { pw.set_extension_target(t, x); } @@ -330,14 +301,14 @@ mod tests { pw.set_target(fpt.pow_witness, fri_proof.pow_witness); - for (&t, &x) in fpt.final_poly.0.iter().zip(&fri_proof.final_poly.coeffs) { + for (&t, &x) in fpt.final_poly.0.iter().zip_eq(&fri_proof.final_poly.coeffs) { pw.set_extension_target(t, x); } for (t, x) in fpt .commit_phase_merkle_caps .iter() - .zip(&fri_proof.commit_phase_merkle_caps) + .zip_eq(&fri_proof.commit_phase_merkle_caps) { pw.set_cap_target(t, x); } @@ -345,31 +316,31 @@ mod tests { for (qt, q) in fpt .query_round_proofs .iter() - .zip(&fri_proof.query_round_proofs) + .zip_eq(&fri_proof.query_round_proofs) { for (at, a) in qt .initial_trees_proof .evals_proofs .iter() - .zip(&q.initial_trees_proof.evals_proofs) + .zip_eq(&q.initial_trees_proof.evals_proofs) { - for (&t, &x) in at.0.iter().zip(&a.0) { + for (&t, &x) in at.0.iter().zip_eq(&a.0) { pw.set_target(t, x); } - for (&t, &x) in at.1.siblings.iter().zip(&a.1.siblings) { + for (&t, &x) in at.1.siblings.iter().zip_eq(&a.1.siblings) { pw.set_hash_target(t, x); } } - for (st, s) in qt.steps.iter().zip(&q.steps) { - for (&t, &x) in st.evals.iter().zip(&s.evals) { + for (st, s) in qt.steps.iter().zip_eq(&q.steps) { + for (&t, &x) in st.evals.iter().zip_eq(&s.evals) { pw.set_extension_target(t, x); } for (&t, &x) in st .merkle_proof .siblings .iter() - .zip(&s.merkle_proof.siblings) + .zip_eq(&s.merkle_proof.siblings) { pw.set_hash_target(t, x); } @@ -582,7 +553,7 @@ mod tests { { let mut builder = CircuitBuilder::::new(config.clone()); let mut pw = PartialWitness::new(); - let pt = proof_to_proof_target(&inner_proof, &mut builder); + let pt = builder.add_virtual_proof_with_pis(&inner_cd); set_proof_target(&inner_proof, &pt, &mut pw); let inner_data = VerifierCircuitTarget { @@ -593,7 +564,7 @@ mod tests { &inner_vd.constants_sigmas_cap, ); - builder.add_recursive_verifier(pt, inner_config, &inner_data, &inner_cd); + builder.verify_proof_with_pis(pt, inner_config, &inner_data, &inner_cd); if print_gate_counts { 
builder.print_gate_counts(0); diff --git a/plonky2/src/plonk/verifier.rs b/plonky2/src/plonk/verifier.rs index e612a1c9..cbaec6d9 100644 --- a/plonky2/src/plonk/verifier.rs +++ b/plonky2/src/plonk/verifier.rs @@ -30,6 +30,10 @@ pub(crate) fn verify_with_challenges< verifier_data: &VerifierOnlyCircuitData, common_data: &CommonCircuitData, ) -> Result<()> { + assert_eq!( + proof_with_pis.public_inputs.len(), + common_data.num_public_inputs + ); let public_inputs_hash = &proof_with_pis.get_public_inputs_hash(); let ProofWithPublicInputs { proof, .. } = proof_with_pis; From 2af85ccb8d16982ad45c425ff368ca7aacf2a06c Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Mon, 24 Jan 2022 11:16:38 -0800 Subject: [PATCH 033/143] Make `set_proof_target` publicly accessible (#447) * Make `set_proof_target` publicly accessible Same code as before, except I broke it into a few functions and renamed a couple things. * fmt --- plonky2/src/iop/witness.rs | 170 +++++++++++++++++++++++- plonky2/src/plonk/recursive_verifier.rs | 131 +----------------- 2 files changed, 170 insertions(+), 131 deletions(-) diff --git a/plonky2/src/iop/witness.rs b/plonky2/src/iop/witness.rs index 29ad513e..efe4d911 100644 --- a/plonky2/src/iop/witness.rs +++ b/plonky2/src/iop/witness.rs @@ -1,9 +1,11 @@ use std::collections::HashMap; +use itertools::Itertools; use num::{BigUint, FromPrimitive, Zero}; use plonky2_field::extension_field::{Extendable, FieldExtension}; use plonky2_field::field_types::Field; +use crate::fri::proof::{FriProof, FriProofTarget}; use crate::gadgets::arithmetic_u32::U32Target; use crate::gadgets::biguint::BigUintTarget; use crate::gadgets::nonnative::NonNativeTarget; @@ -14,7 +16,8 @@ use crate::hash::merkle_tree::MerkleCap; use crate::iop::ext_target::ExtensionTarget; use crate::iop::target::{BoolTarget, Target}; use crate::iop::wire::Wire; -use crate::plonk::config::AlgebraicHasher; +use crate::plonk::config::{AlgebraicHasher, GenericConfig}; +use crate::plonk::proof::{Proof, ProofTarget, ProofWithPublicInputs, ProofWithPublicInputsTarget}; /// A witness holds information on the values of targets in a circuit. pub trait Witness { @@ -155,6 +158,171 @@ pub trait Witness { } } + /// Set the targets in a `ProofWithPublicInputsTarget` to their corresponding values in a + /// `ProofWithPublicInputs`. + fn set_proof_with_pis_target, const D: usize>( + &mut self, + proof_with_pis: &ProofWithPublicInputs, + proof_with_pis_target: &ProofWithPublicInputsTarget, + ) where + F: RichField + Extendable, + C::Hasher: AlgebraicHasher, + { + let ProofWithPublicInputs { + proof, + public_inputs, + } = proof_with_pis; + let ProofWithPublicInputsTarget { + proof: pt, + public_inputs: pi_targets, + } = proof_with_pis_target; + + // Set public inputs. + for (&pi_t, &pi) in pi_targets.iter().zip_eq(public_inputs) { + self.set_target(pi_t, pi); + } + + self.set_proof_target(proof, pt); + } + + /// Set the targets in a `ProofTarget` to their corresponding values in a `Proof`. 
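Combined with `add_virtual_proof_with_pis` and `verify_proof_with_pis` from the previous patch, the new witness helpers give a recursion setup roughly like the following (condensed from the updated test; `inner_proof`, `inner_vd`, `inner_cd`, `inner_config` and `config` are assumed to come from an already-built and proven inner circuit):

    let mut builder = CircuitBuilder::<F, D>::new(config.clone());
    let mut pw = PartialWitness::new();

    // Allocate proof targets shaped by the inner circuit's `CommonCircuitData`,
    // then fill them from the concrete inner proof.
    let pt = builder.add_virtual_proof_with_pis(&inner_cd);
    pw.set_proof_with_pis_target(&inner_proof, &pt);

    let inner_data = VerifierCircuitTarget {
        constants_sigmas_cap: builder.add_virtual_cap(inner_config.fri_config.cap_height),
    };
    pw.set_cap_target(
        &inner_data.constants_sigmas_cap,
        &inner_vd.constants_sigmas_cap,
    );

    // Add the constraints that verify the inner proof.
    builder.verify_proof_with_pis(pt, inner_config, &inner_data, &inner_cd);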
+ fn set_proof_target, const D: usize>( + &mut self, + proof: &Proof, + proof_target: &ProofTarget, + ) where + F: RichField + Extendable, + C::Hasher: AlgebraicHasher, + { + self.set_cap_target(&proof_target.wires_cap, &proof.wires_cap); + self.set_cap_target( + &proof_target.plonk_zs_partial_products_cap, + &proof.plonk_zs_partial_products_cap, + ); + self.set_cap_target(&proof_target.quotient_polys_cap, &proof.quotient_polys_cap); + + for (&t, &x) in proof_target + .openings + .wires + .iter() + .zip_eq(&proof.openings.wires) + { + self.set_extension_target(t, x); + } + for (&t, &x) in proof_target + .openings + .constants + .iter() + .zip_eq(&proof.openings.constants) + { + self.set_extension_target(t, x); + } + for (&t, &x) in proof_target + .openings + .plonk_sigmas + .iter() + .zip_eq(&proof.openings.plonk_sigmas) + { + self.set_extension_target(t, x); + } + for (&t, &x) in proof_target + .openings + .plonk_zs + .iter() + .zip_eq(&proof.openings.plonk_zs) + { + self.set_extension_target(t, x); + } + for (&t, &x) in proof_target + .openings + .plonk_zs_right + .iter() + .zip_eq(&proof.openings.plonk_zs_right) + { + self.set_extension_target(t, x); + } + for (&t, &x) in proof_target + .openings + .partial_products + .iter() + .zip_eq(&proof.openings.partial_products) + { + self.set_extension_target(t, x); + } + for (&t, &x) in proof_target + .openings + .quotient_polys + .iter() + .zip_eq(&proof.openings.quotient_polys) + { + self.set_extension_target(t, x); + } + + self.set_fri_proof_target(&proof.opening_proof, &proof_target.opening_proof); + } + + /// Set the targets in a `FriProofTarget` to their corresponding values in a `FriProof`. + fn set_fri_proof_target, const D: usize>( + &mut self, + fri_proof: &FriProof, + fri_proof_target: &FriProofTarget, + ) where + F: RichField + Extendable, + { + self.set_target(fri_proof_target.pow_witness, fri_proof.pow_witness); + + for (&t, &x) in fri_proof_target + .final_poly + .0 + .iter() + .zip_eq(&fri_proof.final_poly.coeffs) + { + self.set_extension_target(t, x); + } + + for (t, x) in fri_proof_target + .commit_phase_merkle_caps + .iter() + .zip_eq(&fri_proof.commit_phase_merkle_caps) + { + self.set_cap_target(t, x); + } + + for (qt, q) in fri_proof_target + .query_round_proofs + .iter() + .zip_eq(&fri_proof.query_round_proofs) + { + for (at, a) in qt + .initial_trees_proof + .evals_proofs + .iter() + .zip_eq(&q.initial_trees_proof.evals_proofs) + { + for (&t, &x) in at.0.iter().zip_eq(&a.0) { + self.set_target(t, x); + } + for (&t, &x) in at.1.siblings.iter().zip_eq(&a.1.siblings) { + self.set_hash_target(t, x); + } + } + + for (st, s) in qt.steps.iter().zip_eq(&q.steps) { + for (&t, &x) in st.evals.iter().zip_eq(&s.evals) { + self.set_extension_target(t, x); + } + for (&t, &x) in st + .merkle_proof + .siblings + .iter() + .zip_eq(&s.merkle_proof.siblings) + { + self.set_hash_target(t, x); + } + } + } + } + fn set_wire(&mut self, wire: Wire, value: F) { self.set_target(Target::Wire(wire), value) } diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index 6b3b6b0c..fbe1ad53 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -204,7 +204,6 @@ impl, const D: usize> CircuitBuilder { #[cfg(test)] mod tests { use anyhow::Result; - use itertools::Itertools; use log::{info, Level}; use super::*; @@ -220,134 +219,6 @@ mod tests { use crate::plonk::prover::prove; use crate::util::timing::TimingTree; - // Set the targets in a `ProofTarget` to their 
corresponding values in a `Proof`. - fn set_proof_target, C: GenericConfig, const D: usize>( - proof: &ProofWithPublicInputs, - pt: &ProofWithPublicInputsTarget, - pw: &mut PartialWitness, - ) where - C::Hasher: AlgebraicHasher, - { - let ProofWithPublicInputs { - proof, - public_inputs, - } = proof; - let ProofWithPublicInputsTarget { - proof: pt, - public_inputs: pi_targets, - } = pt; - - // Set public inputs. - for (&pi_t, &pi) in pi_targets.iter().zip_eq(public_inputs) { - pw.set_target(pi_t, pi); - } - - pw.set_cap_target(&pt.wires_cap, &proof.wires_cap); - pw.set_cap_target( - &pt.plonk_zs_partial_products_cap, - &proof.plonk_zs_partial_products_cap, - ); - pw.set_cap_target(&pt.quotient_polys_cap, &proof.quotient_polys_cap); - - for (&t, &x) in pt.openings.wires.iter().zip_eq(&proof.openings.wires) { - pw.set_extension_target(t, x); - } - for (&t, &x) in pt - .openings - .constants - .iter() - .zip_eq(&proof.openings.constants) - { - pw.set_extension_target(t, x); - } - for (&t, &x) in pt - .openings - .plonk_sigmas - .iter() - .zip_eq(&proof.openings.plonk_sigmas) - { - pw.set_extension_target(t, x); - } - for (&t, &x) in pt.openings.plonk_zs.iter().zip_eq(&proof.openings.plonk_zs) { - pw.set_extension_target(t, x); - } - for (&t, &x) in pt - .openings - .plonk_zs_right - .iter() - .zip_eq(&proof.openings.plonk_zs_right) - { - pw.set_extension_target(t, x); - } - for (&t, &x) in pt - .openings - .partial_products - .iter() - .zip_eq(&proof.openings.partial_products) - { - pw.set_extension_target(t, x); - } - for (&t, &x) in pt - .openings - .quotient_polys - .iter() - .zip_eq(&proof.openings.quotient_polys) - { - pw.set_extension_target(t, x); - } - - let fri_proof = &proof.opening_proof; - let fpt = &pt.opening_proof; - - pw.set_target(fpt.pow_witness, fri_proof.pow_witness); - - for (&t, &x) in fpt.final_poly.0.iter().zip_eq(&fri_proof.final_poly.coeffs) { - pw.set_extension_target(t, x); - } - - for (t, x) in fpt - .commit_phase_merkle_caps - .iter() - .zip_eq(&fri_proof.commit_phase_merkle_caps) - { - pw.set_cap_target(t, x); - } - - for (qt, q) in fpt - .query_round_proofs - .iter() - .zip_eq(&fri_proof.query_round_proofs) - { - for (at, a) in qt - .initial_trees_proof - .evals_proofs - .iter() - .zip_eq(&q.initial_trees_proof.evals_proofs) - { - for (&t, &x) in at.0.iter().zip_eq(&a.0) { - pw.set_target(t, x); - } - for (&t, &x) in at.1.siblings.iter().zip_eq(&a.1.siblings) { - pw.set_hash_target(t, x); - } - } - - for (st, s) in qt.steps.iter().zip_eq(&q.steps) { - for (&t, &x) in st.evals.iter().zip_eq(&s.evals) { - pw.set_extension_target(t, x); - } - for (&t, &x) in st - .merkle_proof - .siblings - .iter() - .zip_eq(&s.merkle_proof.siblings) - { - pw.set_hash_target(t, x); - } - } - } - } - #[test] #[ignore] fn test_recursive_verifier() -> Result<()> { @@ -554,7 +425,7 @@ mod tests { let mut builder = CircuitBuilder::::new(config.clone()); let mut pw = PartialWitness::new(); let pt = builder.add_virtual_proof_with_pis(&inner_cd); - set_proof_target(&inner_proof, &pt, &mut pw); + pw.set_proof_with_pis_target(&inner_proof, &pt); let inner_data = VerifierCircuitTarget { constants_sigmas_cap: builder.add_virtual_cap(inner_config.fri_config.cap_height), From 483799746b5d7555b8b018b3f0680e0b894344b4 Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Mon, 24 Jan 2022 13:35:26 -0800 Subject: [PATCH 034/143] Jemalloc warnings in Readme (#448) --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index d4bb04fd..1db24c69 100644 --- a/README.md 
+++ b/README.md @@ -30,6 +30,13 @@ RUST_LOG=debug RUSTFLAGS=-Ctarget-cpu=native cargo test --release test_recursive ``` +## Jemalloc + +By default, Plonky2 uses the [Jemalloc](http://jemalloc.net) memory allocator due to its superior performance. Currently, it changes the default allocator of any binary to which it is linked. You can disable this behavior by removing the corresponding lines in [`plonky2/src/lib.rs`](https://github.com/mir-protocol/plonky2/blob/main/plonky2/src/lib.rs). + +Jemalloc is known to cause crashes when a binary compiled for x86 is run on an Apple silicon-based Mac under [Rosetta 2](https://support.apple.com/en-us/HT211861). If you are experiencing crashes on your Apple silicon Mac, run `rustc --print target-libdir`. The output should contain `aarch64-apple-darwin`. If the output contains `x86_64-apple-darwin`, then you are running the Rust toolchain for x86; we recommend switching to the native ARM version. + + ## Copyright Plonky2 was developed by Polygon Zero (formerly Mir). While we plan to adopt an open source license, we haven't selected one yet, so all rights are reserved for the time being. Please reach out to us if you have thoughts on licensing. From c0ac79e2e1a1a71e124085a216333a8e471e6661 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Wed, 26 Jan 2022 00:09:29 -0800 Subject: [PATCH 035/143] Beginning of STARK implementation (#413) * Beginning of STARK implementation * PR feedback * minor * Suppress warnings for now --- Cargo.toml | 2 +- plonky2/src/fri/oracle.rs | 4 +- plonky2/src/fri/reduction_strategies.rs | 2 +- plonky2/src/hash/poseidon.rs | 6 +- plonky2/src/iop/mod.rs | 2 +- plonky2/src/plonk/circuit_builder.rs | 2 +- plonky2/src/util/mod.rs | 4 +- starky/Cargo.toml | 14 +++ starky/src/config.rs | 45 ++++++++++ starky/src/constraint_consumer.rs | 110 ++++++++++++++++++++++++ starky/src/lib.rs | 14 +++ starky/src/proof.rs | 35 ++++++++ starky/src/prover.rs | 83 ++++++++++++++++++ starky/src/stark.rs | 62 +++++++++++++ starky/src/vars.rs | 26 ++++++ system_zero/Cargo.toml | 12 +++ system_zero/src/column_layout.rs | 86 ++++++++++++++++++ system_zero/src/core_registers.rs | 80 +++++++++++++++++ system_zero/src/lib.rs | 12 +++ system_zero/src/memory.rs | 16 ++++ system_zero/src/permutation_unit.rs | 86 ++++++++++++++++++ system_zero/src/public_input_layout.rs | 7 ++ system_zero/src/system_zero.rs | 109 +++++++++++++++++++++++ 23 files changed, 808 insertions(+), 11 deletions(-) create mode 100644 starky/Cargo.toml create mode 100644 starky/src/config.rs create mode 100644 starky/src/constraint_consumer.rs create mode 100644 starky/src/lib.rs create mode 100644 starky/src/proof.rs create mode 100644 starky/src/prover.rs create mode 100644 starky/src/stark.rs create mode 100644 starky/src/vars.rs create mode 100644 system_zero/Cargo.toml create mode 100644 system_zero/src/column_layout.rs create mode 100644 system_zero/src/core_registers.rs create mode 100644 system_zero/src/lib.rs create mode 100644 system_zero/src/memory.rs create mode 100644 system_zero/src/permutation_unit.rs create mode 100644 system_zero/src/public_input_layout.rs create mode 100644 system_zero/src/system_zero.rs diff --git a/Cargo.toml b/Cargo.toml index 2bd67bb6..cc070d96 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["field", "insertion", "plonky2", "util", "waksman"] +members = ["field", "insertion", "plonky2", "starky", "system_zero", "util", "waksman"] [profile.release] opt-level = 3 diff --git a/plonky2/src/fri/oracle.rs 
b/plonky2/src/fri/oracle.rs index 02db3140..c016f0ee 100644 --- a/plonky2/src/fri/oracle.rs +++ b/plonky2/src/fri/oracle.rs @@ -36,7 +36,7 @@ impl, C: GenericConfig, const D: usize> PolynomialBatch { /// Creates a list polynomial commitment for the polynomials interpolating the values in `values`. - pub(crate) fn from_values( + pub fn from_values( values: Vec>, rate_bits: usize, blinding: bool, @@ -61,7 +61,7 @@ impl, C: GenericConfig, const D: usize> } /// Creates a list polynomial commitment for the polynomials `polynomials`. - pub(crate) fn from_coeffs( + pub fn from_coeffs( polynomials: Vec>, rate_bits: usize, blinding: bool, diff --git a/plonky2/src/fri/reduction_strategies.rs b/plonky2/src/fri/reduction_strategies.rs index c0423c2c..49eda3ba 100644 --- a/plonky2/src/fri/reduction_strategies.rs +++ b/plonky2/src/fri/reduction_strategies.rs @@ -22,7 +22,7 @@ pub enum FriReductionStrategy { impl FriReductionStrategy { /// The arity of each FRI reduction step, expressed as the log2 of the actual arity. - pub(crate) fn reduction_arity_bits( + pub fn reduction_arity_bits( &self, mut degree_bits: usize, rate_bits: usize, diff --git a/plonky2/src/hash/poseidon.rs b/plonky2/src/hash/poseidon.rs index 81fc3937..9dc5f394 100644 --- a/plonky2/src/hash/poseidon.rs +++ b/plonky2/src/hash/poseidon.rs @@ -21,10 +21,10 @@ use crate::plonk::config::{AlgebraicHasher, Hasher}; // // NB: Changing any of these values will require regenerating all of // the precomputed constant arrays in this file. -pub(crate) const HALF_N_FULL_ROUNDS: usize = 4; +pub const HALF_N_FULL_ROUNDS: usize = 4; pub(crate) const N_FULL_ROUNDS_TOTAL: usize = 2 * HALF_N_FULL_ROUNDS; -pub(crate) const N_PARTIAL_ROUNDS: usize = 22; -pub(crate) const N_ROUNDS: usize = N_FULL_ROUNDS_TOTAL + N_PARTIAL_ROUNDS; +pub const N_PARTIAL_ROUNDS: usize = 22; +pub const N_ROUNDS: usize = N_FULL_ROUNDS_TOTAL + N_PARTIAL_ROUNDS; const MAX_WIDTH: usize = 12; // we only have width 8 and 12, and 12 is bigger. :) #[inline(always)] diff --git a/plonky2/src/iop/mod.rs b/plonky2/src/iop/mod.rs index cc11fb56..de315a09 100644 --- a/plonky2/src/iop/mod.rs +++ b/plonky2/src/iop/mod.rs @@ -1,5 +1,5 @@ //! Logic common to multiple IOPs. 
-pub(crate) mod challenger; +pub mod challenger; pub mod ext_target; pub mod generator; pub mod target; diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index e4abe611..d9bcc1cf 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -389,7 +389,7 @@ impl, const D: usize> CircuitBuilder { let fri_config = &self.config.fri_config; let reduction_arity_bits = fri_config.reduction_strategy.reduction_arity_bits( degree_bits, - self.config.fri_config.rate_bits, + fri_config.rate_bits, fri_config.num_query_rounds, ); FriParams { diff --git a/plonky2/src/util/mod.rs b/plonky2/src/util/mod.rs index 4cf7119a..9342a75e 100644 --- a/plonky2/src/util/mod.rs +++ b/plonky2/src/util/mod.rs @@ -6,8 +6,8 @@ pub(crate) mod marking; pub(crate) mod partial_products; pub mod reducing; pub mod serialization; -pub(crate) mod strided_view; -pub(crate) mod timing; +pub mod strided_view; +pub mod timing; pub(crate) fn transpose_poly_values(polys: Vec>) -> Vec> { let poly_values = polys.into_iter().map(|p| p.values).collect::>(); diff --git a/starky/Cargo.toml b/starky/Cargo.toml new file mode 100644 index 00000000..4e67856d --- /dev/null +++ b/starky/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "starky" +description = "Implementation of STARKs" +version = "0.1.0" +edition = "2021" + +[dependencies] +plonky2 = { path = "../plonky2" } +plonky2_util = { path = "../util" } +anyhow = "1.0.40" +env_logger = "0.9.0" +itertools = "0.10.0" +log = "0.4.14" +rayon = "1.5.1" diff --git a/starky/src/config.rs b/starky/src/config.rs new file mode 100644 index 00000000..24fb725a --- /dev/null +++ b/starky/src/config.rs @@ -0,0 +1,45 @@ +use plonky2::fri::reduction_strategies::FriReductionStrategy; +use plonky2::fri::{FriConfig, FriParams}; + +pub struct StarkConfig { + pub security_bits: usize, + + /// The number of challenge points to generate, for IOPs that have soundness errors of (roughly) + /// `degree / |F|`. + pub num_challenges: usize, + + pub fri_config: FriConfig, +} + +impl StarkConfig { + /// A typical configuration with a rate of 2, resulting in fast but large proofs. + /// Targets ~100 bit conjectured security. + pub fn standard_fast_config() -> Self { + Self { + security_bits: 100, + num_challenges: 2, + fri_config: FriConfig { + rate_bits: 1, + cap_height: 4, + proof_of_work_bits: 10, + reduction_strategy: FriReductionStrategy::ConstantArityBits(4, 5), + num_query_rounds: 90, + }, + } + } + + pub(crate) fn fri_params(&self, degree_bits: usize) -> FriParams { + let fri_config = &self.fri_config; + let reduction_arity_bits = fri_config.reduction_strategy.reduction_arity_bits( + degree_bits, + fri_config.rate_bits, + fri_config.num_query_rounds, + ); + FriParams { + config: fri_config.clone(), + hiding: false, + degree_bits, + reduction_arity_bits, + } + } +} diff --git a/starky/src/constraint_consumer.rs b/starky/src/constraint_consumer.rs new file mode 100644 index 00000000..09b5397f --- /dev/null +++ b/starky/src/constraint_consumer.rs @@ -0,0 +1,110 @@ +use std::marker::PhantomData; + +use plonky2::field::extension_field::Extendable; +use plonky2::field::packed_field::PackedField; +use plonky2::hash::hash_types::RichField; +use plonky2::iop::ext_target::ExtensionTarget; +use plonky2::iop::target::Target; +use plonky2::plonk::circuit_builder::CircuitBuilder; + +pub struct ConstraintConsumer { + /// A random value used to combine multiple constraints into one. 
+ alpha: P::Scalar, + + /// A running sum of constraints that have been emitted so far, scaled by powers of alpha. + constraint_acc: P, + + /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated + /// with the first trace row, and zero at other points in the subgroup. + lagrange_basis_first: P::Scalar, + + /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated + /// with the last trace row, and zero at other points in the subgroup. + lagrange_basis_last: P::Scalar, +} + +impl ConstraintConsumer
<P: PackedField>
{ + /// Add one constraint. + pub fn one(&mut self, constraint: P) { + self.constraint_acc *= self.alpha; + self.constraint_acc += constraint; + } + + /// Add a series of constraints. + pub fn many(&mut self, constraints: impl IntoIterator) { + constraints + .into_iter() + .for_each(|constraint| self.one(constraint)); + } + + /// Add one constraint, but first multiply it by a filter such that it will only apply to the + /// first row of the trace. + pub fn one_first_row(&mut self, constraint: P) { + self.one(constraint * self.lagrange_basis_first); + } + + /// Add one constraint, but first multiply it by a filter such that it will only apply to the + /// last row of the trace. + pub fn one_last_row(&mut self, constraint: P) { + self.one(constraint * self.lagrange_basis_last); + } +} + +pub struct RecursiveConstraintConsumer, const D: usize> { + /// A random value used to combine multiple constraints into one. + alpha: Target, + + /// A running sum of constraints that have been emitted so far, scaled by powers of alpha. + constraint_acc: ExtensionTarget, + + /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated + /// with the first trace row, and zero at other points in the subgroup. + lagrange_basis_first: ExtensionTarget, + + /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated + /// with the last trace row, and zero at other points in the subgroup. + lagrange_basis_last: ExtensionTarget, + + _phantom: PhantomData, +} + +impl, const D: usize> RecursiveConstraintConsumer { + /// Add one constraint. + pub fn one(&mut self, builder: &mut CircuitBuilder, constraint: ExtensionTarget) { + self.constraint_acc = + builder.scalar_mul_add_extension(self.alpha, self.constraint_acc, constraint); + } + + /// Add a series of constraints. + pub fn many( + &mut self, + builder: &mut CircuitBuilder, + constraints: impl IntoIterator>, + ) { + constraints + .into_iter() + .for_each(|constraint| self.one(builder, constraint)); + } + + /// Add one constraint, but first multiply it by a filter such that it will only apply to the + /// first row of the trace. + pub fn one_first_row( + &mut self, + builder: &mut CircuitBuilder, + constraint: ExtensionTarget, + ) { + let filtered_constraint = builder.mul_extension(constraint, self.lagrange_basis_first); + self.one(builder, filtered_constraint); + } + + /// Add one constraint, but first multiply it by a filter such that it will only apply to the + /// last row of the trace. + pub fn one_last_row( + &mut self, + builder: &mut CircuitBuilder, + constraint: ExtensionTarget, + ) { + let filtered_constraint = builder.mul_extension(constraint, self.lagrange_basis_last); + self.one(builder, filtered_constraint); + } +} diff --git a/starky/src/lib.rs b/starky/src/lib.rs new file mode 100644 index 00000000..be28a01e --- /dev/null +++ b/starky/src/lib.rs @@ -0,0 +1,14 @@ +// TODO: Remove these when crate is closer to being finished. 
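Stepping back to `ConstraintConsumer::one` above: the accumulator is a Horner-style random linear combination, so after feeding constraints c_0, ..., c_{k-1} it holds c_0*alpha^(k-1) + c_1*alpha^(k-2) + ... + c_(k-1). A scalar sketch over the Goldilocks modulus (plain `u128` arithmetic instead of the crate's field types; the helper name is illustrative):

    /// Fold constraints into one value the way `ConstraintConsumer::one` does, but
    /// with modular integers: acc <- acc * alpha + c at every step.
    fn combine_constraints(alpha: u128, constraints: &[u128]) -> u128 {
        const GOLDILOCKS: u128 = 0xffff_ffff_0000_0001; // 2^64 - 2^32 + 1
        let alpha = alpha % GOLDILOCKS;
        let mut acc: u128 = 0;
        for &c in constraints {
            acc = (acc * alpha + c % GOLDILOCKS) % GOLDILOCKS;
        }
        acc
    }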
+#![allow(dead_code)] +#![allow(unused_variables)] +#![allow(unreachable_code)] +#![allow(clippy::diverging_sub_expression)] +#![allow(incomplete_features)] +#![feature(generic_const_exprs)] + +pub mod config; +pub mod constraint_consumer; +pub mod proof; +pub mod prover; +pub mod stark; +pub mod vars; diff --git a/starky/src/proof.rs b/starky/src/proof.rs new file mode 100644 index 00000000..1cdbbd3c --- /dev/null +++ b/starky/src/proof.rs @@ -0,0 +1,35 @@ +use plonky2::field::extension_field::Extendable; +use plonky2::fri::proof::{CompressedFriProof, FriProof}; +use plonky2::hash::hash_types::RichField; +use plonky2::hash::merkle_tree::MerkleCap; +use plonky2::plonk::config::GenericConfig; + +pub struct StarkProof, C: GenericConfig, const D: usize> { + /// Merkle cap of LDEs of trace values. + pub trace_cap: MerkleCap, + /// Purported values of each polynomial at the challenge point. + pub openings: StarkOpeningSet, + /// A batch FRI argument for all openings. + pub opening_proof: FriProof, +} + +pub struct CompressedStarkProof< + F: RichField + Extendable, + C: GenericConfig, + const D: usize, +> { + /// Merkle cap of LDEs of trace values. + pub trace_cap: MerkleCap, + /// Purported values of each polynomial at the challenge point. + pub openings: StarkOpeningSet, + /// A batch FRI argument for all openings. + pub opening_proof: CompressedFriProof, +} + +/// Purported values of each polynomial at the challenge point. +pub struct StarkOpeningSet, const D: usize> { + pub local_values: Vec, + pub next_values: Vec, + pub permutation_zs: Vec, + pub quotient_polys: Vec, +} diff --git a/starky/src/prover.rs b/starky/src/prover.rs new file mode 100644 index 00000000..bda478e5 --- /dev/null +++ b/starky/src/prover.rs @@ -0,0 +1,83 @@ +use itertools::Itertools; +use plonky2::field::extension_field::Extendable; +use plonky2::field::polynomial::PolynomialValues; +use plonky2::fri::oracle::PolynomialBatch; +use plonky2::fri::prover::fri_proof; +use plonky2::hash::hash_types::RichField; +use plonky2::iop::challenger::Challenger; +use plonky2::plonk::config::GenericConfig; +use plonky2::timed; +use plonky2::util::timing::TimingTree; +use plonky2::util::transpose; +use plonky2_util::log2_strict; +use rayon::prelude::*; + +use crate::config::StarkConfig; +use crate::proof::StarkProof; +use crate::stark::Stark; + +pub fn prove( + stark: S, + config: StarkConfig, + trace: Vec<[F; S::COLUMNS]>, + timing: &mut TimingTree, +) -> StarkProof +where + F: RichField + Extendable, + C: GenericConfig, + S: Stark, + [(); S::COLUMNS]:, +{ + let degree_bits = log2_strict(trace.len()); + + let trace_vecs = trace.into_iter().map(|row| row.to_vec()).collect_vec(); + let trace_col_major: Vec> = transpose(&trace_vecs); + + let trace_poly_values: Vec> = timed!( + timing, + "compute trace polynomials", + trace_col_major + .par_iter() + .map(|column| PolynomialValues::new(column.clone())) + .collect() + ); + + let rate_bits = config.fri_config.rate_bits; + let cap_height = config.fri_config.cap_height; + let trace_commitment = timed!( + timing, + "compute trace commitment", + PolynomialBatch::::from_values( + trace_poly_values, + rate_bits, + false, + cap_height, + timing, + None, + ) + ); + + let trace_cap = trace_commitment.merkle_tree.cap; + let openings = todo!(); + + let initial_merkle_trees = todo!(); + let lde_polynomial_coeffs = todo!(); + let lde_polynomial_values = todo!(); + let mut challenger = Challenger::new(); + let fri_params = config.fri_params(degree_bits); + + let opening_proof = fri_proof::( + 
initial_merkle_trees, + lde_polynomial_coeffs, + lde_polynomial_values, + &mut challenger, + &fri_params, + timing, + ); + + StarkProof { + trace_cap, + openings, + opening_proof, + } +} diff --git a/starky/src/stark.rs b/starky/src/stark.rs new file mode 100644 index 00000000..8d6abb69 --- /dev/null +++ b/starky/src/stark.rs @@ -0,0 +1,62 @@ +use plonky2::field::extension_field::{Extendable, FieldExtension}; +use plonky2::field::packed_field::PackedField; +use plonky2::hash::hash_types::RichField; +use plonky2::plonk::circuit_builder::CircuitBuilder; + +use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; +use crate::vars::StarkEvaluationTargets; +use crate::vars::StarkEvaluationVars; + +/// Represents a STARK system. +pub trait Stark, const D: usize> { + /// The total number of columns in the trace. + const COLUMNS: usize; + /// The number of public inputs. + const PUBLIC_INPUTS: usize; + + /// Evaluate constraints at a vector of points. + /// + /// The points are elements of a field `FE`, a degree `D2` extension of `F`. This lets us + /// evaluate constraints over a larger domain if desired. This can also be called with `FE = F` + /// and `D2 = 1`, in which case we are using the trivial extension, i.e. just evaluating + /// constraints over `F`. + fn eval_packed_generic( + &self, + vars: StarkEvaluationVars, + yield_constr: &mut ConstraintConsumer
<P>
, + ) where + FE: FieldExtension, + P: PackedField; + + /// Evaluate constraints at a vector of points from the base field `F`. + fn eval_packed_base>( + &self, + vars: StarkEvaluationVars, + yield_constr: &mut ConstraintConsumer
<P>
, + ) { + self.eval_packed_generic(vars, yield_constr) + } + + /// Evaluate constraints at a single point from the degree `D` extension field. + fn eval_ext( + &self, + vars: StarkEvaluationVars< + F::Extension, + F::Extension, + { Self::COLUMNS }, + { Self::PUBLIC_INPUTS }, + >, + yield_constr: &mut ConstraintConsumer, + ) { + self.eval_packed_generic(vars, yield_constr) + } + + /// Evaluate constraints at a vector of points from the degree `D` extension field. This is like + /// `eval_ext`, except in the context of a recursive circuit. + fn eval_ext_recursively( + &self, + builder: &mut CircuitBuilder, + vars: StarkEvaluationTargets, + yield_constr: &mut RecursiveConstraintConsumer, + ); +} diff --git a/starky/src/vars.rs b/starky/src/vars.rs new file mode 100644 index 00000000..cb83aeb7 --- /dev/null +++ b/starky/src/vars.rs @@ -0,0 +1,26 @@ +use plonky2::field::field_types::Field; +use plonky2::field::packed_field::PackedField; +use plonky2::iop::ext_target::ExtensionTarget; + +#[derive(Debug, Copy, Clone)] +pub struct StarkEvaluationVars<'a, F, P, const COLUMNS: usize, const PUBLIC_INPUTS: usize> +where + F: Field, + P: PackedField, +{ + pub local_values: &'a [P; COLUMNS], + pub next_values: &'a [P; COLUMNS], + pub public_inputs: &'a [P::Scalar; PUBLIC_INPUTS], +} + +#[derive(Debug, Copy, Clone)] +pub struct StarkEvaluationTargets< + 'a, + const D: usize, + const COLUMNS: usize, + const PUBLIC_INPUTS: usize, +> { + pub local_values: &'a [ExtensionTarget; COLUMNS], + pub next_values: &'a [ExtensionTarget; COLUMNS], + pub public_inputs: &'a [ExtensionTarget; PUBLIC_INPUTS], +} diff --git a/system_zero/Cargo.toml b/system_zero/Cargo.toml new file mode 100644 index 00000000..b908dea0 --- /dev/null +++ b/system_zero/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "system_zero" +description = "A VM whose execution can be verified with STARKs; designed for proving Ethereum transactions" +version = "0.1.0" +edition = "2021" + +[dependencies] +plonky2 = { path = "../plonky2" } +starky = { path = "../starky" } +anyhow = "1.0.40" +env_logger = "0.9.0" +log = "0.4.14" diff --git a/system_zero/src/column_layout.rs b/system_zero/src/column_layout.rs new file mode 100644 index 00000000..3d8fc2c0 --- /dev/null +++ b/system_zero/src/column_layout.rs @@ -0,0 +1,86 @@ +use plonky2::hash::hashing::SPONGE_WIDTH; +use plonky2::hash::poseidon; + +//// CORE REGISTERS + +/// A cycle counter. Starts at 0; increments by 1. +pub(crate) const COL_CLOCK: usize = 0; + +/// A column which contains the values `[0, ... 2^16 - 1]`, potentially with duplicates. Used for +/// 16-bit range checks. +/// +/// For ease of verification, we enforce that it must begin with 0 and end with `2^16 - 1`, and each +/// delta must be either 0 or 1. +pub(crate) const COL_RANGE_16: usize = COL_CLOCK + 1; + +/// Pointer to the current instruction. +pub(crate) const COL_INSTRUCTION_PTR: usize = COL_RANGE_16 + 1; +/// Pointer to the base of the current call's stack frame. +pub(crate) const COL_FRAME_PTR: usize = COL_INSTRUCTION_PTR + 1; +/// Pointer to the tip of the current call's stack frame. 
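Relating to the `COL_RANGE_16` column described a few lines up: a trace generator can satisfy its constraints (first value 0, last value 2^16 - 1, per-row delta of 0 or 1) by clamping the row index. A hypothetical helper, not part of the patch:

    /// Values for the 16-bit range-check column of a trace with `n >= 1 << 16` rows.
    fn range_16_column(n: usize) -> Vec<u64> {
        assert!(n >= 1 << 16, "need at least 2^16 rows to cover every 16-bit value");
        (0..n).map(|i| (i as u64).min((1 << 16) - 1)).collect()
    }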
+pub(crate) const COL_STACK_PTR: usize = COL_FRAME_PTR + 1; + +//// PERMUTATION UNIT + +const START_PERMUTATION_UNIT: usize = COL_STACK_PTR + 1; + +pub(crate) const fn col_permutation_full_first(round: usize, i: usize) -> usize { + debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_PERMUTATION_UNIT + round * SPONGE_WIDTH + i +} + +const START_PERMUTATION_PARTIAL: usize = + col_permutation_full_first(poseidon::HALF_N_FULL_ROUNDS - 1, SPONGE_WIDTH - 1) + 1; + +pub(crate) const fn col_permutation_partial(round: usize) -> usize { + debug_assert!(round < poseidon::N_PARTIAL_ROUNDS); + START_PERMUTATION_PARTIAL + round +} + +const START_PERMUTATION_FULL_SECOND: usize = COL_STACK_PTR + 1; + +pub(crate) const fn col_permutation_full_second(round: usize, i: usize) -> usize { + debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_PERMUTATION_FULL_SECOND + round * SPONGE_WIDTH + i +} + +pub(crate) const fn col_permutation_input(i: usize) -> usize { + col_permutation_full_first(0, i) +} + +pub(crate) const fn col_permutation_output(i: usize) -> usize { + debug_assert!(i < SPONGE_WIDTH); + col_permutation_full_second(poseidon::HALF_N_FULL_ROUNDS, i) +} + +const END_PERMUTATION_UNIT: usize = col_permutation_output(SPONGE_WIDTH - 1); + +//// MEMORY UNITS + +//// DECOMPOSITION UNITS + +const START_DECOMPOSITION_UNITS: usize = END_PERMUTATION_UNIT + 1; + +const NUM_DECOMPOSITION_UNITS: usize = 4; +/// The number of bits associated with a single decomposition unit. +const DECOMPOSITION_UNIT_BITS: usize = 32; +/// One column for the value being decomposed, plus one column per bit. +const DECOMPOSITION_UNIT_COLS: usize = 1 + DECOMPOSITION_UNIT_BITS; + +pub(crate) const fn col_decomposition_input(unit: usize) -> usize { + debug_assert!(unit < NUM_DECOMPOSITION_UNITS); + START_DECOMPOSITION_UNITS + unit * DECOMPOSITION_UNIT_COLS +} + +pub(crate) const fn col_decomposition_bit(unit: usize, bit: usize) -> usize { + debug_assert!(unit < NUM_DECOMPOSITION_UNITS); + debug_assert!(bit < DECOMPOSITION_UNIT_BITS); + START_DECOMPOSITION_UNITS + unit * DECOMPOSITION_UNIT_COLS + 1 + bit +} + +const END_DECOMPOSITION_UNITS: usize = + START_DECOMPOSITION_UNITS + DECOMPOSITION_UNIT_COLS * NUM_DECOMPOSITION_UNITS; + +pub(crate) const NUM_COLUMNS: usize = END_DECOMPOSITION_UNITS; diff --git a/system_zero/src/core_registers.rs b/system_zero/src/core_registers.rs new file mode 100644 index 00000000..249c16a3 --- /dev/null +++ b/system_zero/src/core_registers.rs @@ -0,0 +1,80 @@ +use plonky2::field::extension_field::{Extendable, FieldExtension}; +use plonky2::field::packed_field::PackedField; +use plonky2::hash::hash_types::RichField; +use plonky2::plonk::circuit_builder::CircuitBuilder; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; +use starky::vars::StarkEvaluationTargets; +use starky::vars::StarkEvaluationVars; + +use crate::column_layout::{ + COL_CLOCK, COL_FRAME_PTR, COL_INSTRUCTION_PTR, COL_RANGE_16, COL_STACK_PTR, NUM_COLUMNS, +}; +use crate::public_input_layout::NUM_PUBLIC_INPUTS; +use crate::system_zero::SystemZero; + +impl, const D: usize> SystemZero { + pub(crate) fn generate_first_row_core_registers(&self, first_values: &mut [F; NUM_COLUMNS]) { + first_values[COL_CLOCK] = F::ZERO; + first_values[COL_RANGE_16] = F::ZERO; + first_values[COL_INSTRUCTION_PTR] = F::ZERO; + first_values[COL_FRAME_PTR] = F::ZERO; + first_values[COL_STACK_PTR] = F::ZERO; + } + + pub(crate) fn 
generate_next_row_core_registers( + &self, + local_values: &[F; NUM_COLUMNS], + next_values: &mut [F; NUM_COLUMNS], + ) { + // We increment the clock by 1. + next_values[COL_CLOCK] = local_values[COL_CLOCK] + F::ONE; + + // We increment the 16-bit table by 1, unless we've reached the max value of 2^16 - 1, in + // which case we repeat that value. + let prev_range_16 = local_values[COL_RANGE_16].to_canonical_u64(); + let next_range_16 = (prev_range_16 + 1).min((1 << 16) - 1); + next_values[COL_RANGE_16] = F::from_canonical_u64(next_range_16); + + next_values[COL_INSTRUCTION_PTR] = todo!(); + + next_values[COL_FRAME_PTR] = todo!(); + + next_values[COL_STACK_PTR] = todo!(); + } + + #[inline] + pub(crate) fn eval_core_registers( + &self, + vars: StarkEvaluationVars, + yield_constr: &mut ConstraintConsumer
<P>
, + ) where + FE: FieldExtension, + P: PackedField, + { + // The clock must start with 0, and increment by 1. + let local_clock = vars.local_values[COL_CLOCK]; + let next_clock = vars.next_values[COL_CLOCK]; + let delta_clock = next_clock - local_clock; + yield_constr.one_first_row(local_clock); + yield_constr.one(delta_clock - FE::ONE); + + // The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1. + let local_range_16 = vars.local_values[COL_RANGE_16]; + let next_range_16 = vars.next_values[COL_RANGE_16]; + let delta_range_16 = next_range_16 - local_range_16; + yield_constr.one_first_row(local_range_16); + yield_constr.one_last_row(local_range_16 - FE::from_canonical_u64((1 << 16) - 1)); + yield_constr.one(delta_range_16 * (delta_range_16 - FE::ONE)); + + todo!() + } + + pub(crate) fn eval_core_registers_recursively( + &self, + builder: &mut CircuitBuilder, + vars: StarkEvaluationTargets, + yield_constr: &mut RecursiveConstraintConsumer, + ) { + todo!() + } +} diff --git a/system_zero/src/lib.rs b/system_zero/src/lib.rs new file mode 100644 index 00000000..029c2abd --- /dev/null +++ b/system_zero/src/lib.rs @@ -0,0 +1,12 @@ +// TODO: Remove these when crate is closer to being finished. +#![allow(dead_code)] +#![allow(unused_variables)] +#![allow(unreachable_code)] +#![allow(clippy::diverging_sub_expression)] + +mod column_layout; +mod core_registers; +mod memory; +mod permutation_unit; +mod public_input_layout; +pub mod system_zero; diff --git a/system_zero/src/memory.rs b/system_zero/src/memory.rs new file mode 100644 index 00000000..0cc42d30 --- /dev/null +++ b/system_zero/src/memory.rs @@ -0,0 +1,16 @@ +#[derive(Default)] +pub struct TransactionMemory { + pub calls: Vec, +} + +/// A virtual memory space specific to the current contract call. +pub struct ContractMemory { + pub code: MemorySegment, + pub main: MemorySegment, + pub calldata: MemorySegment, + pub returndata: MemorySegment, +} + +pub struct MemorySegment { + pub content: Vec, +} diff --git a/system_zero/src/permutation_unit.rs b/system_zero/src/permutation_unit.rs new file mode 100644 index 00000000..a490b49d --- /dev/null +++ b/system_zero/src/permutation_unit.rs @@ -0,0 +1,86 @@ +use plonky2::field::extension_field::{Extendable, FieldExtension}; +use plonky2::field::packed_field::PackedField; +use plonky2::hash::hash_types::RichField; +use plonky2::hash::hashing::SPONGE_WIDTH; +use plonky2::plonk::circuit_builder::CircuitBuilder; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; +use starky::vars::StarkEvaluationTargets; +use starky::vars::StarkEvaluationVars; + +use crate::column_layout::{col_permutation_input, col_permutation_output, NUM_COLUMNS}; +use crate::public_input_layout::NUM_PUBLIC_INPUTS; +use crate::system_zero::SystemZero; + +impl, const D: usize> SystemZero { + pub(crate) fn generate_permutation_unit(&self, values: &mut [F; NUM_COLUMNS]) { + // Load inputs. + let mut state = [F::ZERO; SPONGE_WIDTH]; + for i in 0..SPONGE_WIDTH { + state[i] = values[col_permutation_input(i)]; + } + + // TODO: First full rounds. + // TODO: Partial rounds. + // TODO: Second full rounds. + + // Write outputs. + for i in 0..SPONGE_WIDTH { + values[col_permutation_output(i)] = state[i]; + } + } + + #[inline] + pub(crate) fn eval_permutation_unit( + &self, + vars: StarkEvaluationVars, + yield_constr: &mut ConstraintConsumer
<P>
, + ) where + FE: FieldExtension, + P: PackedField, + { + let local_values = &vars.local_values; + + // Load inputs. + let mut state = [P::ZEROS; SPONGE_WIDTH]; + for i in 0..SPONGE_WIDTH { + state[i] = local_values[col_permutation_input(i)]; + } + + // TODO: First full rounds. + // TODO: Partial rounds. + // TODO: Second full rounds. + + // Assert that the computed output matches the outputs in the trace. + for i in 0..SPONGE_WIDTH { + let out = local_values[col_permutation_output(i)]; + yield_constr.one(state[i] - out); + } + } + + pub(crate) fn eval_permutation_unit_recursively( + &self, + builder: &mut CircuitBuilder, + vars: StarkEvaluationTargets, + yield_constr: &mut RecursiveConstraintConsumer, + ) { + let zero = builder.zero_extension(); + let local_values = &vars.local_values; + + // Load inputs. + let mut state = [zero; SPONGE_WIDTH]; + for i in 0..SPONGE_WIDTH { + state[i] = local_values[col_permutation_input(i)]; + } + + // TODO: First full rounds. + // TODO: Partial rounds. + // TODO: Second full rounds. + + // Assert that the computed output matches the outputs in the trace. + for i in 0..SPONGE_WIDTH { + let out = local_values[col_permutation_output(i)]; + let diff = builder.sub_extension(state[i], out); + yield_constr.one(builder, diff); + } + } +} diff --git a/system_zero/src/public_input_layout.rs b/system_zero/src/public_input_layout.rs new file mode 100644 index 00000000..225b3814 --- /dev/null +++ b/system_zero/src/public_input_layout.rs @@ -0,0 +1,7 @@ +/// The previous state root, before these transactions were executed. +const PI_OLD_STATE_ROOT: usize = 0; + +/// The updated state root, after these transactions were executed. +const PI_NEW_STATE_ROOT: usize = PI_OLD_STATE_ROOT + 1; + +pub(crate) const NUM_PUBLIC_INPUTS: usize = PI_NEW_STATE_ROOT + 1; diff --git a/system_zero/src/system_zero.rs b/system_zero/src/system_zero.rs new file mode 100644 index 00000000..a16fb699 --- /dev/null +++ b/system_zero/src/system_zero.rs @@ -0,0 +1,109 @@ +use std::marker::PhantomData; + +use plonky2::field::extension_field::{Extendable, FieldExtension}; +use plonky2::field::packed_field::PackedField; +use plonky2::hash::hash_types::RichField; +use plonky2::plonk::circuit_builder::CircuitBuilder; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; +use starky::stark::Stark; +use starky::vars::StarkEvaluationTargets; +use starky::vars::StarkEvaluationVars; + +use crate::column_layout::NUM_COLUMNS; +use crate::memory::TransactionMemory; +use crate::public_input_layout::NUM_PUBLIC_INPUTS; + +/// We require at least 2^16 rows as it helps support efficient 16-bit range checks. 
+const MIN_TRACE_ROWS: usize = 1 << 16; + +pub struct SystemZero, const D: usize> { + _phantom: PhantomData, +} + +impl, const D: usize> SystemZero { + fn generate_trace(&self) -> Vec<[F; NUM_COLUMNS]> { + let memory = TransactionMemory::default(); + + let mut row = [F::ZERO; NUM_COLUMNS]; + self.generate_first_row_core_registers(&mut row); + self.generate_permutation_unit(&mut row); + + let mut trace = Vec::with_capacity(MIN_TRACE_ROWS); + + loop { + let mut next_row = [F::ZERO; NUM_COLUMNS]; + self.generate_next_row_core_registers(&row, &mut next_row); + self.generate_permutation_unit(&mut next_row); + + trace.push(row); + row = next_row; + } + + trace.push(row); + trace + } +} + +impl, const D: usize> Default for SystemZero { + fn default() -> Self { + Self { + _phantom: PhantomData, + } + } +} + +impl, const D: usize> Stark for SystemZero { + const COLUMNS: usize = NUM_COLUMNS; + const PUBLIC_INPUTS: usize = NUM_PUBLIC_INPUTS; + + fn eval_packed_generic( + &self, + vars: StarkEvaluationVars, + yield_constr: &mut ConstraintConsumer
<P>
, + ) where + FE: FieldExtension, + P: PackedField, + { + self.eval_core_registers(vars, yield_constr); + self.eval_permutation_unit(vars, yield_constr); + todo!() + } + + fn eval_ext_recursively( + &self, + builder: &mut CircuitBuilder, + vars: StarkEvaluationTargets, + yield_constr: &mut RecursiveConstraintConsumer, + ) { + self.eval_core_registers_recursively(builder, vars, yield_constr); + self.eval_permutation_unit_recursively(builder, vars, yield_constr); + todo!() + } +} + +#[cfg(test)] +mod tests { + use log::Level; + use plonky2::field::goldilocks_field::GoldilocksField; + use plonky2::plonk::config::PoseidonGoldilocksConfig; + use plonky2::util::timing::TimingTree; + use starky::config::StarkConfig; + use starky::prover::prove; + + use crate::system_zero::SystemZero; + + #[test] + #[ignore] // TODO + fn run() { + type F = GoldilocksField; + type C = PoseidonGoldilocksConfig; + const D: usize = 2; + + type S = SystemZero; + let system = S::default(); + let config = StarkConfig::standard_fast_config(); + let mut timing = TimingTree::new("prove", Level::Debug); + let trace = system.generate_trace(); + prove::(system, config, trace, &mut timing); + } +} From d54cc9a7c8587a085acd605c7a297e14207e998e Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Wed, 26 Jan 2022 16:08:04 +0100 Subject: [PATCH 036/143] First try --- field/src/lib.rs | 1 + field/src/zero_poly_coset.rs | 47 ++++++++++++++++ plonky2/src/plonk/plonk_common.rs | 46 --------------- plonky2/src/plonk/prover.rs | 3 +- plonky2/src/plonk/vanishing_poly.rs | 3 +- starky/src/constraint_consumer.rs | 19 +++++++ starky/src/prover.rs | 87 ++++++++++++++++++++++++++++- starky/src/stark.rs | 2 +- util/src/lib.rs | 1 + 9 files changed, 157 insertions(+), 52 deletions(-) create mode 100644 field/src/zero_poly_coset.rs diff --git a/field/src/lib.rs b/field/src/lib.rs index f190bcbc..2c89aab3 100644 --- a/field/src/lib.rs +++ b/field/src/lib.rs @@ -24,6 +24,7 @@ pub mod packed_field; pub mod polynomial; pub mod secp256k1_base; pub mod secp256k1_scalar; +pub mod zero_poly_coset; #[cfg(test)] mod field_testing; diff --git a/field/src/zero_poly_coset.rs b/field/src/zero_poly_coset.rs new file mode 100644 index 00000000..0b7452f5 --- /dev/null +++ b/field/src/zero_poly_coset.rs @@ -0,0 +1,47 @@ +use crate::field_types::Field; + +/// Precomputations of the evaluation of `Z_H(X) = X^n - 1` on a coset `gK` with `H <= K`. +pub struct ZeroPolyOnCoset { + /// `n = |H|`. + n: F, + /// `rate = |K|/|H|`. + rate: usize, + /// Holds `g^n * (w^n)^i - 1 = g^n * v^i - 1` for `i in 0..rate`, with `w` a generator of `K` and `v` a + /// `rate`-primitive root of unity. + evals: Vec, + /// Holds the multiplicative inverses of `evals`. + inverses: Vec, +} + +impl ZeroPolyOnCoset { + pub fn new(n_log: usize, rate_bits: usize) -> Self { + let g_pow_n = F::coset_shift().exp_power_of_2(n_log); + let evals = F::two_adic_subgroup(rate_bits) + .into_iter() + .map(|x| g_pow_n * x - F::ONE) + .collect::>(); + let inverses = F::batch_multiplicative_inverse(&evals); + Self { + n: F::from_canonical_usize(1 << n_log), + rate: 1 << rate_bits, + evals, + inverses, + } + } + + /// Returns `Z_H(g * w^i)`. + pub fn eval(&self, i: usize) -> F { + self.evals[i % self.rate] + } + + /// Returns `1 / Z_H(g * w^i)`. + pub fn eval_inverse(&self, i: usize) -> F { + self.inverses[i % self.rate] + } + + /// Returns `L_1(x) = Z_H(x)/(n * (x - 1))` with `x = w^i`. + pub fn eval_l1(&self, i: usize, x: F) -> F { + // Could also precompute the inverses using Montgomery. 
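+        // Editor's note (added, not part of the original patch): this relies on the identity
+        // L_1(x) = (x^n - 1) / (n * (x - 1)), the Lagrange basis polynomial that equals 1 at
+        // x = 1 and 0 at the other n-th roots of unity. `self.eval(i)` supplies the precomputed
+        // numerator Z_H(x) = x^n - 1, so only the n * (x - 1) factor still needs to be inverted.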
+ self.eval(i) * (self.n * (x - F::ONE)).inverse() + } +} diff --git a/plonky2/src/plonk/plonk_common.rs b/plonky2/src/plonk/plonk_common.rs index 92c4168d..74495198 100644 --- a/plonky2/src/plonk/plonk_common.rs +++ b/plonky2/src/plonk/plonk_common.rs @@ -63,52 +63,6 @@ pub(crate) fn eval_zero_poly(n: usize, x: F) -> F { x.exp_u64(n as u64) - F::ONE } -/// Precomputations of the evaluation of `Z_H(X) = X^n - 1` on a coset `gK` with `H <= K`. -pub(crate) struct ZeroPolyOnCoset { - /// `n = |H|`. - n: F, - /// `rate = |K|/|H|`. - rate: usize, - /// Holds `g^n * (w^n)^i - 1 = g^n * v^i - 1` for `i in 0..rate`, with `w` a generator of `K` and `v` a - /// `rate`-primitive root of unity. - evals: Vec, - /// Holds the multiplicative inverses of `evals`. - inverses: Vec, -} - -impl ZeroPolyOnCoset { - pub fn new(n_log: usize, rate_bits: usize) -> Self { - let g_pow_n = F::coset_shift().exp_power_of_2(n_log); - let evals = F::two_adic_subgroup(rate_bits) - .into_iter() - .map(|x| g_pow_n * x - F::ONE) - .collect::>(); - let inverses = F::batch_multiplicative_inverse(&evals); - Self { - n: F::from_canonical_usize(1 << n_log), - rate: 1 << rate_bits, - evals, - inverses, - } - } - - /// Returns `Z_H(g * w^i)`. - pub fn eval(&self, i: usize) -> F { - self.evals[i % self.rate] - } - - /// Returns `1 / Z_H(g * w^i)`. - pub fn eval_inverse(&self, i: usize) -> F { - self.inverses[i % self.rate] - } - - /// Returns `L_1(x) = Z_H(x)/(n * (x - 1))` with `x = w^i`. - pub fn eval_l1(&self, i: usize, x: F) -> F { - // Could also precompute the inverses using Montgomery. - self.eval(i) * (self.n * (x - F::ONE)).inverse() - } -} - /// Evaluate the Lagrange basis `L_1` with `L_1(1) = 1`, and `L_1(x) = 0` for other members of an /// order `n` multiplicative subgroup. pub(crate) fn eval_l_1(n: usize, x: F) -> F { diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index 2a11e4c0..09caf81e 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -4,6 +4,7 @@ use anyhow::ensure; use anyhow::Result; use plonky2_field::extension_field::Extendable; use plonky2_field::polynomial::{PolynomialCoeffs, PolynomialValues}; +use plonky2_field::zero_poly_coset::ZeroPolyOnCoset; use plonky2_util::log2_ceil; use rayon::prelude::*; @@ -15,7 +16,7 @@ use crate::iop::generator::generate_partial_witness; use crate::iop::witness::{MatrixWitness, PartialWitness, Witness}; use crate::plonk::circuit_data::{CommonCircuitData, ProverOnlyCircuitData}; use crate::plonk::config::{GenericConfig, Hasher}; -use crate::plonk::plonk_common::{PlonkOracle, ZeroPolyOnCoset}; +use crate::plonk::plonk_common::PlonkOracle; use crate::plonk::proof::OpeningSet; use crate::plonk::proof::{Proof, ProofWithPublicInputs}; use crate::plonk::vanishing_poly::eval_vanishing_poly_base_batch; diff --git a/plonky2/src/plonk/vanishing_poly.rs b/plonky2/src/plonk/vanishing_poly.rs index 74e0fab3..70de5833 100644 --- a/plonky2/src/plonk/vanishing_poly.rs +++ b/plonky2/src/plonk/vanishing_poly.rs @@ -1,6 +1,7 @@ use plonky2_field::batch_util::batch_add_inplace; use plonky2_field::extension_field::{Extendable, FieldExtension}; use plonky2_field::field_types::Field; +use plonky2_field::zero_poly_coset::ZeroPolyOnCoset; use crate::gates::gate::PrefixedGate; use crate::hash::hash_types::RichField; @@ -10,7 +11,7 @@ use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::CommonCircuitData; use crate::plonk::config::GenericConfig; use crate::plonk::plonk_common; -use 
crate::plonk::plonk_common::{eval_l_1_recursively, ZeroPolyOnCoset}; +use crate::plonk::plonk_common::eval_l_1_recursively; use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBaseBatch}; use crate::util::partial_products::{check_partial_products, check_partial_products_recursively}; use crate::util::reducing::ReducingFactorTarget; diff --git a/starky/src/constraint_consumer.rs b/starky/src/constraint_consumer.rs index 09b5397f..20f29192 100644 --- a/starky/src/constraint_consumer.rs +++ b/starky/src/constraint_consumer.rs @@ -1,6 +1,7 @@ use std::marker::PhantomData; use plonky2::field::extension_field::Extendable; +use plonky2::field::field_types::Field; use plonky2::field::packed_field::PackedField; use plonky2::hash::hash_types::RichField; use plonky2::iop::ext_target::ExtensionTarget; @@ -24,6 +25,24 @@ pub struct ConstraintConsumer { } impl ConstraintConsumer
<P>
{ + pub fn new( + alpha: P::Scalar, + lagrange_basis_first: P::Scalar, + lagrange_basis_last: P::Scalar, + ) -> Self { + Self { + alpha, + constraint_acc: P::ZEROS, + lagrange_basis_first, + lagrange_basis_last, + } + } + + // TODO: Do this correctly. + pub fn accumulator(&self) -> P::Scalar { + self.constraint_acc.as_slice()[0] + } + /// Add one constraint. pub fn one(&mut self, constraint: P) { self.constraint_acc *= self.alpha; diff --git a/starky/src/prover.rs b/starky/src/prover.rs index bda478e5..ad86143b 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -1,10 +1,12 @@ use itertools::Itertools; use plonky2::field::extension_field::Extendable; -use plonky2::field::polynomial::PolynomialValues; +use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues}; +use plonky2::field::zero_poly_coset::ZeroPolyOnCoset; use plonky2::fri::oracle::PolynomialBatch; use plonky2::fri::prover::fri_proof; use plonky2::hash::hash_types::RichField; use plonky2::iop::challenger::Challenger; +use plonky2::plonk::circuit_data::CommonCircuitData; use plonky2::plonk::config::GenericConfig; use plonky2::timed; use plonky2::util::timing::TimingTree; @@ -13,8 +15,10 @@ use plonky2_util::log2_strict; use rayon::prelude::*; use crate::config::StarkConfig; +use crate::constraint_consumer::ConstraintConsumer; use crate::proof::StarkProof; use crate::stark::Stark; +use crate::vars::StarkEvaluationVars; pub fn prove( stark: S, @@ -27,6 +31,7 @@ where C: GenericConfig, S: Stark, [(); S::COLUMNS]:, + [(); S::PUBLIC_INPUTS]:, { let degree_bits = log2_strict(trace.len()); @@ -57,13 +62,23 @@ where ) ); - let trace_cap = trace_commitment.merkle_tree.cap; + let trace_cap = trace_commitment.merkle_tree.cap.clone(); + let mut challenger = Challenger::new(); + challenger.observe_cap(&trace_cap); + + let alphas = challenger.get_n_challenges(config.num_challenges); + let quotient = compute_quotient_polys::( + &stark, + &trace_commitment, + &alphas, + degree_bits, + rate_bits, + ); let openings = todo!(); let initial_merkle_trees = todo!(); let lde_polynomial_coeffs = todo!(); let lde_polynomial_values = todo!(); - let mut challenger = Challenger::new(); let fri_params = config.fri_params(degree_bits); let opening_proof = fri_proof::( @@ -81,3 +96,69 @@ where opening_proof, } } + +fn compute_quotient_polys( + stark: &S, + trace_commitment: &PolynomialBatch, + alphas: &[F], + degree_bits: usize, + rate_bits: usize, +) -> Vec> +where + F: RichField + Extendable, + C: GenericConfig, + S: Stark, + [(); S::COLUMNS]:, + [(); S::PUBLIC_INPUTS]:, +{ + let degree = 1 << degree_bits; + let points = F::two_adic_subgroup(degree_bits + rate_bits); + + let lagrange_first = { + let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); + evals.values[0] = F::ONE; + evals.lde(rate_bits) + }; + let lagrange_last = { + let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); + evals.values[degree - 1] = F::ONE; + evals.lde(rate_bits) + }; + + let z_h_on_coset = ZeroPolyOnCoset::new(degree_bits, rate_bits); + + alphas + .iter() + .map(|&alpha| { + let quotient_evals = PolynomialValues::new( + (0..degree << rate_bits) + .into_par_iter() + .map(|i| { + let mut consumer = ConstraintConsumer::::new( + alpha, + lagrange_first.values[i], + lagrange_last.values[i], + ); + let vars = + StarkEvaluationVars:: { + local_values: trace_commitment + .get_lde_values(i) + .try_into() + .unwrap(), + next_values: trace_commitment + .get_lde_values((i + 1) % (degree << rate_bits)) + .try_into() + .unwrap(), + public_inputs: &[F::ZERO; 
S::PUBLIC_INPUTS], + }; + stark.eval_packed_base(vars, &mut consumer); + let constraints_eval = consumer.accumulator(); + let denominator_inv = z_h_on_coset.eval_inverse(i); + constraints_eval * denominator_inv + }) + .collect(), + ); + quotient_evals.coset_ifft(F::coset_shift()) + }) + .collect() +} diff --git a/starky/src/stark.rs b/starky/src/stark.rs index 8d6abb69..6be5be2c 100644 --- a/starky/src/stark.rs +++ b/starky/src/stark.rs @@ -8,7 +8,7 @@ use crate::vars::StarkEvaluationTargets; use crate::vars::StarkEvaluationVars; /// Represents a STARK system. -pub trait Stark, const D: usize> { +pub trait Stark, const D: usize>: Sync { /// The total number of columns in the trace. const COLUMNS: usize; /// The number of public inputs. diff --git a/util/src/lib.rs b/util/src/lib.rs index f760cfba..61677ff0 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -11,6 +11,7 @@ use std::mem::size_of; use std::ptr::{swap, swap_nonoverlapping}; mod transpose_util; + use crate::transpose_util::transpose_in_place_square; pub fn bits_u64(n: u64) -> usize { From ab48cca1f3a22270bdcf4c183d13ac61e4345872 Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Wed, 26 Jan 2022 11:54:39 -0800 Subject: [PATCH 037/143] Merkle tree optimizations (#433) * Merkle tree optimizations * Replace spawn with parallel iterators Co-authored-by: Daniel Lubarov * Missing imports Co-authored-by: Daniel Lubarov --- plonky2/src/hash/hashing.rs | 2 +- plonky2/src/hash/merkle_tree.rs | 154 +++++++++++++++++++++++++------- 2 files changed, 125 insertions(+), 31 deletions(-) diff --git a/plonky2/src/hash/hashing.rs b/plonky2/src/hash/hashing.rs index 0867eaa8..997a6b12 100644 --- a/plonky2/src/hash/hashing.rs +++ b/plonky2/src/hash/hashing.rs @@ -69,7 +69,7 @@ impl, const D: usize> CircuitBuilder { } // Squeeze until we have the desired number of outputs. - let mut outputs = Vec::new(); + let mut outputs = Vec::with_capacity(num_outputs); loop { for i in 0..SPONGE_RATE { outputs.push(state[i]); diff --git a/plonky2/src/hash/merkle_tree.rs b/plonky2/src/hash/merkle_tree.rs index 8f191366..e10b5019 100644 --- a/plonky2/src/hash/merkle_tree.rs +++ b/plonky2/src/hash/merkle_tree.rs @@ -1,3 +1,7 @@ +use std::mem::MaybeUninit; +use std::slice; + +use plonky2_util::log2_strict; use rayon::prelude::*; use serde::{Deserialize, Serialize}; @@ -27,36 +31,103 @@ pub struct MerkleTree> { /// The data in the leaves of the Merkle tree. pub leaves: Vec>, - /// The layers of hashes in the tree. The first layer is the one at the bottom. - pub layers: Vec>, + /// The digests in the tree. Consists of `cap.len()` sub-trees, each corresponding to one + /// element in `cap`. Each subtree is contiguous and located at + /// `digests[digests.len() / cap.len() * i..digests.len() / cap.len() * (i + 1)]`. + /// Within each subtree, siblings are stored next to each other. The layout is, + /// left_child_subtree || left_child_digest || right_child_digest || right_child_subtree, where + /// left_child_digest and right_child_digest are H::Hash and left_child_subtree and + /// right_child_subtree recurse. Observe that the digest of a node is stored by its _parent_. + /// Consequently, the digests of the roots are not stored here (they can be found in `cap`). + pub digests: Vec, /// The Merkle cap. pub cap: MerkleCap, } +fn capacity_up_to_mut(v: &mut Vec, len: usize) -> &mut [MaybeUninit] { + assert!(v.capacity() >= len); + let v_ptr = v.as_mut_ptr().cast::>(); + unsafe { + // SAFETY: `v_ptr` is a valid pointer to a buffer of length at least `len`. 
Upon return, the + // lifetime will be bound to that of `v`. The underlying memory will not be deallocated as + // we hold the sole mutable reference to `v`. The contents of the slice may be + // uninitialized, but the `MaybeUninit` makes it safe. + slice::from_raw_parts_mut(v_ptr, len) + } +} + +fn fill_subtree>( + digests_buf: &mut [MaybeUninit], + leaves: &[Vec], +) -> H::Hash { + assert_eq!(leaves.len(), digests_buf.len() / 2 + 1); + if digests_buf.is_empty() { + H::hash(&leaves[0], false) + } else { + // Layout is: left recursive output || left child digest + // || right child digest || right recursive output. + // Split `digests_buf` into the two recursive outputs (slices) and two child digests + // (references). + let (left_digests_buf, right_digests_buf) = digests_buf.split_at_mut(digests_buf.len() / 2); + let (left_digest_mem, left_digests_buf) = left_digests_buf.split_last_mut().unwrap(); + let (right_digest_mem, right_digests_buf) = right_digests_buf.split_first_mut().unwrap(); + // Split `leaves` between both children. + let (left_leaves, right_leaves) = leaves.split_at(leaves.len() / 2); + let (left_digest, right_digest) = rayon::join( + || fill_subtree::(left_digests_buf, left_leaves), + || fill_subtree::(right_digests_buf, right_leaves), + ); + left_digest_mem.write(left_digest); + right_digest_mem.write(right_digest); + H::two_to_one(left_digest, right_digest) + } +} + +fn fill_digests_buf>( + digests_buf: &mut [MaybeUninit], + cap_buf: &mut [MaybeUninit], + leaves: &[Vec], + cap_height: usize, +) { + let subtree_digests_len = digests_buf.len() >> cap_height; + let subtree_leaves_len = leaves.len() >> cap_height; + let digests_chunks = digests_buf.par_chunks_exact_mut(subtree_digests_len); + let leaves_chunks = leaves.par_chunks_exact(subtree_leaves_len); + assert_eq!(digests_chunks.len(), cap_buf.len()); + assert_eq!(digests_chunks.len(), leaves_chunks.len()); + digests_chunks.zip(cap_buf).zip(leaves_chunks).for_each( + |((subtree_digests, subtree_cap), subtree_leaves)| { + // We have `1 << cap_height` sub-trees, one for each entry in `cap`. They are totally + // independent, so we schedule one task for each. `digests_buf` and `leaves` are split + // into `1 << cap_height` slices, one for each sub-tree. + subtree_cap.write(fill_subtree::(subtree_digests, subtree_leaves)); + }, + ); +} + impl> MerkleTree { pub fn new(leaves: Vec>, cap_height: usize) -> Self { - let mut current_layer = leaves - .par_iter() - .map(|l| H::hash(l, false)) - .collect::>(); + let num_digests = 2 * (leaves.len() - (1 << cap_height)); + let mut digests = Vec::with_capacity(num_digests); - let mut layers = vec![]; - let cap = loop { - if current_layer.len() == 1 << cap_height { - break current_layer; - } - let next_layer = current_layer - .par_chunks(2) - .map(|chunk| H::two_to_one(chunk[0], chunk[1])) - .collect::>(); - layers.push(current_layer); - current_layer = next_layer; - }; + let len_cap = 1 << cap_height; + let mut cap = Vec::with_capacity(len_cap); + + let digests_buf = capacity_up_to_mut(&mut digests, num_digests); + let cap_buf = capacity_up_to_mut(&mut cap, len_cap); + fill_digests_buf::(digests_buf, cap_buf, &leaves[..], cap_height); + + unsafe { + // SAFETY: `fill_digests_buf` and `cap` initialized the spare capacity up to + // `num_digests` and `len_cap`, resp. + digests.set_len(num_digests); + cap.set_len(len_cap); + } Self { leaves, - layers, + digests, cap: MerkleCap(cap), } } @@ -67,17 +138,40 @@ impl> MerkleTree { /// Create a Merkle proof from a leaf index. 
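    // Editor's note (added): with this layout, the digests of a sub-tree with 4 leaves are stored
    // as [hash(leaf 0), hash(leaf 1), layer-1 digest (left), layer-1 digest (right), hash(leaf 2),
    // hash(leaf 3)]. The index arithmetic below, `(pair_index << (i + 1)) + (1 << i) - 1`, walks
    // exactly this interleaving; e.g. the layer-1 sibling pair of that sub-tree sits at
    // positions 2 and 3.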
pub fn prove(&self, leaf_index: usize) -> MerkleProof { - MerkleProof { - siblings: self - .layers - .iter() - .scan(leaf_index, |acc, layer| { - let index = *acc ^ 1; - *acc >>= 1; - Some(layer[index]) - }) - .collect(), - } + let cap_height = log2_strict(self.cap.len()); + let num_layers = log2_strict(self.leaves.len()) - cap_height; + debug_assert_eq!(leaf_index >> (cap_height + num_layers), 0); + + let digest_tree = { + let tree_index = leaf_index >> num_layers; + let tree_len = self.digests.len() >> cap_height; + &self.digests[tree_len * tree_index..tree_len * (tree_index + 1)] + }; + + // Mask out high bits to get the index within the sub-tree. + let mut pair_index = leaf_index & ((1 << num_layers) - 1); + let siblings = (0..num_layers) + .into_iter() + .map(|i| { + let parity = pair_index & 1; + pair_index >>= 1; + + // The layers' data is interleaved as follows: + // [layer 0, layer 1, layer 0, layer 2, layer 0, layer 1, layer 0, layer 3, ...]. + // Each of the above is a pair of siblings. + // `pair_index` is the index of the pair within layer `i`. + // The index of that the pair within `digests` is + // `pair_index * 2 ** (i + 1) + (2 ** i - 1)`. + let siblings_index = (pair_index << (i + 1)) + (1 << i) - 1; + // We have an index for the _pair_, but we want the index of the _sibling_. + // Double the pair index to get the index of the left sibling. Conditionally add `1` + // if we are to retrieve the right sibling. + let sibling_index = 2 * siblings_index + (1 - parity); + digest_tree[sibling_index] + }) + .collect(); + + MerkleProof { siblings } } } From 3e0cb36063add2124e7f969628f0116d41a91d05 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Thu, 27 Jan 2022 07:56:22 +0100 Subject: [PATCH 038/143] Added test stark --- starky/src/constraint_consumer.rs | 10 +--- starky/src/julia_stark.rs | 91 +++++++++++++++++++++++++++++++ starky/src/lib.rs | 3 + 3 files changed, 97 insertions(+), 7 deletions(-) create mode 100644 starky/src/julia_stark.rs diff --git a/starky/src/constraint_consumer.rs b/starky/src/constraint_consumer.rs index 20f29192..c7a8bfea 100644 --- a/starky/src/constraint_consumer.rs +++ b/starky/src/constraint_consumer.rs @@ -17,19 +17,15 @@ pub struct ConstraintConsumer { /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated /// with the first trace row, and zero at other points in the subgroup. - lagrange_basis_first: P::Scalar, + lagrange_basis_first: P, /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated /// with the last trace row, and zero at other points in the subgroup. - lagrange_basis_last: P::Scalar, + lagrange_basis_last: P, } impl ConstraintConsumer
<P>
{ - pub fn new( - alpha: P::Scalar, - lagrange_basis_first: P::Scalar, - lagrange_basis_last: P::Scalar, - ) -> Self { + pub fn new(alpha: P::Scalar, lagrange_basis_first: P, lagrange_basis_last: P) -> Self { Self { alpha, constraint_acc: P::ZEROS, diff --git a/starky/src/julia_stark.rs b/starky/src/julia_stark.rs new file mode 100644 index 00000000..e8bca9fe --- /dev/null +++ b/starky/src/julia_stark.rs @@ -0,0 +1,91 @@ +use std::marker::PhantomData; + +use plonky2::field::extension_field::{Extendable, FieldExtension}; +use plonky2::field::packed_field::PackedField; +use plonky2::hash::hash_types::RichField; +use plonky2::plonk::circuit_builder::CircuitBuilder; + +use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; +use crate::stark::Stark; +use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars}; + +pub struct JuliaStark, const D: usize> { + c: F, + _phantom: PhantomData, +} + +impl, const D: usize> JuliaStark { + const NUM_COLUMNS: usize = 1; + const NUM_ROWS: usize = 1 << 10; + + fn new(c: F) -> Self { + Self { + c, + _phantom: PhantomData, + } + } + + fn generate_trace(&self) -> Vec<[F; Self::NUM_COLUMNS]> { + (0..Self::NUM_ROWS) + .scan([F::ZERO; Self::NUM_COLUMNS], |acc, _| { + let tmp = *acc; + acc[0] = acc[0] * acc[0] + self.c; + Some(tmp) + }) + .collect() + } +} + +impl, const D: usize> Stark for JuliaStark { + const COLUMNS: usize = Self::NUM_COLUMNS; + const PUBLIC_INPUTS: usize = 0; + + fn eval_packed_generic( + &self, + vars: StarkEvaluationVars, + yield_constr: &mut ConstraintConsumer
<P>
, + ) where + FE: FieldExtension, + P: PackedField, + { + yield_constr.one( + vars.next_values[0] + - vars.local_values[0] * vars.local_values[0] + - FE::from_basefield(self.c), + ); + } + + fn eval_ext_recursively( + &self, + builder: &mut CircuitBuilder, + vars: StarkEvaluationTargets, + yield_constr: &mut RecursiveConstraintConsumer, + ) { + todo!() + } +} + +#[cfg(test)] +mod tests { + use plonky2::field::field_types::Field; + use plonky2::field::goldilocks_field::GoldilocksField; + use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; + use plonky2::util::timing::TimingTree; + + use crate::config::StarkConfig; + use crate::julia_stark::JuliaStark; + use crate::prover::prove; + + #[test] + fn test_julia_stark() { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + type S = JuliaStark; + + let config = StarkConfig::standard_fast_config(); + let stark = S::new(F::NEG_ONE); + let trace = stark.generate_trace(); + prove::(stark, config, trace, &mut TimingTree::default()); + } +} diff --git a/starky/src/lib.rs b/starky/src/lib.rs index be28a01e..72407511 100644 --- a/starky/src/lib.rs +++ b/starky/src/lib.rs @@ -12,3 +12,6 @@ pub mod proof; pub mod prover; pub mod stark; pub mod vars; + +#[cfg(test)] +pub mod julia_stark; From 4a2681034e4da1c5850ad3f3d4d52c870726ffee Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Thu, 27 Jan 2022 12:58:56 +0100 Subject: [PATCH 039/143] Working prover --- plonky2/src/fri/oracle.rs | 2 +- .../{julia_stark.rs => fibonacci_stark.rs} | 43 +++++------ starky/src/lib.rs | 2 +- starky/src/proof.rs | 24 +++++++ starky/src/prover.rs | 71 ++++++++++++++----- starky/src/stark.rs | 23 ++++++ 6 files changed, 127 insertions(+), 38 deletions(-) rename starky/src/{julia_stark.rs => fibonacci_stark.rs} (70%) diff --git a/plonky2/src/fri/oracle.rs b/plonky2/src/fri/oracle.rs index c016f0ee..0922962a 100644 --- a/plonky2/src/fri/oracle.rs +++ b/plonky2/src/fri/oracle.rs @@ -127,7 +127,7 @@ impl, C: GenericConfig, const D: usize> } /// Produces a batch opening proof. 
- pub(crate) fn prove_openings( + pub fn prove_openings( instance: &FriInstanceInfo, oracles: &[&Self], challenger: &mut Challenger, diff --git a/starky/src/julia_stark.rs b/starky/src/fibonacci_stark.rs similarity index 70% rename from starky/src/julia_stark.rs rename to starky/src/fibonacci_stark.rs index e8bca9fe..3d8d4be3 100644 --- a/starky/src/julia_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -9,34 +9,37 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer use crate::stark::Stark; use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars}; -pub struct JuliaStark, const D: usize> { - c: F, +pub struct FibonacciStark, const D: usize> { + x0: F, + x1: F, _phantom: PhantomData, } -impl, const D: usize> JuliaStark { - const NUM_COLUMNS: usize = 1; - const NUM_ROWS: usize = 1 << 10; +impl, const D: usize> FibonacciStark { + const NUM_COLUMNS: usize = 2; + const NUM_ROWS: usize = 1 << 5; - fn new(c: F) -> Self { + fn new(x0: F, x1: F) -> Self { Self { - c, + x0, + x1, _phantom: PhantomData, } } fn generate_trace(&self) -> Vec<[F; Self::NUM_COLUMNS]> { (0..Self::NUM_ROWS) - .scan([F::ZERO; Self::NUM_COLUMNS], |acc, _| { + .scan([self.x0, self.x1], |acc, _| { let tmp = *acc; - acc[0] = acc[0] * acc[0] + self.c; + acc[0] = tmp[1]; + acc[1] = tmp[0] + tmp[1]; Some(tmp) }) .collect() } } -impl, const D: usize> Stark for JuliaStark { +impl, const D: usize> Stark for FibonacciStark { const COLUMNS: usize = Self::NUM_COLUMNS; const PUBLIC_INPUTS: usize = 0; @@ -48,11 +51,8 @@ impl, const D: usize> Stark for JuliaStark, P: PackedField, { - yield_constr.one( - vars.next_values[0] - - vars.local_values[0] * vars.local_values[0] - - FE::from_basefield(self.c), - ); + yield_constr.one(vars.next_values[0] - vars.local_values[1]); + yield_constr.one(vars.next_values[1] - vars.local_values[0] - vars.local_values[1]); } fn eval_ext_recursively( @@ -67,25 +67,28 @@ impl, const D: usize> Stark for JuliaStark Result<()> { const D: usize = 2; type C = PoseidonGoldilocksConfig; type F = >::F; - type S = JuliaStark; + type S = FibonacciStark; let config = StarkConfig::standard_fast_config(); - let stark = S::new(F::NEG_ONE); + let stark = S::new(F::ZERO, F::ONE); let trace = stark.generate_trace(); - prove::(stark, config, trace, &mut TimingTree::default()); + prove::(stark, config, trace, &mut TimingTree::default())?; + + Ok(()) } } diff --git a/starky/src/lib.rs b/starky/src/lib.rs index 72407511..541950ab 100644 --- a/starky/src/lib.rs +++ b/starky/src/lib.rs @@ -14,4 +14,4 @@ pub mod stark; pub mod vars; #[cfg(test)] -pub mod julia_stark; +pub mod fibonacci_stark; diff --git a/starky/src/proof.rs b/starky/src/proof.rs index 1cdbbd3c..22ebf5e2 100644 --- a/starky/src/proof.rs +++ b/starky/src/proof.rs @@ -1,8 +1,10 @@ use plonky2::field::extension_field::Extendable; +use plonky2::fri::oracle::PolynomialBatch; use plonky2::fri::proof::{CompressedFriProof, FriProof}; use plonky2::hash::hash_types::RichField; use plonky2::hash::merkle_tree::MerkleCap; use plonky2::plonk::config::GenericConfig; +use rayon::prelude::*; pub struct StarkProof, C: GenericConfig, const D: usize> { /// Merkle cap of LDEs of trace values. 
@@ -33,3 +35,25 @@ pub struct StarkOpeningSet, const D: usize> { pub permutation_zs: Vec, pub quotient_polys: Vec, } + +impl, const D: usize> StarkOpeningSet { + pub fn new>( + zeta: F::Extension, + g: F::Extension, + trace_commitment: &PolynomialBatch, + quotient_commitment: &PolynomialBatch, + ) -> Self { + let eval_commitment = |z: F::Extension, c: &PolynomialBatch| { + c.polynomials + .par_iter() + .map(|p| p.to_extension().eval(z)) + .collect::>() + }; + Self { + local_values: eval_commitment(zeta, trace_commitment), + next_values: eval_commitment(zeta * g, trace_commitment), + permutation_zs: vec![], + quotient_polys: eval_commitment(zeta, quotient_commitment), + } + } +} diff --git a/starky/src/prover.rs b/starky/src/prover.rs index ad86143b..fac876aa 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -1,5 +1,7 @@ +use anyhow::{ensure, Result}; use itertools::Itertools; use plonky2::field::extension_field::Extendable; +use plonky2::field::field_types::Field; use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues}; use plonky2::field::zero_poly_coset::ZeroPolyOnCoset; use plonky2::fri::oracle::PolynomialBatch; @@ -16,7 +18,7 @@ use rayon::prelude::*; use crate::config::StarkConfig; use crate::constraint_consumer::ConstraintConsumer; -use crate::proof::StarkProof; +use crate::proof::{StarkOpeningSet, StarkProof}; use crate::stark::Stark; use crate::vars::StarkEvaluationVars; @@ -25,7 +27,7 @@ pub fn prove( config: StarkConfig, trace: Vec<[F; S::COLUMNS]>, timing: &mut TimingTree, -) -> StarkProof +) -> Result> where F: RichField + Extendable, C: GenericConfig, @@ -33,7 +35,8 @@ where [(); S::COLUMNS]:, [(); S::PUBLIC_INPUTS]:, { - let degree_bits = log2_strict(trace.len()); + let degree = trace.len(); + let degree_bits = log2_strict(degree); let trace_vecs = trace.into_iter().map(|row| row.to_vec()).collect_vec(); let trace_col_major: Vec> = transpose(&trace_vecs); @@ -67,34 +70,70 @@ where challenger.observe_cap(&trace_cap); let alphas = challenger.get_n_challenges(config.num_challenges); - let quotient = compute_quotient_polys::( + let quotient_polys = compute_quotient_polys::( &stark, &trace_commitment, &alphas, degree_bits, rate_bits, ); - let openings = todo!(); + let all_quotient_chunks = quotient_polys + .into_par_iter() + .flat_map(|mut quotient_poly| { + quotient_poly.trim(); + quotient_poly + .pad(degree << rate_bits) + .expect("Quotient has failed, the vanishing polynomial is not divisible by `Z_H"); + // Split t into degree-n chunks. + quotient_poly.chunks(degree) + }) + .collect(); + let quotient_commitment = timed!( + timing, + "compute quotient commitment", + PolynomialBatch::from_coeffs( + all_quotient_chunks, + rate_bits, + false, + config.fri_config.cap_height, + timing, + None, + ) + ); + challenger.observe_cap("ient_commitment.merkle_tree.cap); - let initial_merkle_trees = todo!(); - let lde_polynomial_coeffs = todo!(); - let lde_polynomial_values = todo!(); + let zeta = challenger.get_extension_challenge::(); + // To avoid leaking witness data, we want to ensure that our opening locations, `zeta` and + // `g * zeta`, are not in our subgroup `H`. It suffices to check `zeta` only, since + // `(g * zeta)^n = zeta^n`, where `n` is the order of `g`. + let g = F::Extension::primitive_root_of_unity(degree_bits); + ensure!( + zeta.exp_power_of_2(degree_bits) != F::Extension::ONE, + "Opening point is in the subgroup." 
+ ); + let openings = StarkOpeningSet::new(zeta, g, &trace_commitment, "ient_commitment); + + // TODO: Add permuation checks + let initial_merkle_trees = &[&trace_commitment, "ient_commitment]; let fri_params = config.fri_params(degree_bits); - let opening_proof = fri_proof::( - initial_merkle_trees, - lde_polynomial_coeffs, - lde_polynomial_values, - &mut challenger, - &fri_params, + let opening_proof = timed!( timing, + "compute openings proof", + PolynomialBatch::prove_openings( + &S::fri_instance(zeta, g, rate_bits), + initial_merkle_trees, + &mut challenger, + &fri_params, + timing, + ) ); - StarkProof { + Ok(StarkProof { trace_cap, openings, opening_proof, - } + }) } fn compute_quotient_polys( diff --git a/starky/src/stark.rs b/starky/src/stark.rs index 6be5be2c..3c44f343 100644 --- a/starky/src/stark.rs +++ b/starky/src/stark.rs @@ -1,5 +1,6 @@ use plonky2::field::extension_field::{Extendable, FieldExtension}; use plonky2::field::packed_field::PackedField; +use plonky2::fri::structure::{FriBatchInfo, FriInstanceInfo, FriOracleInfo, FriPolynomialInfo}; use plonky2::hash::hash_types::RichField; use plonky2::plonk::circuit_builder::CircuitBuilder; @@ -59,4 +60,26 @@ pub trait Stark, const D: usize>: Sync { vars: StarkEvaluationTargets, yield_constr: &mut RecursiveConstraintConsumer, ); + + fn fri_instance( + zeta: F::Extension, + g: F::Extension, + rate_bits: usize, + ) -> FriInstanceInfo { + let no_blinding_oracle = FriOracleInfo { blinding: false }; + let trace_info = FriPolynomialInfo::from_range(0, 0..Self::COLUMNS); + let quotient_info = FriPolynomialInfo::from_range(1, 0..1 << rate_bits); + let zeta_batch = FriBatchInfo { + point: zeta, + polynomials: [trace_info.clone(), quotient_info].concat(), + }; + let zeta_right_batch = FriBatchInfo:: { + point: zeta * g, + polynomials: trace_info, + }; + FriInstanceInfo { + oracles: vec![no_blinding_oracle; 3], + batches: vec![zeta_batch], + } + } } From 1770e83c632214f9d8bd439ecc24b9930a24a17e Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Thu, 27 Jan 2022 13:02:36 +0100 Subject: [PATCH 040/143] Clippy --- starky/src/constraint_consumer.rs | 1 - starky/src/fibonacci_stark.rs | 1 - starky/src/prover.rs | 2 -- system_zero/src/system_zero.rs | 2 +- 4 files changed, 1 insertion(+), 5 deletions(-) diff --git a/starky/src/constraint_consumer.rs b/starky/src/constraint_consumer.rs index c7a8bfea..bc76f03b 100644 --- a/starky/src/constraint_consumer.rs +++ b/starky/src/constraint_consumer.rs @@ -1,7 +1,6 @@ use std::marker::PhantomData; use plonky2::field::extension_field::Extendable; -use plonky2::field::field_types::Field; use plonky2::field::packed_field::PackedField; use plonky2::hash::hash_types::RichField; use plonky2::iop::ext_target::ExtensionTarget; diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index 3d8d4be3..e6caa1e6 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -69,7 +69,6 @@ impl, const D: usize> Stark for FibonacciStar mod tests { use anyhow::Result; use plonky2::field::field_types::Field; - use plonky2::field::goldilocks_field::GoldilocksField; use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; use plonky2::util::timing::TimingTree; diff --git a/starky/src/prover.rs b/starky/src/prover.rs index fac876aa..29d9f2e4 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -5,10 +5,8 @@ use plonky2::field::field_types::Field; use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues}; use 
plonky2::field::zero_poly_coset::ZeroPolyOnCoset; use plonky2::fri::oracle::PolynomialBatch; -use plonky2::fri::prover::fri_proof; use plonky2::hash::hash_types::RichField; use plonky2::iop::challenger::Challenger; -use plonky2::plonk::circuit_data::CommonCircuitData; use plonky2::plonk::config::GenericConfig; use plonky2::timed; use plonky2::util::timing::TimingTree; diff --git a/system_zero/src/system_zero.rs b/system_zero/src/system_zero.rs index a16fb699..47950eb2 100644 --- a/system_zero/src/system_zero.rs +++ b/system_zero/src/system_zero.rs @@ -104,6 +104,6 @@ mod tests { let config = StarkConfig::standard_fast_config(); let mut timing = TimingTree::new("prove", Level::Debug); let trace = system.generate_trace(); - prove::(system, config, trace, &mut timing); + prove::(system, config, trace, &mut timing).unwrap(); } } From b6cb72b629867209a09535c0c4274032422926e0 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Thu, 27 Jan 2022 13:27:06 +0100 Subject: [PATCH 041/143] Comments --- starky/src/fibonacci_stark.rs | 5 +++++ starky/src/proof.rs | 2 +- starky/src/prover.rs | 29 ++++++++++++++++++++--------- starky/src/stark.rs | 2 ++ 4 files changed, 28 insertions(+), 10 deletions(-) diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index e6caa1e6..5e41f18e 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -9,6 +9,9 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer use crate::stark::Stark; use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars}; +/// Toy STARK system used for testing. +/// Computes a Fibonacci sequence with inital values `x0, x1` using the transition +/// `x0 <- x1, x1 <- x0 + x1`. pub struct FibonacciStark, const D: usize> { x0: F, x1: F, @@ -51,7 +54,9 @@ impl, const D: usize> Stark for FibonacciStar FE: FieldExtension, P: PackedField, { + // x0 <- x1 yield_constr.one(vars.next_values[0] - vars.local_values[1]); + // x1 <- x0 + x1 yield_constr.one(vars.next_values[1] - vars.local_values[0] - vars.local_values[1]); } diff --git a/starky/src/proof.rs b/starky/src/proof.rs index 22ebf5e2..4218e71f 100644 --- a/starky/src/proof.rs +++ b/starky/src/proof.rs @@ -52,7 +52,7 @@ impl, const D: usize> StarkOpeningSet { Self { local_values: eval_commitment(zeta, trace_commitment), next_values: eval_commitment(zeta * g, trace_commitment), - permutation_zs: vec![], + permutation_zs: vec![/*TODO*/], quotient_polys: eval_commitment(zeta, quotient_commitment), } } diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 29d9f2e4..5af22871 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -20,6 +20,7 @@ use crate::proof::{StarkOpeningSet, StarkProof}; use crate::stark::Stark; use crate::vars::StarkEvaluationVars; +// TODO: Deal with public inputs. pub fn prove( stark: S, config: StarkConfig, @@ -82,7 +83,7 @@ where quotient_poly .pad(degree << rate_bits) .expect("Quotient has failed, the vanishing polynomial is not divisible by `Z_H"); - // Split t into degree-n chunks. + // Split quotient into degree-n chunks. quotient_poly.chunks(degree) }) .collect(); @@ -134,6 +135,10 @@ where }) } +/// Computes the quotient polynomials `(sum alpha^i C_i(x)) / Z_H(x)` for `alpha` in `alphas`, +/// where the `C_i`s are the Stark constraints. +// TODO: This won't work for the Fibonacci example because the constraints wrap around the subgroup. +// The denominator should be the vanishing polynomial of `H` without its last element. 
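+// Editor's note (added as an informal summary, not part of the original patch): the function
+// below evaluates the random linear combination of the constraints (with powers of `alpha`) at
+// every point of the low-degree extension coset, multiplies each evaluation by the precomputed
+// `1 / Z_H(x)` from `ZeroPolyOnCoset`, and finally calls `coset_ifft` to return one quotient
+// polynomial in coefficient form per challenge in `alphas`.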
fn compute_quotient_polys( stark: &S, trace_commitment: &PolynomialBatch, @@ -151,11 +156,13 @@ where let degree = 1 << degree_bits; let points = F::two_adic_subgroup(degree_bits + rate_bits); + // Evaluation of the first Lagrange polynomial on the LDE domain. let lagrange_first = { let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); evals.values[0] = F::ONE; evals.lde(rate_bits) }; + // Evaluation of the last Lagrange polynomial on the LDE domain. let lagrange_last = { let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); evals.values[degree - 1] = F::ONE; @@ -164,6 +171,11 @@ where let z_h_on_coset = ZeroPolyOnCoset::new(degree_bits, rate_bits); + // Retrieve the LDE values at index `i`. + let get_at_index = |comm: &PolynomialBatch, i: usize| -> [F; S::COLUMNS] { + comm.get_lde_values(i).try_into().unwrap() + }; + alphas .iter() .map(|&alpha| { @@ -171,6 +183,7 @@ where (0..degree << rate_bits) .into_par_iter() .map(|i| { + // TODO: Set `P` to a genuine `PackedField` here. let mut consumer = ConstraintConsumer::::new( alpha, lagrange_first.values[i], @@ -178,17 +191,15 @@ where ); let vars = StarkEvaluationVars:: { - local_values: trace_commitment - .get_lde_values(i) - .try_into() - .unwrap(), - next_values: trace_commitment - .get_lde_values((i + 1) % (degree << rate_bits)) - .try_into() - .unwrap(), + local_values: &get_at_index(trace_commitment, i), + next_values: &get_at_index( + trace_commitment, + (i + 1) % (degree << rate_bits), + ), public_inputs: &[F::ZERO; S::PUBLIC_INPUTS], }; stark.eval_packed_base(vars, &mut consumer); + // TODO: Fix this once we a genuine `PackedField`. let constraints_eval = consumer.accumulator(); let denominator_inv = z_h_on_coset.eval_inverse(i); constraints_eval * denominator_inv diff --git a/starky/src/stark.rs b/starky/src/stark.rs index 3c44f343..f91d4fdd 100644 --- a/starky/src/stark.rs +++ b/starky/src/stark.rs @@ -61,6 +61,8 @@ pub trait Stark, const D: usize>: Sync { yield_constr: &mut RecursiveConstraintConsumer, ); + /// Computes the FRI instance used to prove this Stark. + // TODO: Permutation polynomials. fn fri_instance( zeta: F::Extension, g: F::Extension, From a74ffdb898554b657d48402d3b564bf4f5382b71 Mon Sep 17 00:00:00 2001 From: Hamish Ivey-Law <426294+unzvfu@users.noreply.github.com> Date: Fri, 28 Jan 2022 09:08:27 +1100 Subject: [PATCH 042/143] Remove GMiMC and Rescue hash functions (#450) * Remove GMiMC and Rescue hash functions. 
* rustfmt --- plonky2/benches/hashing.rs | 12 - plonky2/src/bin/generate_constants.rs | 1 - plonky2/src/gates/gate_tree.rs | 6 +- plonky2/src/gates/gmimc.rs | 433 ---------------------- plonky2/src/gates/mod.rs | 1 - plonky2/src/hash/gmimc.rs | 168 --------- plonky2/src/hash/hash_types.rs | 3 +- plonky2/src/hash/mod.rs | 2 - plonky2/src/hash/rescue.rs | 457 ------------------------ plonky2/src/plonk/config.rs | 11 - plonky2/src/plonk/recursive_verifier.rs | 15 +- 11 files changed, 6 insertions(+), 1103 deletions(-) delete mode 100644 plonky2/src/gates/gmimc.rs delete mode 100644 plonky2/src/hash/gmimc.rs delete mode 100644 plonky2/src/hash/rescue.rs diff --git a/plonky2/benches/hashing.rs b/plonky2/benches/hashing.rs index 2d17dba1..a968d957 100644 --- a/plonky2/benches/hashing.rs +++ b/plonky2/benches/hashing.rs @@ -3,7 +3,6 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use plonky2::field::goldilocks_field::GoldilocksField; -use plonky2::hash::gmimc::GMiMC; use plonky2::hash::hash_types::{BytesHash, RichField}; use plonky2::hash::hashing::SPONGE_WIDTH; use plonky2::hash::keccak::KeccakHash; @@ -11,16 +10,6 @@ use plonky2::hash::poseidon::Poseidon; use plonky2::plonk::config::Hasher; use tynm::type_name; -pub(crate) fn bench_gmimc, const WIDTH: usize>(c: &mut Criterion) { - c.bench_function(&format!("gmimc<{}, {}>", type_name::(), WIDTH), |b| { - b.iter_batched( - || F::rand_arr::(), - |state| F::gmimc_permute(state), - BatchSize::SmallInput, - ) - }); -} - pub(crate) fn bench_keccak(c: &mut Criterion) { c.bench_function("keccak256", |b| { b.iter_batched( @@ -45,7 +34,6 @@ pub(crate) fn bench_poseidon(c: &mut Criterion) { } fn criterion_benchmark(c: &mut Criterion) { - bench_gmimc::(c); bench_poseidon::(c); bench_keccak::(c); } diff --git a/plonky2/src/bin/generate_constants.rs b/plonky2/src/bin/generate_constants.rs index eb35aec3..d9757aff 100644 --- a/plonky2/src/bin/generate_constants.rs +++ b/plonky2/src/bin/generate_constants.rs @@ -11,7 +11,6 @@ use rand_chacha::ChaCha8Rng; // range of GoldilocksField, then verify that each constant also fits in GoldilocksField. const SAMPLE_RANGE_END: u64 = 0xffffffff70000001; -// const N: usize = 101; // For GMiMC // const N: usize = 8 * 30; // For Posiedon-8 const N: usize = 12 * 30; // For Posiedon-12 diff --git a/plonky2/src/gates/gate_tree.rs b/plonky2/src/gates/gate_tree.rs index 029616a8..2f670337 100644 --- a/plonky2/src/gates/gate_tree.rs +++ b/plonky2/src/gates/gate_tree.rs @@ -228,9 +228,9 @@ mod tests { use crate::gates::arithmetic_extension::ArithmeticExtensionGate; use crate::gates::base_sum::BaseSumGate; use crate::gates::constant::ConstantGate; - use crate::gates::gmimc::GMiMCGate; use crate::gates::interpolation::HighDegreeInterpolationGate; use crate::gates::noop::NoopGate; + use crate::gates::poseidon::PoseidonGate; use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; #[test] @@ -245,7 +245,7 @@ mod tests { GateRef::new(ConstantGate { num_consts: 4 }), GateRef::new(ArithmeticExtensionGate { num_ops: 4 }), GateRef::new(BaseSumGate::<4>::new(4)), - GateRef::new(GMiMCGate::::new()), + GateRef::new(PoseidonGate::::new()), GateRef::new(HighDegreeInterpolationGate::new(2)), ]; @@ -276,7 +276,7 @@ mod tests { assert!( gates_with_prefix .iter() - .all(|(g, p)| g.0.degree() + g.0.num_constants() + p.len() <= 8), + .all(|(g, p)| g.0.degree() + g.0.num_constants() + p.len() <= 9), "Total degree is larger than 8." 
); diff --git a/plonky2/src/gates/gmimc.rs b/plonky2/src/gates/gmimc.rs deleted file mode 100644 index fdfb8673..00000000 --- a/plonky2/src/gates/gmimc.rs +++ /dev/null @@ -1,433 +0,0 @@ -use std::marker::PhantomData; - -use plonky2_field::extension_field::Extendable; -use plonky2_field::field_types::Field; -use plonky2_field::packed_field::PackedField; - -use crate::gates::gate::Gate; -use crate::gates::packed_util::PackedEvaluableBase; -use crate::gates::util::StridedConstraintConsumer; -use crate::hash::gmimc; -use crate::hash::gmimc::GMiMC; -use crate::hash::hash_types::RichField; -use crate::iop::ext_target::ExtensionTarget; -use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator}; -use crate::iop::target::Target; -use crate::iop::wire::Wire; -use crate::iop::witness::{PartitionWitness, Witness}; -use crate::plonk::circuit_builder::CircuitBuilder; -use crate::plonk::vars::{ - EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch, - EvaluationVarsBasePacked, -}; - -/// Evaluates a full GMiMC permutation with 12 state elements. -/// -/// This also has some extra features to make it suitable for efficiently verifying Merkle proofs. -/// It has a flag which can be used to swap the first four inputs with the next four, for ordering -/// sibling digests. -#[derive(Debug)] -pub struct GMiMCGate< - F: RichField + Extendable + GMiMC, - const D: usize, - const WIDTH: usize, -> { - _phantom: PhantomData, -} - -impl + GMiMC, const D: usize, const WIDTH: usize> - GMiMCGate -{ - pub fn new() -> Self { - GMiMCGate { - _phantom: PhantomData, - } - } - - /// The wire index for the `i`th input to the permutation. - pub fn wire_input(i: usize) -> usize { - i - } - - /// The wire index for the `i`th output to the permutation. - pub fn wire_output(i: usize) -> usize { - WIDTH + i - } - - /// If this is set to 1, the first four inputs will be swapped with the next four inputs. This - /// is useful for ordering hashes in Merkle proofs. Otherwise, this should be set to 0. - pub const WIRE_SWAP: usize = 2 * WIDTH; - - /// A wire which stores the input to the `i`th cubing. - fn wire_cubing_input(i: usize) -> usize { - 2 * WIDTH + 1 + i - } - - /// End of wire indices, exclusive. - fn end() -> usize { - 2 * WIDTH + 1 + gmimc::NUM_ROUNDS - } -} - -impl + GMiMC, const D: usize, const WIDTH: usize> Gate - for GMiMCGate -{ - fn id(&self) -> String { - format!(" {:?}", WIDTH, self) - } - - fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { - let mut constraints = Vec::with_capacity(self.num_constraints()); - - // Assert that `swap` is binary. - let swap = vars.local_wires[Self::WIRE_SWAP]; - constraints.push(swap * (swap - F::Extension::ONE)); - - let mut state = Vec::with_capacity(12); - for i in 0..4 { - let a = vars.local_wires[i]; - let b = vars.local_wires[i + 4]; - state.push(a + swap * (b - a)); - } - for i in 0..4 { - let a = vars.local_wires[i + 4]; - let b = vars.local_wires[i]; - state.push(a + swap * (b - a)); - } - for i in 8..12 { - state.push(vars.local_wires[i]); - } - - // Value that is implicitly added to each element. 
- // See https://affine.group/2020/02/starkware-challenge - let mut addition_buffer = F::Extension::ZERO; - - for r in 0..gmimc::NUM_ROUNDS { - let active = r % WIDTH; - let constant = F::from_canonical_u64(>::ROUND_CONSTANTS[r]); - let cubing_input = state[active] + addition_buffer + constant.into(); - let cubing_input_wire = vars.local_wires[Self::wire_cubing_input(r)]; - constraints.push(cubing_input - cubing_input_wire); - let f = cubing_input_wire.cube(); - addition_buffer += f; - state[active] -= f; - } - - for i in 0..WIDTH { - state[i] += addition_buffer; - constraints.push(state[i] - vars.local_wires[Self::wire_output(i)]); - } - - constraints - } - - fn eval_unfiltered_base_one( - &self, - _vars: EvaluationVarsBase, - _yield_constr: StridedConstraintConsumer, - ) { - panic!("use eval_unfiltered_base_packed instead"); - } - - fn eval_unfiltered_base_batch(&self, vars_base: EvaluationVarsBaseBatch) -> Vec { - self.eval_unfiltered_base_batch_packed(vars_base) - } - - fn eval_unfiltered_recursively( - &self, - builder: &mut CircuitBuilder, - vars: EvaluationTargets, - ) -> Vec> { - let mut constraints = Vec::with_capacity(self.num_constraints()); - - let swap = vars.local_wires[Self::WIRE_SWAP]; - constraints.push(builder.mul_sub_extension(swap, swap, swap)); - - let mut state = Vec::with_capacity(12); - for i in 0..4 { - let a = vars.local_wires[i]; - let b = vars.local_wires[i + 4]; - let delta = builder.sub_extension(b, a); - state.push(builder.mul_add_extension(swap, delta, a)); - } - for i in 0..4 { - let a = vars.local_wires[i + 4]; - let b = vars.local_wires[i]; - let delta = builder.sub_extension(b, a); - state.push(builder.mul_add_extension(swap, delta, a)); - } - for i in 8..12 { - state.push(vars.local_wires[i]); - } - - // Value that is implicitly added to each element. - // See https://affine.group/2020/02/starkware-challenge - let mut addition_buffer = builder.zero_extension(); - - for r in 0..gmimc::NUM_ROUNDS { - let active = r % WIDTH; - - let constant = F::from_canonical_u64(>::ROUND_CONSTANTS[r]); - let constant = builder.constant_extension(constant.into()); - let cubing_input = - builder.add_many_extension(&[state[active], addition_buffer, constant]); - let cubing_input_wire = vars.local_wires[Self::wire_cubing_input(r)]; - constraints.push(builder.sub_extension(cubing_input, cubing_input_wire)); - let f = builder.cube_extension(cubing_input_wire); - addition_buffer = builder.add_extension(addition_buffer, f); - state[active] = builder.sub_extension(state[active], f); - } - - for i in 0..WIDTH { - state[i] = builder.add_extension(state[i], addition_buffer); - constraints - .push(builder.sub_extension(state[i], vars.local_wires[Self::wire_output(i)])); - } - - constraints - } - - fn generators( - &self, - gate_index: usize, - _local_constants: &[F], - ) -> Vec>> { - let gen = GMiMCGenerator:: { - gate_index, - _phantom: PhantomData, - }; - vec![Box::new(gen.adapter())] - } - - fn num_wires(&self) -> usize { - Self::end() - } - - fn num_constants(&self) -> usize { - 0 - } - - fn degree(&self) -> usize { - 3 - } - - fn num_constraints(&self) -> usize { - gmimc::NUM_ROUNDS + WIDTH + 1 - } -} - -impl + GMiMC, const D: usize, const WIDTH: usize> - PackedEvaluableBase for GMiMCGate -{ - fn eval_unfiltered_base_packed>( - &self, - vars: EvaluationVarsBasePacked
<P>
, - mut yield_constr: StridedConstraintConsumer<P>
, - ) { - // Assert that `swap` is binary. - let swap = vars.local_wires[Self::WIRE_SWAP]; - yield_constr.one(swap * (swap - F::ONE)); - - let mut state = Vec::with_capacity(12); - for i in 0..4 { - let a = vars.local_wires[i]; - let b = vars.local_wires[i + 4]; - state.push(a + swap * (b - a)); - } - for i in 0..4 { - let a = vars.local_wires[i + 4]; - let b = vars.local_wires[i]; - state.push(a + swap * (b - a)); - } - for i in 8..12 { - state.push(vars.local_wires[i]); - } - - // Value that is implicitly added to each element. - // See https://affine.group/2020/02/starkware-challenge - let mut addition_buffer = P::ZEROS; - - for r in 0..gmimc::NUM_ROUNDS { - let active = r % WIDTH; - let constant = F::from_canonical_u64(>::ROUND_CONSTANTS[r]); - let cubing_input = state[active] + addition_buffer + constant; - let cubing_input_wire = vars.local_wires[Self::wire_cubing_input(r)]; - yield_constr.one(cubing_input - cubing_input_wire); - let f = cubing_input_wire.square() * cubing_input_wire; - addition_buffer += f; - state[active] -= f; - } - - for i in 0..WIDTH { - state[i] += addition_buffer; - yield_constr.one(state[i] - vars.local_wires[Self::wire_output(i)]); - } - } -} - -#[derive(Debug)] -struct GMiMCGenerator< - F: RichField + Extendable + GMiMC, - const D: usize, - const WIDTH: usize, -> { - gate_index: usize, - _phantom: PhantomData, -} - -impl + GMiMC, const D: usize, const WIDTH: usize> - SimpleGenerator for GMiMCGenerator -{ - fn dependencies(&self) -> Vec { - let mut dep_input_indices = Vec::with_capacity(WIDTH + 1); - for i in 0..WIDTH { - dep_input_indices.push(GMiMCGate::::wire_input(i)); - } - dep_input_indices.push(GMiMCGate::::WIRE_SWAP); - - dep_input_indices - .into_iter() - .map(|input| { - Target::Wire(Wire { - gate: self.gate_index, - input, - }) - }) - .collect() - } - - fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { - let mut state = (0..WIDTH) - .map(|i| { - witness.get_wire(Wire { - gate: self.gate_index, - input: GMiMCGate::::wire_input(i), - }) - }) - .collect::>(); - - let swap_value = witness.get_wire(Wire { - gate: self.gate_index, - input: GMiMCGate::::WIRE_SWAP, - }); - debug_assert!(swap_value == F::ZERO || swap_value == F::ONE); - if swap_value == F::ONE { - for i in 0..4 { - state.swap(i, 4 + i); - } - } - - // Value that is implicitly added to each element. 
- // See https://affine.group/2020/02/starkware-challenge - let mut addition_buffer = F::ZERO; - - for r in 0..gmimc::NUM_ROUNDS { - let active = r % WIDTH; - let constant = F::from_canonical_u64(>::ROUND_CONSTANTS[r]); - let cubing_input = state[active] + addition_buffer + constant; - out_buffer.set_wire( - Wire { - gate: self.gate_index, - input: GMiMCGate::::wire_cubing_input(r), - }, - cubing_input, - ); - let f = cubing_input.cube(); - addition_buffer += f; - state[active] -= f; - } - - for i in 0..WIDTH { - state[i] += addition_buffer; - out_buffer.set_wire( - Wire { - gate: self.gate_index, - input: GMiMCGate::::wire_output(i), - }, - state[i], - ); - } - } -} - -#[cfg(test)] -mod tests { - use anyhow::Result; - use plonky2_field::field_types::Field; - use plonky2_field::goldilocks_field::GoldilocksField; - - use crate::gates::gate_testing::{test_eval_fns, test_low_degree}; - use crate::gates::gmimc::GMiMCGate; - use crate::hash::gmimc::GMiMC; - use crate::iop::generator::generate_partial_witness; - use crate::iop::wire::Wire; - use crate::iop::witness::{PartialWitness, Witness}; - use crate::plonk::circuit_builder::CircuitBuilder; - use crate::plonk::circuit_data::CircuitConfig; - use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; - - #[test] - fn generated_output() { - const D: usize = 2; - type C = PoseidonGoldilocksConfig; - type F = >::F; - const WIDTH: usize = 12; - - let config = CircuitConfig::standard_recursion_config(); - let mut builder = CircuitBuilder::new(config); - type Gate = GMiMCGate; - let gate = Gate::new(); - let gate_index = builder.add_gate(gate, vec![]); - let circuit = builder.build_prover::(); - - let permutation_inputs = (0..WIDTH).map(F::from_canonical_usize).collect::>(); - - let mut inputs = PartialWitness::new(); - inputs.set_wire( - Wire { - gate: gate_index, - input: Gate::WIRE_SWAP, - }, - F::ZERO, - ); - for i in 0..WIDTH { - inputs.set_wire( - Wire { - gate: gate_index, - input: Gate::wire_input(i), - }, - permutation_inputs[i], - ); - } - - let witness = generate_partial_witness(inputs, &circuit.prover_only, &circuit.common); - - let expected_outputs: [F; WIDTH] = - F::gmimc_permute_naive(permutation_inputs.try_into().unwrap()); - for i in 0..WIDTH { - let out = witness.get_wire(Wire { - gate: 0, - input: Gate::wire_output(i), - }); - assert_eq!(out, expected_outputs[i]); - } - } - - #[test] - fn low_degree() { - type F = GoldilocksField; - const WIDTH: usize = 12; - let gate = GMiMCGate::::new(); - test_low_degree(gate) - } - - #[test] - fn eval_fns() -> Result<()> { - const D: usize = 2; - type C = PoseidonGoldilocksConfig; - type F = >::F; - const WIDTH: usize = 12; - let gate = GMiMCGate::::new(); - test_eval_fns::(gate) - } -} diff --git a/plonky2/src/gates/mod.rs b/plonky2/src/gates/mod.rs index aae46dc5..a3f92615 100644 --- a/plonky2/src/gates/mod.rs +++ b/plonky2/src/gates/mod.rs @@ -11,7 +11,6 @@ pub mod constant; pub mod exponentiation; pub mod gate; pub mod gate_tree; -pub mod gmimc; pub mod interpolation; pub mod low_degree_interpolation; pub mod multiplication_extension; diff --git a/plonky2/src/hash/gmimc.rs b/plonky2/src/hash/gmimc.rs deleted file mode 100644 index 050bdeec..00000000 --- a/plonky2/src/hash/gmimc.rs +++ /dev/null @@ -1,168 +0,0 @@ -use plonky2_field::extension_field::Extendable; -use plonky2_field::field_types::Field; -use plonky2_field::goldilocks_field::GoldilocksField; -use unroll::unroll_for_loops; - -use crate::gates::gmimc::GMiMCGate; -use crate::hash::hash_types::{HashOut, RichField}; -use 
crate::hash::hashing::{compress, hash_n_to_hash, PlonkyPermutation, SPONGE_WIDTH}; -use crate::iop::target::{BoolTarget, Target}; -use crate::plonk::circuit_builder::CircuitBuilder; -use crate::plonk::config::{AlgebraicHasher, Hasher}; - -pub(crate) const NUM_ROUNDS: usize = 101; - -pub trait GMiMC: Field -where - [u64; NUM_ROUNDS]: Sized, -{ - const ROUND_CONSTANTS: [u64; NUM_ROUNDS]; - - #[unroll_for_loops] - fn gmimc_permute(mut xs: [Self; WIDTH]) -> [Self; WIDTH] { - // Value that is implicitly added to each element. - // See https://affine.group/2020/02/starkware-challenge - let mut addition_buffer = Self::ZERO; - - for (r, &constant) in Self::ROUND_CONSTANTS.iter().enumerate() { - let active = r % WIDTH; - let f = (xs[active] + addition_buffer + Self::from_canonical_u64(constant)).cube(); - addition_buffer += f; - xs[active] -= f; - } - - for i in 0..WIDTH { - xs[i] += addition_buffer; - } - - xs - } - - #[unroll_for_loops] - fn gmimc_permute_naive(mut xs: [Self; WIDTH]) -> [Self; WIDTH] { - for (r, &constant) in Self::ROUND_CONSTANTS.iter().enumerate() { - let active = r % WIDTH; - let f = (xs[active] + Self::from_canonical_u64(constant)).cube(); - for i in 0..WIDTH { - if i != active { - xs[i] += f; - } - } - } - - xs - } -} - -/// See `generate_constants` about how these were generated. -#[rustfmt::skip] -const GOLDILOCKS_ROUND_CONSTANTS: [u64; NUM_ROUNDS] = [ - 0xb585f767417ee042, 0x7746a55f77c10331, 0xb2fb0d321d356f7a, 0x0f6760a486f1621f, - 0xe10d6666b36abcdf, 0x8cae14cb455cc50b, 0xd438539cf2cee334, 0xef781c7d4c1fd8b4, - 0xcdc4a23a0aca4b1f, 0x277fa208d07b52e3, 0xe17653a300493d38, 0xc54302f27c287dc1, - 0x8628782231d47d10, 0x59cd1a8a690b49f2, 0xc3b919ad9efec0b0, 0xa484c4c637641d97, - 0x308bbd23f191398b, 0x6e4a40c1bf713cf1, 0x9a2eedb7510414fb, 0xe360c6e111c2c63b, - 0xd5c771901d4d89aa, 0xc35eae076e7d6b2f, 0x849c2656d0a09cad, 0xc0572c8c5cf1df2b, - 0xe9fa634a883b8bf3, 0xf56f6d4900fb1fdd, 0xf7d713e872a72a1b, 0x8297132b6ba47612, - 0xad6805e12ee8af1c, 0xac51d9f6485c22b9, 0x502ad7dc3bd56bf8, 0x57a1550c3761c577, - 0x66bbd30e99d311da, 0x0da2abef5e948f87, 0xf0612750443f8e94, 0x28b8ec3afb937d8c, - 0x92a756e6be54ca18, 0x70e741ec304e925d, 0x019d5ee2b037c59f, 0x6f6f2ed7a30707d1, - 0x7cf416d01e8c169c, 0x61df517bb17617df, 0x85dc499b4c67dbaa, 0x4b959b48dad27b23, - 0xe8be3e5e0dd779a0, 0xf5c0bc1e525ed8e6, 0x40b12cbf263cf853, 0xa637093f13e2ea3c, - 0x3cc3f89232e3b0c8, 0x2e479dc16bfe86c0, 0x6f49de07d6d39469, 0x213ce7beecc232de, - 0x5b043134851fc00a, 0xa2de45784a861506, 0x7103aaf97bed8dd5, 0x5326fc0dbb88a147, - 0xa9ceb750364cb77a, 0x27f8ec88cc9e991f, 0xfceb4fda8c93fb83, 0xfac6ff13b45b260e, - 0x7131aa455813380b, 0x93510360d5d68119, 0xad535b24fb96e3db, 0x4627f5c6b7efc045, - 0x645cf794e4da78a9, 0x241c70ed1ac2877f, 0xacb8e076b009e825, 0x3737e9db6477bd9d, - 0xe7ea5e344cd688ed, 0x90dee4a009214640, 0xd1b1edf7c77e74af, 0x0b65481bab42158e, - 0x99ad1aab4b4fe3e7, 0x438a7c91f1a360cd, 0xb60de3bd159088bf, 0xc99cab6b47a3e3bb, - 0x69a5ed92d5677cef, 0x5e7b329c482a9396, 0x5fc0ac0829f893c9, 0x32db82924fb757ea, - 0x0ade699c5cf24145, 0x7cc5583b46d7b5bb, 0x85df9ed31bf8abcb, 0x6604df501ad4de64, - 0xeb84f60941611aec, 0xda60883523989bd4, 0x8f97fe40bf3470bf, 0xa93f485ce0ff2b32, - 0x6704e8eebc2afb4b, 0xcee3e9ac788ad755, 0x510d0e66062a270d, 0xf6323f48d74634a0, - 0x0b508cdf04990c90, 0xf241708a4ef7ddf9, 0x60e75c28bb368f82, 0xa6217d8c3f0f9989, - 0x7159cd30f5435b53, 0x839b4e8fe97ec79f, 0x0d3f3e5e885db625, 0x8f7d83be1daea54b, - 0x780f22441e8dbc04, -]; - -impl GMiMC<8> for GoldilocksField { - const ROUND_CONSTANTS: [u64; NUM_ROUNDS] = 
GOLDILOCKS_ROUND_CONSTANTS; -} - -impl GMiMC<12> for GoldilocksField { - const ROUND_CONSTANTS: [u64; NUM_ROUNDS] = GOLDILOCKS_ROUND_CONSTANTS; -} - -pub struct GMiMCPermutation; -impl PlonkyPermutation for GMiMCPermutation { - fn permute(input: [F; SPONGE_WIDTH]) -> [F; SPONGE_WIDTH] { - F::gmimc_permute(input) - } -} - -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub struct GMiMCHash; -impl Hasher for GMiMCHash { - const HASH_SIZE: usize = 4 * 8; - type Hash = HashOut; - type Permutation = GMiMCPermutation; - - fn hash(input: &[F], pad: bool) -> Self::Hash { - hash_n_to_hash::(input, pad) - } - - fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash { - compress::(left, right) - } -} - -impl AlgebraicHasher for GMiMCHash { - fn permute_swapped( - inputs: [Target; SPONGE_WIDTH], - swap: BoolTarget, - builder: &mut CircuitBuilder, - ) -> [Target; SPONGE_WIDTH] - where - F: RichField + Extendable, - { - let gate_type = GMiMCGate::::new(); - let gate = builder.add_gate(gate_type, vec![]); - - let swap_wire = GMiMCGate::::WIRE_SWAP; - let swap_wire = Target::wire(gate, swap_wire); - builder.connect(swap.target, swap_wire); - - // Route input wires. - for i in 0..SPONGE_WIDTH { - let in_wire = GMiMCGate::::wire_input(i); - let in_wire = Target::wire(gate, in_wire); - builder.connect(inputs[i], in_wire); - } - - // Collect output wires. - (0..SPONGE_WIDTH) - .map(|i| Target::wire(gate, GMiMCGate::::wire_output(i))) - .collect::>() - .try_into() - .unwrap() - } -} - -#[cfg(test)] -mod tests { - use plonky2_field::goldilocks_field::GoldilocksField; - - use crate::hash::gmimc::GMiMC; - - fn check_consistency, const WIDTH: usize>() { - let xs = F::rand_arr::(); - let out = F::gmimc_permute(xs); - let out_naive = F::gmimc_permute_naive(xs); - assert_eq!(out, out_naive); - } - - #[test] - fn consistency() { - check_consistency::(); - } -} diff --git a/plonky2/src/hash/hash_types.rs b/plonky2/src/hash/hash_types.rs index 674486c8..51a93fdc 100644 --- a/plonky2/src/hash/hash_types.rs +++ b/plonky2/src/hash/hash_types.rs @@ -3,13 +3,12 @@ use plonky2_field::goldilocks_field::GoldilocksField; use rand::Rng; use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use crate::hash::gmimc::GMiMC; use crate::hash::poseidon::Poseidon; use crate::iop::target::Target; use crate::plonk::config::GenericHashOut; /// A prime order field with the features we need to use it as a base field in our argument system. -pub trait RichField: PrimeField + GMiMC<12> + Poseidon {} +pub trait RichField: PrimeField + Poseidon {} impl RichField for GoldilocksField {} diff --git a/plonky2/src/hash/mod.rs b/plonky2/src/hash/mod.rs index 5a8ccb3f..b8293920 100644 --- a/plonky2/src/hash/mod.rs +++ b/plonky2/src/hash/mod.rs @@ -1,5 +1,4 @@ mod arch; -pub mod gmimc; pub mod hash_types; pub mod hashing; pub mod keccak; @@ -8,4 +7,3 @@ pub mod merkle_tree; pub mod path_compression; pub mod poseidon; pub mod poseidon_goldilocks; -pub mod rescue; diff --git a/plonky2/src/hash/rescue.rs b/plonky2/src/hash/rescue.rs deleted file mode 100644 index 59e9d265..00000000 --- a/plonky2/src/hash/rescue.rs +++ /dev/null @@ -1,457 +0,0 @@ -//! Implements Rescue Prime. 
- -use plonky2_field::field_types::Field; -use unroll::unroll_for_loops; - -const ROUNDS: usize = 8; - -const W: usize = 12; - -const MDS: [[u64; W]; W] = [ - [ - 10760600708254618966, - 16769767337539665921, - 5534023221388089754, - 2049638230143736946, - 16140901062381928449, - 2635249153041947502, - 3074457345215605419, - 11068046442776179508, - 13835058053470224385, - 6148914690431210838, - 9223372035646816257, - 1, - ], - [ - 5675921252705733081, - 10760600708254618966, - 16769767337539665921, - 5534023221388089754, - 2049638230143736946, - 16140901062381928449, - 2635249153041947502, - 3074457345215605419, - 11068046442776179508, - 13835058053470224385, - 6148914690431210838, - 9223372035646816257, - ], - [ - 1317624576520973751, - 5675921252705733081, - 10760600708254618966, - 16769767337539665921, - 5534023221388089754, - 2049638230143736946, - 16140901062381928449, - 2635249153041947502, - 3074457345215605419, - 11068046442776179508, - 13835058053470224385, - 6148914690431210838, - ], - [ - 15987178195121148178, - 1317624576520973751, - 5675921252705733081, - 10760600708254618966, - 16769767337539665921, - 5534023221388089754, - 2049638230143736946, - 16140901062381928449, - 2635249153041947502, - 3074457345215605419, - 11068046442776179508, - 13835058053470224385, - ], - [ - 17293822566837780481, - 15987178195121148178, - 1317624576520973751, - 5675921252705733081, - 10760600708254618966, - 16769767337539665921, - 5534023221388089754, - 2049638230143736946, - 16140901062381928449, - 2635249153041947502, - 3074457345215605419, - 11068046442776179508, - ], - [ - 3255307777287111620, - 17293822566837780481, - 15987178195121148178, - 1317624576520973751, - 5675921252705733081, - 10760600708254618966, - 16769767337539665921, - 5534023221388089754, - 2049638230143736946, - 16140901062381928449, - 2635249153041947502, - 3074457345215605419, - ], - [ - 1024819115071868473, - 3255307777287111620, - 17293822566837780481, - 15987178195121148178, - 1317624576520973751, - 5675921252705733081, - 10760600708254618966, - 16769767337539665921, - 5534023221388089754, - 2049638230143736946, - 16140901062381928449, - 2635249153041947502, - ], - [ - 9708812669101911849, - 1024819115071868473, - 3255307777287111620, - 17293822566837780481, - 15987178195121148178, - 1317624576520973751, - 5675921252705733081, - 10760600708254618966, - 16769767337539665921, - 5534023221388089754, - 2049638230143736946, - 16140901062381928449, - ], - [ - 2767011610694044877, - 9708812669101911849, - 1024819115071868473, - 3255307777287111620, - 17293822566837780481, - 15987178195121148178, - 1317624576520973751, - 5675921252705733081, - 10760600708254618966, - 16769767337539665921, - 5534023221388089754, - 2049638230143736946, - ], - [ - 878416384347315834, - 2767011610694044877, - 9708812669101911849, - 1024819115071868473, - 3255307777287111620, - 17293822566837780481, - 15987178195121148178, - 1317624576520973751, - 5675921252705733081, - 10760600708254618966, - 16769767337539665921, - 5534023221388089754, - ], - [ - 17608255704416649217, - 878416384347315834, - 2767011610694044877, - 9708812669101911849, - 1024819115071868473, - 3255307777287111620, - 17293822566837780481, - 15987178195121148178, - 1317624576520973751, - 5675921252705733081, - 10760600708254618966, - 16769767337539665921, - ], - [ - 15238614667590392076, - 17608255704416649217, - 878416384347315834, - 2767011610694044877, - 9708812669101911849, - 1024819115071868473, - 3255307777287111620, - 17293822566837780481, - 15987178195121148178, - 
1317624576520973751, - 5675921252705733081, - 10760600708254618966, - ], -]; - -const RESCUE_CONSTANTS: [[u64; W]; ROUNDS * 2] = [ - [ - 12050887499329086906, - 1748247961703512657, - 315780861775001585, - 2827656358919812970, - 13335864861236723579, - 3010729529365640897, - 8463534053828271146, - 2528500966106598845, - 8969871077123422281, - 1002624930202741107, - 599979829006456404, - 4386170815218774254, - ], - [ - 5771413917591851532, - 11946802620311685142, - 4759792267858670262, - 6879094914431255667, - 3985911073214909073, - 1542850118294175816, - 5393560436452023029, - 8331250756632997735, - 3395511836281190608, - 17601255793194446503, - 12848459944475727152, - 11995465655754698601, - ], - [ - 14063960046551560130, - 14790209580166185143, - 5509023472758717841, - 1274395897760495573, - 16719545989415697758, - 17865948122414223407, - 3919263713959798649, - 5633741078654387163, - 15665612362287352054, - 3418834727998553015, - 5324019631954832682, - 17962066557010997431, - ], - [ - 3282193104189649752, - 18423507935939999211, - 9035104445528866459, - 30842260240043277, - 3896337933354935129, - 6615548113269323045, - 6625827707190475694, - 6677757329269550670, - 11419013193186889337, - 17111888851716383760, - 12075517898615128691, - 8139844272075088233, - ], - [ - 8872892112814161072, - 17529364346566228604, - 7526576514327158912, - 850359069964902700, - 9679332912197531902, - 10591229741059812071, - 12759208863825924546, - 14552519355635838750, - 16066249893409806278, - 11283035366525176262, - 1047378652379935387, - 17032498397644511356, - ], - [ - 2938626421478254042, - 10375267398354586672, - 13728514869380643947, - 16707318479225743731, - 9785828188762698567, - 8610686976269299752, - 5478372191917042178, - 12716344455538470365, - 9968276048553747246, - 14746805727771473956, - 4822070620124107028, - 9901161649549513416, - ], - [ - 13458162407040644078, - 4045792126424269312, - 9709263167782315020, - 2163173014916005515, - 17079206331095671215, - 2556388076102629669, - 6582772486087242347, - 1239959540200663058, - 18268236910639895687, - 12499012548657350745, - 17213068585339946119, - 7641451088868756688, - ], - [ - 14674555473338434116, - 14624532976317185113, - 13625541984298615970, - 7612892294159054770, - 12294028208969561574, - 6067206081581804358, - 5778082506883496792, - 7389487446513884800, - 12929525660730020877, - 18244350162788654296, - 15285920877034454694, - 3640669683987215349, - ], - [ - 6737585134029996281, - 1826890539455248546, - 289376081355380231, - 10782622161517803787, - 12978425540147835172, - 9828233103297278473, - 16384075371934678711, - 3187492301890791304, - 12985433735185968457, - 9470935291631377473, - 16328323199113140151, - 16218490552434224203, - ], - [ - 6188809977565251499, - 18437718710937437067, - 4530469469895539008, - 9596355277372723349, - 13602518824447658705, - 8759976068576854281, - 10504320064094929535, - 3980760429843656150, - 14609448298151012462, - 5839843841558860609, - 10283805260656050418, - 7239168159249274821, - ], - [ - 3604243611640027441, - 5237321927316578323, - 5071861664926666316, - 13025405632646149705, - 3285281651566464074, - 12121596060272825779, - 1900602777802961569, - 8122527981264852045, - 6731303887159752901, - 9197659817406857040, - 844741616904786364, - 14249777686667858094, - ], - [ - 8602844218963499297, - 10133401373828451640, - 11618292280328565166, - 8828272598402499582, - 4252246265076774689, - 9760449011955070998, - 10233981507028897480, - 10427510555228840014, - 1007817664531124790, - 4465396600980659145, - 
7727267420665314215, - 7904022788946844554, - ], - [ - 11418297156527169222, - 15865399053509010196, - 1727198235391450850, - 16557095577717348672, - 1524052121709169653, - 14531367160053894310, - 4071756280138432327, - 10333204220115446291, - 16584144375833061215, - 12237566480526488368, - 11090440024401607208, - 18281335018830792766, - ], - [ - 16152169547074248135, - 18338155611216027761, - 15842640128213925612, - 14687926435880145351, - 13259626900273707210, - 6187877366876303234, - 10312881470701795438, - 1924945292721719446, - 2278209355262975917, - 3250749056007953206, - 11589006946114672195, - 241829012299953928, - ], - [ - 11244459446597052449, - 7319043416418482137, - 8148526814449636806, - 9054933038587901070, - 550333919248348827, - 5513167392062632770, - 12644459803778263764, - 9903621375535446226, - 16390581784506871871, - 14586524717888286021, - 6975796306584548762, - 5200407948555191573, - ], - [ - 2855794043288846965, - 1259443213892506318, - 6145351706926586935, - 3853784494234324998, - 5871277378086513850, - 9414363368707862566, - 11946957446931890832, - 308083693687568600, - 12712587722369770461, - 6792392698104204991, - 16465224002344550280, - 10282380383506806095, - ], -]; - -pub fn rescue(mut xs: [F; W]) -> [F; W] { - for r in 0..8 { - xs = sbox_layer_a(xs); - xs = mds_layer(xs); - xs = constant_layer(xs, &RESCUE_CONSTANTS[r * 2]); - - xs = sbox_layer_b(xs); - xs = mds_layer(xs); - xs = constant_layer(xs, &RESCUE_CONSTANTS[r * 2 + 1]); - } - xs -} - -#[unroll_for_loops] -fn sbox_layer_a(x: [F; W]) -> [F; W] { - let mut result = [F::ZERO; W]; - for i in 0..W { - result[i] = x[i].cube(); - } - result -} - -#[unroll_for_loops] -fn sbox_layer_b(x: [F; W]) -> [F; W] { - let mut result = [F::ZERO; W]; - for i in 0..W { - result[i] = x[i].cube_root(); - } - result -} - -#[unroll_for_loops] -fn mds_layer(x: [F; W]) -> [F; W] { - let mut result = [F::ZERO; W]; - for r in 0..W { - for c in 0..W { - result[r] += F::from_canonical_u64(MDS[r][c]) * x[c]; - } - } - result -} - -#[unroll_for_loops] -fn constant_layer(xs: [F; W], con: &[u64; W]) -> [F; W] { - let mut result = [F::ZERO; W]; - for i in 0..W { - result[i] = xs[i] + F::from_canonical_u64(con[i]); - } - result -} diff --git a/plonky2/src/plonk/config.rs b/plonky2/src/plonk/config.rs index 72d5487e..281d0025 100644 --- a/plonky2/src/plonk/config.rs +++ b/plonky2/src/plonk/config.rs @@ -5,7 +5,6 @@ use plonky2_field::extension_field::{Extendable, FieldExtension}; use plonky2_field::goldilocks_field::GoldilocksField; use serde::{de::DeserializeOwned, Serialize}; -use crate::hash::gmimc::GMiMCHash; use crate::hash::hash_types::HashOut; use crate::hash::hash_types::RichField; use crate::hash::hashing::{PlonkyPermutation, SPONGE_WIDTH}; @@ -76,16 +75,6 @@ impl GenericConfig<2> for PoseidonGoldilocksConfig { type InnerHasher = PoseidonHash; } -/// Configuration using GMiMC over the Goldilocks field. -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub struct GMiMCGoldilocksConfig; -impl GenericConfig<2> for GMiMCGoldilocksConfig { - type F = GoldilocksField; - type FE = QuadraticExtension; - type Hasher = GMiMCHash; - type InnerHasher = GMiMCHash; -} - /// Configuration using truncated Keccak over the Goldilocks field. 
#[derive(Debug, Copy, Clone, Eq, PartialEq)] pub struct KeccakGoldilocksConfig; diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index fbe1ad53..fccaea5c 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -212,9 +212,7 @@ mod tests { use crate::gates::noop::NoopGate; use crate::iop::witness::{PartialWitness, Witness}; use crate::plonk::circuit_data::VerifierOnlyCircuitData; - use crate::plonk::config::{ - GMiMCGoldilocksConfig, GenericConfig, KeccakGoldilocksConfig, PoseidonGoldilocksConfig, - }; + use crate::plonk::config::{GenericConfig, KeccakGoldilocksConfig, PoseidonGoldilocksConfig}; use crate::plonk::proof::{CompressedProofWithPublicInputs, ProofWithPublicInputs}; use crate::plonk::prover::prove; use crate::util::timing::TimingTree; @@ -352,7 +350,6 @@ mod tests { init_logger(); const D: usize = 2; type PC = PoseidonGoldilocksConfig; - type GC = GMiMCGoldilocksConfig; type KC = KeccakGoldilocksConfig; type F = >::F; @@ -363,16 +360,8 @@ mod tests { recursive_proof::(proof, vd, cd, &config, &config, None, false, false)?; test_serialization(&proof, &cd)?; - let (proof, vd, cd) = - recursive_proof::(proof, vd, cd, &config, &config, None, false, false)?; - test_serialization(&proof, &cd)?; - - let (proof, vd, cd) = - recursive_proof::(proof, vd, cd, &config, &config, None, false, false)?; - test_serialization(&proof, &cd)?; - let (proof, _vd, cd) = - recursive_proof::(proof, vd, cd, &config, &config, None, false, false)?; + recursive_proof::(proof, vd, cd, &config, &config, None, false, false)?; test_serialization(&proof, &cd)?; Ok(()) From e78630ae81134c5c0447b7bec65c8f664cdb8979 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 28 Jan 2022 05:02:31 +0100 Subject: [PATCH 043/143] PR feedback --- starky/src/fibonacci_stark.rs | 46 ++++++++++++++++++++++++++-------- starky/src/prover.rs | 5 +++- system_zero/src/system_zero.rs | 10 ++++++-- 3 files changed, 48 insertions(+), 13 deletions(-) diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index 5e41f18e..ad2c8e8e 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -12,26 +12,33 @@ use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars}; /// Toy STARK system used for testing. /// Computes a Fibonacci sequence with inital values `x0, x1` using the transition /// `x0 <- x1, x1 <- x0 + x1`. -pub struct FibonacciStark, const D: usize> { +struct FibonacciStark, const D: usize> { x0: F, x1: F, + num_rows: usize, _phantom: PhantomData, } impl, const D: usize> FibonacciStark { - const NUM_COLUMNS: usize = 2; - const NUM_ROWS: usize = 1 << 5; + // The first public input is `x0`. + const PI_INDEX_X0: usize = 0; + // The second public input is `x1`. + const PI_INDEX_X1: usize = 1; + // The third public input is the second element of the last row, which should be equal to the + // `(num_rows + 1)`-th Fibonacci number. 
+ const PI_INDEX_RES: usize = 2; - fn new(x0: F, x1: F) -> Self { + fn new(num_rows: usize, x0: F, x1: F) -> Self { Self { x0, x1, + num_rows, _phantom: PhantomData, } } - fn generate_trace(&self) -> Vec<[F; Self::NUM_COLUMNS]> { - (0..Self::NUM_ROWS) + fn generate_trace(&self) -> Vec<[F; Self::COLUMNS]> { + (0..self.num_rows) .scan([self.x0, self.x1], |acc, _| { let tmp = *acc; acc[0] = tmp[1]; @@ -43,8 +50,8 @@ impl, const D: usize> FibonacciStark { } impl, const D: usize> Stark for FibonacciStark { - const COLUMNS: usize = Self::NUM_COLUMNS; - const PUBLIC_INPUTS: usize = 0; + const COLUMNS: usize = 2; + const PUBLIC_INPUTS: usize = 3; fn eval_packed_generic( &self, @@ -54,6 +61,9 @@ impl, const D: usize> Stark for FibonacciStar FE: FieldExtension, P: PackedField, { + yield_constr.one_first_row(vars.local_values[0] - vars.public_inputs[Self::PI_INDEX_X0]); + yield_constr.one_first_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_X1]); + yield_constr.one_last_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_RES]); // x0 <- x1 yield_constr.one(vars.next_values[0] - vars.local_values[1]); // x1 <- x0 + x1 @@ -81,6 +91,10 @@ mod tests { use crate::fibonacci_stark::FibonacciStark; use crate::prover::prove; + fn fibonacci(n: usize, x0: usize, x1: usize) -> usize { + (0..n).fold((0, 1), |x, _| (x.1, x.0 + x.1)).1 + } + #[test] fn test_fibonacci_stark() -> Result<()> { const D: usize = 2; @@ -89,9 +103,21 @@ mod tests { type S = FibonacciStark; let config = StarkConfig::standard_fast_config(); - let stark = S::new(F::ZERO, F::ONE); + let num_rows = 1 << 5; + let public_inputs = [ + F::ZERO, + F::ONE, + F::from_canonical_usize(fibonacci(num_rows - 1, 0, 1)), + ]; + let stark = S::new(num_rows, public_inputs[0], public_inputs[1]); let trace = stark.generate_trace(); - prove::(stark, config, trace, &mut TimingTree::default())?; + prove::( + stark, + config, + trace, + public_inputs, + &mut TimingTree::default(), + )?; Ok(()) } diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 5af22871..5473db68 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -25,6 +25,7 @@ pub fn prove( stark: S, config: StarkConfig, trace: Vec<[F; S::COLUMNS]>, + public_inputs: [F; S::PUBLIC_INPUTS], timing: &mut TimingTree, ) -> Result> where @@ -72,6 +73,7 @@ where let quotient_polys = compute_quotient_polys::( &stark, &trace_commitment, + public_inputs, &alphas, degree_bits, rate_bits, @@ -142,6 +144,7 @@ where fn compute_quotient_polys( stark: &S, trace_commitment: &PolynomialBatch, + public_inputs: [F; S::PUBLIC_INPUTS], alphas: &[F], degree_bits: usize, rate_bits: usize, @@ -196,7 +199,7 @@ where trace_commitment, (i + 1) % (degree << rate_bits), ), - public_inputs: &[F::ZERO; S::PUBLIC_INPUTS], + public_inputs: &public_inputs, }; stark.eval_packed_base(vars, &mut consumer); // TODO: Fix this once we a genuine `PackedField`. 
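Aside (not part of the original patch): the closure above divides the alpha-combined constraint evaluations by the vanishing polynomial of the trace subgroup, evaluated on a shifted coset so that the inverse exists. Because every combined constraint C(x) vanishes on the size-n trace subgroup H, C(x) = q(x) * Z_H(x) with Z_H(x) = x^n - 1, and the prover commits to the quotient q; evaluating on a coset keeps Z_H nonzero, which is what `z_h_on_coset.eval_inverse(i)` relies on. A minimal sketch of that vanishing-polynomial evaluation, using a bare generic bound and illustrative names rather than plonky2's field traits:

fn z_h_at<F>(shifted_point: F, degree_bits: usize, one: F) -> F
where
    F: Copy + std::ops::Mul<Output = F> + std::ops::Sub<Output = F>,
{
    // Compute (s*x)^(2^degree_bits) by repeated squaring, then subtract one:
    // Z_H(s*x) = (s*x)^n - 1 with n = 2^degree_bits.
    let mut acc = shifted_point;
    for _ in 0..degree_bits {
        acc = acc * acc;
    }
    acc - one
}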
diff --git a/system_zero/src/system_zero.rs b/system_zero/src/system_zero.rs index 47950eb2..49e25e6c 100644 --- a/system_zero/src/system_zero.rs +++ b/system_zero/src/system_zero.rs @@ -83,27 +83,33 @@ impl, const D: usize> Stark for SystemZero Result<()> { type F = GoldilocksField; type C = PoseidonGoldilocksConfig; const D: usize = 2; type S = SystemZero; let system = S::default(); + let public_inputs = [F::ZERO; S::PUBLIC_INPUTS]; let config = StarkConfig::standard_fast_config(); let mut timing = TimingTree::new("prove", Level::Debug); let trace = system.generate_trace(); - prove::(system, config, trace, &mut timing).unwrap(); + prove::(system, config, trace, public_inputs, &mut timing)?; + + Ok(()) } } From c73f32ef2ba2b6428b30e855c8471e4e20739992 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 28 Jan 2022 13:59:43 +0100 Subject: [PATCH 044/143] Remove initial values from Fibonacci STARK state --- starky/src/fibonacci_stark.rs | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index ad2c8e8e..ea834e99 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -10,11 +10,9 @@ use crate::stark::Stark; use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars}; /// Toy STARK system used for testing. -/// Computes a Fibonacci sequence with inital values `x0, x1` using the transition +/// Computes a Fibonacci sequence with state `[x0, x1]` using the state transition /// `x0 <- x1, x1 <- x0 + x1`. struct FibonacciStark, const D: usize> { - x0: F, - x1: F, num_rows: usize, _phantom: PhantomData, } @@ -25,21 +23,20 @@ impl, const D: usize> FibonacciStark { // The second public input is `x1`. const PI_INDEX_X1: usize = 1; // The third public input is the second element of the last row, which should be equal to the - // `(num_rows + 1)`-th Fibonacci number. + // `num_rows`-th Fibonacci number. const PI_INDEX_RES: usize = 2; - fn new(num_rows: usize, x0: F, x1: F) -> Self { + fn new(num_rows: usize) -> Self { Self { - x0, - x1, num_rows, _phantom: PhantomData, } } - fn generate_trace(&self) -> Vec<[F; Self::COLUMNS]> { + /// Generate the trace using `x0, x1` as inital state values. + fn generate_trace(&self, x0: F, x1: F) -> Vec<[F; Self::COLUMNS]> { (0..self.num_rows) - .scan([self.x0, self.x1], |acc, _| { + .scan([x0, x1], |acc, _| { let tmp = *acc; acc[0] = tmp[1]; acc[1] = tmp[0] + tmp[1]; @@ -61,9 +58,11 @@ impl, const D: usize> Stark for FibonacciStar FE: FieldExtension, P: PackedField, { + // Check public inputs. 
yield_constr.one_first_row(vars.local_values[0] - vars.public_inputs[Self::PI_INDEX_X0]); yield_constr.one_first_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_X1]); yield_constr.one_last_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_RES]); + // x0 <- x1 yield_constr.one(vars.next_values[0] - vars.local_values[1]); // x1 <- x0 + x1 @@ -109,8 +108,8 @@ mod tests { F::ONE, F::from_canonical_usize(fibonacci(num_rows - 1, 0, 1)), ]; - let stark = S::new(num_rows, public_inputs[0], public_inputs[1]); - let trace = stark.generate_trace(); + let stark = S::new(num_rows); + let trace = stark.generate_trace(public_inputs[0], public_inputs[1]); prove::( stark, config, From dff9a4095547f02a17e6019f15f9f275e2d76dae Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 28 Jan 2022 17:06:40 +0100 Subject: [PATCH 045/143] Batch alphas in constraint consumer --- starky/src/constraint_consumer.rs | 27 ++++++++------ starky/src/prover.rs | 61 +++++++++++++++---------------- 2 files changed, 45 insertions(+), 43 deletions(-) diff --git a/starky/src/constraint_consumer.rs b/starky/src/constraint_consumer.rs index bc76f03b..adb88e41 100644 --- a/starky/src/constraint_consumer.rs +++ b/starky/src/constraint_consumer.rs @@ -8,11 +8,11 @@ use plonky2::iop::target::Target; use plonky2::plonk::circuit_builder::CircuitBuilder; pub struct ConstraintConsumer { - /// A random value used to combine multiple constraints into one. - alpha: P::Scalar, + /// Random values used to combine multiple constraints into one. + alphas: Vec, - /// A running sum of constraints that have been emitted so far, scaled by powers of alpha. - constraint_acc: P, + /// Running sums of constraints that have been emitted so far, scaled by powers of alpha. + constraint_accs: Vec
<P>
, /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated /// with the first trace row, and zero at other points in the subgroup. @@ -24,24 +24,29 @@ pub struct ConstraintConsumer<P: PackedField> { } impl<P: PackedField> ConstraintConsumer<P>
{ - pub fn new(alpha: P::Scalar, lagrange_basis_first: P, lagrange_basis_last: P) -> Self { + pub fn new(alphas: Vec, lagrange_basis_first: P, lagrange_basis_last: P) -> Self { Self { - alpha, - constraint_acc: P::ZEROS, + constraint_accs: vec![P::ZEROS; alphas.len()], + alphas, lagrange_basis_first, lagrange_basis_last, } } // TODO: Do this correctly. - pub fn accumulator(&self) -> P::Scalar { - self.constraint_acc.as_slice()[0] + pub fn accumulators(self) -> Vec { + self.constraint_accs + .into_iter() + .map(|acc| acc.as_slice()[0]) + .collect() } /// Add one constraint. pub fn one(&mut self, constraint: P) { - self.constraint_acc *= self.alpha; - self.constraint_acc += constraint; + for (&alpha, acc) in self.alphas.iter().zip(&mut self.constraint_accs) { + *acc *= alpha; + *acc += constraint; + } } /// Add a series of constraints. diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 5473db68..e0652b24 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -74,7 +74,7 @@ where &stark, &trace_commitment, public_inputs, - &alphas, + alphas, degree_bits, rate_bits, ); @@ -145,7 +145,7 @@ fn compute_quotient_polys( stark: &S, trace_commitment: &PolynomialBatch, public_inputs: [F; S::PUBLIC_INPUTS], - alphas: &[F], + alphas: Vec, degree_bits: usize, rate_bits: usize, ) -> Vec> @@ -179,37 +179,34 @@ where comm.get_lde_values(i).try_into().unwrap() }; - alphas - .iter() - .map(|&alpha| { - let quotient_evals = PolynomialValues::new( - (0..degree << rate_bits) - .into_par_iter() - .map(|i| { - // TODO: Set `P` to a genuine `PackedField` here. - let mut consumer = ConstraintConsumer::::new( - alpha, - lagrange_first.values[i], - lagrange_last.values[i], - ); - let vars = - StarkEvaluationVars:: { - local_values: &get_at_index(trace_commitment, i), - next_values: &get_at_index( - trace_commitment, - (i + 1) % (degree << rate_bits), - ), - public_inputs: &public_inputs, - }; - stark.eval_packed_base(vars, &mut consumer); - // TODO: Fix this once we a genuine `PackedField`. - let constraints_eval = consumer.accumulator(); - let denominator_inv = z_h_on_coset.eval_inverse(i); - constraints_eval * denominator_inv - }) - .collect(), + let quotient_values = (0..degree << rate_bits) + .into_par_iter() + .map(|i| { + // TODO: Set `P` to a genuine `PackedField` here. + let mut consumer = ConstraintConsumer::::new( + alphas.clone(), + lagrange_first.values[i], + lagrange_last.values[i], ); - quotient_evals.coset_ifft(F::coset_shift()) + let vars = StarkEvaluationVars:: { + local_values: &get_at_index(trace_commitment, i), + next_values: &get_at_index(trace_commitment, (i + 1) % (degree << rate_bits)), + public_inputs: &public_inputs, + }; + stark.eval_packed_base(vars, &mut consumer); + // TODO: Fix this once we a genuine `PackedField`. 
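// Aside (illustrative, not from the original patch): at this point the consumer holds one
// running sum per challenge. `ConstraintConsumer::one`, shown in the constraint_consumer.rs
// hunk above, updates every accumulator as acc_j <- acc_j * alpha_j + c, so after constraints
// c_0, ..., c_{k-1} each accumulator equals sum_i alpha_j^(k-1-i) * c_i. A standalone sketch
// of that update, with illustrative names and a bare trait bound instead of `PackedField`:
fn fold_constraint<F>(alphas: &[F], accs: &mut [F], c: F)
where
    F: Copy + std::ops::Add<Output = F> + std::ops::Mul<Output = F>,
{
    for (acc, &alpha) in accs.iter_mut().zip(alphas) {
        *acc = *acc * alpha + c;
    }
}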
+ let mut constraints_evals = consumer.accumulators(); + let denominator_inv = z_h_on_coset.eval_inverse(i); + for eval in &mut constraints_evals { + *eval *= denominator_inv; + } + constraints_evals }) + .collect::>(); + + transpose("ient_values) + .into_par_iter() + .map(PolynomialValues::new) + .map(|values| values.coset_ifft(F::coset_shift())) .collect() } From 8993270f80b8bad808c109b15807849ef42e5bf5 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Sat, 29 Jan 2022 12:49:00 +0100 Subject: [PATCH 046/143] Progress --- starky/src/get_challenges.rs | 219 +++++++++++++++++++++++++++++++++++ starky/src/lib.rs | 2 + starky/src/proof.rs | 57 ++++++++- starky/src/prover.rs | 17 ++- starky/src/verifier.rs | 105 +++++++++++++++++ 5 files changed, 394 insertions(+), 6 deletions(-) create mode 100644 starky/src/get_challenges.rs create mode 100644 starky/src/verifier.rs diff --git a/starky/src/get_challenges.rs b/starky/src/get_challenges.rs new file mode 100644 index 00000000..f3c78701 --- /dev/null +++ b/starky/src/get_challenges.rs @@ -0,0 +1,219 @@ +use anyhow::Result; +use plonky2::field::extension_field::Extendable; +use plonky2::field::polynomial::PolynomialCoeffs; +use plonky2::fri::proof::FriProof; +use plonky2::hash::hash_types::RichField; +use plonky2::hash::merkle_tree::MerkleCap; +use plonky2::iop::challenger::Challenger; +use plonky2::plonk::config::{GenericConfig, Hasher}; + +use crate::config::StarkConfig; +use crate::proof::{StarkOpeningSet, StarkProof, StarkProofChallenges, StarkProofWithPublicInputs}; + +fn get_challenges, C: GenericConfig, const D: usize>( + trace_cap: &MerkleCap, + quotient_polys_cap: &MerkleCap, + openings: &StarkOpeningSet, + commit_phase_merkle_caps: &[MerkleCap], + final_poly: &PolynomialCoeffs, + pow_witness: F, + config: &StarkConfig, + degree_bits: usize, +) -> Result> { + let num_challenges = config.num_challenges; + let num_fri_queries = config.fri_config.num_query_rounds; + let lde_size = 1 << (degree_bits + config.fri_config.rate_bits); + + let mut challenger = Challenger::::new(); + + challenger.observe_cap(trace_cap); + let stark_alphas = challenger.get_n_challenges(num_challenges); + + challenger.observe_cap(quotient_polys_cap); + let stark_zeta = challenger.get_extension_challenge::(); + + openings.observe(&mut challenger); + + // Scaling factor to combine polynomials. + let fri_alpha = challenger.get_extension_challenge::(); + + // Recover the random betas used in the FRI reductions. + let fri_betas = commit_phase_merkle_caps + .iter() + .map(|cap| { + challenger.observe_cap(cap); + challenger.get_extension_challenge::() + }) + .collect(); + + challenger.observe_extension_elements(&final_poly.coeffs); + + let fri_pow_response = C::InnerHasher::hash( + &challenger + .get_hash() + .elements + .iter() + .copied() + .chain(Some(pow_witness)) + .collect::>(), + false, + ) + .elements[0]; + + let fri_query_indices = (0..num_fri_queries) + .map(|_| challenger.get_challenge().to_canonical_u64() as usize % lde_size) + .collect(); + + Ok(StarkProofChallenges { + stark_alphas, + stark_zeta, + fri_alpha, + fri_betas, + fri_pow_response, + fri_query_indices, + }) +} + +impl, C: GenericConfig, const D: usize> + StarkProofWithPublicInputs +{ + pub(crate) fn fri_query_indices(&self, config: &StarkConfig) -> anyhow::Result> { + Ok(self.get_challenges(config)?.fri_query_indices) + } + + /// Computes all Fiat-Shamir challenges used in the Plonk proof. 
+ pub(crate) fn get_challenges( + &self, + config: &StarkConfig, + ) -> Result> { + let StarkProof { + trace_cap, + quotient_polys_cap, + openings, + opening_proof: + FriProof { + commit_phase_merkle_caps, + final_poly, + pow_witness, + .. + }, + } = &self.proof; + + get_challenges( + trace_cap, + quotient_polys_cap, + openings, + commit_phase_merkle_caps, + final_poly, + *pow_witness, + config, + ) + } +} + +// impl, C: GenericConfig, const D: usize> +// CompressedProofWithPublicInputs +// { +// /// Computes all Fiat-Shamir challenges used in the Plonk proof. +// pub(crate) fn get_challenges( +// &self, +// common_data: &CommonCircuitData, +// ) -> anyhow::Result> { +// let CompressedProof { +// wires_cap, +// plonk_zs_partial_products_cap, +// quotient_polys_cap, +// openings, +// opening_proof: +// CompressedFriProof { +// commit_phase_merkle_caps, +// final_poly, +// pow_witness, +// .. +// }, +// } = &self.proof; +// +// get_challenges( +// self.get_public_inputs_hash(), +// wires_cap, +// plonk_zs_partial_products_cap, +// quotient_polys_cap, +// openings, +// commit_phase_merkle_caps, +// final_poly, +// *pow_witness, +// common_data, +// ) +// } +// +// /// Computes all coset elements that can be inferred in the FRI reduction steps. +// pub(crate) fn get_inferred_elements( +// &self, +// challenges: &ProofChallenges, +// common_data: &CommonCircuitData, +// ) -> FriInferredElements { +// let ProofChallenges { +// plonk_zeta, +// fri_alpha, +// fri_betas, +// fri_query_indices, +// .. +// } = challenges; +// let mut fri_inferred_elements = Vec::new(); +// // Holds the indices that have already been seen at each reduction depth. +// let mut seen_indices_by_depth = +// vec![HashSet::new(); common_data.fri_params.reduction_arity_bits.len()]; +// let precomputed_reduced_evals = PrecomputedReducedOpenings::from_os_and_alpha( +// &self.proof.openings.to_fri_openings(), +// *fri_alpha, +// ); +// let log_n = common_data.degree_bits + common_data.config.fri_config.rate_bits; +// // Simulate the proof verification and collect the inferred elements. +// // The content of the loop is basically the same as the `fri_verifier_query_round` function. +// for &(mut x_index) in fri_query_indices { +// let mut subgroup_x = F::MULTIPLICATIVE_GROUP_GENERATOR +// * F::primitive_root_of_unity(log_n).exp_u64(reverse_bits(x_index, log_n) as u64); +// let mut old_eval = fri_combine_initial::( +// &common_data.get_fri_instance(*plonk_zeta), +// &self +// .proof +// .opening_proof +// .query_round_proofs +// .initial_trees_proofs[&x_index], +// *fri_alpha, +// subgroup_x, +// &precomputed_reduced_evals, +// &common_data.fri_params, +// ); +// for (i, &arity_bits) in common_data +// .fri_params +// .reduction_arity_bits +// .iter() +// .enumerate() +// { +// let coset_index = x_index >> arity_bits; +// if !seen_indices_by_depth[i].insert(coset_index) { +// // If this index has already been seen, we can skip the rest of the reductions. 
+// break; +// } +// fri_inferred_elements.push(old_eval); +// let arity = 1 << arity_bits; +// let mut evals = self.proof.opening_proof.query_round_proofs.steps[i][&coset_index] +// .evals +// .clone(); +// let x_index_within_coset = x_index & (arity - 1); +// evals.insert(x_index_within_coset, old_eval); +// old_eval = compute_evaluation( +// subgroup_x, +// x_index_within_coset, +// arity_bits, +// &evals, +// fri_betas[i], +// ); +// subgroup_x = subgroup_x.exp_power_of_2(arity_bits); +// x_index = coset_index; +// } +// } +// FriInferredElements(fri_inferred_elements) +// } +// } diff --git a/starky/src/lib.rs b/starky/src/lib.rs index 541950ab..e56c0ef6 100644 --- a/starky/src/lib.rs +++ b/starky/src/lib.rs @@ -8,10 +8,12 @@ pub mod config; pub mod constraint_consumer; +mod get_challenges; pub mod proof; pub mod prover; pub mod stark; pub mod vars; +pub mod verifier; #[cfg(test)] pub mod fibonacci_stark; diff --git a/starky/src/proof.rs b/starky/src/proof.rs index 4218e71f..4d81793e 100644 --- a/starky/src/proof.rs +++ b/starky/src/proof.rs @@ -1,20 +1,34 @@ use plonky2::field::extension_field::Extendable; +use plonky2::field::field_types::Field; use plonky2::fri::oracle::PolynomialBatch; use plonky2::fri::proof::{CompressedFriProof, FriProof}; use plonky2::hash::hash_types::RichField; use plonky2::hash::merkle_tree::MerkleCap; -use plonky2::plonk::config::GenericConfig; +use plonky2::iop::challenger::Challenger; +use plonky2::plonk::config::{GenericConfig, Hasher}; use rayon::prelude::*; pub struct StarkProof, C: GenericConfig, const D: usize> { /// Merkle cap of LDEs of trace values. pub trace_cap: MerkleCap, + /// Merkle cap of LDEs of trace values. + pub quotient_polys_cap: MerkleCap, /// Purported values of each polynomial at the challenge point. pub openings: StarkOpeningSet, /// A batch FRI argument for all openings. pub opening_proof: FriProof, } +pub struct StarkProofWithPublicInputs< + F: RichField + Extendable, + C: GenericConfig, + const D: usize, +> { + pub proof: StarkProof, + // TODO: Maybe make it generic over a `S: Start` and replace with `[F; S::PUBLIC_INPUTS]`. + pub public_inputs: Vec, +} + pub struct CompressedStarkProof< F: RichField + Extendable, C: GenericConfig, @@ -28,6 +42,34 @@ pub struct CompressedStarkProof< pub opening_proof: CompressedFriProof, } +pub struct CompressedStarkProofWithPublicInputs< + F: RichField + Extendable, + C: GenericConfig, + const D: usize, +> { + pub proof: CompressedStarkProof, + pub public_inputs: Vec, +} + +pub(crate) struct StarkProofChallenges, const D: usize> { + // Random values used to combine PLONK constraints. + pub stark_alphas: Vec, + + // Point at which the PLONK polynomials are opened. + pub stark_zeta: F::Extension, + + // Scaling factor to combine polynomials. + pub fri_alpha: F::Extension, + + // Betas used in the FRI commit phase reductions. + pub fri_betas: Vec, + + pub fri_pow_response: F, + + // Indices at which the oracle is queried in FRI. + pub fri_query_indices: Vec, +} + /// Purported values of each polynomial at the challenge point. pub struct StarkOpeningSet, const D: usize> { pub local_values: Vec, @@ -56,4 +98,17 @@ impl, const D: usize> StarkOpeningSet { quotient_polys: eval_commitment(zeta, quotient_commitment), } } + + // Note: Can't implement this directly on `Challenger` as it's in a different crate. 
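// Aside (summary, not part of the original patch): the challenges recovered by
// `get_challenges` above are bound to the transcript in this order, which the verifier must
// replay exactly as the prover produced it:
//     observe(trace_cap)                       -> stark_alphas (config.num_challenges of them)
//     observe(quotient_polys_cap)              -> stark_zeta
//     openings.observe(..)                     -> fri_alpha
//     observe(each commit-phase cap)           -> one fri_beta per cap
//     observe(final_poly coeffs), pow_witness  -> fri_pow_response
//     remaining challenger output              -> fri_query_indices
// A mismatch in this ordering between the prover and the verifier makes verification fail.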
+ pub fn observe>(&self, challenger: &mut Challenger) { + let StarkOpeningSet { + local_values, + next_values, + permutation_zs, + quotient_polys, + } = self; + for v in &[local_values, next_values, permutation_zs, quotient_polys] { + self.observe_extension_elements(v); + } + } } diff --git a/starky/src/prover.rs b/starky/src/prover.rs index e0652b24..4ba09e22 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -16,7 +16,7 @@ use rayon::prelude::*; use crate::config::StarkConfig; use crate::constraint_consumer::ConstraintConsumer; -use crate::proof::{StarkOpeningSet, StarkProof}; +use crate::proof::{StarkOpeningSet, StarkProof, StarkProofWithPublicInputs}; use crate::stark::Stark; use crate::vars::StarkEvaluationVars; @@ -27,7 +27,7 @@ pub fn prove( trace: Vec<[F; S::COLUMNS]>, public_inputs: [F; S::PUBLIC_INPUTS], timing: &mut TimingTree, -) -> Result> +) -> Result> where F: RichField + Extendable, C: GenericConfig, @@ -101,7 +101,8 @@ where None, ) ); - challenger.observe_cap("ient_commitment.merkle_tree.cap); + let quotient_polys_cap = quotient_commitment.merkle_tree.cap; + challenger.observe_cap(quotient_polys_cap); let zeta = challenger.get_extension_challenge::(); // To avoid leaking witness data, we want to ensure that our opening locations, `zeta` and @@ -113,6 +114,7 @@ where "Opening point is in the subgroup." ); let openings = StarkOpeningSet::new(zeta, g, &trace_commitment, "ient_commitment); + openings.observe(&mut challenger); // TODO: Add permuation checks let initial_merkle_trees = &[&trace_commitment, "ient_commitment]; @@ -129,11 +131,16 @@ where timing, ) ); - - Ok(StarkProof { + let proof = StarkProof { trace_cap, + quotient_polys_cap, openings, opening_proof, + }; + + Ok(StarkProofWithPublicInputs { + proof, + public_inputs: public_inputs.to_vec(), }) } diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs new file mode 100644 index 00000000..d9c8c309 --- /dev/null +++ b/starky/src/verifier.rs @@ -0,0 +1,105 @@ +use anyhow::{ensure, Result}; +use plonky2::field::extension_field::Extendable; +use plonky2::hash::hash_types::RichField; +use plonky2::plonk::circuit_data::CommonCircuitData; +use plonky2::plonk::config::GenericConfig; +use plonky2::plonk::proof::ProofWithPublicInputs; + +use crate::config::StarkConfig; +use crate::proof::{StarkProof, StarkProofWithPublicInputs}; +use crate::stark::Stark; + +pub(crate) fn verify< + F: RichField + Extendable, + C: GenericConfig, + S: Stark, + const D: usize, +>( + proof_with_pis: StarkProofWithPublicInputs, + config: &StarkConfig, +) -> Result<()> { + let challenges = proof_with_pis.get_challenges(config)?; + verify_with_challenges(proof_with_pis, challenges, verifier_data, common_data) +} + +pub(crate) fn verify_with_challenges< + F: RichField + Extendable, + C: GenericConfig, + S: Stark, + const D: usize, +>( + proof_with_pis: StarkProofWithPublicInputs, + challenges: ProofChallenges, + verifier_data: &VerifierOnlyCircuitData, + common_data: &CommonCircuitData, +) -> Result<()> { + assert_eq!( + proof_with_pis.public_inputs.len(), + common_data.num_public_inputs + ); + let public_inputs_hash = &proof_with_pis.get_public_inputs_hash(); + + let ProofWithPublicInputs { proof, .. 
} = proof_with_pis; + + let local_constants = &proof.openings.constants; + let local_wires = &proof.openings.wires; + let vars = EvaluationVars { + local_constants, + local_wires, + public_inputs_hash, + }; + let local_zs = &proof.openings.plonk_zs; + let next_zs = &proof.openings.plonk_zs_right; + let s_sigmas = &proof.openings.plonk_sigmas; + let partial_products = &proof.openings.partial_products; + + // Evaluate the vanishing polynomial at our challenge point, zeta. + let vanishing_polys_zeta = eval_vanishing_poly( + common_data, + challenges.plonk_zeta, + vars, + local_zs, + next_zs, + partial_products, + s_sigmas, + &challenges.plonk_betas, + &challenges.plonk_gammas, + &challenges.plonk_alphas, + ); + + // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta. + let quotient_polys_zeta = &proof.openings.quotient_polys; + let zeta_pow_deg = challenges + .plonk_zeta + .exp_power_of_2(common_data.degree_bits); + let z_h_zeta = zeta_pow_deg - F::Extension::ONE; + // `quotient_polys_zeta` holds `num_challenges * quotient_degree_factor` evaluations. + // Each chunk of `quotient_degree_factor` holds the evaluations of `t_0(zeta),...,t_{quotient_degree_factor-1}(zeta)` + // where the "real" quotient polynomial is `t(X) = t_0(X) + t_1(X)*X^n + t_2(X)*X^{2n} + ...`. + // So to reconstruct `t(zeta)` we can compute `reduce_with_powers(chunk, zeta^n)` for each + // `quotient_degree_factor`-sized chunk of the original evaluations. + for (i, chunk) in quotient_polys_zeta + .chunks(common_data.quotient_degree_factor) + .enumerate() + { + ensure!(vanishing_polys_zeta[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg)); + } + + let merkle_caps = &[ + verifier_data.constants_sigmas_cap.clone(), + proof.wires_cap, + proof.plonk_zs_partial_products_cap, + proof.quotient_polys_cap, + ]; + + verify_fri_proof::( + &common_data.get_fri_instance(challenges.plonk_zeta), + &proof.openings, + &challenges, + merkle_caps, + &proof.opening_proof, + &common_data.fri_params, + )?; + + Ok(()) +} From 851455a26aa88a0ff600e3416e884bbe2153f8e2 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 31 Jan 2022 10:07:01 +0100 Subject: [PATCH 047/143] Eval Lagrange --- plonky2/src/plonk/plonk_common.rs | 1 + starky/src/verifier.rs | 78 ++++++++++++++++++------------- 2 files changed, 47 insertions(+), 32 deletions(-) diff --git a/plonky2/src/plonk/plonk_common.rs b/plonky2/src/plonk/plonk_common.rs index 74495198..937f3ccd 100644 --- a/plonky2/src/plonk/plonk_common.rs +++ b/plonky2/src/plonk/plonk_common.rs @@ -1,6 +1,7 @@ use plonky2_field::extension_field::Extendable; use plonky2_field::field_types::Field; use plonky2_field::packed_field::PackedField; +use plonky2_util::log2_strict; use crate::fri::oracle::SALT_SIZE; use crate::fri::structure::FriOracleInfo; diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index d9c8c309..cbde85c0 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -1,13 +1,17 @@ use anyhow::{ensure, Result}; use plonky2::field::extension_field::Extendable; +use plonky2::field::field_types::Field; use plonky2::hash::hash_types::RichField; use plonky2::plonk::circuit_data::CommonCircuitData; use plonky2::plonk::config::GenericConfig; use plonky2::plonk::proof::ProofWithPublicInputs; +use plonky2_util::log2_strict; use crate::config::StarkConfig; -use crate::proof::{StarkProof, StarkProofWithPublicInputs}; +use crate::constraint_consumer::ConstraintConsumer; +use crate::proof::{StarkOpeningSet, StarkProof, StarkProofChallenges, 
StarkProofWithPublicInputs}; use crate::stark::Stark; +use crate::vars::StarkEvaluationVars; pub(crate) fn verify< F: RichField + Extendable, @@ -28,44 +32,40 @@ pub(crate) fn verify_with_challenges< S: Stark, const D: usize, >( + stark: S, proof_with_pis: StarkProofWithPublicInputs, - challenges: ProofChallenges, - verifier_data: &VerifierOnlyCircuitData, - common_data: &CommonCircuitData, + challenges: StarkProofChallenges, + config: &StarkConfig, ) -> Result<()> { - assert_eq!( - proof_with_pis.public_inputs.len(), - common_data.num_public_inputs - ); - let public_inputs_hash = &proof_with_pis.get_public_inputs_hash(); - - let ProofWithPublicInputs { proof, .. } = proof_with_pis; + let StarkProofWithPublicInputs { + proof, + public_inputs, + } = proof_with_pis; + let degree = recover_degree(&proof, config); + let degree_log = log2_strict(degree); let local_constants = &proof.openings.constants; - let local_wires = &proof.openings.wires; - let vars = EvaluationVars { - local_constants, - local_wires, - public_inputs_hash, + let local_values = &proof.openings.local_values; + let next_values = &proof.openings.local_values; + let StarkOpeningSet { + local_values, + next_values, + permutation_zs, + quotient_polys, + } = &proof.openings; + let vars = StarkEvaluationVars { + local_values, + next_values, + public_inputs: &public_inputs, }; - let local_zs = &proof.openings.plonk_zs; - let next_zs = &proof.openings.plonk_zs_right; - let s_sigmas = &proof.openings.plonk_sigmas; - let partial_products = &proof.openings.partial_products; - // Evaluate the vanishing polynomial at our challenge point, zeta. - let vanishing_polys_zeta = eval_vanishing_poly( - common_data, - challenges.plonk_zeta, - vars, - local_zs, - next_zs, - partial_products, - s_sigmas, - &challenges.plonk_betas, - &challenges.plonk_gammas, - &challenges.plonk_alphas, + let mut consumer = ConstraintConsumer::::new( + challenges.stark_alphas, + lagrange_first.values[i], + lagrange_last.values[i], ); + let (l_1, l_n) = eval_l_1_and_l_last(degree_log, challenges.stark_zeta); + stark.eval_ext() // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta. let quotient_polys_zeta = &proof.openings.quotient_polys; @@ -103,3 +103,17 @@ pub(crate) fn verify_with_challenges< Ok(()) } + +/// Evaluate the Lagrange basis `L_1` and `L_n` at a point `x`. 
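// Aside (standard identities, not part of the original patch): with n = 2^log_n and g a
// primitive n-th root of unity, the two Lagrange basis evaluations computed below have the
// closed forms
//     L_1(x) = (x^n - 1) / (n * (x - 1))
//     L_n(x) = (x^n - 1) / (n * (g * x - 1))
// Both share the numerator Z_H(x) = x^n - 1, so a single power `z_x` can be reused against
// the two batch-inverted denominators.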
+fn eval_l_1_and_l_last(log_n: usize, x: F) -> (F,F) { + let n = 1 << log_n; + let g = F::primitive_root_of_unity(log_n); + let z_x = x.exp_power_of_2(log_n); + let invs = F::batch_multiplicative_inverse(&[F::from_canonical_usize(n) * (x - F::ONE), F::from_canonical_usize(n) * (g*x - F::ONE)]); + + (z_x * invs[0], z_x * invs[1]) +} + +fn recover_degree, C: GenericConfig, const D: usize>(proof: &StarkProof, config: &StarkConfig) -> usize { + 1<<(proof.opening_proof.query_round_proofs[0].initial_trees_proof.evals_proofs[0].1.siblings.len() + config.fri_config.cap_height) +} \ No newline at end of file From d24d26e5c05a3e0cbe6d0b2d27ba527448b17530 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 31 Jan 2022 16:19:30 +0100 Subject: [PATCH 048/143] Add FRI challenges --- plonky2/src/fri/proof.rs | 6 +- plonky2/src/fri/verifier.rs | 8 +-- plonky2/src/plonk/get_challenges.rs | 29 ++++++--- plonky2/src/plonk/mod.rs | 2 +- plonky2/src/plonk/plonk_common.rs | 2 +- plonky2/src/plonk/proof.rs | 4 ++ plonky2/src/plonk/verifier.rs | 2 +- starky/src/get_challenges.rs | 12 +++- starky/src/proof.rs | 2 +- starky/src/prover.rs | 2 +- starky/src/verifier.rs | 96 +++++++++++++++++++---------- 11 files changed, 107 insertions(+), 58 deletions(-) diff --git a/plonky2/src/fri/proof.rs b/plonky2/src/fri/proof.rs index f96db781..bca7b8db 100644 --- a/plonky2/src/fri/proof.rs +++ b/plonky2/src/fri/proof.rs @@ -16,7 +16,7 @@ use crate::iop::ext_target::ExtensionTarget; use crate::iop::target::Target; use crate::plonk::config::{GenericConfig, Hasher}; use crate::plonk::plonk_common::salt_size; -use crate::plonk::proof::{FriInferredElements, ProofChallenges}; +use crate::plonk::proof::{FriChallenges, FriInferredElements, ProofChallenges}; /// Evaluations and Merkle proof produced by the prover in a FRI query step. #[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)] @@ -253,10 +253,10 @@ impl, H: Hasher, const D: usize> CompressedFriPr pow_witness, .. } = self; - let ProofChallenges { + let FriChallenges { fri_query_indices: indices, .. - } = challenges; + } = &challenges.fri_challenges; let mut fri_inferred_elements = fri_inferred_elements.0.into_iter(); let cap_height = params.config.cap_height; let reduction_arity_bits = ¶ms.reduction_arity_bits; diff --git a/plonky2/src/fri/verifier.rs b/plonky2/src/fri/verifier.rs index 40d1ab25..f41ae969 100644 --- a/plonky2/src/fri/verifier.rs +++ b/plonky2/src/fri/verifier.rs @@ -11,7 +11,7 @@ use crate::hash::hash_types::RichField; use crate::hash::merkle_proofs::verify_merkle_proof; use crate::hash::merkle_tree::MerkleCap; use crate::plonk::config::{GenericConfig, Hasher}; -use crate::plonk::proof::{OpeningSet, ProofChallenges}; +use crate::plonk::proof::{FriChallenges, OpeningSet, ProofChallenges}; use crate::util::reducing::ReducingFactor; use crate::util::reverse_bits; @@ -57,7 +57,7 @@ pub(crate) fn fri_verify_proof_of_work, const D: us Ok(()) } -pub(crate) fn verify_fri_proof< +pub fn verify_fri_proof< F: RichField + Extendable, C: GenericConfig, const D: usize, @@ -65,7 +65,7 @@ pub(crate) fn verify_fri_proof< instance: &FriInstanceInfo, // Openings of the PLONK polynomials. 
os: &OpeningSet, - challenges: &ProofChallenges, + challenges: &FriChallenges, initial_merkle_caps: &[MerkleCap], proof: &FriProof, params: &FriParams, @@ -171,7 +171,7 @@ fn fri_verifier_query_round< const D: usize, >( instance: &FriInstanceInfo, - challenges: &ProofChallenges, + challenges: &FriChallenges, precomputed_reduced_evals: &PrecomputedReducedOpenings, initial_merkle_caps: &[MerkleCap], proof: &FriProof, diff --git a/plonky2/src/plonk/get_challenges.rs b/plonky2/src/plonk/get_challenges.rs index d28f29da..440705ce 100644 --- a/plonky2/src/plonk/get_challenges.rs +++ b/plonky2/src/plonk/get_challenges.rs @@ -12,8 +12,8 @@ use crate::iop::challenger::Challenger; use crate::plonk::circuit_data::CommonCircuitData; use crate::plonk::config::{GenericConfig, Hasher}; use crate::plonk::proof::{ - CompressedProof, CompressedProofWithPublicInputs, FriInferredElements, OpeningSet, Proof, - ProofChallenges, ProofWithPublicInputs, + CompressedProof, CompressedProofWithPublicInputs, FriChallenges, FriInferredElements, + OpeningSet, Proof, ProofChallenges, ProofWithPublicInputs, }; use crate::util::reverse_bits; @@ -86,10 +86,12 @@ fn get_challenges, C: GenericConfig, cons plonk_gammas, plonk_alphas, plonk_zeta, - fri_alpha, - fri_betas, - fri_pow_response, - fri_query_indices, + fri_challenges: FriChallenges { + fri_alpha, + fri_betas, + fri_pow_response, + fri_query_indices, + }, }) } @@ -100,7 +102,10 @@ impl, C: GenericConfig, const D: usize> &self, common_data: &CommonCircuitData, ) -> anyhow::Result> { - Ok(self.get_challenges(common_data)?.fri_query_indices) + Ok(self + .get_challenges(common_data)? + .fri_challenges + .fri_query_indices) } /// Computes all Fiat-Shamir challenges used in the Plonk proof. @@ -179,9 +184,13 @@ impl, C: GenericConfig, const D: usize> ) -> FriInferredElements { let ProofChallenges { plonk_zeta, - fri_alpha, - fri_betas, - fri_query_indices, + fri_challenges: + FriChallenges { + fri_alpha, + fri_betas, + fri_query_indices, + .. + }, .. } = challenges; let mut fri_inferred_elements = Vec::new(); diff --git a/plonky2/src/plonk/mod.rs b/plonky2/src/plonk/mod.rs index b2d1ed03..4f2fa4e1 100644 --- a/plonky2/src/plonk/mod.rs +++ b/plonky2/src/plonk/mod.rs @@ -4,7 +4,7 @@ pub mod config; pub(crate) mod copy_constraint; mod get_challenges; pub(crate) mod permutation_argument; -pub(crate) mod plonk_common; +pub mod plonk_common; pub mod proof; pub mod prover; pub mod recursive_verifier; diff --git a/plonky2/src/plonk/plonk_common.rs b/plonky2/src/plonk/plonk_common.rs index 937f3ccd..94279d12 100644 --- a/plonky2/src/plonk/plonk_common.rs +++ b/plonky2/src/plonk/plonk_common.rs @@ -125,7 +125,7 @@ pub(crate) fn reduce_with_powers_multi< cumul } -pub(crate) fn reduce_with_powers<'a, P: PackedField, T: IntoIterator>( +pub fn reduce_with_powers<'a, P: PackedField, T: IntoIterator>( terms: T, alpha: P::Scalar, ) -> P diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index 7fbdc671..401b9f52 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -239,6 +239,10 @@ pub(crate) struct ProofChallenges, const D: usize> // Point at which the PLONK polynomials are opened. pub plonk_zeta: F::Extension, + pub fri_challenges: FriChallenges, +} + +pub struct FriChallenges, const D: usize> { // Scaling factor to combine polynomials. 
pub fri_alpha: F::Extension, diff --git a/plonky2/src/plonk/verifier.rs b/plonky2/src/plonk/verifier.rs index cbaec6d9..f0c976fa 100644 --- a/plonky2/src/plonk/verifier.rs +++ b/plonky2/src/plonk/verifier.rs @@ -92,7 +92,7 @@ pub(crate) fn verify_with_challenges< verify_fri_proof::( &common_data.get_fri_instance(challenges.plonk_zeta), &proof.openings, - &challenges, + &challenges.fri_challenges, merkle_caps, &proof.opening_proof, &common_data.fri_params, diff --git a/starky/src/get_challenges.rs b/starky/src/get_challenges.rs index f3c78701..927eef6c 100644 --- a/starky/src/get_challenges.rs +++ b/starky/src/get_challenges.rs @@ -77,14 +77,19 @@ fn get_challenges, C: GenericConfig, cons impl, C: GenericConfig, const D: usize> StarkProofWithPublicInputs { - pub(crate) fn fri_query_indices(&self, config: &StarkConfig) -> anyhow::Result> { - Ok(self.get_challenges(config)?.fri_query_indices) + pub(crate) fn fri_query_indices( + &self, + config: &StarkConfig, + degree_bits: usize, + ) -> anyhow::Result> { + Ok(self.get_challenges(config, degree_bits)?.fri_query_indices) } /// Computes all Fiat-Shamir challenges used in the Plonk proof. pub(crate) fn get_challenges( &self, config: &StarkConfig, + degree_bits: usize, ) -> Result> { let StarkProof { trace_cap, @@ -99,7 +104,7 @@ impl, C: GenericConfig, const D: usize> }, } = &self.proof; - get_challenges( + get_challenges::( trace_cap, quotient_polys_cap, openings, @@ -107,6 +112,7 @@ impl, C: GenericConfig, const D: usize> final_poly, *pow_witness, config, + degree_bits, ) } } diff --git a/starky/src/proof.rs b/starky/src/proof.rs index 4d81793e..fe4ac43d 100644 --- a/starky/src/proof.rs +++ b/starky/src/proof.rs @@ -108,7 +108,7 @@ impl, const D: usize> StarkOpeningSet { quotient_polys, } = self; for v in &[local_values, next_values, permutation_zs, quotient_polys] { - self.observe_extension_elements(v); + challenger.observe_extension_elements(v); } } } diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 4ba09e22..a3f2cb2d 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -102,7 +102,7 @@ where ) ); let quotient_polys_cap = quotient_commitment.merkle_tree.cap; - challenger.observe_cap(quotient_polys_cap); + challenger.observe_cap("ient_polys_cap); let zeta = challenger.get_extension_challenge::(); // To avoid leaking witness data, we want to ensure that our opening locations, `zeta` and diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index cbde85c0..46dc948f 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -1,9 +1,11 @@ use anyhow::{ensure, Result}; -use plonky2::field::extension_field::Extendable; +use plonky2::field::extension_field::{Extendable, FieldExtension}; use plonky2::field::field_types::Field; +use plonky2::fri::verifier::verify_fri_proof; use plonky2::hash::hash_types::RichField; use plonky2::plonk::circuit_data::CommonCircuitData; use plonky2::plonk::config::GenericConfig; +use plonky2::plonk::plonk_common::reduce_with_powers; use plonky2::plonk::proof::ProofWithPublicInputs; use plonky2_util::log2_strict; @@ -19,11 +21,17 @@ pub(crate) fn verify< S: Stark, const D: usize, >( + stark: S, proof_with_pis: StarkProofWithPublicInputs, config: &StarkConfig, -) -> Result<()> { - let challenges = proof_with_pis.get_challenges(config)?; - verify_with_challenges(proof_with_pis, challenges, verifier_data, common_data) + degree_bits: usize, +) -> Result<()> +where + [(); S::COLUMNS]:, + [(); S::PUBLIC_INPUTS]:, +{ + let challenges = proof_with_pis.get_challenges(config, degree_bits)?; 
+ verify_with_challenges(stark, proof_with_pis, challenges, config) } pub(crate) fn verify_with_challenges< @@ -36,15 +44,18 @@ pub(crate) fn verify_with_challenges< proof_with_pis: StarkProofWithPublicInputs, challenges: StarkProofChallenges, config: &StarkConfig, -) -> Result<()> { +) -> Result<()> +where + [(); S::COLUMNS]:, + [(); S::PUBLIC_INPUTS]:, +{ let StarkProofWithPublicInputs { proof, public_inputs, } = proof_with_pis; let degree = recover_degree(&proof, config); - let degree_log = log2_strict(degree); + let degree_bits = log2_strict(degree); - let local_constants = &proof.openings.constants; let local_values = &proof.openings.local_values; let next_values = &proof.openings.local_values; let StarkOpeningSet { @@ -54,24 +65,32 @@ pub(crate) fn verify_with_challenges< quotient_polys, } = &proof.openings; let vars = StarkEvaluationVars { - local_values, - next_values, - public_inputs: &public_inputs, + local_values: &local_values.to_vec().try_into().unwrap(), + next_values: &next_values.to_vec().try_into().unwrap(), + public_inputs: &public_inputs + .into_iter() + .map(F::Extension::from_basefield) + .collect::>() + .try_into() + .unwrap(), }; + let (l_1, l_last) = eval_l_1_and_l_last(degree_bits, challenges.stark_zeta); let mut consumer = ConstraintConsumer::::new( - challenges.stark_alphas, - lagrange_first.values[i], - lagrange_last.values[i], + challenges + .stark_alphas + .iter() + .map(|&alpha| F::Extension::from_basefield(alpha)) + .collect::>(), + l_1.into(), + l_last.into(), ); - let (l_1, l_n) = eval_l_1_and_l_last(degree_log, challenges.stark_zeta); - stark.eval_ext() + stark.eval_ext(vars, &mut consumer); + let acc = consumer.accumulators(); // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta. let quotient_polys_zeta = &proof.openings.quotient_polys; - let zeta_pow_deg = challenges - .plonk_zeta - .exp_power_of_2(common_data.degree_bits); + let zeta_pow_deg = challenges.stark_zeta.exp_power_of_2(degree_bits); let z_h_zeta = zeta_pow_deg - F::Extension::ONE; // `quotient_polys_zeta` holds `num_challenges * quotient_degree_factor` evaluations. // Each chunk of `quotient_degree_factor` holds the evaluations of `t_0(zeta),...,t_{quotient_degree_factor-1}(zeta)` @@ -79,41 +98,52 @@ pub(crate) fn verify_with_challenges< // So to reconstruct `t(zeta)` we can compute `reduce_with_powers(chunk, zeta^n)` for each // `quotient_degree_factor`-sized chunk of the original evaluations. for (i, chunk) in quotient_polys_zeta - .chunks(common_data.quotient_degree_factor) + .chunks(config.fri_config.rate_bits) .enumerate() { - ensure!(vanishing_polys_zeta[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg)); + ensure!(acc[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg)); } - let merkle_caps = &[ - verifier_data.constants_sigmas_cap.clone(), - proof.wires_cap, - proof.plonk_zs_partial_products_cap, - proof.quotient_polys_cap, - ]; + let merkle_caps = &[proof.trace_cap, proof.quotient_polys_cap]; verify_fri_proof::( - &common_data.get_fri_instance(challenges.plonk_zeta), + &S::fri_instance( + challenges.stark_zeta, + F::primitive_root_of_unity(degree_bits).into(), + config.fri_config.rate_bits, + ), &proof.openings, &challenges, merkle_caps, &proof.opening_proof, - &common_data.fri_params, + &config.fri_params(degree_bits), )?; Ok(()) } /// Evaluate the Lagrange basis `L_1` and `L_n` at a point `x`. 
-fn eval_l_1_and_l_last(log_n: usize, x: F) -> (F,F) { +fn eval_l_1_and_l_last(log_n: usize, x: F) -> (F, F) { let n = 1 << log_n; let g = F::primitive_root_of_unity(log_n); let z_x = x.exp_power_of_2(log_n); - let invs = F::batch_multiplicative_inverse(&[F::from_canonical_usize(n) * (x - F::ONE), F::from_canonical_usize(n) * (g*x - F::ONE)]); + let invs = F::batch_multiplicative_inverse(&[ + F::from_canonical_usize(n) * (x - F::ONE), + F::from_canonical_usize(n) * (g * x - F::ONE), + ]); (z_x * invs[0], z_x * invs[1]) } -fn recover_degree, C: GenericConfig, const D: usize>(proof: &StarkProof, config: &StarkConfig) -> usize { - 1<<(proof.opening_proof.query_round_proofs[0].initial_trees_proof.evals_proofs[0].1.siblings.len() + config.fri_config.cap_height) -} \ No newline at end of file +fn recover_degree, C: GenericConfig, const D: usize>( + proof: &StarkProof, + config: &StarkConfig, +) -> usize { + 1 << (proof.opening_proof.query_round_proofs[0] + .initial_trees_proof + .evals_proofs[0] + .1 + .siblings + .len() + + config.fri_config.cap_height) +} From 92ea4b65d1829f90f07e7c705821c0397daee06f Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 31 Jan 2022 18:00:07 +0100 Subject: [PATCH 049/143] Constraint check working --- field/src/field_types.rs | 1 + plonky2/src/fri/verifier.rs | 5 ++--- plonky2/src/hash/merkle_proofs.rs | 1 + plonky2/src/plonk/verifier.rs | 2 +- starky/src/fibonacci_stark.rs | 16 +++++++++------- starky/src/get_challenges.rs | 16 +++++++++++----- starky/src/proof.rs | 30 ++++++++++++++++++++---------- starky/src/prover.rs | 23 ++++++++++++++++------- starky/src/verifier.rs | 14 +++++++++----- system_zero/src/system_zero.rs | 2 +- 10 files changed, 71 insertions(+), 39 deletions(-) diff --git a/field/src/field_types.rs b/field/src/field_types.rs index 0d7b314f..845d8e83 100644 --- a/field/src/field_types.rs +++ b/field/src/field_types.rs @@ -389,6 +389,7 @@ pub trait Field: /// Representative `g` of the coset used in FRI, so that LDEs in FRI are done over `gH`. fn coset_shift() -> Self { Self::MULTIPLICATIVE_GROUP_GENERATOR + // Self::ONE } /// Equivalent to *self + x * y, but may be cheaper. diff --git a/plonky2/src/fri/verifier.rs b/plonky2/src/fri/verifier.rs index f41ae969..3e70c025 100644 --- a/plonky2/src/fri/verifier.rs +++ b/plonky2/src/fri/verifier.rs @@ -63,8 +63,7 @@ pub fn verify_fri_proof< const D: usize, >( instance: &FriInstanceInfo, - // Openings of the PLONK polynomials. 
- os: &OpeningSet, + openings: &FriOpenings, challenges: &FriChallenges, initial_merkle_caps: &[MerkleCap], proof: &FriProof, @@ -88,7 +87,7 @@ pub fn verify_fri_proof< ); let precomputed_reduced_evals = - PrecomputedReducedOpenings::from_os_and_alpha(&os.to_fri_openings(), challenges.fri_alpha); + PrecomputedReducedOpenings::from_os_and_alpha(&openings, challenges.fri_alpha); for (&x_index, round_proof) in challenges .fri_query_indices .iter() diff --git a/plonky2/src/hash/merkle_proofs.rs b/plonky2/src/hash/merkle_proofs.rs index c2f3655d..feb39791 100644 --- a/plonky2/src/hash/merkle_proofs.rs +++ b/plonky2/src/hash/merkle_proofs.rs @@ -31,6 +31,7 @@ pub(crate) fn verify_merkle_proof>( merkle_cap: &MerkleCap, proof: &MerkleProof, ) -> Result<()> { + dbg!(leaf_index); let mut index = leaf_index; let mut current_digest = H::hash(&leaf_data, false); for &sibling_digest in proof.siblings.iter() { diff --git a/plonky2/src/plonk/verifier.rs b/plonky2/src/plonk/verifier.rs index f0c976fa..46d41bfe 100644 --- a/plonky2/src/plonk/verifier.rs +++ b/plonky2/src/plonk/verifier.rs @@ -91,7 +91,7 @@ pub(crate) fn verify_with_challenges< verify_fri_proof::( &common_data.get_fri_instance(challenges.plonk_zeta), - &proof.openings, + &proof.openings.to_fri_openings(), &challenges.fri_challenges, merkle_caps, &proof.opening_proof, diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index ea834e99..ffaa14a7 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -12,6 +12,7 @@ use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars}; /// Toy STARK system used for testing. /// Computes a Fibonacci sequence with state `[x0, x1]` using the state transition /// `x0 <- x1, x1 <- x0 + x1`. +#[derive(Copy, Clone)] struct FibonacciStark, const D: usize> { num_rows: usize, _phantom: PhantomData, @@ -58,10 +59,10 @@ impl, const D: usize> Stark for FibonacciStar FE: FieldExtension, P: PackedField, { - // Check public inputs. - yield_constr.one_first_row(vars.local_values[0] - vars.public_inputs[Self::PI_INDEX_X0]); - yield_constr.one_first_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_X1]); - yield_constr.one_last_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_RES]); + // // Check public inputs. 
+ // yield_constr.one_first_row(vars.local_values[0] - vars.public_inputs[Self::PI_INDEX_X0]); + // yield_constr.one_first_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_X1]); + // yield_constr.one_last_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_RES]); // x0 <- x1 yield_constr.one(vars.next_values[0] - vars.local_values[1]); @@ -89,6 +90,7 @@ mod tests { use crate::config::StarkConfig; use crate::fibonacci_stark::FibonacciStark; use crate::prover::prove; + use crate::verifier::verify; fn fibonacci(n: usize, x0: usize, x1: usize) -> usize { (0..n).fold((0, 1), |x, _| (x.1, x.0 + x.1)).1 @@ -110,14 +112,14 @@ mod tests { ]; let stark = S::new(num_rows); let trace = stark.generate_trace(public_inputs[0], public_inputs[1]); - prove::( + let proof = prove::( stark, - config, + &config, trace, public_inputs, &mut TimingTree::default(), )?; - Ok(()) + verify(stark, proof, &config, num_rows) } } diff --git a/starky/src/get_challenges.rs b/starky/src/get_challenges.rs index 927eef6c..d6a9b562 100644 --- a/starky/src/get_challenges.rs +++ b/starky/src/get_challenges.rs @@ -6,6 +6,7 @@ use plonky2::hash::hash_types::RichField; use plonky2::hash::merkle_tree::MerkleCap; use plonky2::iop::challenger::Challenger; use plonky2::plonk::config::{GenericConfig, Hasher}; +use plonky2::plonk::proof::FriChallenges; use crate::config::StarkConfig; use crate::proof::{StarkOpeningSet, StarkProof, StarkProofChallenges, StarkProofWithPublicInputs}; @@ -67,10 +68,12 @@ fn get_challenges, C: GenericConfig, cons Ok(StarkProofChallenges { stark_alphas, stark_zeta, - fri_alpha, - fri_betas, - fri_pow_response, - fri_query_indices, + fri_challenges: FriChallenges { + fri_alpha, + fri_betas, + fri_pow_response, + fri_query_indices, + }, }) } @@ -82,7 +85,10 @@ impl, C: GenericConfig, const D: usize> config: &StarkConfig, degree_bits: usize, ) -> anyhow::Result> { - Ok(self.get_challenges(config, degree_bits)?.fri_query_indices) + Ok(self + .get_challenges(config, degree_bits)? + .fri_challenges + .fri_query_indices) } /// Computes all Fiat-Shamir challenges used in the Plonk proof. diff --git a/starky/src/proof.rs b/starky/src/proof.rs index fe4ac43d..2d9597d0 100644 --- a/starky/src/proof.rs +++ b/starky/src/proof.rs @@ -2,10 +2,12 @@ use plonky2::field::extension_field::Extendable; use plonky2::field::field_types::Field; use plonky2::fri::oracle::PolynomialBatch; use plonky2::fri::proof::{CompressedFriProof, FriProof}; +use plonky2::fri::structure::{FriOpeningBatch, FriOpenings}; use plonky2::hash::hash_types::RichField; use plonky2::hash::merkle_tree::MerkleCap; use plonky2::iop::challenger::Challenger; use plonky2::plonk::config::{GenericConfig, Hasher}; +use plonky2::plonk::proof::FriChallenges; use rayon::prelude::*; pub struct StarkProof, C: GenericConfig, const D: usize> { @@ -58,16 +60,7 @@ pub(crate) struct StarkProofChallenges, const D: us // Point at which the PLONK polynomials are opened. pub stark_zeta: F::Extension, - // Scaling factor to combine polynomials. - pub fri_alpha: F::Extension, - - // Betas used in the FRI commit phase reductions. - pub fri_betas: Vec, - - pub fri_pow_response: F, - - // Indices at which the oracle is queried in FRI. - pub fri_query_indices: Vec, + pub fri_challenges: FriChallenges, } /// Purported values of each polynomial at the challenge point. 
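The hunks above (patches 048 and 049) pull the FRI-specific Fiat-Shamir outputs — `fri_alpha`, `fri_betas`, `fri_pow_response`, `fri_query_indices` — into a dedicated `FriChallenges` struct, so that the now-public `verify_fri_proof` can serve both the PLONK and STARK verifiers while `StarkProofChallenges` keeps only `stark_alphas`, `stark_zeta`, and the nested `fri_challenges`. The sketch below is a toy model of the Fiat-Shamir pattern these `get_challenges` functions rely on: prover and verifier replay the same transcript, observing commitments and sampling challenges in the same order, so they derive identical values. It uses only `std` (`DefaultHasher` is not a cryptographic hash) and stand-in names; it is not plonky2's `Challenger` API.

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    /// Toy transcript: absorbs data into a running state and squeezes challenges from it.
    struct ToyChallenger {
        state: u64,
    }

    impl ToyChallenger {
        fn new() -> Self {
            ToyChallenger { state: 0 }
        }

        /// Absorb observed data (e.g. a Merkle cap) into the transcript state.
        fn observe(&mut self, data: &[u64]) {
            let mut h = DefaultHasher::new();
            (self.state, data).hash(&mut h);
            self.state = h.finish();
        }

        /// Derive the next challenge deterministically from the current state.
        fn challenge(&mut self) -> u64 {
            let mut h = DefaultHasher::new();
            self.state.hash(&mut h);
            self.state = h.finish();
            self.state
        }
    }

    fn main() {
        // Stand-ins for the trace and quotient commitments observed during proving.
        let trace_cap = [1u64, 2, 3];
        let quotient_cap = [4u64, 5, 6];

        let run_transcript = || {
            let mut c = ToyChallenger::new();
            c.observe(&trace_cap);
            let alpha = c.challenge(); // analogous to sampling `stark_alphas`
            c.observe(&quotient_cap);
            let zeta = c.challenge(); // analogous to sampling `stark_zeta`
            (alpha, zeta)
        };

        // Prover and verifier replay the same transcript, so the challenges agree.
        assert_eq!(run_transcript(), run_transcript());
    }

The grouping also means the FRI-specific values can be handed to `verify_fri_proof` as a single `&challenges.fri_challenges` argument, as the verifier diffs above show.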
@@ -111,4 +104,21 @@ impl, const D: usize> StarkOpeningSet { challenger.observe_extension_elements(v); } } + + pub(crate) fn to_fri_openings(&self) -> FriOpenings { + let zeta_batch = FriOpeningBatch { + values: [ + self.local_values.as_slice(), + self.quotient_polys.as_slice(), + self.permutation_zs.as_slice(), + ] + .concat(), + }; + let zeta_right_batch = FriOpeningBatch { + values: self.next_values.to_vec(), + }; + FriOpenings { + batches: vec![zeta_batch, zeta_right_batch], + } + } } diff --git a/starky/src/prover.rs b/starky/src/prover.rs index a3f2cb2d..6a22e671 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -23,7 +23,7 @@ use crate::vars::StarkEvaluationVars; // TODO: Deal with public inputs. pub fn prove( stark: S, - config: StarkConfig, + config: &StarkConfig, trace: Vec<[F; S::COLUMNS]>, public_inputs: [F; S::PUBLIC_INPUTS], timing: &mut TimingTree, @@ -101,7 +101,7 @@ where None, ) ); - let quotient_polys_cap = quotient_commitment.merkle_tree.cap; + let quotient_polys_cap = quotient_commitment.merkle_tree.cap.clone(); challenger.observe_cap("ient_polys_cap); let zeta = challenger.get_extension_challenge::(); @@ -164,7 +164,6 @@ where [(); S::PUBLIC_INPUTS]:, { let degree = 1 << degree_bits; - let points = F::two_adic_subgroup(degree_bits + rate_bits); // Evaluation of the first Lagrange polynomial on the LDE domain. let lagrange_first = { @@ -179,12 +178,18 @@ where evals.lde(rate_bits) }; - let z_h_on_coset = ZeroPolyOnCoset::new(degree_bits, rate_bits); + let z_h_on_coset = ZeroPolyOnCoset::::new(degree_bits, rate_bits); // Retrieve the LDE values at index `i`. let get_at_index = |comm: &PolynomialBatch, i: usize| -> [F; S::COLUMNS] { comm.get_lde_values(i).try_into().unwrap() }; + let last = F::primitive_root_of_unity(degree_bits).inverse(); + let coset = F::cyclic_subgroup_coset_known_order( + F::primitive_root_of_unity(degree_bits + rate_bits), + F::coset_shift(), + degree << rate_bits, + ); let quotient_values = (0..degree << rate_bits) .into_par_iter() @@ -197,15 +202,19 @@ where ); let vars = StarkEvaluationVars:: { local_values: &get_at_index(trace_commitment, i), - next_values: &get_at_index(trace_commitment, (i + 1) % (degree << rate_bits)), + next_values: &get_at_index( + trace_commitment, + (i + (1 << rate_bits)) % (degree << rate_bits), + ), public_inputs: &public_inputs, }; stark.eval_packed_base(vars, &mut consumer); - // TODO: Fix this once we a genuine `PackedField`. + // TODO: Fix this once we use a genuine `PackedField`. let mut constraints_evals = consumer.accumulators(); let denominator_inv = z_h_on_coset.eval_inverse(i); + let z_last = coset[i] - last; for eval in &mut constraints_evals { - *eval *= denominator_inv; + *eval *= denominator_inv * z_last; } constraints_evals }) diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index 46dc948f..63e063af 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -92,16 +92,19 @@ where let quotient_polys_zeta = &proof.openings.quotient_polys; let zeta_pow_deg = challenges.stark_zeta.exp_power_of_2(degree_bits); let z_h_zeta = zeta_pow_deg - F::Extension::ONE; + let g = F::primitive_root_of_unity(degree_bits + config.fri_config.rate_bits); + let last = F::primitive_root_of_unity(degree_bits).inverse(); + let z_last = challenges.stark_zeta - last.into(); // `quotient_polys_zeta` holds `num_challenges * quotient_degree_factor` evaluations. 
// Each chunk of `quotient_degree_factor` holds the evaluations of `t_0(zeta),...,t_{quotient_degree_factor-1}(zeta)` // where the "real" quotient polynomial is `t(X) = t_0(X) + t_1(X)*X^n + t_2(X)*X^{2n} + ...`. // So to reconstruct `t(zeta)` we can compute `reduce_with_powers(chunk, zeta^n)` for each // `quotient_degree_factor`-sized chunk of the original evaluations. for (i, chunk) in quotient_polys_zeta - .chunks(config.fri_config.rate_bits) + .chunks(1 << config.fri_config.rate_bits) .enumerate() { - ensure!(acc[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg)); + ensure!(acc[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg) / z_last); } let merkle_caps = &[proof.trace_cap, proof.quotient_polys_cap]; @@ -112,8 +115,8 @@ where F::primitive_root_of_unity(degree_bits).into(), config.fri_config.rate_bits, ), - &proof.openings, - &challenges, + &proof.openings.to_fri_openings(), + &challenges.fri_challenges, merkle_caps, &proof.opening_proof, &config.fri_params(degree_bits), @@ -145,5 +148,6 @@ fn recover_degree, C: GenericConfig, cons .1 .siblings .len() - + config.fri_config.cap_height) + + config.fri_config.cap_height + - config.fri_config.rate_bits) } diff --git a/system_zero/src/system_zero.rs b/system_zero/src/system_zero.rs index 49e25e6c..38326b68 100644 --- a/system_zero/src/system_zero.rs +++ b/system_zero/src/system_zero.rs @@ -108,7 +108,7 @@ mod tests { let config = StarkConfig::standard_fast_config(); let mut timing = TimingTree::new("prove", Level::Debug); let trace = system.generate_trace(); - prove::(system, config, trace, public_inputs, &mut timing)?; + prove::(system, &config, trace, public_inputs, &mut timing)?; Ok(()) } From 37562943220bbad0d3cadf089e287724d39566df Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Tue, 30 Nov 2021 15:32:27 -0800 Subject: [PATCH 050/143] ECDSA rebase --- plonky2/src/curve/ecdsa.rs | 111 +++++++++++++++++++++++++++++++++++++ plonky2/src/curve/mod.rs | 1 + plonky2/src/gadgets/mod.rs | 1 + waksman/src/ecdsa.rs | 53 ++++++++++++++++++ 4 files changed, 166 insertions(+) create mode 100644 plonky2/src/curve/ecdsa.rs create mode 100644 waksman/src/ecdsa.rs diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs new file mode 100644 index 00000000..ca5b27ad --- /dev/null +++ b/plonky2/src/curve/ecdsa.rs @@ -0,0 +1,111 @@ +use std::ops::Mul; + +use itertools::unfold; + +use crate::curve::curve_types::{AffinePoint, Curve, CurveScalar}; +use crate::field::field_types::{Field, RichField}; +use crate::hash::hashing::hash_n_to_1; + +pub struct ECDSASignature { + pub r: C::ScalarField, + pub s: C::ScalarField, +} + +pub struct ECDSASecretKey(C::ScalarField); +pub struct ECDSAPublicKey(AffinePoint); + +pub fn base_to_scalar(x: C::BaseField) -> C::ScalarField { + C::ScalarField::from_biguint(x.to_biguint()) +} + +pub fn scalar_to_base(x: C::ScalarField) -> C::BaseField { + C::BaseField::from_biguint(x.to_biguint()) +} + +pub fn hash_to_scalar(msg: F, num_bits: usize) -> C::ScalarField { + let h_bits = hash_to_bits(msg, num_bits); + let h_u32 = h_bits + .iter() + .zip(0..32) + .fold(0u32, |acc, (&bit, pow)| acc + (bit as u32) * (2 << pow)); + C::ScalarField::from_canonical_u32(h_u32) +} + +pub fn hash_to_bits(x: F, num_bits: usize) -> Vec { + let hashed = hash_n_to_1(vec![x], true); + + let mut val = hashed.to_canonical_u64(); + unfold((), move |_| { + let ret = val % 2 != 0; + val /= 2; + Some(ret) + }) + .take(num_bits) + .collect() +} + +pub fn sign_message(msg: F, sk: ECDSASecretKey) -> ECDSASignature { + let h = 
hash_to_scalar::(msg, 32); + println!("SIGNING h: {:?}", h); + + let k = C::ScalarField::rand(); + let rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); + let r = base_to_scalar::(rr.x); + let s = k.inverse() * (h + r * sk.0); + + println!("SIGNING s: {:?}", s); + println!("SIGNING s^-1: {:?}", s.inverse()); + println!("SIGNING s^-1^-1: {:?}", s.inverse().inverse()); + + ECDSASignature { r, s } +} + +pub fn verify_message( + msg: F, + sig: ECDSASignature, + pk: ECDSAPublicKey, +) -> bool { + let ECDSASignature { r, s } = sig; + + let h = hash_to_scalar::(msg, 32); + println!("VERIFYING h: {:?}", h); + + let c = s.inverse(); + + println!("VERIFYING c^-1: {:?}", c.inverse()); + let u1 = h * c; + let u2 = r * c; + + let g = C::GENERATOR_PROJECTIVE; + let point_proj = CurveScalar(u1) * g + CurveScalar(u2) * pk.0.to_projective(); + let point = point_proj.to_affine(); + + let x = base_to_scalar::(point.x); + r == x +} + +mod tests { + use anyhow::Result; + + use crate::curve::curve_types::{Curve, CurveScalar}; + use crate::curve::ecdsa::{sign_message, verify_message, ECDSAPublicKey, ECDSASecretKey}; + use crate::curve::secp256k1::Secp256K1; + use crate::field::field_types::Field; + use crate::field::goldilocks_field::GoldilocksField; + use crate::field::secp256k1_scalar::Secp256K1Scalar; + use crate::plonk::circuit_data::CircuitConfig; + + #[test] + fn test_ecdsa_native() { + type F = GoldilocksField; + type C = Secp256K1; + + let msg = F::rand(); + let sk = ECDSASecretKey(Secp256K1Scalar::rand()); + let pk = ECDSAPublicKey((CurveScalar(sk.0) * C::GENERATOR_PROJECTIVE).to_affine()); + + let sig = sign_message(msg, sk); + let result = verify_message(msg, sig, pk); + assert!(result); + } +} diff --git a/plonky2/src/curve/mod.rs b/plonky2/src/curve/mod.rs index d31e373e..8dd6f0d6 100644 --- a/plonky2/src/curve/mod.rs +++ b/plonky2/src/curve/mod.rs @@ -3,4 +3,5 @@ pub mod curve_msm; pub mod curve_multiplication; pub mod curve_summation; pub mod curve_types; +pub mod ecdsa; pub mod secp256k1; diff --git a/plonky2/src/gadgets/mod.rs b/plonky2/src/gadgets/mod.rs index b73e2a7f..ec4d1263 100644 --- a/plonky2/src/gadgets/mod.rs +++ b/plonky2/src/gadgets/mod.rs @@ -3,6 +3,7 @@ pub mod arithmetic_extension; pub mod arithmetic_u32; pub mod biguint; pub mod curve; +pub mod ecdsa; pub mod hash; pub mod interpolation; pub mod multiple_comparison; diff --git a/waksman/src/ecdsa.rs b/waksman/src/ecdsa.rs new file mode 100644 index 00000000..47068ee8 --- /dev/null +++ b/waksman/src/ecdsa.rs @@ -0,0 +1,53 @@ +pub struct ECDSASecretKeyTarget(NonNativeTarget); +pub struct ECDSAPublicKeyTarget(AffinePointTarget); + +pub struct ECDSASignatureTarget { + pub r: NonNativeTarget, + pub s: NonNativeTarget, +} + + + +impl, const D: usize> CircuitBuilder { + +} + +mod tests { + use std::ops::{Mul, Neg}; + + use anyhow::Result; + + use crate::curve::curve_types::{AffinePoint, Curve, CurveScalar}; + use crate::curve::secp256k1::Secp256K1; + use crate::field::field_types::Field; + use crate::field::goldilocks_field::GoldilocksField; + use crate::field::secp256k1_base::Secp256K1Base; + use crate::field::secp256k1_scalar::Secp256K1Scalar; + use crate::iop::witness::PartialWitness; + use crate::plonk::circuit_builder::CircuitBuilder; + use crate::plonk::circuit_data::CircuitConfig; + use crate::plonk::verifier::verify; + + /*#[test] + fn test_curve_point_is_valid() -> Result<()> { + type F = GoldilocksField; + const D: usize = 4; + + let config = CircuitConfig::standard_recursion_config(); + + let pw = 
PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let g = Secp256K1::GENERATOR_AFFINE; + let g_target = builder.constant_affine_point(g); + let neg_g_target = builder.curve_neg(&g_target); + + builder.curve_assert_valid(&g_target); + builder.curve_assert_valid(&neg_g_target); + + let data = builder.build(); + let proof = data.prove(pw).unwrap(); + + verify(proof, &data.verifier_only, &data.common) + }*/ +} From f55948a6212129860450460bb27efff491128778 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Wed, 1 Dec 2021 09:31:10 -0800 Subject: [PATCH 051/143] removed debugging prints --- plonky2/src/curve/ecdsa.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index ca5b27ad..177f08ed 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -46,17 +46,12 @@ pub fn hash_to_bits(x: F, num_bits: usize) -> Vec { pub fn sign_message(msg: F, sk: ECDSASecretKey) -> ECDSASignature { let h = hash_to_scalar::(msg, 32); - println!("SIGNING h: {:?}", h); let k = C::ScalarField::rand(); let rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); let r = base_to_scalar::(rr.x); let s = k.inverse() * (h + r * sk.0); - println!("SIGNING s: {:?}", s); - println!("SIGNING s^-1: {:?}", s.inverse()); - println!("SIGNING s^-1^-1: {:?}", s.inverse().inverse()); - ECDSASignature { r, s } } @@ -68,11 +63,8 @@ pub fn verify_message( let ECDSASignature { r, s } = sig; let h = hash_to_scalar::(msg, 32); - println!("VERIFYING h: {:?}", h); let c = s.inverse(); - - println!("VERIFYING c^-1: {:?}", c.inverse()); let u1 = h * c; let u2 = r * c; From b796c73e49ba01e577aa0ac9b68b351a23b85a7e Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 2 Dec 2021 10:27:47 -0800 Subject: [PATCH 052/143] ECDSA gadget and test --- plonky2/src/curve/ecdsa.rs | 28 ++++----- plonky2/src/gadgets/nonnative.rs | 2 +- waksman/src/ecdsa.rs | 99 +++++++++++++++++++++++++++----- 3 files changed, 97 insertions(+), 32 deletions(-) diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index 177f08ed..0ed777d9 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -1,5 +1,3 @@ -use std::ops::Mul; - use itertools::unfold; use crate::curve::curve_types::{AffinePoint, Curve, CurveScalar}; @@ -11,8 +9,8 @@ pub struct ECDSASignature { pub s: C::ScalarField, } -pub struct ECDSASecretKey(C::ScalarField); -pub struct ECDSAPublicKey(AffinePoint); +pub struct ECDSASecretKey(pub C::ScalarField); +pub struct ECDSAPublicKey(pub AffinePoint); pub fn base_to_scalar(x: C::BaseField) -> C::ScalarField { C::ScalarField::from_biguint(x.to_biguint()) @@ -22,15 +20,6 @@ pub fn scalar_to_base(x: C::ScalarField) -> C::BaseField { C::BaseField::from_biguint(x.to_biguint()) } -pub fn hash_to_scalar(msg: F, num_bits: usize) -> C::ScalarField { - let h_bits = hash_to_bits(msg, num_bits); - let h_u32 = h_bits - .iter() - .zip(0..32) - .fold(0u32, |acc, (&bit, pow)| acc + (bit as u32) * (2 << pow)); - C::ScalarField::from_canonical_u32(h_u32) -} - pub fn hash_to_bits(x: F, num_bits: usize) -> Vec { let hashed = hash_n_to_1(vec![x], true); @@ -44,6 +33,15 @@ pub fn hash_to_bits(x: F, num_bits: usize) -> Vec { .collect() } +pub fn hash_to_scalar(x: F, num_bits: usize) -> C::ScalarField { + let h_bits = hash_to_bits(x, num_bits); + let h_u32 = h_bits + .iter() + .zip(0..32) + .fold(0u32, |acc, (&bit, pow)| acc + (bit as u32) * (2 << pow)); + C::ScalarField::from_canonical_u32(h_u32) +} + pub fn sign_message(msg: F, sk: 
ECDSASecretKey) -> ECDSASignature { let h = hash_to_scalar::(msg, 32); @@ -76,16 +74,14 @@ pub fn verify_message( r == x } +#[cfg(test)] mod tests { - use anyhow::Result; - use crate::curve::curve_types::{Curve, CurveScalar}; use crate::curve::ecdsa::{sign_message, verify_message, ECDSAPublicKey, ECDSASecretKey}; use crate::curve::secp256k1::Secp256K1; use crate::field::field_types::Field; use crate::field::goldilocks_field::GoldilocksField; use crate::field::secp256k1_scalar::Secp256K1Scalar; - use crate::plonk::circuit_data::CircuitConfig; #[test] fn test_ecdsa_native() { diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 16fd022e..824d851c 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -15,7 +15,7 @@ use crate::plonk::circuit_builder::CircuitBuilder; #[derive(Clone, Debug)] pub struct NonNativeTarget { pub(crate) value: BigUintTarget, - _phantom: PhantomData, + pub(crate) _phantom: PhantomData, } impl, const D: usize> CircuitBuilder { diff --git a/waksman/src/ecdsa.rs b/waksman/src/ecdsa.rs index 47068ee8..199a8c56 100644 --- a/waksman/src/ecdsa.rs +++ b/waksman/src/ecdsa.rs @@ -1,3 +1,15 @@ +use std::marker::PhantomData; + +use crate::curve::curve_types::Curve; +use crate::field::extension_field::Extendable; +use crate::field::field_types::RichField; +use crate::gadgets::arithmetic_u32::U32Target; +use crate::gadgets::biguint::BigUintTarget; +use crate::gadgets::curve::AffinePointTarget; +use crate::gadgets::nonnative::NonNativeTarget; +use crate::iop::target::{BoolTarget, Target}; +use crate::plonk::circuit_builder::CircuitBuilder; + pub struct ECDSASecretKeyTarget(NonNativeTarget); pub struct ECDSAPublicKeyTarget(AffinePointTarget); @@ -6,48 +18,105 @@ pub struct ECDSASignatureTarget { pub s: NonNativeTarget, } - - impl, const D: usize> CircuitBuilder { - + pub fn hash_to_bits(&mut self, x: Target, num_bits: usize) -> Vec { + let inputs = vec![x]; + let hashed = self.hash_n_to_m(inputs, 1, true)[0]; + self.split_le(hashed, num_bits) + } + + pub fn hash_to_scalar(&mut self, x: Target, num_bits: usize) -> NonNativeTarget { + let h_bits = self.hash_to_bits(x, num_bits); + + let two = self.two(); + let mut rev_bits = h_bits.iter().rev(); + let mut sum = rev_bits.next().unwrap().target; + for &bit in rev_bits { + sum = self.mul_add(two, sum, bit.target); + } + let limbs = vec![U32Target(sum)]; + let value = BigUintTarget { + limbs, + }; + + NonNativeTarget { + value, + _phantom: PhantomData, + } + } + + pub fn verify_message(&mut self, msg: Target, sig: ECDSASignatureTarget, pk: ECDSAPublicKeyTarget) { + let ECDSASignatureTarget { r, s } = sig; + + let h = self.hash_to_scalar::(msg, 32); + + let c = self.inv_nonnative(&s); + let u1 = self.mul_nonnative(&h, &c); + let u2 = self.mul_nonnative(&r, &c); + + let g = self.constant_affine_point(C::GENERATOR_AFFINE); + let point1 = self.curve_scalar_mul(&g, &u1); + let point2 = self.curve_scalar_mul(&pk.0, &u2); + let point = self.curve_add(&point1, &point2); + + let x = NonNativeTarget:: { + value: point.x.value, + _phantom: PhantomData, + }; + self.connect_nonnative(&r, &x); + } } +#[cfg(test)] mod tests { - use std::ops::{Mul, Neg}; - use anyhow::Result; - use crate::curve::curve_types::{AffinePoint, Curve, CurveScalar}; + use crate::curve::curve_types::{Curve, CurveScalar}; + use crate::curve::ecdsa::{ECDSAPublicKey, ECDSASecretKey, ECDSASignature, sign_message}; use crate::curve::secp256k1::Secp256K1; use crate::field::field_types::Field; use 
crate::field::goldilocks_field::GoldilocksField; - use crate::field::secp256k1_base::Secp256K1Base; use crate::field::secp256k1_scalar::Secp256K1Scalar; + use crate::gadgets::ecdsa::{ECDSAPublicKeyTarget, ECDSASignatureTarget}; use crate::iop::witness::PartialWitness; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::CircuitConfig; use crate::plonk::verifier::verify; - /*#[test] - fn test_curve_point_is_valid() -> Result<()> { + #[test] + fn test_ecdsa_circuit() -> Result<()> { type F = GoldilocksField; const D: usize = 4; + type C = Secp256K1; let config = CircuitConfig::standard_recursion_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); - let g = Secp256K1::GENERATOR_AFFINE; - let g_target = builder.constant_affine_point(g); - let neg_g_target = builder.curve_neg(&g_target); + let msg = F::rand(); + let msg_target = builder.constant(msg); + + let sk = ECDSASecretKey::(Secp256K1Scalar::rand()); + let pk = ECDSAPublicKey((CurveScalar(sk.0) * C::GENERATOR_PROJECTIVE).to_affine()); - builder.curve_assert_valid(&g_target); - builder.curve_assert_valid(&neg_g_target); + let pk_target = ECDSAPublicKeyTarget(builder.constant_affine_point(pk.0)); + + let sig = sign_message(msg, sk); + + let ECDSASignature { r, s } = sig; + let r_target = builder.constant_nonnative(r); + let s_target = builder.constant_nonnative(s); + let sig_target = ECDSASignatureTarget { + r: r_target, + s: s_target, + }; + + builder.verify_message(msg_target, sig_target, pk_target); let data = builder.build(); let proof = data.prove(pw).unwrap(); verify(proof, &data.verifier_only, &data.common) - }*/ + } } From 9cac6d3a4d6943a4333d2ea10b256d2a9b407e06 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 2 Dec 2021 10:28:00 -0800 Subject: [PATCH 053/143] fmt --- waksman/src/ecdsa.rs | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/waksman/src/ecdsa.rs b/waksman/src/ecdsa.rs index 199a8c56..362fbc6d 100644 --- a/waksman/src/ecdsa.rs +++ b/waksman/src/ecdsa.rs @@ -25,9 +25,13 @@ impl, const D: usize> CircuitBuilder { self.split_le(hashed, num_bits) } - pub fn hash_to_scalar(&mut self, x: Target, num_bits: usize) -> NonNativeTarget { + pub fn hash_to_scalar( + &mut self, + x: Target, + num_bits: usize, + ) -> NonNativeTarget { let h_bits = self.hash_to_bits(x, num_bits); - + let two = self.two(); let mut rev_bits = h_bits.iter().rev(); let mut sum = rev_bits.next().unwrap().target; @@ -35,9 +39,7 @@ impl, const D: usize> CircuitBuilder { sum = self.mul_add(two, sum, bit.target); } let limbs = vec![U32Target(sum)]; - let value = BigUintTarget { - limbs, - }; + let value = BigUintTarget { limbs }; NonNativeTarget { value, @@ -45,7 +47,12 @@ impl, const D: usize> CircuitBuilder { } } - pub fn verify_message(&mut self, msg: Target, sig: ECDSASignatureTarget, pk: ECDSAPublicKeyTarget) { + pub fn verify_message( + &mut self, + msg: Target, + sig: ECDSASignatureTarget, + pk: ECDSAPublicKeyTarget, + ) { let ECDSASignatureTarget { r, s } = sig; let h = self.hash_to_scalar::(msg, 32); @@ -72,7 +79,7 @@ mod tests { use anyhow::Result; use crate::curve::curve_types::{Curve, CurveScalar}; - use crate::curve::ecdsa::{ECDSAPublicKey, ECDSASecretKey, ECDSASignature, sign_message}; + use crate::curve::ecdsa::{sign_message, ECDSAPublicKey, ECDSASecretKey, ECDSASignature}; use crate::curve::secp256k1::Secp256K1; use crate::field::field_types::Field; use crate::field::goldilocks_field::GoldilocksField; @@ -96,7 +103,7 @@ mod tests { 
let msg = F::rand(); let msg_target = builder.constant(msg); - + let sk = ECDSASecretKey::(Secp256K1Scalar::rand()); let pk = ECDSAPublicKey((CurveScalar(sk.0) * C::GENERATOR_PROJECTIVE).to_affine()); From 08fa4031badd0ec42355404ed70ee6b4886c8f5f Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 20 Jan 2022 16:06:00 -0800 Subject: [PATCH 054/143] ECDSA merge --- plonky2/src/gadgets/arithmetic_u32.rs | 82 +++++++ {waksman/src => plonky2/src/gadgets}/ecdsa.rs | 0 plonky2/src/gadgets/mod.rs | 1 + plonky2/src/gadgets/multiple_comparison.rs | 9 +- plonky2/src/gadgets/nonnative.rs | 206 ++++++++++++++++-- plonky2/src/gates/mod.rs | 2 + plonky2/src/iop/generator.rs | 7 +- plonky2/src/plonk/circuit_builder.rs | 82 ++++++- 8 files changed, 370 insertions(+), 19 deletions(-) rename {waksman/src => plonky2/src/gadgets}/ecdsa.rs (100%) diff --git a/plonky2/src/gadgets/arithmetic_u32.rs b/plonky2/src/gadgets/arithmetic_u32.rs index 6116f61b..0fbd076f 100644 --- a/plonky2/src/gadgets/arithmetic_u32.rs +++ b/plonky2/src/gadgets/arithmetic_u32.rs @@ -4,6 +4,7 @@ use crate::gates::arithmetic_u32::U32ArithmeticGate; use crate::gates::subtraction_u32::U32SubtractionGate; use crate::hash::hash_types::RichField; use crate::iop::target::Target; +use crate::iop::witness::{PartitionWitness, Witness}; use crate::plonk::circuit_builder::CircuitBuilder; #[derive(Clone, Copy, Debug)] @@ -152,4 +153,85 @@ impl, const D: usize> CircuitBuilder { (output_result, output_borrow) } + + pub fn split_to_u32(&mut self, x: Target) -> (U32Target, U32Target) { + let low = self.add_virtual_u32_target(); + let high = self.add_virtual_u32_target(); + + let base = self.constant(F::from_canonical_u64(1u64 << 32)); + let combined = self.mul_add(high.0, base, low.0); + self.connect(x, combined); + + self.add_simple_generator(SplitToU32Generator:: { + x: x.clone(), + low: low.clone(), + high: high.clone(), + _phantom: PhantomData, + }); + + (low, high) + } } + +#[derive(Debug)] +struct SplitToU32Generator, const D: usize> { + x: Target, + low: U32Target, + high: U32Target, + _phantom: PhantomData, +} + +impl, const D: usize> SimpleGenerator + for SplitToU32Generator +{ + fn dependencies(&self) -> Vec { + vec![self.x] + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let x = witness.get_target(self.x.clone()); + let x_u64 = x.to_canonical_u64(); + let low = x_u64 as u32; + let high: u32 = (x_u64 >> 32).try_into().unwrap(); + println!("LOW: {}", low); + println!("HIGH: {}", high); + + out_buffer.set_u32_target(self.low.clone(), low); + out_buffer.set_u32_target(self.high.clone(), high); + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + + use rand::{thread_rng, Rng}; + + use crate::field::goldilocks_field::GoldilocksField; + use crate::iop::witness::PartialWitness; + use crate::plonk::circuit_builder::CircuitBuilder; + use crate::plonk::circuit_data::CircuitConfig; + use crate::plonk::verifier::verify; + + #[test] + pub fn test_add_many_u32s() -> Result<()> { + type F = GoldilocksField; + const D: usize = 4; + + let config = CircuitConfig::standard_recursion_config(); + + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let mut rng = thread_rng(); + let mut to_add = Vec::new(); + for _ in 0..10 { + to_add.push(builder.constant_u32(rng.gen())); + } + let _ = builder.add_many_u32(&to_add); + + let data = builder.build(); + let proof = data.prove(pw).unwrap(); + verify(proof, &data.verifier_only, &data.common) + } +} \ No newline at end of file 
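The `split_to_u32` gadget added in the hunk above constrains `x = high * 2^32 + low` via `mul_add` and fills in `low`/`high` with `SplitToU32Generator`, which computes them from the canonical `u64` representation of the witness value. The following plain-Rust sketch mirrors that out-of-circuit decomposition; it has no plonky2 dependencies and the helper name is illustrative only, not library API.

    /// Minimal native sketch of the limb split performed by `SplitToU32Generator`:
    /// decompose a canonical 64-bit value into its low and high 32-bit halves.
    fn split_u64_to_u32s(x: u64) -> (u32, u32) {
        let low = x as u32; // least-significant 32 bits
        let high = (x >> 32) as u32; // most-significant 32 bits
        (low, high)
    }

    fn main() {
        let x: u64 = 0x1234_5678_9abc_def0;
        let (low, high) = split_u64_to_u32s(x);

        // The circuit enforces the same relation with `mul_add`:
        // x == high * 2^32 + low.
        assert_eq!(((high as u64) << 32) + low as u64, x);
        println!("low = {low:#010x}, high = {high:#010x}");
    }

Working in 32-bit limbs is convenient in a roughly 64-bit field like Goldilocks because the product of two limbs still fits in a single field element, which is what the surrounding u32 and biguint gadgets exploit.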
diff --git a/waksman/src/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs similarity index 100% rename from waksman/src/ecdsa.rs rename to plonky2/src/gadgets/ecdsa.rs diff --git a/plonky2/src/gadgets/mod.rs b/plonky2/src/gadgets/mod.rs index ec4d1263..5dacdb51 100644 --- a/plonky2/src/gadgets/mod.rs +++ b/plonky2/src/gadgets/mod.rs @@ -2,6 +2,7 @@ pub mod arithmetic; pub mod arithmetic_extension; pub mod arithmetic_u32; pub mod biguint; +pub mod binary_arithmetic; pub mod curve; pub mod ecdsa; pub mod hash; diff --git a/plonky2/src/gadgets/multiple_comparison.rs b/plonky2/src/gadgets/multiple_comparison.rs index 88b94f3f..70afcab5 100644 --- a/plonky2/src/gadgets/multiple_comparison.rs +++ b/plonky2/src/gadgets/multiple_comparison.rs @@ -60,8 +60,13 @@ impl, const D: usize> CircuitBuilder { /// Helper function for comparing, specifically, lists of `U32Target`s. pub fn list_le_u32(&mut self, a: Vec, b: Vec) -> BoolTarget { - let a_targets = a.iter().map(|&t| t.0).collect(); - let b_targets = b.iter().map(|&t| t.0).collect(); + // let a_targets = a.iter().map(|&t| t.0).collect(); + // let b_targets = b.iter().map(|&t| t.0).collect(); + // self.list_le(a_targets, b_targets, 32) + + let num = a.len() / 2; + let a_targets = self.add_virtual_targets(num); + let b_targets = self.add_virtual_targets(num); self.list_le(a_targets, b_targets, 32) } } diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 824d851c..cca22fd1 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -39,6 +39,10 @@ impl, const D: usize> CircuitBuilder { self.biguint_to_nonnative(&x_biguint) } + pub fn zero_nonnative(&mut self) -> NonNativeTarget { + self.constant_nonnative(FF::ZERO) + } + // Assert that two NonNativeTarget's, both assumed to be in reduced form, are equal. pub fn connect_nonnative( &mut self, @@ -70,6 +74,22 @@ impl, const D: usize> CircuitBuilder { self.reduce(&result) } + pub fn add_many_nonnative( + &mut self, + to_add: &[NonNativeTarget], + ) -> NonNativeTarget { + if to_add.len() == 1 { + return to_add[0].clone(); + } + + let mut result = self.add_biguint(&to_add[0].value, &to_add[1].value); + for i in 2..to_add.len() { + result = self.add_biguint(&result, &to_add[i].value); + } + + self.reduce(&result) + } + // Subtract two `NonNativeTarget`s. 
pub fn sub_nonnative( &mut self, @@ -94,6 +114,22 @@ impl, const D: usize> CircuitBuilder { self.reduce(&result) } + pub fn mul_many_nonnative( + &mut self, + to_mul: &[NonNativeTarget], + ) -> NonNativeTarget { + if to_mul.len() == 1 { + return to_mul[0].clone(); + } + + let mut result = self.mul_biguint(&to_mul[0].value, &to_mul[1].value); + for i in 2..to_mul.len() { + result = self.mul_biguint(&result, &to_mul[i].value); + } + + self.reduce(&result) + } + pub fn neg_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { let zero_target = self.constant_biguint(&BigUint::zero()); let zero_ff = self.biguint_to_nonnative(&zero_target); @@ -104,21 +140,27 @@ impl, const D: usize> CircuitBuilder { pub fn inv_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { let num_limbs = x.value.num_limbs(); let inv_biguint = self.add_virtual_biguint_target(num_limbs); + let div = self.add_virtual_biguint_target(num_limbs); + + self.add_simple_generator(NonNativeInverseGenerator:: { + x: x.clone(), + inv: inv_biguint.clone(), + div: div.clone(), + _phantom: PhantomData, + }); + + let product = self.mul_biguint(&x.value, &inv_biguint); + + let modulus = self.constant_biguint(&FF::order()); + let mod_times_div = self.mul_biguint(&modulus, &div); + let one = self.constant_biguint(&BigUint::one()); + let expected_product = self.add_biguint(&mod_times_div, &one); + self.connect_biguint(&product, &expected_product); + let inv = NonNativeTarget:: { value: inv_biguint, _phantom: PhantomData, }; - - self.add_simple_generator(NonNativeInverseGenerator:: { - x: x.clone(), - inv: inv.clone(), - _phantom: PhantomData, - }); - - let product = self.mul_nonnative(x, &inv); - let one = self.constant_nonnative(FF::ONE); - self.connect_nonnative(&product, &one); - inv } @@ -138,10 +180,70 @@ impl, const D: usize> CircuitBuilder { /// Returns `x % |FF|` as a `NonNativeTarget`. fn reduce(&mut self, x: &BigUintTarget) -> NonNativeTarget { + println!("NUM LIMBS: {}", x.limbs.len()); + let before = self.num_gates(); + let modulus = FF::order(); let order_target = self.constant_biguint(&modulus); let value = self.rem_biguint(x, &order_target); + println!("NUMBER OF GATES: {}", self.num_gates() - before); + println!("OUTPUT LIMBS: {}", value.limbs.len()); + + NonNativeTarget { + value, + _phantom: PhantomData, + } + } + + /// Returns `x % |FF|` as a `NonNativeTarget`. 
+ fn reduce_by_bits(&mut self, x: &BigUintTarget) -> NonNativeTarget { + println!("NUM LIMBS: {}", x.limbs.len()); + let before = self.num_gates(); + + let mut powers_of_two = Vec::new(); + let mut cur_power_of_two = FF::ONE; + let two = FF::TWO; + let mut max_num_limbs = 0; + for _ in 0..(x.limbs.len() * 32) { + let cur_power = self.constant_biguint(&cur_power_of_two.to_biguint()); + max_num_limbs = max_num_limbs.max(cur_power.limbs.len()); + powers_of_two.push(cur_power.limbs); + + cur_power_of_two *= two; + } + + let mut result_limbs_unreduced = vec![self.zero(); max_num_limbs]; + for i in 0..x.limbs.len() { + let this_limb = x.limbs[i]; + let bits = self.split_le(this_limb.0, 32); + for b in 0..bits.len() { + let this_power = powers_of_two[32 * i + b].clone(); + for x in 0..this_power.len() { + result_limbs_unreduced[x] = self.mul_add(bits[b].target, this_power[x].0, result_limbs_unreduced[x]); + } + } + } + + let mut result_limbs_reduced = Vec::new(); + let mut carry = self.zero_u32(); + for i in 0..result_limbs_unreduced.len() { + println!("{}", i); + let (low, high) = self.split_to_u32(result_limbs_unreduced[i]); + let (cur, overflow) = self.add_u32(carry, low); + let (new_carry, _) = self.add_many_u32(&[overflow, high, carry]); + result_limbs_reduced.push(cur); + carry = new_carry; + } + result_limbs_reduced.push(carry); + + let value = BigUintTarget { + limbs: result_limbs_reduced, + }; + + println!("NUMBER OF GATES: {}", self.num_gates() - before); + println!("OUTPUT LIMBS: {}", value.limbs.len()); + NonNativeTarget { value, _phantom: PhantomData, @@ -190,7 +292,8 @@ impl, const D: usize> CircuitBuilder { #[derive(Debug)] struct NonNativeInverseGenerator, const D: usize, FF: Field> { x: NonNativeTarget, - inv: NonNativeTarget, + inv: BigUintTarget, + div: BigUintTarget, _phantom: PhantomData, } @@ -205,7 +308,14 @@ impl, const D: usize, FF: Field> SimpleGenerator let x = witness.get_nonnative_target(self.x.clone()); let inv = x.inverse(); - out_buffer.set_nonnative_target(self.inv.clone(), inv); + let x_biguint = x.to_biguint(); + let inv_biguint = inv.to_biguint(); + let prod = x_biguint * &inv_biguint; + let modulus = FF::order(); + let (div, _rem) = prod.div_rem(&modulus); + + out_buffer.set_biguint_target(self.div.clone(), div); + out_buffer.set_biguint_target(self.inv.clone(), inv_biguint); } } @@ -247,6 +357,43 @@ mod tests { verify(proof, &data.verifier_only, &data.common) } + #[test] + fn test_nonnative_many_adds() -> Result<()> { + type FF = Secp256K1Base; + let a_ff = FF::rand(); + let b_ff = FF::rand(); + let c_ff = FF::rand(); + let d_ff = FF::rand(); + let e_ff = FF::rand(); + let f_ff = FF::rand(); + let g_ff = FF::rand(); + let h_ff = FF::rand(); + let sum_ff = a_ff + b_ff + c_ff + d_ff + e_ff + f_ff + g_ff + h_ff; + + type F = GoldilocksField; + let config = CircuitConfig::standard_recursion_config(); + let pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + let a = builder.constant_nonnative(a_ff); + let b = builder.constant_nonnative(b_ff); + let c = builder.constant_nonnative(c_ff); + let d = builder.constant_nonnative(d_ff); + let e = builder.constant_nonnative(e_ff); + let f = builder.constant_nonnative(f_ff); + let g = builder.constant_nonnative(g_ff); + let h = builder.constant_nonnative(h_ff); + let all = [a, b, c, d, e, f, g, h]; + let sum = builder.add_many_nonnative(&all); + + let sum_expected = builder.constant_nonnative(sum_ff); + builder.connect_nonnative(&sum, &sum_expected); + + let data = builder.build(); + let proof 
= data.prove(pw).unwrap(); + verify(proof, &data.verifier_only, &data.common) + } + #[test] fn test_nonnative_sub() -> Result<()> { type FF = Secp256K1Base; @@ -285,6 +432,7 @@ mod tests { let x_ff = FF::rand(); let y_ff = FF::rand(); let product_ff = x_ff * y_ff; + println!("PRODUCT FF: {:?}", product_ff); let config = CircuitConfig::standard_recursion_config(); let pw = PartialWitness::new(); @@ -302,6 +450,38 @@ mod tests { verify(proof, &data.verifier_only, &data.common) } + fn test_nonnative_many_muls_helper(num: usize) { + type FF = Secp256K1Base; + + type F = GoldilocksField; + let config = CircuitConfig::standard_recursion_config(); + let mut unop_builder = CircuitBuilder::::new(config.clone()); + let mut op_builder = CircuitBuilder::::new(config); + + println!("NUM: {}", num); + + let ffs: Vec<_> = (0..num).map(|_| FF::rand()).collect(); + + let op_targets: Vec<_> = ffs.iter().map(|&x| op_builder.constant_nonnative(x)).collect(); + op_builder.mul_many_nonnative(&op_targets); + println!("OPTIMIZED GATE COUNT: {}", op_builder.num_gates()); + + let unop_targets: Vec<_> = ffs.iter().map(|&x| unop_builder.constant_nonnative(x)).collect(); + let mut result = unop_targets[0].clone(); + for i in 1..unop_targets.len() { + result = unop_builder.mul_nonnative(&result, &unop_targets[i]); + } + + println!("UNOPTIMIZED GATE COUNT: {}", unop_builder.num_gates()); + } + + #[test] + fn test_nonnative_many_muls() { + for num in 2..10 { + test_nonnative_many_muls_helper(num); + } + } + #[test] fn test_nonnative_neg() -> Result<()> { type FF = Secp256K1Base; diff --git a/plonky2/src/gates/mod.rs b/plonky2/src/gates/mod.rs index a3f92615..ac4600fa 100644 --- a/plonky2/src/gates/mod.rs +++ b/plonky2/src/gates/mod.rs @@ -3,6 +3,8 @@ pub mod arithmetic_base; pub mod arithmetic_extension; +pub mod binary_arithmetic; +pub mod binary_subtraction; pub mod arithmetic_u32; pub mod assert_le; pub mod base_sum; diff --git a/plonky2/src/iop/generator.rs b/plonky2/src/iop/generator.rs index 368232fd..994ba62b 100644 --- a/plonky2/src/iop/generator.rs +++ b/plonky2/src/iop/generator.rs @@ -7,6 +7,7 @@ use plonky2_field::field_types::Field; use crate::gadgets::arithmetic_u32::U32Target; use crate::gadgets::biguint::BigUintTarget; +use crate::gadgets::binary_arithmetic::BinaryTarget; use crate::gadgets::nonnative::NonNativeTarget; use crate::hash::hash_types::{HashOut, HashOutTarget, RichField}; use crate::iop::ext_target::ExtensionTarget; @@ -161,10 +162,14 @@ impl GeneratedValues { self.target_values.push((target, value)) } - fn set_u32_target(&mut self, target: U32Target, value: u32) { + pub fn set_u32_target(&mut self, target: U32Target, value: u32) { self.set_target(target.0, F::from_canonical_u32(value)) } + pub fn set_binary_target(&mut self, target: BinaryTarget, value: F) { + self.set_target(target.0, value) + } + pub fn set_biguint_target(&mut self, target: BigUintTarget, value: BigUint) { let mut limbs = value.to_u32_digits(); assert!(target.num_limbs() >= limbs.len()); diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index d9bcc1cf..c9d01abe 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -19,6 +19,8 @@ use crate::gadgets::polynomial::PolynomialCoeffsExtTarget; use crate::gates::arithmetic_base::ArithmeticGate; use crate::gates::arithmetic_extension::ArithmeticExtensionGate; use crate::gates::arithmetic_u32::U32ArithmeticGate; +use crate::gates::binary_arithmetic::BinaryArithmeticGate; +use 
crate::gates::binary_subtraction::BinarySubtractionGate; use crate::gates::constant::ConstantGate; use crate::gates::gate::{Gate, GateInstance, GateRef, PrefixedGate}; use crate::gates::gate_tree::Tree; @@ -222,6 +224,11 @@ impl, const D: usize> CircuitBuilder { let gate_ref = GateRef::new(gate_type); self.gates.insert(gate_ref.clone()); + /*println!("ADDING GATE {}: {:?}", index, gate_ref); + if index == 145 { + panic!(); + }*/ + self.gate_instances.push(GateInstance { gate_ref, constants, @@ -346,6 +353,11 @@ impl, const D: usize> CircuitBuilder { U32Target(self.constant(F::from_canonical_u32(c))) } + /// Returns a BinaryTarget for the value `c`, which is assumed to be at most BITS bits. + pub fn constant_binary(&mut self, c: F) -> BinaryTarget { + BinaryTarget(self.constant(c)) + } + /// If the given target is a constant (i.e. it was created by the `constant(F)` method), returns /// its constant value. Otherwise, returns `None`. pub fn target_as_constant(&self, target: Target) -> Option { @@ -818,10 +830,14 @@ pub struct BatchedGates, const D: usize> { /// The `U32ArithmeticGate` currently being filled (so new u32 arithmetic operations will be added to this gate before creating a new one) pub(crate) current_u32_arithmetic_gate: Option<(usize, usize)>, - /// The `U32SubtractionGate` currently being filled (so new u32 subtraction operations will be added to this gate before creating a new one) pub(crate) current_u32_subtraction_gate: Option<(usize, usize)>, + /// A map `b -> (g, i)` from `b` bits to an available `BinaryArithmeticGate` for number of bits `b`. + pub(crate) free_binary_arithmetic_gate: HashMap, + /// A map `b -> (g, i)` from `b` bits to an available `BinarySubtractionGate` for number of bits `b`. + pub(crate) free_binary_subtraction_gate: HashMap, + /// An available `ConstantGate` instance, if any. pub(crate) free_constant: Option<(usize, usize)>, } @@ -836,6 +852,8 @@ impl, const D: usize> BatchedGates { current_switch_gates: Vec::new(), current_u32_arithmetic_gate: None, current_u32_subtraction_gate: None, + free_binary_arithmetic_gate: HashMap::new(), + free_binary_subtraction_gate: HashMap::new(), free_constant: None, } } @@ -931,8 +949,8 @@ impl, const D: usize> CircuitBuilder { (gate, i) } - /// Finds the last available random access gate with the given `vec_size` or add one if there aren't any. - /// Returns `(g,i)` such that there is a random access gate with the given `vec_size` at index + /// Finds the last available random access gate with the given `bits` or add one if there aren't any. + /// Returns `(g,i)` such that there is a random access gate for the given `bits` at index /// `g` and the gate's `i`-th random access is available. pub(crate) fn find_random_access_gate(&mut self, bits: usize) -> (usize, usize) { let (gate, i) = self @@ -1031,6 +1049,64 @@ impl, const D: usize> CircuitBuilder { (gate_index, copy) } + + /// Finds the last available binary arithmetic with the given `bits` or add one if there aren't any. + /// Returns `(g,i)` such that there is a binary arithmetic for the given `bits` at index + /// `g` and the gate's `i`-th copy is available. + pub(crate) fn find_binary_arithmetic_gate(&mut self) -> (usize, usize) { + let (gate, i) = self + .batched_gates + .free_binary_arithmetic_gate + .get(&BITS) + .copied() + .unwrap_or_else(|| { + let gate = self.add_gate( + BinaryArithmeticGate::::new_from_config(&self.config), + vec![], + ); + (gate, 0) + }); + + // Update `free_binary_arithmetic` with new values. 
+ if i + 1 < BinaryArithmeticGate::::new_from_config(&self.config).num_ops { + self.batched_gates + .free_random_access + .insert(BITS, (gate, i + 1)); + } else { + self.batched_gates.free_random_access.remove(&BITS); + } + + (gate, i) + } + + /// Finds the last available binary subtraction with the given `bits` or add one if there aren't any. + /// Returns `(g,i)` such that there is a binary subtraction for the given `bits` at index + /// `g` and the gate's `i`-th copy is available. + pub(crate) fn find_binary_subtraction_gate(&mut self) -> (usize, usize) { + let (gate, i) = self + .batched_gates + .free_binary_subtraction_gate + .get(&BITS) + .copied() + .unwrap_or_else(|| { + let gate = self.add_gate( + BinarySubtractionGate::::new_from_config(&self.config), + vec![], + ); + (gate, 0) + }); + + // Update `free_binary_subtraction` with new values. + if i + 1 < BinarySubtractionGate::::new_from_config(&self.config).num_ops { + self.batched_gates + .free_random_access + .insert(BITS, (gate, i + 1)); + } else { + self.batched_gates.free_random_access.remove(&BITS); + } + + (gate, i) + } /// Returns the gate index and copy index of a free `ConstantGate` slot, potentially adding a /// new `ConstantGate` if needed. From 82ce3ea8b2c1202a97ef2f20fa543bf7e8660f52 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 20 Jan 2022 16:07:54 -0800 Subject: [PATCH 055/143] ECDSA merge --- plonky2/src/gadgets/biguint.rs | 50 ++++++++++++---------- plonky2/src/gadgets/ecdsa.rs | 4 +- plonky2/src/gadgets/multiple_comparison.rs | 13 ++---- plonky2/src/gadgets/nonnative.rs | 12 +++--- plonky2/src/iop/generator.rs | 15 +++++-- 5 files changed, 51 insertions(+), 43 deletions(-) diff --git a/plonky2/src/gadgets/biguint.rs b/plonky2/src/gadgets/biguint.rs index 77013d27..289a9589 100644 --- a/plonky2/src/gadgets/biguint.rs +++ b/plonky2/src/gadgets/biguint.rs @@ -12,7 +12,7 @@ use crate::plonk::circuit_builder::CircuitBuilder; #[derive(Clone, Debug)] pub struct BigUintTarget { - pub limbs: Vec, + pub limbs: Vec>, } impl BigUintTarget { @@ -20,15 +20,23 @@ impl BigUintTarget { self.limbs.len() } - pub fn get_limb(&self, i: usize) -> U32Target { + pub fn get_limb(&self, i: usize) -> BinaryTarget<30> { self.limbs[i] } } impl, const D: usize> CircuitBuilder { pub fn constant_biguint(&mut self, value: &BigUint) -> BigUintTarget { - let limb_values = value.to_u32_digits(); - let limbs = limb_values.iter().map(|&l| self.constant_u32(l)).collect(); + let base = BigUint::from_u64(1 << 30).unwrap(); + let mut limb_values = Vec::new(); + let mut current = value.clone(); + while current > BigUint::zero() { + let (div, rem) = current.div_rem(&base); + current = div; + let rem_u64 = rem.to_u64_digits()[0]; + limb_values.push(F::from_canonical_u64(rem_u64)); + } + let limbs = limb_values.iter().map(|&l| self.constant_binary(l)).collect(); BigUintTarget { limbs } } @@ -36,14 +44,14 @@ impl, const D: usize> CircuitBuilder { pub fn connect_biguint(&mut self, lhs: &BigUintTarget, rhs: &BigUintTarget) { let min_limbs = lhs.num_limbs().min(rhs.num_limbs()); for i in 0..min_limbs { - self.connect_u32(lhs.get_limb(i), rhs.get_limb(i)); + self.connect_binary(lhs.get_limb(i), rhs.get_limb(i)); } for i in min_limbs..lhs.num_limbs() { - self.assert_zero_u32(lhs.get_limb(i)); + self.assert_zero_binary(lhs.get_limb(i)); } for i in min_limbs..rhs.num_limbs() { - self.assert_zero_u32(rhs.get_limb(i)); + self.assert_zero_binary(rhs.get_limb(i)); } } @@ -55,14 +63,14 @@ impl, const D: usize> CircuitBuilder { if a.num_limbs() > b.num_limbs() 
{ let mut padded_b = b.clone(); for _ in b.num_limbs()..a.num_limbs() { - padded_b.limbs.push(self.zero_u32()); + padded_b.limbs.push(self.zero_binary()); } (a.clone(), padded_b) } else { let mut padded_a = a.clone(); for _ in a.num_limbs()..b.num_limbs() { - padded_a.limbs.push(self.zero_u32()); + padded_a.limbs.push(self.zero_binary()); } (padded_a, b.clone()) @@ -72,13 +80,11 @@ impl, const D: usize> CircuitBuilder { pub fn cmp_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BoolTarget { let (a, b) = self.pad_biguints(a, b); - self.list_le_u32(a.limbs, b.limbs) + self.list_le_binary::<30>(a.limbs, b.limbs) } pub fn add_virtual_biguint_target(&mut self, num_limbs: usize) -> BigUintTarget { - let limbs = (0..num_limbs) - .map(|_| self.add_virtual_u32_target()) - .collect(); + let limbs = self.add_virtual_binary_targets(num_limbs); BigUintTarget { limbs } } @@ -88,16 +94,16 @@ impl, const D: usize> CircuitBuilder { let num_limbs = a.num_limbs().max(b.num_limbs()); let mut combined_limbs = vec![]; - let mut carry = self.zero_u32(); + let mut carry = self.zero_binary(); for i in 0..num_limbs { let a_limb = (i < a.num_limbs()) .then(|| a.limbs[i]) - .unwrap_or_else(|| self.zero_u32()); + .unwrap_or_else(|| self.zero_binary()); let b_limb = (i < b.num_limbs()) .then(|| b.limbs[i]) - .unwrap_or_else(|| self.zero_u32()); + .unwrap_or_else(|| self.zero_binary()); - let (new_limb, new_carry) = self.add_many_u32(&[carry, a_limb, b_limb]); + let (new_limb, new_carry) = self.add_many_binary(&[carry, a_limb, b_limb]); carry = new_carry; combined_limbs.push(new_limb); } @@ -115,9 +121,9 @@ impl, const D: usize> CircuitBuilder { let mut result_limbs = vec![]; - let mut borrow = self.zero_u32(); + let mut borrow = self.zero_binary(); for i in 0..num_limbs { - let (result, new_borrow) = self.sub_u32(a.limbs[i], b.limbs[i], borrow); + let (result, new_borrow) = self.sub_binary(a.limbs[i], b.limbs[i], borrow); result_limbs.push(result); borrow = new_borrow; } @@ -134,17 +140,17 @@ impl, const D: usize> CircuitBuilder { let mut to_add = vec![vec![]; total_limbs]; for i in 0..a.limbs.len() { for j in 0..b.limbs.len() { - let (product, carry) = self.mul_u32(a.limbs[i], b.limbs[j]); + let (product, carry) = self.mul_binary(a.limbs[i], b.limbs[j]); to_add[i + j].push(product); to_add[i + j + 1].push(carry); } } let mut combined_limbs = vec![]; - let mut carry = self.zero_u32(); + let mut carry = self.zero_binary(); for summands in &mut to_add { summands.push(carry); - let (new_result, new_carry) = self.add_many_u32(summands); + let (new_result, new_carry) = self.add_many_binary(summands); combined_limbs.push(new_result); carry = new_carry; } diff --git a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index 362fbc6d..20fca13d 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; use crate::curve::curve_types::Curve; use crate::field::extension_field::Extendable; use crate::field::field_types::RichField; -use crate::gadgets::arithmetic_u32::U32Target; +use crate::gadgets::binary_arithmetic::BinaryTarget; use crate::gadgets::biguint::BigUintTarget; use crate::gadgets::curve::AffinePointTarget; use crate::gadgets::nonnative::NonNativeTarget; @@ -38,7 +38,7 @@ impl, const D: usize> CircuitBuilder { for &bit in rev_bits { sum = self.mul_add(two, sum, bit.target); } - let limbs = vec![U32Target(sum)]; + let limbs = vec![BinaryTarget::<30>(sum)]; let value = BigUintTarget { limbs }; NonNativeTarget { diff --git 
a/plonky2/src/gadgets/multiple_comparison.rs b/plonky2/src/gadgets/multiple_comparison.rs index 70afcab5..acb157e5 100644 --- a/plonky2/src/gadgets/multiple_comparison.rs +++ b/plonky2/src/gadgets/multiple_comparison.rs @@ -59,15 +59,10 @@ impl, const D: usize> CircuitBuilder { } /// Helper function for comparing, specifically, lists of `U32Target`s. - pub fn list_le_u32(&mut self, a: Vec, b: Vec) -> BoolTarget { - // let a_targets = a.iter().map(|&t| t.0).collect(); - // let b_targets = b.iter().map(|&t| t.0).collect(); - // self.list_le(a_targets, b_targets, 32) - - let num = a.len() / 2; - let a_targets = self.add_virtual_targets(num); - let b_targets = self.add_virtual_targets(num); - self.list_le(a_targets, b_targets, 32) + pub fn list_le_binary(&mut self, a: Vec>, b: Vec>) -> BoolTarget { + let a_targets = a.iter().map(|&t| t.0).collect(); + let b_targets = b.iter().map(|&t| t.0).collect(); + self.list_le(a_targets, b_targets, BITS) } } diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index cca22fd1..946bf35a 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -5,10 +5,8 @@ use plonky2_field::{extension_field::Extendable, field_types::Field}; use plonky2_util::ceil_div_usize; use crate::gadgets::arithmetic_u32::U32Target; -use crate::gadgets::biguint::BigUintTarget; -use crate::hash::hash_types::RichField; -use crate::iop::generator::{GeneratedValues, SimpleGenerator}; -use crate::iop::target::{BoolTarget, Target}; +use crate::field::field_types::RichField; +use crate::gadgets::binary_arithmetic::BinaryTarget; use crate::iop::witness::{PartitionWitness, Witness}; use crate::plonk::circuit_builder::CircuitBuilder; @@ -197,7 +195,7 @@ impl, const D: usize> CircuitBuilder { } /// Returns `x % |FF|` as a `NonNativeTarget`. 
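The `list_le_binary` helper in the hunk above just forwards the underlying targets to `list_le`. For reference, here is a minimal native-Rust sketch (outside the circuit; the function name is illustrative) of the relation it is meant to encode: both operands are little-endian limb lists of equal length, compared as integers.

```rust
/// Native reference for the limb-list comparison: `a` and `b` are little-endian
/// limb lists of the same length; returns true iff the integer a <= the integer b.
fn list_le_native(a: &[u64], b: &[u64]) -> bool {
    assert_eq!(a.len(), b.len());
    // Scan from the most significant limb down; the first differing limb decides.
    for (&x, &y) in a.iter().rev().zip(b.iter().rev()) {
        if x != y {
            return x < y;
        }
    }
    true // all limbs are equal, so a == b
}
```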
- fn reduce_by_bits(&mut self, x: &BigUintTarget) -> NonNativeTarget { + /*fn reduce_by_bits(&mut self, x: &BigUintTarget) -> NonNativeTarget { println!("NUM LIMBS: {}", x.limbs.len()); let before = self.num_gates(); @@ -248,7 +246,7 @@ impl, const D: usize> CircuitBuilder { value, _phantom: PhantomData, } - } + }*/ #[allow(dead_code)] fn reduce_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { @@ -257,7 +255,7 @@ impl, const D: usize> CircuitBuilder { } pub fn bool_to_nonnative(&mut self, b: &BoolTarget) -> NonNativeTarget { - let limbs = vec![U32Target(b.target)]; + let limbs = vec![BinaryTarget::<30>(b.target)]; let value = BigUintTarget { limbs }; NonNativeTarget { diff --git a/plonky2/src/iop/generator.rs b/plonky2/src/iop/generator.rs index 994ba62b..70f99d54 100644 --- a/plonky2/src/iop/generator.rs +++ b/plonky2/src/iop/generator.rs @@ -171,12 +171,21 @@ impl GeneratedValues { } pub fn set_biguint_target(&mut self, target: BigUintTarget, value: BigUint) { - let mut limbs = value.to_u32_digits(); + let base = BigUint::from_u64(1 << 30).unwrap(); + let mut limbs = Vec::new(); + let mut current = value.clone(); + while current > BigUint::zero() { + let (div, rem) = current.div_rem(&base); + current = div; + let rem_u64 = rem.to_u64_digits()[0]; + limbs.push(F::from_canonical_u64(rem_u64)); + } + assert!(target.num_limbs() >= limbs.len()); - limbs.resize(target.num_limbs(), 0); + limbs.resize(target.num_limbs(), F::ZERO); for i in 0..target.num_limbs() { - self.set_u32_target(target.get_limb(i), limbs[i]); + self.set_binary_target(target.get_limb(i), limbs[i]); } } From c561333c220df6a5f2095ead952bb59c2b982d05 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 20 Jan 2022 16:09:11 -0800 Subject: [PATCH 056/143] ECDSA merge --- plonky2/src/gadgets/arithmetic_u32.rs | 3 +-- plonky2/src/gadgets/biguint.rs | 5 ++++- plonky2/src/gadgets/ecdsa.rs | 2 +- plonky2/src/gadgets/multiple_comparison.rs | 6 +++++- plonky2/src/gadgets/nonnative.rs | 14 ++++++++++---- plonky2/src/gates/mod.rs | 4 ++-- plonky2/src/plonk/circuit_builder.rs | 2 +- 7 files changed, 24 insertions(+), 12 deletions(-) diff --git a/plonky2/src/gadgets/arithmetic_u32.rs b/plonky2/src/gadgets/arithmetic_u32.rs index 0fbd076f..0ba50c85 100644 --- a/plonky2/src/gadgets/arithmetic_u32.rs +++ b/plonky2/src/gadgets/arithmetic_u32.rs @@ -204,7 +204,6 @@ impl, const D: usize> SimpleGenerator #[cfg(test)] mod tests { use anyhow::Result; - use rand::{thread_rng, Rng}; use crate::field::goldilocks_field::GoldilocksField; @@ -234,4 +233,4 @@ mod tests { let proof = data.prove(pw).unwrap(); verify(proof, &data.verifier_only, &data.common) } -} \ No newline at end of file +} diff --git a/plonky2/src/gadgets/biguint.rs b/plonky2/src/gadgets/biguint.rs index 289a9589..6f9081b7 100644 --- a/plonky2/src/gadgets/biguint.rs +++ b/plonky2/src/gadgets/biguint.rs @@ -36,7 +36,10 @@ impl, const D: usize> CircuitBuilder { let rem_u64 = rem.to_u64_digits()[0]; limb_values.push(F::from_canonical_u64(rem_u64)); } - let limbs = limb_values.iter().map(|&l| self.constant_binary(l)).collect(); + let limbs = limb_values + .iter() + .map(|&l| self.constant_binary(l)) + .collect(); BigUintTarget { limbs } } diff --git a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index 20fca13d..6466cea7 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -3,8 +3,8 @@ use std::marker::PhantomData; use crate::curve::curve_types::Curve; use crate::field::extension_field::Extendable; use 
crate::field::field_types::RichField; -use crate::gadgets::binary_arithmetic::BinaryTarget; use crate::gadgets::biguint::BigUintTarget; +use crate::gadgets::binary_arithmetic::BinaryTarget; use crate::gadgets::curve::AffinePointTarget; use crate::gadgets::nonnative::NonNativeTarget; use crate::iop::target::{BoolTarget, Target}; diff --git a/plonky2/src/gadgets/multiple_comparison.rs b/plonky2/src/gadgets/multiple_comparison.rs index acb157e5..222a6858 100644 --- a/plonky2/src/gadgets/multiple_comparison.rs +++ b/plonky2/src/gadgets/multiple_comparison.rs @@ -59,7 +59,11 @@ impl, const D: usize> CircuitBuilder { } /// Helper function for comparing, specifically, lists of `U32Target`s. - pub fn list_le_binary(&mut self, a: Vec>, b: Vec>) -> BoolTarget { + pub fn list_le_binary( + &mut self, + a: Vec>, + b: Vec>, + ) -> BoolTarget { let a_targets = a.iter().map(|&t| t.0).collect(); let b_targets = b.iter().map(|&t| t.0).collect(); self.list_le(a_targets, b_targets, BITS) diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 946bf35a..12709300 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -139,7 +139,7 @@ impl, const D: usize> CircuitBuilder { let num_limbs = x.value.num_limbs(); let inv_biguint = self.add_virtual_biguint_target(num_limbs); let div = self.add_virtual_biguint_target(num_limbs); - + self.add_simple_generator(NonNativeInverseGenerator:: { x: x.clone(), inv: inv_biguint.clone(), @@ -148,7 +148,7 @@ impl, const D: usize> CircuitBuilder { }); let product = self.mul_biguint(&x.value, &inv_biguint); - + let modulus = self.constant_biguint(&FF::order()); let mod_times_div = self.mul_biguint(&modulus, &div); let one = self.constant_biguint(&BigUint::one()); @@ -460,11 +460,17 @@ mod tests { let ffs: Vec<_> = (0..num).map(|_| FF::rand()).collect(); - let op_targets: Vec<_> = ffs.iter().map(|&x| op_builder.constant_nonnative(x)).collect(); + let op_targets: Vec<_> = ffs + .iter() + .map(|&x| op_builder.constant_nonnative(x)) + .collect(); op_builder.mul_many_nonnative(&op_targets); println!("OPTIMIZED GATE COUNT: {}", op_builder.num_gates()); - let unop_targets: Vec<_> = ffs.iter().map(|&x| unop_builder.constant_nonnative(x)).collect(); + let unop_targets: Vec<_> = ffs + .iter() + .map(|&x| unop_builder.constant_nonnative(x)) + .collect(); let mut result = unop_targets[0].clone(); for i in 1..unop_targets.len() { result = unop_builder.mul_nonnative(&result, &unop_targets[i]); diff --git a/plonky2/src/gates/mod.rs b/plonky2/src/gates/mod.rs index ac4600fa..f63080e5 100644 --- a/plonky2/src/gates/mod.rs +++ b/plonky2/src/gates/mod.rs @@ -3,11 +3,11 @@ pub mod arithmetic_base; pub mod arithmetic_extension; -pub mod binary_arithmetic; -pub mod binary_subtraction; pub mod arithmetic_u32; pub mod assert_le; pub mod base_sum; +pub mod binary_arithmetic; +pub mod binary_subtraction; pub mod comparison; pub mod constant; pub mod exponentiation; diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index c9d01abe..f9704e0e 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -1049,7 +1049,7 @@ impl, const D: usize> CircuitBuilder { (gate_index, copy) } - + /// Finds the last available binary arithmetic with the given `bits` or add one if there aren't any. /// Returns `(g,i)` such that there is a binary arithmetic for the given `bits` at index /// `g` and the gate's `i`-th copy is available. 
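The nonnative inverse gadget touched in the hunks above does not compute the inverse in-circuit. A generator supplies `inv` together with a quotient `div`, and the circuit only checks the integer identity `x * inv = div * |FF| + 1`. Below is a minimal native sketch of the witness-side computation, assuming the `num-bigint`/`num-integer`/`num-traits` crates used by this code (the function name is illustrative). Satisfying the identity forces `x * inv ≡ 1 (mod |FF|)`, so `inv` must be congruent to the inverse of `x`.

```rust
use num_bigint::BigUint;
use num_integer::Integer;
use num_traits::One;

/// Given x and a claimed modular inverse `inv`, return the quotient `div` such that
/// x * inv == div * modulus + 1. Panics if `inv` is not actually an inverse of x.
fn inverse_quotient(x: &BigUint, inv: &BigUint, modulus: &BigUint) -> BigUint {
    let (div, rem) = (x * inv).div_rem(modulus);
    assert!(rem.is_one()); // x * inv ≡ 1 (mod |FF|)
    div
}
```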
From f436c142421498b0c7c8224031df27394ac056f5 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 20 Jan 2022 16:09:31 -0800 Subject: [PATCH 057/143] ECDSA merge --- plonky2/src/gadgets/arithmetic_u32.rs | 2 -- plonky2/src/gadgets/nonnative.rs | 13 -------- plonky2/src/iop/witness.rs | 2 +- plonky2/src/plonk/circuit_builder.rs | 45 +++++++++++++++++++++------ 4 files changed, 37 insertions(+), 25 deletions(-) diff --git a/plonky2/src/gadgets/arithmetic_u32.rs b/plonky2/src/gadgets/arithmetic_u32.rs index 0ba50c85..af1682f6 100644 --- a/plonky2/src/gadgets/arithmetic_u32.rs +++ b/plonky2/src/gadgets/arithmetic_u32.rs @@ -193,8 +193,6 @@ impl, const D: usize> SimpleGenerator let x_u64 = x.to_canonical_u64(); let low = x_u64 as u32; let high: u32 = (x_u64 >> 32).try_into().unwrap(); - println!("LOW: {}", low); - println!("HIGH: {}", high); out_buffer.set_u32_target(self.low.clone(), low); out_buffer.set_u32_target(self.high.clone(), high); diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 12709300..20ecd382 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -178,16 +178,10 @@ impl, const D: usize> CircuitBuilder { /// Returns `x % |FF|` as a `NonNativeTarget`. fn reduce(&mut self, x: &BigUintTarget) -> NonNativeTarget { - println!("NUM LIMBS: {}", x.limbs.len()); - let before = self.num_gates(); - let modulus = FF::order(); let order_target = self.constant_biguint(&modulus); let value = self.rem_biguint(x, &order_target); - println!("NUMBER OF GATES: {}", self.num_gates() - before); - println!("OUTPUT LIMBS: {}", value.limbs.len()); - NonNativeTarget { value, _phantom: PhantomData, @@ -196,7 +190,6 @@ impl, const D: usize> CircuitBuilder { /// Returns `x % |FF|` as a `NonNativeTarget`. 
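After the cleanup above, `reduce` is nothing more than `rem_biguint` against the field order. A small native sketch of the pair of values involved, again assuming `num-bigint`/`num-integer`; the in-circuit `div_rem_biguint` (not shown in these hunks) is expected to receive both values from a generator and tie them back to `x` with a multiply-and-compare check, in the same style as the inverse gadget.

```rust
use num_bigint::BigUint;
use num_integer::Integer;

/// Native counterpart of `reduce`: split x into (quotient, remainder) with respect
/// to the field order, so that x == quotient * order + remainder and remainder < order.
fn reduce_native(x: &BigUint, order: &BigUint) -> (BigUint, BigUint) {
    let (quotient, remainder) = x.div_rem(order);
    debug_assert!(&remainder < order);
    debug_assert_eq!(&quotient * order + &remainder, *x);
    (quotient, remainder)
}
```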
/*fn reduce_by_bits(&mut self, x: &BigUintTarget) -> NonNativeTarget { - println!("NUM LIMBS: {}", x.limbs.len()); let before = self.num_gates(); let mut powers_of_two = Vec::new(); @@ -430,7 +423,6 @@ mod tests { let x_ff = FF::rand(); let y_ff = FF::rand(); let product_ff = x_ff * y_ff; - println!("PRODUCT FF: {:?}", product_ff); let config = CircuitConfig::standard_recursion_config(); let pw = PartialWitness::new(); @@ -456,8 +448,6 @@ mod tests { let mut unop_builder = CircuitBuilder::::new(config.clone()); let mut op_builder = CircuitBuilder::::new(config); - println!("NUM: {}", num); - let ffs: Vec<_> = (0..num).map(|_| FF::rand()).collect(); let op_targets: Vec<_> = ffs @@ -465,7 +455,6 @@ mod tests { .map(|&x| op_builder.constant_nonnative(x)) .collect(); op_builder.mul_many_nonnative(&op_targets); - println!("OPTIMIZED GATE COUNT: {}", op_builder.num_gates()); let unop_targets: Vec<_> = ffs .iter() @@ -475,8 +464,6 @@ mod tests { for i in 1..unop_targets.len() { result = unop_builder.mul_nonnative(&result, &unop_targets[i]); } - - println!("UNOPTIMIZED GATE COUNT: {}", unop_builder.num_gates()); } #[test] diff --git a/plonky2/src/iop/witness.rs b/plonky2/src/iop/witness.rs index efe4d911..558832d6 100644 --- a/plonky2/src/iop/witness.rs +++ b/plonky2/src/iop/witness.rs @@ -65,7 +65,7 @@ pub trait Witness { fn get_biguint_target(&self, target: BigUintTarget) -> BigUint { let mut result = BigUint::zero(); - let limb_base = BigUint::from_u64(1 << 32u64).unwrap(); + let limb_base = BigUint::from_u64(1 << 30u64).unwrap(); for i in (0..target.num_limbs()).rev() { let limb = target.get_limb(i); result *= &limb_base; diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index f9704e0e..ae65a53b 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -224,11 +224,6 @@ impl, const D: usize> CircuitBuilder { let gate_ref = GateRef::new(gate_type); self.gates.insert(gate_ref.clone()); - /*println!("ADDING GATE {}: {:?}", index, gate_ref); - if index == 145 { - panic!(); - }*/ - self.gate_instances.push(GateInstance { gate_ref, constants, @@ -1070,10 +1065,10 @@ impl, const D: usize> CircuitBuilder { // Update `free_binary_arithmetic` with new values. if i + 1 < BinaryArithmeticGate::::new_from_config(&self.config).num_ops { self.batched_gates - .free_random_access + .free_binary_arithmetic_gate .insert(BITS, (gate, i + 1)); } else { - self.batched_gates.free_random_access.remove(&BITS); + self.batched_gates.free_binary_arithmetic_gate.remove(&BITS); } (gate, i) @@ -1099,10 +1094,10 @@ impl, const D: usize> CircuitBuilder { // Update `free_binary_subtraction` with new values. if i + 1 < BinarySubtractionGate::::new_from_config(&self.config).num_ops { self.batched_gates - .free_random_access + .free_binary_subtraction_gate .insert(BITS, (gate, i + 1)); } else { - self.batched_gates.free_random_access.remove(&BITS); + self.batched_gates.free_binary_subtraction_gate.remove(&BITS); } (gate, i) @@ -1242,6 +1237,36 @@ impl, const D: usize> CircuitBuilder { } } + /// Fill the remaining unused binary arithmetic operations with zeros, so that all + /// `BinaryArithmeticGenerator`s are run. 
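Several hunks in these patches move the `BigUintTarget` limb base between 2^32 and 2^30; the encode/decode logic is the same either way. A self-contained round-trip sketch, assuming `num-bigint` and an illustrative `LIMB_BITS` constant:

```rust
use num_bigint::BigUint;
use num_integer::Integer;
use num_traits::Zero;

const LIMB_BITS: usize = 32; // these patches briefly use 30 before settling back on 32

/// Split a BigUint into little-endian limbs of LIMB_BITS bits each, mirroring the
/// loops in `constant_biguint` / `set_biguint_target`.
fn to_limbs(value: &BigUint) -> Vec<u64> {
    let base = BigUint::from(1u64) << LIMB_BITS;
    let mut limbs = Vec::new();
    let mut current = value.clone();
    while current > BigUint::zero() {
        let (div, rem) = current.div_rem(&base);
        current = div;
        limbs.push(rem.to_u64_digits().first().copied().unwrap_or(0));
    }
    limbs
}

/// Inverse of `to_limbs`: Horner evaluation from the most significant limb down,
/// mirroring `Witness::get_biguint_target`.
fn from_limbs(limbs: &[u64]) -> BigUint {
    let base = BigUint::from(1u64) << LIMB_BITS;
    let mut result = BigUint::zero();
    for &limb in limbs.iter().rev() {
        result = result * &base + limb;
    }
    result
}
```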
+ fn fill_binary_arithmetic_gates(&mut self) { + let zero = self.zero_binary::<30>(); + if let Some(&(_, i)) = self.batched_gates.free_binary_arithmetic_gate.get(&30) { + let max_copies = + BinaryArithmeticGate::::new_from_config(&self.config).num_ops; + for _ in i..max_copies { + let dummy = self.add_virtual_binary_target(); + self.mul_add_binary(dummy, dummy, dummy); + self.connect_binary(dummy, zero); + } + } + } + + /// Fill the remaining unused binary subtraction operations with zeros, so that all + /// `BinarySubtractionGenerator`s are run. + fn fill_binary_subtraction_gates(&mut self) { + let zero = self.zero_binary::<30>(); + if let Some(&(_, i)) = self.batched_gates.free_binary_subtraction_gate.get(&30) { + let max_copies = + BinarySubtractionGate::::new_from_config(&self.config).num_ops; + for _ in i..max_copies { + let dummy = self.add_virtual_binary_target(); + self.sub_binary(dummy, dummy, dummy); + self.connect_binary(dummy, zero); + } + } + } + fn fill_batched_gates(&mut self) { self.fill_arithmetic_gates(); self.fill_base_arithmetic_gates(); @@ -1250,5 +1275,7 @@ impl, const D: usize> CircuitBuilder { self.fill_switch_gates(); self.fill_u32_arithmetic_gates(); self.fill_u32_subtraction_gates(); + self.fill_binary_arithmetic_gates(); + self.fill_binary_subtraction_gates(); } } From 440a5bd5d97ff20972e68b7bf353918b51ce90b8 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Tue, 4 Jan 2022 15:27:59 -0800 Subject: [PATCH 058/143] fmt --- plonky2/src/plonk/circuit_builder.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index ae65a53b..058c5d0d 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -1097,7 +1097,9 @@ impl, const D: usize> CircuitBuilder { .free_binary_subtraction_gate .insert(BITS, (gate, i + 1)); } else { - self.batched_gates.free_binary_subtraction_gate.remove(&BITS); + self.batched_gates + .free_binary_subtraction_gate + .remove(&BITS); } (gate, i) From 07b71a961398ec12876b7d8be084dc7ccae8b84f Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 20 Jan 2022 16:10:21 -0800 Subject: [PATCH 059/143] ECDSA merge --- plonky2/src/gadgets/biguint.rs | 6 +- plonky2/src/gadgets/multiple_comparison.rs | 13 +- plonky2/src/gates/add_many_u32.rs | 437 +++++++++++++++++++++ plonky2/src/gates/mod.rs | 1 + 4 files changed, 448 insertions(+), 9 deletions(-) create mode 100644 plonky2/src/gates/add_many_u32.rs diff --git a/plonky2/src/gadgets/biguint.rs b/plonky2/src/gadgets/biguint.rs index 6f9081b7..f647a30f 100644 --- a/plonky2/src/gadgets/biguint.rs +++ b/plonky2/src/gadgets/biguint.rs @@ -83,7 +83,7 @@ impl, const D: usize> CircuitBuilder { pub fn cmp_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BoolTarget { let (a, b) = self.pad_biguints(a, b); - self.list_le_binary::<30>(a.limbs, b.limbs) + self.list_le_30(a.limbs, b.limbs) } pub fn add_virtual_biguint_target(&mut self, num_limbs: usize) -> BigUintTarget { @@ -138,6 +138,8 @@ impl, const D: usize> CircuitBuilder { } pub fn mul_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget { + let before = self.num_gates(); + let total_limbs = a.limbs.len() + b.limbs.len(); let mut to_add = vec![vec![]; total_limbs]; @@ -159,6 +161,8 @@ impl, const D: usize> CircuitBuilder { } combined_limbs.push(carry); + println!("NUMBER OF GATES: {}", self.num_gates() - before); + BigUintTarget { limbs: combined_limbs, } diff --git 
a/plonky2/src/gadgets/multiple_comparison.rs b/plonky2/src/gadgets/multiple_comparison.rs index 222a6858..bca337fe 100644 --- a/plonky2/src/gadgets/multiple_comparison.rs +++ b/plonky2/src/gadgets/multiple_comparison.rs @@ -59,14 +59,11 @@ impl, const D: usize> CircuitBuilder { } /// Helper function for comparing, specifically, lists of `U32Target`s. - pub fn list_le_binary( - &mut self, - a: Vec>, - b: Vec>, - ) -> BoolTarget { - let a_targets = a.iter().map(|&t| t.0).collect(); - let b_targets = b.iter().map(|&t| t.0).collect(); - self.list_le(a_targets, b_targets, BITS) + pub fn list_le_30(&mut self, a: Vec>, b: Vec>) -> BoolTarget { + let a_targets: Vec = a.iter().map(|&t| t.0).collect(); + let b_targets: Vec = b.iter().map(|&t| t.0).collect(); + + self.list_le(a_targets, b_targets, 30) } } diff --git a/plonky2/src/gates/add_many_u32.rs b/plonky2/src/gates/add_many_u32.rs new file mode 100644 index 00000000..7ad8523d --- /dev/null +++ b/plonky2/src/gates/add_many_u32.rs @@ -0,0 +1,437 @@ +use std::marker::PhantomData; + +use itertools::unfold; + +use crate::field::extension_field::target::ExtensionTarget; +use crate::field::extension_field::Extendable; +use crate::field::field_types::{Field, RichField}; +use crate::gates::gate::Gate; +use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator}; +use crate::iop::target::Target; +use crate::iop::wire::Wire; +use crate::iop::witness::{PartitionWitness, Witness}; +use crate::plonk::circuit_builder::CircuitBuilder; +use crate::plonk::circuit_data::CircuitConfig; +use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase}; + +const LOG2_MAX_NUM_ADDENDS: usize = 6; +const MAX_NUM_ADDENDS: usize = 1 << LOG2_MAX_NUM_ADDENDS; + +/// A gate to perform addition on `num_addends` different 32-bit values, plus a small carry +#[derive(Copy, Clone, Debug)] +pub struct U32AddManyGate, const D: usize> { + pub num_addends: usize, + pub num_ops: usize, + _phantom: PhantomData, +} + +impl, const D: usize> U32AddManyGate { + pub fn new_from_config(num_addends: usize, config: &CircuitConfig) -> Self { + Self { + num_addends, + num_ops: Self::num_ops(num_addends, config), + _phantom: PhantomData, + } + } + + pub(crate) fn num_ops(num_addends: usize, config: &CircuitConfig) -> usize { + debug_assert!(num_addends < MAX_NUM_ADDENDS); + let wires_per_op = (num_addends + 3) + Self::num_limbs(); + let routed_wires_per_op = 5; + (config.num_wires / wires_per_op).min(config.num_routed_wires / routed_wires_per_op) + } + + pub fn wire_ith_op_jth_addend(&self, i: usize, j: usize) -> usize { + debug_assert!(i < self.num_ops); + debug_assert!(i < self.num_addends); + (self.num_addends + 3) * i + j + } + pub fn wire_ith_carry(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + (self.num_addends + 3) * i + self.num_addends + } + + pub fn wire_ith_output_low_half(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + (self.num_addends + 3) * i + self.num_addends + 1 + } + pub fn wire_ith_output_high_half(&self, i: usize) -> usize { + debug_assert!(i < self.num_ops); + (self.num_addends + 3) * i + self.num_addends + 2 + } + + pub fn limb_bits() -> usize { + 2 + } + pub fn num_limbs() -> usize { + 32 / Self::limb_bits() + } + + pub fn wire_ith_output_jth_limb(&self, i: usize, j: usize) -> usize { + debug_assert!(i < self.num_ops); + debug_assert!(j < Self::num_limbs()); + (self.num_addends + 3) * self.num_ops + Self::num_limbs() * i + j + } +} + +impl, const D: usize> Gate for U32AddManyGate { + fn id(&self) -> 
String { + format!("{:?}", self) + } + + fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { + let mut constraints = Vec::with_capacity(self.num_constraints()); + for i in 0..self.num_ops { + let addends: Vec = (0..self.num_addends) + .map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)]) + .collect(); + let borrow = vars.local_wires[self.wire_ith_carry(i)]; + + let computed_output = addends.iter().fold(F::Extension::ZERO, |x, &y| x + y) + borrow; + + let output_low = vars.local_wires[self.wire_ith_output_low_half(i)]; + let output_high = vars.local_wires[self.wire_ith_output_high_half(i)]; + + let base = F::Extension::from_canonical_u64(1 << 32u64); + let combined_output = output_high * base + output_low; + + constraints.push(combined_output - computed_output); + + let mut combined_low_limbs = F::Extension::ZERO; + let base = F::Extension::from_canonical_u64(1u64 << Self::limb_bits()); + for j in (0..Self::num_limbs()).rev() { + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + let product = (0..max_limb) + .map(|x| this_limb - F::Extension::from_canonical_usize(x)) + .product(); + constraints.push(product); + + combined_low_limbs = base * combined_low_limbs + this_limb; + } + constraints.push(combined_low_limbs - output_low); + + let max_overflow = self.num_addends; + let product = (0..max_overflow) + .map(|x| output_high - F::Extension::from_canonical_usize(x)) + .product(); + constraints.push(product); + } + + constraints + } + + fn eval_unfiltered_base(&self, vars: EvaluationVarsBase) -> Vec { + let mut constraints = Vec::with_capacity(self.num_constraints()); + for i in 0..self.num_ops { + let addends: Vec = (0..self.num_addends) + .map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)]) + .collect(); + let borrow = vars.local_wires[self.wire_ith_carry(i)]; + + let computed_output = addends.iter().fold(F::ZERO, |x, &y| x + y) + borrow; + + let output_low = vars.local_wires[self.wire_ith_output_low_half(i)]; + let output_high = vars.local_wires[self.wire_ith_output_high_half(i)]; + + let base = F::from_canonical_u64(1 << 32u64); + let combined_output = output_high * base + output_low; + + constraints.push(combined_output - computed_output); + + let mut combined_low_limbs = F::ZERO; + let base = F::from_canonical_u64(1u64 << Self::limb_bits()); + for j in (0..Self::num_limbs()).rev() { + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + let product = (0..max_limb) + .map(|x| this_limb - F::from_canonical_usize(x)) + .product(); + constraints.push(product); + + combined_low_limbs = base * combined_low_limbs + this_limb; + } + constraints.push(combined_low_limbs - output_low); + + let max_overflow = self.num_addends; + let product = (0..max_overflow) + .map(|x| output_high - F::from_canonical_usize(x)) + .product(); + constraints.push(product); + } + + constraints + } + + fn eval_unfiltered_recursively( + &self, + builder: &mut CircuitBuilder, + vars: EvaluationTargets, + ) -> Vec> { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + for i in 0..self.num_ops { + let addends: Vec> = (0..self.num_addends) + .map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)]) + .collect(); + let borrow = vars.local_wires[self.wire_ith_carry(i)]; + + let mut computed_output = borrow; + for addend in addends { + computed_output = builder.add_extension(computed_output, addend); + } + + let output_low = 
vars.local_wires[self.wire_ith_output_low_half(i)]; + let output_high = vars.local_wires[self.wire_ith_output_high_half(i)]; + + let base: F::Extension = F::from_canonical_u64(1 << 32u64).into(); + let base_target = builder.constant_extension(base); + let combined_output = builder.mul_add_extension(output_high, base_target, output_low); + + constraints.push(builder.sub_extension(combined_output, computed_output)); + + let mut combined_low_limbs = builder.zero_extension(); + let base = builder + .constant_extension(F::Extension::from_canonical_u64(1u64 << Self::limb_bits())); + for j in (0..Self::num_limbs()).rev() { + let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; + let max_limb = 1 << Self::limb_bits(); + + let mut product = builder.one_extension(); + for x in 0..max_limb { + let x_target = + builder.constant_extension(F::Extension::from_canonical_usize(x)); + let diff = builder.sub_extension(this_limb, x_target); + product = builder.mul_extension(product, diff); + } + constraints.push(product); + + combined_low_limbs = builder.mul_add_extension(base, combined_low_limbs, this_limb); + } + constraints.push(builder.sub_extension(combined_low_limbs, output_low)); + + let max_overflow = self.num_addends; + let mut product = builder.one_extension(); + for x in 0..max_overflow { + let x_target = builder.constant_extension(F::Extension::from_canonical_usize(x)); + let diff = builder.sub_extension(output_high, x_target); + product = builder.mul_extension(product, diff); + } + constraints.push(product); + } + + constraints + } + + fn generators( + &self, + gate_index: usize, + _local_constants: &[F], + ) -> Vec>> { + (0..self.num_ops) + .map(|i| { + let g: Box> = Box::new( + U32AddManyGenerator { + gate: *self, + gate_index, + i, + _phantom: PhantomData, + } + .adapter(), + ); + g + }) + .collect::>() + } + + fn num_wires(&self) -> usize { + (self.num_addends + 3) * self.num_ops + Self::num_limbs() * self.num_ops + } + + fn num_constants(&self) -> usize { + 0 + } + + fn degree(&self) -> usize { + 1 << Self::limb_bits() + } + + fn num_constraints(&self) -> usize { + self.num_ops * (3 + Self::num_limbs()) + } +} + +#[derive(Clone, Debug)] +struct U32AddManyGenerator, const D: usize> { + gate: U32AddManyGate, + gate_index: usize, + i: usize, + _phantom: PhantomData, +} + +impl, const D: usize> SimpleGenerator + for U32AddManyGenerator +{ + fn dependencies(&self) -> Vec { + let local_target = |input| Target::wire(self.gate_index, input); + + (0..self.gate.num_addends) + .map(|j| local_target(self.gate.wire_ith_op_jth_addend(self.i, j))) + .chain([local_target(self.gate.wire_ith_carry(self.i))]) + .collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let local_wire = |input| Wire { + gate: self.gate_index, + input, + }; + + let get_local_wire = |input| witness.get_wire(local_wire(input)); + + let addends: Vec<_> = (0..self.gate.num_addends).map(|j| get_local_wire(self.gate.wire_ith_output_jth_limb(self.i, j))).collect(); + let carry = get_local_wire(self.gate.wire_ith_carry(self.i)); + + let output = addends.iter().fold(F::ZERO, |x, &y| x + y) + carry; + let mut output_u64 = output.to_canonical_u64(); + + let output_high_u64 = output_u64 >> 32; + let output_low_u64 = output_u64 & ((1 << 32) - 1); + + let output_high = F::from_canonical_u64(output_high_u64); + let output_low = F::from_canonical_u64(output_low_u64); + + let output_high_wire = local_wire(self.gate.wire_ith_output_high_half(self.i)); + let output_low_wire = 
local_wire(self.gate.wire_ith_output_low_half(self.i)); + + out_buffer.set_wire(output_high_wire, output_high); + out_buffer.set_wire(output_low_wire, output_low); + + let num_limbs = U32AddManyGate::::num_limbs(); + let limb_base = 1 << U32AddManyGate::::limb_bits(); + let output_limbs_u64 = unfold((), move |_| { + let ret = output_u64 % limb_base; + output_u64 /= limb_base; + Some(ret) + }) + .take(num_limbs); + let output_limbs_f = output_limbs_u64.map(F::from_canonical_u64); + + for (j, output_limb) in output_limbs_f.enumerate() { + let wire = local_wire(self.gate.wire_ith_output_jth_limb(self.i, j)); + out_buffer.set_wire(wire, output_limb); + } + } +} + +#[cfg(test)] +mod tests { + use std::marker::PhantomData; + + use anyhow::Result; + use rand::Rng; + + use crate::field::extension_field::quartic::QuarticExtension; + use crate::field::field_types::Field; + use crate::field::goldilocks_field::GoldilocksField; + use crate::gates::add_many_u32::U32AddManyGate; + use crate::gates::gate::Gate; + use crate::gates::gate_testing::{test_eval_fns, test_low_degree}; + use crate::hash::hash_types::HashOut; + use crate::plonk::vars::EvaluationVars; + + #[test] + fn low_degree() { + test_low_degree::(U32AddManyGate:: { + num_addends: 4, + num_ops: 3, + _phantom: PhantomData, + }) + } + + #[test] + fn eval_fns() -> Result<()> { + test_eval_fns::(U32AddManyGate:: { + num_addends: 4, + num_ops: 3, + _phantom: PhantomData, + }) + } + + #[test] + fn test_gate_constraint() { + type F = GoldilocksField; + type FF = QuarticExtension; + const D: usize = 4; + const NUM_ADDENDS: usize = 4; + const NUM_U32_ADD_MANY_OPS: usize = 3; + + fn get_wires( + addends: Vec>, + carries: Vec, + ) -> Vec { + let mut v0 = Vec::new(); + let mut v1 = Vec::new(); + + let limb_bits = U32AddManyGate::::limb_bits(); + let num_limbs = U32AddManyGate::::num_limbs(); + let limb_base = 1 << limb_bits; + for op in 0..NUM_U32_ADD_MANY_OPS { + let adds = &addends[op]; + let ca = carries[op]; + + let mut output = adds.iter().sum::() + ca; + let output_low = output & ((1 << 32) - 1); + let output_high = output >> 32; + + let mut output_limbs = Vec::with_capacity(num_limbs); + for _i in 0..num_limbs { + output_limbs.push(output % limb_base); + output /= limb_base; + } + let mut output_limbs_f: Vec<_> = output_limbs + .into_iter() + .map(F::from_canonical_u64) + .collect(); + + for a in adds { + v0.push(F::from_canonical_u64(*a)); + } + v0.push(F::from_canonical_u64(ca)); + v0.push(F::from_canonical_u64(output_low)); + v0.push(F::from_canonical_u64(output_high)); + v1.append(&mut output_limbs_f); + } + + v0.iter() + .chain(v1.iter()) + .map(|&x| x.into()) + .collect::>() + } + + let mut rng = rand::thread_rng(); + let addends: Vec> = (0..NUM_U32_ADD_MANY_OPS) + .map(|_| (0..NUM_ADDENDS).map(|_| rng.gen::() as u64).collect()) + .collect(); + let carries: Vec<_> = (0..NUM_U32_ADD_MANY_OPS) + .map(|_| rng.gen::() as u64) + .collect(); + + let gate = U32AddManyGate:: { + num_addends: NUM_ADDENDS, + num_ops: NUM_U32_ADD_MANY_OPS, + _phantom: PhantomData, + }; + + let vars = EvaluationVars { + local_constants: &[], + local_wires: &get_wires(addends, carries), + public_inputs_hash: &HashOut::rand(), + }; + + assert!( + gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()), + "Gate constraints are not satisfied." 
+ ); + } +} diff --git a/plonky2/src/gates/mod.rs b/plonky2/src/gates/mod.rs index f63080e5..a7591648 100644 --- a/plonky2/src/gates/mod.rs +++ b/plonky2/src/gates/mod.rs @@ -1,6 +1,7 @@ // Gates have `new` methods that return `GateRef`s. #![allow(clippy::new_ret_no_self)] +pub mod add_many_u32; pub mod arithmetic_base; pub mod arithmetic_extension; pub mod arithmetic_u32; From 3ba61a4e9ce8111704b4e18bfb8e08e7b3c67147 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 20 Jan 2022 16:11:04 -0800 Subject: [PATCH 060/143] ECDSA merge --- plonky2/src/gadgets/biguint.rs | 47 ++++++++++------------ plonky2/src/gadgets/ecdsa.rs | 4 +- plonky2/src/gadgets/multiple_comparison.rs | 4 +- plonky2/src/gadgets/nonnative.rs | 2 +- plonky2/src/iop/generator.rs | 9 ++--- plonky2/src/iop/witness.rs | 2 +- 6 files changed, 31 insertions(+), 37 deletions(-) diff --git a/plonky2/src/gadgets/biguint.rs b/plonky2/src/gadgets/biguint.rs index f647a30f..559d5b92 100644 --- a/plonky2/src/gadgets/biguint.rs +++ b/plonky2/src/gadgets/biguint.rs @@ -12,7 +12,7 @@ use crate::plonk::circuit_builder::CircuitBuilder; #[derive(Clone, Debug)] pub struct BigUintTarget { - pub limbs: Vec>, + pub limbs: Vec, } impl BigUintTarget { @@ -20,25 +20,24 @@ impl BigUintTarget { self.limbs.len() } - pub fn get_limb(&self, i: usize) -> BinaryTarget<30> { + pub fn get_limb(&self, i: usize) -> U32Target { self.limbs[i] } } impl, const D: usize> CircuitBuilder { pub fn constant_biguint(&mut self, value: &BigUint) -> BigUintTarget { - let base = BigUint::from_u64(1 << 30).unwrap(); + let base = BigUint::from_u64(1 << 32).unwrap(); let mut limb_values = Vec::new(); let mut current = value.clone(); while current > BigUint::zero() { let (div, rem) = current.div_rem(&base); current = div; - let rem_u64 = rem.to_u64_digits()[0]; - limb_values.push(F::from_canonical_u64(rem_u64)); + limb_values.push(rem.to_u64_digits()[0] as u32); } let limbs = limb_values .iter() - .map(|&l| self.constant_binary(l)) + .map(|&l| self.constant_u32(l)) .collect(); BigUintTarget { limbs } @@ -47,14 +46,14 @@ impl, const D: usize> CircuitBuilder { pub fn connect_biguint(&mut self, lhs: &BigUintTarget, rhs: &BigUintTarget) { let min_limbs = lhs.num_limbs().min(rhs.num_limbs()); for i in 0..min_limbs { - self.connect_binary(lhs.get_limb(i), rhs.get_limb(i)); + self.connect_u32(lhs.get_limb(i), rhs.get_limb(i)); } for i in min_limbs..lhs.num_limbs() { - self.assert_zero_binary(lhs.get_limb(i)); + self.assert_zero_u32(lhs.get_limb(i)); } for i in min_limbs..rhs.num_limbs() { - self.assert_zero_binary(rhs.get_limb(i)); + self.assert_zero_u32(rhs.get_limb(i)); } } @@ -66,14 +65,14 @@ impl, const D: usize> CircuitBuilder { if a.num_limbs() > b.num_limbs() { let mut padded_b = b.clone(); for _ in b.num_limbs()..a.num_limbs() { - padded_b.limbs.push(self.zero_binary()); + padded_b.limbs.push(self.zero_u32()); } (a.clone(), padded_b) } else { let mut padded_a = a.clone(); for _ in a.num_limbs()..b.num_limbs() { - padded_a.limbs.push(self.zero_binary()); + padded_a.limbs.push(self.zero_u32()); } (padded_a, b.clone()) @@ -83,11 +82,11 @@ impl, const D: usize> CircuitBuilder { pub fn cmp_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BoolTarget { let (a, b) = self.pad_biguints(a, b); - self.list_le_30(a.limbs, b.limbs) + self.list_le_u32(a.limbs, b.limbs) } pub fn add_virtual_biguint_target(&mut self, num_limbs: usize) -> BigUintTarget { - let limbs = self.add_virtual_binary_targets(num_limbs); + let limbs = self.add_virtual_u32_targets(num_limbs); 
BigUintTarget { limbs } } @@ -97,16 +96,16 @@ impl, const D: usize> CircuitBuilder { let num_limbs = a.num_limbs().max(b.num_limbs()); let mut combined_limbs = vec![]; - let mut carry = self.zero_binary(); + let mut carry = self.zero_u32(); for i in 0..num_limbs { let a_limb = (i < a.num_limbs()) .then(|| a.limbs[i]) - .unwrap_or_else(|| self.zero_binary()); + .unwrap_or_else(|| self.zero_u32()); let b_limb = (i < b.num_limbs()) .then(|| b.limbs[i]) - .unwrap_or_else(|| self.zero_binary()); + .unwrap_or_else(|| self.zero_u32()); - let (new_limb, new_carry) = self.add_many_binary(&[carry, a_limb, b_limb]); + let (new_limb, new_carry) = self.add_many_u32(&[carry, a_limb, b_limb]); carry = new_carry; combined_limbs.push(new_limb); } @@ -124,9 +123,9 @@ impl, const D: usize> CircuitBuilder { let mut result_limbs = vec![]; - let mut borrow = self.zero_binary(); + let mut borrow = self.zero_u32(); for i in 0..num_limbs { - let (result, new_borrow) = self.sub_binary(a.limbs[i], b.limbs[i], borrow); + let (result, new_borrow) = self.sub_u32(a.limbs[i], b.limbs[i], borrow); result_limbs.push(result); borrow = new_borrow; } @@ -138,31 +137,27 @@ impl, const D: usize> CircuitBuilder { } pub fn mul_biguint(&mut self, a: &BigUintTarget, b: &BigUintTarget) -> BigUintTarget { - let before = self.num_gates(); - let total_limbs = a.limbs.len() + b.limbs.len(); let mut to_add = vec![vec![]; total_limbs]; for i in 0..a.limbs.len() { for j in 0..b.limbs.len() { - let (product, carry) = self.mul_binary(a.limbs[i], b.limbs[j]); + let (product, carry) = self.mul_u32(a.limbs[i], b.limbs[j]); to_add[i + j].push(product); to_add[i + j + 1].push(carry); } } let mut combined_limbs = vec![]; - let mut carry = self.zero_binary(); + let mut carry = self.zero_u32(); for summands in &mut to_add { summands.push(carry); - let (new_result, new_carry) = self.add_many_binary(summands); + let (new_result, new_carry) = self.add_many_u32(summands); combined_limbs.push(new_result); carry = new_carry; } combined_limbs.push(carry); - println!("NUMBER OF GATES: {}", self.num_gates() - before); - BigUintTarget { limbs: combined_limbs, } diff --git a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index 6466cea7..8238cbad 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -4,7 +4,7 @@ use crate::curve::curve_types::Curve; use crate::field::extension_field::Extendable; use crate::field::field_types::RichField; use crate::gadgets::biguint::BigUintTarget; -use crate::gadgets::binary_arithmetic::BinaryTarget; +use crate::gadgets::arithmetic_u32::U32Target; use crate::gadgets::curve::AffinePointTarget; use crate::gadgets::nonnative::NonNativeTarget; use crate::iop::target::{BoolTarget, Target}; @@ -38,7 +38,7 @@ impl, const D: usize> CircuitBuilder { for &bit in rev_bits { sum = self.mul_add(two, sum, bit.target); } - let limbs = vec![BinaryTarget::<30>(sum)]; + let limbs = vec![U32Target(sum)]; let value = BigUintTarget { limbs }; NonNativeTarget { diff --git a/plonky2/src/gadgets/multiple_comparison.rs b/plonky2/src/gadgets/multiple_comparison.rs index bca337fe..434fb6c1 100644 --- a/plonky2/src/gadgets/multiple_comparison.rs +++ b/plonky2/src/gadgets/multiple_comparison.rs @@ -59,11 +59,11 @@ impl, const D: usize> CircuitBuilder { } /// Helper function for comparing, specifically, lists of `U32Target`s. 
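The `mul_biguint` in the hunk above is schoolbook multiplication over 32-bit limbs: each limb product is split into a low and a high half, the halves are binned into columns by total index, and a final pass propagates carries (`add_many_u32` here, `add_u32s_with_carry` in the later patches). A plain-Rust sketch of the same computation on native little-endian `u32` limbs, for reference:

```rust
/// Schoolbook multiplication of little-endian u32-limb integers, mirroring `mul_biguint`.
/// Assumes a modest number of limbs so the u64 column accumulators cannot overflow.
fn mul_limbs(a: &[u32], b: &[u32]) -> Vec<u32> {
    // Column k collects every partial product with i + j == k.
    let mut columns = vec![0u64; a.len() + b.len()];
    for (i, &ai) in a.iter().enumerate() {
        for (j, &bj) in b.iter().enumerate() {
            let prod = ai as u64 * bj as u64;
            columns[i + j] += prod & 0xffff_ffff; // low 32 bits  -> column i + j
            columns[i + j + 1] += prod >> 32;     // high 32 bits -> column i + j + 1
        }
    }
    // Carry propagation, like the final carry-chaining pass in the circuit version.
    let mut out = Vec::with_capacity(columns.len());
    let mut carry = 0u64;
    for col in columns {
        let sum = col + carry;
        out.push((sum & 0xffff_ffff) as u32);
        carry = sum >> 32;
    }
    debug_assert_eq!(carry, 0);
    out
}
```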
- pub fn list_le_30(&mut self, a: Vec>, b: Vec>) -> BoolTarget { + pub fn list_le_u32(&mut self, a: Vec, b: Vec) -> BoolTarget { let a_targets: Vec = a.iter().map(|&t| t.0).collect(); let b_targets: Vec = b.iter().map(|&t| t.0).collect(); - self.list_le(a_targets, b_targets, 30) + self.list_le(a_targets, b_targets, 32) } } diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 20ecd382..7a62b04f 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -248,7 +248,7 @@ impl, const D: usize> CircuitBuilder { } pub fn bool_to_nonnative(&mut self, b: &BoolTarget) -> NonNativeTarget { - let limbs = vec![BinaryTarget::<30>(b.target)]; + let limbs = vec![U32Target(b.target)]; let value = BigUintTarget { limbs }; NonNativeTarget { diff --git a/plonky2/src/iop/generator.rs b/plonky2/src/iop/generator.rs index 70f99d54..d4e37dcb 100644 --- a/plonky2/src/iop/generator.rs +++ b/plonky2/src/iop/generator.rs @@ -171,21 +171,20 @@ impl GeneratedValues { } pub fn set_biguint_target(&mut self, target: BigUintTarget, value: BigUint) { - let base = BigUint::from_u64(1 << 30).unwrap(); + let base = BigUint::from_u64(1 << 32).unwrap(); let mut limbs = Vec::new(); let mut current = value.clone(); while current > BigUint::zero() { let (div, rem) = current.div_rem(&base); current = div; - let rem_u64 = rem.to_u64_digits()[0]; - limbs.push(F::from_canonical_u64(rem_u64)); + limbs.push(rem.to_u64_digits()[0] as u32); } assert!(target.num_limbs() >= limbs.len()); - limbs.resize(target.num_limbs(), F::ZERO); + limbs.resize(target.num_limbs(), 0); for i in 0..target.num_limbs() { - self.set_binary_target(target.get_limb(i), limbs[i]); + self.set_u32_target(target.get_limb(i), limbs[i]); } } diff --git a/plonky2/src/iop/witness.rs b/plonky2/src/iop/witness.rs index 558832d6..efe4d911 100644 --- a/plonky2/src/iop/witness.rs +++ b/plonky2/src/iop/witness.rs @@ -65,7 +65,7 @@ pub trait Witness { fn get_biguint_target(&self, target: BigUintTarget) -> BigUint { let mut result = BigUint::zero(); - let limb_base = BigUint::from_u64(1 << 30u64).unwrap(); + let limb_base = BigUint::from_u64(1 << 32u64).unwrap(); for i in (0..target.num_limbs()).rev() { let limb = target.get_limb(i); result *= &limb_base; From 3bbedecddb0114c3c66efc7610425f26a1c1bfbb Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 20 Jan 2022 16:11:30 -0800 Subject: [PATCH 061/143] ECDSA merge --- plonky2/src/gadgets/arithmetic_u32.rs | 69 +++++++-- plonky2/src/gadgets/biguint.rs | 16 +- plonky2/src/gadgets/ecdsa.rs | 2 +- plonky2/src/gadgets/nonnative.rs | 1 + plonky2/src/gates/add_many_u32.rs | 210 ++++++++++++++------------ plonky2/src/plonk/circuit_builder.rs | 59 +++++++- 6 files changed, 240 insertions(+), 117 deletions(-) diff --git a/plonky2/src/gadgets/arithmetic_u32.rs b/plonky2/src/gadgets/arithmetic_u32.rs index af1682f6..9bf78d44 100644 --- a/plonky2/src/gadgets/arithmetic_u32.rs +++ b/plonky2/src/gadgets/arithmetic_u32.rs @@ -114,18 +114,57 @@ impl, const D: usize> CircuitBuilder { 1 => (to_add[0], self.zero_u32()), 2 => self.add_u32(to_add[0], to_add[1]), _ => { - let (mut low, mut carry) = self.add_u32(to_add[0], to_add[1]); - for i in 2..to_add.len() { - let (new_low, new_carry) = self.add_u32(to_add[i], low); - let (combined_carry, _zero) = self.add_u32(carry, new_carry); - low = new_low; - carry = combined_carry; + let num_addends = to_add.len(); + let gate = U32AddManyGate::::new_from_config(&self.config, num_addends); + let (gate_index, copy) = 
self.find_u32_add_many_gate(num_addends); + + for j in 0..num_addends { + self.connect( + Target::wire(gate_index, gate.wire_ith_op_jth_addend(copy, j)), + to_add[j].0, + ); } - (low, carry) + let zero = self.zero(); + self.connect(Target::wire(gate_index, gate.wire_ith_carry(copy)), zero); + + let output_low = + U32Target(Target::wire(gate_index, gate.wire_ith_output_result(copy))); + let output_high = + U32Target(Target::wire(gate_index, gate.wire_ith_output_carry(copy))); + + (output_low, output_high) } } } + pub fn add_u32s_with_carry( + &mut self, + to_add: &[U32Target], + carry: U32Target, + ) -> (U32Target, U32Target) { + if to_add.len() == 1 { + return self.add_u32(to_add[0], carry); + } + + let num_addends = to_add.len(); + + let gate = U32AddManyGate::::new_from_config(&self.config, num_addends); + let (gate_index, copy) = self.find_u32_add_many_gate(num_addends); + + for j in 0..num_addends { + self.connect( + Target::wire(gate_index, gate.wire_ith_op_jth_addend(copy, j)), + to_add[j].0, + ); + } + self.connect(Target::wire(gate_index, gate.wire_ith_carry(copy)), carry.0); + + let output = U32Target(Target::wire(gate_index, gate.wire_ith_output_result(copy))); + let output_carry = U32Target(Target::wire(gate_index, gate.wire_ith_output_carry(copy))); + + (output, output_carry) + } + pub fn mul_u32(&mut self, a: U32Target, b: U32Target) -> (U32Target, U32Target) { let zero = self.zero_u32(); self.mul_add_u32(a, b, zero) @@ -214,6 +253,7 @@ mod tests { pub fn test_add_many_u32s() -> Result<()> { type F = GoldilocksField; const D: usize = 4; + const NUM_ADDENDS: usize = 15; let config = CircuitConfig::standard_recursion_config(); @@ -222,10 +262,19 @@ mod tests { let mut rng = thread_rng(); let mut to_add = Vec::new(); - for _ in 0..10 { - to_add.push(builder.constant_u32(rng.gen())); + let mut sum = 0u64; + for _ in 0..NUM_ADDENDS { + let x: u32 = rng.gen(); + sum += x as u64; + to_add.push(builder.constant_u32(x)); } - let _ = builder.add_many_u32(&to_add); + let carry = builder.zero_u32(); + let (result_low, result_high) = builder.add_u32s_with_carry(&to_add, carry); + let expected_low = builder.constant_u32((sum % (1 << 32)) as u32); + let expected_high = builder.constant_u32((sum >> 32) as u32); + + builder.connect_u32(result_low, expected_low); + builder.connect_u32(result_high, expected_high); let data = builder.build(); let proof = data.prove(pw).unwrap(); diff --git a/plonky2/src/gadgets/biguint.rs b/plonky2/src/gadgets/biguint.rs index 559d5b92..24d3da7d 100644 --- a/plonky2/src/gadgets/biguint.rs +++ b/plonky2/src/gadgets/biguint.rs @@ -35,10 +35,7 @@ impl, const D: usize> CircuitBuilder { current = div; limb_values.push(rem.to_u64_digits()[0] as u32); } - let limbs = limb_values - .iter() - .map(|&l| self.constant_u32(l)) - .collect(); + let limbs = limb_values.iter().map(|&l| self.constant_u32(l)).collect(); BigUintTarget { limbs } } @@ -151,8 +148,7 @@ impl, const D: usize> CircuitBuilder { let mut combined_limbs = vec![]; let mut carry = self.zero_u32(); for summands in &mut to_add { - summands.push(carry); - let (new_result, new_carry) = self.add_many_u32(summands); + let (new_result, new_carry) = self.add_u32s_with_carry(summands, carry); combined_limbs.push(new_result); carry = new_carry; } @@ -400,11 +396,11 @@ mod tests { let y = builder.constant_biguint(&y_value); let (div, rem) = builder.div_rem_biguint(&x, &y); - let expected_div = builder.constant_biguint(&expected_div_value); - let expected_rem = builder.constant_biguint(&expected_rem_value); + // let 
expected_div = builder.constant_biguint(&expected_div_value); + // let expected_rem = builder.constant_biguint(&expected_rem_value); - builder.connect_biguint(&div, &expected_div); - builder.connect_biguint(&rem, &expected_rem); + // builder.connect_biguint(&div, &expected_div); + // builder.connect_biguint(&rem, &expected_rem); let data = builder.build::(); let proof = data.prove(pw).unwrap(); diff --git a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index 8238cbad..362fbc6d 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -3,8 +3,8 @@ use std::marker::PhantomData; use crate::curve::curve_types::Curve; use crate::field::extension_field::Extendable; use crate::field::field_types::RichField; -use crate::gadgets::biguint::BigUintTarget; use crate::gadgets::arithmetic_u32::U32Target; +use crate::gadgets::biguint::BigUintTarget; use crate::gadgets::curve::AffinePointTarget; use crate::gadgets::nonnative::NonNativeTarget; use crate::iop::target::{BoolTarget, Target}; diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 7a62b04f..fd3dab87 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -430,6 +430,7 @@ mod tests { let x = builder.constant_nonnative(x_ff); let y = builder.constant_nonnative(y_ff); + println!("LIMBS LIMBS LIMBS {}", y.value.limbs.len()); let product = builder.mul_nonnative(&x, &y); let product_expected = builder.constant_nonnative(product_ff); diff --git a/plonky2/src/gates/add_many_u32.rs b/plonky2/src/gates/add_many_u32.rs index 7ad8523d..571106e2 100644 --- a/plonky2/src/gates/add_many_u32.rs +++ b/plonky2/src/gates/add_many_u32.rs @@ -13,9 +13,10 @@ use crate::iop::witness::{PartitionWitness, Witness}; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::CircuitConfig; use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase}; +use crate::util::ceil_div_usize; -const LOG2_MAX_NUM_ADDENDS: usize = 6; -const MAX_NUM_ADDENDS: usize = 1 << LOG2_MAX_NUM_ADDENDS; +const LOG2_MAX_NUM_ADDENDS: usize = 4; +const MAX_NUM_ADDENDS: usize = 16; /// A gate to perform addition on `num_addends` different 32-bit values, plus a small carry #[derive(Copy, Clone, Debug)] @@ -26,7 +27,7 @@ pub struct U32AddManyGate, const D: usize> { } impl, const D: usize> U32AddManyGate { - pub fn new_from_config(num_addends: usize, config: &CircuitConfig) -> Self { + pub fn new_from_config(config: &CircuitConfig, num_addends: usize) -> Self { Self { num_addends, num_ops: Self::num_ops(num_addends, config), @@ -35,7 +36,7 @@ impl, const D: usize> U32AddManyGate { } pub(crate) fn num_ops(num_addends: usize, config: &CircuitConfig) -> usize { - debug_assert!(num_addends < MAX_NUM_ADDENDS); + debug_assert!(num_addends <= MAX_NUM_ADDENDS); let wires_per_op = (num_addends + 3) + Self::num_limbs(); let routed_wires_per_op = 5; (config.num_wires / wires_per_op).min(config.num_routed_wires / routed_wires_per_op) @@ -43,7 +44,7 @@ impl, const D: usize> U32AddManyGate { pub fn wire_ith_op_jth_addend(&self, i: usize, j: usize) -> usize { debug_assert!(i < self.num_ops); - debug_assert!(i < self.num_addends); + debug_assert!(j < self.num_addends); (self.num_addends + 3) * i + j } pub fn wire_ith_carry(&self, i: usize) -> usize { @@ -51,11 +52,11 @@ impl, const D: usize> U32AddManyGate { (self.num_addends + 3) * i + self.num_addends } - pub fn wire_ith_output_low_half(&self, i: usize) -> usize { + pub fn wire_ith_output_result(&self, i: usize) -> usize { 
debug_assert!(i < self.num_ops); (self.num_addends + 3) * i + self.num_addends + 1 } - pub fn wire_ith_output_high_half(&self, i: usize) -> usize { + pub fn wire_ith_output_carry(&self, i: usize) -> usize { debug_assert!(i < self.num_ops); (self.num_addends + 3) * i + self.num_addends + 2 } @@ -63,8 +64,14 @@ impl, const D: usize> U32AddManyGate { pub fn limb_bits() -> usize { 2 } + pub fn num_result_limbs() -> usize { + ceil_div_usize(32, Self::limb_bits()) + } + pub fn num_carry_limbs() -> usize { + ceil_div_usize(LOG2_MAX_NUM_ADDENDS, Self::limb_bits()) + } pub fn num_limbs() -> usize { - 32 / Self::limb_bits() + Self::num_result_limbs() + Self::num_carry_limbs() } pub fn wire_ith_output_jth_limb(&self, i: usize, j: usize) -> usize { @@ -85,19 +92,20 @@ impl, const D: usize> Gate for U32AddManyGate let addends: Vec = (0..self.num_addends) .map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)]) .collect(); - let borrow = vars.local_wires[self.wire_ith_carry(i)]; + let carry = vars.local_wires[self.wire_ith_carry(i)]; - let computed_output = addends.iter().fold(F::Extension::ZERO, |x, &y| x + y) + borrow; + let computed_output = addends.iter().fold(F::Extension::ZERO, |x, &y| x + y) + carry; - let output_low = vars.local_wires[self.wire_ith_output_low_half(i)]; - let output_high = vars.local_wires[self.wire_ith_output_high_half(i)]; + let output_result = vars.local_wires[self.wire_ith_output_result(i)]; + let output_carry = vars.local_wires[self.wire_ith_output_carry(i)]; let base = F::Extension::from_canonical_u64(1 << 32u64); - let combined_output = output_high * base + output_low; + let combined_output = output_carry * base + output_result; constraints.push(combined_output - computed_output); - let mut combined_low_limbs = F::Extension::ZERO; + let mut combined_result_limbs = F::Extension::ZERO; + let mut combined_carry_limbs = F::Extension::ZERO; let base = F::Extension::from_canonical_u64(1u64 << Self::limb_bits()); for j in (0..Self::num_limbs()).rev() { let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; @@ -107,15 +115,14 @@ impl, const D: usize> Gate for U32AddManyGate .product(); constraints.push(product); - combined_low_limbs = base * combined_low_limbs + this_limb; + if j < Self::num_result_limbs() { + combined_result_limbs = base * combined_result_limbs + this_limb; + } else { + combined_carry_limbs = base * combined_carry_limbs + this_limb; + } } - constraints.push(combined_low_limbs - output_low); - - let max_overflow = self.num_addends; - let product = (0..max_overflow) - .map(|x| output_high - F::Extension::from_canonical_usize(x)) - .product(); - constraints.push(product); + constraints.push(combined_result_limbs - output_result); + constraints.push(combined_carry_limbs - output_carry); } constraints @@ -127,19 +134,20 @@ impl, const D: usize> Gate for U32AddManyGate let addends: Vec = (0..self.num_addends) .map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)]) .collect(); - let borrow = vars.local_wires[self.wire_ith_carry(i)]; + let carry = vars.local_wires[self.wire_ith_carry(i)]; - let computed_output = addends.iter().fold(F::ZERO, |x, &y| x + y) + borrow; + let computed_output = addends.iter().fold(F::ZERO, |x, &y| x + y) + carry; - let output_low = vars.local_wires[self.wire_ith_output_low_half(i)]; - let output_high = vars.local_wires[self.wire_ith_output_high_half(i)]; + let output_result = vars.local_wires[self.wire_ith_output_result(i)]; + let output_carry = vars.local_wires[self.wire_ith_output_carry(i)]; let base = 
F::from_canonical_u64(1 << 32u64); - let combined_output = output_high * base + output_low; + let combined_output = output_carry * base + output_result; constraints.push(combined_output - computed_output); - let mut combined_low_limbs = F::ZERO; + let mut combined_result_limbs = F::ZERO; + let mut combined_carry_limbs = F::ZERO; let base = F::from_canonical_u64(1u64 << Self::limb_bits()); for j in (0..Self::num_limbs()).rev() { let this_limb = vars.local_wires[self.wire_ith_output_jth_limb(i, j)]; @@ -149,15 +157,14 @@ impl, const D: usize> Gate for U32AddManyGate .product(); constraints.push(product); - combined_low_limbs = base * combined_low_limbs + this_limb; + if j < Self::num_result_limbs() { + combined_result_limbs = base * combined_result_limbs + this_limb; + } else { + combined_carry_limbs = base * combined_carry_limbs + this_limb; + } } - constraints.push(combined_low_limbs - output_low); - - let max_overflow = self.num_addends; - let product = (0..max_overflow) - .map(|x| output_high - F::from_canonical_usize(x)) - .product(); - constraints.push(product); + constraints.push(combined_result_limbs - output_result); + constraints.push(combined_carry_limbs - output_carry); } constraints @@ -174,23 +181,25 @@ impl, const D: usize> Gate for U32AddManyGate let addends: Vec> = (0..self.num_addends) .map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)]) .collect(); - let borrow = vars.local_wires[self.wire_ith_carry(i)]; + let carry = vars.local_wires[self.wire_ith_carry(i)]; - let mut computed_output = borrow; + let mut computed_output = carry; for addend in addends { computed_output = builder.add_extension(computed_output, addend); } - let output_low = vars.local_wires[self.wire_ith_output_low_half(i)]; - let output_high = vars.local_wires[self.wire_ith_output_high_half(i)]; + let output_result = vars.local_wires[self.wire_ith_output_result(i)]; + let output_carry = vars.local_wires[self.wire_ith_output_carry(i)]; let base: F::Extension = F::from_canonical_u64(1 << 32u64).into(); let base_target = builder.constant_extension(base); - let combined_output = builder.mul_add_extension(output_high, base_target, output_low); + let combined_output = + builder.mul_add_extension(output_carry, base_target, output_result); constraints.push(builder.sub_extension(combined_output, computed_output)); - let mut combined_low_limbs = builder.zero_extension(); + let mut combined_result_limbs = builder.zero_extension(); + let mut combined_carry_limbs = builder.zero_extension(); let base = builder .constant_extension(F::Extension::from_canonical_u64(1u64 << Self::limb_bits())); for j in (0..Self::num_limbs()).rev() { @@ -206,18 +215,16 @@ impl, const D: usize> Gate for U32AddManyGate } constraints.push(product); - combined_low_limbs = builder.mul_add_extension(base, combined_low_limbs, this_limb); + if j < Self::num_result_limbs() { + combined_result_limbs = + builder.mul_add_extension(base, combined_result_limbs, this_limb); + } else { + combined_carry_limbs = + builder.mul_add_extension(base, combined_carry_limbs, this_limb); + } } - constraints.push(builder.sub_extension(combined_low_limbs, output_low)); - - let max_overflow = self.num_addends; - let mut product = builder.one_extension(); - for x in 0..max_overflow { - let x_target = builder.constant_extension(F::Extension::from_canonical_usize(x)); - let diff = builder.sub_extension(output_high, x_target); - product = builder.mul_extension(product, diff); - } - constraints.push(product); + 
constraints.push(builder.sub_extension(combined_result_limbs, output_result)); + constraints.push(builder.sub_extension(combined_carry_limbs, output_carry)); } constraints @@ -289,37 +296,46 @@ impl, const D: usize> SimpleGenerator let get_local_wire = |input| witness.get_wire(local_wire(input)); - let addends: Vec<_> = (0..self.gate.num_addends).map(|j| get_local_wire(self.gate.wire_ith_output_jth_limb(self.i, j))).collect(); + let addends: Vec<_> = (0..self.gate.num_addends) + .map(|j| get_local_wire(self.gate.wire_ith_op_jth_addend(self.i, j))) + .collect(); let carry = get_local_wire(self.gate.wire_ith_carry(self.i)); let output = addends.iter().fold(F::ZERO, |x, &y| x + y) + carry; - let mut output_u64 = output.to_canonical_u64(); + let output_u64 = output.to_canonical_u64(); - let output_high_u64 = output_u64 >> 32; - let output_low_u64 = output_u64 & ((1 << 32) - 1); + let output_carry_u64 = output_u64 >> 32; + let output_result_u64 = output_u64 & ((1 << 32) - 1); - let output_high = F::from_canonical_u64(output_high_u64); - let output_low = F::from_canonical_u64(output_low_u64); + let output_carry = F::from_canonical_u64(output_carry_u64); + let output_result = F::from_canonical_u64(output_result_u64); - let output_high_wire = local_wire(self.gate.wire_ith_output_high_half(self.i)); - let output_low_wire = local_wire(self.gate.wire_ith_output_low_half(self.i)); + let output_carry_wire = local_wire(self.gate.wire_ith_output_carry(self.i)); + let output_result_wire = local_wire(self.gate.wire_ith_output_result(self.i)); - out_buffer.set_wire(output_high_wire, output_high); - out_buffer.set_wire(output_low_wire, output_low); + out_buffer.set_wire(output_carry_wire, output_carry); + out_buffer.set_wire(output_result_wire, output_result); - let num_limbs = U32AddManyGate::::num_limbs(); + let num_result_limbs = U32AddManyGate::::num_result_limbs(); + let num_carry_limbs = U32AddManyGate::::num_carry_limbs(); let limb_base = 1 << U32AddManyGate::::limb_bits(); - let output_limbs_u64 = unfold((), move |_| { - let ret = output_u64 % limb_base; - output_u64 /= limb_base; - Some(ret) - }) - .take(num_limbs); - let output_limbs_f = output_limbs_u64.map(F::from_canonical_u64); - for (j, output_limb) in output_limbs_f.enumerate() { + let split_to_limbs = |mut val, num| { + unfold((), move |_| { + let ret = val % limb_base; + val /= limb_base; + Some(ret) + }) + .take(num) + .map(F::from_canonical_u64) + }; + + let result_limbs = split_to_limbs(output_result_u64, num_result_limbs); + let carry_limbs = split_to_limbs(output_carry_u64, num_carry_limbs); + + for (j, limb) in result_limbs.chain(carry_limbs).enumerate() { let wire = local_wire(self.gate.wire_ith_output_jth_limb(self.i, j)); - out_buffer.set_wire(wire, output_limb); + out_buffer.set_wire(wire, limb); } } } @@ -329,6 +345,7 @@ mod tests { use std::marker::PhantomData; use anyhow::Result; + use itertools::unfold; use rand::Rng; use crate::field::extension_field::quartic::QuarticExtension; @@ -363,44 +380,47 @@ mod tests { type F = GoldilocksField; type FF = QuarticExtension; const D: usize = 4; - const NUM_ADDENDS: usize = 4; + const NUM_ADDENDS: usize = 10; const NUM_U32_ADD_MANY_OPS: usize = 3; - fn get_wires( - addends: Vec>, - carries: Vec, - ) -> Vec { + fn get_wires(addends: Vec>, carries: Vec) -> Vec { let mut v0 = Vec::new(); let mut v1 = Vec::new(); - let limb_bits = U32AddManyGate::::limb_bits(); - let num_limbs = U32AddManyGate::::num_limbs(); - let limb_base = 1 << limb_bits; + let num_result_limbs = 
U32AddManyGate::::num_result_limbs(); + let num_carry_limbs = U32AddManyGate::::num_carry_limbs(); + let limb_base = 1 << U32AddManyGate::::limb_bits(); for op in 0..NUM_U32_ADD_MANY_OPS { let adds = &addends[op]; let ca = carries[op]; - let mut output = adds.iter().sum::() + ca; - let output_low = output & ((1 << 32) - 1); - let output_high = output >> 32; + let output = adds.iter().sum::() + ca; + let output_result = output & ((1 << 32) - 1); + let output_carry = output >> 32; - let mut output_limbs = Vec::with_capacity(num_limbs); - for _i in 0..num_limbs { - output_limbs.push(output % limb_base); - output /= limb_base; - } - let mut output_limbs_f: Vec<_> = output_limbs - .into_iter() + let split_to_limbs = |mut val, num| { + unfold((), move |_| { + let ret = val % limb_base; + val /= limb_base; + Some(ret) + }) + .take(num) .map(F::from_canonical_u64) - .collect(); + }; + + let mut result_limbs: Vec<_> = + split_to_limbs(output_result, num_result_limbs).collect(); + let mut carry_limbs: Vec<_> = + split_to_limbs(output_carry, num_carry_limbs).collect(); for a in adds { v0.push(F::from_canonical_u64(*a)); } v0.push(F::from_canonical_u64(ca)); - v0.push(F::from_canonical_u64(output_low)); - v0.push(F::from_canonical_u64(output_high)); - v1.append(&mut output_limbs_f); + v0.push(F::from_canonical_u64(output_result)); + v0.push(F::from_canonical_u64(output_carry)); + v1.append(&mut result_limbs); + v1.append(&mut carry_limbs); } v0.iter() diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index 058c5d0d..d9f79f59 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -823,6 +823,10 @@ pub struct BatchedGates, const D: usize> { /// of switches pub(crate) current_switch_gates: Vec, usize, usize)>>, + /// A map `n -> (g, i)` from `n` number of addends to an available `U32AddManyGate` of that size with gate + /// index `g` and already using `i` random accesses. + pub(crate) free_u32_add_many: HashMap, + /// The `U32ArithmeticGate` currently being filled (so new u32 arithmetic operations will be added to this gate before creating a new one) pub(crate) current_u32_arithmetic_gate: Option<(usize, usize)>, /// The `U32SubtractionGate` currently being filled (so new u32 subtraction operations will be added to this gate before creating a new one) @@ -845,6 +849,7 @@ impl, const D: usize> BatchedGates { free_mul: HashMap::new(), free_random_access: HashMap::new(), current_switch_gates: Vec::new(), + free_u32_add_many: HashMap::new(), current_u32_arithmetic_gate: None, current_u32_subtraction_gate: None, free_binary_arithmetic_gate: HashMap::new(), @@ -944,7 +949,7 @@ impl, const D: usize> CircuitBuilder { (gate, i) } - /// Finds the last available random access gate with the given `bits` or add one if there aren't any. + /// Finds the last available random access gate with the given `bits` or adds one if there aren't any. /// Returns `(g,i)` such that there is a random access gate for the given `bits` at index /// `g` and the gate's `i`-th random access is available. pub(crate) fn find_random_access_gate(&mut self, bits: usize) -> (usize, usize) { @@ -1007,6 +1012,35 @@ impl, const D: usize> CircuitBuilder { (gate, gate_index, next_copy) } + /// Finds the last available U32 add-many gate with the given `num_addends` or adds one if there aren't any. + /// Returns `(g,i)` such that there is a `U32AddManyGate` for the given `num_addends` at index + /// `g` and the gate's `i`-th copy is available. 
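For reference, the reworked U32AddManyGate that this batching feeds sums up to 16 u32 addends plus a small carry-in, exposes the low 32 bits on the result wire and the excess on the carry wire, and decomposes both into 2-bit (base-4) limbs: 16 limbs for the result and 2 for the carry, each pinned to [0, 4) by a small product constraint. A standalone sketch of that witness layout in plain u64 arithmetic (illustrative only, not the gate's API):

/// Mirrors the gate's decomposition: u64 sum -> 32-bit result + small carry,
/// each split into base-4 limbs, least significant first.
fn add_many_witness(addends: &[u32], carry_in: u32) -> (u32, u32, Vec<u8>) {
    assert!(addends.len() <= 16);
    let sum: u64 = addends.iter().map(|&x| x as u64).sum::<u64>() + carry_in as u64;
    let result = (sum & 0xffff_ffff) as u32;
    let carry_out = (sum >> 32) as u32;
    let split = |mut v: u64, n: usize| -> Vec<u8> {
        (0..n).map(|_| { let limb = (v % 4) as u8; v /= 4; limb }).collect()
    };
    // 16 result limbs followed by 2 carry limbs.
    let mut limbs = split(result as u64, 16);
    limbs.extend(split(carry_out as u64, 2));
    (result, carry_out, limbs)
}

fn main() {
    let (result, carry, limbs) = add_many_witness(&[u32::MAX; 16], 15);
    // Recombining the limbs must give back the two halves of the sum.
    let low: u64 = limbs[..16].iter().rev().fold(0, |acc, &l| acc * 4 + l as u64);
    let high: u64 = limbs[16..].iter().rev().fold(0, |acc, &l| acc * 4 + l as u64);
    assert_eq!(low, result as u64);
    assert_eq!(high, carry as u64);
    assert_eq!(high * (1u64 << 32) + low, 16 * (u32::MAX as u64) + 15);
}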
+ pub(crate) fn find_u32_add_many_gate(&mut self, num_addends: usize) -> (usize, usize) { + let (gate, i) = self + .batched_gates + .free_u32_add_many + .get(&num_addends) + .copied() + .unwrap_or_else(|| { + let gate = self.add_gate( + U32AddManyGate::new_from_config(&self.config, num_addends), + vec![], + ); + (gate, 0) + }); + + // Update `free_u32_add_many` with new values. + if i + 1 < U32AddManyGate::::new_from_config(&self.config, num_addends).num_ops { + self.batched_gates + .free_u32_add_many + .insert(num_addends, (gate, i + 1)); + } else { + self.batched_gates.free_u32_add_many.remove(&num_addends); + } + + (gate, i) + } + pub(crate) fn find_u32_arithmetic_gate(&mut self) -> (usize, usize) { let (gate_index, copy) = match self.batched_gates.current_u32_arithmetic_gate { None => { @@ -1213,6 +1247,28 @@ impl, const D: usize> CircuitBuilder { } } + /// Fill the remaining unused u32 add-many operations with zeros, so that all + /// `U32AddManyGenerator`s are run. + fn fill_u32_add_many_gates(&mut self) { + let zero = self.zero_u32(); + for (num_addends, (_, i)) in self.batched_gates.free_u32_add_many.clone() { + let max_copies = + U32AddManyGate::::new_from_config(&self.config, num_addends).num_ops; + for _ in i..max_copies { + let gate = U32AddManyGate::::new_from_config(&self.config, num_addends); + let (gate_index, copy) = self.find_u32_add_many_gate(num_addends); + + for j in 0..num_addends { + self.connect( + Target::wire(gate_index, gate.wire_ith_op_jth_addend(copy, j)), + zero.0, + ); + } + self.connect(Target::wire(gate_index, gate.wire_ith_carry(copy)), zero.0); + } + } + } + /// Fill the remaining unused U32 arithmetic operations with zeros, so that all /// `U32ArithmeticGenerator`s are run. fn fill_u32_arithmetic_gates(&mut self) { @@ -1275,6 +1331,7 @@ impl, const D: usize> CircuitBuilder { self.fill_mul_gates(); self.fill_random_access_gates(); self.fill_switch_gates(); + self.fill_u32_add_many_gates(); self.fill_u32_arithmetic_gates(); self.fill_u32_subtraction_gates(); self.fill_binary_arithmetic_gates(); From facb5661f3fbcd4d77ebfe2c0d88c2d56bc67167 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Wed, 12 Jan 2022 15:49:27 -0800 Subject: [PATCH 062/143] fix --- plonky2/src/gates/add_many_u32.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plonky2/src/gates/add_many_u32.rs b/plonky2/src/gates/add_many_u32.rs index 571106e2..c8b5f8af 100644 --- a/plonky2/src/gates/add_many_u32.rs +++ b/plonky2/src/gates/add_many_u32.rs @@ -38,7 +38,7 @@ impl, const D: usize> U32AddManyGate { pub(crate) fn num_ops(num_addends: usize, config: &CircuitConfig) -> usize { debug_assert!(num_addends <= MAX_NUM_ADDENDS); let wires_per_op = (num_addends + 3) + Self::num_limbs(); - let routed_wires_per_op = 5; + let routed_wires_per_op = num_addends + 3; (config.num_wires / wires_per_op).min(config.num_routed_wires / routed_wires_per_op) } From 50c24dfe8af84275900d32eb28d1735d4f4fce24 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Tue, 18 Jan 2022 14:07:47 -0800 Subject: [PATCH 063/143] more efficient nonnative add and multi-add --- plonky2/src/gadgets/biguint.rs | 24 +++- plonky2/src/gadgets/curve.rs | 4 +- plonky2/src/gadgets/nonnative.rs | 186 ++++++++++++++++++++----------- plonky2/src/gates/mod.rs | 1 + plonky2/src/iop/generator.rs | 4 + 5 files changed, 151 insertions(+), 68 deletions(-) diff --git a/plonky2/src/gadgets/biguint.rs b/plonky2/src/gadgets/biguint.rs index 24d3da7d..1a829f97 100644 --- a/plonky2/src/gadgets/biguint.rs +++ 
b/plonky2/src/gadgets/biguint.rs @@ -40,6 +40,10 @@ impl, const D: usize> CircuitBuilder { BigUintTarget { limbs } } + pub fn zero_biguint(&mut self) -> BigUintTarget { + self.constant_biguint(&BigUint::zero()) + } + pub fn connect_biguint(&mut self, lhs: &BigUintTarget, rhs: &BigUintTarget) { let min_limbs = lhs.num_limbs().min(rhs.num_limbs()); for i in 0..min_limbs { @@ -159,6 +163,18 @@ impl, const D: usize> CircuitBuilder { } } + pub fn mul_biguint_by_bool( + &mut self, + a: &BigUintTarget, + b: BoolTarget, + ) -> BigUintTarget { + let t = b.target; + + BigUintTarget { + limbs: a.limbs.iter().map(|l| U32Target(self.mul(l.0, t))).collect() + } + } + // Returns x * y + z. This is no more efficient than mul-then-add; it's purely for convenience (only need to call one CircuitBuilder function). pub fn mul_add_biguint( &mut self, @@ -396,11 +412,11 @@ mod tests { let y = builder.constant_biguint(&y_value); let (div, rem) = builder.div_rem_biguint(&x, &y); - // let expected_div = builder.constant_biguint(&expected_div_value); - // let expected_rem = builder.constant_biguint(&expected_rem_value); + let expected_div = builder.constant_biguint(&expected_div_value); + let expected_rem = builder.constant_biguint(&expected_rem_value); - // builder.connect_biguint(&div, &expected_div); - // builder.connect_biguint(&rem, &expected_rem); + builder.connect_biguint(&div, &expected_div); + builder.connect_biguint(&rem, &expected_rem); let data = builder.build::(); let proof = data.prove(pw).unwrap(); diff --git a/plonky2/src/gadgets/curve.rs b/plonky2/src/gadgets/curve.rs index 63e96721..907aa5e3 100644 --- a/plonky2/src/gadgets/curve.rs +++ b/plonky2/src/gadgets/curve.rs @@ -100,6 +100,7 @@ impl, const D: usize> CircuitBuilder { p1: &AffinePointTarget, p2: &AffinePointTarget, ) -> AffinePointTarget { + let before = self.num_gates(); let AffinePointTarget { x: x1, y: y1 } = p1; let AffinePointTarget { x: x2, y: y2 } = p2; @@ -123,6 +124,7 @@ impl, const D: usize> CircuitBuilder { let x3_norm = self.mul_nonnative(&x3, &z3_inv); let y3_norm = self.mul_nonnative(&y3, &z3_inv); + println!("NUM GATES: {}", self.num_gates() - before); AffinePointTarget { x: x3_norm, y: y3_norm, @@ -310,7 +312,6 @@ mod tests { } #[test] - #[ignore] fn test_curve_mul() -> Result<()> { const D: usize = 2; type C = PoseidonGoldilocksConfig; @@ -345,7 +346,6 @@ mod tests { } #[test] - #[ignore] fn test_curve_random() -> Result<()> { const D: usize = 2; type C = PoseidonGoldilocksConfig; diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index fd3dab87..1006b513 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -60,16 +60,45 @@ impl, const D: usize> CircuitBuilder { } } - // Add two `NonNativeTarget`s. 
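The mul_biguint_by_bool helper added to biguint.rs above multiplies every limb by the Boolean target, so a bit of 1 keeps the value and a bit of 0 zeroes it; the nonnative and curve gadgets later in this series use the same trick (via mul_nonnative_by_bool) to select coordinates without branching. In plain integers, with hypothetical names chosen only for illustration:

/// Masking a limb vector by a 0/1 selector, as mul_biguint_by_bool does in-circuit.
fn select(limbs: &[u32], bit: bool) -> Vec<u32> {
    limbs.iter().map(|&l| l * bit as u32).collect()
}

fn main() {
    assert_eq!(select(&[7, 9], true), vec![7, 9]);
    assert_eq!(select(&[7, 9], false), vec![0, 0]);
}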
pub fn add_nonnative( &mut self, a: &NonNativeTarget, b: &NonNativeTarget, ) -> NonNativeTarget { - let result = self.add_biguint(&a.value, &b.value); + let sum = self.add_virtual_nonnative_target::(); + let overflow = self.add_virtual_bool_target(); - // TODO: reduce add result with only one conditional subtraction - self.reduce(&result) + self.add_simple_generator(NonNativeAdditionGenerator:: { + a: a.clone(), + b: b.clone(), + sum: sum.clone(), + overflow: overflow.clone(), + _phantom: PhantomData, + }); + + let sum_expected = self.add_biguint(&a.value, &b.value); + + let modulus = self.constant_biguint(&FF::order()); + let mod_times_overflow = self.mul_biguint_by_bool(&modulus, overflow); + let sum_actual = self.add_biguint(&sum.value, &mod_times_overflow); + self.connect_biguint(&sum_expected, &sum_actual); + + sum + } + + pub fn mul_nonnative_by_bool( + &mut self, + a: &NonNativeTarget, + b: BoolTarget, + ) -> NonNativeTarget { + let t = b.target; + + NonNativeTarget { + value: BigUintTarget { + limbs: a.value.limbs.iter().map(|l| U32Target(self.mul(l.0, t))).collect() + }, + _phantom: PhantomData, + } } pub fn add_many_nonnative( @@ -80,12 +109,28 @@ impl, const D: usize> CircuitBuilder { return to_add[0].clone(); } - let mut result = self.add_biguint(&to_add[0].value, &to_add[1].value); - for i in 2..to_add.len() { - result = self.add_biguint(&result, &to_add[i].value); - } + let sum = self.add_virtual_nonnative_target::(); + let overflow = self.add_virtual_u32_target(); + let summands = to_add.to_vec(); - self.reduce(&result) + self.add_simple_generator(NonNativeMultipleAddsGenerator:: { + summands: summands.clone(), + sum: sum.clone(), + overflow: overflow.clone(), + _phantom: PhantomData, + }); + + let sum_expected = summands.iter().fold(self.zero_biguint(), |a, b| self.add_biguint(&a, &b.value)); + + let modulus = self.constant_biguint(&FF::order()); + let overflow_biguint = BigUintTarget { + limbs: vec![overflow], + }; + let mod_times_overflow = self.mul_biguint(&modulus, &overflow_biguint); + let sum_actual = self.add_biguint(&sum.value, &mod_times_overflow); + self.connect_biguint(&sum_expected, &sum_actual); + + sum } // Subtract two `NonNativeTarget`s. @@ -188,59 +233,6 @@ impl, const D: usize> CircuitBuilder { } } - /// Returns `x % |FF|` as a `NonNativeTarget`. 
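Both nonnative additions above follow the same shape: a generator computes the reduced sum natively and records how much modular wraparound occurred (a single bit for two operands, a small u32 quotient for add_many_nonnative), and the circuit then checks one linear identity, a + b = sum + overflow * modulus, instead of performing a full reduction. A standalone model of the pattern with a machine-word modulus (the real gadgets work limb-wise over BigUintTarget):

/// "Generator" side: reduce natively and report whether wraparound happened.
fn add_mod_witness(a: u64, b: u64, modulus: u64) -> (u64, bool) {
    debug_assert!(a < modulus && b < modulus);
    let sum = a as u128 + b as u128;
    if sum >= modulus as u128 {
        ((sum - modulus as u128) as u64, true)
    } else {
        (sum as u64, false)
    }
}

/// "Circuit" side: the single identity that gets constrained.
fn check_add(a: u64, b: u64, modulus: u64, sum: u64, overflow: bool) -> bool {
    a as u128 + b as u128 == sum as u128 + (overflow as u128) * modulus as u128
}

fn main() {
    let m = 0xffff_ffff_0000_0001u64; // Goldilocks order, used here only as a sample modulus
    let (a, b) = (m - 1, 5);
    let (sum, overflow) = add_mod_witness(a, b, m);
    assert!(overflow && sum == 4);
    assert!(check_add(a, b, m, sum, overflow));
}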
- /*fn reduce_by_bits(&mut self, x: &BigUintTarget) -> NonNativeTarget { - let before = self.num_gates(); - - let mut powers_of_two = Vec::new(); - let mut cur_power_of_two = FF::ONE; - let two = FF::TWO; - let mut max_num_limbs = 0; - for _ in 0..(x.limbs.len() * 32) { - let cur_power = self.constant_biguint(&cur_power_of_two.to_biguint()); - max_num_limbs = max_num_limbs.max(cur_power.limbs.len()); - powers_of_two.push(cur_power.limbs); - - cur_power_of_two *= two; - } - - let mut result_limbs_unreduced = vec![self.zero(); max_num_limbs]; - for i in 0..x.limbs.len() { - let this_limb = x.limbs[i]; - let bits = self.split_le(this_limb.0, 32); - for b in 0..bits.len() { - let this_power = powers_of_two[32 * i + b].clone(); - for x in 0..this_power.len() { - result_limbs_unreduced[x] = self.mul_add(bits[b].target, this_power[x].0, result_limbs_unreduced[x]); - } - } - } - - let mut result_limbs_reduced = Vec::new(); - let mut carry = self.zero_u32(); - for i in 0..result_limbs_unreduced.len() { - println!("{}", i); - let (low, high) = self.split_to_u32(result_limbs_unreduced[i]); - let (cur, overflow) = self.add_u32(carry, low); - let (new_carry, _) = self.add_many_u32(&[overflow, high, carry]); - result_limbs_reduced.push(cur); - carry = new_carry; - } - result_limbs_reduced.push(carry); - - let value = BigUintTarget { - limbs: result_limbs_reduced, - }; - - println!("NUMBER OF GATES: {}", self.num_gates() - before); - println!("OUTPUT LIMBS: {}", value.limbs.len()); - - NonNativeTarget { - value, - _phantom: PhantomData, - } - }*/ - #[allow(dead_code)] fn reduce_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { let x_biguint = self.nonnative_to_biguint(x); @@ -280,6 +272,74 @@ impl, const D: usize> CircuitBuilder { } } +#[derive(Debug)] +struct NonNativeAdditionGenerator, const D: usize, FF: Field> { + a: NonNativeTarget, + b: NonNativeTarget, + sum: NonNativeTarget, + overflow: BoolTarget, + _phantom: PhantomData, +} + +impl, const D: usize, FF: Field> SimpleGenerator + for NonNativeAdditionGenerator +{ + fn dependencies(&self) -> Vec { + self.a.value.limbs.iter().cloned().chain(self.b.value.limbs.clone()) + .map(|l| l.0) + .collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let a = witness.get_nonnative_target(self.a.clone()); + let b = witness.get_nonnative_target(self.b.clone()); + let a_biguint = a.to_biguint(); + let b_biguint = b.to_biguint(); + let sum_biguint = a_biguint + b_biguint; + let modulus = FF::order(); + let (overflow, sum_reduced) = if sum_biguint > modulus { + (true, sum_biguint - modulus) + } else { + (false, sum_biguint) + }; + + out_buffer.set_biguint_target(self.sum.value.clone(), sum_reduced); + out_buffer.set_bool_target(self.overflow, overflow); + } +} + +#[derive(Debug)] +struct NonNativeMultipleAddsGenerator, const D: usize, FF: Field> { + summands: Vec>, + sum: NonNativeTarget, + overflow: U32Target, + _phantom: PhantomData, +} + +impl, const D: usize, FF: Field> SimpleGenerator + for NonNativeMultipleAddsGenerator +{ + fn dependencies(&self) -> Vec { + self.summands.iter().map(|summand| summand.value.limbs.iter().map(|limb| limb.0)) + .flatten() + .collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let summands: Vec<_> = self.summands.iter().map(|summand| witness.get_nonnative_target(summand.clone())).collect(); + let summand_biguints: Vec<_> = summands.iter().map(|summand| summand.to_biguint()).collect(); + + let sum_biguint = 
summand_biguints.iter().fold(BigUint::zero(), |a, b| a + b.clone()); + + let modulus = FF::order(); + let (overflow_biguint, sum_reduced) = sum_biguint.div_rem(&modulus); + let overflow = overflow_biguint.to_u64_digits()[0] as u32; + + out_buffer.set_biguint_target(self.sum.value.clone(), sum_reduced); + out_buffer.set_u32_target(self.overflow, overflow); + } +} + #[derive(Debug)] struct NonNativeInverseGenerator, const D: usize, FF: Field> { x: NonNativeTarget, @@ -310,6 +370,8 @@ impl, const D: usize, FF: Field> SimpleGenerator } } + + #[cfg(test)] mod tests { use anyhow::Result; diff --git a/plonky2/src/gates/mod.rs b/plonky2/src/gates/mod.rs index a7591648..177db7cf 100644 --- a/plonky2/src/gates/mod.rs +++ b/plonky2/src/gates/mod.rs @@ -11,6 +11,7 @@ pub mod binary_arithmetic; pub mod binary_subtraction; pub mod comparison; pub mod constant; +// pub mod curve_double; pub mod exponentiation; pub mod gate; pub mod gate_tree; diff --git a/plonky2/src/iop/generator.rs b/plonky2/src/iop/generator.rs index d4e37dcb..5f8b8a5f 100644 --- a/plonky2/src/iop/generator.rs +++ b/plonky2/src/iop/generator.rs @@ -162,6 +162,10 @@ impl GeneratedValues { self.target_values.push((target, value)) } + pub fn set_bool_target(&mut self, target: BoolTarget, value: bool) { + self.set_target(target.target, F::from_bool(value)) + } + pub fn set_u32_target(&mut self, target: U32Target, value: u32) { self.set_target(target.0, F::from_canonical_u32(value)) } From 8d3662692e6cbcec73ae35d6066fbca21202a29c Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Tue, 18 Jan 2022 14:11:21 -0800 Subject: [PATCH 064/143] fmt --- plonky2/src/gadgets/biguint.rs | 12 ++++---- plonky2/src/gadgets/nonnative.rs | 51 ++++++++++++++++++++++---------- 2 files changed, 42 insertions(+), 21 deletions(-) diff --git a/plonky2/src/gadgets/biguint.rs b/plonky2/src/gadgets/biguint.rs index 1a829f97..4f9e1574 100644 --- a/plonky2/src/gadgets/biguint.rs +++ b/plonky2/src/gadgets/biguint.rs @@ -163,15 +163,15 @@ impl, const D: usize> CircuitBuilder { } } - pub fn mul_biguint_by_bool( - &mut self, - a: &BigUintTarget, - b: BoolTarget, - ) -> BigUintTarget { + pub fn mul_biguint_by_bool(&mut self, a: &BigUintTarget, b: BoolTarget) -> BigUintTarget { let t = b.target; BigUintTarget { - limbs: a.limbs.iter().map(|l| U32Target(self.mul(l.0, t))).collect() + limbs: a + .limbs + .iter() + .map(|l| U32Target(self.mul(l.0, t))) + .collect(), } } diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 1006b513..83333491 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -77,7 +77,7 @@ impl, const D: usize> CircuitBuilder { }); let sum_expected = self.add_biguint(&a.value, &b.value); - + let modulus = self.constant_biguint(&FF::order()); let mod_times_overflow = self.mul_biguint_by_bool(&modulus, overflow); let sum_actual = self.add_biguint(&sum.value, &mod_times_overflow); @@ -95,7 +95,12 @@ impl, const D: usize> CircuitBuilder { NonNativeTarget { value: BigUintTarget { - limbs: a.value.limbs.iter().map(|l| U32Target(self.mul(l.0, t))).collect() + limbs: a + .value + .limbs + .iter() + .map(|l| U32Target(self.mul(l.0, t))) + .collect(), }, _phantom: PhantomData, } @@ -120,8 +125,10 @@ impl, const D: usize> CircuitBuilder { _phantom: PhantomData, }); - let sum_expected = summands.iter().fold(self.zero_biguint(), |a, b| self.add_biguint(&a, &b.value)); - + let sum_expected = summands + .iter() + .fold(self.zero_biguint(), |a, b| self.add_biguint(&a, &b.value)); + let modulus = 
self.constant_biguint(&FF::order()); let overflow_biguint = BigUintTarget { limbs: vec![overflow], @@ -285,9 +292,14 @@ impl, const D: usize, FF: Field> SimpleGenerator for NonNativeAdditionGenerator { fn dependencies(&self) -> Vec { - self.a.value.limbs.iter().cloned().chain(self.b.value.limbs.clone()) - .map(|l| l.0) - .collect() + self.a + .value + .limbs + .iter() + .cloned() + .chain(self.b.value.limbs.clone()) + .map(|l| l.0) + .collect() } fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { @@ -320,16 +332,27 @@ impl, const D: usize, FF: Field> SimpleGenerator for NonNativeMultipleAddsGenerator { fn dependencies(&self) -> Vec { - self.summands.iter().map(|summand| summand.value.limbs.iter().map(|limb| limb.0)) - .flatten() - .collect() + self.summands + .iter() + .map(|summand| summand.value.limbs.iter().map(|limb| limb.0)) + .flatten() + .collect() } fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { - let summands: Vec<_> = self.summands.iter().map(|summand| witness.get_nonnative_target(summand.clone())).collect(); - let summand_biguints: Vec<_> = summands.iter().map(|summand| summand.to_biguint()).collect(); + let summands: Vec<_> = self + .summands + .iter() + .map(|summand| witness.get_nonnative_target(summand.clone())) + .collect(); + let summand_biguints: Vec<_> = summands + .iter() + .map(|summand| summand.to_biguint()) + .collect(); - let sum_biguint = summand_biguints.iter().fold(BigUint::zero(), |a, b| a + b.clone()); + let sum_biguint = summand_biguints + .iter() + .fold(BigUint::zero(), |a, b| a + b.clone()); let modulus = FF::order(); let (overflow_biguint, sum_reduced) = sum_biguint.div_rem(&modulus); @@ -370,8 +393,6 @@ impl, const D: usize, FF: Field> SimpleGenerator } } - - #[cfg(test)] mod tests { use anyhow::Result; From ddf5ee5d1f587879c7d254dc1c6c76df27a9feaa Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Tue, 18 Jan 2022 14:59:39 -0800 Subject: [PATCH 065/143] more efficient nonnative subtraction --- plonky2/src/gadgets/nonnative.rs | 63 +++++++++++++++++++++++++++++--- 1 file changed, 58 insertions(+), 5 deletions(-) diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 83333491..8726fde7 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -146,12 +146,24 @@ impl, const D: usize> CircuitBuilder { a: &NonNativeTarget, b: &NonNativeTarget, ) -> NonNativeTarget { - let order = self.constant_biguint(&FF::order()); - let a_plus_order = self.add_biguint(&order, &a.value); - let result = self.sub_biguint(&a_plus_order, &b.value); + let diff = self.add_virtual_nonnative_target::(); + let overflow = self.add_virtual_bool_target(); - // TODO: reduce sub result with only one conditional addition? 
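This rewrite answers that TODO with the same witness-plus-identity trick: the generator returns the reduced difference together with a borrow flag, and the circuit checks a = diff + b - borrow * modulus (equivalently, diff + b = a + borrow * modulus). The mul_nonnative rewrite in the next commit applies the analogous identity, a * b = prod + overflow * modulus, with a multi-limb quotient in place of the single bit. A toy model with a u64 modulus (the real code again works over BigUintTarget limbs):

/// Native subtraction with an explicit borrow, mirroring the generator's job.
fn sub_mod_witness(a: u64, b: u64, modulus: u64) -> (u64, bool) {
    debug_assert!(a < modulus && b < modulus);
    if a >= b {
        (a - b, false)
    } else {
        (a + (modulus - b), true)
    }
}

/// The identity the circuit enforces.
fn check_sub(a: u64, b: u64, modulus: u64, diff: u64, borrow: bool) -> bool {
    diff as u128 + b as u128 == a as u128 + (borrow as u128) * modulus as u128
}

fn main() {
    let m = 0xffff_ffff_0000_0001u64;
    let (diff, borrow) = sub_mod_witness(3, 10, m);
    assert!(borrow && diff == m - 7);
    assert!(check_sub(3, 10, m, diff, borrow));
}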
- self.reduce(&result) + self.add_simple_generator(NonNativeSubtractionGenerator:: { + a: a.clone(), + b: b.clone(), + diff: diff.clone(), + overflow: overflow.clone(), + _phantom: PhantomData, + }); + + let diff_plus_b = self.add_biguint(&diff.value, &b.value); + let modulus = self.constant_biguint(&FF::order()); + let mod_times_overflow = self.mul_biguint_by_bool(&modulus, overflow); + let diff_plus_b_reduced = self.sub_biguint(&diff_plus_b, &mod_times_overflow); + self.connect_biguint(&a.value, &diff_plus_b_reduced); + + diff } pub fn mul_nonnative( @@ -363,6 +375,47 @@ impl, const D: usize, FF: Field> SimpleGenerator } } +#[derive(Debug)] +struct NonNativeSubtractionGenerator, const D: usize, FF: Field> { + a: NonNativeTarget, + b: NonNativeTarget, + diff: NonNativeTarget, + overflow: BoolTarget, + _phantom: PhantomData, +} + +impl, const D: usize, FF: Field> SimpleGenerator + for NonNativeSubtractionGenerator +{ + fn dependencies(&self) -> Vec { + self.a + .value + .limbs + .iter() + .cloned() + .chain(self.b.value.limbs.clone()) + .map(|l| l.0) + .collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let a = witness.get_nonnative_target(self.a.clone()); + let b = witness.get_nonnative_target(self.b.clone()); + let a_biguint = a.to_biguint(); + let b_biguint = b.to_biguint(); + + let modulus = FF::order(); + let (diff_biguint, overflow) = if a_biguint > b_biguint { + (a_biguint - b_biguint, false) + } else { + (modulus + a_biguint - b_biguint, true) + }; + + out_buffer.set_biguint_target(self.diff.value.clone(), diff_biguint); + out_buffer.set_bool_target(self.overflow, overflow); + } +} + #[derive(Debug)] struct NonNativeInverseGenerator, const D: usize, FF: Field> { x: NonNativeTarget, From 6e9318c068914a11e6fd9573b34d7b4a98efa11d Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Wed, 19 Jan 2022 13:24:48 -0800 Subject: [PATCH 066/143] u32 range check gate --- insertion/src/insertion_gate.rs | 2 +- plonky2/src/gadgets/nonnative.rs | 77 +++-- plonky2/src/gates/add_many_u32.rs | 4 +- plonky2/src/gates/arithmetic_base.rs | 2 +- plonky2/src/gates/arithmetic_u32.rs | 2 +- plonky2/src/gates/assert_le.rs | 2 +- plonky2/src/gates/low_degree_interpolation.rs | 2 +- plonky2/src/gates/mod.rs | 2 +- plonky2/src/gates/multiplication_extension.rs | 2 +- plonky2/src/gates/random_access.rs | 2 +- plonky2/src/gates/range_check_u32.rs | 305 ++++++++++++++++++ plonky2/src/gates/subtraction_u32.rs | 2 +- 12 files changed, 374 insertions(+), 30 deletions(-) create mode 100644 plonky2/src/gates/range_check_u32.rs diff --git a/insertion/src/insertion_gate.rs b/insertion/src/insertion_gate.rs index 8ee60483..442416d3 100644 --- a/insertion/src/insertion_gate.rs +++ b/insertion/src/insertion_gate.rs @@ -404,7 +404,7 @@ mod tests { v.extend(equality_dummy_vals); v.extend(insert_here_vals); - v.iter().map(|&x| x.into()).collect::>() + v.iter().map(|&x| x.into()).collect() } let orig_vec = vec![FF::rand(); 3]; diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 8726fde7..31b3cb1b 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -171,9 +171,25 @@ impl, const D: usize> CircuitBuilder { a: &NonNativeTarget, b: &NonNativeTarget, ) -> NonNativeTarget { - let result = self.mul_biguint(&a.value, &b.value); + let prod = self.add_virtual_nonnative_target::(); + let modulus = self.constant_biguint(&FF::order()); + let overflow = self.add_virtual_biguint_target(a.value.num_limbs() + 
b.value.num_limbs() - modulus.num_limbs()); - self.reduce(&result) + self.add_simple_generator(NonNativeMultiplicationGenerator:: { + a: a.clone(), + b: b.clone(), + prod: prod.clone(), + overflow: overflow.clone(), + _phantom: PhantomData, + }); + + let prod_expected = self.mul_biguint(&a.value, &b.value); + + let mod_times_overflow = self.mul_biguint(&modulus, &overflow); + let prod_actual = self.add_biguint(&prod.value, &mod_times_overflow); + self.connect_biguint(&prod_expected, &prod_actual); + + prod } pub fn mul_many_nonnative( @@ -226,20 +242,6 @@ impl, const D: usize> CircuitBuilder { inv } - pub fn div_rem_nonnative( - &mut self, - x: &NonNativeTarget, - y: &NonNativeTarget, - ) -> (NonNativeTarget, NonNativeTarget) { - let x_biguint = self.nonnative_to_biguint(x); - let y_biguint = self.nonnative_to_biguint(y); - - let (div_biguint, rem_biguint) = self.div_rem_biguint(&x_biguint, &y_biguint); - let div = self.biguint_to_nonnative(&div_biguint); - let rem = self.biguint_to_nonnative(&rem_biguint); - (div, rem) - } - /// Returns `x % |FF|` as a `NonNativeTarget`. fn reduce(&mut self, x: &BigUintTarget) -> NonNativeTarget { let modulus = FF::order(); @@ -252,8 +254,7 @@ impl, const D: usize> CircuitBuilder { } } - #[allow(dead_code)] - fn reduce_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { + pub fn reduce_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { let x_biguint = self.nonnative_to_biguint(x); self.reduce(&x_biguint) } @@ -416,6 +417,45 @@ impl, const D: usize, FF: Field> SimpleGenerator } } +#[derive(Debug)] +struct NonNativeMultiplicationGenerator, const D: usize, FF: Field> { + a: NonNativeTarget, + b: NonNativeTarget, + prod: NonNativeTarget, + overflow: BigUintTarget, + _phantom: PhantomData, +} + +impl, const D: usize, FF: Field> SimpleGenerator + for NonNativeMultiplicationGenerator +{ + fn dependencies(&self) -> Vec { + self.a + .value + .limbs + .iter() + .cloned() + .chain(self.b.value.limbs.clone()) + .map(|l| l.0) + .collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let a = witness.get_nonnative_target(self.a.clone()); + let b = witness.get_nonnative_target(self.b.clone()); + let a_biguint = a.to_biguint(); + let b_biguint = b.to_biguint(); + + let prod_biguint = a_biguint * b_biguint; + + let modulus = FF::order(); + let (overflow_biguint, prod_reduced) = prod_biguint.div_rem(&modulus); + + out_buffer.set_biguint_target(self.prod.value.clone(), prod_reduced); + out_buffer.set_biguint_target(self.overflow.clone(), overflow_biguint); + } +} + #[derive(Debug)] struct NonNativeInverseGenerator, const D: usize, FF: Field> { x: NonNativeTarget, @@ -566,7 +606,6 @@ mod tests { let x = builder.constant_nonnative(x_ff); let y = builder.constant_nonnative(y_ff); - println!("LIMBS LIMBS LIMBS {}", y.value.limbs.len()); let product = builder.mul_nonnative(&x, &y); let product_expected = builder.constant_nonnative(product_ff); diff --git a/plonky2/src/gates/add_many_u32.rs b/plonky2/src/gates/add_many_u32.rs index c8b5f8af..01c7ed30 100644 --- a/plonky2/src/gates/add_many_u32.rs +++ b/plonky2/src/gates/add_many_u32.rs @@ -248,7 +248,7 @@ impl, const D: usize> Gate for U32AddManyGate ); g }) - .collect::>() + .collect() } fn num_wires(&self) -> usize { @@ -426,7 +426,7 @@ mod tests { v0.iter() .chain(v1.iter()) .map(|&x| x.into()) - .collect::>() + .collect() } let mut rng = rand::thread_rng(); diff --git a/plonky2/src/gates/arithmetic_base.rs b/plonky2/src/gates/arithmetic_base.rs index 
8f67dab2..738b8ad4 100644 --- a/plonky2/src/gates/arithmetic_base.rs +++ b/plonky2/src/gates/arithmetic_base.rs @@ -131,7 +131,7 @@ impl, const D: usize> Gate for ArithmeticGate ); g }) - .collect::>() + .collect() } fn num_wires(&self) -> usize { diff --git a/plonky2/src/gates/arithmetic_u32.rs b/plonky2/src/gates/arithmetic_u32.rs index 1d4a834c..bef21a97 100644 --- a/plonky2/src/gates/arithmetic_u32.rs +++ b/plonky2/src/gates/arithmetic_u32.rs @@ -212,7 +212,7 @@ impl, const D: usize> Gate for U32ArithmeticG ); g }) - .collect::>() + .collect() } fn num_wires(&self) -> usize { diff --git a/plonky2/src/gates/assert_le.rs b/plonky2/src/gates/assert_le.rs index 6a99acd9..c385bb31 100644 --- a/plonky2/src/gates/assert_le.rs +++ b/plonky2/src/gates/assert_le.rs @@ -578,7 +578,7 @@ mod tests { v.append(&mut chunks_equal); v.append(&mut intermediate_values); - v.iter().map(|&x| x.into()).collect::>() + v.iter().map(|&x| x.into()).collect() }; let mut rng = rand::thread_rng(); diff --git a/plonky2/src/gates/low_degree_interpolation.rs b/plonky2/src/gates/low_degree_interpolation.rs index b7307470..845da5ab 100644 --- a/plonky2/src/gates/low_degree_interpolation.rs +++ b/plonky2/src/gates/low_degree_interpolation.rs @@ -443,7 +443,7 @@ mod tests { .take(gate.num_points() - 2) .flat_map(|ff| ff.0), ); - v.iter().map(|&x| x.into()).collect::>() + v.iter().map(|&x| x.into()).collect() } // Get a working row for LowDegreeInterpolationGate. diff --git a/plonky2/src/gates/mod.rs b/plonky2/src/gates/mod.rs index 177db7cf..163a7dac 100644 --- a/plonky2/src/gates/mod.rs +++ b/plonky2/src/gates/mod.rs @@ -11,7 +11,6 @@ pub mod binary_arithmetic; pub mod binary_subtraction; pub mod comparison; pub mod constant; -// pub mod curve_double; pub mod exponentiation; pub mod gate; pub mod gate_tree; @@ -24,6 +23,7 @@ pub mod poseidon; pub(crate) mod poseidon_mds; pub(crate) mod public_input; pub mod random_access; +pub mod range_check_u32; pub mod reducing; pub mod reducing_extension; pub mod subtraction_u32; diff --git a/plonky2/src/gates/multiplication_extension.rs b/plonky2/src/gates/multiplication_extension.rs index 9ccfe637..54629a47 100644 --- a/plonky2/src/gates/multiplication_extension.rs +++ b/plonky2/src/gates/multiplication_extension.rs @@ -125,7 +125,7 @@ impl, const D: usize> Gate for MulExtensionGa ); g }) - .collect::>() + .collect() } fn num_wires(&self) -> usize { diff --git a/plonky2/src/gates/random_access.rs b/plonky2/src/gates/random_access.rs index 77359a19..6379f99f 100644 --- a/plonky2/src/gates/random_access.rs +++ b/plonky2/src/gates/random_access.rs @@ -209,7 +209,7 @@ impl, const D: usize> Gate for RandomAccessGa ); g }) - .collect::>() + .collect() } fn num_wires(&self) -> usize { diff --git a/plonky2/src/gates/range_check_u32.rs b/plonky2/src/gates/range_check_u32.rs new file mode 100644 index 00000000..2533b51f --- /dev/null +++ b/plonky2/src/gates/range_check_u32.rs @@ -0,0 +1,305 @@ +use std::marker::PhantomData; + +use crate::field::extension_field::target::ExtensionTarget; +use crate::field::extension_field::Extendable; +use crate::field::field_types::{Field, RichField}; +use crate::gates::gate::Gate; +use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator}; +use crate::iop::target::Target; +use crate::iop::witness::{PartitionWitness, Witness}; +use crate::plonk::circuit_builder::CircuitBuilder; +use crate::plonk::plonk_common::{reduce_with_powers, reduce_with_powers_ext_recursive}; +use crate::plonk::vars::{EvaluationTargets, EvaluationVars, 
EvaluationVarsBase}; +use crate::util::ceil_div_usize; + +/// A gate which can decompose a number into base B little-endian limbs. +#[derive(Copy, Clone, Debug)] +pub struct U32RangeCheckGate, const D: usize> { + pub num_input_limbs: usize, + _phantom: PhantomData, +} + +impl, const D: usize> U32RangeCheckGate { + pub fn new(num_input_limbs: usize) -> Self { + Self { + num_input_limbs, + _phantom: PhantomData, + } + } + + pub const AUX_LIMB_BITS: usize = 3; + pub const BASE: usize = 1 << Self::AUX_LIMB_BITS; + + fn aux_limbs_per_input_limb(&self) -> usize { + ceil_div_usize(32, Self::AUX_LIMB_BITS) + } + pub fn wire_ith_input_limb(&self, i: usize) -> usize{ + debug_assert!(i < self.num_input_limbs); + i + } + pub fn wire_ith_input_limb_jth_aux_limb(&self, i: usize, j: usize) -> usize { + debug_assert!(i < self.num_input_limbs); + debug_assert!(j < self.aux_limbs_per_input_limb()); + self.num_input_limbs + self.aux_limbs_per_input_limb() * i + j + } +} + +impl, const D: usize> Gate for U32RangeCheckGate{ + fn id(&self) -> String { + format!("{:?}", self) + } + + fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + let base = F::Extension::from_canonical_usize(Self::BASE); + for i in 0..self.num_input_limbs { + let input_limb = vars.local_wires[self.wire_ith_input_limb(i)]; + let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()).map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]).collect(); + let computed_sum = reduce_with_powers(&aux_limbs, base); + + constraints.push(computed_sum - input_limb); + for aux_limb in aux_limbs { + constraints.push( + (0..Self::BASE) + .map(|i| aux_limb - F::Extension::from_canonical_usize(i)) + .product(), + ); + } + + } + + constraints + } + + fn eval_unfiltered_base(&self, vars: EvaluationVarsBase) -> Vec { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + let base = F::from_canonical_usize(Self::BASE); + for i in 0..self.num_input_limbs { + let input_limb = vars.local_wires[self.wire_ith_input_limb(i)]; + let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()).map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]).collect(); + let computed_sum = reduce_with_powers(&aux_limbs, base); + + constraints.push(computed_sum - input_limb); + for aux_limb in aux_limbs { + constraints.push( + (0..Self::BASE) + .map(|i| aux_limb - F::from_canonical_usize(i)) + .product(), + ); + } + + } + + constraints + } + + fn eval_unfiltered_recursively( + &self, + builder: &mut CircuitBuilder, + vars: EvaluationTargets, + ) -> Vec> { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + let base = builder.constant(F::from_canonical_usize(Self::BASE)); + for i in 0..self.num_input_limbs { + let input_limb = vars.local_wires[self.wire_ith_input_limb(i)]; + let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()).map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]).collect(); + let computed_sum = reduce_with_powers_ext_recursive(builder, &aux_limbs, base); + + constraints.push(builder.sub_extension(computed_sum, input_limb)); + for aux_limb in aux_limbs { + constraints.push({ + let mut acc = builder.one_extension(); + (0..Self::BASE).for_each(|i| { + // We update our accumulator as: + // acc' = acc (x - i) + // = acc x + (-i) acc + // Since -i is constant, we can do this in one arithmetic_extension call. 
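Stepping back from the recursive constraint code: U32RangeCheckGate splits every claimed u32 limb into ceil(32 / 3) = 11 auxiliary limbs of 3 bits, recombines them with powers of 8 to match the input wire, and pins each auxiliary limb to [0, 8) with the product (x)(x-1)...(x-7), which is what sets the gate's degree at 8. A standalone sketch of the witness decomposition (plain integers, not the gate API):

/// Split a u32 into 11 base-8 digits, least significant first, as the generator does.
fn range_check_witness(input: u32) -> Vec<u8> {
    let mut v = input as u64;
    (0..11).map(|_| { let limb = (v % 8) as u8; v /= 8; limb }).collect()
}

fn recombine(limbs: &[u8]) -> u64 {
    limbs.iter().rev().fold(0u64, |acc, &l| acc * 8 + l as u64)
}

fn main() {
    let x = 0xdead_beefu32;
    let limbs = range_check_witness(x);
    assert_eq!(limbs.len(), 11);
    assert!(limbs.iter().all(|&l| l < 8));
    assert_eq!(recombine(&limbs), x as u64);
}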
+ let neg_i = -F::from_canonical_usize(i); + acc = builder.arithmetic_extension(F::ONE, neg_i, acc, aux_limb, acc) + }); + acc + }); + } + + } + + constraints + } + + fn generators( + &self, + gate_index: usize, + _local_constants: &[F], + ) -> Vec>> { + let gen = U32RangeCheckGenerator { + gate: self.clone(), + gate_index, + }; + vec![Box::new(gen.adapter())] + } + + fn num_wires(&self) -> usize { + self.num_input_limbs * (1 + self.aux_limbs_per_input_limb()) + } + + fn num_constants(&self) -> usize { + 0 + } + + // Bounded by the range-check (x-0)*(x-1)*...*(x-BASE+1). + fn degree(&self) -> usize { + Self::BASE + } + + // 1 for checking the each sum of aux limbs, plus a range check for each aux limb. + fn num_constraints(&self) -> usize { + self.num_input_limbs * (1 + self.aux_limbs_per_input_limb()) + } +} + +#[derive(Debug)] +pub struct U32RangeCheckGenerator, const D: usize> { + gate: U32RangeCheckGate, + gate_index: usize, +} + +impl, const D: usize> SimpleGenerator for U32RangeCheckGenerator { + fn dependencies(&self) -> Vec { + let num_input_limbs = self.gate.num_input_limbs; + (0..num_input_limbs).map(|i| Target::wire(self.gate_index, self.gate.wire_ith_input_limb(i))).collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let num_input_limbs = self.gate.num_input_limbs; + for i in 0..num_input_limbs { + let sum_value = witness + .get_target(Target::wire(self.gate_index, self.gate.wire_ith_input_limb(i))) + .to_canonical_u64() as u32; + + let base = U32RangeCheckGate::::BASE as u32; + let limbs = (0..self.gate.aux_limbs_per_input_limb()) + .map(|j| Target::wire(self.gate_index, self.gate.wire_ith_input_limb_jth_aux_limb(i, j))); + let limbs_value = (0..self.gate.aux_limbs_per_input_limb()) + .scan(sum_value, |acc, _| { + let tmp = *acc % base; + *acc /= base; + Some(F::from_canonical_u32(tmp)) + }) + .collect::>(); + + for (b, b_value) in limbs.zip(limbs_value) { + out_buffer.set_target(b, b_value); + } + } + + } +} + +#[cfg(test)] +mod tests { + use std::marker::PhantomData; + + use anyhow::Result; + use itertools::unfold; + use rand::Rng; + + use crate::field::extension_field::quartic::QuarticExtension; + use crate::field::field_types::Field; + use crate::field::goldilocks_field::GoldilocksField; + use crate::gates::gate::Gate; + use crate::gates::gate_testing::{test_eval_fns, test_low_degree}; + use crate::gates::range_check_u32::U32RangeCheckGate; + use crate::hash::hash_types::HashOut; + use crate::plonk::vars::EvaluationVars; + use crate::util::ceil_div_usize; + + #[test] + fn low_degree() { + test_low_degree::(U32RangeCheckGate::new(8)) + } + + #[test] + fn eval_fns() -> Result<()> { + test_eval_fns::(U32RangeCheckGate::new(8)) + } + + fn test_gate_constraint(input_limbs: Vec) { + type F = GoldilocksField; + type FF = QuarticExtension; + const D: usize = 4; + const AUX_LIMB_BITS: usize = 3; + const BASE: usize = 1 << AUX_LIMB_BITS; + const AUX_LIMBS_PER_INPUT_LIMB: usize = ceil_div_usize(32, AUX_LIMB_BITS); + + fn get_wires(input_limbs: Vec) -> Vec { + let num_input_limbs = input_limbs.len(); + let mut v = Vec::new(); + + for i in 0..num_input_limbs { + let input_limb = input_limbs[i]; + + let split_to_limbs = |mut val, num| { + unfold((), move |_| { + let ret = val % (BASE as u64); + val /= BASE as u64; + Some(ret) + }) + .take(num) + .map(F::from_canonical_u64) + }; + + let mut aux_limbs: Vec<_> = + split_to_limbs(input_limb, AUX_LIMBS_PER_INPUT_LIMB).collect(); + + v.append(&mut aux_limbs); + } + + input_limbs.iter() + 
.cloned() + .map(F::from_canonical_u64) + .chain(v.iter().cloned()) + .map(|x| x.into()) + .collect() + } + + let gate = U32RangeCheckGate:: { + num_input_limbs: 8, + _phantom: PhantomData, + }; + + let vars = EvaluationVars { + local_constants: &[], + local_wires: &get_wires(input_limbs), + public_inputs_hash: &HashOut::rand(), + }; + + assert!( + gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()), + "Gate constraints are not satisfied." + ); + } + + #[test] + fn test_gate_constraint_good() { + let mut rng = rand::thread_rng(); + let input_limbs: Vec<_> = (0..8) + .map(|_| rng.gen::() as u64) + .collect(); + + test_gate_constraint(input_limbs); + } + + #[test] + #[should_panic] + fn test_gate_constraint_bad() { + let mut rng = rand::thread_rng(); + let input_limbs: Vec<_> = (0..8) + .map(|_| rng.gen()) + .collect(); + + test_gate_constraint(input_limbs); + } +} diff --git a/plonky2/src/gates/subtraction_u32.rs b/plonky2/src/gates/subtraction_u32.rs index fa817ce4..ffb2e2cb 100644 --- a/plonky2/src/gates/subtraction_u32.rs +++ b/plonky2/src/gates/subtraction_u32.rs @@ -419,7 +419,7 @@ mod tests { v0.iter() .chain(v1.iter()) .map(|&x| x.into()) - .collect::>() + .collect() } let mut rng = rand::thread_rng(); From 2ddfb03aeaaab817bb41c74b764072559d85816c Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Wed, 19 Jan 2022 13:25:00 -0800 Subject: [PATCH 067/143] various cleanup --- plonky2/src/gadgets/nonnative.rs | 4 +- plonky2/src/gates/add_many_u32.rs | 5 +- plonky2/src/gates/range_check_u32.rs | 70 ++++++++++++++++------------ plonky2/src/gates/subtraction_u32.rs | 5 +- 4 files changed, 45 insertions(+), 39 deletions(-) diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 31b3cb1b..5107be29 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -173,7 +173,9 @@ impl, const D: usize> CircuitBuilder { ) -> NonNativeTarget { let prod = self.add_virtual_nonnative_target::(); let modulus = self.constant_biguint(&FF::order()); - let overflow = self.add_virtual_biguint_target(a.value.num_limbs() + b.value.num_limbs() - modulus.num_limbs()); + let overflow = self.add_virtual_biguint_target( + a.value.num_limbs() + b.value.num_limbs() - modulus.num_limbs(), + ); self.add_simple_generator(NonNativeMultiplicationGenerator:: { a: a.clone(), diff --git a/plonky2/src/gates/add_many_u32.rs b/plonky2/src/gates/add_many_u32.rs index 01c7ed30..9f67c827 100644 --- a/plonky2/src/gates/add_many_u32.rs +++ b/plonky2/src/gates/add_many_u32.rs @@ -423,10 +423,7 @@ mod tests { v1.append(&mut carry_limbs); } - v0.iter() - .chain(v1.iter()) - .map(|&x| x.into()) - .collect() + v0.iter().chain(v1.iter()).map(|&x| x.into()).collect() } let mut rng = rand::thread_rng(); diff --git a/plonky2/src/gates/range_check_u32.rs b/plonky2/src/gates/range_check_u32.rs index 2533b51f..72e5ad4b 100644 --- a/plonky2/src/gates/range_check_u32.rs +++ b/plonky2/src/gates/range_check_u32.rs @@ -29,11 +29,11 @@ impl, const D: usize> U32RangeCheckGate { pub const AUX_LIMB_BITS: usize = 3; pub const BASE: usize = 1 << Self::AUX_LIMB_BITS; - + fn aux_limbs_per_input_limb(&self) -> usize { ceil_div_usize(32, Self::AUX_LIMB_BITS) } - pub fn wire_ith_input_limb(&self, i: usize) -> usize{ + pub fn wire_ith_input_limb(&self, i: usize) -> usize { debug_assert!(i < self.num_input_limbs); i } @@ -44,7 +44,7 @@ impl, const D: usize> U32RangeCheckGate { } } -impl, const D: usize> Gate for U32RangeCheckGate{ +impl, const D: usize> Gate for U32RangeCheckGate { fn id(&self) -> String 
{ format!("{:?}", self) } @@ -55,7 +55,9 @@ impl, const D: usize> Gate for U32RangeCheckG let base = F::Extension::from_canonical_usize(Self::BASE); for i in 0..self.num_input_limbs { let input_limb = vars.local_wires[self.wire_ith_input_limb(i)]; - let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()).map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]).collect(); + let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()) + .map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]) + .collect(); let computed_sum = reduce_with_powers(&aux_limbs, base); constraints.push(computed_sum - input_limb); @@ -66,7 +68,6 @@ impl, const D: usize> Gate for U32RangeCheckG .product(), ); } - } constraints @@ -78,7 +79,9 @@ impl, const D: usize> Gate for U32RangeCheckG let base = F::from_canonical_usize(Self::BASE); for i in 0..self.num_input_limbs { let input_limb = vars.local_wires[self.wire_ith_input_limb(i)]; - let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()).map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]).collect(); + let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()) + .map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]) + .collect(); let computed_sum = reduce_with_powers(&aux_limbs, base); constraints.push(computed_sum - input_limb); @@ -89,7 +92,6 @@ impl, const D: usize> Gate for U32RangeCheckG .product(), ); } - } constraints @@ -105,7 +107,9 @@ impl, const D: usize> Gate for U32RangeCheckG let base = builder.constant(F::from_canonical_usize(Self::BASE)); for i in 0..self.num_input_limbs { let input_limb = vars.local_wires[self.wire_ith_input_limb(i)]; - let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()).map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]).collect(); + let aux_limbs: Vec<_> = (0..self.aux_limbs_per_input_limb()) + .map(|j| vars.local_wires[self.wire_ith_input_limb_jth_aux_limb(i, j)]) + .collect(); let computed_sum = reduce_with_powers_ext_recursive(builder, &aux_limbs, base); constraints.push(builder.sub_extension(computed_sum, input_limb)); @@ -123,7 +127,6 @@ impl, const D: usize> Gate for U32RangeCheckG acc }); } - } constraints @@ -166,22 +169,33 @@ pub struct U32RangeCheckGenerator, const D: usize> gate_index: usize, } -impl, const D: usize> SimpleGenerator for U32RangeCheckGenerator { +impl, const D: usize> SimpleGenerator + for U32RangeCheckGenerator +{ fn dependencies(&self) -> Vec { let num_input_limbs = self.gate.num_input_limbs; - (0..num_input_limbs).map(|i| Target::wire(self.gate_index, self.gate.wire_ith_input_limb(i))).collect() + (0..num_input_limbs) + .map(|i| Target::wire(self.gate_index, self.gate.wire_ith_input_limb(i))) + .collect() } fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { let num_input_limbs = self.gate.num_input_limbs; for i in 0..num_input_limbs { let sum_value = witness - .get_target(Target::wire(self.gate_index, self.gate.wire_ith_input_limb(i))) + .get_target(Target::wire( + self.gate_index, + self.gate.wire_ith_input_limb(i), + )) .to_canonical_u64() as u32; let base = U32RangeCheckGate::::BASE as u32; - let limbs = (0..self.gate.aux_limbs_per_input_limb()) - .map(|j| Target::wire(self.gate_index, self.gate.wire_ith_input_limb_jth_aux_limb(i, j))); + let limbs = (0..self.gate.aux_limbs_per_input_limb()).map(|j| { + Target::wire( + self.gate_index, + self.gate.wire_ith_input_limb_jth_aux_limb(i, j), + ) + }); let limbs_value = (0..self.gate.aux_limbs_per_input_limb()) 
.scan(sum_value, |acc, _| { let tmp = *acc % base; @@ -194,7 +208,6 @@ impl, const D: usize> SimpleGenerator for U32Ran out_buffer.set_target(b, b_value); } } - } } @@ -233,14 +246,14 @@ mod tests { const AUX_LIMB_BITS: usize = 3; const BASE: usize = 1 << AUX_LIMB_BITS; const AUX_LIMBS_PER_INPUT_LIMB: usize = ceil_div_usize(32, AUX_LIMB_BITS); - + fn get_wires(input_limbs: Vec) -> Vec { let num_input_limbs = input_limbs.len(); let mut v = Vec::new(); for i in 0..num_input_limbs { let input_limb = input_limbs[i]; - + let split_to_limbs = |mut val, num| { unfold((), move |_| { let ret = val % (BASE as u64); @@ -253,16 +266,17 @@ mod tests { let mut aux_limbs: Vec<_> = split_to_limbs(input_limb, AUX_LIMBS_PER_INPUT_LIMB).collect(); - + v.append(&mut aux_limbs); } - input_limbs.iter() - .cloned() - .map(F::from_canonical_u64) - .chain(v.iter().cloned()) - .map(|x| x.into()) - .collect() + input_limbs + .iter() + .cloned() + .map(F::from_canonical_u64) + .chain(v.iter().cloned()) + .map(|x| x.into()) + .collect() } let gate = U32RangeCheckGate:: { @@ -285,9 +299,7 @@ mod tests { #[test] fn test_gate_constraint_good() { let mut rng = rand::thread_rng(); - let input_limbs: Vec<_> = (0..8) - .map(|_| rng.gen::() as u64) - .collect(); + let input_limbs: Vec<_> = (0..8).map(|_| rng.gen::() as u64).collect(); test_gate_constraint(input_limbs); } @@ -296,9 +308,7 @@ mod tests { #[should_panic] fn test_gate_constraint_bad() { let mut rng = rand::thread_rng(); - let input_limbs: Vec<_> = (0..8) - .map(|_| rng.gen()) - .collect(); + let input_limbs: Vec<_> = (0..8).map(|_| rng.gen()).collect(); test_gate_constraint(input_limbs); } diff --git a/plonky2/src/gates/subtraction_u32.rs b/plonky2/src/gates/subtraction_u32.rs index ffb2e2cb..f083db5a 100644 --- a/plonky2/src/gates/subtraction_u32.rs +++ b/plonky2/src/gates/subtraction_u32.rs @@ -416,10 +416,7 @@ mod tests { v1.append(&mut output_limbs); } - v0.iter() - .chain(v1.iter()) - .map(|&x| x.into()) - .collect() + v0.iter().chain(v1.iter()).map(|&x| x.into()).collect() } let mut rng = rand::thread_rng(); From c392606a9a285b89773248a52118972b221f80bd Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 20 Jan 2022 15:44:03 -0800 Subject: [PATCH 068/143] optimizations and cleanup --- plonky2/src/gadgets/arithmetic.rs | 6 ++ plonky2/src/gadgets/curve.rs | 85 +++++++++++----------------- plonky2/src/gadgets/ecdsa.rs | 3 +- plonky2/src/gadgets/nonnative.rs | 16 ++++-- plonky2/src/gadgets/range_check.rs | 19 +++++++ plonky2/src/gates/range_check_u32.rs | 4 +- plonky2/src/plonk/circuit_builder.rs | 8 ++- plonky2/src/plonk/circuit_data.rs | 9 ++- 8 files changed, 87 insertions(+), 63 deletions(-) diff --git a/plonky2/src/gadgets/arithmetic.rs b/plonky2/src/gadgets/arithmetic.rs index e280fa5e..90f3b090 100644 --- a/plonky2/src/gadgets/arithmetic.rs +++ b/plonky2/src/gadgets/arithmetic.rs @@ -315,6 +315,12 @@ impl, const D: usize> CircuitBuilder { let x_ext = self.convert_to_ext(x); self.inverse_extension(x_ext).0[0] } + + pub fn not(&mut self, b: BoolTarget) -> BoolTarget { + let one = self.one(); + let res = self.sub(one, b.target); + BoolTarget::new_unsafe(res) + } } /// Represents a base arithmetic operation in the circuit. Used to memoize results. 
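Note (illustrative, not part of the patch series): the new `not` gadget above simply wraps `1 - b` as a `BoolTarget`. Combined with multiplication by a bool it gives the two-way select pattern `b*x + (1 - b)*y` that the reworked `curve_scalar_mul` in the next diff applies limb-wise via `mul_nonnative_by_bool`. A minimal sketch of that pattern over plain `Target`s, assuming the usual plonky2 imports (the helper name `select_by_bool` is invented here; `not`, `mul`, and `add` are existing `CircuitBuilder` methods):

fn select_by_bool<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    b: BoolTarget,
    x: Target,
    y: Target,
) -> Target {
    // With b constrained to {0, 1}: b*x + (1 - b)*y is x when b = 1 and y when b = 0.
    let not_b = builder.not(b);
    let if_true = builder.mul(b.target, x);
    let if_false = builder.mul(not_b.target, y);
    builder.add(if_true, if_false)
}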
diff --git a/plonky2/src/gadgets/curve.rs b/plonky2/src/gadgets/curve.rs index 907aa5e3..92f45242 100644 --- a/plonky2/src/gadgets/curve.rs +++ b/plonky2/src/gadgets/curve.rs @@ -100,35 +100,21 @@ impl, const D: usize> CircuitBuilder { p1: &AffinePointTarget, p2: &AffinePointTarget, ) -> AffinePointTarget { - let before = self.num_gates(); let AffinePointTarget { x: x1, y: y1 } = p1; let AffinePointTarget { x: x2, y: y2 } = p2; let u = self.sub_nonnative(y2, y1); - let uu = self.mul_nonnative(&u, &u); let v = self.sub_nonnative(x2, x1); - let vv = self.mul_nonnative(&v, &v); - let vvv = self.mul_nonnative(&v, &vv); - let r = self.mul_nonnative(&vv, x1); - let diff = self.sub_nonnative(&uu, &vvv); - let r2 = self.add_nonnative(&r, &r); - let a = self.sub_nonnative(&diff, &r2); - let x3 = self.mul_nonnative(&v, &a); + let v_inv = self.inv_nonnative(&v); + let s = self.mul_nonnative(&u, &v_inv); + let s_squared = self.mul_nonnative(&s, &s); + let x_sum = self.add_nonnative(x2, x1); + let x3 = self.sub_nonnative(&s_squared, &x_sum); + let x_diff = self.sub_nonnative(&x1, &x3); + let prod = self.mul_nonnative(&s, &x_diff); + let y3 = self.sub_nonnative(&prod, &y1); - let r_a = self.sub_nonnative(&r, &a); - let y3_first = self.mul_nonnative(&u, &r_a); - let y3_second = self.mul_nonnative(&vvv, y1); - let y3 = self.sub_nonnative(&y3_first, &y3_second); - - let z3_inv = self.inv_nonnative(&vvv); - let x3_norm = self.mul_nonnative(&x3, &z3_inv); - let y3_norm = self.mul_nonnative(&y3, &z3_inv); - - println!("NUM GATES: {}", self.num_gates() - before); - AffinePointTarget { - x: x3_norm, - y: y3_norm, - } + AffinePointTarget { x: x3, y: y3 } } pub fn curve_scalar_mul( @@ -136,11 +122,7 @@ impl, const D: usize> CircuitBuilder { p: &AffinePointTarget, n: &NonNativeTarget, ) -> AffinePointTarget { - let one = self.constant_nonnative(C::BaseField::ONE); - let bits = self.split_nonnative_to_bits(n); - let bits_as_base: Vec> = - bits.iter().map(|b| self.bool_to_nonnative(b)).collect(); let rando = (CurveScalar(C::ScalarField::rand()) * C::GENERATOR_PROJECTIVE).to_affine(); let randot = self.constant_affine_point(rando); @@ -151,15 +133,15 @@ impl, const D: usize> CircuitBuilder { let mut two_i_times_p = self.add_virtual_affine_point_target(); self.connect_affine_point(p, &two_i_times_p); - for bit in bits_as_base.iter() { - let not_bit = self.sub_nonnative(&one, bit); + for &bit in bits.iter() { + let not_bit = self.not(bit); let result_plus_2_i_p = self.curve_add(&result, &two_i_times_p); - let new_x_if_bit = self.mul_nonnative(bit, &result_plus_2_i_p.x); - let new_x_if_not_bit = self.mul_nonnative(¬_bit, &result.x); - let new_y_if_bit = self.mul_nonnative(bit, &result_plus_2_i_p.y); - let new_y_if_not_bit = self.mul_nonnative(¬_bit, &result.y); + let new_x_if_bit = self.mul_nonnative_by_bool(&result_plus_2_i_p.x, bit); + let new_x_if_not_bit = self.mul_nonnative_by_bool(&result.x, not_bit); + let new_y_if_bit = self.mul_nonnative_by_bool(&result_plus_2_i_p.y, bit); + let new_y_if_not_bit = self.mul_nonnative_by_bool(&result.y, not_bit); let new_x = self.add_nonnative(&new_x_if_bit, &new_x_if_not_bit); let new_y = self.add_nonnative(&new_y_if_bit, &new_y_if_not_bit); @@ -179,6 +161,8 @@ impl, const D: usize> CircuitBuilder { #[cfg(test)] mod tests { + use std::ops::Neg; + use anyhow::Result; use plonky2_field::field_types::Field; use plonky2_field::secp256k1_base::Secp256K1Base; @@ -198,7 +182,7 @@ mod tests { type C = PoseidonGoldilocksConfig; type F = >::F; - let config = 
CircuitConfig::standard_recursion_config(); + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); @@ -223,7 +207,7 @@ mod tests { type C = PoseidonGoldilocksConfig; type F = >::F; - let config = CircuitConfig::standard_recursion_config(); + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); @@ -250,7 +234,7 @@ mod tests { type C = PoseidonGoldilocksConfig; type F = >::F; - let config = CircuitConfig::standard_recursion_config(); + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); @@ -287,7 +271,7 @@ mod tests { type C = PoseidonGoldilocksConfig; type F = >::F; - let config = CircuitConfig::standard_recursion_config(); + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); @@ -317,27 +301,25 @@ mod tests { type C = PoseidonGoldilocksConfig; type F = >::F; - let config = CircuitConfig { - num_routed_wires: 33, - ..CircuitConfig::standard_recursion_config() - }; + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); let g = Secp256K1::GENERATOR_AFFINE; let five = Secp256K1Scalar::from_canonical_usize(5); - let five_scalar = CurveScalar::(five); - let five_g = (five_scalar * g.to_projective()).to_affine(); - let five_g_expected = builder.constant_affine_point(five_g); - builder.curve_assert_valid(&five_g_expected); + let neg_five = five.neg(); + let neg_five_scalar = CurveScalar::(neg_five); + let neg_five_g = (neg_five_scalar * g.to_projective()).to_affine(); + let neg_five_g_expected = builder.constant_affine_point(neg_five_g); + builder.curve_assert_valid(&neg_five_g_expected); let g_target = builder.constant_affine_point(g); - let five_target = builder.constant_nonnative(five); - let five_g_actual = builder.curve_scalar_mul(&g_target, &five_target); - builder.curve_assert_valid(&five_g_actual); + let neg_five_target = builder.constant_nonnative(neg_five); + let neg_five_g_actual = builder.curve_scalar_mul(&g_target, &neg_five_target); + builder.curve_assert_valid(&neg_five_g_actual); - builder.connect_affine_point(&five_g_expected, &five_g_actual); + builder.connect_affine_point(&neg_five_g_expected, &neg_five_g_actual); let data = builder.build::(); let proof = data.prove(pw).unwrap(); @@ -351,10 +333,7 @@ mod tests { type C = PoseidonGoldilocksConfig; type F = >::F; - let config = CircuitConfig { - num_routed_wires: 33, - ..CircuitConfig::standard_recursion_config() - }; + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); diff --git a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index 362fbc6d..71214ae7 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -96,7 +96,7 @@ mod tests { const D: usize = 4; type C = Secp256K1; - let config = CircuitConfig::standard_recursion_config(); + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); @@ -123,7 +123,6 @@ mod tests { let data = builder.build(); let proof = data.prove(pw).unwrap(); - verify(proof, &data.verifier_only, &data.common) } } diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 5107be29..a46ee376 100644 
--- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -125,6 +125,9 @@ impl, const D: usize> CircuitBuilder { _phantom: PhantomData, }); + self.range_check_u32(sum.value.limbs.clone()); + self.range_check_u32(vec![overflow]); + let sum_expected = summands .iter() .fold(self.zero_biguint(), |a, b| self.add_biguint(&a, &b.value)); @@ -157,6 +160,9 @@ impl, const D: usize> CircuitBuilder { _phantom: PhantomData, }); + self.range_check_u32(diff.value.limbs.clone()); + self.assert_bool(overflow); + let diff_plus_b = self.add_biguint(&diff.value, &b.value); let modulus = self.constant_biguint(&FF::order()); let mod_times_overflow = self.mul_biguint_by_bool(&modulus, overflow); @@ -185,6 +191,9 @@ impl, const D: usize> CircuitBuilder { _phantom: PhantomData, }); + self.range_check_u32(prod.value.limbs.clone()); + self.range_check_u32(overflow.limbs.clone()); + let prod_expected = self.mul_biguint(&a.value, &b.value); let mod_times_overflow = self.mul_biguint(&modulus, &overflow); @@ -202,12 +211,11 @@ impl, const D: usize> CircuitBuilder { return to_mul[0].clone(); } - let mut result = self.mul_biguint(&to_mul[0].value, &to_mul[1].value); + let mut accumulator = self.mul_nonnative(&to_mul[0], &to_mul[1]); for i in 2..to_mul.len() { - result = self.mul_biguint(&result, &to_mul[i].value); + accumulator = self.mul_nonnative(&accumulator, &to_mul[i]); } - - self.reduce(&result) + accumulator } pub fn neg_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { diff --git a/plonky2/src/gadgets/range_check.rs b/plonky2/src/gadgets/range_check.rs index f8ada106..5a90afd0 100644 --- a/plonky2/src/gadgets/range_check.rs +++ b/plonky2/src/gadgets/range_check.rs @@ -41,6 +41,25 @@ impl, const D: usize> CircuitBuilder { (low, high) } + + pub fn range_check_u32(&mut self, vals: Vec) { + let num_input_limbs = vals.len(); + let gate = U32RangeCheckGate::::new(num_input_limbs); + let gate_index = self.add_gate(gate, vec![]); + + for i in 0..num_input_limbs { + self.connect( + Target::wire(gate_index, gate.wire_ith_input_limb(i)), + vals[i].0, + ); + } + } + + pub fn assert_bool(&mut self, b: BoolTarget) { + let z = self.mul_sub(b.target, b.target, b.target); + let zero = self.zero(); + self.connect(z, zero); + } } #[derive(Debug)] diff --git a/plonky2/src/gates/range_check_u32.rs b/plonky2/src/gates/range_check_u32.rs index 72e5ad4b..83cdb223 100644 --- a/plonky2/src/gates/range_check_u32.rs +++ b/plonky2/src/gates/range_check_u32.rs @@ -27,7 +27,7 @@ impl, const D: usize> U32RangeCheckGate { } } - pub const AUX_LIMB_BITS: usize = 3; + pub const AUX_LIMB_BITS: usize = 2; pub const BASE: usize = 1 << Self::AUX_LIMB_BITS; fn aux_limbs_per_input_limb(&self) -> usize { @@ -243,7 +243,7 @@ mod tests { type F = GoldilocksField; type FF = QuarticExtension; const D: usize = 4; - const AUX_LIMB_BITS: usize = 3; + const AUX_LIMB_BITS: usize = 2; const BASE: usize = 1 << AUX_LIMB_BITS; const AUX_LIMBS_PER_INPUT_LIMB: usize = ceil_div_usize(32, AUX_LIMB_BITS); diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index d9f79f59..38091bc9 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -205,6 +205,12 @@ impl, const D: usize> CircuitBuilder { BoolTarget::new_unsafe(self.add_virtual_target()) } + pub fn add_virtual_bool_target_safe(&mut self) -> BoolTarget { + let b = BoolTarget::new_unsafe(self.add_virtual_target()); + self.assert_bool(b); + b + } + /// Adds a gate to the circuit, and returns its index. 
pub fn add_gate>(&mut self, gate_type: G, constants: Vec) -> usize { self.check_gate_compatibility(&gate_type); @@ -235,7 +241,7 @@ impl, const D: usize> CircuitBuilder { fn check_gate_compatibility>(&self, gate: &G) { assert!( gate.num_wires() <= self.config.num_wires, - "{:?} requires {} wires, but our GateConfig has only {}", + "{:?} requires {} wires, but our CircuitConfig has only {}", gate.id(), gate.num_wires(), self.config.num_wires diff --git a/plonky2/src/plonk/circuit_data.rs b/plonky2/src/plonk/circuit_data.rs index dd7ebc25..7e667b8d 100644 --- a/plonky2/src/plonk/circuit_data.rs +++ b/plonky2/src/plonk/circuit_data.rs @@ -49,7 +49,7 @@ pub struct CircuitConfig { impl Default for CircuitConfig { fn default() -> Self { - CircuitConfig::standard_recursion_config() + Self::standard_recursion_config() } } @@ -79,6 +79,13 @@ impl CircuitConfig { } } + pub fn standard_ecc_config() -> Self { + Self { + num_wires: 136, + ..Self::standard_recursion_config() + } + } + pub fn standard_recursion_zk_config() -> Self { CircuitConfig { zero_knowledge: true, From e116ab7809a9add16ec4b1e6432263b76f9d05c9 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 20 Jan 2022 15:49:29 -0800 Subject: [PATCH 069/143] fix --- plonky2/src/gadgets/ecdsa.rs | 1 + plonky2/src/gadgets/nonnative.rs | 15 +++++++-------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index 71214ae7..def3b5b4 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -91,6 +91,7 @@ mod tests { use crate::plonk::verifier::verify; #[test] + #[ignore] fn test_ecdsa_circuit() -> Result<()> { type F = GoldilocksField; const D: usize = 4; diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index a46ee376..2c556616 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -518,7 +518,7 @@ mod tests { let y_ff = FF::rand(); let sum_ff = x_ff + y_ff; - let config = CircuitConfig::standard_recursion_config(); + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); @@ -547,8 +547,7 @@ mod tests { let h_ff = FF::rand(); let sum_ff = a_ff + b_ff + c_ff + d_ff + e_ff + f_ff + g_ff + h_ff; - type F = GoldilocksField; - let config = CircuitConfig::standard_recursion_config(); + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); @@ -584,7 +583,7 @@ mod tests { } let diff_ff = x_ff - y_ff; - let config = CircuitConfig::standard_recursion_config(); + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); @@ -610,7 +609,7 @@ mod tests { let y_ff = FF::rand(); let product_ff = x_ff * y_ff; - let config = CircuitConfig::standard_recursion_config(); + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); @@ -630,7 +629,7 @@ mod tests { type FF = Secp256K1Base; type F = GoldilocksField; - let config = CircuitConfig::standard_recursion_config(); + let config = CircuitConfig::standard_ecc_config(); let mut unop_builder = CircuitBuilder::::new(config.clone()); let mut op_builder = CircuitBuilder::::new(config); @@ -668,7 +667,7 @@ mod tests { let x_ff = FF::rand(); let neg_x_ff = -x_ff; - let config = CircuitConfig::standard_recursion_config(); + let config = 
CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); @@ -692,7 +691,7 @@ mod tests { let x_ff = FF::rand(); let inv_x_ff = x_ff.inverse(); - let config = CircuitConfig::standard_recursion_config(); + let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); From 5de2b69558462516659ac39230f3b8c54ec8a6d1 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 20 Jan 2022 15:59:43 -0800 Subject: [PATCH 070/143] 256-bit hashing --- plonky2/src/curve/ecdsa.rs | 19 ++++++++++++------- plonky2/src/gadgets/ecdsa.rs | 2 +- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index 0ed777d9..82eeba08 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -1,4 +1,5 @@ -use itertools::unfold; +use itertools::{unfold, Itertools}; +use num::BigUint; use crate::curve::curve_types::{AffinePoint, Curve, CurveScalar}; use crate::field::field_types::{Field, RichField}; @@ -35,15 +36,19 @@ pub fn hash_to_bits(x: F, num_bits: usize) -> Vec { pub fn hash_to_scalar(x: F, num_bits: usize) -> C::ScalarField { let h_bits = hash_to_bits(x, num_bits); - let h_u32 = h_bits + let h_vals: Vec<_> = h_bits .iter() - .zip(0..32) - .fold(0u32, |acc, (&bit, pow)| acc + (bit as u32) * (2 << pow)); - C::ScalarField::from_canonical_u32(h_u32) + .chunks(32) + .into_iter() + .map(|chunk| { + chunk.enumerate() + .fold(0u32, |acc, (pow, &bit)| acc + (bit as u32) * (2 << pow)) + }).collect(); + C::ScalarField::from_biguint(BigUint::new(h_vals)) } pub fn sign_message(msg: F, sk: ECDSASecretKey) -> ECDSASignature { - let h = hash_to_scalar::(msg, 32); + let h = hash_to_scalar::(msg, 256); let k = C::ScalarField::rand(); let rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); @@ -60,7 +65,7 @@ pub fn verify_message( ) -> bool { let ECDSASignature { r, s } = sig; - let h = hash_to_scalar::(msg, 32); + let h = hash_to_scalar::(msg, 256); let c = s.inverse(); let u1 = h * c; diff --git a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index def3b5b4..03a3807d 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -55,7 +55,7 @@ impl, const D: usize> CircuitBuilder { ) { let ECDSASignatureTarget { r, s } = sig; - let h = self.hash_to_scalar::(msg, 32); + let h = self.hash_to_scalar::(msg, 256); let c = self.inv_nonnative(&s); let u1 = self.mul_nonnative(&h, &c); From 1035438df46df7dcb3d855a907763016a81fd1ba Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Fri, 21 Jan 2022 16:10:25 -0800 Subject: [PATCH 071/143] updated for changes in main --- plonky2/src/curve/ecdsa.rs | 29 ++++--- plonky2/src/gadgets/arithmetic_u32.rs | 14 +++- plonky2/src/gadgets/biguint.rs | 2 +- plonky2/src/gadgets/ecdsa.rs | 21 ++--- plonky2/src/gadgets/mod.rs | 1 - plonky2/src/gadgets/nonnative.rs | 26 +++++-- plonky2/src/gadgets/range_check.rs | 2 + plonky2/src/gates/add_many_u32.rs | 31 +++++--- plonky2/src/gates/mod.rs | 2 - plonky2/src/gates/range_check_u32.rs | 31 +++++--- plonky2/src/iop/generator.rs | 9 +-- plonky2/src/plonk/circuit_builder.rs | 106 -------------------------- 12 files changed, 101 insertions(+), 173 deletions(-) diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index 82eeba08..cce83d4c 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -2,8 +2,10 @@ use itertools::{unfold, Itertools}; use num::BigUint; use 
crate::curve::curve_types::{AffinePoint, Curve, CurveScalar}; -use crate::field::field_types::{Field, RichField}; -use crate::hash::hashing::hash_n_to_1; +use crate::field::field_types::Field; +use crate::hash::hash_types::RichField; +use crate::hash::hashing::{hash_n_to_m, PlonkyPermutation}; +use crate::hash::poseidon::PoseidonPermutation; pub struct ECDSASignature { pub r: C::ScalarField, @@ -21,8 +23,8 @@ pub fn scalar_to_base(x: C::ScalarField) -> C::BaseField { C::BaseField::from_biguint(x.to_biguint()) } -pub fn hash_to_bits(x: F, num_bits: usize) -> Vec { - let hashed = hash_n_to_1(vec![x], true); +pub fn hash_to_bits>(x: F, num_bits: usize) -> Vec { + let hashed = hash_n_to_m::(&vec![x], 1, true)[0]; let mut val = hashed.to_canonical_u64(); unfold((), move |_| { @@ -34,21 +36,26 @@ pub fn hash_to_bits(x: F, num_bits: usize) -> Vec { .collect() } -pub fn hash_to_scalar(x: F, num_bits: usize) -> C::ScalarField { - let h_bits = hash_to_bits(x, num_bits); +pub fn hash_to_scalar>( + x: F, + num_bits: usize, +) -> C::ScalarField { + let h_bits = hash_to_bits::(x, num_bits); let h_vals: Vec<_> = h_bits .iter() .chunks(32) .into_iter() .map(|chunk| { - chunk.enumerate() - .fold(0u32, |acc, (pow, &bit)| acc + (bit as u32) * (2 << pow)) - }).collect(); + chunk + .enumerate() + .fold(0u32, |acc, (pow, &bit)| acc + (bit as u32) * (2 << pow)) + }) + .collect(); C::ScalarField::from_biguint(BigUint::new(h_vals)) } pub fn sign_message(msg: F, sk: ECDSASecretKey) -> ECDSASignature { - let h = hash_to_scalar::(msg, 256); + let h = hash_to_scalar::(msg, 256); let k = C::ScalarField::rand(); let rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); @@ -65,7 +72,7 @@ pub fn verify_message( ) -> bool { let ECDSASignature { r, s } = sig; - let h = hash_to_scalar::(msg, 256); + let h = hash_to_scalar::(msg, 256); let c = s.inverse(); let u1 = h * c; diff --git a/plonky2/src/gadgets/arithmetic_u32.rs b/plonky2/src/gadgets/arithmetic_u32.rs index 9bf78d44..8da01dc1 100644 --- a/plonky2/src/gadgets/arithmetic_u32.rs +++ b/plonky2/src/gadgets/arithmetic_u32.rs @@ -1,8 +1,12 @@ +use std::marker::PhantomData; + use plonky2_field::extension_field::Extendable; +use crate::gates::add_many_u32::U32AddManyGate; use crate::gates::arithmetic_u32::U32ArithmeticGate; use crate::gates::subtraction_u32::U32SubtractionGate; use crate::hash::hash_types::RichField; +use crate::iop::generator::{GeneratedValues, SimpleGenerator}; use crate::iop::target::Target; use crate::iop::witness::{PartitionWitness, Witness}; use crate::plonk::circuit_builder::CircuitBuilder; @@ -243,16 +247,18 @@ mod tests { use anyhow::Result; use rand::{thread_rng, Rng}; - use crate::field::goldilocks_field::GoldilocksField; use crate::iop::witness::PartialWitness; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::CircuitConfig; + use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; use crate::plonk::verifier::verify; #[test] pub fn test_add_many_u32s() -> Result<()> { - type F = GoldilocksField; - const D: usize = 4; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + const NUM_ADDENDS: usize = 15; let config = CircuitConfig::standard_recursion_config(); @@ -276,7 +282,7 @@ mod tests { builder.connect_u32(result_low, expected_low); builder.connect_u32(result_high, expected_high); - let data = builder.build(); + let data = builder.build::(); let proof = data.prove(pw).unwrap(); verify(proof, &data.verifier_only, &data.common) } diff --git 
a/plonky2/src/gadgets/biguint.rs b/plonky2/src/gadgets/biguint.rs index 4f9e1574..51cfcc06 100644 --- a/plonky2/src/gadgets/biguint.rs +++ b/plonky2/src/gadgets/biguint.rs @@ -1,6 +1,6 @@ use std::marker::PhantomData; -use num::{BigUint, Integer}; +use num::{BigUint, FromPrimitive, Integer, Zero}; use plonky2_field::extension_field::Extendable; use crate::gadgets::arithmetic_u32::U32Target; diff --git a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index 03a3807d..cc787eb1 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -2,11 +2,12 @@ use std::marker::PhantomData; use crate::curve::curve_types::Curve; use crate::field::extension_field::Extendable; -use crate::field::field_types::RichField; use crate::gadgets::arithmetic_u32::U32Target; use crate::gadgets::biguint::BigUintTarget; use crate::gadgets::curve::AffinePointTarget; use crate::gadgets::nonnative::NonNativeTarget; +use crate::hash::hash_types::RichField; +use crate::hash::poseidon::PoseidonHash; use crate::iop::target::{BoolTarget, Target}; use crate::plonk::circuit_builder::CircuitBuilder; @@ -21,7 +22,7 @@ pub struct ECDSASignatureTarget { impl, const D: usize> CircuitBuilder { pub fn hash_to_bits(&mut self, x: Target, num_bits: usize) -> Vec { let inputs = vec![x]; - let hashed = self.hash_n_to_m(inputs, 1, true)[0]; + let hashed = self.hash_n_to_m::(inputs, 1, true)[0]; self.split_le(hashed, num_bits) } @@ -82,20 +83,22 @@ mod tests { use crate::curve::ecdsa::{sign_message, ECDSAPublicKey, ECDSASecretKey, ECDSASignature}; use crate::curve::secp256k1::Secp256K1; use crate::field::field_types::Field; - use crate::field::goldilocks_field::GoldilocksField; use crate::field::secp256k1_scalar::Secp256K1Scalar; use crate::gadgets::ecdsa::{ECDSAPublicKeyTarget, ECDSASignatureTarget}; use crate::iop::witness::PartialWitness; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::CircuitConfig; + use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; use crate::plonk::verifier::verify; #[test] #[ignore] fn test_ecdsa_circuit() -> Result<()> { - type F = GoldilocksField; - const D: usize = 4; - type C = Secp256K1; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + type Curve = Secp256K1; let config = CircuitConfig::standard_ecc_config(); @@ -105,8 +108,8 @@ mod tests { let msg = F::rand(); let msg_target = builder.constant(msg); - let sk = ECDSASecretKey::(Secp256K1Scalar::rand()); - let pk = ECDSAPublicKey((CurveScalar(sk.0) * C::GENERATOR_PROJECTIVE).to_affine()); + let sk = ECDSASecretKey::(Secp256K1Scalar::rand()); + let pk = ECDSAPublicKey((CurveScalar(sk.0) * Curve::GENERATOR_PROJECTIVE).to_affine()); let pk_target = ECDSAPublicKeyTarget(builder.constant_affine_point(pk.0)); @@ -122,7 +125,7 @@ mod tests { builder.verify_message(msg_target, sig_target, pk_target); - let data = builder.build(); + let data = builder.build::(); let proof = data.prove(pw).unwrap(); verify(proof, &data.verifier_only, &data.common) } diff --git a/plonky2/src/gadgets/mod.rs b/plonky2/src/gadgets/mod.rs index 5dacdb51..ec4d1263 100644 --- a/plonky2/src/gadgets/mod.rs +++ b/plonky2/src/gadgets/mod.rs @@ -2,7 +2,6 @@ pub mod arithmetic; pub mod arithmetic_extension; pub mod arithmetic_u32; pub mod biguint; -pub mod binary_arithmetic; pub mod curve; pub mod ecdsa; pub mod hash; diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 2c556616..998ce628 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ 
b/plonky2/src/gadgets/nonnative.rs @@ -1,12 +1,14 @@ use std::marker::PhantomData; -use num::{BigUint, Zero}; +use num::{BigUint, Integer, One, Zero}; use plonky2_field::{extension_field::Extendable, field_types::Field}; use plonky2_util::ceil_div_usize; use crate::gadgets::arithmetic_u32::U32Target; -use crate::field::field_types::RichField; -use crate::gadgets::binary_arithmetic::BinaryTarget; +use crate::gadgets::biguint::BigUintTarget; +use crate::hash::hash_types::RichField; +use crate::iop::generator::{GeneratedValues, SimpleGenerator}; +use crate::iop::target::{BoolTarget, Target}; use crate::iop::witness::{PartitionWitness, Witness}; use crate::plonk::circuit_builder::CircuitBuilder; @@ -514,6 +516,7 @@ mod tests { const D: usize = 2; type C = PoseidonGoldilocksConfig; type F = >::F; + let x_ff = FF::rand(); let y_ff = FF::rand(); let sum_ff = x_ff + y_ff; @@ -537,6 +540,10 @@ mod tests { #[test] fn test_nonnative_many_adds() -> Result<()> { type FF = Secp256K1Base; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + let a_ff = FF::rand(); let b_ff = FF::rand(); let c_ff = FF::rand(); @@ -549,7 +556,7 @@ mod tests { let config = CircuitConfig::standard_ecc_config(); let pw = PartialWitness::new(); - let mut builder = CircuitBuilder::::new(config); + let mut builder = CircuitBuilder::::new(config); let a = builder.constant_nonnative(a_ff); let b = builder.constant_nonnative(b_ff); @@ -565,7 +572,7 @@ mod tests { let sum_expected = builder.constant_nonnative(sum_ff); builder.connect_nonnative(&sum, &sum_expected); - let data = builder.build(); + let data = builder.build::(); let proof = data.prove(pw).unwrap(); verify(proof, &data.verifier_only, &data.common) } @@ -576,6 +583,7 @@ mod tests { const D: usize = 2; type C = PoseidonGoldilocksConfig; type F = >::F; + let x_ff = FF::rand(); let mut y_ff = FF::rand(); while y_ff.to_biguint() > x_ff.to_biguint() { @@ -627,11 +635,13 @@ mod tests { fn test_nonnative_many_muls_helper(num: usize) { type FF = Secp256K1Base; + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; - type F = GoldilocksField; let config = CircuitConfig::standard_ecc_config(); - let mut unop_builder = CircuitBuilder::::new(config.clone()); - let mut op_builder = CircuitBuilder::::new(config); + let mut unop_builder = CircuitBuilder::::new(config.clone()); + let mut op_builder = CircuitBuilder::::new(config); let ffs: Vec<_> = (0..num).map(|_| FF::rand()).collect(); diff --git a/plonky2/src/gadgets/range_check.rs b/plonky2/src/gadgets/range_check.rs index 5a90afd0..0776fc68 100644 --- a/plonky2/src/gadgets/range_check.rs +++ b/plonky2/src/gadgets/range_check.rs @@ -1,5 +1,7 @@ use plonky2_field::extension_field::Extendable; +use crate::gadgets::arithmetic_u32::U32Target; +use crate::gates::range_check_u32::U32RangeCheckGate; use crate::hash::hash_types::RichField; use crate::iop::generator::{GeneratedValues, SimpleGenerator}; use crate::iop::target::{BoolTarget, Target}; diff --git a/plonky2/src/gates/add_many_u32.rs b/plonky2/src/gates/add_many_u32.rs index 9f67c827..4f9c4293 100644 --- a/plonky2/src/gates/add_many_u32.rs +++ b/plonky2/src/gates/add_many_u32.rs @@ -1,11 +1,14 @@ use std::marker::PhantomData; use itertools::unfold; +use plonky2_util::ceil_div_usize; -use crate::field::extension_field::target::ExtensionTarget; use crate::field::extension_field::Extendable; -use crate::field::field_types::{Field, RichField}; +use crate::field::field_types::Field; use crate::gates::gate::Gate; +use 
crate::gates::util::StridedConstraintConsumer; +use crate::hash::hash_types::RichField; +use crate::iop::ext_target::ExtensionTarget; use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator}; use crate::iop::target::Target; use crate::iop::wire::Wire; @@ -13,7 +16,6 @@ use crate::iop::witness::{PartitionWitness, Witness}; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::CircuitConfig; use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase}; -use crate::util::ceil_div_usize; const LOG2_MAX_NUM_ADDENDS: usize = 4; const MAX_NUM_ADDENDS: usize = 16; @@ -128,8 +130,11 @@ impl, const D: usize> Gate for U32AddManyGate constraints } - fn eval_unfiltered_base(&self, vars: EvaluationVarsBase) -> Vec { - let mut constraints = Vec::with_capacity(self.num_constraints()); + fn eval_unfiltered_base_one( + &self, + vars: EvaluationVarsBase, + mut yield_constr: StridedConstraintConsumer, + ) { for i in 0..self.num_ops { let addends: Vec = (0..self.num_addends) .map(|j| vars.local_wires[self.wire_ith_op_jth_addend(i, j)]) @@ -144,7 +149,7 @@ impl, const D: usize> Gate for U32AddManyGate let base = F::from_canonical_u64(1 << 32u64); let combined_output = output_carry * base + output_result; - constraints.push(combined_output - computed_output); + yield_constr.one(combined_output - computed_output); let mut combined_result_limbs = F::ZERO; let mut combined_carry_limbs = F::ZERO; @@ -155,7 +160,7 @@ impl, const D: usize> Gate for U32AddManyGate let product = (0..max_limb) .map(|x| this_limb - F::from_canonical_usize(x)) .product(); - constraints.push(product); + yield_constr.one(product); if j < Self::num_result_limbs() { combined_result_limbs = base * combined_result_limbs + this_limb; @@ -163,11 +168,9 @@ impl, const D: usize> Gate for U32AddManyGate combined_carry_limbs = base * combined_carry_limbs + this_limb; } } - constraints.push(combined_result_limbs - output_result); - constraints.push(combined_carry_limbs - output_carry); + yield_constr.one(combined_result_limbs - output_result); + yield_constr.one(combined_carry_limbs - output_carry); } - - constraints } fn eval_unfiltered_recursively( @@ -355,6 +358,7 @@ mod tests { use crate::gates::gate::Gate; use crate::gates::gate_testing::{test_eval_fns, test_low_degree}; use crate::hash::hash_types::HashOut; + use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; use crate::plonk::vars::EvaluationVars; #[test] @@ -368,7 +372,10 @@ mod tests { #[test] fn eval_fns() -> Result<()> { - test_eval_fns::(U32AddManyGate:: { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + test_eval_fns::(U32AddManyGate:: { num_addends: 4, num_ops: 3, _phantom: PhantomData, diff --git a/plonky2/src/gates/mod.rs b/plonky2/src/gates/mod.rs index 163a7dac..18e3e99b 100644 --- a/plonky2/src/gates/mod.rs +++ b/plonky2/src/gates/mod.rs @@ -7,8 +7,6 @@ pub mod arithmetic_extension; pub mod arithmetic_u32; pub mod assert_le; pub mod base_sum; -pub mod binary_arithmetic; -pub mod binary_subtraction; pub mod comparison; pub mod constant; pub mod exponentiation; diff --git a/plonky2/src/gates/range_check_u32.rs b/plonky2/src/gates/range_check_u32.rs index 83cdb223..0e73990d 100644 --- a/plonky2/src/gates/range_check_u32.rs +++ b/plonky2/src/gates/range_check_u32.rs @@ -1,16 +1,19 @@ use std::marker::PhantomData; -use crate::field::extension_field::target::ExtensionTarget; +use plonky2_util::ceil_div_usize; + use crate::field::extension_field::Extendable; -use 
crate::field::field_types::{Field, RichField}; +use crate::field::field_types::Field; use crate::gates::gate::Gate; +use crate::gates::util::StridedConstraintConsumer; +use crate::hash::hash_types::RichField; +use crate::iop::ext_target::ExtensionTarget; use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator}; use crate::iop::target::Target; use crate::iop::witness::{PartitionWitness, Witness}; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::plonk_common::{reduce_with_powers, reduce_with_powers_ext_recursive}; use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase}; -use crate::util::ceil_div_usize; /// A gate which can decompose a number into base B little-endian limbs. #[derive(Copy, Clone, Debug)] @@ -73,9 +76,11 @@ impl, const D: usize> Gate for U32RangeCheckG constraints } - fn eval_unfiltered_base(&self, vars: EvaluationVarsBase) -> Vec { - let mut constraints = Vec::with_capacity(self.num_constraints()); - + fn eval_unfiltered_base_one( + &self, + vars: EvaluationVarsBase, + mut yield_constr: StridedConstraintConsumer, + ) { let base = F::from_canonical_usize(Self::BASE); for i in 0..self.num_input_limbs { let input_limb = vars.local_wires[self.wire_ith_input_limb(i)]; @@ -84,17 +89,15 @@ impl, const D: usize> Gate for U32RangeCheckG .collect(); let computed_sum = reduce_with_powers(&aux_limbs, base); - constraints.push(computed_sum - input_limb); + yield_constr.one(computed_sum - input_limb); for aux_limb in aux_limbs { - constraints.push( + yield_constr.one( (0..Self::BASE) .map(|i| aux_limb - F::from_canonical_usize(i)) .product(), ); } } - - constraints } fn eval_unfiltered_recursively( @@ -217,6 +220,7 @@ mod tests { use anyhow::Result; use itertools::unfold; + use plonky2_util::ceil_div_usize; use rand::Rng; use crate::field::extension_field::quartic::QuarticExtension; @@ -226,8 +230,8 @@ mod tests { use crate::gates::gate_testing::{test_eval_fns, test_low_degree}; use crate::gates::range_check_u32::U32RangeCheckGate; use crate::hash::hash_types::HashOut; + use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; use crate::plonk::vars::EvaluationVars; - use crate::util::ceil_div_usize; #[test] fn low_degree() { @@ -236,7 +240,10 @@ mod tests { #[test] fn eval_fns() -> Result<()> { - test_eval_fns::(U32RangeCheckGate::new(8)) + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + test_eval_fns::(U32RangeCheckGate::new(8)) } fn test_gate_constraint(input_limbs: Vec) { diff --git a/plonky2/src/iop/generator.rs b/plonky2/src/iop/generator.rs index 5f8b8a5f..5d36ed1d 100644 --- a/plonky2/src/iop/generator.rs +++ b/plonky2/src/iop/generator.rs @@ -1,17 +1,16 @@ use std::fmt::Debug; use std::marker::PhantomData; -use num::BigUint; +use num::{BigUint, FromPrimitive, Integer, Zero}; use plonky2_field::extension_field::{Extendable, FieldExtension}; use plonky2_field::field_types::Field; use crate::gadgets::arithmetic_u32::U32Target; use crate::gadgets::biguint::BigUintTarget; -use crate::gadgets::binary_arithmetic::BinaryTarget; use crate::gadgets::nonnative::NonNativeTarget; use crate::hash::hash_types::{HashOut, HashOutTarget, RichField}; use crate::iop::ext_target::ExtensionTarget; -use crate::iop::target::Target; +use crate::iop::target::{BoolTarget, Target}; use crate::iop::wire::Wire; use crate::iop::witness::{PartialWitness, PartitionWitness, Witness}; use crate::plonk::circuit_data::{CommonCircuitData, ProverOnlyCircuitData}; @@ -170,10 +169,6 @@ impl GeneratedValues { 
self.set_target(target.0, F::from_canonical_u32(value)) } - pub fn set_binary_target(&mut self, target: BinaryTarget, value: F) { - self.set_target(target.0, value) - } - pub fn set_biguint_target(&mut self, target: BigUintTarget, value: BigUint) { let base = BigUint::from_u64(1 << 32).unwrap(); let mut limbs = Vec::new(); diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index 38091bc9..33c88950 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -19,8 +19,6 @@ use crate::gadgets::polynomial::PolynomialCoeffsExtTarget; use crate::gates::arithmetic_base::ArithmeticGate; use crate::gates::arithmetic_extension::ArithmeticExtensionGate; use crate::gates::arithmetic_u32::U32ArithmeticGate; -use crate::gates::binary_arithmetic::BinaryArithmeticGate; -use crate::gates::binary_subtraction::BinarySubtractionGate; use crate::gates::constant::ConstantGate; use crate::gates::gate::{Gate, GateInstance, GateRef, PrefixedGate}; use crate::gates::gate_tree::Tree; @@ -354,11 +352,6 @@ impl, const D: usize> CircuitBuilder { U32Target(self.constant(F::from_canonical_u32(c))) } - /// Returns a BinaryTarget for the value `c`, which is assumed to be at most BITS bits. - pub fn constant_binary(&mut self, c: F) -> BinaryTarget { - BinaryTarget(self.constant(c)) - } - /// If the given target is a constant (i.e. it was created by the `constant(F)` method), returns /// its constant value. Otherwise, returns `None`. pub fn target_as_constant(&self, target: Target) -> Option { @@ -838,11 +831,6 @@ pub struct BatchedGates, const D: usize> { /// The `U32SubtractionGate` currently being filled (so new u32 subtraction operations will be added to this gate before creating a new one) pub(crate) current_u32_subtraction_gate: Option<(usize, usize)>, - /// A map `b -> (g, i)` from `b` bits to an available `BinaryArithmeticGate` for number of bits `b`. - pub(crate) free_binary_arithmetic_gate: HashMap, - /// A map `b -> (g, i)` from `b` bits to an available `BinarySubtractionGate` for number of bits `b`. - pub(crate) free_binary_subtraction_gate: HashMap, - /// An available `ConstantGate` instance, if any. pub(crate) free_constant: Option<(usize, usize)>, } @@ -858,8 +846,6 @@ impl, const D: usize> BatchedGates { free_u32_add_many: HashMap::new(), current_u32_arithmetic_gate: None, current_u32_subtraction_gate: None, - free_binary_arithmetic_gate: HashMap::new(), - free_binary_subtraction_gate: HashMap::new(), free_constant: None, } } @@ -1085,66 +1071,6 @@ impl, const D: usize> CircuitBuilder { (gate_index, copy) } - /// Finds the last available binary arithmetic with the given `bits` or add one if there aren't any. - /// Returns `(g,i)` such that there is a binary arithmetic for the given `bits` at index - /// `g` and the gate's `i`-th copy is available. - pub(crate) fn find_binary_arithmetic_gate(&mut self) -> (usize, usize) { - let (gate, i) = self - .batched_gates - .free_binary_arithmetic_gate - .get(&BITS) - .copied() - .unwrap_or_else(|| { - let gate = self.add_gate( - BinaryArithmeticGate::::new_from_config(&self.config), - vec![], - ); - (gate, 0) - }); - - // Update `free_binary_arithmetic` with new values. 
- if i + 1 < BinaryArithmeticGate::::new_from_config(&self.config).num_ops { - self.batched_gates - .free_binary_arithmetic_gate - .insert(BITS, (gate, i + 1)); - } else { - self.batched_gates.free_binary_arithmetic_gate.remove(&BITS); - } - - (gate, i) - } - - /// Finds the last available binary subtraction with the given `bits` or add one if there aren't any. - /// Returns `(g,i)` such that there is a binary subtraction for the given `bits` at index - /// `g` and the gate's `i`-th copy is available. - pub(crate) fn find_binary_subtraction_gate(&mut self) -> (usize, usize) { - let (gate, i) = self - .batched_gates - .free_binary_subtraction_gate - .get(&BITS) - .copied() - .unwrap_or_else(|| { - let gate = self.add_gate( - BinarySubtractionGate::::new_from_config(&self.config), - vec![], - ); - (gate, 0) - }); - - // Update `free_binary_subtraction` with new values. - if i + 1 < BinarySubtractionGate::::new_from_config(&self.config).num_ops { - self.batched_gates - .free_binary_subtraction_gate - .insert(BITS, (gate, i + 1)); - } else { - self.batched_gates - .free_binary_subtraction_gate - .remove(&BITS); - } - - (gate, i) - } - /// Returns the gate index and copy index of a free `ConstantGate` slot, potentially adding a /// new `ConstantGate` if needed. fn constant_gate_instance(&mut self) -> (usize, usize) { @@ -1301,36 +1227,6 @@ impl, const D: usize> CircuitBuilder { } } - /// Fill the remaining unused binary arithmetic operations with zeros, so that all - /// `BinaryArithmeticGenerator`s are run. - fn fill_binary_arithmetic_gates(&mut self) { - let zero = self.zero_binary::<30>(); - if let Some(&(_, i)) = self.batched_gates.free_binary_arithmetic_gate.get(&30) { - let max_copies = - BinaryArithmeticGate::::new_from_config(&self.config).num_ops; - for _ in i..max_copies { - let dummy = self.add_virtual_binary_target(); - self.mul_add_binary(dummy, dummy, dummy); - self.connect_binary(dummy, zero); - } - } - } - - /// Fill the remaining unused binary subtraction operations with zeros, so that all - /// `BinarySubtractionGenerator`s are run. 
- fn fill_binary_subtraction_gates(&mut self) { - let zero = self.zero_binary::<30>(); - if let Some(&(_, i)) = self.batched_gates.free_binary_subtraction_gate.get(&30) { - let max_copies = - BinarySubtractionGate::::new_from_config(&self.config).num_ops; - for _ in i..max_copies { - let dummy = self.add_virtual_binary_target(); - self.sub_binary(dummy, dummy, dummy); - self.connect_binary(dummy, zero); - } - } - } - fn fill_batched_gates(&mut self) { self.fill_arithmetic_gates(); self.fill_base_arithmetic_gates(); @@ -1340,7 +1236,5 @@ impl, const D: usize> CircuitBuilder { self.fill_u32_add_many_gates(); self.fill_u32_arithmetic_gates(); self.fill_u32_subtraction_gates(); - self.fill_binary_arithmetic_gates(); - self.fill_binary_subtraction_gates(); } } From edf75632086abaf9b709d2d5d5e549c40d5482ed Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Wed, 26 Jan 2022 11:30:13 -0800 Subject: [PATCH 072/143] MulBigUintByBool gate --- plonky2/src/gates/mul_biguint_bool.rs | 241 ++++++++++++++++++++++++++ 1 file changed, 241 insertions(+) create mode 100644 plonky2/src/gates/mul_biguint_bool.rs diff --git a/plonky2/src/gates/mul_biguint_bool.rs b/plonky2/src/gates/mul_biguint_bool.rs new file mode 100644 index 00000000..2fca84e1 --- /dev/null +++ b/plonky2/src/gates/mul_biguint_bool.rs @@ -0,0 +1,241 @@ +use std::marker::PhantomData; + +use plonky2_field::extension_field::Extendable; + +use crate::gates::gate::Gate; +use crate::gates::util::StridedConstraintConsumer; +use crate::hash::hash_types::RichField; +use crate::iop::ext_target::ExtensionTarget; +use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator}; +use crate::iop::target::Target; +use crate::iop::wire::Wire; +use crate::iop::witness::{PartitionWitness, Witness}; +use crate::plonk::circuit_builder::CircuitBuilder; +use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase}; + +/// A gate to perform a basic mul-add on 32-bit values (we assume they are range-checked beforehand). 
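+/// (Added clarification, not in the original patch: concretely, for input limbs l_0, ..., l_{n-1}
+/// and a bool b, this gate constrains each output limb to equal b * l_i.)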
+#[derive(Copy, Clone, Debug)] +pub struct MulBiguintBoolGate, const D: usize> { + pub num_limbs: usize, + _phantom: PhantomData, +} + +impl, const D: usize> MulBiguintBoolGate { + pub fn new(num_limbs: usize) -> Self { + Self { + num_limbs, + _phantom: PhantomData, + } + } + + pub fn wire_ith_input_limb(&self, i: usize) -> usize { + debug_assert!(i < self.num_limbs); + i + } + pub fn wire_input_bool(&self) -> usize { + self.num_limbs + } + pub fn wire_ith_output_limb(&self, i: usize) -> usize { + debug_assert!(i < self.num_limbs); + self.num_limbs + 1 + i + } +} + +impl, const D: usize> Gate for MulBiguintBoolGate { + fn id(&self) -> String { + format!("{:?}", self) + } + + fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + let input_bool = vars.local_wires[self.wire_input_bool()]; + for i in 0..self.num_limbs { + let input_i = vars.local_wires[self.wire_ith_input_limb(i)]; + let output_i = vars.local_wires[self.wire_ith_output_limb(i)]; + + constraints.push(input_i * input_bool - output_i); + } + + constraints + } + + fn eval_unfiltered_base_one( + &self, + vars: EvaluationVarsBase, + mut yield_constr: StridedConstraintConsumer, + ) { + let input_bool = vars.local_wires[self.wire_input_bool()]; + for i in 0..self.num_limbs { + let input_i = vars.local_wires[self.wire_ith_input_limb(i)]; + let output_i = vars.local_wires[self.wire_ith_output_limb(i)]; + + yield_constr.one(input_i * input_bool - output_i); + } + } + + fn eval_unfiltered_recursively( + &self, + builder: &mut CircuitBuilder, + vars: EvaluationTargets, + ) -> Vec> { + let mut constraints = Vec::with_capacity(self.num_constraints()); + + let input_bool = vars.local_wires[self.wire_input_bool()]; + for i in 0..self.num_limbs { + let input_i = vars.local_wires[self.wire_ith_input_limb(i)]; + let output_i = vars.local_wires[self.wire_ith_output_limb(i)]; + + constraints.push(builder.mul_sub_extension(input_i, input_bool, output_i)); + } + + constraints + } + + fn generators( + &self, + gate_index: usize, + _local_constants: &[F], + ) -> Vec>> { + let gen = MulBiguintBoolGenerator { + gate: *self, + gate_index, + _phantom: PhantomData, + }; + vec![Box::new(gen.adapter())] + } + + fn num_wires(&self) -> usize { + self.num_limbs * 2 + 1 + } + + fn num_constants(&self) -> usize { + 0 + } + + fn degree(&self) -> usize { + 2 + } + + fn num_constraints(&self) -> usize { + self.num_limbs + } +} + +#[derive(Clone, Debug)] +struct MulBiguintBoolGenerator, const D: usize> { + gate: MulBiguintBoolGate, + gate_index: usize, + _phantom: PhantomData, +} + +impl, const D: usize> SimpleGenerator + for MulBiguintBoolGenerator +{ + fn dependencies(&self) -> Vec { + let local_target = |input| Target::wire(self.gate_index, input); + + (0..self.gate.num_limbs) + .map(|i| local_target(self.gate.wire_ith_input_limb(i))) + .chain([local_target(self.gate.wire_input_bool())]) + .collect() + } + + fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { + let local_wire = |input| Wire { + gate: self.gate_index, + input, + }; + + let get_local_wire = |input| witness.get_wire(local_wire(input)); + + let input_bool = get_local_wire(self.gate.wire_input_bool()); + for i in 0..self.gate.num_limbs { + let input_limb = get_local_wire(self.gate.wire_ith_input_limb(i)); + let output_wire = local_wire(self.gate.wire_ith_output_limb(i)); + let output_limb = input_limb * input_bool; + out_buffer.set_wire(output_wire, output_limb); + } + } +} + +#[cfg(test)] +mod 
tests { + use std::marker::PhantomData; + + use anyhow::Result; + use plonky2_field::field_types::Field; + use plonky2_field::goldilocks_field::GoldilocksField; + use rand::Rng; + + use crate::gates::gate::Gate; + use crate::gates::gate_testing::{test_eval_fns, test_low_degree}; + use crate::gates::mul_biguint_bool::MulBiguintBoolGate; + use crate::hash::hash_types::HashOut; + use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; + use crate::plonk::vars::EvaluationVars; + + #[test] + fn low_degree() { + test_low_degree::(MulBiguintBoolGate:: { + num_limbs: 8, + _phantom: PhantomData, + }) + } + + #[test] + fn eval_fns() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + test_eval_fns::(MulBiguintBoolGate:: { + num_limbs: 8, + _phantom: PhantomData, + }) + } + + #[test] + fn test_gate_constraint() { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + type FF = >::FE; + const NUM_LIMBS: usize = 8; + + fn get_wires(input_limbs: Vec, input_bool: bool) -> Vec { + let output_limbs = input_limbs + .iter() + .map(|&l| if input_bool { l } else { F::ZERO }); + + input_limbs + .iter() + .cloned() + .chain([F::from_bool(input_bool)]) + .chain(output_limbs) + .map(|x| x.into()) + .collect() + } + + let mut rng = rand::thread_rng(); + let input_limbs: Vec<_> = (0..NUM_LIMBS) + .map(|_| F::from_canonical_u64(rng.gen())) + .collect(); + let input_bool: bool = rng.gen(); + + let gate = MulBiguintBoolGate:: { + num_limbs: NUM_LIMBS, + _phantom: PhantomData, + }; + + let vars = EvaluationVars { + local_constants: &[], + local_wires: &get_wires(input_limbs, input_bool), + public_inputs_hash: &HashOut::rand(), + }; + + assert!( + gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()), + "Gate constraints are not satisfied." 
+ ); + } +} From 82e2872f5e6de13376b56ead7be1b5d1d33988b9 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 27 Jan 2022 13:34:27 -0800 Subject: [PATCH 073/143] updates and addressed comments --- plonky2/src/curve/curve_types.rs | 8 + plonky2/src/curve/ecdsa.rs | 10 +- plonky2/src/gadgets/arithmetic_u32.rs | 20 +- plonky2/src/gadgets/biguint.rs | 20 +- plonky2/src/gadgets/curve.rs | 3 +- plonky2/src/gadgets/nonnative.rs | 11 +- plonky2/src/gates/arithmetic_extension.rs | 2 +- plonky2/src/gates/arithmetic_u32.rs | 5 +- plonky2/src/gates/comparison.rs | 2 +- plonky2/src/gates/interpolation.rs | 2 +- plonky2/src/gates/mul_biguint_bool.rs | 241 ---------------------- plonky2/src/gates/switch.rs | 2 +- 12 files changed, 24 insertions(+), 302 deletions(-) delete mode 100644 plonky2/src/gates/mul_biguint_bool.rs diff --git a/plonky2/src/curve/curve_types.rs b/plonky2/src/curve/curve_types.rs index b7ee34e6..9599f6fe 100644 --- a/plonky2/src/curve/curve_types.rs +++ b/plonky2/src/curve/curve_types.rs @@ -259,3 +259,11 @@ impl Neg for ProjectivePoint { ProjectivePoint { x, y: -y, z } } } + +pub fn base_to_scalar(x: C::BaseField) -> C::ScalarField { + C::ScalarField::from_biguint(x.to_biguint()) +} + +pub fn scalar_to_base(x: C::ScalarField) -> C::BaseField { + C::BaseField::from_biguint(x.to_biguint()) +} diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index cce83d4c..f708a827 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -1,7 +1,7 @@ use itertools::{unfold, Itertools}; use num::BigUint; -use crate::curve::curve_types::{AffinePoint, Curve, CurveScalar}; +use crate::curve::curve_types::{base_to_scalar, AffinePoint, Curve, CurveScalar}; use crate::field::field_types::Field; use crate::hash::hash_types::RichField; use crate::hash::hashing::{hash_n_to_m, PlonkyPermutation}; @@ -15,14 +15,6 @@ pub struct ECDSASignature { pub struct ECDSASecretKey(pub C::ScalarField); pub struct ECDSAPublicKey(pub AffinePoint); -pub fn base_to_scalar(x: C::BaseField) -> C::ScalarField { - C::ScalarField::from_biguint(x.to_biguint()) -} - -pub fn scalar_to_base(x: C::ScalarField) -> C::BaseField { - C::BaseField::from_biguint(x.to_biguint()) -} - pub fn hash_to_bits>(x: F, num_bits: usize) -> Vec { let hashed = hash_n_to_m::(&vec![x], 1, true)[0]; diff --git a/plonky2/src/gadgets/arithmetic_u32.rs b/plonky2/src/gadgets/arithmetic_u32.rs index 8da01dc1..f57c1db5 100644 --- a/plonky2/src/gadgets/arithmetic_u32.rs +++ b/plonky2/src/gadgets/arithmetic_u32.rs @@ -196,24 +196,6 @@ impl, const D: usize> CircuitBuilder { (output_result, output_borrow) } - - pub fn split_to_u32(&mut self, x: Target) -> (U32Target, U32Target) { - let low = self.add_virtual_u32_target(); - let high = self.add_virtual_u32_target(); - - let base = self.constant(F::from_canonical_u64(1u64 << 32)); - let combined = self.mul_add(high.0, base, low.0); - self.connect(x, combined); - - self.add_simple_generator(SplitToU32Generator:: { - x: x.clone(), - low: low.clone(), - high: high.clone(), - _phantom: PhantomData, - }); - - (low, high) - } } #[derive(Debug)] @@ -235,7 +217,7 @@ impl, const D: usize> SimpleGenerator let x = witness.get_target(self.x.clone()); let x_u64 = x.to_canonical_u64(); let low = x_u64 as u32; - let high: u32 = (x_u64 >> 32).try_into().unwrap(); + let high = (x_u64 >> 32) as u32; out_buffer.set_u32_target(self.low.clone(), low); out_buffer.set_u32_target(self.high.clone(), high); diff --git a/plonky2/src/gadgets/biguint.rs b/plonky2/src/gadgets/biguint.rs index 
51cfcc06..15c2a630 100644 --- a/plonky2/src/gadgets/biguint.rs +++ b/plonky2/src/gadgets/biguint.rs @@ -1,6 +1,6 @@ use std::marker::PhantomData; -use num::{BigUint, FromPrimitive, Integer, Zero}; +use num::{BigUint, Integer, Zero}; use plonky2_field::extension_field::Extendable; use crate::gadgets::arithmetic_u32::U32Target; @@ -27,14 +27,7 @@ impl BigUintTarget { impl, const D: usize> CircuitBuilder { pub fn constant_biguint(&mut self, value: &BigUint) -> BigUintTarget { - let base = BigUint::from_u64(1 << 32).unwrap(); - let mut limb_values = Vec::new(); - let mut current = value.clone(); - while current > BigUint::zero() { - let (div, rem) = current.div_rem(&base); - current = div; - limb_values.push(rem.to_u64_digits()[0] as u32); - } + let limb_values = value.to_u32_digits(); let limbs = limb_values.iter().map(|&l| self.constant_u32(l)).collect(); BigUintTarget { limbs } @@ -167,11 +160,10 @@ impl, const D: usize> CircuitBuilder { let t = b.target; BigUintTarget { - limbs: a - .limbs - .iter() - .map(|l| U32Target(self.mul(l.0, t))) - .collect(), + limbs: a.limbs + .iter() + .map(|&l| U32Target(self.mul(l.0, t))) + .collect(), } } diff --git a/plonky2/src/gadgets/curve.rs b/plonky2/src/gadgets/curve.rs index 92f45242..d16aaefa 100644 --- a/plonky2/src/gadgets/curve.rs +++ b/plonky2/src/gadgets/curve.rs @@ -135,7 +135,7 @@ impl, const D: usize> CircuitBuilder { for &bit in bits.iter() { let not_bit = self.not(bit); - + let result_plus_2_i_p = self.curve_add(&result, &two_i_times_p); let new_x_if_bit = self.mul_nonnative_by_bool(&result_plus_2_i_p.x, bit); @@ -321,6 +321,7 @@ mod tests { builder.connect_affine_point(&neg_five_g_expected, &neg_five_g_actual); + println!("NUM GATES: {}", builder.num_gates()); let data = builder.build::(); let proof = data.prove(pw).unwrap(); diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 998ce628..b513abbb 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -93,17 +93,8 @@ impl, const D: usize> CircuitBuilder { a: &NonNativeTarget, b: BoolTarget, ) -> NonNativeTarget { - let t = b.target; - NonNativeTarget { - value: BigUintTarget { - limbs: a - .value - .limbs - .iter() - .map(|l| U32Target(self.mul(l.0, t))) - .collect(), - }, + value: self.mul_biguint_by_bool(&a.value, b), _phantom: PhantomData, } } diff --git a/plonky2/src/gates/arithmetic_extension.rs b/plonky2/src/gates/arithmetic_extension.rs index 93f7277c..def09473 100644 --- a/plonky2/src/gates/arithmetic_extension.rs +++ b/plonky2/src/gates/arithmetic_extension.rs @@ -138,7 +138,7 @@ impl, const D: usize> Gate for ArithmeticExte ); g }) - .collect::>() + .collect() } fn num_wires(&self) -> usize { diff --git a/plonky2/src/gates/arithmetic_u32.rs b/plonky2/src/gates/arithmetic_u32.rs index bef21a97..dc03e296 100644 --- a/plonky2/src/gates/arithmetic_u32.rs +++ b/plonky2/src/gates/arithmetic_u32.rs @@ -425,10 +425,7 @@ mod tests { v1.append(&mut output_limbs_f); } - v0.iter() - .chain(v1.iter()) - .map(|&x| x.into()) - .collect::>() + v0.iter().chain(v1.iter()).map(|&x| x.into()).collect() } let mut rng = rand::thread_rng(); diff --git a/plonky2/src/gates/comparison.rs b/plonky2/src/gates/comparison.rs index 9a119b1d..424ecb5b 100644 --- a/plonky2/src/gates/comparison.rs +++ b/plonky2/src/gates/comparison.rs @@ -658,7 +658,7 @@ mod tests { v.append(&mut intermediate_values); v.append(&mut msd_bits); - v.iter().map(|&x| x.into()).collect::>() + v.iter().map(|&x| x.into()).collect() }; let mut rng = rand::thread_rng(); 
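Note (illustrative, not part of the patch series): the `constant_biguint` simplification earlier in this commit relies on `num::BigUint::to_u32_digits` returning the base-2^32 limbs in little-endian order (least-significant limb first), which is exactly what the removed `div_rem` loop computed by hand. A quick standalone check:

use num::BigUint;

fn main() {
    // 2^128 has five base-2^32 limbs: [0, 0, 0, 0, 1], least significant first.
    let value = BigUint::from(1u32) << 128usize;
    assert_eq!(value.to_u32_digits(), vec![0u32, 0, 0, 0, 1]);
    // Zero decomposes to an empty limb vector, matching the old loop's behaviour.
    assert!(BigUint::from(0u32).to_u32_digits().is_empty());
}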
diff --git a/plonky2/src/gates/interpolation.rs b/plonky2/src/gates/interpolation.rs index 9d3d6e3f..46c42113 100644 --- a/plonky2/src/gates/interpolation.rs +++ b/plonky2/src/gates/interpolation.rs @@ -343,7 +343,7 @@ mod tests { for i in 0..coeffs.len() { v.extend(coeffs.coeffs[i].0); } - v.iter().map(|&x| x.into()).collect::>() + v.iter().map(|&x| x.into()).collect() } // Get a working row for InterpolationGate. diff --git a/plonky2/src/gates/mul_biguint_bool.rs b/plonky2/src/gates/mul_biguint_bool.rs deleted file mode 100644 index 2fca84e1..00000000 --- a/plonky2/src/gates/mul_biguint_bool.rs +++ /dev/null @@ -1,241 +0,0 @@ -use std::marker::PhantomData; - -use plonky2_field::extension_field::Extendable; - -use crate::gates::gate::Gate; -use crate::gates::util::StridedConstraintConsumer; -use crate::hash::hash_types::RichField; -use crate::iop::ext_target::ExtensionTarget; -use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator}; -use crate::iop::target::Target; -use crate::iop::wire::Wire; -use crate::iop::witness::{PartitionWitness, Witness}; -use crate::plonk::circuit_builder::CircuitBuilder; -use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase}; - -/// A gate to perform a basic mul-add on 32-bit values (we assume they are range-checked beforehand). -#[derive(Copy, Clone, Debug)] -pub struct MulBiguintBoolGate, const D: usize> { - pub num_limbs: usize, - _phantom: PhantomData, -} - -impl, const D: usize> MulBiguintBoolGate { - pub fn new(num_limbs: usize) -> Self { - Self { - num_limbs, - _phantom: PhantomData, - } - } - - pub fn wire_ith_input_limb(&self, i: usize) -> usize { - debug_assert!(i < self.num_limbs); - i - } - pub fn wire_input_bool(&self) -> usize { - self.num_limbs - } - pub fn wire_ith_output_limb(&self, i: usize) -> usize { - debug_assert!(i < self.num_limbs); - self.num_limbs + 1 + i - } -} - -impl, const D: usize> Gate for MulBiguintBoolGate { - fn id(&self) -> String { - format!("{:?}", self) - } - - fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { - let mut constraints = Vec::with_capacity(self.num_constraints()); - - let input_bool = vars.local_wires[self.wire_input_bool()]; - for i in 0..self.num_limbs { - let input_i = vars.local_wires[self.wire_ith_input_limb(i)]; - let output_i = vars.local_wires[self.wire_ith_output_limb(i)]; - - constraints.push(input_i * input_bool - output_i); - } - - constraints - } - - fn eval_unfiltered_base_one( - &self, - vars: EvaluationVarsBase, - mut yield_constr: StridedConstraintConsumer, - ) { - let input_bool = vars.local_wires[self.wire_input_bool()]; - for i in 0..self.num_limbs { - let input_i = vars.local_wires[self.wire_ith_input_limb(i)]; - let output_i = vars.local_wires[self.wire_ith_output_limb(i)]; - - yield_constr.one(input_i * input_bool - output_i); - } - } - - fn eval_unfiltered_recursively( - &self, - builder: &mut CircuitBuilder, - vars: EvaluationTargets, - ) -> Vec> { - let mut constraints = Vec::with_capacity(self.num_constraints()); - - let input_bool = vars.local_wires[self.wire_input_bool()]; - for i in 0..self.num_limbs { - let input_i = vars.local_wires[self.wire_ith_input_limb(i)]; - let output_i = vars.local_wires[self.wire_ith_output_limb(i)]; - - constraints.push(builder.mul_sub_extension(input_i, input_bool, output_i)); - } - - constraints - } - - fn generators( - &self, - gate_index: usize, - _local_constants: &[F], - ) -> Vec>> { - let gen = MulBiguintBoolGenerator { - gate: *self, - gate_index, - _phantom: PhantomData, - }; - 
vec![Box::new(gen.adapter())] - } - - fn num_wires(&self) -> usize { - self.num_limbs * 2 + 1 - } - - fn num_constants(&self) -> usize { - 0 - } - - fn degree(&self) -> usize { - 2 - } - - fn num_constraints(&self) -> usize { - self.num_limbs - } -} - -#[derive(Clone, Debug)] -struct MulBiguintBoolGenerator, const D: usize> { - gate: MulBiguintBoolGate, - gate_index: usize, - _phantom: PhantomData, -} - -impl, const D: usize> SimpleGenerator - for MulBiguintBoolGenerator -{ - fn dependencies(&self) -> Vec { - let local_target = |input| Target::wire(self.gate_index, input); - - (0..self.gate.num_limbs) - .map(|i| local_target(self.gate.wire_ith_input_limb(i))) - .chain([local_target(self.gate.wire_input_bool())]) - .collect() - } - - fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { - let local_wire = |input| Wire { - gate: self.gate_index, - input, - }; - - let get_local_wire = |input| witness.get_wire(local_wire(input)); - - let input_bool = get_local_wire(self.gate.wire_input_bool()); - for i in 0..self.gate.num_limbs { - let input_limb = get_local_wire(self.gate.wire_ith_input_limb(i)); - let output_wire = local_wire(self.gate.wire_ith_output_limb(i)); - let output_limb = input_limb * input_bool; - out_buffer.set_wire(output_wire, output_limb); - } - } -} - -#[cfg(test)] -mod tests { - use std::marker::PhantomData; - - use anyhow::Result; - use plonky2_field::field_types::Field; - use plonky2_field::goldilocks_field::GoldilocksField; - use rand::Rng; - - use crate::gates::gate::Gate; - use crate::gates::gate_testing::{test_eval_fns, test_low_degree}; - use crate::gates::mul_biguint_bool::MulBiguintBoolGate; - use crate::hash::hash_types::HashOut; - use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; - use crate::plonk::vars::EvaluationVars; - - #[test] - fn low_degree() { - test_low_degree::(MulBiguintBoolGate:: { - num_limbs: 8, - _phantom: PhantomData, - }) - } - - #[test] - fn eval_fns() -> Result<()> { - const D: usize = 2; - type C = PoseidonGoldilocksConfig; - type F = >::F; - test_eval_fns::(MulBiguintBoolGate:: { - num_limbs: 8, - _phantom: PhantomData, - }) - } - - #[test] - fn test_gate_constraint() { - const D: usize = 2; - type C = PoseidonGoldilocksConfig; - type F = >::F; - type FF = >::FE; - const NUM_LIMBS: usize = 8; - - fn get_wires(input_limbs: Vec, input_bool: bool) -> Vec { - let output_limbs = input_limbs - .iter() - .map(|&l| if input_bool { l } else { F::ZERO }); - - input_limbs - .iter() - .cloned() - .chain([F::from_bool(input_bool)]) - .chain(output_limbs) - .map(|x| x.into()) - .collect() - } - - let mut rng = rand::thread_rng(); - let input_limbs: Vec<_> = (0..NUM_LIMBS) - .map(|_| F::from_canonical_u64(rng.gen())) - .collect(); - let input_bool: bool = rng.gen(); - - let gate = MulBiguintBoolGate:: { - num_limbs: NUM_LIMBS, - _phantom: PhantomData, - }; - - let vars = EvaluationVars { - local_constants: &[], - local_wires: &get_wires(input_limbs, input_bool), - public_inputs_hash: &HashOut::rand(), - }; - - assert!( - gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()), - "Gate constraints are not satisfied." 
- ); - } -} diff --git a/plonky2/src/gates/switch.rs b/plonky2/src/gates/switch.rs index c5271a60..583f9a6e 100644 --- a/plonky2/src/gates/switch.rs +++ b/plonky2/src/gates/switch.rs @@ -432,7 +432,7 @@ mod tests { v.push(F::from_bool(switch)); } - v.iter().map(|&x| x.into()).collect::>() + v.iter().map(|&x| x.into()).collect() } let first_inputs: Vec> = (0..num_copies).map(|_| F::rand_vec(CHUNK_SIZE)).collect(); From 493f516fac517f068b0fff0b22d1677063d12609 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 27 Jan 2022 14:54:10 -0800 Subject: [PATCH 074/143] removed hashing --- plonky2/src/curve/ecdsa.rs | 58 +++++------------------------------- plonky2/src/gadgets/ecdsa.rs | 38 +++-------------------- 2 files changed, 12 insertions(+), 84 deletions(-) diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index f708a827..3276e9cb 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -1,11 +1,5 @@ -use itertools::{unfold, Itertools}; -use num::BigUint; - -use crate::curve::curve_types::{base_to_scalar, AffinePoint, Curve, CurveScalar}; +use crate::curve::curve_types::{AffinePoint, base_to_scalar, Curve, CurveScalar}; use crate::field::field_types::Field; -use crate::hash::hash_types::RichField; -use crate::hash::hashing::{hash_n_to_m, PlonkyPermutation}; -use crate::hash::poseidon::PoseidonPermutation; pub struct ECDSASignature { pub r: C::ScalarField, @@ -15,59 +9,25 @@ pub struct ECDSASignature { pub struct ECDSASecretKey(pub C::ScalarField); pub struct ECDSAPublicKey(pub AffinePoint); -pub fn hash_to_bits>(x: F, num_bits: usize) -> Vec { - let hashed = hash_n_to_m::(&vec![x], 1, true)[0]; - - let mut val = hashed.to_canonical_u64(); - unfold((), move |_| { - let ret = val % 2 != 0; - val /= 2; - Some(ret) - }) - .take(num_bits) - .collect() -} - -pub fn hash_to_scalar>( - x: F, - num_bits: usize, -) -> C::ScalarField { - let h_bits = hash_to_bits::(x, num_bits); - let h_vals: Vec<_> = h_bits - .iter() - .chunks(32) - .into_iter() - .map(|chunk| { - chunk - .enumerate() - .fold(0u32, |acc, (pow, &bit)| acc + (bit as u32) * (2 << pow)) - }) - .collect(); - C::ScalarField::from_biguint(BigUint::new(h_vals)) -} - -pub fn sign_message(msg: F, sk: ECDSASecretKey) -> ECDSASignature { - let h = hash_to_scalar::(msg, 256); - +pub fn sign_message(msg: C::ScalarField, sk: ECDSASecretKey) -> ECDSASignature { let k = C::ScalarField::rand(); let rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); let r = base_to_scalar::(rr.x); - let s = k.inverse() * (h + r * sk.0); + + let s = k.inverse() * (msg + r * sk.0); ECDSASignature { r, s } } -pub fn verify_message( - msg: F, +pub fn verify_message( + msg: C::ScalarField, sig: ECDSASignature, pk: ECDSAPublicKey, ) -> bool { let ECDSASignature { r, s } = sig; - let h = hash_to_scalar::(msg, 256); - let c = s.inverse(); - let u1 = h * c; + let u1 = msg * c; let u2 = r * c; let g = C::GENERATOR_PROJECTIVE; @@ -84,15 +44,13 @@ mod tests { use crate::curve::ecdsa::{sign_message, verify_message, ECDSAPublicKey, ECDSASecretKey}; use crate::curve::secp256k1::Secp256K1; use crate::field::field_types::Field; - use crate::field::goldilocks_field::GoldilocksField; use crate::field::secp256k1_scalar::Secp256K1Scalar; #[test] fn test_ecdsa_native() { - type F = GoldilocksField; type C = Secp256K1; - let msg = F::rand(); + let msg = Secp256K1Scalar::rand(); let sk = ECDSASecretKey(Secp256K1Scalar::rand()); let pk = ECDSAPublicKey((CurveScalar(sk.0) * C::GENERATOR_PROJECTIVE).to_affine()); diff --git 
a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index cc787eb1..9a7a8257 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -20,46 +20,16 @@ pub struct ECDSASignatureTarget { } impl, const D: usize> CircuitBuilder { - pub fn hash_to_bits(&mut self, x: Target, num_bits: usize) -> Vec { - let inputs = vec![x]; - let hashed = self.hash_n_to_m::(inputs, 1, true)[0]; - self.split_le(hashed, num_bits) - } - - pub fn hash_to_scalar( - &mut self, - x: Target, - num_bits: usize, - ) -> NonNativeTarget { - let h_bits = self.hash_to_bits(x, num_bits); - - let two = self.two(); - let mut rev_bits = h_bits.iter().rev(); - let mut sum = rev_bits.next().unwrap().target; - for &bit in rev_bits { - sum = self.mul_add(two, sum, bit.target); - } - let limbs = vec![U32Target(sum)]; - let value = BigUintTarget { limbs }; - - NonNativeTarget { - value, - _phantom: PhantomData, - } - } - pub fn verify_message( &mut self, - msg: Target, + msg: NonNativeTarget, sig: ECDSASignatureTarget, pk: ECDSAPublicKeyTarget, ) { let ECDSASignatureTarget { r, s } = sig; - let h = self.hash_to_scalar::(msg, 256); - let c = self.inv_nonnative(&s); - let u1 = self.mul_nonnative(&h, &c); + let u1 = self.mul_nonnative(&msg, &c); let u2 = self.mul_nonnative(&r, &c); let g = self.constant_affine_point(C::GENERATOR_AFFINE); @@ -105,8 +75,8 @@ mod tests { let pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); - let msg = F::rand(); - let msg_target = builder.constant(msg); + let msg = Secp256K1Scalar::rand(); + let msg_target = builder.constant_nonnative(msg); let sk = ECDSASecretKey::(Secp256K1Scalar::rand()); let pk = ECDSAPublicKey((CurveScalar(sk.0) * Curve::GENERATOR_PROJECTIVE).to_affine()); From b62fa3f60880689d0ee62a8d2d39674c4871eb13 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Thu, 27 Jan 2022 14:54:16 -0800 Subject: [PATCH 075/143] fmt --- plonky2/src/curve/ecdsa.rs | 2 +- plonky2/src/gadgets/biguint.rs | 9 +++++---- plonky2/src/gadgets/curve.rs | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index 3276e9cb..787567aa 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -1,4 +1,4 @@ -use crate::curve::curve_types::{AffinePoint, base_to_scalar, Curve, CurveScalar}; +use crate::curve::curve_types::{base_to_scalar, AffinePoint, Curve, CurveScalar}; use crate::field::field_types::Field; pub struct ECDSASignature { diff --git a/plonky2/src/gadgets/biguint.rs b/plonky2/src/gadgets/biguint.rs index 15c2a630..c9ad7280 100644 --- a/plonky2/src/gadgets/biguint.rs +++ b/plonky2/src/gadgets/biguint.rs @@ -160,10 +160,11 @@ impl, const D: usize> CircuitBuilder { let t = b.target; BigUintTarget { - limbs: a.limbs - .iter() - .map(|&l| U32Target(self.mul(l.0, t))) - .collect(), + limbs: a + .limbs + .iter() + .map(|&l| U32Target(self.mul(l.0, t))) + .collect(), } } diff --git a/plonky2/src/gadgets/curve.rs b/plonky2/src/gadgets/curve.rs index d16aaefa..59b13840 100644 --- a/plonky2/src/gadgets/curve.rs +++ b/plonky2/src/gadgets/curve.rs @@ -135,7 +135,7 @@ impl, const D: usize> CircuitBuilder { for &bit in bits.iter() { let not_bit = self.not(bit); - + let result_plus_2_i_p = self.curve_add(&result, &two_i_times_p); let new_x_if_bit = self.mul_nonnative_by_bool(&result_plus_2_i_p.x, bit); From b1c8709f7e670f6767ea8fdd02c7b92ae6a07684 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Fri, 28 Jan 2022 10:55:17 -0800 Subject: [PATCH 076/143] addressed more comments --- 
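For context on the `sign_message`/`verify_message` pair being reworked in these patches, the standard ECDSA identity the verifier relies on, with n the scalar-field order, G the generator, and PK = sk·G:

    \[
      s = k^{-1}\,(m + r \cdot sk) \bmod n, \qquad r = x(kG) \bmod n,
    \]
    \[
      c = s^{-1}, \quad u_1 = m\,c, \quad u_2 = r\,c
      \;\Longrightarrow\;
      u_1 G + u_2\,\mathrm{PK} = s^{-1}(m + r \cdot sk)\,G = kG,
    \]

so the x-coordinate of u_1·G + u_2·PK, carried back into the scalar field by `base_to_scalar`, must equal r for an honest signature.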
plonky2/src/curve/ecdsa.rs | 2 ++ plonky2/src/gadgets/ecdsa.rs | 7 ++----- plonky2/src/gadgets/nonnative.rs | 35 -------------------------------- plonky2/src/iop/generator.rs | 9 +------- 4 files changed, 5 insertions(+), 48 deletions(-) diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index 787567aa..e86b40d8 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -26,6 +26,8 @@ pub fn verify_message( ) -> bool { let ECDSASignature { r, s } = sig; + assert!(pk.0.is_valid()); + let c = s.inverse(); let u1 = msg * c; let u2 = r * c; diff --git a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index 9a7a8257..2acd1c80 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -2,13 +2,9 @@ use std::marker::PhantomData; use crate::curve::curve_types::Curve; use crate::field::extension_field::Extendable; -use crate::gadgets::arithmetic_u32::U32Target; -use crate::gadgets::biguint::BigUintTarget; use crate::gadgets::curve::AffinePointTarget; use crate::gadgets::nonnative::NonNativeTarget; use crate::hash::hash_types::RichField; -use crate::hash::poseidon::PoseidonHash; -use crate::iop::target::{BoolTarget, Target}; use crate::plonk::circuit_builder::CircuitBuilder; pub struct ECDSASecretKeyTarget(NonNativeTarget); @@ -28,6 +24,8 @@ impl, const D: usize> CircuitBuilder { ) { let ECDSASignatureTarget { r, s } = sig; + self.curve_assert_valid(&pk.0); + let c = self.inv_nonnative(&s); let u1 = self.mul_nonnative(&msg, &c); let u2 = self.mul_nonnative(&r, &c); @@ -62,7 +60,6 @@ mod tests { use crate::plonk::verifier::verify; #[test] - #[ignore] fn test_ecdsa_circuit() -> Result<()> { const D: usize = 2; type C = PoseidonGoldilocksConfig; diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index b513abbb..293a7183 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -624,41 +624,6 @@ mod tests { verify(proof, &data.verifier_only, &data.common) } - fn test_nonnative_many_muls_helper(num: usize) { - type FF = Secp256K1Base; - const D: usize = 2; - type C = PoseidonGoldilocksConfig; - type F = >::F; - - let config = CircuitConfig::standard_ecc_config(); - let mut unop_builder = CircuitBuilder::::new(config.clone()); - let mut op_builder = CircuitBuilder::::new(config); - - let ffs: Vec<_> = (0..num).map(|_| FF::rand()).collect(); - - let op_targets: Vec<_> = ffs - .iter() - .map(|&x| op_builder.constant_nonnative(x)) - .collect(); - op_builder.mul_many_nonnative(&op_targets); - - let unop_targets: Vec<_> = ffs - .iter() - .map(|&x| unop_builder.constant_nonnative(x)) - .collect(); - let mut result = unop_targets[0].clone(); - for i in 1..unop_targets.len() { - result = unop_builder.mul_nonnative(&result, &unop_targets[i]); - } - } - - #[test] - fn test_nonnative_many_muls() { - for num in 2..10 { - test_nonnative_many_muls_helper(num); - } - } - #[test] fn test_nonnative_neg() -> Result<()> { type FF = Secp256K1Base; diff --git a/plonky2/src/iop/generator.rs b/plonky2/src/iop/generator.rs index 5d36ed1d..fb089d29 100644 --- a/plonky2/src/iop/generator.rs +++ b/plonky2/src/iop/generator.rs @@ -170,14 +170,7 @@ impl GeneratedValues { } pub fn set_biguint_target(&mut self, target: BigUintTarget, value: BigUint) { - let base = BigUint::from_u64(1 << 32).unwrap(); - let mut limbs = Vec::new(); - let mut current = value.clone(); - while current > BigUint::zero() { - let (div, rem) = current.div_rem(&base); - current = div; - limbs.push(rem.to_u64_digits()[0] as u32); - } + 
let mut limbs = value.to_u32_digits(); assert!(target.num_limbs() >= limbs.len()); From c1b8515e48a69f87b71516a389eb8cf1e493a8b2 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Fri, 28 Jan 2022 10:55:58 -0800 Subject: [PATCH 077/143] warning --- plonky2/src/iop/generator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plonky2/src/iop/generator.rs b/plonky2/src/iop/generator.rs index fb089d29..73978f5c 100644 --- a/plonky2/src/iop/generator.rs +++ b/plonky2/src/iop/generator.rs @@ -1,7 +1,7 @@ use std::fmt::Debug; use std::marker::PhantomData; -use num::{BigUint, FromPrimitive, Integer, Zero}; +use num::BigUint; use plonky2_field::extension_field::{Extendable, FieldExtension}; use plonky2_field::field_types::Field; From 30f936c43436a0e1f93f58008aba03041aeb4161 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Sat, 29 Jan 2022 07:54:55 -0800 Subject: [PATCH 078/143] ecdsa changes --- plonky2/src/curve/ecdsa.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index e86b40d8..68660f0d 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -1,3 +1,4 @@ +use crate::curve::curve_msm::msm_parallel; use crate::curve::curve_types::{base_to_scalar, AffinePoint, Curve, CurveScalar}; use crate::field::field_types::Field; @@ -10,8 +11,15 @@ pub struct ECDSASecretKey(pub C::ScalarField); pub struct ECDSAPublicKey(pub AffinePoint); pub fn sign_message(msg: C::ScalarField, sk: ECDSASecretKey) -> ECDSASignature { - let k = C::ScalarField::rand(); - let rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); + let (k, rr) = { + let mut k = C::ScalarField::rand(); + let mut rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); + while rr.x == C::BaseField::ZERO { + k = C::ScalarField::rand(); + rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); + } + (k, rr) + }; let r = base_to_scalar::(rr.x); let s = k.inverse() * (msg + r * sk.0); From d68ab119130c8e3af1f2b5177aa4492fa041480e Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Sun, 30 Jan 2022 08:30:54 -0800 Subject: [PATCH 079/143] msm --- plonky2/src/curve/ecdsa.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index 68660f0d..c84c4c10 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -41,7 +41,8 @@ pub fn verify_message( let u2 = r * c; let g = C::GENERATOR_PROJECTIVE; - let point_proj = CurveScalar(u1) * g + CurveScalar(u2) * pk.0.to_projective(); + let w = 5; // Experimentally fastest + let point_proj = msm_parallel(&[u1, u2], &[g, pk.0.to_projective()], w); let point = point_proj.to_affine(); let x = base_to_scalar::(point.x); From 20930e008697301b70750c80e41bc273bba1e7a8 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Sun, 30 Jan 2022 08:52:26 -0800 Subject: [PATCH 080/143] range-check add results --- plonky2/src/gadgets/curve.rs | 1 - plonky2/src/gadgets/nonnative.rs | 8 ++++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/plonky2/src/gadgets/curve.rs b/plonky2/src/gadgets/curve.rs index 59b13840..92f45242 100644 --- a/plonky2/src/gadgets/curve.rs +++ b/plonky2/src/gadgets/curve.rs @@ -321,7 +321,6 @@ mod tests { builder.connect_affine_point(&neg_five_g_expected, &neg_five_g_actual); - println!("NUM GATES: {}", builder.num_gates()); let data = builder.build::(); let proof = data.prove(pw).unwrap(); diff --git a/plonky2/src/gadgets/nonnative.rs 
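Outside the circuit, the property these new checks are after can be stated directly on plain `num::BigUint` values. A hypothetical helper (not plonky2 API) making the reduced-sum invariant explicit:

    use num::BigUint;

    /// True iff `sum` is the canonical representative of `a + b` modulo `modulus`:
    /// it equals (a + b) mod modulus and is strictly below the modulus, which is
    /// what the extra comparison against the modulus is meant to guarantee.
    fn is_reduced_sum(a: &BigUint, b: &BigUint, modulus: &BigUint, sum: &BigUint) -> bool {
        sum < modulus && (a + b) % modulus == *sum
    }

    fn main() {
        let m = BigUint::from(97u32);
        let (a, b) = (BigUint::from(90u32), BigUint::from(20u32));
        assert!(is_reduced_sum(&a, &b, &m, &BigUint::from(13u32)));
        assert!(!is_reduced_sum(&a, &b, &m, &BigUint::from(110u32))); // unreduced sum
    }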
b/plonky2/src/gadgets/nonnative.rs index 293a7183..8ce5933c 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -85,6 +85,10 @@ impl, const D: usize> CircuitBuilder { let sum_actual = self.add_biguint(&sum.value, &mod_times_overflow); self.connect_biguint(&sum_expected, &sum_actual); + let cmp = self.cmp_biguint(&sum.value, &modulus); + let one = self.one(); + self.connect(cmp.target, one); + sum } @@ -133,6 +137,10 @@ impl, const D: usize> CircuitBuilder { let sum_actual = self.add_biguint(&sum.value, &mod_times_overflow); self.connect_biguint(&sum_expected, &sum_actual); + let cmp = self.cmp_biguint(&sum.value, &modulus); + let one = self.one(); + self.connect(cmp.target, one); + sum } From 8a56af93c2e921f3fad3028230cc38dc48801d54 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Mon, 31 Jan 2022 10:20:05 -0800 Subject: [PATCH 081/143] TODOs --- plonky2/src/gadgets/nonnative.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 8ce5933c..637eaccc 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -85,6 +85,8 @@ impl, const D: usize> CircuitBuilder { let sum_actual = self.add_biguint(&sum.value, &mod_times_overflow); self.connect_biguint(&sum_expected, &sum_actual); + // Range-check result. + // TODO: can potentially leave unreduced until necessary (e.g. when connecting values). let cmp = self.cmp_biguint(&sum.value, &modulus); let one = self.one(); self.connect(cmp.target, one); @@ -137,6 +139,8 @@ impl, const D: usize> CircuitBuilder { let sum_actual = self.add_biguint(&sum.value, &mod_times_overflow); self.connect_biguint(&sum_expected, &sum_actual); + // Range-check result. + // TODO: can potentially leave unreduced until necessary (e.g. when connecting values). 
let cmp = self.cmp_biguint(&sum.value, &modulus); let one = self.one(); self.connect(cmp.target, one); From a471574f785ef386f81e90a39f590a73f858eec7 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Mon, 31 Jan 2022 10:38:02 -0800 Subject: [PATCH 082/143] fix --- plonky2/src/plonk/circuit_builder.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index 33c88950..58fff814 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -16,6 +16,7 @@ use crate::gadgets::arithmetic::BaseArithmeticOperation; use crate::gadgets::arithmetic_extension::ExtensionArithmeticOperation; use crate::gadgets::arithmetic_u32::U32Target; use crate::gadgets::polynomial::PolynomialCoeffsExtTarget; +use crate::gates::add_many_u32::U32AddManyGate; use crate::gates::arithmetic_base::ArithmeticGate; use crate::gates::arithmetic_extension::ArithmeticExtensionGate; use crate::gates::arithmetic_u32::U32ArithmeticGate; From 5b5084b180bbf1991c80d239f77393574181c411 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Mon, 31 Jan 2022 10:49:06 -0800 Subject: [PATCH 083/143] clippy --- plonky2/src/gadgets/arithmetic_u32.rs | 6 +++--- plonky2/src/gadgets/curve.rs | 4 ++-- plonky2/src/gadgets/nonnative.rs | 14 ++++++-------- plonky2/src/gates/range_check_u32.rs | 2 +- 4 files changed, 12 insertions(+), 14 deletions(-) diff --git a/plonky2/src/gadgets/arithmetic_u32.rs b/plonky2/src/gadgets/arithmetic_u32.rs index f57c1db5..8a0d5784 100644 --- a/plonky2/src/gadgets/arithmetic_u32.rs +++ b/plonky2/src/gadgets/arithmetic_u32.rs @@ -214,13 +214,13 @@ impl, const D: usize> SimpleGenerator } fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { - let x = witness.get_target(self.x.clone()); + let x = witness.get_target(self.x); let x_u64 = x.to_canonical_u64(); let low = x_u64 as u32; let high = (x_u64 >> 32) as u32; - out_buffer.set_u32_target(self.low.clone(), low); - out_buffer.set_u32_target(self.high.clone(), high); + out_buffer.set_u32_target(self.low, low); + out_buffer.set_u32_target(self.high, high); } } diff --git a/plonky2/src/gadgets/curve.rs b/plonky2/src/gadgets/curve.rs index 92f45242..2ff22319 100644 --- a/plonky2/src/gadgets/curve.rs +++ b/plonky2/src/gadgets/curve.rs @@ -110,9 +110,9 @@ impl, const D: usize> CircuitBuilder { let s_squared = self.mul_nonnative(&s, &s); let x_sum = self.add_nonnative(x2, x1); let x3 = self.sub_nonnative(&s_squared, &x_sum); - let x_diff = self.sub_nonnative(&x1, &x3); + let x_diff = self.sub_nonnative(x1, &x3); let prod = self.mul_nonnative(&s, &x_diff); - let y3 = self.sub_nonnative(&prod, &y1); + let y3 = self.sub_nonnative(&prod, y1); AffinePointTarget { x: x3, y: y3 } } diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 637eaccc..245b0403 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -74,7 +74,7 @@ impl, const D: usize> CircuitBuilder { a: a.clone(), b: b.clone(), sum: sum.clone(), - overflow: overflow.clone(), + overflow, _phantom: PhantomData, }); @@ -120,7 +120,7 @@ impl, const D: usize> CircuitBuilder { self.add_simple_generator(NonNativeMultipleAddsGenerator:: { summands: summands.clone(), sum: sum.clone(), - overflow: overflow.clone(), + overflow, _phantom: PhantomData, }); @@ -161,7 +161,7 @@ impl, const D: usize> CircuitBuilder { a: a.clone(), b: b.clone(), diff: diff.clone(), - overflow: overflow.clone(), + overflow, _phantom: PhantomData, }); @@ -250,11 
+250,10 @@ impl, const D: usize> CircuitBuilder { let expected_product = self.add_biguint(&mod_times_div, &one); self.connect_biguint(&product, &expected_product); - let inv = NonNativeTarget:: { + NonNativeTarget:: { value: inv_biguint, _phantom: PhantomData, - }; - inv + } } /// Returns `x % |FF|` as a `NonNativeTarget`. @@ -362,8 +361,7 @@ impl, const D: usize, FF: Field> SimpleGenerator fn dependencies(&self) -> Vec { self.summands .iter() - .map(|summand| summand.value.limbs.iter().map(|limb| limb.0)) - .flatten() + .flat_map(|summand| summand.value.limbs.iter().map(|limb| limb.0)) .collect() } diff --git a/plonky2/src/gates/range_check_u32.rs b/plonky2/src/gates/range_check_u32.rs index 0e73990d..79e91de8 100644 --- a/plonky2/src/gates/range_check_u32.rs +++ b/plonky2/src/gates/range_check_u32.rs @@ -141,7 +141,7 @@ impl, const D: usize> Gate for U32RangeCheckG _local_constants: &[F], ) -> Vec>> { let gen = U32RangeCheckGenerator { - gate: self.clone(), + gate: *self, gate_index, }; vec![Box::new(gen.adapter())] From 11a1e52c38a140a1aed9c6a9f4684a056d97be60 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Mon, 31 Jan 2022 11:17:00 -0800 Subject: [PATCH 084/143] ignore ecdsa circuit test --- plonky2/src/gadgets/ecdsa.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index 2acd1c80..eba04d85 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -60,6 +60,7 @@ mod tests { use crate::plonk::verifier::verify; #[test] + #[ignore] fn test_ecdsa_circuit() -> Result<()> { const D: usize = 2; type C = PoseidonGoldilocksConfig; From b0afb581f1d7f631f0c7469e8f7fefff855daba0 Mon Sep 17 00:00:00 2001 From: Nicholas Ward Date: Mon, 31 Jan 2022 11:23:40 -0800 Subject: [PATCH 085/143] fixed clippy warnings from CI --- plonky2/src/gates/gate.rs | 2 +- plonky2/src/plonk/circuit_builder.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plonky2/src/gates/gate.rs b/plonky2/src/gates/gate.rs index 9ae2a271..9d21c273 100644 --- a/plonky2/src/gates/gate.rs +++ b/plonky2/src/gates/gate.rs @@ -113,7 +113,7 @@ pub trait Gate, const D: usize>: 'static + Send + S builder: &mut CircuitBuilder, mut vars: EvaluationTargets, prefix: &[bool], - combined_gate_constraints: &mut Vec>, + combined_gate_constraints: &mut [ExtensionTarget], ) { let filter = compute_filter_recursively(builder, prefix, vars.local_constants); vars.remove_prefix(prefix); diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index 58fff814..ad216d69 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -661,14 +661,14 @@ impl, const D: usize> CircuitBuilder { let subgroup = F::two_adic_subgroup(degree_bits); let constant_vecs = timed!( - &mut timing, + timing, "generate constant polynomials", self.constant_polys(&prefixed_gates, num_constants) ); let k_is = get_unique_coset_shifts(degree, self.config.num_routed_wires); let (sigma_vecs, forest) = timed!( - &mut timing, + timing, "generate sigma polynomials", self.sigma_vecs(&k_is, &subgroup) ); From b0738c2094bed3b345b7f9cc109056746f526e83 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 31 Jan 2022 21:05:23 +0100 Subject: [PATCH 086/143] Fix degree issue --- starky/src/fibonacci_stark.rs | 2 +- starky/src/get_challenges.rs | 1 + starky/src/verifier.rs | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index ffaa14a7..06e366dc 
100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -120,6 +120,6 @@ mod tests { &mut TimingTree::default(), )?; - verify(stark, proof, &config, num_rows) + verify(stark, proof, &config, 5) } } diff --git a/starky/src/get_challenges.rs b/starky/src/get_challenges.rs index d6a9b562..1498f764 100644 --- a/starky/src/get_challenges.rs +++ b/starky/src/get_challenges.rs @@ -24,6 +24,7 @@ fn get_challenges, C: GenericConfig, cons let num_challenges = config.num_challenges; let num_fri_queries = config.fri_config.num_query_rounds; let lde_size = 1 << (degree_bits + config.fri_config.rate_bits); + dbg!(lde_size); let mut challenger = Challenger::::new(); diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index 63e063af..ea9945ff 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -31,6 +31,7 @@ where [(); S::PUBLIC_INPUTS]:, { let challenges = proof_with_pis.get_challenges(config, degree_bits)?; + dbg!(&challenges.fri_challenges.fri_query_indices); verify_with_challenges(stark, proof_with_pis, challenges, config) } From f2369f4fae0dbc4b334067780d014778a3820ad1 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Tue, 1 Feb 2022 10:48:53 +0100 Subject: [PATCH 087/143] Test pass --- plonky2/src/hash/merkle_proofs.rs | 1 - starky/src/fibonacci_stark.rs | 2 +- starky/src/get_challenges.rs | 1 - starky/src/prover.rs | 2 +- starky/src/stark.rs | 3 ++- starky/src/verifier.rs | 2 +- 6 files changed, 5 insertions(+), 6 deletions(-) diff --git a/plonky2/src/hash/merkle_proofs.rs b/plonky2/src/hash/merkle_proofs.rs index feb39791..c2f3655d 100644 --- a/plonky2/src/hash/merkle_proofs.rs +++ b/plonky2/src/hash/merkle_proofs.rs @@ -31,7 +31,6 @@ pub(crate) fn verify_merkle_proof>( merkle_cap: &MerkleCap, proof: &MerkleProof, ) -> Result<()> { - dbg!(leaf_index); let mut index = leaf_index; let mut current_digest = H::hash(&leaf_data, false); for &sibling_digest in proof.siblings.iter() { diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index 06e366dc..488f1d46 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -59,7 +59,7 @@ impl, const D: usize> Stark for FibonacciStar FE: FieldExtension, P: PackedField, { - // // Check public inputs. + // Check public inputs. 
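As a plain-Rust picture of the trace these constraints describe, assuming the usual two-register Fibonacci layout [x0, x1] (u64 wrapping arithmetic standing in for field arithmetic):

    // Row i+1 is derived from row i by: next.x0 = local.x1, next.x1 = local.x0 + local.x1.
    fn fib_trace(x0: u64, x1: u64, rows: usize) -> Vec<[u64; 2]> {
        let mut trace = vec![[x0, x1]];
        for _ in 1..rows {
            let [a, b] = *trace.last().unwrap();
            trace.push([b, a.wrapping_add(b)]);
        }
        trace
    }

    fn main() {
        let trace = fib_trace(0, 1, 8);
        assert_eq!(trace[0], [0, 1]); // pinned by the first-row public inputs x0, x1
        assert_eq!(trace.last().unwrap()[1], 21); // pinned by the last-row public input `res`
    }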
// yield_constr.one_first_row(vars.local_values[0] - vars.public_inputs[Self::PI_INDEX_X0]); // yield_constr.one_first_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_X1]); // yield_constr.one_last_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_RES]); diff --git a/starky/src/get_challenges.rs b/starky/src/get_challenges.rs index 1498f764..d6a9b562 100644 --- a/starky/src/get_challenges.rs +++ b/starky/src/get_challenges.rs @@ -24,7 +24,6 @@ fn get_challenges, C: GenericConfig, cons let num_challenges = config.num_challenges; let num_fri_queries = config.fri_config.num_query_rounds; let lde_size = 1 << (degree_bits + config.fri_config.rate_bits); - dbg!(lde_size); let mut challenger = Challenger::::new(); diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 6a22e671..e9c31d98 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -124,7 +124,7 @@ where timing, "compute openings proof", PolynomialBatch::prove_openings( - &S::fri_instance(zeta, g, rate_bits), + &S::fri_instance(zeta, g, rate_bits, config.num_challenges), initial_merkle_trees, &mut challenger, &fri_params, diff --git a/starky/src/stark.rs b/starky/src/stark.rs index f91d4fdd..3b8c795a 100644 --- a/starky/src/stark.rs +++ b/starky/src/stark.rs @@ -67,10 +67,11 @@ pub trait Stark, const D: usize>: Sync { zeta: F::Extension, g: F::Extension, rate_bits: usize, + num_challenges: usize, ) -> FriInstanceInfo { let no_blinding_oracle = FriOracleInfo { blinding: false }; let trace_info = FriPolynomialInfo::from_range(0, 0..Self::COLUMNS); - let quotient_info = FriPolynomialInfo::from_range(1, 0..1 << rate_bits); + let quotient_info = FriPolynomialInfo::from_range(1, 0..(1 << rate_bits) * num_challenges); let zeta_batch = FriBatchInfo { point: zeta, polynomials: [trace_info.clone(), quotient_info].concat(), diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index ea9945ff..f01c72c5 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -31,7 +31,6 @@ where [(); S::PUBLIC_INPUTS]:, { let challenges = proof_with_pis.get_challenges(config, degree_bits)?; - dbg!(&challenges.fri_challenges.fri_query_indices); verify_with_challenges(stark, proof_with_pis, challenges, config) } @@ -115,6 +114,7 @@ where challenges.stark_zeta, F::primitive_root_of_unity(degree_bits).into(), config.fri_config.rate_bits, + config.num_challenges, ), &proof.openings.to_fri_openings(), &challenges.fri_challenges, From 9f8696ada55e8b87dbdef8f58a0e6c5183d5166f Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Tue, 1 Feb 2022 13:57:03 +0100 Subject: [PATCH 088/143] Fix bug --- starky/src/verifier.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index f01c72c5..5317cb5c 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -130,7 +130,7 @@ where fn eval_l_1_and_l_last(log_n: usize, x: F) -> (F, F) { let n = 1 << log_n; let g = F::primitive_root_of_unity(log_n); - let z_x = x.exp_power_of_2(log_n); + let z_x = x.exp_power_of_2(log_n) - F::ONE; let invs = F::batch_multiplicative_inverse(&[ F::from_canonical_usize(n) * (x - F::ONE), F::from_canonical_usize(n) * (g * x - F::ONE), From 984f44b2817c1cacb513bcf35d2e883613d87625 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Tue, 1 Feb 2022 14:41:27 +0100 Subject: [PATCH 089/143] Fix lde -> coset_lde bug --- field/src/polynomial/mod.rs | 5 +++++ starky/src/fibonacci_stark.rs | 6 +++--- starky/src/prover.rs | 5 +++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git 
a/field/src/polynomial/mod.rs b/field/src/polynomial/mod.rs index 4264c914..d36757b3 100644 --- a/field/src/polynomial/mod.rs +++ b/field/src/polynomial/mod.rs @@ -57,6 +57,11 @@ impl PolynomialValues { fft_with_options(coeffs, Some(rate_bits), None) } + pub fn coset_lde(self, rate_bits: usize) -> Self { + let coeffs = ifft(self).lde(rate_bits); + coeffs.coset_fft_with_options(F::coset_shift(), Some(rate_bits), None) + } + pub fn degree(&self) -> usize { self.degree_plus_one() .checked_sub(1) diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index 488f1d46..1d760455 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -60,9 +60,9 @@ impl, const D: usize> Stark for FibonacciStar P: PackedField, { // Check public inputs. - // yield_constr.one_first_row(vars.local_values[0] - vars.public_inputs[Self::PI_INDEX_X0]); - // yield_constr.one_first_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_X1]); - // yield_constr.one_last_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_RES]); + yield_constr.one_first_row(vars.local_values[0] - vars.public_inputs[Self::PI_INDEX_X0]); + yield_constr.one_first_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_X1]); + yield_constr.one_last_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_RES]); // x0 <- x1 yield_constr.one(vars.next_values[0] - vars.local_values[1]); diff --git a/starky/src/prover.rs b/starky/src/prover.rs index e9c31d98..352b03f7 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -1,6 +1,7 @@ use anyhow::{ensure, Result}; use itertools::Itertools; use plonky2::field::extension_field::Extendable; +use plonky2::field::extension_field::FieldExtension; use plonky2::field::field_types::Field; use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues}; use plonky2::field::zero_poly_coset::ZeroPolyOnCoset; @@ -169,13 +170,13 @@ where let lagrange_first = { let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); evals.values[0] = F::ONE; - evals.lde(rate_bits) + evals.coset_lde(rate_bits) }; // Evaluation of the last Lagrange polynomial on the LDE domain. let lagrange_last = { let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); evals.values[degree - 1] = F::ONE; - evals.lde(rate_bits) + evals.coset_lde(rate_bits) }; let z_h_on_coset = ZeroPolyOnCoset::::new(degree_bits, rate_bits); From 8ab4f855b26715d02200588d60f3425390479d31 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Tue, 1 Feb 2022 17:02:11 +0100 Subject: [PATCH 090/143] Add `fri_challenges()` to Challenger. --- field/src/field_types.rs | 1 - plonky2/src/fri/proof.rs | 15 +++++++- plonky2/src/fri/verifier.rs | 3 +- plonky2/src/iop/challenger.rs | 56 ++++++++++++++++++++++++++++- plonky2/src/plonk/get_challenges.rs | 52 ++++++--------------------- plonky2/src/plonk/plonk_common.rs | 1 - plonky2/src/plonk/proof.rs | 15 +------- starky/src/get_challenges.rs | 46 +++++------------------- starky/src/proof.rs | 4 +-- starky/src/prover.rs | 1 - starky/src/verifier.rs | 2 -- 11 files changed, 90 insertions(+), 106 deletions(-) diff --git a/field/src/field_types.rs b/field/src/field_types.rs index 845d8e83..0d7b314f 100644 --- a/field/src/field_types.rs +++ b/field/src/field_types.rs @@ -389,7 +389,6 @@ pub trait Field: /// Representative `g` of the coset used in FRI, so that LDEs in FRI are done over `gH`. fn coset_shift() -> Self { Self::MULTIPLICATIVE_GROUP_GENERATOR - // Self::ONE } /// Equivalent to *self + x * y, but may be cheaper. 
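The reason the prover needs coset evaluations at all, and hence the lde -> coset_lde fix above: the quotient computation divides by Z_H(x) = x^n - 1, which vanishes on the trace subgroup H contained in the LDE domain K of size n * 2^rate_bits, so the evaluations are taken on the shifted coset g*K instead, where

    \[
      Z_H(g\,k) = (g\,k)^n - 1 \neq 0 \qquad \text{for every } k \in K,
    \]

since g, the multiplicative group generator returned by `coset_shift`, lies outside K, so g*k can never land back in H. That shifted domain is exactly what `ZeroPolyOnCoset::new(degree_bits, rate_bits)` evaluates the vanishing polynomial over.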
diff --git a/plonky2/src/fri/proof.rs b/plonky2/src/fri/proof.rs index bca7b8db..1f9e6b16 100644 --- a/plonky2/src/fri/proof.rs +++ b/plonky2/src/fri/proof.rs @@ -16,7 +16,7 @@ use crate::iop::ext_target::ExtensionTarget; use crate::iop::target::Target; use crate::plonk::config::{GenericConfig, Hasher}; use crate::plonk::plonk_common::salt_size; -use crate::plonk::proof::{FriChallenges, FriInferredElements, ProofChallenges}; +use crate::plonk::proof::{FriInferredElements, ProofChallenges}; /// Evaluations and Merkle proof produced by the prover in a FRI query step. #[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)] @@ -362,3 +362,16 @@ impl, H: Hasher, const D: usize> CompressedFriPr } } } + +pub struct FriChallenges, const D: usize> { + // Scaling factor to combine polynomials. + pub fri_alpha: F::Extension, + + // Betas used in the FRI commit phase reductions. + pub fri_betas: Vec, + + pub fri_pow_response: F, + + // Indices at which the oracle is queried in FRI. + pub fri_query_indices: Vec, +} diff --git a/plonky2/src/fri/verifier.rs b/plonky2/src/fri/verifier.rs index 3e70c025..47f10b8a 100644 --- a/plonky2/src/fri/verifier.rs +++ b/plonky2/src/fri/verifier.rs @@ -4,14 +4,13 @@ use plonky2_field::field_types::Field; use plonky2_field::interpolation::{barycentric_weights, interpolate}; use plonky2_util::{log2_strict, reverse_index_bits_in_place}; -use crate::fri::proof::{FriInitialTreeProof, FriProof, FriQueryRound}; +use crate::fri::proof::{FriChallenges, FriInitialTreeProof, FriProof, FriQueryRound}; use crate::fri::structure::{FriBatchInfo, FriInstanceInfo, FriOpenings}; use crate::fri::{FriConfig, FriParams}; use crate::hash::hash_types::RichField; use crate::hash::merkle_proofs::verify_merkle_proof; use crate::hash::merkle_tree::MerkleCap; use crate::plonk::config::{GenericConfig, Hasher}; -use crate::plonk::proof::{FriChallenges, OpeningSet, ProofChallenges}; use crate::util::reducing::ReducingFactor; use crate::util::reverse_bits; diff --git a/plonky2/src/iop/challenger.rs b/plonky2/src/iop/challenger.rs index b8ca4fb7..d7583646 100644 --- a/plonky2/src/iop/challenger.rs +++ b/plonky2/src/iop/challenger.rs @@ -2,7 +2,10 @@ use std::convert::TryInto; use std::marker::PhantomData; use plonky2_field::extension_field::{Extendable, FieldExtension}; +use plonky2_field::polynomial::PolynomialCoeffs; +use crate::fri::proof::FriChallenges; +use crate::fri::FriConfig; use crate::hash::hash_types::RichField; use crate::hash::hash_types::{HashOut, HashOutTarget, MerkleCapTarget}; use crate::hash::hashing::{PlonkyPermutation, SPONGE_RATE, SPONGE_WIDTH}; @@ -10,7 +13,7 @@ use crate::hash::merkle_tree::MerkleCap; use crate::iop::ext_target::ExtensionTarget; use crate::iop::target::Target; use crate::plonk::circuit_builder::CircuitBuilder; -use crate::plonk::config::{AlgebraicHasher, GenericHashOut, Hasher}; +use crate::plonk::config::{AlgebraicHasher, GenericConfig, GenericHashOut, Hasher}; use crate::plonk::proof::{OpeningSet, OpeningSetTarget}; /// Observes prover messages, and generates challenges by hashing the transcript, a la Fiat-Shamir. 
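What folding this logic into a single `fri_challenges` method buys is that prover and verifier are forced to absorb commitments and squeeze challenges in exactly the same order. A toy transcript (hypothetical, nothing like the real Poseidon-based challenger) illustrating that order dependence:

    #[derive(Clone)]
    struct ToyTranscript {
        state: u64,
    }

    impl ToyTranscript {
        fn observe(&mut self, x: u64) {
            // Stand-in mixing step; the real challenger absorbs into a sponge.
            self.state = self.state.rotate_left(13) ^ x.wrapping_mul(0x9E37_79B9_7F4A_7C15);
        }
        fn challenge(&mut self) -> u64 {
            self.state = self.state.wrapping_mul(0x2545_F491_4F6C_DD1D).wrapping_add(1);
            self.state
        }
    }

    fn main() {
        let caps = [7u64, 11, 13]; // stand-ins for the commit-phase Merkle caps
        let run = |caps: &[u64]| {
            let mut t = ToyTranscript { state: 0 };
            caps.iter()
                .map(|&c| {
                    t.observe(c);
                    t.challenge()
                })
                .collect::<Vec<u64>>()
        };
        // Same observation order on both sides => identical "betas".
        assert_eq!(run(&caps), run(&caps));
        // A different order yields different challenges.
        assert_ne!(run(&caps), run(&[13, 11, 7]));
    }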
@@ -152,6 +155,57 @@ impl> Challenger { .collect() } + pub fn fri_challenges, const D: usize>( + &mut self, + commit_phase_merkle_caps: &[MerkleCap], + final_poly: &PolynomialCoeffs, + pow_witness: F, + degree_bits: usize, + config: &FriConfig, + ) -> FriChallenges + where + F: RichField + Extendable, + { + let num_fri_queries = config.num_query_rounds; + let lde_size = 1 << (degree_bits + config.rate_bits); + // Scaling factor to combine polynomials. + let fri_alpha = self.get_extension_challenge::(); + + // Recover the random betas used in the FRI reductions. + let fri_betas = commit_phase_merkle_caps + .iter() + .map(|cap| { + self.observe_cap(cap); + self.get_extension_challenge::() + }) + .collect(); + + self.observe_extension_elements(&final_poly.coeffs); + + let fri_pow_response = C::InnerHasher::hash( + &self + .get_hash() + .elements + .iter() + .copied() + .chain(Some(pow_witness)) + .collect::>(), + false, + ) + .elements[0]; + + let fri_query_indices = (0..num_fri_queries) + .map(|_| self.get_challenge().to_canonical_u64() as usize % lde_size) + .collect(); + + FriChallenges { + fri_alpha, + fri_betas, + fri_pow_response, + fri_query_indices, + } + } + /// Absorb any buffered inputs. After calling this, the input buffer will be empty. fn absorb_buffered_inputs(&mut self) { if self.input_buffer.is_empty() { diff --git a/plonky2/src/plonk/get_challenges.rs b/plonky2/src/plonk/get_challenges.rs index 440705ce..3167fef7 100644 --- a/plonky2/src/plonk/get_challenges.rs +++ b/plonky2/src/plonk/get_challenges.rs @@ -1,10 +1,9 @@ use std::collections::HashSet; -use itertools::Itertools; use plonky2_field::extension_field::Extendable; use plonky2_field::polynomial::PolynomialCoeffs; -use crate::fri::proof::{CompressedFriProof, FriProof}; +use crate::fri::proof::{CompressedFriProof, FriChallenges, FriProof}; use crate::fri::verifier::{compute_evaluation, fri_combine_initial, PrecomputedReducedOpenings}; use crate::hash::hash_types::RichField; use crate::hash::merkle_tree::MerkleCap; @@ -12,8 +11,8 @@ use crate::iop::challenger::Challenger; use crate::plonk::circuit_data::CommonCircuitData; use crate::plonk::config::{GenericConfig, Hasher}; use crate::plonk::proof::{ - CompressedProof, CompressedProofWithPublicInputs, FriChallenges, FriInferredElements, - OpeningSet, Proof, ProofChallenges, ProofWithPublicInputs, + CompressedProof, CompressedProofWithPublicInputs, FriInferredElements, OpeningSet, Proof, + ProofChallenges, ProofWithPublicInputs, }; use crate::util::reverse_bits; @@ -30,8 +29,6 @@ fn get_challenges, C: GenericConfig, cons ) -> anyhow::Result> { let config = &common_data.config; let num_challenges = config.num_challenges; - let num_fri_queries = config.fri_config.num_query_rounds; - let lde_size = common_data.lde_size(); let mut challenger = Challenger::::new(); @@ -51,47 +48,18 @@ fn get_challenges, C: GenericConfig, cons challenger.observe_opening_set(openings); - // Scaling factor to combine polynomials. - let fri_alpha = challenger.get_extension_challenge::(); - - // Recover the random betas used in the FRI reductions. 
- let fri_betas = commit_phase_merkle_caps - .iter() - .map(|cap| { - challenger.observe_cap(cap); - challenger.get_extension_challenge::() - }) - .collect(); - - challenger.observe_extension_elements(&final_poly.coeffs); - - let fri_pow_response = C::InnerHasher::hash( - &challenger - .get_hash() - .elements - .iter() - .copied() - .chain(Some(pow_witness)) - .collect_vec(), - false, - ) - .elements[0]; - - let fri_query_indices = (0..num_fri_queries) - .map(|_| challenger.get_challenge().to_canonical_u64() as usize % lde_size) - .collect(); - Ok(ProofChallenges { plonk_betas, plonk_gammas, plonk_alphas, plonk_zeta, - fri_challenges: FriChallenges { - fri_alpha, - fri_betas, - fri_pow_response, - fri_query_indices, - }, + fri_challenges: challenger.fri_challenges::( + commit_phase_merkle_caps, + final_poly, + pow_witness, + common_data.degree_bits, + &config.fri_config, + ), }) } diff --git a/plonky2/src/plonk/plonk_common.rs b/plonky2/src/plonk/plonk_common.rs index 94279d12..519593b3 100644 --- a/plonky2/src/plonk/plonk_common.rs +++ b/plonky2/src/plonk/plonk_common.rs @@ -1,7 +1,6 @@ use plonky2_field::extension_field::Extendable; use plonky2_field::field_types::Field; use plonky2_field::packed_field::PackedField; -use plonky2_util::log2_strict; use crate::fri::oracle::SALT_SIZE; use crate::fri::structure::FriOracleInfo; diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index 401b9f52..9d9eaaff 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -3,7 +3,7 @@ use rayon::prelude::*; use serde::{Deserialize, Serialize}; use crate::fri::oracle::PolynomialBatch; -use crate::fri::proof::{CompressedFriProof, FriProof, FriProofTarget}; +use crate::fri::proof::{CompressedFriProof, FriChallenges, FriProof, FriProofTarget}; use crate::fri::structure::{ FriOpeningBatch, FriOpeningBatchTarget, FriOpenings, FriOpeningsTarget, }; @@ -242,19 +242,6 @@ pub(crate) struct ProofChallenges, const D: usize> pub fri_challenges: FriChallenges, } -pub struct FriChallenges, const D: usize> { - // Scaling factor to combine polynomials. - pub fri_alpha: F::Extension, - - // Betas used in the FRI commit phase reductions. - pub fri_betas: Vec, - - pub fri_pow_response: F, - - // Indices at which the oracle is queried in FRI. - pub fri_query_indices: Vec, -} - /// Coset elements that can be inferred in the FRI reduction steps. pub(crate) struct FriInferredElements, const D: usize>( pub Vec, diff --git a/starky/src/get_challenges.rs b/starky/src/get_challenges.rs index d6a9b562..9d9b808e 100644 --- a/starky/src/get_challenges.rs +++ b/starky/src/get_challenges.rs @@ -5,8 +5,7 @@ use plonky2::fri::proof::FriProof; use plonky2::hash::hash_types::RichField; use plonky2::hash::merkle_tree::MerkleCap; use plonky2::iop::challenger::Challenger; -use plonky2::plonk::config::{GenericConfig, Hasher}; -use plonky2::plonk::proof::FriChallenges; +use plonky2::plonk::config::GenericConfig; use crate::config::StarkConfig; use crate::proof::{StarkOpeningSet, StarkProof, StarkProofChallenges, StarkProofWithPublicInputs}; @@ -35,45 +34,16 @@ fn get_challenges, C: GenericConfig, cons openings.observe(&mut challenger); - // Scaling factor to combine polynomials. - let fri_alpha = challenger.get_extension_challenge::(); - - // Recover the random betas used in the FRI reductions. 
- let fri_betas = commit_phase_merkle_caps - .iter() - .map(|cap| { - challenger.observe_cap(cap); - challenger.get_extension_challenge::() - }) - .collect(); - - challenger.observe_extension_elements(&final_poly.coeffs); - - let fri_pow_response = C::InnerHasher::hash( - &challenger - .get_hash() - .elements - .iter() - .copied() - .chain(Some(pow_witness)) - .collect::>(), - false, - ) - .elements[0]; - - let fri_query_indices = (0..num_fri_queries) - .map(|_| challenger.get_challenge().to_canonical_u64() as usize % lde_size) - .collect(); - Ok(StarkProofChallenges { stark_alphas, stark_zeta, - fri_challenges: FriChallenges { - fri_alpha, - fri_betas, - fri_pow_response, - fri_query_indices, - }, + fri_challenges: challenger.fri_challenges::( + commit_phase_merkle_caps, + final_poly, + pow_witness, + degree_bits, + &config.fri_config, + ), }) } diff --git a/starky/src/proof.rs b/starky/src/proof.rs index 2d9597d0..c2d2ac67 100644 --- a/starky/src/proof.rs +++ b/starky/src/proof.rs @@ -1,13 +1,11 @@ use plonky2::field::extension_field::Extendable; -use plonky2::field::field_types::Field; use plonky2::fri::oracle::PolynomialBatch; -use plonky2::fri::proof::{CompressedFriProof, FriProof}; +use plonky2::fri::proof::{CompressedFriProof, FriChallenges, FriProof}; use plonky2::fri::structure::{FriOpeningBatch, FriOpenings}; use plonky2::hash::hash_types::RichField; use plonky2::hash::merkle_tree::MerkleCap; use plonky2::iop::challenger::Challenger; use plonky2::plonk::config::{GenericConfig, Hasher}; -use plonky2::plonk::proof::FriChallenges; use rayon::prelude::*; pub struct StarkProof, C: GenericConfig, const D: usize> { diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 352b03f7..1c5310e4 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -1,7 +1,6 @@ use anyhow::{ensure, Result}; use itertools::Itertools; use plonky2::field::extension_field::Extendable; -use plonky2::field::extension_field::FieldExtension; use plonky2::field::field_types::Field; use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues}; use plonky2::field::zero_poly_coset::ZeroPolyOnCoset; diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index 5317cb5c..298e4797 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -3,10 +3,8 @@ use plonky2::field::extension_field::{Extendable, FieldExtension}; use plonky2::field::field_types::Field; use plonky2::fri::verifier::verify_fri_proof; use plonky2::hash::hash_types::RichField; -use plonky2::plonk::circuit_data::CommonCircuitData; use plonky2::plonk::config::GenericConfig; use plonky2::plonk::plonk_common::reduce_with_powers; -use plonky2::plonk::proof::ProofWithPublicInputs; use plonky2_util::log2_strict; use crate::config::StarkConfig; From 1e04f4f5a437345340110d6107671910100d220d Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Tue, 1 Feb 2022 17:34:03 +0100 Subject: [PATCH 091/143] Comments --- plonky2/src/fri/verifier.rs | 2 +- starky/src/fibonacci_stark.rs | 2 +- starky/src/get_challenges.rs | 4 +++- starky/src/proof.rs | 2 +- starky/src/prover.rs | 4 +++- starky/src/verifier.rs | 22 +++++++++++----------- system_zero/src/system_zero.rs | 6 ++++-- 7 files changed, 24 insertions(+), 18 deletions(-) diff --git a/plonky2/src/fri/verifier.rs b/plonky2/src/fri/verifier.rs index 47f10b8a..49cfa053 100644 --- a/plonky2/src/fri/verifier.rs +++ b/plonky2/src/fri/verifier.rs @@ -86,7 +86,7 @@ pub fn verify_fri_proof< ); let precomputed_reduced_evals = - PrecomputedReducedOpenings::from_os_and_alpha(&openings, 
challenges.fri_alpha); + PrecomputedReducedOpenings::from_os_and_alpha(openings, challenges.fri_alpha); for (&x_index, round_proof) in challenges .fri_query_indices .iter() diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index 1d760455..dc6d676a 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -120,6 +120,6 @@ mod tests { &mut TimingTree::default(), )?; - verify(stark, proof, &config, 5) + verify(stark, proof, &config) } } diff --git a/starky/src/get_challenges.rs b/starky/src/get_challenges.rs index 9d9b808e..79e1c032 100644 --- a/starky/src/get_challenges.rs +++ b/starky/src/get_challenges.rs @@ -10,6 +10,7 @@ use plonky2::plonk::config::GenericConfig; use crate::config::StarkConfig; use crate::proof::{StarkOpeningSet, StarkProof, StarkProofChallenges, StarkProofWithPublicInputs}; +#[allow(clippy::too_many_arguments)] fn get_challenges, C: GenericConfig, const D: usize>( trace_cap: &MerkleCap, quotient_polys_cap: &MerkleCap, @@ -61,7 +62,7 @@ impl, C: GenericConfig, const D: usize> .fri_query_indices) } - /// Computes all Fiat-Shamir challenges used in the Plonk proof. + /// Computes all Fiat-Shamir challenges used in the STARK proof. pub(crate) fn get_challenges( &self, config: &StarkConfig, @@ -93,6 +94,7 @@ impl, C: GenericConfig, const D: usize> } } +// TODO: Deal with the compressed stuff. // impl, C: GenericConfig, const D: usize> // CompressedProofWithPublicInputs // { diff --git a/starky/src/proof.rs b/starky/src/proof.rs index c2d2ac67..50ef21bc 100644 --- a/starky/src/proof.rs +++ b/starky/src/proof.rs @@ -25,7 +25,7 @@ pub struct StarkProofWithPublicInputs< const D: usize, > { pub proof: StarkProof, - // TODO: Maybe make it generic over a `S: Start` and replace with `[F; S::PUBLIC_INPUTS]`. + // TODO: Maybe make it generic over a `S: Stark` and replace with `[F; S::PUBLIC_INPUTS]`. pub public_inputs: Vec, } diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 1c5310e4..d6543dae 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -20,7 +20,6 @@ use crate::proof::{StarkOpeningSet, StarkProof, StarkProofWithPublicInputs}; use crate::stark::Stark; use crate::vars::StarkEvaluationVars; -// TODO: Deal with public inputs. pub fn prove( stark: S, config: &StarkConfig, @@ -184,6 +183,7 @@ where let get_at_index = |comm: &PolynomialBatch, i: usize| -> [F; S::COLUMNS] { comm.get_lde_values(i).try_into().unwrap() }; + // Last element of the subgroup. let last = F::primitive_root_of_unity(degree_bits).inverse(); let coset = F::cyclic_subgroup_coset_known_order( F::primitive_root_of_unity(degree_bits + rate_bits), @@ -211,6 +211,8 @@ where stark.eval_packed_base(vars, &mut consumer); // TODO: Fix this once we use a genuine `PackedField`. let mut constraints_evals = consumer.accumulators(); + // We divide the constraints evaluations by `Z_H(x) / x - last`, i.e., the vanishing + // polynomial of `H` without it's last element. 
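In formulas, the division the comment above describes: with H = {1, g, ..., g^{n-1}} and Z_H(x) = x^n - 1, transition constraints only need to hold between consecutive rows, i.e. on H minus its last point g^{n-1}, so the quotient is formed as

    \[
      \frac{Z_H(x)}{x - g^{\,n-1}} = \prod_{i=0}^{n-2} \bigl(x - g^{\,i}\bigr),
      \qquad
      q(x) = \mathrm{constraint}(x) \cdot \frac{x - g^{\,n-1}}{Z_H(x)},
    \]

which is why each evaluation below is multiplied both by `denominator_inv` (the inverse of Z_H on the coset) and by `z_last = x - last`, and why the verifier recomputes both `z_h_zeta` and `z_last` at the evaluation point zeta before checking the opened quotient values. The first/last-row constraints instead use the closed forms from `eval_l_1_and_l_last`, L_1(x) = (x^n - 1)/(n * (x - 1)) and L_n(x) = (x^n - 1)/(n * (g * x - 1)).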
let denominator_inv = z_h_on_coset.eval_inverse(i); let z_last = coset[i] - last; for eval in &mut constraints_evals { diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index 298e4797..6a4464e1 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -13,7 +13,7 @@ use crate::proof::{StarkOpeningSet, StarkProof, StarkProofChallenges, StarkProof use crate::stark::Stark; use crate::vars::StarkEvaluationVars; -pub(crate) fn verify< +pub fn verify< F: RichField + Extendable, C: GenericConfig, S: Stark, @@ -22,14 +22,14 @@ pub(crate) fn verify< stark: S, proof_with_pis: StarkProofWithPublicInputs, config: &StarkConfig, - degree_bits: usize, ) -> Result<()> where [(); S::COLUMNS]:, [(); S::PUBLIC_INPUTS]:, { + let degree_bits = log2_strict(recover_degree(&proof_with_pis.proof, config)); let challenges = proof_with_pis.get_challenges(config, degree_bits)?; - verify_with_challenges(stark, proof_with_pis, challenges, config) + verify_with_challenges(stark, proof_with_pis, challenges, degree_bits, config) } pub(crate) fn verify_with_challenges< @@ -41,6 +41,7 @@ pub(crate) fn verify_with_challenges< stark: S, proof_with_pis: StarkProofWithPublicInputs, challenges: StarkProofChallenges, + degree_bits: usize, config: &StarkConfig, ) -> Result<()> where @@ -51,9 +52,6 @@ where proof, public_inputs, } = proof_with_pis; - let degree = recover_degree(&proof, config); - let degree_bits = log2_strict(degree); - let local_values = &proof.openings.local_values; let next_values = &proof.openings.local_values; let StarkOpeningSet { @@ -80,17 +78,16 @@ where .iter() .map(|&alpha| F::Extension::from_basefield(alpha)) .collect::>(), - l_1.into(), - l_last.into(), + l_1, + l_last, ); stark.eval_ext(vars, &mut consumer); let acc = consumer.accumulators(); - // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta. + // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x) / (x - last)`, at zeta. let quotient_polys_zeta = &proof.openings.quotient_polys; let zeta_pow_deg = challenges.stark_zeta.exp_power_of_2(degree_bits); let z_h_zeta = zeta_pow_deg - F::Extension::ONE; - let g = F::primitive_root_of_unity(degree_bits + config.fri_config.rate_bits); let last = F::primitive_root_of_unity(degree_bits).inverse(); let z_last = challenges.stark_zeta - last.into(); // `quotient_polys_zeta` holds `num_challenges * quotient_degree_factor` evaluations. @@ -124,7 +121,9 @@ where Ok(()) } -/// Evaluate the Lagrange basis `L_1` and `L_n` at a point `x`. +/// Evaluate the Lagrange polynomials `L_1` and `L_n` at a point `x`. +/// `L_1(x) = (x^n - 1)/(n * (x - 1))` +/// `L_n(x) = (x^n - 1)/(n * (g * x - 1))`, with `g` the first element of the subgroup. fn eval_l_1_and_l_last(log_n: usize, x: F) -> (F, F) { let n = 1 << log_n; let g = F::primitive_root_of_unity(log_n); @@ -137,6 +136,7 @@ fn eval_l_1_and_l_last(log_n: usize, x: F) -> (F, F) { (z_x * invs[0], z_x * invs[1]) } +/// Recover the length of the trace from a STARK proof and a STARK config. fn recover_degree, C: GenericConfig, const D: usize>( proof: &StarkProof, config: &StarkConfig, diff --git a/system_zero/src/system_zero.rs b/system_zero/src/system_zero.rs index 38326b68..31b8434f 100644 --- a/system_zero/src/system_zero.rs +++ b/system_zero/src/system_zero.rs @@ -16,6 +16,7 @@ use crate::public_input_layout::NUM_PUBLIC_INPUTS; /// We require at least 2^16 rows as it helps support efficient 16-bit range checks. 
const MIN_TRACE_ROWS: usize = 1 << 16; +#[derive(Copy, Clone)] pub struct SystemZero, const D: usize> { _phantom: PhantomData, } @@ -92,6 +93,7 @@ mod tests { use starky::config::StarkConfig; use starky::prover::prove; use starky::stark::Stark; + use starky::verifier::verify; use crate::system_zero::SystemZero; @@ -108,8 +110,8 @@ mod tests { let config = StarkConfig::standard_fast_config(); let mut timing = TimingTree::new("prove", Level::Debug); let trace = system.generate_trace(); - prove::(system, &config, trace, public_inputs, &mut timing)?; + let proof = prove::(system, &config, trace, public_inputs, &mut timing)?; - Ok(()) + verify(system, proof, &config) } } From 8e07058ad2456c53b6ff3f104f221168b59d0c6c Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Tue, 1 Feb 2022 09:00:22 -0800 Subject: [PATCH 092/143] Remove `inner_config` param - redundant with `inner_common_data` (#453) * Remove inner_config param - redundant with inner_common_data * import * imports --- plonky2/src/plonk/recursive_verifier.rs | 59 +++++++------------------ 1 file changed, 15 insertions(+), 44 deletions(-) diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index fccaea5c..3e4260a9 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -3,7 +3,7 @@ use plonky2_field::extension_field::Extendable; use crate::hash::hash_types::{HashOutTarget, RichField}; use crate::iop::challenger::RecursiveChallenger; use crate::plonk::circuit_builder::CircuitBuilder; -use crate::plonk::circuit_data::{CircuitConfig, CommonCircuitData, VerifierCircuitTarget}; +use crate::plonk::circuit_data::{CommonCircuitData, VerifierCircuitTarget}; use crate::plonk::config::{AlgebraicHasher, GenericConfig}; use crate::plonk::proof::{OpeningSetTarget, ProofTarget, ProofWithPublicInputsTarget}; use crate::plonk::vanishing_poly::eval_vanishing_poly_recursively; @@ -16,7 +16,6 @@ impl, const D: usize> CircuitBuilder { pub fn verify_proof_with_pis>( &mut self, proof_with_pis: ProofWithPublicInputsTarget, - inner_config: &CircuitConfig, inner_verifier_data: &VerifierCircuitTarget, inner_common_data: &CommonCircuitData, ) where @@ -33,7 +32,6 @@ impl, const D: usize> CircuitBuilder { self.verify_proof( proof, public_inputs_hash, - inner_config, inner_verifier_data, inner_common_data, ); @@ -44,7 +42,6 @@ impl, const D: usize> CircuitBuilder { &mut self, proof: ProofTarget, public_inputs_hash: HashOutTarget, - inner_config: &CircuitConfig, inner_verifier_data: &VerifierCircuitTarget, inner_common_data: &CommonCircuitData, ) where @@ -52,7 +49,7 @@ impl, const D: usize> CircuitBuilder { { let one = self.one_extension(); - let num_challenges = inner_config.num_challenges; + let num_challenges = inner_common_data.config.num_challenges; let mut challenger = RecursiveChallenger::::new(self); @@ -211,7 +208,7 @@ mod tests { use crate::fri::FriConfig; use crate::gates::noop::NoopGate; use crate::iop::witness::{PartialWitness, Witness}; - use crate::plonk::circuit_data::VerifierOnlyCircuitData; + use crate::plonk::circuit_data::{CircuitConfig, VerifierOnlyCircuitData}; use crate::plonk::config::{GenericConfig, KeccakGoldilocksConfig, PoseidonGoldilocksConfig}; use crate::plonk::proof::{CompressedProofWithPublicInputs, ProofWithPublicInputs}; use crate::plonk::prover::prove; @@ -228,7 +225,7 @@ mod tests { let (proof, vd, cd) = dummy_proof::(&config, 4_000)?; let (proof, _vd, cd) = - recursive_proof::(proof, vd, cd, &config, &config, None, true, true)?; + 
recursive_proof::(proof, vd, cd, &config, None, true, true)?; test_serialization(&proof, &cd)?; Ok(()) @@ -250,12 +247,12 @@ mod tests { // Shrink it to 2^13. let (proof, vd, cd) = - recursive_proof::(proof, vd, cd, &config, &config, Some(13), false, false)?; + recursive_proof::(proof, vd, cd, &config, Some(13), false, false)?; assert_eq!(cd.degree_bits, 13); // Shrink it to 2^12. let (proof, _vd, cd) = - recursive_proof::(proof, vd, cd, &config, &config, None, true, true)?; + recursive_proof::(proof, vd, cd, &config, None, true, true)?; assert_eq!(cd.degree_bits, 12); test_serialization(&proof, &cd)?; @@ -281,16 +278,7 @@ mod tests { assert_eq!(cd.degree_bits, 12); // A standard recursive proof. - let (proof, vd, cd) = recursive_proof( - proof, - vd, - cd, - &standard_config, - &standard_config, - None, - false, - false, - )?; + let (proof, vd, cd) = recursive_proof(proof, vd, cd, &standard_config, None, false, false)?; assert_eq!(cd.degree_bits, 12); // A high-rate recursive proof, designed to be verifiable with fewer routed wires. @@ -303,16 +291,8 @@ mod tests { }, ..standard_config }; - let (proof, vd, cd) = recursive_proof::( - proof, - vd, - cd, - &standard_config, - &high_rate_config, - None, - true, - true, - )?; + let (proof, vd, cd) = + recursive_proof::(proof, vd, cd, &high_rate_config, None, true, true)?; assert_eq!(cd.degree_bits, 12); // A final proof, optimized for size. @@ -327,16 +307,8 @@ mod tests { }, ..high_rate_config }; - let (proof, _vd, cd) = recursive_proof::( - proof, - vd, - cd, - &high_rate_config, - &final_config, - None, - true, - true, - )?; + let (proof, _vd, cd) = + recursive_proof::(proof, vd, cd, &final_config, None, true, true)?; assert_eq!(cd.degree_bits, 12, "final proof too large"); test_serialization(&proof, &cd)?; @@ -357,11 +329,11 @@ mod tests { let (proof, vd, cd) = dummy_proof::(&config, 4_000)?; let (proof, vd, cd) = - recursive_proof::(proof, vd, cd, &config, &config, None, false, false)?; + recursive_proof::(proof, vd, cd, &config, None, false, false)?; test_serialization(&proof, &cd)?; let (proof, _vd, cd) = - recursive_proof::(proof, vd, cd, &config, &config, None, false, false)?; + recursive_proof::(proof, vd, cd, &config, None, false, false)?; test_serialization(&proof, &cd)?; Ok(()) @@ -398,7 +370,6 @@ mod tests { inner_proof: ProofWithPublicInputs, inner_vd: VerifierOnlyCircuitData, inner_cd: CommonCircuitData, - inner_config: &CircuitConfig, config: &CircuitConfig, min_degree_bits: Option, print_gate_counts: bool, @@ -417,14 +388,14 @@ mod tests { pw.set_proof_with_pis_target(&inner_proof, &pt); let inner_data = VerifierCircuitTarget { - constants_sigmas_cap: builder.add_virtual_cap(inner_config.fri_config.cap_height), + constants_sigmas_cap: builder.add_virtual_cap(inner_cd.config.fri_config.cap_height), }; pw.set_cap_target( &inner_data.constants_sigmas_cap, &inner_vd.constants_sigmas_cap, ); - builder.verify_proof_with_pis(pt, inner_config, &inner_data, &inner_cd); + builder.verify_proof_with_pis(pt, &inner_data, &inner_cd); if print_gate_counts { builder.print_gate_counts(0); From 43800ba23dca3cc3e0935cc0d21715fd129e6ccb Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Tue, 1 Feb 2022 22:40:19 -0800 Subject: [PATCH 093/143] Rename `PrimeField` -> `Field64` (#454) * Rename `PrimeField` -> `Field64` And add TODOs for moving around various methods which aren't well-defined in their current traits, or would be well-defined in a supertrait. 
* fix test * TODOs as per PR feedback --- .../src/arch/x86_64/avx2_goldilocks_field.rs | 4 +-- .../arch/x86_64/avx512_goldilocks_field.rs | 4 +-- field/src/field_types.rs | 28 +++++++++++++++---- field/src/goldilocks_field.rs | 4 +-- field/src/inversion.rs | 4 +-- field/src/prime_field_testing.rs | 8 +++--- plonky2/src/bin/generate_constants.rs | 2 +- plonky2/src/gadgets/arithmetic.rs | 4 +-- plonky2/src/gadgets/arithmetic_extension.rs | 4 +-- plonky2/src/gates/assert_le.rs | 6 ++-- plonky2/src/gates/base_sum.rs | 4 +-- plonky2/src/gates/comparison.rs | 6 ++-- plonky2/src/gates/subtraction_u32.rs | 2 +- .../arch/aarch64/poseidon_goldilocks_neon.rs | 2 +- plonky2/src/hash/hash_types.rs | 4 +-- plonky2/src/hash/poseidon.rs | 6 ++-- plonky2/src/hash/poseidon_goldilocks.rs | 2 +- plonky2/src/util/serialization.rs | 10 +++---- waksman/src/sorting.rs | 2 +- 19 files changed, 61 insertions(+), 45 deletions(-) diff --git a/field/src/arch/x86_64/avx2_goldilocks_field.rs b/field/src/arch/x86_64/avx2_goldilocks_field.rs index b9336cee..e185cb4c 100644 --- a/field/src/arch/x86_64/avx2_goldilocks_field.rs +++ b/field/src/arch/x86_64/avx2_goldilocks_field.rs @@ -5,7 +5,7 @@ use std::iter::{Product, Sum}; use std::mem::transmute; use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}; -use crate::field_types::{Field, PrimeField}; +use crate::field_types::{Field, Field64}; use crate::goldilocks_field::GoldilocksField; use crate::ops::Square; use crate::packed_field::PackedField; @@ -510,7 +510,7 @@ unsafe fn interleave2(x: __m256i, y: __m256i) -> (__m256i, __m256i) { #[cfg(test)] mod tests { use crate::arch::x86_64::avx2_goldilocks_field::Avx2GoldilocksField; - use crate::field_types::PrimeField; + use crate::field_types::Field64; use crate::goldilocks_field::GoldilocksField; use crate::ops::Square; use crate::packed_field::PackedField; diff --git a/field/src/arch/x86_64/avx512_goldilocks_field.rs b/field/src/arch/x86_64/avx512_goldilocks_field.rs index ede87626..aaa05e93 100644 --- a/field/src/arch/x86_64/avx512_goldilocks_field.rs +++ b/field/src/arch/x86_64/avx512_goldilocks_field.rs @@ -5,7 +5,7 @@ use std::iter::{Product, Sum}; use std::mem::transmute; use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign}; -use crate::field_types::{Field, PrimeField}; +use crate::field_types::{Field, Field64}; use crate::goldilocks_field::GoldilocksField; use crate::ops::Square; use crate::packed_field::PackedField; @@ -407,7 +407,7 @@ unsafe fn interleave4(x: __m512i, y: __m512i) -> (__m512i, __m512i) { #[cfg(test)] mod tests { use crate::arch::x86_64::avx512_goldilocks_field::Avx512GoldilocksField; - use crate::field_types::PrimeField; + use crate::field_types::Field64; use crate::goldilocks_field::GoldilocksField; use crate::ops::Square; use crate::packed_field::PackedField; diff --git a/field/src/field_types.rs b/field/src/field_types.rs index 0d7b314f..65d5bf21 100644 --- a/field/src/field_types.rs +++ b/field/src/field_types.rs @@ -264,17 +264,25 @@ pub trait Field: subgroup.into_iter().map(|x| x * shift).collect() } - // TODO: move these to a new `PrimeField` trait (for all prime fields, not just 64-bit ones) + // TODO: The current behavior for composite fields doesn't seem natural or useful. + // Rename to `from_noncanonical_biguint` and have it return `n % Self::characteristic()`. fn from_biguint(n: BigUint) -> Self; + // TODO: Move to a new `PrimeField` trait. fn to_biguint(&self) -> BigUint; + /// Returns `n`. 
Assumes that `n` is already in canonical form, i.e. `n < Self::order()`. + // TODO: Should probably be unsafe. fn from_canonical_u64(n: u64) -> Self; + /// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`. + // TODO: Should probably be unsafe. fn from_canonical_u32(n: u32) -> Self { Self::from_canonical_u64(n as u64) } + /// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`. + // TODO: Should probably be unsafe. fn from_canonical_usize(n: usize) -> Self { Self::from_canonical_u64(n as u64) } @@ -283,11 +291,11 @@ pub trait Field: Self::from_canonical_u64(b as u64) } - /// Returns `n % Self::CHARACTERISTIC`. + /// Returns `n % Self::characteristic()`. fn from_noncanonical_u128(n: u128) -> Self; - /// Returns `n % Self::CHARACTERISTIC`. May be cheaper than from_noncanonical_u128 when we know - /// that n < 2 ** 96. + /// Returns `n % Self::characteristic()`. May be cheaper than from_noncanonical_u128 when we know + /// that `n < 2 ** 96`. #[inline] fn from_noncanonical_u96((n_lo, n_hi): (u64, u32)) -> Self { // Default implementation. @@ -399,22 +407,28 @@ pub trait Field: } } -/// A finite field of prime order less than 2^64. -pub trait PrimeField: Field { +/// A finite field of order less than 2^64. +pub trait Field64: Field { const ORDER: u64; + // TODO: Only well-defined for prime 64-bit fields. Move to a new PrimeField64 trait? fn to_canonical_u64(&self) -> u64; + // TODO: Only well-defined for prime 64-bit fields. Move to a new PrimeField64 trait? fn to_noncanonical_u64(&self) -> u64; + /// Returns `x % Self::CHARACTERISTIC`. + // TODO: Move to `Field`. fn from_noncanonical_u64(n: u64) -> Self; #[inline] + // TODO: Move to `Field`. fn add_one(&self) -> Self { unsafe { self.add_canonical_u64(1) } } #[inline] + // TODO: Move to `Field`. fn sub_one(&self) -> Self { unsafe { self.sub_canonical_u64(1) } } @@ -423,6 +437,7 @@ pub trait PrimeField: Field { /// Equivalent to *self + Self::from_canonical_u64(rhs), but may be cheaper. The caller must /// ensure that 0 <= rhs < Self::ORDER. The function may return incorrect results if this /// precondition is not met. It is marked unsafe for this reason. + // TODO: Move to `Field`. #[inline] unsafe fn add_canonical_u64(&self, rhs: u64) -> Self { // Default implementation. @@ -433,6 +448,7 @@ pub trait PrimeField: Field { /// Equivalent to *self - Self::from_canonical_u64(rhs), but may be cheaper. The caller must /// ensure that 0 <= rhs < Self::ORDER. The function may return incorrect results if this /// precondition is not met. It is marked unsafe for this reason. + // TODO: Move to `Field`. #[inline] unsafe fn sub_canonical_u64(&self, rhs: u64) -> Self { // Default implementation. 
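A minimal sketch of what the canonical/non-canonical split above means in practice for a
64-bit field, assuming the `plonky2_field` paths used in this patch:

    use plonky2_field::field_types::{Field, Field64};
    use plonky2_field::goldilocks_field::GoldilocksField as F;

    fn main() {
        // Goldilocks order is 2^64 - 2^32 + 1.
        // `from_noncanonical_u64` reduces its input mod the order...
        let x = F::from_noncanonical_u64(u64::MAX);
        assert_eq!(x.to_canonical_u64(), u64::MAX - F::ORDER); // 2^32 - 2
        // ...while `from_canonical_u64` assumes the input is already below the order.
        let y = F::from_canonical_u64(5);
        assert_eq!(y.to_canonical_u64(), 5);
    }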
diff --git a/field/src/goldilocks_field.rs b/field/src/goldilocks_field.rs index 54866b1f..a121b4d2 100644 --- a/field/src/goldilocks_field.rs +++ b/field/src/goldilocks_field.rs @@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize}; use crate::extension_field::quadratic::QuadraticExtension; use crate::extension_field::quartic::QuarticExtension; use crate::extension_field::{Extendable, Frobenius}; -use crate::field_types::{Field, PrimeField}; +use crate::field_types::{Field, Field64}; use crate::inversion::try_inverse_u64; const EPSILON: u64 = (1 << 32) - 1; @@ -123,7 +123,7 @@ impl Field for GoldilocksField { } } -impl PrimeField for GoldilocksField { +impl Field64 for GoldilocksField { const ORDER: u64 = 0xFFFFFFFF00000001; #[inline] diff --git a/field/src/inversion.rs b/field/src/inversion.rs index bbfb8e0d..10c02879 100644 --- a/field/src/inversion.rs +++ b/field/src/inversion.rs @@ -1,4 +1,4 @@ -use crate::field_types::PrimeField; +use crate::field_types::Field64; /// This is a 'safe' iteration for the modular inversion algorithm. It /// is safe in the sense that it will produce the right answer even @@ -63,7 +63,7 @@ unsafe fn unsafe_iteration(f: &mut u64, g: &mut u64, c: &mut i128, d: &mut i128, /// Elliptic and Hyperelliptic Cryptography, Algorithms 11.6 /// and 11.12. #[allow(clippy::many_single_char_names)] -pub(crate) fn try_inverse_u64(x: &F) -> Option { +pub(crate) fn try_inverse_u64(x: &F) -> Option { let mut f = x.to_noncanonical_u64(); let mut g = F::ORDER; // NB: These two are very rarely such that their absolute diff --git a/field/src/prime_field_testing.rs b/field/src/prime_field_testing.rs index 4aec6712..772336e9 100644 --- a/field/src/prime_field_testing.rs +++ b/field/src/prime_field_testing.rs @@ -1,4 +1,4 @@ -use crate::field_types::PrimeField; +use crate::field_types::Field64; /// Generates a series of non-negative integers less than `modulus` which cover a range of /// interesting test values. @@ -19,7 +19,7 @@ pub fn test_inputs(modulus: u64) -> Vec { /// word_bits)` and panic if the two resulting vectors differ. pub fn run_unaryop_test_cases(op: UnaryOp, expected_op: ExpectedOp) where - F: PrimeField, + F: Field64, UnaryOp: Fn(F) -> F, ExpectedOp: Fn(u64) -> u64, { @@ -43,7 +43,7 @@ where /// Apply the binary functions `op` and `expected_op` to each pair of inputs. pub fn run_binaryop_test_cases(op: BinaryOp, expected_op: ExpectedOp) where - F: PrimeField, + F: Field64, BinaryOp: Fn(F, F) -> F, ExpectedOp: Fn(u64, u64) -> u64, { @@ -70,7 +70,7 @@ macro_rules! 
test_prime_field_arithmetic { mod prime_field_arithmetic { use std::ops::{Add, Mul, Neg, Sub}; - use crate::field_types::{Field, PrimeField}; + use crate::field_types::{Field, Field64}; use crate::ops::Square; #[test] diff --git a/plonky2/src/bin/generate_constants.rs b/plonky2/src/bin/generate_constants.rs index d9757aff..d2744991 100644 --- a/plonky2/src/bin/generate_constants.rs +++ b/plonky2/src/bin/generate_constants.rs @@ -2,7 +2,7 @@ #![allow(clippy::needless_range_loop)] -use plonky2_field::field_types::PrimeField; +use plonky2_field::field_types::Field64; use plonky2_field::goldilocks_field::GoldilocksField; use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; diff --git a/plonky2/src/gadgets/arithmetic.rs b/plonky2/src/gadgets/arithmetic.rs index 90f3b090..5bc8be07 100644 --- a/plonky2/src/gadgets/arithmetic.rs +++ b/plonky2/src/gadgets/arithmetic.rs @@ -1,7 +1,7 @@ use std::borrow::Borrow; use plonky2_field::extension_field::Extendable; -use plonky2_field::field_types::PrimeField; +use plonky2_field::field_types::Field64; use crate::gates::arithmetic_base::ArithmeticGate; use crate::gates::exponentiation::ExponentiationGate; @@ -325,7 +325,7 @@ impl, const D: usize> CircuitBuilder { /// Represents a base arithmetic operation in the circuit. Used to memoize results. #[derive(Copy, Clone, Eq, PartialEq, Hash)] -pub(crate) struct BaseArithmeticOperation { +pub(crate) struct BaseArithmeticOperation { const_0: F, const_1: F, multiplicand_0: Target, diff --git a/plonky2/src/gadgets/arithmetic_extension.rs b/plonky2/src/gadgets/arithmetic_extension.rs index 125fb49a..ae343aff 100644 --- a/plonky2/src/gadgets/arithmetic_extension.rs +++ b/plonky2/src/gadgets/arithmetic_extension.rs @@ -1,6 +1,6 @@ use plonky2_field::extension_field::FieldExtension; use plonky2_field::extension_field::{Extendable, OEF}; -use plonky2_field::field_types::{Field, PrimeField}; +use plonky2_field::field_types::{Field, Field64}; use plonky2_util::bits_u64; use crate::gates::arithmetic_extension::ArithmeticExtensionGate; @@ -544,7 +544,7 @@ impl, const D: usize> CircuitBuilder { /// Represents an extension arithmetic operation in the circuit. Used to memoize results. #[derive(Copy, Clone, Eq, PartialEq, Hash)] -pub(crate) struct ExtensionArithmeticOperation, const D: usize> { +pub(crate) struct ExtensionArithmeticOperation, const D: usize> { const_0: F, const_1: F, multiplicand_0: ExtensionTarget, diff --git a/plonky2/src/gates/assert_le.rs b/plonky2/src/gates/assert_le.rs index c385bb31..c087a963 100644 --- a/plonky2/src/gates/assert_le.rs +++ b/plonky2/src/gates/assert_le.rs @@ -1,7 +1,7 @@ use std::marker::PhantomData; use plonky2_field::extension_field::Extendable; -use plonky2_field::field_types::{Field, PrimeField}; +use plonky2_field::field_types::{Field, Field64}; use plonky2_field::packed_field::PackedField; use plonky2_util::{bits_u64, ceil_div_usize}; @@ -25,7 +25,7 @@ use crate::plonk::vars::{ /// A gate for checking that one value is less than or equal to another. 
#[derive(Clone, Debug)] -pub struct AssertLessThanGate, const D: usize> { +pub struct AssertLessThanGate, const D: usize> { pub(crate) num_bits: usize, pub(crate) num_chunks: usize, _phantom: PhantomData, @@ -455,7 +455,7 @@ mod tests { use anyhow::Result; use plonky2_field::extension_field::quartic::QuarticExtension; - use plonky2_field::field_types::{Field, PrimeField}; + use plonky2_field::field_types::{Field, Field64}; use plonky2_field::goldilocks_field::GoldilocksField; use rand::Rng; diff --git a/plonky2/src/gates/base_sum.rs b/plonky2/src/gates/base_sum.rs index a5391abf..e03a2c5b 100644 --- a/plonky2/src/gates/base_sum.rs +++ b/plonky2/src/gates/base_sum.rs @@ -1,7 +1,7 @@ use std::ops::Range; use plonky2_field::extension_field::Extendable; -use plonky2_field::field_types::{Field, PrimeField}; +use plonky2_field::field_types::{Field, Field64}; use plonky2_field::packed_field::PackedField; use crate::gates::gate::Gate; @@ -31,7 +31,7 @@ impl BaseSumGate { Self { num_limbs } } - pub fn new_from_config(config: &CircuitConfig) -> Self { + pub fn new_from_config(config: &CircuitConfig) -> Self { let num_limbs = F::BITS.min(config.num_routed_wires - Self::START_LIMBS); Self::new(num_limbs) } diff --git a/plonky2/src/gates/comparison.rs b/plonky2/src/gates/comparison.rs index 424ecb5b..bc3e69b9 100644 --- a/plonky2/src/gates/comparison.rs +++ b/plonky2/src/gates/comparison.rs @@ -1,7 +1,7 @@ use std::marker::PhantomData; use plonky2_field::extension_field::Extendable; -use plonky2_field::field_types::{Field, PrimeField}; +use plonky2_field::field_types::{Field, Field64}; use plonky2_field::packed_field::PackedField; use plonky2_util::{bits_u64, ceil_div_usize}; @@ -23,7 +23,7 @@ use crate::plonk::vars::{ /// A gate for checking that one value is less than or equal to another. 
#[derive(Clone, Debug)] -pub struct ComparisonGate, const D: usize> { +pub struct ComparisonGate, const D: usize> { pub(crate) num_bits: usize, pub(crate) num_chunks: usize, _phantom: PhantomData, @@ -520,7 +520,7 @@ mod tests { use std::marker::PhantomData; use anyhow::Result; - use plonky2_field::field_types::{Field, PrimeField}; + use plonky2_field::field_types::{Field, Field64}; use plonky2_field::goldilocks_field::GoldilocksField; use rand::Rng; diff --git a/plonky2/src/gates/subtraction_u32.rs b/plonky2/src/gates/subtraction_u32.rs index f083db5a..80bc03ed 100644 --- a/plonky2/src/gates/subtraction_u32.rs +++ b/plonky2/src/gates/subtraction_u32.rs @@ -338,7 +338,7 @@ mod tests { use anyhow::Result; use plonky2_field::extension_field::quartic::QuarticExtension; - use plonky2_field::field_types::{Field, PrimeField}; + use plonky2_field::field_types::{Field, Field64}; use plonky2_field::goldilocks_field::GoldilocksField; use rand::Rng; diff --git a/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs b/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs index f903cd96..f2276506 100644 --- a/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs +++ b/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs @@ -3,7 +3,7 @@ use std::arch::aarch64::*; use std::arch::asm; -use plonky2_field::field_types::PrimeField; +use plonky2_field::field_types::Field64; use plonky2_field::goldilocks_field::GoldilocksField; use plonky2_util::branch_hint; use static_assertions::const_assert; diff --git a/plonky2/src/hash/hash_types.rs b/plonky2/src/hash/hash_types.rs index 51a93fdc..ed6fca43 100644 --- a/plonky2/src/hash/hash_types.rs +++ b/plonky2/src/hash/hash_types.rs @@ -1,4 +1,4 @@ -use plonky2_field::field_types::{Field, PrimeField}; +use plonky2_field::field_types::{Field, Field64}; use plonky2_field::goldilocks_field::GoldilocksField; use rand::Rng; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -8,7 +8,7 @@ use crate::iop::target::Target; use crate::plonk::config::GenericHashOut; /// A prime order field with the features we need to use it as a base field in our argument system. -pub trait RichField: PrimeField + Poseidon {} +pub trait RichField: Field64 + Poseidon {} impl RichField for GoldilocksField {} diff --git a/plonky2/src/hash/poseidon.rs b/plonky2/src/hash/poseidon.rs index 9dc5f394..d2b47932 100644 --- a/plonky2/src/hash/poseidon.rs +++ b/plonky2/src/hash/poseidon.rs @@ -2,7 +2,7 @@ //! https://eprint.iacr.org/2019/458.pdf use plonky2_field::extension_field::{Extendable, FieldExtension}; -use plonky2_field::field_types::{Field, PrimeField}; +use plonky2_field::field_types::{Field, Field64}; use unroll::unroll_for_loops; use crate::gates::gate::Gate; @@ -35,7 +35,7 @@ fn add_u160_u128((x_lo, x_hi): (u128, u32), y: u128) -> (u128, u32) { } #[inline(always)] -fn reduce_u160((n_lo, n_hi): (u128, u32)) -> F { +fn reduce_u160((n_lo, n_hi): (u128, u32)) -> F { let n_lo_hi = (n_lo >> 64) as u64; let n_lo_lo = n_lo as u64; let reduced_hi: u64 = F::from_noncanonical_u96((n_lo_hi, n_hi)).to_noncanonical_u64(); @@ -148,7 +148,7 @@ pub const ALL_ROUND_CONSTANTS: [u64; MAX_WIDTH * N_ROUNDS] = [ ]; const WIDTH: usize = SPONGE_WIDTH; -pub trait Poseidon: PrimeField { +pub trait Poseidon: Field64 { // Total number of round constants required: width of the input // times number of rounds. 
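// For the Goldilocks Poseidon instance used here, WIDTH = SPONGE_WIDTH = 12 and N_ROUNDS = 30
// (8 full rounds plus 22 partial rounds), so 12 * 30 = 360 round constants in total.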
const N_ROUND_CONSTANTS: usize = WIDTH * N_ROUNDS; diff --git a/plonky2/src/hash/poseidon_goldilocks.rs b/plonky2/src/hash/poseidon_goldilocks.rs index b8f63ab4..ab886847 100644 --- a/plonky2/src/hash/poseidon_goldilocks.rs +++ b/plonky2/src/hash/poseidon_goldilocks.rs @@ -270,7 +270,7 @@ impl Poseidon for GoldilocksField { #[cfg(test)] mod tests { - use plonky2_field::field_types::{Field, PrimeField}; + use plonky2_field::field_types::{Field, Field64}; use plonky2_field::goldilocks_field::GoldilocksField as F; use crate::hash::poseidon::test_helpers::{check_consistency, check_test_vectors}; diff --git a/plonky2/src/util/serialization.rs b/plonky2/src/util/serialization.rs index 45a463a8..adc8baee 100644 --- a/plonky2/src/util/serialization.rs +++ b/plonky2/src/util/serialization.rs @@ -3,7 +3,7 @@ use std::io::Cursor; use std::io::{Read, Result, Write}; use plonky2_field::extension_field::{Extendable, FieldExtension}; -use plonky2_field::field_types::PrimeField; +use plonky2_field::field_types::Field64; use plonky2_field::polynomial::PolynomialCoeffs; use crate::fri::proof::{ @@ -53,10 +53,10 @@ impl Buffer { Ok(u32::from_le_bytes(buf)) } - fn write_field(&mut self, x: F) -> Result<()> { + fn write_field(&mut self, x: F) -> Result<()> { self.0.write_all(&x.to_canonical_u64().to_le_bytes()) } - fn read_field(&mut self) -> Result { + fn read_field(&mut self) -> Result { let mut buf = [0; std::mem::size_of::()]; self.0.read_exact(&mut buf)?; Ok(F::from_canonical_u64(u64::from_le_bytes( @@ -116,13 +116,13 @@ impl Buffer { )) } - pub fn write_field_vec(&mut self, v: &[F]) -> Result<()> { + pub fn write_field_vec(&mut self, v: &[F]) -> Result<()> { for &a in v { self.write_field(a)?; } Ok(()) } - pub fn read_field_vec(&mut self, length: usize) -> Result> { + pub fn read_field_vec(&mut self, length: usize) -> Result> { (0..length) .map(|_| self.read_field()) .collect::>>() diff --git a/waksman/src/sorting.rs b/waksman/src/sorting.rs index b3e616d5..b154436e 100644 --- a/waksman/src/sorting.rs +++ b/waksman/src/sorting.rs @@ -183,7 +183,7 @@ impl, const D: usize> SimpleGenerator #[cfg(test)] mod tests { use anyhow::Result; - use plonky2::field::field_types::{Field, PrimeField}; + use plonky2::field::field_types::{Field, Field64}; use plonky2::iop::witness::PartialWitness; use plonky2::plonk::circuit_data::CircuitConfig; use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; From bff763e3e79135a998fbf6c3fb6cf46bac32427c Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Wed, 2 Feb 2022 11:23:03 +0100 Subject: [PATCH 094/143] Add distinction between (non-)wrapping constraints --- starky/src/constraint_consumer.rs | 65 +++++++++++++++++------------ starky/src/fibonacci_stark.rs | 13 +++--- starky/src/prover.rs | 4 +- starky/src/verifier.rs | 9 ++-- system_zero/src/core_registers.rs | 10 ++--- system_zero/src/permutation_unit.rs | 4 +- 6 files changed, 61 insertions(+), 44 deletions(-) diff --git a/starky/src/constraint_consumer.rs b/starky/src/constraint_consumer.rs index adb88e41..922475c3 100644 --- a/starky/src/constraint_consumer.rs +++ b/starky/src/constraint_consumer.rs @@ -14,6 +14,9 @@ pub struct ConstraintConsumer { /// Running sums of constraints that have been emitted so far, scaled by powers of alpha. constraint_accs: Vec
<P>
, + /// The evaluation of `X - g^(n-1)`. + z_last: P, + /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated /// with the first trace row, and zero at other points in the subgroup. lagrange_basis_first: P, @@ -24,10 +27,16 @@ pub struct ConstraintConsumer { } impl ConstraintConsumer
<P>
{ - pub fn new(alphas: Vec, lagrange_basis_first: P, lagrange_basis_last: P) -> Self { + pub fn new( + alphas: Vec, + z_last: P, + lagrange_basis_first: P, + lagrange_basis_last: P, + ) -> Self { Self { constraint_accs: vec![P::ZEROS; alphas.len()], alphas, + z_last, lagrange_basis_first, lagrange_basis_last, } @@ -41,31 +50,29 @@ impl ConstraintConsumer
<P>
{ .collect() } - /// Add one constraint. - pub fn one(&mut self, constraint: P) { + /// Add one constraint valid on all rows except the last. + pub fn constraint(&mut self, constraint: P) { + self.constraint_wrapping(constraint * self.z_last); + } + + /// Add one constraint on all rows. + pub fn constraint_wrapping(&mut self, constraint: P) { for (&alpha, acc) in self.alphas.iter().zip(&mut self.constraint_accs) { *acc *= alpha; *acc += constraint; } } - /// Add a series of constraints. - pub fn many(&mut self, constraints: impl IntoIterator) { - constraints - .into_iter() - .for_each(|constraint| self.one(constraint)); - } - /// Add one constraint, but first multiply it by a filter such that it will only apply to the /// first row of the trace. - pub fn one_first_row(&mut self, constraint: P) { - self.one(constraint * self.lagrange_basis_first); + pub fn constraint_first_row(&mut self, constraint: P) { + self.constraint_wrapping(constraint * self.lagrange_basis_first); } /// Add one constraint, but first multiply it by a filter such that it will only apply to the /// last row of the trace. - pub fn one_last_row(&mut self, constraint: P) { - self.one(constraint * self.lagrange_basis_last); + pub fn constraint_last_row(&mut self, constraint: P) { + self.constraint_wrapping(constraint * self.lagrange_basis_last); } } @@ -76,6 +83,9 @@ pub struct RecursiveConstraintConsumer, const D: us /// A running sum of constraints that have been emitted so far, scaled by powers of alpha. constraint_acc: ExtensionTarget, + /// The evaluation of `X - g^(n-1)`. + z_last: ExtensionTarget, + /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated /// with the first trace row, and zero at other points in the subgroup. lagrange_basis_first: ExtensionTarget, @@ -88,42 +98,45 @@ pub struct RecursiveConstraintConsumer, const D: us } impl, const D: usize> RecursiveConstraintConsumer { - /// Add one constraint. - pub fn one(&mut self, builder: &mut CircuitBuilder, constraint: ExtensionTarget) { + /// Add one constraint valid on all rows except the last. + pub fn constraint( + &mut self, + builder: &mut CircuitBuilder, + constraint: ExtensionTarget, + ) { self.constraint_acc = builder.scalar_mul_add_extension(self.alpha, self.constraint_acc, constraint); } - /// Add a series of constraints. - pub fn many( + /// Add one constraint valid on all rows. + pub fn constraint_wrapping( &mut self, builder: &mut CircuitBuilder, - constraints: impl IntoIterator>, + constraint: ExtensionTarget, ) { - constraints - .into_iter() - .for_each(|constraint| self.one(builder, constraint)); + let filtered_constraint = builder.mul_extension(constraint, self.z_last); + self.constraint(builder, filtered_constraint); } /// Add one constraint, but first multiply it by a filter such that it will only apply to the /// first row of the trace. - pub fn one_first_row( + pub fn constraint_first_row( &mut self, builder: &mut CircuitBuilder, constraint: ExtensionTarget, ) { let filtered_constraint = builder.mul_extension(constraint, self.lagrange_basis_first); - self.one(builder, filtered_constraint); + self.constraint(builder, filtered_constraint); } /// Add one constraint, but first multiply it by a filter such that it will only apply to the /// last row of the trace. 
- pub fn one_last_row( + pub fn constraint_last_row( &mut self, builder: &mut CircuitBuilder, constraint: ExtensionTarget, ) { let filtered_constraint = builder.mul_extension(constraint, self.lagrange_basis_last); - self.one(builder, filtered_constraint); + self.constraint(builder, filtered_constraint); } } diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index dc6d676a..f3ffd8a2 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -60,14 +60,17 @@ impl, const D: usize> Stark for FibonacciStar P: PackedField, { // Check public inputs. - yield_constr.one_first_row(vars.local_values[0] - vars.public_inputs[Self::PI_INDEX_X0]); - yield_constr.one_first_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_X1]); - yield_constr.one_last_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_RES]); + yield_constr + .constraint_first_row(vars.local_values[0] - vars.public_inputs[Self::PI_INDEX_X0]); + yield_constr + .constraint_first_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_X1]); + yield_constr + .constraint_last_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_RES]); // x0 <- x1 - yield_constr.one(vars.next_values[0] - vars.local_values[1]); + yield_constr.constraint(vars.next_values[0] - vars.local_values[1]); // x1 <- x0 + x1 - yield_constr.one(vars.next_values[1] - vars.local_values[0] - vars.local_values[1]); + yield_constr.constraint(vars.next_values[1] - vars.local_values[0] - vars.local_values[1]); } fn eval_ext_recursively( diff --git a/starky/src/prover.rs b/starky/src/prover.rs index d6543dae..4d7f8c3f 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -197,6 +197,7 @@ where // TODO: Set `P` to a genuine `PackedField` here. let mut consumer = ConstraintConsumer::::new( alphas.clone(), + coset[i] - last, lagrange_first.values[i], lagrange_last.values[i], ); @@ -214,9 +215,8 @@ where // We divide the constraints evaluations by `Z_H(x) / x - last`, i.e., the vanishing // polynomial of `H` without it's last element. let denominator_inv = z_h_on_coset.eval_inverse(i); - let z_last = coset[i] - last; for eval in &mut constraints_evals { - *eval *= denominator_inv * z_last; + *eval *= denominator_inv; } constraints_evals }) diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index 6a4464e1..5753f21f 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -72,24 +72,25 @@ where }; let (l_1, l_last) = eval_l_1_and_l_last(degree_bits, challenges.stark_zeta); + let last = F::primitive_root_of_unity(degree_bits).inverse(); + let z_last = challenges.stark_zeta - last.into(); let mut consumer = ConstraintConsumer::::new( challenges .stark_alphas .iter() .map(|&alpha| F::Extension::from_basefield(alpha)) .collect::>(), + z_last, l_1, l_last, ); stark.eval_ext(vars, &mut consumer); let acc = consumer.accumulators(); - // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x) / (x - last)`, at zeta. + // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta. let quotient_polys_zeta = &proof.openings.quotient_polys; let zeta_pow_deg = challenges.stark_zeta.exp_power_of_2(degree_bits); let z_h_zeta = zeta_pow_deg - F::Extension::ONE; - let last = F::primitive_root_of_unity(degree_bits).inverse(); - let z_last = challenges.stark_zeta - last.into(); // `quotient_polys_zeta` holds `num_challenges * quotient_degree_factor` evaluations. 
// Each chunk of `quotient_degree_factor` holds the evaluations of `t_0(zeta),...,t_{quotient_degree_factor-1}(zeta)` // where the "real" quotient polynomial is `t(X) = t_0(X) + t_1(X)*X^n + t_2(X)*X^{2n} + ...`. @@ -99,7 +100,7 @@ where .chunks(1 << config.fri_config.rate_bits) .enumerate() { - ensure!(acc[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg) / z_last); + ensure!(acc[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg)); } let merkle_caps = &[proof.trace_cap, proof.quotient_polys_cap]; diff --git a/system_zero/src/core_registers.rs b/system_zero/src/core_registers.rs index 249c16a3..21faa288 100644 --- a/system_zero/src/core_registers.rs +++ b/system_zero/src/core_registers.rs @@ -55,16 +55,16 @@ impl, const D: usize> SystemZero { let local_clock = vars.local_values[COL_CLOCK]; let next_clock = vars.next_values[COL_CLOCK]; let delta_clock = next_clock - local_clock; - yield_constr.one_first_row(local_clock); - yield_constr.one(delta_clock - FE::ONE); + yield_constr.constraint_first_row(local_clock); + yield_constr.constraint(delta_clock - FE::ONE); // The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1. let local_range_16 = vars.local_values[COL_RANGE_16]; let next_range_16 = vars.next_values[COL_RANGE_16]; let delta_range_16 = next_range_16 - local_range_16; - yield_constr.one_first_row(local_range_16); - yield_constr.one_last_row(local_range_16 - FE::from_canonical_u64((1 << 16) - 1)); - yield_constr.one(delta_range_16 * (delta_range_16 - FE::ONE)); + yield_constr.constraint_first_row(local_range_16); + yield_constr.constraint_last_row(local_range_16 - FE::from_canonical_u64((1 << 16) - 1)); + yield_constr.constraint(delta_range_16 * (delta_range_16 - FE::ONE)); todo!() } diff --git a/system_zero/src/permutation_unit.rs b/system_zero/src/permutation_unit.rs index a490b49d..43883fca 100644 --- a/system_zero/src/permutation_unit.rs +++ b/system_zero/src/permutation_unit.rs @@ -53,7 +53,7 @@ impl, const D: usize> SystemZero { // Assert that the computed output matches the outputs in the trace. for i in 0..SPONGE_WIDTH { let out = local_values[col_permutation_output(i)]; - yield_constr.one(state[i] - out); + yield_constr.constraint(state[i] - out); } } @@ -80,7 +80,7 @@ impl, const D: usize> SystemZero { for i in 0..SPONGE_WIDTH { let out = local_values[col_permutation_output(i)]; let diff = builder.sub_extension(state[i], out); - yield_constr.one(builder, diff); + yield_constr.constraint(builder, diff); } } } From be44edcd78b8c2b43c0c1140d59b4c7da954f9c6 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Wed, 2 Feb 2022 13:20:49 +0100 Subject: [PATCH 095/143] Minor --- starky/src/verifier.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index 5753f21f..843bdb63 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -126,13 +126,10 @@ where /// `L_1(x) = (x^n - 1)/(n * (x - 1))` /// `L_n(x) = (x^n - 1)/(n * (g * x - 1))`, with `g` the first element of the subgroup. 
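// Why the closed forms hold: Z_H(x) = x^n - 1 = prod_i (x - g^i), so the Lagrange basis at
// g^0 = 1 is L_1(x) = [Z_H(x) / (x - 1)] / Z_H'(1) = (x^n - 1) / (n(x - 1)), and at the last
// point g^(n-1) = g^(-1) it is [Z_H(x) / (x - g^(-1))] / Z_H'(g^(-1)) = (x^n - 1) / (n(gx - 1)),
// using Z_H'(x) = n x^(n-1) and g^(1-n) = g.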
fn eval_l_1_and_l_last(log_n: usize, x: F) -> (F, F) { - let n = 1 << log_n; + let n = F::from_canonical_usize(1 << log_n); let g = F::primitive_root_of_unity(log_n); let z_x = x.exp_power_of_2(log_n) - F::ONE; - let invs = F::batch_multiplicative_inverse(&[ - F::from_canonical_usize(n) * (x - F::ONE), - F::from_canonical_usize(n) * (g * x - F::ONE), - ]); + let invs = F::batch_multiplicative_inverse(&[n * (x - F::ONE), n * (g * x - F::ONE)]); (z_x * invs[0], z_x * invs[1]) } From bc5bc8245d597e20e4c25955c181c70210160e3c Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Thu, 3 Feb 2022 11:49:44 +0100 Subject: [PATCH 096/143] PR feedback --- field/src/polynomial/mod.rs | 3 ++- plonky2/src/plonk/proof.rs | 8 ++++---- starky/src/constraint_consumer.rs | 8 ++++---- starky/src/proof.rs | 22 ++++++++++++++++++---- starky/src/prover.rs | 7 +++---- starky/src/stark.rs | 1 + starky/src/verifier.rs | 16 ++++++++-------- 7 files changed, 40 insertions(+), 25 deletions(-) diff --git a/field/src/polynomial/mod.rs b/field/src/polynomial/mod.rs index d36757b3..7fccb46e 100644 --- a/field/src/polynomial/mod.rs +++ b/field/src/polynomial/mod.rs @@ -57,7 +57,8 @@ impl PolynomialValues { fft_with_options(coeffs, Some(rate_bits), None) } - pub fn coset_lde(self, rate_bits: usize) -> Self { + /// Low-degree extend `Self` (seen as evaluations over the subgroup) onto a coset. + pub fn lde_onto_coset(self, rate_bits: usize) -> Self { let coeffs = ifft(self).lde(rate_bits); coeffs.coset_fft_with_options(F::coset_shift(), Some(rate_bits), None) } diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index 9d9eaaff..803e64d4 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -227,16 +227,16 @@ impl, C: GenericConfig, const D: usize> } pub(crate) struct ProofChallenges, const D: usize> { - // Random values used in Plonk's permutation argument. + /// Random values used in Plonk's permutation argument. pub plonk_betas: Vec, - // Random values used in Plonk's permutation argument. + /// Random values used in Plonk's permutation argument. pub plonk_gammas: Vec, - // Random values used to combine PLONK constraints. + /// Random values used to combine PLONK constraints. pub plonk_alphas: Vec, - // Point at which the PLONK polynomials are opened. + /// Point at which the PLONK polynomials are opened. pub plonk_zeta: F::Extension, pub fri_challenges: FriChallenges, diff --git a/starky/src/constraint_consumer.rs b/starky/src/constraint_consumer.rs index 922475c3..091215dd 100644 --- a/starky/src/constraint_consumer.rs +++ b/starky/src/constraint_consumer.rs @@ -104,8 +104,8 @@ impl, const D: usize> RecursiveConstraintConsumer, constraint: ExtensionTarget, ) { - self.constraint_acc = - builder.scalar_mul_add_extension(self.alpha, self.constraint_acc, constraint); + let filtered_constraint = builder.mul_extension(constraint, self.z_last); + self.constraint(builder, filtered_constraint); } /// Add one constraint valid on all rows. 
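// (The distinction being fixed here: `constraint` multiplies by `z_last = X - g^(n-1)`, so the
// constraint is only enforced on rows 0..n-2, while `constraint_wrapping` enforces it on every
// row, including the transition from the last row back to the first.)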
@@ -114,8 +114,8 @@ impl, const D: usize> RecursiveConstraintConsumer, constraint: ExtensionTarget, ) { - let filtered_constraint = builder.mul_extension(constraint, self.z_last); - self.constraint(builder, filtered_constraint); + self.constraint_acc = + builder.scalar_mul_add_extension(self.alpha, self.constraint_acc, constraint); } /// Add one constraint, but first multiply it by a filter such that it will only apply to the diff --git a/starky/src/proof.rs b/starky/src/proof.rs index 50ef21bc..5f96f1f4 100644 --- a/starky/src/proof.rs +++ b/starky/src/proof.rs @@ -52,10 +52,10 @@ pub struct CompressedStarkProofWithPublicInputs< } pub(crate) struct StarkProofChallenges, const D: usize> { - // Random values used to combine PLONK constraints. + /// Random values used to combine STARK constraints. pub stark_alphas: Vec, - // Point at which the PLONK polynomials are opened. + /// Point at which the STARK polynomials are opened. pub stark_zeta: F::Extension, pub fri_challenges: FriChallenges, @@ -66,6 +66,7 @@ pub struct StarkOpeningSet, const D: usize> { pub local_values: Vec, pub next_values: Vec, pub permutation_zs: Vec, + pub permutation_zs_right: Vec, pub quotient_polys: Vec, } @@ -86,19 +87,28 @@ impl, const D: usize> StarkOpeningSet { local_values: eval_commitment(zeta, trace_commitment), next_values: eval_commitment(zeta * g, trace_commitment), permutation_zs: vec![/*TODO*/], + permutation_zs_right: vec![/*TODO*/], quotient_polys: eval_commitment(zeta, quotient_commitment), } } + // TODO: Replace with a `observe_fri_openings` function. // Note: Can't implement this directly on `Challenger` as it's in a different crate. pub fn observe>(&self, challenger: &mut Challenger) { let StarkOpeningSet { local_values, next_values, permutation_zs, + permutation_zs_right, quotient_polys, } = self; - for v in &[local_values, next_values, permutation_zs, quotient_polys] { + for v in &[ + local_values, + next_values, + permutation_zs, + permutation_zs_right, + quotient_polys, + ] { challenger.observe_extension_elements(v); } } @@ -113,7 +123,11 @@ impl, const D: usize> StarkOpeningSet { .concat(), }; let zeta_right_batch = FriOpeningBatch { - values: self.next_values.to_vec(), + values: [ + self.next_values.as_slice(), + self.permutation_zs_right.as_slice(), + ] + .concat(), }; FriOpenings { batches: vec![zeta_batch, zeta_right_batch], diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 4d7f8c3f..d2c63e02 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -168,13 +168,13 @@ where let lagrange_first = { let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); evals.values[0] = F::ONE; - evals.coset_lde(rate_bits) + evals.lde_onto_coset(rate_bits) }; // Evaluation of the last Lagrange polynomial on the LDE domain. let lagrange_last = { let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); evals.values[degree - 1] = F::ONE; - evals.coset_lde(rate_bits) + evals.lde_onto_coset(rate_bits) }; let z_h_on_coset = ZeroPolyOnCoset::::new(degree_bits, rate_bits); @@ -212,8 +212,7 @@ where stark.eval_packed_base(vars, &mut consumer); // TODO: Fix this once we use a genuine `PackedField`. let mut constraints_evals = consumer.accumulators(); - // We divide the constraints evaluations by `Z_H(x) / x - last`, i.e., the vanishing - // polynomial of `H` without it's last element. + // We divide the constraints evaluations by `Z_H(x)`. 
let denominator_inv = z_h_on_coset.eval_inverse(i); for eval in &mut constraints_evals { *eval *= denominator_inv; diff --git a/starky/src/stark.rs b/starky/src/stark.rs index 3b8c795a..00441240 100644 --- a/starky/src/stark.rs +++ b/starky/src/stark.rs @@ -9,6 +9,7 @@ use crate::vars::StarkEvaluationTargets; use crate::vars::StarkEvaluationVars; /// Represents a STARK system. +// TODO: Add a `constraint_degree` fn that returns the maximum constraint degree. pub trait Stark, const D: usize>: Sync { /// The total number of columns in the trace. const COLUMNS: usize; diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index 843bdb63..b91fe457 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -58,6 +58,7 @@ where local_values, next_values, permutation_zs, + permutation_zs_right, quotient_polys, } = &proof.openings; let vars = StarkEvaluationVars { @@ -85,7 +86,7 @@ where l_last, ); stark.eval_ext(vars, &mut consumer); - let acc = consumer.accumulators(); + let vanishing_polys_zeta = consumer.accumulators(); // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta. let quotient_polys_zeta = &proof.openings.quotient_polys; @@ -100,9 +101,10 @@ where .chunks(1 << config.fri_config.rate_bits) .enumerate() { - ensure!(acc[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg)); + ensure!(vanishing_polys_zeta[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg)); } + // TODO: Permutation polynomials. let merkle_caps = &[proof.trace_cap, proof.quotient_polys_cap]; verify_fri_proof::( @@ -139,12 +141,10 @@ fn recover_degree, C: GenericConfig, cons proof: &StarkProof, config: &StarkConfig, ) -> usize { - 1 << (proof.opening_proof.query_round_proofs[0] + let initial_merkle_proof = &proof.opening_proof.query_round_proofs[0] .initial_trees_proof .evals_proofs[0] - .1 - .siblings - .len() - + config.fri_config.cap_height - - config.fri_config.rate_bits) + .1; + let lde_bits = config.fri_config.cap_height + initial_merkle_proof.siblings.len(); + 1 << (lde_bits - config.fri_config.rate_bits) } From 28082e97528a28b784070d92ddd93ca8e3dc1b92 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Thu, 3 Feb 2022 11:57:24 +0100 Subject: [PATCH 097/143] Clippy --- starky/src/constraint_consumer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/starky/src/constraint_consumer.rs b/starky/src/constraint_consumer.rs index 091215dd..b7c9f399 100644 --- a/starky/src/constraint_consumer.rs +++ b/starky/src/constraint_consumer.rs @@ -105,7 +105,7 @@ impl, const D: usize> RecursiveConstraintConsumer, ) { let filtered_constraint = builder.mul_extension(constraint, self.z_last); - self.constraint(builder, filtered_constraint); + self.constraint_wrapping(builder, filtered_constraint); } /// Add one constraint valid on all rows. 
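As a worked example of the degree recovery used by the verifier above (hypothetical numbers,
chosen only for illustration): with `cap_height = 4`, an initial Merkle proof containing 13
siblings, and `rate_bits = 2`, the LDE has `lde_bits = 4 + 13 = 17` and the trace has
`1 << (17 - 2) = 2^15` rows.

    // A sketch of the same arithmetic; the values are hypothetical, not taken from the patches.
    fn recovered_degree(cap_height: usize, num_siblings: usize, rate_bits: usize) -> usize {
        let lde_bits = cap_height + num_siblings;
        1 << (lde_bits - rate_bits)
    }

    fn main() {
        assert_eq!(recovered_degree(4, 13, 2), 1 << 15);
    }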
From f6c66eec58b119aa2862c3ea81f9c0bb56ca15c2 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Thu, 3 Feb 2022 14:43:47 +0100 Subject: [PATCH 098/143] Fix reducing tests --- plonky2/src/util/reducing.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/plonky2/src/util/reducing.rs b/plonky2/src/util/reducing.rs index b4e8d8cf..ccc41b02 100644 --- a/plonky2/src/util/reducing.rs +++ b/plonky2/src/util/reducing.rs @@ -260,7 +260,7 @@ mod tests { use anyhow::Result; use super::*; - use crate::iop::witness::PartialWitness; + use crate::iop::witness::{PartialWitness, Witness}; use crate::plonk::circuit_data::CircuitConfig; use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; use crate::plonk::verifier::verify; @@ -273,7 +273,7 @@ mod tests { let config = CircuitConfig::standard_recursion_config(); - let pw = PartialWitness::new(); + let mut pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); let alpha = FF::rand(); @@ -283,7 +283,10 @@ mod tests { let manual_reduce = builder.constant_extension(manual_reduce); let mut alpha_t = ReducingFactorTarget::new(builder.constant_extension(alpha)); - let vs_t = vs.iter().map(|&v| builder.constant(v)).collect::>(); + let vs_t = builder.add_virtual_targets(vs.len()); + for (&v, &v_t) in vs.iter().zip(&vs_t) { + pw.set_target(v_t, v); + } let circuit_reduce = alpha_t.reduce_base(&vs_t, &mut builder); builder.connect_extension(manual_reduce, circuit_reduce); @@ -302,7 +305,7 @@ mod tests { let config = CircuitConfig::standard_recursion_config(); - let pw = PartialWitness::new(); + let mut pw = PartialWitness::new(); let mut builder = CircuitBuilder::::new(config); let alpha = FF::rand(); @@ -312,10 +315,8 @@ mod tests { let manual_reduce = builder.constant_extension(manual_reduce); let mut alpha_t = ReducingFactorTarget::new(builder.constant_extension(alpha)); - let vs_t = vs - .iter() - .map(|&v| builder.constant_extension(v)) - .collect::>(); + let vs_t = builder.add_virtual_extension_targets(vs.len()); + pw.set_extension_targets(&vs_t, &vs); let circuit_reduce = alpha_t.reduce(&vs_t, &mut builder); builder.connect_extension(manual_reduce, circuit_reduce); From c6a332200ee74ea91b178c9fba7c715cb68640d9 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Thu, 3 Feb 2022 14:55:27 +0100 Subject: [PATCH 099/143] Disallow degree `quotient_degree_factor = 1` --- plonky2/src/plonk/circuit_builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index ad216d69..67538d00 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -651,7 +651,7 @@ impl, const D: usize> CircuitBuilder { // `quotient_degree_factor` has to be between `max_filtered_constraint_degree-1` and `1< Date: Thu, 3 Feb 2022 16:21:34 +0100 Subject: [PATCH 100/143] Replace `observe_opening_set` by `observe_openings` taking a `FriOpenings` argument. 
--- plonky2/src/fri/recursive_verifier.rs | 2 +- plonky2/src/iop/challenger.rs | 48 ++++----------------------- plonky2/src/plonk/get_challenges.rs | 2 +- plonky2/src/plonk/prover.rs | 2 +- starky/src/get_challenges.rs | 2 +- starky/src/proof.rs | 24 +------------- starky/src/prover.rs | 2 +- 7 files changed, 13 insertions(+), 69 deletions(-) diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index a9a224d1..8dbb2038 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -146,7 +146,7 @@ impl, const D: usize> CircuitBuilder { // Size of the LDE domain. let n = params.lde_size(); - challenger.observe_opening_set(os); + challenger.observe_openings(&os.to_fri_openings()); // Scaling factor to combine polynomials. let alpha = challenger.get_extension_challenge(self); diff --git a/plonky2/src/iop/challenger.rs b/plonky2/src/iop/challenger.rs index d7583646..b1a4c12b 100644 --- a/plonky2/src/iop/challenger.rs +++ b/plonky2/src/iop/challenger.rs @@ -5,6 +5,7 @@ use plonky2_field::extension_field::{Extendable, FieldExtension}; use plonky2_field::polynomial::PolynomialCoeffs; use crate::fri::proof::FriChallenges; +use crate::fri::structure::{FriOpenings, FriOpeningsTarget}; use crate::fri::FriConfig; use crate::hash::hash_types::RichField; use crate::hash::hash_types::{HashOut, HashOutTarget, MerkleCapTarget}; @@ -14,7 +15,6 @@ use crate::iop::ext_target::ExtensionTarget; use crate::iop::target::Target; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::config::{AlgebraicHasher, GenericConfig, GenericHashOut, Hasher}; -use crate::plonk::proof::{OpeningSet, OpeningSetTarget}; /// Observes prover messages, and generates challenges by hashing the transcript, a la Fiat-Shamir. 
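// The usage pattern (as in `get_challenges` above): prover and verifier must observe the same
// data in the same order before drawing each challenge, e.g. observe the trace cap, draw the
// alphas, observe the quotient cap, draw zeta, then observe the openings before running FRI.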
#[derive(Clone)] @@ -72,29 +72,12 @@ impl> Challenger { } } - pub fn observe_opening_set(&mut self, os: &OpeningSet) + pub fn observe_openings(&mut self, openings: &FriOpenings) where F: RichField + Extendable, { - let OpeningSet { - constants, - plonk_sigmas, - wires, - plonk_zs, - plonk_zs_right, - partial_products, - quotient_polys, - } = os; - for v in &[ - constants, - plonk_sigmas, - wires, - plonk_zs, - plonk_zs_right, - partial_products, - quotient_polys, - ] { - self.observe_extension_elements(v); + for v in &openings.batches { + self.observe_extension_elements(&v.values); } } @@ -269,26 +252,9 @@ impl, H: AlgebraicHasher, const D: usize> } } - pub fn observe_opening_set(&mut self, os: &OpeningSetTarget) { - let OpeningSetTarget { - constants, - plonk_sigmas, - wires, - plonk_zs, - plonk_zs_right, - partial_products, - quotient_polys, - } = os; - for v in &[ - constants, - plonk_sigmas, - wires, - plonk_zs, - plonk_zs_right, - partial_products, - quotient_polys, - ] { - self.observe_extension_elements(v); + pub fn observe_openings(&mut self, openings: &FriOpeningsTarget) { + for v in &openings.batches { + self.observe_extension_elements(&v.values); } } diff --git a/plonky2/src/plonk/get_challenges.rs b/plonky2/src/plonk/get_challenges.rs index 3167fef7..fb1517e4 100644 --- a/plonky2/src/plonk/get_challenges.rs +++ b/plonky2/src/plonk/get_challenges.rs @@ -46,7 +46,7 @@ fn get_challenges, C: GenericConfig, cons challenger.observe_cap(quotient_polys_cap); let plonk_zeta = challenger.get_extension_challenge::(); - challenger.observe_opening_set(openings); + challenger.observe_openings(&openings.to_fri_openings()); Ok(ProofChallenges { plonk_betas, diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index 09caf81e..6178bba1 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -196,7 +196,7 @@ pub(crate) fn prove, C: GenericConfig, co common_data, ) ); - challenger.observe_opening_set(&openings); + challenger.observe_openings(&openings.to_fri_openings()); let opening_proof = timed!( timing, diff --git a/starky/src/get_challenges.rs b/starky/src/get_challenges.rs index 79e1c032..7e89ca3e 100644 --- a/starky/src/get_challenges.rs +++ b/starky/src/get_challenges.rs @@ -33,7 +33,7 @@ fn get_challenges, C: GenericConfig, cons challenger.observe_cap(quotient_polys_cap); let stark_zeta = challenger.get_extension_challenge::(); - openings.observe(&mut challenger); + challenger.observe_openings(&openings.to_fri_openings()); Ok(StarkProofChallenges { stark_alphas, diff --git a/starky/src/proof.rs b/starky/src/proof.rs index 5f96f1f4..b7ecd912 100644 --- a/starky/src/proof.rs +++ b/starky/src/proof.rs @@ -4,8 +4,7 @@ use plonky2::fri::proof::{CompressedFriProof, FriChallenges, FriProof}; use plonky2::fri::structure::{FriOpeningBatch, FriOpenings}; use plonky2::hash::hash_types::RichField; use plonky2::hash::merkle_tree::MerkleCap; -use plonky2::iop::challenger::Challenger; -use plonky2::plonk::config::{GenericConfig, Hasher}; +use plonky2::plonk::config::GenericConfig; use rayon::prelude::*; pub struct StarkProof, C: GenericConfig, const D: usize> { @@ -92,27 +91,6 @@ impl, const D: usize> StarkOpeningSet { } } - // TODO: Replace with a `observe_fri_openings` function. - // Note: Can't implement this directly on `Challenger` as it's in a different crate. 
- pub fn observe>(&self, challenger: &mut Challenger) { - let StarkOpeningSet { - local_values, - next_values, - permutation_zs, - permutation_zs_right, - quotient_polys, - } = self; - for v in &[ - local_values, - next_values, - permutation_zs, - permutation_zs_right, - quotient_polys, - ] { - challenger.observe_extension_elements(v); - } - } - pub(crate) fn to_fri_openings(&self) -> FriOpenings { let zeta_batch = FriOpeningBatch { values: [ diff --git a/starky/src/prover.rs b/starky/src/prover.rs index d2c63e02..080ab317 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -113,7 +113,7 @@ where "Opening point is in the subgroup." ); let openings = StarkOpeningSet::new(zeta, g, &trace_commitment, "ient_commitment); - openings.observe(&mut challenger); + challenger.observe_openings(&openings.to_fri_openings()); // TODO: Add permuation checks let initial_merkle_trees = &[&trace_commitment, "ient_commitment]; From 2a699ee004a9b938941abc2dc1c81c6d4bf19f11 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Thu, 3 Feb 2022 16:52:04 +0100 Subject: [PATCH 101/143] Ignore `test_curve_mul` and unignore recursive tests --- plonky2/src/gadgets/curve.rs | 1 + plonky2/src/plonk/recursive_verifier.rs | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/plonky2/src/gadgets/curve.rs b/plonky2/src/gadgets/curve.rs index 2ff22319..8c182345 100644 --- a/plonky2/src/gadgets/curve.rs +++ b/plonky2/src/gadgets/curve.rs @@ -296,6 +296,7 @@ mod tests { } #[test] + #[ignore] fn test_curve_mul() -> Result<()> { const D: usize = 2; type C = PoseidonGoldilocksConfig; diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index 3e4260a9..5d898d0d 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -215,7 +215,6 @@ mod tests { use crate::util::timing::TimingTree; #[test] - #[ignore] fn test_recursive_verifier() -> Result<()> { init_logger(); const D: usize = 2; @@ -232,7 +231,6 @@ mod tests { } #[test] - #[ignore] fn test_recursive_recursive_verifier() -> Result<()> { init_logger(); const D: usize = 2; @@ -317,7 +315,6 @@ mod tests { } #[test] - #[ignore] fn test_recursive_verifier_multi_hash() -> Result<()> { init_logger(); const D: usize = 2; From d99cabded9b8f05e46f81665b8f9dc0d844d8f7d Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 4 Feb 2022 15:56:59 +0100 Subject: [PATCH 102/143] Working --- starky/src/fibonacci_stark.rs | 18 ++++++++++++++++++ starky/src/lib.rs | 2 ++ starky/src/stark.rs | 3 +++ system_zero/src/system_zero.rs | 4 ++++ 4 files changed, 27 insertions(+) diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index f3ffd8a2..ad9685bb 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -81,6 +81,10 @@ impl, const D: usize> Stark for FibonacciStar ) { todo!() } + + fn degree(&self) -> usize { + 2 + } } #[cfg(test)] @@ -93,6 +97,7 @@ mod tests { use crate::config::StarkConfig; use crate::fibonacci_stark::FibonacciStark; use crate::prover::prove; + use crate::stark_testing::test_low_degree; use crate::verifier::verify; fn fibonacci(n: usize, x0: usize, x1: usize) -> usize { @@ -125,4 +130,17 @@ mod tests { verify(stark, proof, &config) } + + #[test] + fn test_fibonacci_stark_degree() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + type S = FibonacciStark; + + let config = StarkConfig::standard_fast_config(); + let num_rows = 1 << 5; + let stark = S::new(num_rows); + test_low_degree(stark) + } 
} diff --git a/starky/src/lib.rs b/starky/src/lib.rs index e56c0ef6..dd3a6ec3 100644 --- a/starky/src/lib.rs +++ b/starky/src/lib.rs @@ -17,3 +17,5 @@ pub mod verifier; #[cfg(test)] pub mod fibonacci_stark; +#[cfg(test)] +pub mod stark_testing; diff --git a/starky/src/stark.rs b/starky/src/stark.rs index 00441240..9721c203 100644 --- a/starky/src/stark.rs +++ b/starky/src/stark.rs @@ -62,6 +62,9 @@ pub trait Stark, const D: usize>: Sync { yield_constr: &mut RecursiveConstraintConsumer, ); + /// The maximum constraint degree. + fn degree(&self) -> usize; + /// Computes the FRI instance used to prove this Stark. // TODO: Permutation polynomials. fn fri_instance( diff --git a/system_zero/src/system_zero.rs b/system_zero/src/system_zero.rs index 31b8434f..e5990af9 100644 --- a/system_zero/src/system_zero.rs +++ b/system_zero/src/system_zero.rs @@ -80,6 +80,10 @@ impl, const D: usize> Stark for SystemZero usize { + 3 + } } #[cfg(test)] From 1011c302accfe188ba4f73205480f4bab00b5413 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 4 Feb 2022 16:02:45 +0100 Subject: [PATCH 103/143] Add test for system zero --- starky/src/fibonacci_stark.rs | 4 ++-- starky/src/lib.rs | 3 +-- system_zero/src/system_zero.rs | 13 +++++++++++++ 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index ad9685bb..a7cb0a87 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -97,7 +97,7 @@ mod tests { use crate::config::StarkConfig; use crate::fibonacci_stark::FibonacciStark; use crate::prover::prove; - use crate::stark_testing::test_low_degree; + use crate::stark_testing::test_stark_low_degree; use crate::verifier::verify; fn fibonacci(n: usize, x0: usize, x1: usize) -> usize { @@ -141,6 +141,6 @@ mod tests { let config = StarkConfig::standard_fast_config(); let num_rows = 1 << 5; let stark = S::new(num_rows); - test_low_degree(stark) + test_stark_low_degree(stark) } } diff --git a/starky/src/lib.rs b/starky/src/lib.rs index dd3a6ec3..dc61e7e7 100644 --- a/starky/src/lib.rs +++ b/starky/src/lib.rs @@ -12,10 +12,9 @@ mod get_challenges; pub mod proof; pub mod prover; pub mod stark; +pub mod stark_testing; pub mod vars; pub mod verifier; #[cfg(test)] pub mod fibonacci_stark; -#[cfg(test)] -pub mod stark_testing; diff --git a/system_zero/src/system_zero.rs b/system_zero/src/system_zero.rs index e5990af9..6b8576c9 100644 --- a/system_zero/src/system_zero.rs +++ b/system_zero/src/system_zero.rs @@ -97,6 +97,7 @@ mod tests { use starky::config::StarkConfig; use starky::prover::prove; use starky::stark::Stark; + use starky::stark_testing::test_stark_low_degree; use starky::verifier::verify; use crate::system_zero::SystemZero; @@ -118,4 +119,16 @@ mod tests { verify(system, proof, &config) } + + #[test] + #[ignore] // TODO + fn degree() -> Result<()> { + type F = GoldilocksField; + type C = PoseidonGoldilocksConfig; + const D: usize = 2; + + type S = SystemZero; + let system = S::default(); + test_stark_low_degree(system) + } } From 6b2b8b6e5d6d33eec07981307ff89d3a4951ed80 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 4 Feb 2022 16:36:22 +0100 Subject: [PATCH 104/143] Use stark degree in compute_quotient --- plonky2/src/plonk/prover.rs | 4 ++-- starky/src/fibonacci_stark.rs | 12 ++++-------- starky/src/prover.rs | 33 +++++++++++++++++++-------------- 3 files changed, 25 insertions(+), 24 deletions(-) diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index 6178bba1..79968b0e 100644 --- 
a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -337,10 +337,10 @@ fn compute_quotient_polys< assert!( max_degree_bits <= common_data.config.fri_config.rate_bits, "Having constraints of degree higher than the rate is not supported yet. \ - If we need this in the future, we can precompute the larger LDE before computing the `ListPolynomialCommitment`s." + If we need this in the future, we can precompute the larger LDE before computing the `PolynomialBatch`s." ); - // We reuse the LDE computed in `ListPolynomialCommitment` and extract every `step` points to get + // We reuse the LDE computed in `PolynomialBatch` and extract every `step` points to get // an LDE matching `max_filtered_constraint_degree`. let step = 1 << (common_data.config.fri_config.rate_bits - max_degree_bits); // When opening the `Z`s polys at the "next" point in Plonk, need to look at the point `next_step` diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index a7cb0a87..2a4c8229 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -83,7 +83,7 @@ impl, const D: usize> Stark for FibonacciStar } fn degree(&self) -> usize { - 2 + 3 } } @@ -100,8 +100,8 @@ mod tests { use crate::stark_testing::test_stark_low_degree; use crate::verifier::verify; - fn fibonacci(n: usize, x0: usize, x1: usize) -> usize { - (0..n).fold((0, 1), |x, _| (x.1, x.0 + x.1)).1 + fn fibonacci(n: usize, x0: F, x1: F) -> F { + (0..n).fold((x0, x1), |x, _| (x.1, x.0 + x.1)).1 } #[test] @@ -113,11 +113,7 @@ mod tests { let config = StarkConfig::standard_fast_config(); let num_rows = 1 << 5; - let public_inputs = [ - F::ZERO, - F::ONE, - F::from_canonical_usize(fibonacci(num_rows - 1, 0, 1)), - ]; + let public_inputs = [F::ZERO, F::ONE, fibonacci(num_rows - 1, F::ZERO, F::ONE)]; let stark = S::new(num_rows); let trace = stark.generate_trace(public_inputs[0], public_inputs[1]); let proof = prove::( diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 080ab317..2bea24ff 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -11,7 +11,7 @@ use plonky2::plonk::config::GenericConfig; use plonky2::timed; use plonky2::util::timing::TimingTree; use plonky2::util::transpose; -use plonky2_util::log2_strict; +use plonky2_util::{log2_ceil, log2_strict}; use rayon::prelude::*; use crate::config::StarkConfig; @@ -145,8 +145,6 @@ where /// Computes the quotient polynomials `(sum alpha^i C_i(x)) / Z_H(x)` for `alpha` in `alphas`, /// where the `C_i`s are the Stark constraints. -// TODO: This won't work for the Fibonacci example because the constraints wrap around the subgroup. -// The denominator should be the vanishing polynomial of `H` without its last element. fn compute_quotient_polys( stark: &S, trace_commitment: &PolynomialBatch, @@ -164,34 +162,44 @@ where { let degree = 1 << degree_bits; + let max_degree_bits = log2_ceil(stark.degree() - 1); + assert!( + max_degree_bits <= rate_bits, + "Having constraints of degree higher than the rate is not supported yet." + ); + let step = 1 << (rate_bits - max_degree_bits); + // When opening the `Z`s polys at the "next" point, need to look at the point `next_step` steps away. + let next_step = 1 << max_degree_bits; + // Evaluation of the first Lagrange polynomial on the LDE domain. let lagrange_first = { let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); evals.values[0] = F::ONE; - evals.lde_onto_coset(rate_bits) + evals.lde_onto_coset(max_degree_bits) }; // Evaluation of the last Lagrange polynomial on the LDE domain. 
let lagrange_last = { let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); evals.values[degree - 1] = F::ONE; - evals.lde_onto_coset(rate_bits) + evals.lde_onto_coset(max_degree_bits) }; - let z_h_on_coset = ZeroPolyOnCoset::::new(degree_bits, rate_bits); + let z_h_on_coset = ZeroPolyOnCoset::::new(degree_bits, max_degree_bits); // Retrieve the LDE values at index `i`. let get_at_index = |comm: &PolynomialBatch, i: usize| -> [F; S::COLUMNS] { - comm.get_lde_values(i).try_into().unwrap() + comm.get_lde_values(i * step).try_into().unwrap() }; // Last element of the subgroup. let last = F::primitive_root_of_unity(degree_bits).inverse(); + let size = degree << max_degree_bits; let coset = F::cyclic_subgroup_coset_known_order( - F::primitive_root_of_unity(degree_bits + rate_bits), + F::primitive_root_of_unity(degree_bits + max_degree_bits), F::coset_shift(), - degree << rate_bits, + size, ); - let quotient_values = (0..degree << rate_bits) + let quotient_values = (0..size) .into_par_iter() .map(|i| { // TODO: Set `P` to a genuine `PackedField` here. @@ -203,10 +211,7 @@ where ); let vars = StarkEvaluationVars:: { local_values: &get_at_index(trace_commitment, i), - next_values: &get_at_index( - trace_commitment, - (i + (1 << rate_bits)) % (degree << rate_bits), - ), + next_values: &get_at_index(trace_commitment, (i + next_step) % size), public_inputs: &public_inputs, }; stark.eval_packed_base(vars, &mut consumer); From 978e14030cc6d67801e32a817f28d2d4391444fa Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 4 Feb 2022 16:39:34 +0100 Subject: [PATCH 105/143] Fix degree --- starky/src/fibonacci_stark.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index 2a4c8229..ce50dfd5 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -83,7 +83,7 @@ impl, const D: usize> Stark for FibonacciStar } fn degree(&self) -> usize { - 3 + 2 } } From f5ddf32490e1f3aaff836699d11d6a8b3a83143b Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 4 Feb 2022 16:42:22 +0100 Subject: [PATCH 106/143] Add file --- starky/src/stark_testing.rs | 100 ++++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 starky/src/stark_testing.rs diff --git a/starky/src/stark_testing.rs b/starky/src/stark_testing.rs new file mode 100644 index 00000000..6851ec75 --- /dev/null +++ b/starky/src/stark_testing.rs @@ -0,0 +1,100 @@ +use anyhow::{ensure, Result}; +use plonky2::field::extension_field::Extendable; +use plonky2::field::field_types::Field; +use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues}; +use plonky2::field::zero_poly_coset::ZeroPolyOnCoset; +use plonky2::hash::hash_types::RichField; +use plonky2::util::transpose; +use plonky2_util::{log2_ceil, log2_strict}; + +use crate::constraint_consumer::ConstraintConsumer; +use crate::stark::Stark; +use crate::vars::StarkEvaluationVars; + +const WITNESS_SIZE: usize = 1 << 5; + +/// Tests that the constraints imposed by the given STARK are low-degree by applying them to random +/// low-degree witness polynomials. 
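+///
+/// Roughly why this works: the random trace columns below have degree < `WITNESS_SIZE`, so composing
+/// them with constraints of degree at most `d = constraint_degree()` yields evaluations of a polynomial
+/// of degree on the order of `d * WITNESS_SIZE`. Sampling that polynomial on a domain
+/// `2^log2_ceil(d + 1)` times larger than the witness pins it down, so its interpolated degree can be
+/// compared against the `WITNESS_SIZE * d - 1` bound checked at the end.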
+pub fn test_stark_low_degree, S: Stark, const D: usize>( + stark: S, +) -> Result<()> +where + [(); S::COLUMNS]:, + [(); S::PUBLIC_INPUTS]:, +{ + let rate_bits = log2_ceil(stark.degree() + 1); + + let wire_ldes = random_low_degree_matrix::(S::COLUMNS, rate_bits); + let public_inputs = F::rand_arr::<{ S::PUBLIC_INPUTS }>(); + + let lagrange_first = { + let mut evals = PolynomialValues::new(vec![F::ZERO; WITNESS_SIZE]); + evals.values[0] = F::ONE; + evals.lde(rate_bits) + }; + let lagrange_last = { + let mut evals = PolynomialValues::new(vec![F::ZERO; WITNESS_SIZE]); + evals.values[WITNESS_SIZE - 1] = F::ONE; + evals.lde(rate_bits) + }; + + let z_h_on_coset = ZeroPolyOnCoset::::new(log2_strict(WITNESS_SIZE), rate_bits); + + let last = F::primitive_root_of_unity(log2_strict(WITNESS_SIZE)).inverse(); + let subgroup = F::cyclic_subgroup_known_order( + F::primitive_root_of_unity(log2_strict(WITNESS_SIZE) + rate_bits), + WITNESS_SIZE << rate_bits, + ); + let n = wire_ldes.len(); + let alpha = F::rand(); + let constraint_evals = (0..wire_ldes.len()) + .map(|i| { + let vars = StarkEvaluationVars { + local_values: &wire_ldes[i].clone().try_into().unwrap(), + next_values: &wire_ldes[(i + (1 << rate_bits)) % n] + .clone() + .try_into() + .unwrap(), + public_inputs: &public_inputs, + }; + + let mut consumer = ConstraintConsumer::::new( + vec![alpha], + subgroup[i] - last, + lagrange_first.values[i], + lagrange_last.values[i], + ); + stark.eval_packed_base(vars, &mut consumer); + consumer.accumulators()[0] + }) + .collect::>(); + + let constraint_eval_degree = PolynomialValues::new(constraint_evals).degree(); + let maximum_degree = WITNESS_SIZE * stark.degree() - 1; + + ensure!( + constraint_eval_degree <= maximum_degree, + "Expected degrees at most {} * {} - 1 = {}, actual {:?}", + WITNESS_SIZE, + stark.degree(), + maximum_degree, + constraint_eval_degree + ); + + Ok(()) +} + +fn random_low_degree_matrix(num_polys: usize, rate_bits: usize) -> Vec> { + let polys = (0..num_polys) + .map(|_| random_low_degree_values(rate_bits)) + .collect::>(); + + transpose(&polys) +} + +fn random_low_degree_values(rate_bits: usize) -> Vec { + PolynomialCoeffs::new(F::rand_vec(WITNESS_SIZE)) + .lde(rate_bits) + .fft() + .values +} From 01f065b812499116f09161c80603bc959ccd25d8 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 4 Feb 2022 16:46:33 +0100 Subject: [PATCH 107/143] Minor --- starky/src/stark_testing.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/starky/src/stark_testing.rs b/starky/src/stark_testing.rs index 6851ec75..b8b31cf9 100644 --- a/starky/src/stark_testing.rs +++ b/starky/src/stark_testing.rs @@ -25,6 +25,7 @@ where let rate_bits = log2_ceil(stark.degree() + 1); let wire_ldes = random_low_degree_matrix::(S::COLUMNS, rate_bits); + let size = wire_ldes.len(); let public_inputs = F::rand_arr::<{ S::PUBLIC_INPUTS }>(); let lagrange_first = { @@ -41,17 +42,14 @@ where let z_h_on_coset = ZeroPolyOnCoset::::new(log2_strict(WITNESS_SIZE), rate_bits); let last = F::primitive_root_of_unity(log2_strict(WITNESS_SIZE)).inverse(); - let subgroup = F::cyclic_subgroup_known_order( - F::primitive_root_of_unity(log2_strict(WITNESS_SIZE) + rate_bits), - WITNESS_SIZE << rate_bits, - ); - let n = wire_ldes.len(); + let subgroup = + F::cyclic_subgroup_known_order(F::primitive_root_of_unity(log2_strict(size)), size); let alpha = F::rand(); - let constraint_evals = (0..wire_ldes.len()) + let constraint_evals = (0..size) .map(|i| { let vars = StarkEvaluationVars { local_values: 
&wire_ldes[i].clone().try_into().unwrap(), - next_values: &wire_ldes[(i + (1 << rate_bits)) % n] + next_values: &wire_ldes[(i + (1 << rate_bits)) % size] .clone() .try_into() .unwrap(), From 0df1545f0c6474d7cbc05b2f548c87ab354080b0 Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Fri, 4 Feb 2022 07:59:05 -0800 Subject: [PATCH 108/143] Merkle tree bugfixes + tests (#467) * Merkle tree bugfixes + tests * Minor: Clippy + lints --- plonky2/src/hash/merkle_tree.rs | 59 ++++++++++++++++++++++++++++++--- 1 file changed, 54 insertions(+), 5 deletions(-) diff --git a/plonky2/src/hash/merkle_tree.rs b/plonky2/src/hash/merkle_tree.rs index e10b5019..54c62eeb 100644 --- a/plonky2/src/hash/merkle_tree.rs +++ b/plonky2/src/hash/merkle_tree.rs @@ -90,6 +90,20 @@ fn fill_digests_buf>( leaves: &[Vec], cap_height: usize, ) { + // Special case of a tree that's all cap. The usual case will panic because we'll try to split + // an empty slice into chunks of `0`. (We would not need this if there was a way to split into + // `blah` chunks as opposed to chunks _of_ `blah`.) + if digests_buf.is_empty() { + debug_assert_eq!(cap_buf.len(), leaves.len()); + cap_buf + .par_iter_mut() + .zip(leaves) + .for_each(|(cap_buf, leaf)| { + cap_buf.write(H::hash(leaf, false)); + }); + return; + } + let subtree_digests_len = digests_buf.len() >> cap_height; let subtree_leaves_len = leaves.len() >> cap_height; let digests_chunks = digests_buf.par_chunks_exact_mut(subtree_digests_len); @@ -108,6 +122,12 @@ fn fill_digests_buf>( impl> MerkleTree { pub fn new(leaves: Vec>, cap_height: usize) -> Self { + let log2_leaves_len = log2_strict(leaves.len()); + assert!( + cap_height <= log2_leaves_len, + "cap height should be at most log2(leaves.len())" + ); + let num_digests = 2 * (leaves.len() - (1 << cap_height)); let mut digests = Vec::with_capacity(num_digests); @@ -194,16 +214,45 @@ mod tests { const D: usize, >( leaves: Vec>, - n: usize, + cap_height: usize, ) -> Result<()> { - let tree = MerkleTree::::new(leaves.clone(), 1); - for i in 0..n { + let tree = MerkleTree::::new(leaves.clone(), cap_height); + for (i, leaf) in leaves.into_iter().enumerate() { let proof = tree.prove(i); - verify_merkle_proof(leaves[i].clone(), i, &tree.cap, &proof)?; + verify_merkle_proof(leaf, i, &tree.cap, &proof)?; } Ok(()) } + #[test] + #[should_panic] + fn test_cap_height_too_big() { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let log_n = 8; + let cap_height = log_n + 1; // Should panic if `cap_height > len_n`. 
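+        // (For log_n = 8 the tree has 2^8 = 256 leaves, so every cap_height in 0..=8 is legal. At
+        // cap_height = 8 the "cap" is just the 256 hashed leaves and there are no interior digests,
+        // which is the all-cap special case handled in `fill_digests_buf` above.)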
+ + let leaves = random_data::(1 << log_n, 7); + let _ = MerkleTree::>::Hasher>::new(leaves, cap_height); + } + + #[test] + fn test_cap_height_eq_log2_len() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let log_n = 8; + let n = 1 << log_n; + let leaves = random_data::(n, 7); + + verify_all_leaves::(leaves, log_n)?; + + Ok(()) + } + #[test] fn test_merkle_trees() -> Result<()> { const D: usize = 2; @@ -214,7 +263,7 @@ mod tests { let n = 1 << log_n; let leaves = random_data::(n, 7); - verify_all_leaves::(leaves, n)?; + verify_all_leaves::(leaves, 1)?; Ok(()) } From 431bde2c72311d5437e03bd84e613540e66ae40a Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 4 Feb 2022 17:04:07 +0100 Subject: [PATCH 109/143] Fix number of quotient polys --- starky/src/fibonacci_stark.rs | 2 +- starky/src/prover.rs | 6 +++--- starky/src/stark.rs | 6 ++++-- starky/src/stark_testing.rs | 6 +++--- starky/src/verifier.rs | 4 ++-- 5 files changed, 13 insertions(+), 11 deletions(-) diff --git a/starky/src/fibonacci_stark.rs b/starky/src/fibonacci_stark.rs index ce50dfd5..c77775e8 100644 --- a/starky/src/fibonacci_stark.rs +++ b/starky/src/fibonacci_stark.rs @@ -82,7 +82,7 @@ impl, const D: usize> Stark for FibonacciStar todo!() } - fn degree(&self) -> usize { + fn constraint_degree(&self) -> usize { 2 } } diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 2bea24ff..171ea92a 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -82,7 +82,7 @@ where .flat_map(|mut quotient_poly| { quotient_poly.trim(); quotient_poly - .pad(degree << rate_bits) + .pad(degree * (stark.constraint_degree() - 1)) .expect("Quotient has failed, the vanishing polynomial is not divisible by `Z_H"); // Split quotient into degree-n chunks. quotient_poly.chunks(degree) @@ -123,7 +123,7 @@ where timing, "compute openings proof", PolynomialBatch::prove_openings( - &S::fri_instance(zeta, g, rate_bits, config.num_challenges), + &stark.fri_instance(zeta, g, rate_bits, config.num_challenges), initial_merkle_trees, &mut challenger, &fri_params, @@ -162,7 +162,7 @@ where { let degree = 1 << degree_bits; - let max_degree_bits = log2_ceil(stark.degree() - 1); + let max_degree_bits = log2_ceil(stark.constraint_degree() - 1); assert!( max_degree_bits <= rate_bits, "Having constraints of degree higher than the rate is not supported yet." diff --git a/starky/src/stark.rs b/starky/src/stark.rs index 9721c203..8f112939 100644 --- a/starky/src/stark.rs +++ b/starky/src/stark.rs @@ -63,11 +63,12 @@ pub trait Stark, const D: usize>: Sync { ); /// The maximum constraint degree. - fn degree(&self) -> usize; + fn constraint_degree(&self) -> usize; /// Computes the FRI instance used to prove this Stark. // TODO: Permutation polynomials. 
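    // Note: each challenge's quotient polynomial is now split into `constraint_degree() - 1` chunks of
    // degree < n, so the FRI batch below opens that many quotient polynomials per challenge rather than
    // `2^rate_bits` of them.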
fn fri_instance( + &self, zeta: F::Extension, g: F::Extension, rate_bits: usize, @@ -75,7 +76,8 @@ pub trait Stark, const D: usize>: Sync { ) -> FriInstanceInfo { let no_blinding_oracle = FriOracleInfo { blinding: false }; let trace_info = FriPolynomialInfo::from_range(0, 0..Self::COLUMNS); - let quotient_info = FriPolynomialInfo::from_range(1, 0..(1 << rate_bits) * num_challenges); + let quotient_info = + FriPolynomialInfo::from_range(1, 0..(self.constraint_degree() - 1) * num_challenges); let zeta_batch = FriBatchInfo { point: zeta, polynomials: [trace_info.clone(), quotient_info].concat(), diff --git a/starky/src/stark_testing.rs b/starky/src/stark_testing.rs index b8b31cf9..b28a206e 100644 --- a/starky/src/stark_testing.rs +++ b/starky/src/stark_testing.rs @@ -22,7 +22,7 @@ where [(); S::COLUMNS]:, [(); S::PUBLIC_INPUTS]:, { - let rate_bits = log2_ceil(stark.degree() + 1); + let rate_bits = log2_ceil(stark.constraint_degree() + 1); let wire_ldes = random_low_degree_matrix::(S::COLUMNS, rate_bits); let size = wire_ldes.len(); @@ -68,13 +68,13 @@ where .collect::>(); let constraint_eval_degree = PolynomialValues::new(constraint_evals).degree(); - let maximum_degree = WITNESS_SIZE * stark.degree() - 1; + let maximum_degree = WITNESS_SIZE * stark.constraint_degree() - 1; ensure!( constraint_eval_degree <= maximum_degree, "Expected degrees at most {} * {} - 1 = {}, actual {:?}", WITNESS_SIZE, - stark.degree(), + stark.constraint_degree(), maximum_degree, constraint_eval_degree ); diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index b91fe457..b04ec684 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -98,7 +98,7 @@ where // So to reconstruct `t(zeta)` we can compute `reduce_with_powers(chunk, zeta^n)` for each // `quotient_degree_factor`-sized chunk of the original evaluations. 
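        // Spelling that out: writing t(X) = t_0(X) + X^n * t_1(X) + ... + X^{(k-1)*n} * t_{k-1}(X),
        // where k is the quotient degree factor and each t_i has degree < n, gives
        //     t(zeta) = sum_i t_i(zeta) * (zeta^n)^i,
        // which is exactly `reduce_with_powers(chunk, zeta_pow_deg)` with `zeta_pow_deg = zeta^n`.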
for (i, chunk) in quotient_polys_zeta - .chunks(1 << config.fri_config.rate_bits) + .chunks(stark.constraint_degree() - 1) .enumerate() { ensure!(vanishing_polys_zeta[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg)); @@ -108,7 +108,7 @@ where let merkle_caps = &[proof.trace_cap, proof.quotient_polys_cap]; verify_fri_proof::( - &S::fri_instance( + &stark.fri_instance( challenges.stark_zeta, F::primitive_root_of_unity(degree_bits).into(), config.fri_config.rate_bits, From 1c30a5a84e5187834ebc354b40f430ab39d27d00 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 4 Feb 2022 17:16:18 +0100 Subject: [PATCH 110/143] Typo --- system_zero/src/system_zero.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system_zero/src/system_zero.rs b/system_zero/src/system_zero.rs index 6b8576c9..9d78939c 100644 --- a/system_zero/src/system_zero.rs +++ b/system_zero/src/system_zero.rs @@ -81,7 +81,7 @@ impl, const D: usize> Stark for SystemZero usize { + fn constraint_degree(&self) -> usize { 3 } } From 9c6b2394f162a83577d35a56fb34cf1b29fec6bd Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 4 Feb 2022 20:20:12 +0100 Subject: [PATCH 111/143] PR feedback --- starky/src/stark_testing.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/starky/src/stark_testing.rs b/starky/src/stark_testing.rs index b28a206e..8d41e645 100644 --- a/starky/src/stark_testing.rs +++ b/starky/src/stark_testing.rs @@ -2,7 +2,6 @@ use anyhow::{ensure, Result}; use plonky2::field::extension_field::Extendable; use plonky2::field::field_types::Field; use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues}; -use plonky2::field::zero_poly_coset::ZeroPolyOnCoset; use plonky2::hash::hash_types::RichField; use plonky2::util::transpose; use plonky2_util::{log2_ceil, log2_strict}; @@ -24,8 +23,8 @@ where { let rate_bits = log2_ceil(stark.constraint_degree() + 1); - let wire_ldes = random_low_degree_matrix::(S::COLUMNS, rate_bits); - let size = wire_ldes.len(); + let trace_ldes = random_low_degree_matrix::(S::COLUMNS, rate_bits); + let size = trace_ldes.len(); let public_inputs = F::rand_arr::<{ S::PUBLIC_INPUTS }>(); let lagrange_first = { @@ -39,8 +38,6 @@ where evals.lde(rate_bits) }; - let z_h_on_coset = ZeroPolyOnCoset::::new(log2_strict(WITNESS_SIZE), rate_bits); - let last = F::primitive_root_of_unity(log2_strict(WITNESS_SIZE)).inverse(); let subgroup = F::cyclic_subgroup_known_order(F::primitive_root_of_unity(log2_strict(size)), size); @@ -48,8 +45,8 @@ where let constraint_evals = (0..size) .map(|i| { let vars = StarkEvaluationVars { - local_values: &wire_ldes[i].clone().try_into().unwrap(), - next_values: &wire_ldes[(i + (1 << rate_bits)) % size] + local_values: &trace_ldes[i].clone().try_into().unwrap(), + next_values: &trace_ldes[(i + (1 << rate_bits)) % size] .clone() .try_into() .unwrap(), From fc502add01dc90e8fd6ce83fba23e111fa8b374b Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 4 Feb 2022 20:24:58 +0100 Subject: [PATCH 112/143] Add `quotient_degree_factor` function --- starky/src/prover.rs | 4 ++-- starky/src/stark.rs | 7 ++++++- starky/src/verifier.rs | 2 +- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 171ea92a..335d5384 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -82,7 +82,7 @@ where .flat_map(|mut quotient_poly| { quotient_poly.trim(); quotient_poly - .pad(degree * (stark.constraint_degree() - 1)) + .pad(degree * stark.quotient_degree_factor()) .expect("Quotient has 
failed, the vanishing polynomial is not divisible by `Z_H"); // Split quotient into degree-n chunks. quotient_poly.chunks(degree) @@ -162,7 +162,7 @@ where { let degree = 1 << degree_bits; - let max_degree_bits = log2_ceil(stark.constraint_degree() - 1); + let max_degree_bits = log2_ceil(stark.quotient_degree_factor()); assert!( max_degree_bits <= rate_bits, "Having constraints of degree higher than the rate is not supported yet." diff --git a/starky/src/stark.rs b/starky/src/stark.rs index 8f112939..4b20553e 100644 --- a/starky/src/stark.rs +++ b/starky/src/stark.rs @@ -65,6 +65,11 @@ pub trait Stark, const D: usize>: Sync { /// The maximum constraint degree. fn constraint_degree(&self) -> usize; + /// The maximum constraint degree. + fn quotient_degree_factor(&self) -> usize { + 1.max(self.constraint_degree() - 1) + } + /// Computes the FRI instance used to prove this Stark. // TODO: Permutation polynomials. fn fri_instance( @@ -77,7 +82,7 @@ pub trait Stark, const D: usize>: Sync { let no_blinding_oracle = FriOracleInfo { blinding: false }; let trace_info = FriPolynomialInfo::from_range(0, 0..Self::COLUMNS); let quotient_info = - FriPolynomialInfo::from_range(1, 0..(self.constraint_degree() - 1) * num_challenges); + FriPolynomialInfo::from_range(1, 0..self.quotient_degree_factor() * num_challenges); let zeta_batch = FriBatchInfo { point: zeta, polynomials: [trace_info.clone(), quotient_info].concat(), diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index b04ec684..bb0634f5 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -98,7 +98,7 @@ where // So to reconstruct `t(zeta)` we can compute `reduce_with_powers(chunk, zeta^n)` for each // `quotient_degree_factor`-sized chunk of the original evaluations. for (i, chunk) in quotient_polys_zeta - .chunks(stark.constraint_degree() - 1) + .chunks(stark.quotient_degree_factor()) .enumerate() { ensure!(vanishing_polys_zeta[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg)); From 511cb863fc7f43da53a0eafa1629735ae02551e6 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 4 Feb 2022 20:42:49 +0100 Subject: [PATCH 113/143] s/max_degree_bits/quotient_degree_bits --- plonky2/src/plonk/prover.rs | 12 ++++++------ starky/src/prover.rs | 18 +++++++++--------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index 79968b0e..5e23211d 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -333,28 +333,28 @@ fn compute_quotient_polys< alphas: &[F], ) -> Vec> { let num_challenges = common_data.config.num_challenges; - let max_degree_bits = log2_ceil(common_data.quotient_degree_factor); + let quotient_degree_bits = log2_ceil(common_data.quotient_degree_factor); assert!( - max_degree_bits <= common_data.config.fri_config.rate_bits, + quotient_degree_bits <= common_data.config.fri_config.rate_bits, "Having constraints of degree higher than the rate is not supported yet. \ If we need this in the future, we can precompute the larger LDE before computing the `PolynomialBatch`s." ); // We reuse the LDE computed in `PolynomialBatch` and extract every `step` points to get // an LDE matching `max_filtered_constraint_degree`. 
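    // (As a concrete illustration only: with rate_bits = 3 the committed LDE has 8n points, and if
    // quotient_degree_bits = 2 we only need a 4n-point LDE, so we keep every step = 2^(3 - 2) = 2nd
    // point. These numbers are illustrative, not the configured defaults.)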
- let step = 1 << (common_data.config.fri_config.rate_bits - max_degree_bits); + let step = 1 << (common_data.config.fri_config.rate_bits - quotient_degree_bits); // When opening the `Z`s polys at the "next" point in Plonk, need to look at the point `next_step` // steps away since we work on an LDE of degree `max_filtered_constraint_degree`. - let next_step = 1 << max_degree_bits; + let next_step = 1 << quotient_degree_bits; - let points = F::two_adic_subgroup(common_data.degree_bits + max_degree_bits); + let points = F::two_adic_subgroup(common_data.degree_bits + quotient_degree_bits); let lde_size = points.len(); // Retrieve the LDE values at index `i`. let get_at_index = |comm: &'a PolynomialBatch, i: usize| -> &'a [F] { comm.get_lde_values(i * step) }; - let z_h_on_coset = ZeroPolyOnCoset::new(common_data.degree_bits, max_degree_bits); + let z_h_on_coset = ZeroPolyOnCoset::new(common_data.degree_bits, quotient_degree_bits); let points_batches = points.par_chunks(BATCH_SIZE); let quotient_values: Vec> = points_batches diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 335d5384..d8913b9d 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -162,29 +162,29 @@ where { let degree = 1 << degree_bits; - let max_degree_bits = log2_ceil(stark.quotient_degree_factor()); + let quotient_degree_bits = log2_ceil(stark.quotient_degree_factor()); assert!( - max_degree_bits <= rate_bits, + quotient_degree_bits <= rate_bits, "Having constraints of degree higher than the rate is not supported yet." ); - let step = 1 << (rate_bits - max_degree_bits); + let step = 1 << (rate_bits - quotient_degree_bits); // When opening the `Z`s polys at the "next" point, need to look at the point `next_step` steps away. - let next_step = 1 << max_degree_bits; + let next_step = 1 << quotient_degree_bits; // Evaluation of the first Lagrange polynomial on the LDE domain. let lagrange_first = { let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); evals.values[0] = F::ONE; - evals.lde_onto_coset(max_degree_bits) + evals.lde_onto_coset(quotient_degree_bits) }; // Evaluation of the last Lagrange polynomial on the LDE domain. let lagrange_last = { let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); evals.values[degree - 1] = F::ONE; - evals.lde_onto_coset(max_degree_bits) + evals.lde_onto_coset(quotient_degree_bits) }; - let z_h_on_coset = ZeroPolyOnCoset::::new(degree_bits, max_degree_bits); + let z_h_on_coset = ZeroPolyOnCoset::::new(degree_bits, quotient_degree_bits); // Retrieve the LDE values at index `i`. let get_at_index = |comm: &PolynomialBatch, i: usize| -> [F; S::COLUMNS] { @@ -192,9 +192,9 @@ where }; // Last element of the subgroup. 
let last = F::primitive_root_of_unity(degree_bits).inverse(); - let size = degree << max_degree_bits; + let size = degree << quotient_degree_bits; let coset = F::cyclic_subgroup_coset_known_order( - F::primitive_root_of_unity(degree_bits + max_degree_bits), + F::primitive_root_of_unity(degree_bits + quotient_degree_bits), F::coset_shift(), size, ); From 659f1337f24549c61906b3cb735bb485b6eecf10 Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Fri, 4 Feb 2022 12:41:29 -0800 Subject: [PATCH 114/143] Permit small circuits in `compute_quotient_polys` (#469) * Permit small circuits in `compute_quotient_polys` * PR comments --- plonky2/src/plonk/prover.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index 5e23211d..5bc89d25 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -5,7 +5,7 @@ use anyhow::Result; use plonky2_field::extension_field::Extendable; use plonky2_field::polynomial::{PolynomialCoeffs, PolynomialValues}; use plonky2_field::zero_poly_coset::ZeroPolyOnCoset; -use plonky2_util::log2_ceil; +use plonky2_util::{ceil_div_usize, log2_ceil}; use rayon::prelude::*; use crate::field::field_types::Field; @@ -357,12 +357,18 @@ fn compute_quotient_polys< let z_h_on_coset = ZeroPolyOnCoset::new(common_data.degree_bits, quotient_degree_bits); let points_batches = points.par_chunks(BATCH_SIZE); + let num_batches = ceil_div_usize(points.len(), BATCH_SIZE); let quotient_values: Vec> = points_batches .enumerate() .map(|(batch_i, xs_batch)| { - assert_eq!(xs_batch.len(), BATCH_SIZE); + // Each batch must be the same size, except the last one, which may be smaller. + debug_assert!( + xs_batch.len() == BATCH_SIZE + || (batch_i == num_batches - 1 && xs_batch.len() <= BATCH_SIZE) + ); + let indices_batch: Vec = - (BATCH_SIZE * batch_i..BATCH_SIZE * (batch_i + 1)).collect(); + (BATCH_SIZE * batch_i..BATCH_SIZE * batch_i + xs_batch.len()).collect(); let mut shifted_xs_batch = Vec::with_capacity(xs_batch.len()); let mut local_zs_batch = Vec::with_capacity(xs_batch.len()); From b6a60e721d1c01d172285b1927b4d06c75a66bdb Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Fri, 4 Feb 2022 13:08:57 -0800 Subject: [PATCH 115/143] Separate methods for hashing with or without padding (#458) * Separate methods for hashing with or without padding This should be a tad better for for performance, and lets us do padding in a generic way, rather than each hash reimplementing it. This also disables padding for public inputs. It seems unnecessary since the number of public inputs is fixed for any given instance. 
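As a rough illustration of the intended call sites after this change (a hypothetical sketch, where
`leaf_elements` and `message_elements` are placeholder slices of field elements, not code from this
patch):

    // Fixed-length data such as Merkle tree leaves or a circuit's public inputs: no padding step.
    let leaf_digest = PoseidonHash::hash_no_pad(&leaf_elements);
    // Data whose length is not fixed in advance: apply the pad10*1 rule first.
    let padded_digest = PoseidonHash::hash_pad(&message_elements);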
* PR feedback * update --- plonky2/src/fri/prover.rs | 3 +- plonky2/src/fri/recursive_verifier.rs | 2 +- plonky2/src/hash/hashing.rs | 48 ++++++------------------- plonky2/src/hash/keccak.rs | 2 +- plonky2/src/hash/merkle_proofs.rs | 2 +- plonky2/src/hash/merkle_tree.rs | 4 +-- plonky2/src/hash/path_compression.rs | 2 +- plonky2/src/hash/poseidon.rs | 6 ++-- plonky2/src/iop/challenger.rs | 3 +- plonky2/src/plonk/circuit_builder.rs | 4 +-- plonky2/src/plonk/config.rs | 16 ++++++++- plonky2/src/plonk/proof.rs | 4 +-- plonky2/src/plonk/prover.rs | 2 +- plonky2/src/plonk/recursive_verifier.rs | 2 +- 14 files changed, 43 insertions(+), 57 deletions(-) diff --git a/plonky2/src/fri/prover.rs b/plonky2/src/fri/prover.rs index e814beae..5cd5fdf1 100644 --- a/plonky2/src/fri/prover.rs +++ b/plonky2/src/fri/prover.rs @@ -115,14 +115,13 @@ fn fri_proof_of_work, C: GenericConfig, c (0..=F::NEG_ONE.to_canonical_u64()) .into_par_iter() .find_any(|&i| { - C::InnerHasher::hash( + C::InnerHasher::hash_no_pad( ¤t_hash .elements .iter() .copied() .chain(Some(F::from_canonical_u64(i))) .collect_vec(), - false, ) .elements[0] .to_canonical_u64() diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index 8dbb2038..65e6e024 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -114,7 +114,7 @@ impl, const D: usize> CircuitBuilder { let mut inputs = challenger.get_hash(self).elements.to_vec(); inputs.push(proof.pow_witness); - let hash = self.hash_n_to_m::(inputs, 1, false)[0]; + let hash = self.hash_n_to_m_no_pad::(inputs, 1)[0]; self.assert_leading_zeros( hash, config.proof_of_work_bits + (64 - F::order().bits()) as u32, diff --git a/plonky2/src/hash/hashing.rs b/plonky2/src/hash/hashing.rs index 997a6b12..ea205654 100644 --- a/plonky2/src/hash/hashing.rs +++ b/plonky2/src/hash/hashing.rs @@ -18,7 +18,7 @@ pub fn hash_or_noop>(inputs: Vec) -> Ha if inputs.len() <= 4 { HashOut::from_partial(inputs) } else { - hash_n_to_hash::(&inputs, false) + hash_n_to_hash_no_pad::(&inputs) } } @@ -28,34 +28,23 @@ impl, const D: usize> CircuitBuilder { if inputs.len() <= 4 { HashOutTarget::from_partial(inputs, zero) } else { - self.hash_n_to_hash::(inputs, false) + self.hash_n_to_hash_no_pad::(inputs) } } - pub fn hash_n_to_hash>( + pub fn hash_n_to_hash_no_pad>( &mut self, inputs: Vec, - pad: bool, ) -> HashOutTarget { - HashOutTarget::from_vec(self.hash_n_to_m::(inputs, 4, pad)) + HashOutTarget::from_vec(self.hash_n_to_m_no_pad::(inputs, 4)) } - pub fn hash_n_to_m>( + pub fn hash_n_to_m_no_pad>( &mut self, - mut inputs: Vec, + inputs: Vec, num_outputs: usize, - pad: bool, ) -> Vec { let zero = self.zero(); - let one = self.one(); - - if pad { - inputs.push(zero); - while (inputs.len() + 1) % SPONGE_WIDTH != 0 { - inputs.push(one); - } - inputs.push(zero); - } let mut state = [zero; SPONGE_WIDTH]; @@ -97,24 +86,12 @@ pub trait PlonkyPermutation { fn permute(input: [F; SPONGE_WIDTH]) -> [F; SPONGE_WIDTH]; } -/// If `pad` is enabled, the message is padded using the pad10*1 rule. In general this is required -/// for the hash to be secure, but it can safely be disabled in certain cases, like if the input -/// length is fixed. -pub fn hash_n_to_m>( +/// Hash a message without any padding step. Note that this can enable length-extension attacks. +/// However, it is still collision-resistant in cases where the input has a fixed length. 
+pub fn hash_n_to_m_no_pad>( inputs: &[F], num_outputs: usize, - pad: bool, ) -> Vec { - if pad { - let mut padded_inputs = inputs.to_vec(); - padded_inputs.push(F::ZERO); - while (padded_inputs.len() + 1) % SPONGE_WIDTH != 0 { - padded_inputs.push(F::ONE); - } - padded_inputs.push(F::ZERO); - return hash_n_to_m::(&padded_inputs, num_outputs, false); - } - let mut state = [F::ZERO; SPONGE_WIDTH]; // Absorb all input chunks. @@ -136,9 +113,6 @@ pub fn hash_n_to_m>( } } -pub fn hash_n_to_hash>( - inputs: &[F], - pad: bool, -) -> HashOut { - HashOut::from_vec(hash_n_to_m::(inputs, 4, pad)) +pub fn hash_n_to_hash_no_pad>(inputs: &[F]) -> HashOut { + HashOut::from_vec(hash_n_to_m_no_pad::(inputs, 4)) } diff --git a/plonky2/src/hash/keccak.rs b/plonky2/src/hash/keccak.rs index a537f5e3..9a061d82 100644 --- a/plonky2/src/hash/keccak.rs +++ b/plonky2/src/hash/keccak.rs @@ -56,7 +56,7 @@ impl Hasher for KeccakHash { type Hash = BytesHash; type Permutation = KeccakPermutation; - fn hash(input: &[F], _pad: bool) -> Self::Hash { + fn hash_no_pad(input: &[F]) -> Self::Hash { let mut buffer = Buffer::new(Vec::new()); buffer.write_field_vec(input).unwrap(); let mut arr = [0; N]; diff --git a/plonky2/src/hash/merkle_proofs.rs b/plonky2/src/hash/merkle_proofs.rs index c2f3655d..f90f0657 100644 --- a/plonky2/src/hash/merkle_proofs.rs +++ b/plonky2/src/hash/merkle_proofs.rs @@ -32,7 +32,7 @@ pub(crate) fn verify_merkle_proof>( proof: &MerkleProof, ) -> Result<()> { let mut index = leaf_index; - let mut current_digest = H::hash(&leaf_data, false); + let mut current_digest = H::hash_no_pad(&leaf_data); for &sibling_digest in proof.siblings.iter() { let bit = index & 1; index >>= 1; diff --git a/plonky2/src/hash/merkle_tree.rs b/plonky2/src/hash/merkle_tree.rs index 54c62eeb..e9460c14 100644 --- a/plonky2/src/hash/merkle_tree.rs +++ b/plonky2/src/hash/merkle_tree.rs @@ -63,7 +63,7 @@ fn fill_subtree>( ) -> H::Hash { assert_eq!(leaves.len(), digests_buf.len() / 2 + 1); if digests_buf.is_empty() { - H::hash(&leaves[0], false) + H::hash_no_pad(&leaves[0]) } else { // Layout is: left recursive output || left child digest // || right child digest || right recursive output. @@ -99,7 +99,7 @@ fn fill_digests_buf>( .par_iter_mut() .zip(leaves) .for_each(|(cap_buf, leaf)| { - cap_buf.write(H::hash(leaf, false)); + cap_buf.write(H::hash_no_pad(leaf)); }); return; } diff --git a/plonky2/src/hash/path_compression.rs b/plonky2/src/hash/path_compression.rs index c5c3f36e..56c355fd 100644 --- a/plonky2/src/hash/path_compression.rs +++ b/plonky2/src/hash/path_compression.rs @@ -66,7 +66,7 @@ pub(crate) fn decompress_merkle_proofs>( for (&i, v) in leaves_indices.iter().zip(leaves_data) { // Observe the leaves. - seen.insert(i + num_leaves, H::hash(v, false)); + seen.insert(i + num_leaves, H::hash_no_pad(v)); } // Iterators over the siblings. 
diff --git a/plonky2/src/hash/poseidon.rs b/plonky2/src/hash/poseidon.rs index d2b47932..08c2851a 100644 --- a/plonky2/src/hash/poseidon.rs +++ b/plonky2/src/hash/poseidon.rs @@ -9,7 +9,7 @@ use crate::gates::gate::Gate; use crate::gates::poseidon::PoseidonGate; use crate::gates::poseidon_mds::PoseidonMdsGate; use crate::hash::hash_types::{HashOut, RichField}; -use crate::hash::hashing::{compress, hash_n_to_hash, PlonkyPermutation, SPONGE_WIDTH}; +use crate::hash::hashing::{compress, hash_n_to_hash_no_pad, PlonkyPermutation, SPONGE_WIDTH}; use crate::iop::ext_target::ExtensionTarget; use crate::iop::target::{BoolTarget, Target}; use crate::plonk::circuit_builder::CircuitBuilder; @@ -633,8 +633,8 @@ impl Hasher for PoseidonHash { type Hash = HashOut; type Permutation = PoseidonPermutation; - fn hash(input: &[F], pad: bool) -> Self::Hash { - hash_n_to_hash::(input, pad) + fn hash_no_pad(input: &[F]) -> Self::Hash { + hash_n_to_hash_no_pad::(input) } fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash { diff --git a/plonky2/src/iop/challenger.rs b/plonky2/src/iop/challenger.rs index b1a4c12b..c85f30d1 100644 --- a/plonky2/src/iop/challenger.rs +++ b/plonky2/src/iop/challenger.rs @@ -165,7 +165,7 @@ impl> Challenger { self.observe_extension_elements(&final_poly.coeffs); - let fri_pow_response = C::InnerHasher::hash( + let fri_pow_response = C::InnerHasher::hash_no_pad( &self .get_hash() .elements @@ -173,7 +173,6 @@ impl> Challenger { .copied() .chain(Some(pow_witness)) .collect::>(), - false, ) .elements[0]; diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index 67538d00..cf89bf1a 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -621,7 +621,7 @@ impl, const D: usize> CircuitBuilder { // those hash wires match the claimed public inputs. let num_public_inputs = self.public_inputs.len(); let public_inputs_hash = - self.hash_n_to_hash::(self.public_inputs.clone(), true); + self.hash_n_to_hash_no_pad::(self.public_inputs.clone()); let pi_gate = self.add_gate(PublicInputGate, vec![]); for (&hash_part, wire) in public_inputs_hash .elements @@ -749,7 +749,7 @@ impl, const D: usize> CircuitBuilder { constants_sigmas_cap.flatten(), vec![/* Add other circuit data here */], ]; - let circuit_digest = C::Hasher::hash(&circuit_digest_parts.concat(), false); + let circuit_digest = C::Hasher::hash_no_pad(&circuit_digest_parts.concat()); let common = CommonCircuitData { config: self.config, diff --git a/plonky2/src/plonk/config.rs b/plonky2/src/plonk/config.rs index 281d0025..fdca7037 100644 --- a/plonky2/src/plonk/config.rs +++ b/plonky2/src/plonk/config.rs @@ -31,7 +31,21 @@ pub trait Hasher: Sized + Clone + Debug + Eq + PartialEq { /// Permutation used in the sponge construction. type Permutation: PlonkyPermutation; - fn hash(input: &[F], pad: bool) -> Self::Hash; + /// Hash a message without any padding step. Note that this can enable length-extension attacks. + /// However, it is still collision-resistant in cases where the input has a fixed length. + fn hash_no_pad(input: &[F]) -> Self::Hash; + + /// Pad the message using the `pad10*1` rule, then hash it. 
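+    /// For example, with SPONGE_WIDTH = 12, a 5-element message `m` becomes the single block
+    /// `[m[0], ..., m[4], 1, 0, 0, 0, 0, 0, 1]` before being handed to `hash_no_pad`.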
+ fn hash_pad(input: &[F]) -> Self::Hash { + let mut padded_input = input.to_vec(); + padded_input.push(F::ONE); + while (padded_input.len() + 1) % SPONGE_WIDTH != 0 { + padded_input.push(F::ZERO); + } + padded_input.push(F::ONE); + Self::hash_no_pad(&padded_input) + } + fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash; } diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index 803e64d4..2cf0a885 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -91,7 +91,7 @@ impl, C: GenericConfig, const D: usize> pub(crate) fn get_public_inputs_hash( &self, ) -> <>::InnerHasher as Hasher>::Hash { - C::InnerHasher::hash(&self.public_inputs, true) + C::InnerHasher::hash_no_pad(&self.public_inputs) } pub fn to_bytes(&self) -> anyhow::Result> { @@ -207,7 +207,7 @@ impl, C: GenericConfig, const D: usize> pub(crate) fn get_public_inputs_hash( &self, ) -> <>::InnerHasher as Hasher>::Hash { - C::InnerHasher::hash(&self.public_inputs, true) + C::InnerHasher::hash_no_pad(&self.public_inputs) } pub fn to_bytes(&self) -> anyhow::Result> { diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index 5bc89d25..7a172aff 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -44,7 +44,7 @@ pub(crate) fn prove, C: GenericConfig, co ); let public_inputs = partition_witness.get_targets(&prover_data.public_inputs); - let public_inputs_hash = C::InnerHasher::hash(&public_inputs, true); + let public_inputs_hash = C::InnerHasher::hash_no_pad(&public_inputs); if cfg!(debug_assertions) { // Display the marked targets for debugging purposes. diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index 5d898d0d..cb2bc1e0 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -27,7 +27,7 @@ impl, const D: usize> CircuitBuilder { } = proof_with_pis; assert_eq!(public_inputs.len(), inner_common_data.num_public_inputs); - let public_inputs_hash = self.hash_n_to_hash::(public_inputs, true); + let public_inputs_hash = self.hash_n_to_hash_no_pad::(public_inputs); self.verify_proof( proof, From 83a572717e6a78c2c81ec93c5ddf8d3267288307 Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Fri, 4 Feb 2022 16:50:57 -0800 Subject: [PATCH 116/143] Implement Poseidon in system_zero/permutation_unit (#459) * Implement Poseidon in system_zero/permutation_unit * Minor cleanup * Daniel PR comments * Update dependencies --- starky/src/constraint_consumer.rs | 4 +- system_zero/Cargo.toml | 2 + system_zero/src/column_layout.rs | 41 +++- system_zero/src/permutation_unit.rs | 309 +++++++++++++++++++++++++--- system_zero/src/system_zero.rs | 8 +- 5 files changed, 321 insertions(+), 43 deletions(-) diff --git a/starky/src/constraint_consumer.rs b/starky/src/constraint_consumer.rs index b7c9f399..c909b520 100644 --- a/starky/src/constraint_consumer.rs +++ b/starky/src/constraint_consumer.rs @@ -12,7 +12,9 @@ pub struct ConstraintConsumer { alphas: Vec, /// Running sums of constraints that have been emitted so far, scaled by powers of alpha. - constraint_accs: Vec
<P>
, + // TODO(JN): This is pub so it can be used in a test. Once we have an API for accessing this + // result, it should be made private. + pub constraint_accs: Vec
<P>
, /// The evaluation of `X - g^(n-1)`. z_last: P, diff --git a/system_zero/Cargo.toml b/system_zero/Cargo.toml index b908dea0..e5b617c9 100644 --- a/system_zero/Cargo.toml +++ b/system_zero/Cargo.toml @@ -10,3 +10,5 @@ starky = { path = "../starky" } anyhow = "1.0.40" env_logger = "0.9.0" log = "0.4.14" +rand = "0.8.4" +rand_chacha = "0.3.1" diff --git a/system_zero/src/column_layout.rs b/system_zero/src/column_layout.rs index 3d8fc2c0..7a9e92e5 100644 --- a/system_zero/src/column_layout.rs +++ b/system_zero/src/column_layout.rs @@ -24,35 +24,56 @@ pub(crate) const COL_STACK_PTR: usize = COL_FRAME_PTR + 1; const START_PERMUTATION_UNIT: usize = COL_STACK_PTR + 1; -pub(crate) const fn col_permutation_full_first(round: usize, i: usize) -> usize { +const START_PERMUTATION_FULL_FIRST: usize = START_PERMUTATION_UNIT + SPONGE_WIDTH; + +pub(crate) const fn col_permutation_full_first_mid_sbox(round: usize, i: usize) -> usize { debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS); debug_assert!(i < SPONGE_WIDTH); - START_PERMUTATION_UNIT + round * SPONGE_WIDTH + i + START_PERMUTATION_FULL_FIRST + 2 * round * SPONGE_WIDTH + i +} + +pub(crate) const fn col_permutation_full_first_after_mds(round: usize, i: usize) -> usize { + debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_PERMUTATION_FULL_FIRST + (2 * round + 1) * SPONGE_WIDTH + i } const START_PERMUTATION_PARTIAL: usize = - col_permutation_full_first(poseidon::HALF_N_FULL_ROUNDS - 1, SPONGE_WIDTH - 1) + 1; + col_permutation_full_first_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, SPONGE_WIDTH - 1) + 1; -pub(crate) const fn col_permutation_partial(round: usize) -> usize { +pub(crate) const fn col_permutation_partial_mid_sbox(round: usize) -> usize { debug_assert!(round < poseidon::N_PARTIAL_ROUNDS); - START_PERMUTATION_PARTIAL + round + START_PERMUTATION_PARTIAL + 2 * round } -const START_PERMUTATION_FULL_SECOND: usize = COL_STACK_PTR + 1; +pub(crate) const fn col_permutation_partial_after_sbox(round: usize) -> usize { + debug_assert!(round < poseidon::N_PARTIAL_ROUNDS); + START_PERMUTATION_PARTIAL + 2 * round + 1 +} -pub(crate) const fn col_permutation_full_second(round: usize, i: usize) -> usize { +const START_PERMUTATION_FULL_SECOND: usize = + col_permutation_partial_after_sbox(poseidon::N_PARTIAL_ROUNDS - 1) + 1; + +pub(crate) const fn col_permutation_full_second_mid_sbox(round: usize, i: usize) -> usize { debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS); debug_assert!(i < SPONGE_WIDTH); - START_PERMUTATION_FULL_SECOND + round * SPONGE_WIDTH + i + START_PERMUTATION_FULL_SECOND + 2 * round * SPONGE_WIDTH + i +} + +pub(crate) const fn col_permutation_full_second_after_mds(round: usize, i: usize) -> usize { + debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_PERMUTATION_FULL_SECOND + (2 * round + 1) * SPONGE_WIDTH + i } pub(crate) const fn col_permutation_input(i: usize) -> usize { - col_permutation_full_first(0, i) + debug_assert!(i < SPONGE_WIDTH); + START_PERMUTATION_UNIT + i } pub(crate) const fn col_permutation_output(i: usize) -> usize { debug_assert!(i < SPONGE_WIDTH); - col_permutation_full_second(poseidon::HALF_N_FULL_ROUNDS, i) + col_permutation_full_second_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, i) } const END_PERMUTATION_UNIT: usize = col_permutation_output(SPONGE_WIDTH - 1); diff --git a/system_zero/src/permutation_unit.rs b/system_zero/src/permutation_unit.rs index 43883fca..7f12b9ce 100644 --- a/system_zero/src/permutation_unit.rs +++ 
b/system_zero/src/permutation_unit.rs @@ -2,36 +2,120 @@ use plonky2::field::extension_field::{Extendable, FieldExtension}; use plonky2::field::packed_field::PackedField; use plonky2::hash::hash_types::RichField; use plonky2::hash::hashing::SPONGE_WIDTH; +use plonky2::hash::poseidon::{HALF_N_FULL_ROUNDS, N_PARTIAL_ROUNDS}; use plonky2::plonk::circuit_builder::CircuitBuilder; use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; use starky::vars::StarkEvaluationTargets; use starky::vars::StarkEvaluationVars; -use crate::column_layout::{col_permutation_input, col_permutation_output, NUM_COLUMNS}; +use crate::column_layout::{ + col_permutation_full_first_after_mds as col_full_1st_after_mds, + col_permutation_full_first_mid_sbox as col_full_1st_mid_sbox, + col_permutation_full_second_after_mds as col_full_2nd_after_mds, + col_permutation_full_second_mid_sbox as col_full_2nd_mid_sbox, + col_permutation_input as col_input, + col_permutation_partial_after_sbox as col_partial_after_sbox, + col_permutation_partial_mid_sbox as col_partial_mid_sbox, NUM_COLUMNS, +}; use crate::public_input_layout::NUM_PUBLIC_INPUTS; use crate::system_zero::SystemZero; +fn constant_layer( + mut state: [P; SPONGE_WIDTH], + round: usize, +) -> [P; SPONGE_WIDTH] +where + F: RichField, + FE: FieldExtension, + P: PackedField, +{ + // One day I might actually vectorize this, but today is not that day. + for i in 0..P::WIDTH { + let mut unpacked_state = [P::Scalar::default(); SPONGE_WIDTH]; + for j in 0..SPONGE_WIDTH { + unpacked_state[j] = state[j].as_slice()[i]; + } + F::constant_layer_field(&mut unpacked_state, round); + for j in 0..SPONGE_WIDTH { + state[j].as_slice_mut()[i] = unpacked_state[j]; + } + } + state +} + +fn mds_layer(mut state: [P; SPONGE_WIDTH]) -> [P; SPONGE_WIDTH] +where + F: RichField, + FE: FieldExtension, + P: PackedField, +{ + for i in 0..P::WIDTH { + let mut unpacked_state = [P::Scalar::default(); SPONGE_WIDTH]; + for j in 0..SPONGE_WIDTH { + unpacked_state[j] = state[j].as_slice()[i]; + } + unpacked_state = F::mds_layer_field(&unpacked_state); + for j in 0..SPONGE_WIDTH { + state[j].as_slice_mut()[i] = unpacked_state[j]; + } + } + state +} + impl, const D: usize> SystemZero { - pub(crate) fn generate_permutation_unit(&self, values: &mut [F; NUM_COLUMNS]) { + pub(crate) fn generate_permutation_unit(values: &mut [F; NUM_COLUMNS]) { // Load inputs. let mut state = [F::ZERO; SPONGE_WIDTH]; for i in 0..SPONGE_WIDTH { - state[i] = values[col_permutation_input(i)]; + state[i] = values[col_input(i)]; } - // TODO: First full rounds. - // TODO: Partial rounds. - // TODO: Second full rounds. + for r in 0..HALF_N_FULL_ROUNDS { + F::constant_layer(&mut state, r); - // Write outputs. - for i in 0..SPONGE_WIDTH { - values[col_permutation_output(i)] = state[i]; + for i in 0..SPONGE_WIDTH { + let state_cubed = state[i].cube(); + values[col_full_1st_mid_sbox(r, i)] = state_cubed; + state[i] *= state_cubed.square(); // Form state ** 7. + } + + state = F::mds_layer(&state); + + for i in 0..SPONGE_WIDTH { + values[col_full_1st_after_mds(r, i)] = state[i]; + } + } + + for r in 0..N_PARTIAL_ROUNDS { + F::constant_layer(&mut state, HALF_N_FULL_ROUNDS + r); + + let state0_cubed = state[0].cube(); + values[col_partial_mid_sbox(r)] = state0_cubed; + state[0] *= state0_cubed.square(); // Form state ** 7. 
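+            // (The cube is written to its own trace column so that the constraints can check the
+            // s-box in two degree-3 steps, `mid_sbox = x^3` followed by `x * mid_sbox^2`, rather than
+            // as a single degree-7 constraint; this is what keeps the constraint degree at 3.)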
+ values[col_partial_after_sbox(r)] = state[0]; + + state = F::mds_layer(&state); + } + + for r in 0..HALF_N_FULL_ROUNDS { + F::constant_layer(&mut state, HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r); + + for i in 0..SPONGE_WIDTH { + let state_cubed = state[i].cube(); + values[col_full_2nd_mid_sbox(r, i)] = state_cubed; + state[i] *= state_cubed.square(); // Form state ** 7. + } + + state = F::mds_layer(&state); + + for i in 0..SPONGE_WIDTH { + values[col_full_2nd_after_mds(r, i)] = state[i]; + } } } #[inline] pub(crate) fn eval_permutation_unit( - &self, vars: StarkEvaluationVars, yield_constr: &mut ConstraintConsumer
<P>
, ) where @@ -43,22 +127,64 @@ impl, const D: usize> SystemZero { // Load inputs. let mut state = [P::ZEROS; SPONGE_WIDTH]; for i in 0..SPONGE_WIDTH { - state[i] = local_values[col_permutation_input(i)]; + state[i] = local_values[col_input(i)]; } - // TODO: First full rounds. - // TODO: Partial rounds. - // TODO: Second full rounds. + for r in 0..HALF_N_FULL_ROUNDS { + state = constant_layer(state, r); - // Assert that the computed output matches the outputs in the trace. - for i in 0..SPONGE_WIDTH { - let out = local_values[col_permutation_output(i)]; - yield_constr.constraint(state[i] - out); + for i in 0..SPONGE_WIDTH { + let state_cubed = state[i] * state[i].square(); + yield_constr + .constraint_wrapping(state_cubed - local_values[col_full_1st_mid_sbox(r, i)]); + let state_cubed = local_values[col_full_1st_mid_sbox(r, i)]; + state[i] *= state_cubed.square(); // Form state ** 7. + } + + state = mds_layer(state); + + for i in 0..SPONGE_WIDTH { + yield_constr + .constraint_wrapping(state[i] - local_values[col_full_1st_after_mds(r, i)]); + state[i] = local_values[col_full_1st_after_mds(r, i)]; + } + } + + for r in 0..N_PARTIAL_ROUNDS { + state = constant_layer(state, HALF_N_FULL_ROUNDS + r); + + let state0_cubed = state[0] * state[0].square(); + yield_constr.constraint_wrapping(state0_cubed - local_values[col_partial_mid_sbox(r)]); + let state0_cubed = local_values[col_partial_mid_sbox(r)]; + state[0] *= state0_cubed.square(); // Form state ** 7. + yield_constr.constraint_wrapping(state[0] - local_values[col_partial_after_sbox(r)]); + state[0] = local_values[col_partial_after_sbox(r)]; + + state = mds_layer(state); + } + + for r in 0..HALF_N_FULL_ROUNDS { + state = constant_layer(state, HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r); + + for i in 0..SPONGE_WIDTH { + let state_cubed = state[i] * state[i].square(); + yield_constr + .constraint_wrapping(state_cubed - local_values[col_full_2nd_mid_sbox(r, i)]); + let state_cubed = local_values[col_full_2nd_mid_sbox(r, i)]; + state[i] *= state_cubed.square(); // Form state ** 7. + } + + state = mds_layer(state); + + for i in 0..SPONGE_WIDTH { + yield_constr + .constraint_wrapping(state[i] - local_values[col_full_2nd_after_mds(r, i)]); + state[i] = local_values[col_full_2nd_after_mds(r, i)]; + } } } pub(crate) fn eval_permutation_unit_recursively( - &self, builder: &mut CircuitBuilder, vars: StarkEvaluationTargets, yield_constr: &mut RecursiveConstraintConsumer, @@ -69,18 +195,145 @@ impl, const D: usize> SystemZero { // Load inputs. let mut state = [zero; SPONGE_WIDTH]; for i in 0..SPONGE_WIDTH { - state[i] = local_values[col_permutation_input(i)]; + state[i] = local_values[col_input(i)]; } - // TODO: First full rounds. - // TODO: Partial rounds. - // TODO: Second full rounds. + for r in 0..HALF_N_FULL_ROUNDS { + F::constant_layer_recursive(builder, &mut state, r); - // Assert that the computed output matches the outputs in the trace. - for i in 0..SPONGE_WIDTH { - let out = local_values[col_permutation_output(i)]; - let diff = builder.sub_extension(state[i], out); - yield_constr.constraint(builder, diff); + for i in 0..SPONGE_WIDTH { + let state_cubed = builder.cube_extension(state[i]); + let diff = + builder.sub_extension(state_cubed, local_values[col_full_1st_mid_sbox(r, i)]); + yield_constr.constraint_wrapping(builder, diff); + let state_cubed = local_values[col_full_1st_mid_sbox(r, i)]; + state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]); + // Form state ** 7. 
+ } + + state = F::mds_layer_recursive(builder, &state); + + for i in 0..SPONGE_WIDTH { + let diff = + builder.sub_extension(state[i], local_values[col_full_1st_after_mds(r, i)]); + yield_constr.constraint_wrapping(builder, diff); + state[i] = local_values[col_full_1st_after_mds(r, i)]; + } + } + + for r in 0..N_PARTIAL_ROUNDS { + F::constant_layer_recursive(builder, &mut state, HALF_N_FULL_ROUNDS + r); + + let state0_cubed = builder.cube_extension(state[0]); + let diff = builder.sub_extension(state0_cubed, local_values[col_partial_mid_sbox(r)]); + yield_constr.constraint_wrapping(builder, diff); + let state0_cubed = local_values[col_partial_mid_sbox(r)]; + state[0] = builder.mul_many_extension(&[state[0], state0_cubed, state0_cubed]); // Form state ** 7. + let diff = builder.sub_extension(state[0], local_values[col_partial_after_sbox(r)]); + yield_constr.constraint_wrapping(builder, diff); + state[0] = local_values[col_partial_after_sbox(r)]; + + state = F::mds_layer_recursive(builder, &state); + } + + for r in 0..HALF_N_FULL_ROUNDS { + F::constant_layer_recursive( + builder, + &mut state, + HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r, + ); + + for i in 0..SPONGE_WIDTH { + let state_cubed = builder.cube_extension(state[i]); + let diff = + builder.sub_extension(state_cubed, local_values[col_full_2nd_mid_sbox(r, i)]); + yield_constr.constraint_wrapping(builder, diff); + let state_cubed = local_values[col_full_2nd_mid_sbox(r, i)]; + state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]); + // Form state ** 7. + } + + state = F::mds_layer_recursive(builder, &state); + + for i in 0..SPONGE_WIDTH { + let diff = + builder.sub_extension(state[i], local_values[col_full_2nd_after_mds(r, i)]); + yield_constr.constraint_wrapping(builder, diff); + state[i] = local_values[col_full_2nd_after_mds(r, i)]; + } } } } + +#[cfg(test)] +mod tests { + use plonky2::field::field_types::Field; + use plonky2::field::goldilocks_field::GoldilocksField; + use plonky2::hash::poseidon::Poseidon; + use rand::SeedableRng; + use rand_chacha::ChaCha8Rng; + use starky::constraint_consumer::ConstraintConsumer; + use starky::vars::StarkEvaluationVars; + + use crate::column_layout::{ + col_permutation_input as col_input, col_permutation_output as col_output, NUM_COLUMNS, + }; + use crate::permutation_unit::SPONGE_WIDTH; + use crate::public_input_layout::NUM_PUBLIC_INPUTS; + use crate::system_zero::SystemZero; + + #[test] + fn generate_eval_consistency() { + const D: usize = 1; + type F = GoldilocksField; + + let mut values = [F::default(); NUM_COLUMNS]; + SystemZero::::generate_permutation_unit(&mut values); + + let vars = StarkEvaluationVars { + local_values: &values, + next_values: &[F::default(); NUM_COLUMNS], + public_inputs: &[F::default(); NUM_PUBLIC_INPUTS], + }; + + let mut constrant_consumer = ConstraintConsumer::new( + vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)], + GoldilocksField::ONE, + GoldilocksField::ONE, + GoldilocksField::ONE, + ); + SystemZero::::eval_permutation_unit(vars, &mut constrant_consumer); + for &acc in &constrant_consumer.constraint_accs { + assert_eq!(acc, GoldilocksField::ZERO); + } + } + + #[test] + fn poseidon_result() { + const D: usize = 1; + type F = GoldilocksField; + + let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25); + let state = [F::default(); SPONGE_WIDTH].map(|_| F::rand_from_rng(&mut rng)); + + // Get true Poseidon hash + let target = GoldilocksField::poseidon(state); + + // Get result from `generate_permutation_unit` + // Initialize 
`values` with randomness to test that the code doesn't rely on zero-filling. + let mut values = [F::default(); NUM_COLUMNS].map(|_| F::rand_from_rng(&mut rng)); + for i in 0..SPONGE_WIDTH { + values[col_input(i)] = state[i]; + } + SystemZero::::generate_permutation_unit(&mut values); + let mut result = [F::default(); SPONGE_WIDTH]; + for i in 0..SPONGE_WIDTH { + result[i] = values[col_output(i)]; + } + + assert_eq!(target, result); + } + + // TODO(JN): test degree + // TODO(JN): test `eval_permutation_unit_recursively` +} diff --git a/system_zero/src/system_zero.rs b/system_zero/src/system_zero.rs index 9d78939c..70d3bbca 100644 --- a/system_zero/src/system_zero.rs +++ b/system_zero/src/system_zero.rs @@ -27,14 +27,14 @@ impl, const D: usize> SystemZero { let mut row = [F::ZERO; NUM_COLUMNS]; self.generate_first_row_core_registers(&mut row); - self.generate_permutation_unit(&mut row); + Self::generate_permutation_unit(&mut row); let mut trace = Vec::with_capacity(MIN_TRACE_ROWS); loop { let mut next_row = [F::ZERO; NUM_COLUMNS]; self.generate_next_row_core_registers(&row, &mut next_row); - self.generate_permutation_unit(&mut next_row); + Self::generate_permutation_unit(&mut next_row); trace.push(row); row = next_row; @@ -66,7 +66,7 @@ impl, const D: usize> Stark for SystemZero, { self.eval_core_registers(vars, yield_constr); - self.eval_permutation_unit(vars, yield_constr); + Self::eval_permutation_unit(vars, yield_constr); todo!() } @@ -77,7 +77,7 @@ impl, const D: usize> Stark for SystemZero, ) { self.eval_core_registers_recursively(builder, vars, yield_constr); - self.eval_permutation_unit_recursively(builder, vars, yield_constr); + Self::eval_permutation_unit_recursively(builder, vars, yield_constr); todo!() } From a51c517b5fb6cd64dd421598fcda7c2aaae455a5 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Sun, 6 Feb 2022 12:07:35 -0800 Subject: [PATCH 117/143] Update doc --- plonky2.pdf | Bin 215152 -> 215153 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/plonky2.pdf b/plonky2.pdf index 7aba5eb70423d2fd9e24206dcef989a107c38c98..299d1724e96e442aaff59c0502759b287e8698fc 100644 GIT binary patch delta 4524 zcmV;d5mWB)kPY#W4Ui=QGccEdN&zX6Pz!%D5OFDry7etZo1zzVx2b^w+xCH$7@LSh zdMP@A_v<^up%W*YY>NQla7Yf%%sCvHXWNWtKVFpZpJ!!Oc0DU)mx;1xqHVKbzW8*( zTHf+LOGEK|is$04I-2##!C5abI1W$oi_--FlN_0JbP!Tw?WHRQj;-)ZN6jsFiIXH6q&1Sp!G)t$(iO0rrs1n|S;?A=j( zix?piY-vgXktFMR%Hev|cDY)@s;IcgZ&qclhl5|t{Yo^sChwKs9%gOP6Bcqkn6M-3 z!>{ju`-MzpM>4K&e!?r?U00BTWR-txL@c6g7_T6es%Ek;6ULQjb2lBg&cb21;-mJC zto5n`IBJgYF)utTAc?AfK>)ZLxJE*fl1bT|a_dB{H~ygf5hJ#Fb4if}7vdRlrxD63 zvJynX`^dX!S=+Z+AtmD}>eXfmUJc=`^X2*ZqO5>E2mn+G(NTmyS6zj*m5_hlsF%T; z&8iUH$(2|GsC5b4SoDhA%=<}47P>sBV4S7h?v}>EANRp+z1r{0Ku6Sxw+?wr8emm} zWvW;$Qxy}JeI4(@QP2CGS`2dCH@QpRTapP_{!t?lG`a>i?_&~fF$j*Wil*ZE`*c=O zDA+XvxxmC|0uq0uO#@`!KgG9AFAVuU~rulJK0^jCHRF^hz}6@l%jv>fN<4GSh%p5&+&yo z=cs#r_U^H{#m)rBPLR-6v9ldt-gkbTpcc`l76Efvy~g{UncOBugBOyB9r0U;faoP@ zJI0rTY`G*3Xx^aJk(>(_8M_)`LhabJGyyGkI3jcG*=K{kkR|Kun!b>2!y1lXIWW@e zDPR7=_!p3kPzSS71l|JyHJ5=(0VsdPSxa-|wh_MTSIliWl^vJ>L4r(G4v7=lPU7`O zt12J7K2RJ;gNQ@U@*ze3_31`8Ipm0=mA9O@R3!jSJR03!zcAtIZWXS+xsdQbT*;M8 zGa+@lQZf_Dm{qaA_;evmXu@p8&1OEtb$(XtyYWB>MW%3bY>xYHh66T}6aIg4eeu<$ zj#f$wlN!0YzJ12W^>+1B@Ykz9UtX_&zW)0~$ZY@Z`X$c)1%JcL)Akh%#VcuqlCegH zGU%?7REbn;;zq7FQmQ!ku3ASj=$d-Hfp*&$ovSP4@~g`TPLLvtV})CUa7H3j%IkmJ zxzetIq=IheJ5k`aHYZYjOYMIF_f_dQF6#XOdM2n_=MVDKZQj|Iwz$cQHO#-KQCH7S zP{I4J-_w`Ub+*a7zTpo)v0zas%^geU>YEFH)2YmX@bg<_Ar9~;6YQLQaMc}7!aGVW zv`*-bhb9Mc6nVMc=uicXE&8VQTefu;ztw;ofickL#dm#TjIE}dFg(!TTo_b0#{MX_pmhTTfDCC 
z7EeWqlE*%(2wPj0?f|oSM&A&3JnIEHt7meCTWXaJP{HNh&hbW%xNFXXoEde)-J1?b z&{@3X9=2<{y0W0hNF{%P>u?)|H?u_XcY(Idn>$P2zJGU3wAqbtj@tv4wkKPSs&^fV z{%M`cKzL@rs^U1CbM$?UIMgyjj9B8lgf%Jv7PVHvw|QC9gck%DJrf`;+1hvnZd!rd%g=#aYxP||;*gku%hD!(ZSHQEGw zykAdvCQ1unmjOV#q45Mm`UFETMytklWDp0SyDj1+Gr@=crnR4XmLq-kaH*Sn-lBB( zWHVEE#Kil%8b3X(;}}gblF>^t*9$U_B-nKC$}M$!#$-i9ROOqN`gu|GB#kmBV?`>d z>Tb|~Bx4XicvgQ)p9>4686*iXUORitJll@FfTmcAfw&)1kRc*B1aZgTG^pohY=<<`R-Md{59 zcTNM|!UB+VkeB@+a-fmQTb^-*F$}hKWk>#~1(&j*#}<4`4(6ibY~TXn4`}@Wn82tO zr+gEff^x^Us+m9`5h=jf5I`oI(ZI*u>Fhz^9z|fYd@KA^xTfe`=YwN}D_UN|*zf?l zZ1Hm_lN~mIQg4+Y*WFHU)tpRFa}EJ1-3+9Z0{-KrsTo5Tr>KyD0bHBaH71cYYmQZ<{Xc(r_Ydm607SEpBjFMPl~)@CE09if61WF9^ox%&sJ0{-yaYtI0zzoL zr#`cZT}Y0~+D8jNs=4(#gARpyieR>R!;5aH%xkhx+n9gkOV;F{KJfG2WID6^PSfp_@Ju^~gHq!W( zHJSJaxJjr3iMdduf@>XwbO9VNfN*PZ%(^K3#Ih;?+Be z;a!c~gN5pwjE_RhCdovoqB98^x+7}`@58Cd(zcY8ZTk{LUN2o5yo!H+0DSeW9bdPv z9Mt3G6LB-SBKYOEEM}O54pe_wiDXeU_Z(xSio@eW zC-gCA)ESg2DaCZ8QE}xHWPUem79*9$4024<7PH$#Wa)T!|AH4E-xBz3uknv`8sEPk zp&GGEXQi2llJv!+pevIxDpLORoMn;3@?@JC4vAr(^F}mlQK?XGLajHMg{fV1CP_7r z#^|La_2iBsOXfy(;p-TbzdeIP2zkz+c@=|Z#-KV=HAHI4AiwWELSnSD%_0i46me$0 zKz(Cje98snbmGVRU66C`39kFfuYQGB7qVGB7qWHMjOb1GfT0 zL`F11FhVgyHZwvnG&nRjL`5?X3zqC!xVgB#ioX)!24i_o@7e@j6MtpM#g zX&ER-OQLO&R)9*h60}XyDo~A9inc*o18UJ~&|=a$P>)uN_KdUvG@{j^Jta+pCbTZJ zb<$?gg4T_;M%oJ6(1y@fN!vjO+A!J^(oWEYHiGt;v>WuGO`$y^?FD^k^JouA`@sO( z8rll!Ajp6rkhKuXm}crOf9WvLgH$;SCcy$1#=tn30Fz(}Xze{P4Q9Z7Fbn3uJW!Pf zU=b{VWuRBnJL_Zgm1@(dg}f`-Ox;-JT}Fx2Y)wJq(cCmpOXiTah>BF$2rdFBx=e}T>+9XvW|>VbyW2-2Vl zG=mn<3fe4uu2$uC3qQUo>acKQiaO28w-j}mtt~0)HoM+e)MNJRMp3Ut75^0VS+w$3 zQNKl3X+;ATeHu|TXwmoAiZT}cUQ;w=iEyE4*b<$86zP0f0?XhK904^)zX|;p^dmR} z+C6Gxi#4q0nFi$9G;nTI10~p8P9&WDB+i9Wma}QD`l67vS*@gvthpYbirEQ z@;*z$>iHDU#a(qY>$8KiUSMz>9^!R&)wS;0-agpB?Wv+*#U(8)0np~6j%I}V|w&)29x$aHallAeJ z55N9Qrm`a$*Ec`nmG7=ANI|knHh&@(Q8tWMkV;iE*_R1pA==zcr=7EK7_RtJdq>u~ z=m3tIBYeyY&k9JQ>R%8L-MpA%;5o-zEUSi!IM6t=s>vCRb+!)BNX`TViV^-Ox(W*{A%DkFFM~IT zvOBvHbpW+4fgp=sk(>E2=}1MF2NjI7wA*cI9Q^4J+|H}R-VAg^oyqHvyd(rxH4gWZZQas6|1J=`G<5?Q7G6o zyU56B0@8D$3JD+F7u;LdBg{GnGRr5F+w1R*M~_B`YS}n8S!s_Fg0tjwHf_g@Mlz!oVT8xeo^n_duHTIvqD;Ej9>4(p zsl1U%@LLzi@Aapn@oBdr-vmO|s{ujm6+S)f4dgMoHi zLAG2H2Q+Wc>PXH7i;P{3FrjvATAF|sJDiX?_UyAkpUIN-bxohiwqXs&PaPQP^^`CF z&G-WM^iaDBWwTWT+yj%aSSx=^bK}MlzUx=;wqRumV;;Z&smdX7BHKy4-e_6ngVzTL z9ElTw02dD_@~=<#bOQt_kSlLFaj8m}X$)ri@%3vME$$c5;+rc0|D%Oih%^g@P8U*S zp)_Vu?5;jtg(fmlw&2I=c#7ZoRk0t&10f1CiJRl&u>Wc}U^O}6FYAA+udcOPNFAEg zh{gKu1t06p;-}!RH-ElfFMnSD{VHO%|F*uw`M=sVXP3yVVkqWkXOY~bWk+n?EiMoSltb>2o4UvE8u)Hj7b#KGvDgh~O zXvar)m-9g(K?!a3>?pE)g@L^`B0qFv<` zmj>T@$M-<9EGG(+CY*K-tbn71{`~5 zApoC`3u}x{=!Gd17VJq-r@0N0$Zu0n7(yi}`m*!dKvLeeRMjDX9Azc^hQmpE6#vsn z{HJA5bYQZus5`E0_#I%PZz^V>Cd&2AcjrUHg}@%qRed^3FZ!thoNcSJ$X@EG>LZ2_IjO zAbFXZW@}U^U+cd0z9!X4v49lN;0hxiPe&(0=xR+R&(RB-c3S}Bwl4Xzv0lT8qy|Bo zg$Xkg`JP9aB#c$I(4m6=o;NAx8VP?GCMsU6CJS%G*B{8rVERxPeH>mUaTy@ea^3;< z5S6|v!P2dw%55SBB_I}q6I;#8I>6wTYs0$P_{wEcIqvAN9vYMLKJ+766WtVjXly2i zSe#Paf_G3JQEh~ishSb-6E&yYD8X}1!&0W9HmTQe%3YkC)vye8l#qsb;XQx1Sgm|t$EFj4hkV)$ujhRgx zLXA|`K56(_%};ML7*NKiL}r^e{Lw9Sn_UN0j-^f#y)cH>b}$w$%S(u0GtM+N5IF&3 zG#t`K_vfN#EPzKWa_`_6dMAH%GEnaFwi~v?T!&$I1aJp|hQM~(y#tpj3p2&7x8&CS z+7Ua@%0e2kukPFjML^25RYAdP!vru(Wm{j~8tlt+B2&3)0-kbEM9QEQe{LeC=gP>0 zMx=1QKlhr}#L)!OCR7Mi5t@v-+s{d$LxWo%R{64;`WRMNHeIh&n8<(e;Mpy+P-Mxl z;!mi9d8ZR;C`~r?xkR%d@+7M1C_VDIk5Lqz34lh6;q19X<1mfY^h_nsY^3o@YqIbU zAW5VHfhCcW!J7JQFxdgUXl1@mppYv?D6^B$MyRb@y3~k+JG#bzPGQft9UFRq0SoDP zZ_#(&!~X~lmnljt_zHip31|@khi@VxKOJp>R&*7Sb0Q^rb?Z7Jd1fI*WBx|htoURM z1@l#0U(m(SA)xz`at0`nv(hO33{Z?Q!-{)%ba&9WAz)BL!#5c)x4vp}haT1kiQ#&U 
z!h?k-oSctR%qq#kNUAe+8ipflhv>r<%G|b8n{E0MR9?>s4N-r^-vho{YRBUCm4mjt zd?s!tB0^k#%W8(!y?Gp4M?)3t@E}akaRavy4tdlgf*R0anepFYG7Ya_UWF zsH8krulE(YB&dHTS1KiS+X5^iMfwF6$&%mCBJt5a5ezXp8~vG}8$;;?F)j;}RR5kp zm=S^wop1(w&1)?SwKPXUd=60x6lCEGJCXOrre>G@aL_IM+S zzxjxhGyh*oj^Z%!-47}`o+#PZC+B2`1fCu0tp8=O;Y5Fhok*tCapV{?RU94eJ7J79 zqt2jJNhzlz&F2idI_2xLW;Ro4tRTlSZMM2i!YmzS_ZNBb;Yxw8_!@s#r|}y63AzzG zcUGE(DoHOM1w)aHS&{0emn?@#EKasL!XYv2YhFdGW{nE{CiHrfRan}oGexR_GDgo8 zsV6dqS#m|(7ru@`@!K;vq>z^knpZJsju@0@x&|eu4DvPalW+Z-Srljy#+ms7^No%1 zDHl+W!za+Vtia*oXej7Ue+d3BE_<4tgAEdg4H5x|4H5#k4H5&qFD@}LGB7wWFfcYV zI4v+ZConK4DGD!5Z)8MabY&nYL^?7sGBGhSFgP$UFg7zdxA#E5QSg$3@~gn?EAj&4zef+;s7d(wkWG0>l_U{M-2^3JjYDU z%?wSfgxnuM=wDHDb-tT?r|NdU>h7xhUWX9EzbJ$#SQva4h;|Bb`YDlt2<)OA5G6no z9HZ?M6@g-Kg0@Fg0#e`hB`#bCQ(zj*fLWlmH^Cg32e-fiSOm9$s@wrNunbm!UQO?; zkI`4EO_LUqdD%?eSm#|PiPUUOLF3WfG*C@i7LcztEuRVh@hh%S=T5dlCN8keNTd3*KgPw8oB2Y_z&cHdT9Lt7q?TWmG_P?xfm0z&% zepe~`!!f@nEo3jH6>|$cbs4?P!rZVflyiZey4;SR9izL9E+@*O84D|~wL)%}m77`1 z=5#FTvM9!M852u+tA%((%W$X5op{m*SX%`2YV2Rfa4y4PGtYt{3kP3RV2t~;8%u#U zw%U$=r5sDSzN0Yn{Ipi!Fvna@CmnM+=5jRxb2;YnCF<$(v6N#K$5)Q89A7z>a!w1+ zS$MUr@@zV1hdQovoZ~phIei@aIOBH4?M&K_!3j78jMmS=1#h4egagWVEWCZ8XXF8= zp5qsnH*%I3`>L5&g9-~DCzWPE8q|PVPzQ8>4(Z_0Nz(*0ycW<3+CV$#0G*)A!sl95 z?zV9GO;L}9-#Wk3y=GNgiu%kpa*F!R-s~wFF#GXK(V#`ue-sT_wE9!gutm>X6pdK) zX+qJcMc-d48nftXUD3ED!nvXeOY~kT()qFsR=^=R0&0$a6Z$XcM{owTd(`4WJ1q_* zmRSBkshIyl%(jXXAPMkfj3;CM264&~xwXQI)j0eI;9{e~mq Date: Sun, 6 Feb 2022 23:06:04 -0800 Subject: [PATCH 118/143] Add a `PolynomialValues::selector` method for convenience (#470) Also adds a test for `eval_l_1_and_l_last`. --- field/src/polynomial/mod.rs | 11 +++++++++++ starky/src/prover.rs | 13 +++---------- starky/src/stark_testing.rs | 12 ++---------- starky/src/verifier.rs | 24 ++++++++++++++++++++++++ 4 files changed, 40 insertions(+), 20 deletions(-) diff --git a/field/src/polynomial/mod.rs b/field/src/polynomial/mod.rs index 7fccb46e..4d6d55c2 100644 --- a/field/src/polynomial/mod.rs +++ b/field/src/polynomial/mod.rs @@ -26,6 +26,17 @@ impl PolynomialValues { PolynomialValues { values } } + pub fn zero(len: usize) -> Self { + Self::new(vec![F::ZERO; len]) + } + + /// Returns the polynomial whole value is one at the given index, and zero elsewhere. + pub fn selector(len: usize, index: usize) -> Self { + let mut result = Self::zero(len); + result.values[index] = F::ONE; + result + } + /// The number of values stored. pub fn len(&self) -> usize { self.values.len() diff --git a/starky/src/prover.rs b/starky/src/prover.rs index d8913b9d..35c30cbf 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -172,17 +172,10 @@ where let next_step = 1 << quotient_degree_bits; // Evaluation of the first Lagrange polynomial on the LDE domain. - let lagrange_first = { - let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); - evals.values[0] = F::ONE; - evals.lde_onto_coset(quotient_degree_bits) - }; + let lagrange_first = PolynomialValues::selector(degree, 0).lde_onto_coset(quotient_degree_bits); // Evaluation of the last Lagrange polynomial on the LDE domain. 
- let lagrange_last = { - let mut evals = PolynomialValues::new(vec![F::ZERO; degree]); - evals.values[degree - 1] = F::ONE; - evals.lde_onto_coset(quotient_degree_bits) - }; + let lagrange_last = + PolynomialValues::selector(degree, degree - 1).lde_onto_coset(quotient_degree_bits); let z_h_on_coset = ZeroPolyOnCoset::::new(degree_bits, quotient_degree_bits); diff --git a/starky/src/stark_testing.rs b/starky/src/stark_testing.rs index 8d41e645..222ebf39 100644 --- a/starky/src/stark_testing.rs +++ b/starky/src/stark_testing.rs @@ -27,16 +27,8 @@ where let size = trace_ldes.len(); let public_inputs = F::rand_arr::<{ S::PUBLIC_INPUTS }>(); - let lagrange_first = { - let mut evals = PolynomialValues::new(vec![F::ZERO; WITNESS_SIZE]); - evals.values[0] = F::ONE; - evals.lde(rate_bits) - }; - let lagrange_last = { - let mut evals = PolynomialValues::new(vec![F::ZERO; WITNESS_SIZE]); - evals.values[WITNESS_SIZE - 1] = F::ONE; - evals.lde(rate_bits) - }; + let lagrange_first = PolynomialValues::selector(WITNESS_SIZE, 0).lde(rate_bits); + let lagrange_last = PolynomialValues::selector(WITNESS_SIZE, WITNESS_SIZE - 1).lde(rate_bits); let last = F::primitive_root_of_unity(log2_strict(WITNESS_SIZE)).inverse(); let subgroup = diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index bb0634f5..91a51bed 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -148,3 +148,27 @@ fn recover_degree, C: GenericConfig, cons let lde_bits = config.fri_config.cap_height + initial_merkle_proof.siblings.len(); 1 << (lde_bits - config.fri_config.rate_bits) } + +#[cfg(test)] +mod tests { + use plonky2::field::field_types::Field; + use plonky2::field::goldilocks_field::GoldilocksField; + use plonky2::field::polynomial::PolynomialValues; + + use crate::verifier::eval_l_1_and_l_last; + + #[test] + fn test_eval_l_1_and_l_last() { + type F = GoldilocksField; + let log_n = 5; + let n = 1 << log_n; + + let x = F::rand(); // challenge point + let expected_l_first_x = PolynomialValues::selector(n, 0).ifft().eval(x); + let expected_l_last_x = PolynomialValues::selector(n, n - 1).ifft().eval(x); + + let (l_first_x, l_last_x) = eval_l_1_and_l_last(log_n, x); + assert_eq!(l_first_x, expected_l_first_x); + assert_eq!(l_last_x, expected_l_last_x); + } +} From a43e138f57c7f01d16ebcf77ce4befa2c73a7447 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Sun, 6 Feb 2022 23:13:12 -0800 Subject: [PATCH 119/143] Move some FRI stuff into the FRI module (#471) I think it would be nice to eventually have separate crates for IOP, FRI, PLONK, etc. This is one step toward that. 
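For reference, the relocated `Challenger::fri_challenges` keeps its signature and its
transcript order (alpha, then one beta per commit-phase Merkle cap, then the
proof-of-work response, then the query indices), so its call sites are unchanged; the
witness helper, by contrast, becomes the free function
`fri::witness_util::set_fri_proof_target`. A rough call-site sketch, assuming a
`challenger`, a `FriProof` value `proof`, a `common_data`, and the `C`/`D` parameters
are already in scope (placeholder names, not code added by this patch):

    // Sketch only: derive the FRI challenges from the current transcript state.
    let fri_challenges = challenger.fri_challenges::<C, D>(
        &proof.commit_phase_merkle_caps, // one Merkle cap per FRI reduction
        &proof.final_poly,               // final low-degree polynomial
        proof.pow_witness,               // proof-of-work witness
        common_data.degree_bits,
        &common_data.config.fri_config,
    );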
--- plonky2/src/fri/challenges.rs | 81 +++++++++++++++++++++++++++++++++ plonky2/src/fri/mod.rs | 2 + plonky2/src/fri/witness_util.rs | 71 +++++++++++++++++++++++++++++ plonky2/src/iop/challenger.rs | 71 +---------------------------- plonky2/src/iop/witness.rs | 66 +-------------------------- 5 files changed, 157 insertions(+), 134 deletions(-) create mode 100644 plonky2/src/fri/challenges.rs create mode 100644 plonky2/src/fri/witness_util.rs diff --git a/plonky2/src/fri/challenges.rs b/plonky2/src/fri/challenges.rs new file mode 100644 index 00000000..ad1cfa5c --- /dev/null +++ b/plonky2/src/fri/challenges.rs @@ -0,0 +1,81 @@ +use plonky2_field::extension_field::Extendable; +use plonky2_field::polynomial::PolynomialCoeffs; + +use crate::fri::proof::FriChallenges; +use crate::fri::structure::{FriOpenings, FriOpeningsTarget}; +use crate::fri::FriConfig; +use crate::hash::hash_types::RichField; +use crate::hash::merkle_tree::MerkleCap; +use crate::iop::challenger::{Challenger, RecursiveChallenger}; +use crate::plonk::config::{AlgebraicHasher, GenericConfig, Hasher}; + +impl> Challenger { + pub fn observe_openings(&mut self, openings: &FriOpenings) + where + F: RichField + Extendable, + { + for v in &openings.batches { + self.observe_extension_elements(&v.values); + } + } + + pub fn fri_challenges, const D: usize>( + &mut self, + commit_phase_merkle_caps: &[MerkleCap], + final_poly: &PolynomialCoeffs, + pow_witness: F, + degree_bits: usize, + config: &FriConfig, + ) -> FriChallenges + where + F: RichField + Extendable, + { + let num_fri_queries = config.num_query_rounds; + let lde_size = 1 << (degree_bits + config.rate_bits); + // Scaling factor to combine polynomials. + let fri_alpha = self.get_extension_challenge::(); + + // Recover the random betas used in the FRI reductions. 
+ let fri_betas = commit_phase_merkle_caps + .iter() + .map(|cap| { + self.observe_cap(cap); + self.get_extension_challenge::() + }) + .collect(); + + self.observe_extension_elements(&final_poly.coeffs); + + let fri_pow_response = C::InnerHasher::hash_no_pad( + &self + .get_hash() + .elements + .iter() + .copied() + .chain(Some(pow_witness)) + .collect::>(), + ) + .elements[0]; + + let fri_query_indices = (0..num_fri_queries) + .map(|_| self.get_challenge().to_canonical_u64() as usize % lde_size) + .collect(); + + FriChallenges { + fri_alpha, + fri_betas, + fri_pow_response, + fri_query_indices, + } + } +} + +impl, H: AlgebraicHasher, const D: usize> + RecursiveChallenger +{ + pub fn observe_openings(&mut self, openings: &FriOpeningsTarget) { + for v in &openings.batches { + self.observe_extension_elements(&v.values); + } + } +} diff --git a/plonky2/src/fri/mod.rs b/plonky2/src/fri/mod.rs index 573a189b..a0cd428b 100644 --- a/plonky2/src/fri/mod.rs +++ b/plonky2/src/fri/mod.rs @@ -1,5 +1,6 @@ use crate::fri::reduction_strategies::FriReductionStrategy; +mod challenges; pub mod oracle; pub mod proof; pub mod prover; @@ -7,6 +8,7 @@ pub mod recursive_verifier; pub mod reduction_strategies; pub mod structure; pub mod verifier; +pub mod witness_util; #[derive(Debug, Clone, Eq, PartialEq)] pub struct FriConfig { diff --git a/plonky2/src/fri/witness_util.rs b/plonky2/src/fri/witness_util.rs new file mode 100644 index 00000000..70aebd03 --- /dev/null +++ b/plonky2/src/fri/witness_util.rs @@ -0,0 +1,71 @@ +use itertools::Itertools; +use plonky2_field::extension_field::Extendable; + +use crate::fri::proof::{FriProof, FriProofTarget}; +use crate::hash::hash_types::RichField; +use crate::iop::witness::Witness; +use crate::plonk::config::AlgebraicHasher; + +/// Set the targets in a `FriProofTarget` to their corresponding values in a `FriProof`. 
+pub fn set_fri_proof_target( + witness: &mut W, + fri_proof: &FriProof, + fri_proof_target: &FriProofTarget, +) where + F: RichField + Extendable, + W: Witness + ?Sized, + H: AlgebraicHasher, +{ + witness.set_target(fri_proof_target.pow_witness, fri_proof.pow_witness); + + for (&t, &x) in fri_proof_target + .final_poly + .0 + .iter() + .zip_eq(&fri_proof.final_poly.coeffs) + { + witness.set_extension_target(t, x); + } + + for (t, x) in fri_proof_target + .commit_phase_merkle_caps + .iter() + .zip_eq(&fri_proof.commit_phase_merkle_caps) + { + witness.set_cap_target(t, x); + } + + for (qt, q) in fri_proof_target + .query_round_proofs + .iter() + .zip_eq(&fri_proof.query_round_proofs) + { + for (at, a) in qt + .initial_trees_proof + .evals_proofs + .iter() + .zip_eq(&q.initial_trees_proof.evals_proofs) + { + for (&t, &x) in at.0.iter().zip_eq(&a.0) { + witness.set_target(t, x); + } + for (&t, &x) in at.1.siblings.iter().zip_eq(&a.1.siblings) { + witness.set_hash_target(t, x); + } + } + + for (st, s) in qt.steps.iter().zip_eq(&q.steps) { + for (&t, &x) in st.evals.iter().zip_eq(&s.evals) { + witness.set_extension_target(t, x); + } + for (&t, &x) in st + .merkle_proof + .siblings + .iter() + .zip_eq(&s.merkle_proof.siblings) + { + witness.set_hash_target(t, x); + } + } + } +} diff --git a/plonky2/src/iop/challenger.rs b/plonky2/src/iop/challenger.rs index c85f30d1..1519f6ec 100644 --- a/plonky2/src/iop/challenger.rs +++ b/plonky2/src/iop/challenger.rs @@ -2,11 +2,7 @@ use std::convert::TryInto; use std::marker::PhantomData; use plonky2_field::extension_field::{Extendable, FieldExtension}; -use plonky2_field::polynomial::PolynomialCoeffs; -use crate::fri::proof::FriChallenges; -use crate::fri::structure::{FriOpenings, FriOpeningsTarget}; -use crate::fri::FriConfig; use crate::hash::hash_types::RichField; use crate::hash::hash_types::{HashOut, HashOutTarget, MerkleCapTarget}; use crate::hash::hashing::{PlonkyPermutation, SPONGE_RATE, SPONGE_WIDTH}; @@ -14,7 +10,7 @@ use crate::hash::merkle_tree::MerkleCap; use crate::iop::ext_target::ExtensionTarget; use crate::iop::target::Target; use crate::plonk::circuit_builder::CircuitBuilder; -use crate::plonk::config::{AlgebraicHasher, GenericConfig, GenericHashOut, Hasher}; +use crate::plonk::config::{AlgebraicHasher, GenericHashOut, Hasher}; /// Observes prover messages, and generates challenges by hashing the transcript, a la Fiat-Shamir. #[derive(Clone)] @@ -72,15 +68,6 @@ impl> Challenger { } } - pub fn observe_openings(&mut self, openings: &FriOpenings) - where - F: RichField + Extendable, - { - for v in &openings.batches { - self.observe_extension_elements(&v.values); - } - } - pub fn observe_hash>(&mut self, hash: OH::Hash) { self.observe_elements(&hash.to_vec()) } @@ -138,56 +125,6 @@ impl> Challenger { .collect() } - pub fn fri_challenges, const D: usize>( - &mut self, - commit_phase_merkle_caps: &[MerkleCap], - final_poly: &PolynomialCoeffs, - pow_witness: F, - degree_bits: usize, - config: &FriConfig, - ) -> FriChallenges - where - F: RichField + Extendable, - { - let num_fri_queries = config.num_query_rounds; - let lde_size = 1 << (degree_bits + config.rate_bits); - // Scaling factor to combine polynomials. - let fri_alpha = self.get_extension_challenge::(); - - // Recover the random betas used in the FRI reductions. 
- let fri_betas = commit_phase_merkle_caps - .iter() - .map(|cap| { - self.observe_cap(cap); - self.get_extension_challenge::() - }) - .collect(); - - self.observe_extension_elements(&final_poly.coeffs); - - let fri_pow_response = C::InnerHasher::hash_no_pad( - &self - .get_hash() - .elements - .iter() - .copied() - .chain(Some(pow_witness)) - .collect::>(), - ) - .elements[0]; - - let fri_query_indices = (0..num_fri_queries) - .map(|_| self.get_challenge().to_canonical_u64() as usize % lde_size) - .collect(); - - FriChallenges { - fri_alpha, - fri_betas, - fri_pow_response, - fri_query_indices, - } - } - /// Absorb any buffered inputs. After calling this, the input buffer will be empty. fn absorb_buffered_inputs(&mut self) { if self.input_buffer.is_empty() { @@ -251,12 +188,6 @@ impl, H: AlgebraicHasher, const D: usize> } } - pub fn observe_openings(&mut self, openings: &FriOpeningsTarget) { - for v in &openings.batches { - self.observe_extension_elements(&v.values); - } - } - pub fn observe_hash(&mut self, hash: &HashOutTarget) { self.observe_elements(&hash.elements) } diff --git a/plonky2/src/iop/witness.rs b/plonky2/src/iop/witness.rs index efe4d911..4efd35d3 100644 --- a/plonky2/src/iop/witness.rs +++ b/plonky2/src/iop/witness.rs @@ -5,7 +5,7 @@ use num::{BigUint, FromPrimitive, Zero}; use plonky2_field::extension_field::{Extendable, FieldExtension}; use plonky2_field::field_types::Field; -use crate::fri::proof::{FriProof, FriProofTarget}; +use crate::fri::witness_util::set_fri_proof_target; use crate::gadgets::arithmetic_u32::U32Target; use crate::gadgets::biguint::BigUintTarget; use crate::gadgets::nonnative::NonNativeTarget; @@ -258,69 +258,7 @@ pub trait Witness { self.set_extension_target(t, x); } - self.set_fri_proof_target(&proof.opening_proof, &proof_target.opening_proof); - } - - /// Set the targets in a `FriProofTarget` to their corresponding values in a `FriProof`. 
- fn set_fri_proof_target, const D: usize>( - &mut self, - fri_proof: &FriProof, - fri_proof_target: &FriProofTarget, - ) where - F: RichField + Extendable, - { - self.set_target(fri_proof_target.pow_witness, fri_proof.pow_witness); - - for (&t, &x) in fri_proof_target - .final_poly - .0 - .iter() - .zip_eq(&fri_proof.final_poly.coeffs) - { - self.set_extension_target(t, x); - } - - for (t, x) in fri_proof_target - .commit_phase_merkle_caps - .iter() - .zip_eq(&fri_proof.commit_phase_merkle_caps) - { - self.set_cap_target(t, x); - } - - for (qt, q) in fri_proof_target - .query_round_proofs - .iter() - .zip_eq(&fri_proof.query_round_proofs) - { - for (at, a) in qt - .initial_trees_proof - .evals_proofs - .iter() - .zip_eq(&q.initial_trees_proof.evals_proofs) - { - for (&t, &x) in at.0.iter().zip_eq(&a.0) { - self.set_target(t, x); - } - for (&t, &x) in at.1.siblings.iter().zip_eq(&a.1.siblings) { - self.set_hash_target(t, x); - } - } - - for (st, s) in qt.steps.iter().zip_eq(&q.steps) { - for (&t, &x) in st.evals.iter().zip_eq(&s.evals) { - self.set_extension_target(t, x); - } - for (&t, &x) in st - .merkle_proof - .siblings - .iter() - .zip_eq(&s.merkle_proof.siblings) - { - self.set_hash_target(t, x); - } - } - } + set_fri_proof_target(self, &proof.opening_proof, &proof_target.opening_proof); } fn set_wire(&mut self, wire: Wire, value: F) { From 0a96d33f784ae6d3cf51c955fd469fbb348e600a Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 7 Feb 2022 08:33:13 +0100 Subject: [PATCH 120/143] Standardize `set_*` method parameters order --- plonky2/src/fri/witness_util.rs | 2 +- plonky2/src/iop/witness.rs | 8 ++++---- plonky2/src/plonk/recursive_verifier.rs | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plonky2/src/fri/witness_util.rs b/plonky2/src/fri/witness_util.rs index 70aebd03..741f839d 100644 --- a/plonky2/src/fri/witness_util.rs +++ b/plonky2/src/fri/witness_util.rs @@ -9,8 +9,8 @@ use crate::plonk::config::AlgebraicHasher; /// Set the targets in a `FriProofTarget` to their corresponding values in a `FriProof`. pub fn set_fri_proof_target( witness: &mut W, - fri_proof: &FriProof, fri_proof_target: &FriProofTarget, + fri_proof: &FriProof, ) where F: RichField + Extendable, W: Witness + ?Sized, diff --git a/plonky2/src/iop/witness.rs b/plonky2/src/iop/witness.rs index 4efd35d3..43dc752d 100644 --- a/plonky2/src/iop/witness.rs +++ b/plonky2/src/iop/witness.rs @@ -162,8 +162,8 @@ pub trait Witness { /// `ProofWithPublicInputs`. fn set_proof_with_pis_target, const D: usize>( &mut self, - proof_with_pis: &ProofWithPublicInputs, proof_with_pis_target: &ProofWithPublicInputsTarget, + proof_with_pis: &ProofWithPublicInputs, ) where F: RichField + Extendable, C::Hasher: AlgebraicHasher, @@ -182,14 +182,14 @@ pub trait Witness { self.set_target(pi_t, pi); } - self.set_proof_target(proof, pt); + self.set_proof_target(pt, proof); } /// Set the targets in a `ProofTarget` to their corresponding values in a `Proof`. 
fn set_proof_target, const D: usize>( &mut self, - proof: &Proof, proof_target: &ProofTarget, + proof: &Proof, ) where F: RichField + Extendable, C::Hasher: AlgebraicHasher, @@ -258,7 +258,7 @@ pub trait Witness { self.set_extension_target(t, x); } - set_fri_proof_target(self, &proof.opening_proof, &proof_target.opening_proof); + set_fri_proof_target(self, &proof_target.opening_proof, &proof.opening_proof); } fn set_wire(&mut self, wire: Wire, value: F) { diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index cb2bc1e0..8795ef41 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -382,7 +382,7 @@ mod tests { let mut builder = CircuitBuilder::::new(config.clone()); let mut pw = PartialWitness::new(); let pt = builder.add_virtual_proof_with_pis(&inner_cd); - pw.set_proof_with_pis_target(&inner_proof, &pt); + pw.set_proof_with_pis_target(&pt, &inner_proof); let inner_data = VerifierCircuitTarget { constants_sigmas_cap: builder.add_virtual_cap(inner_cd.config.fri_config.cap_height), From b40827e65540cafb7be30f85ab367e0e6b01534f Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Sun, 6 Feb 2022 23:35:46 -0800 Subject: [PATCH 121/143] `trim_to_len` helper function (#472) * trim_to_len helper function Seems a little nicer IMO to only remove a certain number of zeros, vs removing all trailing zeros then re-adding some. * PR feedback --- field/src/polynomial/mod.rs | 13 +++++++++++-- plonky2/src/plonk/prover.rs | 7 +++---- starky/src/prover.rs | 5 ++--- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/field/src/polynomial/mod.rs b/field/src/polynomial/mod.rs index 4d6d55c2..ac3beb8e 100644 --- a/field/src/polynomial/mod.rs +++ b/field/src/polynomial/mod.rs @@ -197,12 +197,21 @@ impl PolynomialCoeffs { poly } - /// Removes leading zero coefficients. + /// Removes any leading zero coefficients. pub fn trim(&mut self) { self.coeffs.truncate(self.degree_plus_one()); } - /// Removes leading zero coefficients. + /// Removes some leading zero coefficients, such that a desired length is reached. Fails if a + /// nonzero coefficient is encountered before then. + pub fn trim_to_len(&mut self, len: usize) -> Result<()> { + ensure!(self.len() >= len); + ensure!(self.coeffs[len..].iter().all(F::is_zero)); + self.coeffs.truncate(len); + Ok(()) + } + + /// Removes any leading zero coefficients. pub fn trimmed(&self) -> Self { let coeffs = self.coeffs[..self.degree_plus_one()].to_vec(); Self { coeffs } diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index 7a172aff..d49014f0 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -148,11 +148,10 @@ pub(crate) fn prove, C: GenericConfig, co quotient_polys .into_par_iter() .flat_map(|mut quotient_poly| { - quotient_poly.trim(); - quotient_poly.pad(quotient_degree).expect( - "Quotient has failed, the vanishing polynomial is not divisible by `Z_H", + quotient_poly.trim_to_len(quotient_degree).expect( + "Quotient has failed, the vanishing polynomial is not divisible by Z_H", ); - // Split t into degree-n chunks. + // Split quotient into degree-n chunks. 
quotient_poly.chunks(degree) }) .collect() diff --git a/starky/src/prover.rs b/starky/src/prover.rs index 35c30cbf..de97ecce 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -80,10 +80,9 @@ where let all_quotient_chunks = quotient_polys .into_par_iter() .flat_map(|mut quotient_poly| { - quotient_poly.trim(); quotient_poly - .pad(degree * stark.quotient_degree_factor()) - .expect("Quotient has failed, the vanishing polynomial is not divisible by `Z_H"); + .trim_to_len(degree * stark.quotient_degree_factor()) + .expect("Quotient has failed, the vanishing polynomial is not divisible by Z_H"); // Split quotient into degree-n chunks. quotient_poly.chunks(degree) }) From afe89a61f4f74f108046c5990fbbd4f903d5d178 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 7 Feb 2022 11:23:49 +0100 Subject: [PATCH 122/143] Add methods --- plonky2/src/fri/challenges.rs | 54 ++++++++++++++++++++++- plonky2/src/fri/proof.rs | 7 +++ plonky2/src/plonk/get_challenges.rs | 68 ++++++++++++++++++++++++++--- plonky2/src/plonk/proof.rs | 12 ++++- plonky2/src/plonk/verifier.rs | 6 +-- 5 files changed, 136 insertions(+), 11 deletions(-) diff --git a/plonky2/src/fri/challenges.rs b/plonky2/src/fri/challenges.rs index ad1cfa5c..82438383 100644 --- a/plonky2/src/fri/challenges.rs +++ b/plonky2/src/fri/challenges.rs @@ -1,12 +1,16 @@ use plonky2_field::extension_field::Extendable; use plonky2_field::polynomial::PolynomialCoeffs; -use crate::fri::proof::FriChallenges; +use crate::fri::proof::{FriChallenges, FriChallengesTarget}; use crate::fri::structure::{FriOpenings, FriOpeningsTarget}; use crate::fri::FriConfig; -use crate::hash::hash_types::RichField; +use crate::gadgets::polynomial::PolynomialCoeffsExtTarget; +use crate::hash::hash_types::{MerkleCapTarget, RichField}; use crate::hash::merkle_tree::MerkleCap; use crate::iop::challenger::{Challenger, RecursiveChallenger}; +use crate::iop::target::Target; +use crate::plonk::circuit_builder::CircuitBuilder; +use crate::plonk::circuit_data::CommonCircuitData; use crate::plonk::config::{AlgebraicHasher, GenericConfig, Hasher}; impl> Challenger { @@ -78,4 +82,50 @@ impl, H: AlgebraicHasher, const D: usize> self.observe_extension_elements(&v.values); } } + + pub fn fri_challenges>( + &mut self, + builder: &mut CircuitBuilder, + commit_phase_merkle_caps: &[MerkleCapTarget], + final_poly: &PolynomialCoeffsExtTarget, + pow_witness: Target, + inner_common_data: &CommonCircuitData, + ) -> FriChallengesTarget { + let num_fri_queries = inner_common_data.config.fri_config.num_query_rounds; + // Scaling factor to combine polynomials. + let fri_alpha = self.get_extension_challenge(builder); + + // Recover the random betas used in the FRI reductions. 
+ let fri_betas = commit_phase_merkle_caps + .iter() + .map(|cap| { + self.observe_cap(cap); + self.get_extension_challenge(builder) + }) + .collect(); + + self.observe_extension_elements(&final_poly.0); + + let pow_inputs = self + .get_hash(builder) + .elements + .iter() + .copied() + .chain(Some(pow_witness)) + .collect(); + let fri_pow_response = builder + .hash_n_to_hash_no_pad::(pow_inputs) + .elements[0]; + + let fri_query_indices = (0..num_fri_queries) + .map(|_| self.get_challenge(builder)) + .collect(); + + FriChallengesTarget { + fri_alpha, + fri_betas, + fri_pow_response, + fri_query_indices, + } + } } diff --git a/plonky2/src/fri/proof.rs b/plonky2/src/fri/proof.rs index 1f9e6b16..44f74cba 100644 --- a/plonky2/src/fri/proof.rs +++ b/plonky2/src/fri/proof.rs @@ -375,3 +375,10 @@ pub struct FriChallenges, const D: usize> { // Indices at which the oracle is queried in FRI. pub fri_query_indices: Vec, } + +pub struct FriChallengesTarget { + pub fri_alpha: ExtensionTarget, + pub fri_betas: Vec>, + pub fri_pow_response: Target, + pub fri_query_indices: Vec, +} diff --git a/plonky2/src/plonk/get_challenges.rs b/plonky2/src/plonk/get_challenges.rs index fb1517e4..59701a55 100644 --- a/plonky2/src/plonk/get_challenges.rs +++ b/plonky2/src/plonk/get_challenges.rs @@ -5,14 +5,17 @@ use plonky2_field::polynomial::PolynomialCoeffs; use crate::fri::proof::{CompressedFriProof, FriChallenges, FriProof}; use crate::fri::verifier::{compute_evaluation, fri_combine_initial, PrecomputedReducedOpenings}; -use crate::hash::hash_types::RichField; +use crate::gadgets::polynomial::PolynomialCoeffsExtTarget; +use crate::hash::hash_types::{HashOutTarget, MerkleCapTarget, RichField}; use crate::hash::merkle_tree::MerkleCap; -use crate::iop::challenger::Challenger; +use crate::iop::challenger::{Challenger, RecursiveChallenger}; +use crate::iop::target::Target; +use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::CommonCircuitData; -use crate::plonk::config::{GenericConfig, Hasher}; +use crate::plonk::config::{AlgebraicHasher, GenericConfig, Hasher}; use crate::plonk::proof::{ - CompressedProof, CompressedProofWithPublicInputs, FriInferredElements, OpeningSet, Proof, - ProofChallenges, ProofWithPublicInputs, + CompressedProof, CompressedProofWithPublicInputs, FriInferredElements, OpeningSet, + OpeningSetTarget, Proof, ProofChallenges, ProofChallengesTarget, ProofWithPublicInputs, }; use crate::util::reverse_bits; @@ -219,3 +222,58 @@ impl, C: GenericConfig, const D: usize> FriInferredElements(fri_inferred_elements) } } + +impl, const D: usize> CircuitBuilder { + pub(crate) fn get_challenges>( + &mut self, + public_inputs_hash: HashOutTarget, + wires_cap: &MerkleCapTarget, + plonk_zs_partial_products_cap: &MerkleCapTarget, + quotient_polys_cap: &MerkleCapTarget, + openings: &OpeningSetTarget, + commit_phase_merkle_caps: &[MerkleCapTarget], + final_poly: &PolynomialCoeffsExtTarget, + pow_witness: Target, + inner_common_data: &CommonCircuitData, + ) -> ProofChallengesTarget + where + C::Hasher: AlgebraicHasher, + { + let config = &inner_common_data.config; + let num_challenges = config.num_challenges; + + let mut challenger = RecursiveChallenger::::new(self); + + // Observe the instance. 
+ let digest = + HashOutTarget::from_vec(self.constants(&inner_common_data.circuit_digest.elements)); + challenger.observe_hash(&digest); + challenger.observe_hash(&public_inputs_hash); + + challenger.observe_cap(wires_cap); + let plonk_betas = challenger.get_n_challenges(self, num_challenges); + let plonk_gammas = challenger.get_n_challenges(self, num_challenges); + + challenger.observe_cap(plonk_zs_partial_products_cap); + let plonk_alphas = challenger.get_n_challenges(self, num_challenges); + + challenger.observe_cap(quotient_polys_cap); + let plonk_zeta = challenger.get_extension_challenge(self); + + challenger.observe_openings(&openings.to_fri_openings()); + + ProofChallengesTarget { + plonk_betas, + plonk_gammas, + plonk_alphas, + plonk_zeta, + fri_challenges: challenger.fri_challenges::( + self, + commit_phase_merkle_caps, + final_poly, + pow_witness, + inner_common_data, + ), + } + } +} diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index 2cf0a885..a9bb1210 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -3,7 +3,9 @@ use rayon::prelude::*; use serde::{Deserialize, Serialize}; use crate::fri::oracle::PolynomialBatch; -use crate::fri::proof::{CompressedFriProof, FriChallenges, FriProof, FriProofTarget}; +use crate::fri::proof::{ + CompressedFriProof, FriChallenges, FriChallengesTarget, FriProof, FriProofTarget, +}; use crate::fri::structure::{ FriOpeningBatch, FriOpeningBatchTarget, FriOpenings, FriOpeningsTarget, }; @@ -242,6 +244,14 @@ pub(crate) struct ProofChallenges, const D: usize> pub fri_challenges: FriChallenges, } +pub(crate) struct ProofChallengesTarget { + pub plonk_betas: Vec, + pub plonk_gammas: Vec, + pub plonk_alphas: Vec, + pub plonk_zeta: ExtensionTarget, + pub fri_challenges: FriChallengesTarget, +} + /// Coset elements that can be inferred in the FRI reduction steps. pub(crate) struct FriInferredElements, const D: usize>( pub Vec, diff --git a/plonky2/src/plonk/verifier.rs b/plonky2/src/plonk/verifier.rs index 46d41bfe..da5c9718 100644 --- a/plonky2/src/plonk/verifier.rs +++ b/plonky2/src/plonk/verifier.rs @@ -30,9 +30,9 @@ pub(crate) fn verify_with_challenges< verifier_data: &VerifierOnlyCircuitData, common_data: &CommonCircuitData, ) -> Result<()> { - assert_eq!( - proof_with_pis.public_inputs.len(), - common_data.num_public_inputs + ensure!( + proof_with_pis.public_inputs.len() == common_data.num_public_inputs, + "Number of public inputs doesn't match circuit data." 
); let public_inputs_hash = &proof_with_pis.get_public_inputs_hash(); From 61fcc9048e6a78c0863c975b795e50b5e02f1985 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 7 Feb 2022 11:47:58 +0100 Subject: [PATCH 123/143] Working --- plonky2/src/fri/recursive_verifier.rs | 59 ++++++-------------- plonky2/src/plonk/get_challenges.rs | 46 +++++++++++++++- plonky2/src/plonk/recursive_verifier.rs | 71 +++++++++---------------- 3 files changed, 87 insertions(+), 89 deletions(-) diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index 65e6e024..a07d0137 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -3,9 +3,12 @@ use plonky2_field::extension_field::Extendable; use plonky2_util::{log2_strict, reverse_index_bits_in_place}; use crate::fri::proof::{ - FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget, FriQueryStepTarget, + FriChallengesTarget, FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget, + FriQueryStepTarget, +}; +use crate::fri::structure::{ + FriBatchInfoTarget, FriInstanceInfoTarget, FriOpenings, FriOpeningsTarget, }; -use crate::fri::structure::{FriBatchInfoTarget, FriInstanceInfoTarget, FriOpeningsTarget}; use crate::fri::{FriConfig, FriParams}; use crate::gadgets::interpolation::InterpolationGate; use crate::gates::gate::Gate; @@ -107,16 +110,11 @@ impl, const D: usize> CircuitBuilder { fn fri_verify_proof_of_work>( &mut self, - proof: &FriProofTarget, - challenger: &mut RecursiveChallenger, + fri_pow_response: Target, config: &FriConfig, ) { - let mut inputs = challenger.get_hash(self).elements.to_vec(); - inputs.push(proof.pow_witness); - - let hash = self.hash_n_to_m_no_pad::(inputs, 1)[0]; self.assert_leading_zeros( - hash, + fri_pow_response, config.proof_of_work_bits + (64 - F::order().bits()) as u32, ); } @@ -124,11 +122,10 @@ impl, const D: usize> CircuitBuilder { pub fn verify_fri_proof>( &mut self, instance: &FriInstanceInfoTarget, - // Openings of the PLONK polynomials. - os: &OpeningSetTarget, + os: &FriOpeningsTarget, + challenges: &FriChallengesTarget, initial_merkle_caps: &[MerkleCapTarget], proof: &FriProofTarget, - challenger: &mut RecursiveChallenger, params: &FriParams, ) where C::Hasher: AlgebraicHasher, @@ -146,29 +143,10 @@ impl, const D: usize> CircuitBuilder { // Size of the LDE domain. let n = params.lde_size(); - challenger.observe_openings(&os.to_fri_openings()); - - // Scaling factor to combine polynomials. - let alpha = challenger.get_extension_challenge(self); - - let betas = with_context!( - self, - "recover the random betas used in the FRI reductions.", - proof - .commit_phase_merkle_caps - .iter() - .map(|cap| { - challenger.observe_cap(cap); - challenger.get_extension_challenge(self) - }) - .collect::>() - ); - challenger.observe_extension_elements(&proof.final_poly.0); - with_context!( self, "check PoW", - self.fri_verify_proof_of_work::(proof, challenger, ¶ms.config) + self.fri_verify_proof_of_work::(challenges.fri_pow_response, ¶ms.config) ); // Check that parameters are coherent. 
@@ -181,7 +159,7 @@ impl, const D: usize> CircuitBuilder { let precomputed_reduced_evals = with_context!( self, "precompute reduced evaluations", - PrecomputedReducedOpeningsTarget::from_os_and_alpha(&os.to_fri_openings(), alpha, self) + PrecomputedReducedOpeningsTarget::from_os_and_alpha(os, challenges.fri_alpha, self) ); for (i, round_proof) in proof.query_round_proofs.iter().enumerate() { @@ -201,13 +179,12 @@ impl, const D: usize> CircuitBuilder { &format!("verify one (of {}) query rounds", num_queries), self.fri_verifier_query_round::( instance, - alpha, + challenges, &precomputed_reduced_evals, initial_merkle_caps, proof, - challenger, + challenges.fri_query_indices[i], n, - &betas, round_proof, params, ) @@ -291,13 +268,12 @@ impl, const D: usize> CircuitBuilder { fn fri_verifier_query_round>( &mut self, instance: &FriInstanceInfoTarget, - alpha: ExtensionTarget, + challenges: &FriChallengesTarget, precomputed_reduced_evals: &PrecomputedReducedOpeningsTarget, initial_merkle_caps: &[MerkleCapTarget], proof: &FriProofTarget, - challenger: &mut RecursiveChallenger, + x_index: Target, n: usize, - betas: &[ExtensionTarget], round_proof: &FriQueryRoundTarget, params: &FriParams, ) where @@ -308,7 +284,6 @@ impl, const D: usize> CircuitBuilder { // Note that this `low_bits` decomposition permits non-canonical binary encodings. Here we // verify that this has a negligible impact on soundness error. Self::assert_noncanonical_indices_ok(¶ms.config); - let x_index = challenger.get_challenge(self); let mut x_index_bits = self.low_bits(x_index, n_log, F::BITS); let cap_index = @@ -341,7 +316,7 @@ impl, const D: usize> CircuitBuilder { self.fri_combine_initial::( instance, &round_proof.initial_trees_proof, - alpha, + challenges.fri_alpha, subgroup_x, precomputed_reduced_evals, params, @@ -368,7 +343,7 @@ impl, const D: usize> CircuitBuilder { x_index_within_coset_bits, arity_bits, evals, - betas[i], + challenges.fri_betas[i], ) ); diff --git a/plonky2/src/plonk/get_challenges.rs b/plonky2/src/plonk/get_challenges.rs index 59701a55..507123bf 100644 --- a/plonky2/src/plonk/get_challenges.rs +++ b/plonky2/src/plonk/get_challenges.rs @@ -3,7 +3,7 @@ use std::collections::HashSet; use plonky2_field::extension_field::Extendable; use plonky2_field::polynomial::PolynomialCoeffs; -use crate::fri::proof::{CompressedFriProof, FriChallenges, FriProof}; +use crate::fri::proof::{CompressedFriProof, FriChallenges, FriProof, FriProofTarget}; use crate::fri::verifier::{compute_evaluation, fri_combine_initial, PrecomputedReducedOpenings}; use crate::gadgets::polynomial::PolynomialCoeffsExtTarget; use crate::hash::hash_types::{HashOutTarget, MerkleCapTarget, RichField}; @@ -15,7 +15,8 @@ use crate::plonk::circuit_data::CommonCircuitData; use crate::plonk::config::{AlgebraicHasher, GenericConfig, Hasher}; use crate::plonk::proof::{ CompressedProof, CompressedProofWithPublicInputs, FriInferredElements, OpeningSet, - OpeningSetTarget, Proof, ProofChallenges, ProofChallengesTarget, ProofWithPublicInputs, + OpeningSetTarget, Proof, ProofChallenges, ProofChallengesTarget, ProofTarget, + ProofWithPublicInputs, ProofWithPublicInputsTarget, }; use crate::util::reverse_bits; @@ -277,3 +278,44 @@ impl, const D: usize> CircuitBuilder { } } } + +impl ProofWithPublicInputsTarget { + pub(crate) fn get_challenges, C: GenericConfig>( + &self, + builder: &mut CircuitBuilder, + public_inputs_hash: HashOutTarget, + inner_common_data: &CommonCircuitData, + ) -> ProofChallengesTarget + where + C::Hasher: AlgebraicHasher, + { + let 
ProofTarget { + wires_cap, + plonk_zs_partial_products_cap, + quotient_polys_cap, + openings, + opening_proof: + FriProofTarget { + commit_phase_merkle_caps, + final_poly, + pow_witness, + .. + }, + } = &self.proof; + + let public_inputs_hash = + builder.hash_n_to_hash_no_pad::(self.public_inputs.clone()); + + builder.get_challenges( + public_inputs_hash, + wires_cap, + plonk_zs_partial_products_cap, + quotient_polys_cap, + openings, + commit_phase_merkle_caps, + final_poly, + *pow_witness, + inner_common_data, + ) + } +} diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index cb2bc1e0..57ff3dd7 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -5,7 +5,9 @@ use crate::iop::challenger::RecursiveChallenger; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::{CommonCircuitData, VerifierCircuitTarget}; use crate::plonk::config::{AlgebraicHasher, GenericConfig}; -use crate::plonk::proof::{OpeningSetTarget, ProofTarget, ProofWithPublicInputsTarget}; +use crate::plonk::proof::{ + OpeningSetTarget, ProofChallengesTarget, ProofTarget, ProofWithPublicInputsTarget, +}; use crate::plonk::vanishing_poly::eval_vanishing_poly_recursively; use crate::plonk::vars::EvaluationTargets; use crate::util::reducing::ReducingFactorTarget; @@ -13,7 +15,7 @@ use crate::with_context; impl, const D: usize> CircuitBuilder { /// Recursively verifies an inner proof. - pub fn verify_proof_with_pis>( + pub fn verify_proof>( &mut self, proof_with_pis: ProofWithPublicInputsTarget, inner_verifier_data: &VerifierCircuitTarget, @@ -21,27 +23,29 @@ impl, const D: usize> CircuitBuilder { ) where C::Hasher: AlgebraicHasher, { - let ProofWithPublicInputsTarget { - proof, - public_inputs, - } = proof_with_pis; + assert_eq!( + proof_with_pis.public_inputs.len(), + inner_common_data.num_public_inputs + ); + let public_inputs_hash = + self.hash_n_to_hash_no_pad::(proof_with_pis.public_inputs.clone()); + let challenges = proof_with_pis.get_challenges(self, public_inputs_hash, inner_common_data); - assert_eq!(public_inputs.len(), inner_common_data.num_public_inputs); - let public_inputs_hash = self.hash_n_to_hash_no_pad::(public_inputs); - - self.verify_proof( - proof, + self.verify_proof_with_challenges( + proof_with_pis.proof, public_inputs_hash, + challenges, inner_verifier_data, inner_common_data, ); } /// Recursively verifies an inner proof. - pub fn verify_proof>( + fn verify_proof_with_challenges>( &mut self, proof: ProofTarget, public_inputs_hash: HashOutTarget, + challenges: ProofChallengesTarget, inner_verifier_data: &VerifierCircuitTarget, inner_common_data: &CommonCircuitData, ) where @@ -51,30 +55,6 @@ impl, const D: usize> CircuitBuilder { let num_challenges = inner_common_data.config.num_challenges; - let mut challenger = RecursiveChallenger::::new(self); - - let (betas, gammas, alphas, zeta) = - with_context!(self, "observe proof and generates challenges", { - // Observe the instance. 
- let digest = HashOutTarget::from_vec( - self.constants(&inner_common_data.circuit_digest.elements), - ); - challenger.observe_hash(&digest); - challenger.observe_hash(&public_inputs_hash); - - challenger.observe_cap(&proof.wires_cap); - let betas = challenger.get_n_challenges(self, num_challenges); - let gammas = challenger.get_n_challenges(self, num_challenges); - - challenger.observe_cap(&proof.plonk_zs_partial_products_cap); - let alphas = challenger.get_n_challenges(self, num_challenges); - - challenger.observe_cap(&proof.quotient_polys_cap); - let zeta = challenger.get_extension_challenge(self); - - (betas, gammas, alphas, zeta) - }); - let local_constants = &proof.openings.constants; let local_wires = &proof.openings.wires; let vars = EvaluationTargets { @@ -87,23 +67,24 @@ impl, const D: usize> CircuitBuilder { let s_sigmas = &proof.openings.plonk_sigmas; let partial_products = &proof.openings.partial_products; - let zeta_pow_deg = self.exp_power_of_2_extension(zeta, inner_common_data.degree_bits); + let zeta_pow_deg = + self.exp_power_of_2_extension(challenges.plonk_zeta, inner_common_data.degree_bits); let vanishing_polys_zeta = with_context!( self, "evaluate the vanishing polynomial at our challenge point, zeta.", eval_vanishing_poly_recursively( self, inner_common_data, - zeta, + challenges.plonk_zeta, zeta_pow_deg, vars, local_zs, next_zs, partial_products, s_sigmas, - &betas, - &gammas, - &alphas, + &challenges.plonk_betas, + &challenges.plonk_gammas, + &challenges.plonk_alphas, ) ); @@ -128,16 +109,16 @@ impl, const D: usize> CircuitBuilder { proof.quotient_polys_cap, ]; - let fri_instance = inner_common_data.get_fri_instance_target(self, zeta); + let fri_instance = inner_common_data.get_fri_instance_target(self, challenges.plonk_zeta); with_context!( self, "verify FRI proof", self.verify_fri_proof::( &fri_instance, - &proof.openings, + &proof.openings.to_fri_openings(), + &challenges.fri_challenges, merkle_caps, &proof.opening_proof, - &mut challenger, &inner_common_data.fri_params, ) ); @@ -392,7 +373,7 @@ mod tests { &inner_vd.constants_sigmas_cap, ); - builder.verify_proof_with_pis(pt, &inner_data, &inner_cd); + builder.verify_proof(pt, &inner_data, &inner_cd); if print_gate_counts { builder.print_gate_counts(0); From d7bdc75082c935c79263cdbaabea6ca9a67b9ae7 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 7 Feb 2022 11:56:16 +0100 Subject: [PATCH 124/143] Further cleaning --- plonky2/src/plonk/get_challenges.rs | 11 +++++----- plonky2/src/plonk/proof.rs | 11 +++++----- plonky2/src/plonk/verifier.rs | 32 ++++++++++++++++------------- 3 files changed, 28 insertions(+), 26 deletions(-) diff --git a/plonky2/src/plonk/get_challenges.rs b/plonky2/src/plonk/get_challenges.rs index 507123bf..dd6dbb5a 100644 --- a/plonky2/src/plonk/get_challenges.rs +++ b/plonky2/src/plonk/get_challenges.rs @@ -75,7 +75,7 @@ impl, C: GenericConfig, const D: usize> common_data: &CommonCircuitData, ) -> anyhow::Result> { Ok(self - .get_challenges(common_data)? + .get_challenges(self.get_public_inputs_hash(), common_data)? .fri_challenges .fri_query_indices) } @@ -83,6 +83,7 @@ impl, C: GenericConfig, const D: usize> /// Computes all Fiat-Shamir challenges used in the Plonk proof. 
pub(crate) fn get_challenges( &self, + public_inputs_hash: <>::InnerHasher as Hasher>::Hash, common_data: &CommonCircuitData, ) -> anyhow::Result> { let Proof { @@ -100,7 +101,7 @@ impl, C: GenericConfig, const D: usize> } = &self.proof; get_challenges( - self.get_public_inputs_hash(), + public_inputs_hash, wires_cap, plonk_zs_partial_products_cap, quotient_polys_cap, @@ -119,6 +120,7 @@ impl, C: GenericConfig, const D: usize> /// Computes all Fiat-Shamir challenges used in the Plonk proof. pub(crate) fn get_challenges( &self, + public_inputs_hash: <>::InnerHasher as Hasher>::Hash, common_data: &CommonCircuitData, ) -> anyhow::Result> { let CompressedProof { @@ -136,7 +138,7 @@ impl, C: GenericConfig, const D: usize> } = &self.proof; get_challenges( - self.get_public_inputs_hash(), + public_inputs_hash, wires_cap, plonk_zs_partial_products_cap, quotient_polys_cap, @@ -303,9 +305,6 @@ impl ProofWithPublicInputsTarget { }, } = &self.proof; - let public_inputs_hash = - builder.hash_n_to_hash_no_pad::(self.public_inputs.clone()); - builder.get_challenges( public_inputs_hash, wires_cap, diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index a9bb1210..9fc4d3f6 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -174,7 +174,7 @@ impl, C: GenericConfig, const D: usize> self, common_data: &CommonCircuitData, ) -> anyhow::Result> { - let challenges = self.get_challenges(common_data)?; + let challenges = self.get_challenges(self.get_public_inputs_hash(), common_data)?; let fri_inferred_elements = self.get_inferred_elements(&challenges, common_data); let decompressed_proof = self.proof @@ -190,16 +190,15 @@ impl, C: GenericConfig, const D: usize> verifier_data: &VerifierOnlyCircuitData, common_data: &CommonCircuitData, ) -> anyhow::Result<()> { - let challenges = self.get_challenges(common_data)?; + let public_inputs_hash = self.get_public_inputs_hash(); + let challenges = self.get_challenges(public_inputs_hash, common_data)?; let fri_inferred_elements = self.get_inferred_elements(&challenges, common_data); let decompressed_proof = self.proof .decompress(&challenges, fri_inferred_elements, &common_data.fri_params); verify_with_challenges( - ProofWithPublicInputs { - public_inputs: self.public_inputs, - proof: decompressed_proof, - }, + decompressed_proof, + public_inputs_hash, challenges, verifier_data, common_data, diff --git a/plonky2/src/plonk/verifier.rs b/plonky2/src/plonk/verifier.rs index da5c9718..1f5a18d2 100644 --- a/plonky2/src/plonk/verifier.rs +++ b/plonky2/src/plonk/verifier.rs @@ -5,9 +5,9 @@ use plonky2_field::field_types::Field; use crate::fri::verifier::verify_fri_proof; use crate::hash::hash_types::RichField; use crate::plonk::circuit_data::{CommonCircuitData, VerifierOnlyCircuitData}; -use crate::plonk::config::GenericConfig; +use crate::plonk::config::{GenericConfig, Hasher}; use crate::plonk::plonk_common::reduce_with_powers; -use crate::plonk::proof::{ProofChallenges, ProofWithPublicInputs}; +use crate::plonk::proof::{Proof, ProofChallenges, ProofWithPublicInputs}; use crate::plonk::vanishing_poly::eval_vanishing_poly; use crate::plonk::vars::EvaluationVars; @@ -16,8 +16,19 @@ pub(crate) fn verify, C: GenericConfig, c verifier_data: &VerifierOnlyCircuitData, common_data: &CommonCircuitData, ) -> Result<()> { - let challenges = proof_with_pis.get_challenges(common_data)?; - verify_with_challenges(proof_with_pis, challenges, verifier_data, common_data) + ensure!( + proof_with_pis.public_inputs.len() == common_data.num_public_inputs, + 
"Number of public inputs doesn't match circuit data." + ); + let public_inputs_hash = proof_with_pis.get_public_inputs_hash(); + let challenges = proof_with_pis.get_challenges(public_inputs_hash, common_data)?; + verify_with_challenges( + proof_with_pis.proof, + public_inputs_hash, + challenges, + verifier_data, + common_data, + ) } pub(crate) fn verify_with_challenges< @@ -25,25 +36,18 @@ pub(crate) fn verify_with_challenges< C: GenericConfig, const D: usize, >( - proof_with_pis: ProofWithPublicInputs, + proof: Proof, + public_inputs_hash: <>::InnerHasher as Hasher>::Hash, challenges: ProofChallenges, verifier_data: &VerifierOnlyCircuitData, common_data: &CommonCircuitData, ) -> Result<()> { - ensure!( - proof_with_pis.public_inputs.len() == common_data.num_public_inputs, - "Number of public inputs doesn't match circuit data." - ); - let public_inputs_hash = &proof_with_pis.get_public_inputs_hash(); - - let ProofWithPublicInputs { proof, .. } = proof_with_pis; - let local_constants = &proof.openings.constants; let local_wires = &proof.openings.wires; let vars = EvaluationVars { local_constants, local_wires, - public_inputs_hash, + public_inputs_hash: &public_inputs_hash, }; let local_zs = &proof.openings.plonk_zs; let next_zs = &proof.openings.plonk_zs_right; From f39352896fc4b40ab46a3ca724ffe9c3c5c44cc9 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 7 Feb 2022 12:02:37 +0100 Subject: [PATCH 125/143] Unused --- plonky2/src/fri/recursive_verifier.rs | 6 +----- plonky2/src/plonk/proof.rs | 5 +++++ plonky2/src/plonk/recursive_verifier.rs | 3 --- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index a07d0137..221cb928 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -6,9 +6,7 @@ use crate::fri::proof::{ FriChallengesTarget, FriInitialTreeProofTarget, FriProofTarget, FriQueryRoundTarget, FriQueryStepTarget, }; -use crate::fri::structure::{ - FriBatchInfoTarget, FriInstanceInfoTarget, FriOpenings, FriOpeningsTarget, -}; +use crate::fri::structure::{FriBatchInfoTarget, FriInstanceInfoTarget, FriOpeningsTarget}; use crate::fri::{FriConfig, FriParams}; use crate::gadgets::interpolation::InterpolationGate; use crate::gates::gate::Gate; @@ -17,12 +15,10 @@ use crate::gates::low_degree_interpolation::LowDegreeInterpolationGate; use crate::gates::random_access::RandomAccessGate; use crate::hash::hash_types::MerkleCapTarget; use crate::hash::hash_types::RichField; -use crate::iop::challenger::RecursiveChallenger; use crate::iop::ext_target::{flatten_target, ExtensionTarget}; use crate::iop::target::{BoolTarget, Target}; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::config::{AlgebraicHasher, GenericConfig}; -use crate::plonk::proof::OpeningSetTarget; use crate::util::reducing::ReducingFactorTarget; use crate::with_context; diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index 9fc4d3f6..3de608d4 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -1,3 +1,4 @@ +use anyhow::ensure; use plonky2_field::extension_field::Extendable; use rayon::prelude::*; use serde::{Deserialize, Serialize}; @@ -190,6 +191,10 @@ impl, C: GenericConfig, const D: usize> verifier_data: &VerifierOnlyCircuitData, common_data: &CommonCircuitData, ) -> anyhow::Result<()> { + ensure!( + self.public_inputs.len() == common_data.num_public_inputs, + "Number of public inputs doesn't match circuit data." 
+ ); let public_inputs_hash = self.get_public_inputs_hash(); let challenges = self.get_challenges(public_inputs_hash, common_data)?; let fri_inferred_elements = self.get_inferred_elements(&challenges, common_data); diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index 57ff3dd7..dfa415f4 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -1,7 +1,6 @@ use plonky2_field::extension_field::Extendable; use crate::hash::hash_types::{HashOutTarget, RichField}; -use crate::iop::challenger::RecursiveChallenger; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::{CommonCircuitData, VerifierCircuitTarget}; use crate::plonk::config::{AlgebraicHasher, GenericConfig}; @@ -53,8 +52,6 @@ impl, const D: usize> CircuitBuilder { { let one = self.one_extension(); - let num_challenges = inner_common_data.config.num_challenges; - let local_constants = &proof.openings.constants; let local_wires = &proof.openings.wires; let vars = EvaluationTargets { From 02746d8a0f8659de1e6a36533298ddaf060b80e4 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 7 Feb 2022 12:03:52 +0100 Subject: [PATCH 126/143] Minor --- plonky2/src/plonk/verifier.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/plonky2/src/plonk/verifier.rs b/plonky2/src/plonk/verifier.rs index 1f5a18d2..5d69dcb1 100644 --- a/plonky2/src/plonk/verifier.rs +++ b/plonky2/src/plonk/verifier.rs @@ -22,6 +22,7 @@ pub(crate) fn verify, C: GenericConfig, c ); let public_inputs_hash = proof_with_pis.get_public_inputs_hash(); let challenges = proof_with_pis.get_challenges(public_inputs_hash, common_data)?; + verify_with_challenges( proof_with_pis.proof, public_inputs_hash, From 415da246a53c0c005849085aea79eac0468f1508 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 7 Feb 2022 12:10:14 +0100 Subject: [PATCH 127/143] Naming --- plonky2/src/fri/recursive_verifier.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index 221cb928..f51b8fe6 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -118,7 +118,7 @@ impl, const D: usize> CircuitBuilder { pub fn verify_fri_proof>( &mut self, instance: &FriInstanceInfoTarget, - os: &FriOpeningsTarget, + openings: &FriOpeningsTarget, challenges: &FriChallengesTarget, initial_merkle_caps: &[MerkleCapTarget], proof: &FriProofTarget, @@ -155,7 +155,11 @@ impl, const D: usize> CircuitBuilder { let precomputed_reduced_evals = with_context!( self, "precompute reduced evaluations", - PrecomputedReducedOpeningsTarget::from_os_and_alpha(os, challenges.fri_alpha, self) + PrecomputedReducedOpeningsTarget::from_os_and_alpha( + openings, + challenges.fri_alpha, + self + ) ); for (i, round_proof) in proof.query_round_proofs.iter().enumerate() { From 0cc776922a60886e2761bca77ab59c055a55e61a Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Mon, 7 Feb 2022 22:11:37 +0100 Subject: [PATCH 128/143] Make `get_challenges` private. 
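Outside of `plonk::get_challenges`, the builder-side method is now reached through the
crate-internal `ProofWithPublicInputsTarget::get_challenges`. A minimal sketch, assuming
a `builder`, a `proof_with_pis_target`, a `public_inputs_hash` target, and a reference
`inner_common_data` to the inner circuit's `CommonCircuitData` are already in scope
(placeholder names, not code added by this patch):

    // Sketch only: the remaining crate-internal entry point.
    let challenges = proof_with_pis_target.get_challenges(
        &mut builder,
        public_inputs_hash,
        inner_common_data,
    );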
--- plonky2/src/plonk/get_challenges.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plonky2/src/plonk/get_challenges.rs b/plonky2/src/plonk/get_challenges.rs index dd6dbb5a..a67a6207 100644 --- a/plonky2/src/plonk/get_challenges.rs +++ b/plonky2/src/plonk/get_challenges.rs @@ -227,7 +227,7 @@ impl, C: GenericConfig, const D: usize> } impl, const D: usize> CircuitBuilder { - pub(crate) fn get_challenges>( + fn get_challenges>( &mut self, public_inputs_hash: HashOutTarget, wires_cap: &MerkleCapTarget, From efb1365021c3e8d784121b8501b57bd22d72c5d6 Mon Sep 17 00:00:00 2001 From: Jakub Nabaglo Date: Mon, 7 Feb 2022 14:29:31 -0800 Subject: [PATCH 129/143] Split `system_zero::column_layout` into submodules (#475) --- system_zero/src/column_layout.rs | 149 ++++++++++++++-------------- system_zero/src/permutation_unit.rs | 62 ++++++------ 2 files changed, 104 insertions(+), 107 deletions(-) diff --git a/system_zero/src/column_layout.rs b/system_zero/src/column_layout.rs index 7a9e92e5..fa5d627a 100644 --- a/system_zero/src/column_layout.rs +++ b/system_zero/src/column_layout.rs @@ -1,6 +1,3 @@ -use plonky2::hash::hashing::SPONGE_WIDTH; -use plonky2::hash::poseidon; - //// CORE REGISTERS /// A cycle counter. Starts at 0; increments by 1. @@ -21,87 +18,91 @@ pub(crate) const COL_FRAME_PTR: usize = COL_INSTRUCTION_PTR + 1; pub(crate) const COL_STACK_PTR: usize = COL_FRAME_PTR + 1; //// PERMUTATION UNIT +pub(crate) mod permutation { + use plonky2::hash::hashing::SPONGE_WIDTH; + use plonky2::hash::poseidon; -const START_PERMUTATION_UNIT: usize = COL_STACK_PTR + 1; + const START_UNIT: usize = super::COL_STACK_PTR + 1; -const START_PERMUTATION_FULL_FIRST: usize = START_PERMUTATION_UNIT + SPONGE_WIDTH; + const START_FULL_FIRST: usize = START_UNIT + SPONGE_WIDTH; -pub(crate) const fn col_permutation_full_first_mid_sbox(round: usize, i: usize) -> usize { - debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS); - debug_assert!(i < SPONGE_WIDTH); - START_PERMUTATION_FULL_FIRST + 2 * round * SPONGE_WIDTH + i + pub const fn col_full_first_mid_sbox(round: usize, i: usize) -> usize { + debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_FULL_FIRST + 2 * round * SPONGE_WIDTH + i + } + + pub const fn col_full_first_after_mds(round: usize, i: usize) -> usize { + debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_FULL_FIRST + (2 * round + 1) * SPONGE_WIDTH + i + } + + const START_PARTIAL: usize = + col_full_first_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, SPONGE_WIDTH - 1) + 1; + + pub const fn col_partial_mid_sbox(round: usize) -> usize { + debug_assert!(round < poseidon::N_PARTIAL_ROUNDS); + START_PARTIAL + 2 * round + } + + pub const fn col_partial_after_sbox(round: usize) -> usize { + debug_assert!(round < poseidon::N_PARTIAL_ROUNDS); + START_PARTIAL + 2 * round + 1 + } + + const START_FULL_SECOND: usize = col_partial_after_sbox(poseidon::N_PARTIAL_ROUNDS - 1) + 1; + + pub const fn col_full_second_mid_sbox(round: usize, i: usize) -> usize { + debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_FULL_SECOND + 2 * round * SPONGE_WIDTH + i + } + + pub const fn col_full_second_after_mds(round: usize, i: usize) -> usize { + debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_FULL_SECOND + (2 * round + 1) * SPONGE_WIDTH + i + } + + pub const fn col_input(i: usize) -> usize { + debug_assert!(i < SPONGE_WIDTH); + START_UNIT + 
i + } + + pub const fn col_output(i: usize) -> usize { + debug_assert!(i < SPONGE_WIDTH); + col_full_second_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, i) + } + + pub(super) const END_UNIT: usize = col_output(SPONGE_WIDTH - 1); } -pub(crate) const fn col_permutation_full_first_after_mds(round: usize, i: usize) -> usize { - debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS); - debug_assert!(i < SPONGE_WIDTH); - START_PERMUTATION_FULL_FIRST + (2 * round + 1) * SPONGE_WIDTH + i -} - -const START_PERMUTATION_PARTIAL: usize = - col_permutation_full_first_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, SPONGE_WIDTH - 1) + 1; - -pub(crate) const fn col_permutation_partial_mid_sbox(round: usize) -> usize { - debug_assert!(round < poseidon::N_PARTIAL_ROUNDS); - START_PERMUTATION_PARTIAL + 2 * round -} - -pub(crate) const fn col_permutation_partial_after_sbox(round: usize) -> usize { - debug_assert!(round < poseidon::N_PARTIAL_ROUNDS); - START_PERMUTATION_PARTIAL + 2 * round + 1 -} - -const START_PERMUTATION_FULL_SECOND: usize = - col_permutation_partial_after_sbox(poseidon::N_PARTIAL_ROUNDS - 1) + 1; - -pub(crate) const fn col_permutation_full_second_mid_sbox(round: usize, i: usize) -> usize { - debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS); - debug_assert!(i < SPONGE_WIDTH); - START_PERMUTATION_FULL_SECOND + 2 * round * SPONGE_WIDTH + i -} - -pub(crate) const fn col_permutation_full_second_after_mds(round: usize, i: usize) -> usize { - debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS); - debug_assert!(i < SPONGE_WIDTH); - START_PERMUTATION_FULL_SECOND + (2 * round + 1) * SPONGE_WIDTH + i -} - -pub(crate) const fn col_permutation_input(i: usize) -> usize { - debug_assert!(i < SPONGE_WIDTH); - START_PERMUTATION_UNIT + i -} - -pub(crate) const fn col_permutation_output(i: usize) -> usize { - debug_assert!(i < SPONGE_WIDTH); - col_permutation_full_second_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, i) -} - -const END_PERMUTATION_UNIT: usize = col_permutation_output(SPONGE_WIDTH - 1); - //// MEMORY UNITS //// DECOMPOSITION UNITS +pub(crate) mod decomposition { -const START_DECOMPOSITION_UNITS: usize = END_PERMUTATION_UNIT + 1; + const START_UNITS: usize = super::permutation::END_UNIT + 1; -const NUM_DECOMPOSITION_UNITS: usize = 4; -/// The number of bits associated with a single decomposition unit. -const DECOMPOSITION_UNIT_BITS: usize = 32; -/// One column for the value being decomposed, plus one column per bit. -const DECOMPOSITION_UNIT_COLS: usize = 1 + DECOMPOSITION_UNIT_BITS; + const NUM_UNITS: usize = 4; + /// The number of bits associated with a single decomposition unit. + const UNIT_BITS: usize = 32; + /// One column for the value being decomposed, plus one column per bit. 
+ const UNIT_COLS: usize = 1 + UNIT_BITS; -pub(crate) const fn col_decomposition_input(unit: usize) -> usize { - debug_assert!(unit < NUM_DECOMPOSITION_UNITS); - START_DECOMPOSITION_UNITS + unit * DECOMPOSITION_UNIT_COLS + pub const fn col_input(unit: usize) -> usize { + debug_assert!(unit < NUM_UNITS); + START_UNITS + unit * UNIT_COLS + } + + pub const fn col_bit(unit: usize, bit: usize) -> usize { + debug_assert!(unit < NUM_UNITS); + debug_assert!(bit < UNIT_BITS); + START_UNITS + unit * UNIT_COLS + 1 + bit + } + + pub(super) const END_UNITS: usize = START_UNITS + UNIT_COLS * NUM_UNITS; } -pub(crate) const fn col_decomposition_bit(unit: usize, bit: usize) -> usize { - debug_assert!(unit < NUM_DECOMPOSITION_UNITS); - debug_assert!(bit < DECOMPOSITION_UNIT_BITS); - START_DECOMPOSITION_UNITS + unit * DECOMPOSITION_UNIT_COLS + 1 + bit -} - -const END_DECOMPOSITION_UNITS: usize = - START_DECOMPOSITION_UNITS + DECOMPOSITION_UNIT_COLS * NUM_DECOMPOSITION_UNITS; - -pub(crate) const NUM_COLUMNS: usize = END_DECOMPOSITION_UNITS; +pub(crate) const NUM_COLUMNS: usize = decomposition::END_UNITS; diff --git a/system_zero/src/permutation_unit.rs b/system_zero/src/permutation_unit.rs index 7f12b9ce..e15474e4 100644 --- a/system_zero/src/permutation_unit.rs +++ b/system_zero/src/permutation_unit.rs @@ -8,15 +8,11 @@ use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsume use starky::vars::StarkEvaluationTargets; use starky::vars::StarkEvaluationVars; -use crate::column_layout::{ - col_permutation_full_first_after_mds as col_full_1st_after_mds, - col_permutation_full_first_mid_sbox as col_full_1st_mid_sbox, - col_permutation_full_second_after_mds as col_full_2nd_after_mds, - col_permutation_full_second_mid_sbox as col_full_2nd_mid_sbox, - col_permutation_input as col_input, - col_permutation_partial_after_sbox as col_partial_after_sbox, - col_permutation_partial_mid_sbox as col_partial_mid_sbox, NUM_COLUMNS, +use crate::column_layout::permutation::{ + col_full_first_after_mds, col_full_first_mid_sbox, col_full_second_after_mds, + col_full_second_mid_sbox, col_input, col_partial_after_sbox, col_partial_mid_sbox, }; +use crate::column_layout::NUM_COLUMNS; use crate::public_input_layout::NUM_PUBLIC_INPUTS; use crate::system_zero::SystemZero; @@ -75,14 +71,14 @@ impl, const D: usize> SystemZero { for i in 0..SPONGE_WIDTH { let state_cubed = state[i].cube(); - values[col_full_1st_mid_sbox(r, i)] = state_cubed; + values[col_full_first_mid_sbox(r, i)] = state_cubed; state[i] *= state_cubed.square(); // Form state ** 7. } state = F::mds_layer(&state); for i in 0..SPONGE_WIDTH { - values[col_full_1st_after_mds(r, i)] = state[i]; + values[col_full_first_after_mds(r, i)] = state[i]; } } @@ -102,14 +98,14 @@ impl, const D: usize> SystemZero { for i in 0..SPONGE_WIDTH { let state_cubed = state[i].cube(); - values[col_full_2nd_mid_sbox(r, i)] = state_cubed; + values[col_full_second_mid_sbox(r, i)] = state_cubed; state[i] *= state_cubed.square(); // Form state ** 7. 
} state = F::mds_layer(&state); for i in 0..SPONGE_WIDTH { - values[col_full_2nd_after_mds(r, i)] = state[i]; + values[col_full_second_after_mds(r, i)] = state[i]; } } } @@ -136,8 +132,8 @@ impl, const D: usize> SystemZero { for i in 0..SPONGE_WIDTH { let state_cubed = state[i] * state[i].square(); yield_constr - .constraint_wrapping(state_cubed - local_values[col_full_1st_mid_sbox(r, i)]); - let state_cubed = local_values[col_full_1st_mid_sbox(r, i)]; + .constraint_wrapping(state_cubed - local_values[col_full_first_mid_sbox(r, i)]); + let state_cubed = local_values[col_full_first_mid_sbox(r, i)]; state[i] *= state_cubed.square(); // Form state ** 7. } @@ -145,8 +141,8 @@ impl, const D: usize> SystemZero { for i in 0..SPONGE_WIDTH { yield_constr - .constraint_wrapping(state[i] - local_values[col_full_1st_after_mds(r, i)]); - state[i] = local_values[col_full_1st_after_mds(r, i)]; + .constraint_wrapping(state[i] - local_values[col_full_first_after_mds(r, i)]); + state[i] = local_values[col_full_first_after_mds(r, i)]; } } @@ -168,9 +164,10 @@ impl, const D: usize> SystemZero { for i in 0..SPONGE_WIDTH { let state_cubed = state[i] * state[i].square(); - yield_constr - .constraint_wrapping(state_cubed - local_values[col_full_2nd_mid_sbox(r, i)]); - let state_cubed = local_values[col_full_2nd_mid_sbox(r, i)]; + yield_constr.constraint_wrapping( + state_cubed - local_values[col_full_second_mid_sbox(r, i)], + ); + let state_cubed = local_values[col_full_second_mid_sbox(r, i)]; state[i] *= state_cubed.square(); // Form state ** 7. } @@ -178,8 +175,8 @@ impl, const D: usize> SystemZero { for i in 0..SPONGE_WIDTH { yield_constr - .constraint_wrapping(state[i] - local_values[col_full_2nd_after_mds(r, i)]); - state[i] = local_values[col_full_2nd_after_mds(r, i)]; + .constraint_wrapping(state[i] - local_values[col_full_second_after_mds(r, i)]); + state[i] = local_values[col_full_second_after_mds(r, i)]; } } } @@ -204,9 +201,9 @@ impl, const D: usize> SystemZero { for i in 0..SPONGE_WIDTH { let state_cubed = builder.cube_extension(state[i]); let diff = - builder.sub_extension(state_cubed, local_values[col_full_1st_mid_sbox(r, i)]); + builder.sub_extension(state_cubed, local_values[col_full_first_mid_sbox(r, i)]); yield_constr.constraint_wrapping(builder, diff); - let state_cubed = local_values[col_full_1st_mid_sbox(r, i)]; + let state_cubed = local_values[col_full_first_mid_sbox(r, i)]; state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]); // Form state ** 7. } @@ -215,9 +212,9 @@ impl, const D: usize> SystemZero { for i in 0..SPONGE_WIDTH { let diff = - builder.sub_extension(state[i], local_values[col_full_1st_after_mds(r, i)]); + builder.sub_extension(state[i], local_values[col_full_first_after_mds(r, i)]); yield_constr.constraint_wrapping(builder, diff); - state[i] = local_values[col_full_1st_after_mds(r, i)]; + state[i] = local_values[col_full_first_after_mds(r, i)]; } } @@ -245,10 +242,10 @@ impl, const D: usize> SystemZero { for i in 0..SPONGE_WIDTH { let state_cubed = builder.cube_extension(state[i]); - let diff = - builder.sub_extension(state_cubed, local_values[col_full_2nd_mid_sbox(r, i)]); + let diff = builder + .sub_extension(state_cubed, local_values[col_full_second_mid_sbox(r, i)]); yield_constr.constraint_wrapping(builder, diff); - let state_cubed = local_values[col_full_2nd_mid_sbox(r, i)]; + let state_cubed = local_values[col_full_second_mid_sbox(r, i)]; state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]); // Form state ** 7. 
} @@ -257,9 +254,9 @@ impl, const D: usize> SystemZero { for i in 0..SPONGE_WIDTH { let diff = - builder.sub_extension(state[i], local_values[col_full_2nd_after_mds(r, i)]); + builder.sub_extension(state[i], local_values[col_full_second_after_mds(r, i)]); yield_constr.constraint_wrapping(builder, diff); - state[i] = local_values[col_full_2nd_after_mds(r, i)]; + state[i] = local_values[col_full_second_after_mds(r, i)]; } } } @@ -275,9 +272,8 @@ mod tests { use starky::constraint_consumer::ConstraintConsumer; use starky::vars::StarkEvaluationVars; - use crate::column_layout::{ - col_permutation_input as col_input, col_permutation_output as col_output, NUM_COLUMNS, - }; + use crate::column_layout::permutation::{col_input, col_output}; + use crate::column_layout::NUM_COLUMNS; use crate::permutation_unit::SPONGE_WIDTH; use crate::public_input_layout::NUM_PUBLIC_INPUTS; use crate::system_zero::SystemZero; From 8262389edda8a4916fe45bc79437e7cf91626580 Mon Sep 17 00:00:00 2001 From: BGluth Date: Wed, 9 Feb 2022 10:23:07 -0700 Subject: [PATCH 130/143] Added `Debug`, `Clone`, and `Copy` to ecdsa types --- plonky2/src/curve/ecdsa.rs | 4 ++++ plonky2/src/gadgets/ecdsa.rs | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index c84c4c10..3a5d3c7a 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -2,12 +2,16 @@ use crate::curve::curve_msm::msm_parallel; use crate::curve::curve_types::{base_to_scalar, AffinePoint, Curve, CurveScalar}; use crate::field::field_types::Field; +#[derive(Copy, Clone, Debug)] pub struct ECDSASignature { pub r: C::ScalarField, pub s: C::ScalarField, } +#[derive(Copy, Clone, Debug)] pub struct ECDSASecretKey(pub C::ScalarField); + +#[derive(Copy, Clone, Debug)] pub struct ECDSAPublicKey(pub AffinePoint); pub fn sign_message(msg: C::ScalarField, sk: ECDSASecretKey) -> ECDSASignature { diff --git a/plonky2/src/gadgets/ecdsa.rs b/plonky2/src/gadgets/ecdsa.rs index eba04d85..0a95e189 100644 --- a/plonky2/src/gadgets/ecdsa.rs +++ b/plonky2/src/gadgets/ecdsa.rs @@ -7,9 +7,13 @@ use crate::gadgets::nonnative::NonNativeTarget; use crate::hash::hash_types::RichField; use crate::plonk::circuit_builder::CircuitBuilder; +#[derive(Clone, Debug)] pub struct ECDSASecretKeyTarget(NonNativeTarget); + +#[derive(Clone, Debug)] pub struct ECDSAPublicKeyTarget(AffinePointTarget); +#[derive(Clone, Debug)] pub struct ECDSASignatureTarget { pub r: NonNativeTarget, pub s: NonNativeTarget, From adf5444f3fbd7e5011c3b0cd7f5f9a0efdbdb764 Mon Sep 17 00:00:00 2001 From: BGluth Date: Wed, 9 Feb 2022 18:31:58 -0700 Subject: [PATCH 131/143] `from_partial` (non-target) now takes in a slice - Doesn't need to take in a `Vec`. 
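A minimal usage sketch of the new signature (the Goldilocks field, the `demo` wrapper and the
assertion are illustrative assumptions, not part of this patch); missing elements are padded
with zero:

    use plonky2::hash::hash_types::HashOut;
    use plonky2_field::field_types::Field;
    use plonky2_field::goldilocks_field::GoldilocksField as F;

    fn demo() {
        // Before: HashOut::from_partial(vec![F::ONE, F::TWO]).
        // Now a slice is enough; elements 2 and 3 default to F::ZERO.
        let h: HashOut<F> = HashOut::from_partial(&[F::ONE, F::TWO]);
        assert_eq!(h.elements, [F::ONE, F::TWO, F::ZERO, F::ZERO]);
    }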
--- plonky2/src/hash/hash_types.rs | 14 ++++++-------- plonky2/src/hash/hashing.rs | 4 ++-- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/plonky2/src/hash/hash_types.rs b/plonky2/src/hash/hash_types.rs index ed6fca43..8187979b 100644 --- a/plonky2/src/hash/hash_types.rs +++ b/plonky2/src/hash/hash_types.rs @@ -31,14 +31,12 @@ impl HashOut { } } - pub fn from_partial(mut elements: Vec) -> Self { - debug_assert!(elements.len() <= 4); - while elements.len() < 4 { - elements.push(F::ZERO); - } - Self { - elements: [elements[0], elements[1], elements[2], elements[3]], - } + pub fn from_partial(elements_in: &[F]) -> Self { + debug_assert!(elements_in.len() <= 4); + + let mut elements = [F::ZERO; 4]; + elements[0..elements_in.len()].copy_from_slice(elements_in); + Self { elements } } pub fn rand_from_rng(rng: &mut R) -> Self { diff --git a/plonky2/src/hash/hashing.rs b/plonky2/src/hash/hashing.rs index ea205654..eb238e51 100644 --- a/plonky2/src/hash/hashing.rs +++ b/plonky2/src/hash/hashing.rs @@ -14,11 +14,11 @@ pub const SPONGE_WIDTH: usize = SPONGE_RATE + SPONGE_CAPACITY; /// Hash the vector if necessary to reduce its length to ~256 bits. If it already fits, this is a /// no-op. -pub fn hash_or_noop>(inputs: Vec) -> HashOut { +pub fn hash_or_noop>(inputs: &[F]) -> HashOut { if inputs.len() <= 4 { HashOut::from_partial(inputs) } else { - hash_n_to_hash_no_pad::(&inputs) + hash_n_to_hash_no_pad::(inputs) } } From cfe52ad6040a3584b3744e486257129a7c01baff Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Wed, 9 Feb 2022 21:50:18 -0800 Subject: [PATCH 132/143] Add `PrimeField`, `PrimeField64` traits (#457) * Add PrimeField, PrimeField64 traits * fix * fixes * fix * `to_biguint` -> `to_canonical_biguint` --- field/src/extension_field/quadratic.rs | 4 -- field/src/extension_field/quartic.rs | 8 --- field/src/field_types.rs | 20 +++---- field/src/goldilocks_field.rs | 42 ++++++++------- field/src/inversion.rs | 4 +- field/src/prime_field_testing.rs | 6 +-- field/src/secp256k1_base.rs | 36 +++++++------ field/src/secp256k1_scalar.rs | 36 +++++++------ plonky2/src/curve/curve_msm.rs | 6 ++- plonky2/src/curve/curve_multiplication.rs | 3 +- plonky2/src/curve/curve_types.rs | 10 ++-- plonky2/src/curve/secp256k1.rs | 3 +- plonky2/src/gadgets/nonnative.rs | 66 +++++++++++++---------- plonky2/src/gates/assert_le.rs | 3 +- plonky2/src/gates/comparison.rs | 3 +- plonky2/src/gates/subtraction_u32.rs | 3 +- plonky2/src/hash/hash_types.rs | 4 +- plonky2/src/hash/poseidon.rs | 6 +-- plonky2/src/hash/poseidon_goldilocks.rs | 3 +- plonky2/src/iop/generator.rs | 6 +-- plonky2/src/iop/witness.rs | 14 +++-- plonky2/src/util/serialization.rs | 6 +-- waksman/src/sorting.rs | 2 +- 23 files changed, 159 insertions(+), 135 deletions(-) diff --git a/field/src/extension_field/quadratic.rs b/field/src/extension_field/quadratic.rs index e072d323..488304d2 100644 --- a/field/src/extension_field/quadratic.rs +++ b/field/src/extension_field/quadratic.rs @@ -95,10 +95,6 @@ impl> Field for QuadraticExtension { Self([F::from_biguint(low), F::from_biguint(high)]) } - fn to_biguint(&self) -> BigUint { - self.0[0].to_biguint() + F::order() * self.0[1].to_biguint() - } - fn from_canonical_u64(n: u64) -> Self { F::from_canonical_u64(n).into() } diff --git a/field/src/extension_field/quartic.rs b/field/src/extension_field/quartic.rs index 4e9cebf9..7b4a6950 100644 --- a/field/src/extension_field/quartic.rs +++ b/field/src/extension_field/quartic.rs @@ -107,14 +107,6 @@ impl> Field for QuarticExtension { ]) } - fn 
to_biguint(&self) -> BigUint { - let mut result = self.0[3].to_biguint(); - result = result * F::order() + self.0[2].to_biguint(); - result = result * F::order() + self.0[1].to_biguint(); - result = result * F::order() + self.0[0].to_biguint(); - result - } - fn from_canonical_u64(n: u64) -> Self { F::from_canonical_u64(n).into() } diff --git a/field/src/field_types.rs b/field/src/field_types.rs index 65d5bf21..95696475 100644 --- a/field/src/field_types.rs +++ b/field/src/field_types.rs @@ -268,9 +268,6 @@ pub trait Field: // Rename to `from_noncanonical_biguint` and have it return `n % Self::characteristic()`. fn from_biguint(n: BigUint) -> Self; - // TODO: Move to a new `PrimeField` trait. - fn to_biguint(&self) -> BigUint; - /// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`. // TODO: Should probably be unsafe. fn from_canonical_u64(n: u64) -> Self; @@ -407,16 +404,14 @@ pub trait Field: } } +pub trait PrimeField: Field { + fn to_canonical_biguint(&self) -> BigUint; +} + /// A finite field of order less than 2^64. pub trait Field64: Field { const ORDER: u64; - // TODO: Only well-defined for prime 64-bit fields. Move to a new PrimeField64 trait? - fn to_canonical_u64(&self) -> u64; - - // TODO: Only well-defined for prime 64-bit fields. Move to a new PrimeField64 trait? - fn to_noncanonical_u64(&self) -> u64; - /// Returns `x % Self::CHARACTERISTIC`. // TODO: Move to `Field`. fn from_noncanonical_u64(n: u64) -> Self; @@ -456,6 +451,13 @@ pub trait Field64: Field { } } +/// A finite field of prime order less than 2^64. +pub trait PrimeField64: PrimeField + Field64 { + fn to_canonical_u64(&self) -> u64; + + fn to_noncanonical_u64(&self) -> u64; +} + /// An iterator over the powers of a certain base element `b`: `b^0, b^1, b^2, ...`. #[derive(Clone)] pub struct Powers { diff --git a/field/src/goldilocks_field.rs b/field/src/goldilocks_field.rs index a121b4d2..6c033bb2 100644 --- a/field/src/goldilocks_field.rs +++ b/field/src/goldilocks_field.rs @@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize}; use crate::extension_field::quadratic::QuadraticExtension; use crate::extension_field::quartic::QuarticExtension; use crate::extension_field::{Extendable, Frobenius}; -use crate::field_types::{Field, Field64}; +use crate::field_types::{Field, Field64, PrimeField, PrimeField64}; use crate::inversion::try_inverse_u64; const EPSILON: u64 = (1 << 32) - 1; @@ -98,10 +98,6 @@ impl Field for GoldilocksField { Self(n.mod_floor(&Self::order()).to_u64_digits()[0]) } - fn to_biguint(&self) -> BigUint { - self.to_canonical_u64().into() - } - #[inline] fn from_canonical_u64(n: u64) -> Self { debug_assert!(n < Self::ORDER); @@ -123,23 +119,15 @@ impl Field for GoldilocksField { } } +impl PrimeField for GoldilocksField { + fn to_canonical_biguint(&self) -> BigUint { + self.to_canonical_u64().into() + } +} + impl Field64 for GoldilocksField { const ORDER: u64 = 0xFFFFFFFF00000001; - #[inline] - fn to_canonical_u64(&self) -> u64 { - let mut c = self.0; - // We only need one condition subtraction, since 2 * ORDER would not fit in a u64. - if c >= Self::ORDER { - c -= Self::ORDER; - } - c - } - - fn to_noncanonical_u64(&self) -> u64 { - self.0 - } - #[inline] fn from_noncanonical_u64(n: u64) -> Self { Self(n) @@ -160,6 +148,22 @@ impl Field64 for GoldilocksField { } } +impl PrimeField64 for GoldilocksField { + #[inline] + fn to_canonical_u64(&self) -> u64 { + let mut c = self.0; + // We only need one condition subtraction, since 2 * ORDER would not fit in a u64. 
+ if c >= Self::ORDER { + c -= Self::ORDER; + } + c + } + + fn to_noncanonical_u64(&self) -> u64 { + self.0 + } +} + impl Neg for GoldilocksField { type Output = Self; diff --git a/field/src/inversion.rs b/field/src/inversion.rs index 10c02879..5eabc45c 100644 --- a/field/src/inversion.rs +++ b/field/src/inversion.rs @@ -1,4 +1,4 @@ -use crate::field_types::Field64; +use crate::field_types::PrimeField64; /// This is a 'safe' iteration for the modular inversion algorithm. It /// is safe in the sense that it will produce the right answer even @@ -63,7 +63,7 @@ unsafe fn unsafe_iteration(f: &mut u64, g: &mut u64, c: &mut i128, d: &mut i128, /// Elliptic and Hyperelliptic Cryptography, Algorithms 11.6 /// and 11.12. #[allow(clippy::many_single_char_names)] -pub(crate) fn try_inverse_u64(x: &F) -> Option { +pub(crate) fn try_inverse_u64(x: &F) -> Option { let mut f = x.to_noncanonical_u64(); let mut g = F::ORDER; // NB: These two are very rarely such that their absolute diff --git a/field/src/prime_field_testing.rs b/field/src/prime_field_testing.rs index 772336e9..24d5e3c7 100644 --- a/field/src/prime_field_testing.rs +++ b/field/src/prime_field_testing.rs @@ -1,4 +1,4 @@ -use crate::field_types::Field64; +use crate::field_types::PrimeField64; /// Generates a series of non-negative integers less than `modulus` which cover a range of /// interesting test values. @@ -19,7 +19,7 @@ pub fn test_inputs(modulus: u64) -> Vec { /// word_bits)` and panic if the two resulting vectors differ. pub fn run_unaryop_test_cases(op: UnaryOp, expected_op: ExpectedOp) where - F: Field64, + F: PrimeField64, UnaryOp: Fn(F) -> F, ExpectedOp: Fn(u64) -> u64, { @@ -43,7 +43,7 @@ where /// Apply the binary functions `op` and `expected_op` to each pair of inputs. pub fn run_binaryop_test_cases(op: BinaryOp, expected_op: ExpectedOp) where - F: Field64, + F: PrimeField64, BinaryOp: Fn(F, F) -> F, ExpectedOp: Fn(u64, u64) -> u64, { diff --git a/field/src/secp256k1_base.rs b/field/src/secp256k1_base.rs index 23702420..1972aed7 100644 --- a/field/src/secp256k1_base.rs +++ b/field/src/secp256k1_base.rs @@ -10,7 +10,7 @@ use num::{Integer, One}; use rand::Rng; use serde::{Deserialize, Serialize}; -use crate::field_types::Field; +use crate::field_types::{Field, PrimeField}; /// The base field of the secp256k1 elliptic curve. 
/// @@ -42,7 +42,7 @@ impl Default for Secp256K1Base { impl PartialEq for Secp256K1Base { fn eq(&self, other: &Self) -> bool { - self.to_biguint() == other.to_biguint() + self.to_canonical_biguint() == other.to_canonical_biguint() } } @@ -50,19 +50,19 @@ impl Eq for Secp256K1Base {} impl Hash for Secp256K1Base { fn hash(&self, state: &mut H) { - self.to_biguint().hash(state) + self.to_canonical_biguint().hash(state) } } impl Display for Secp256K1Base { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - Display::fmt(&self.to_biguint(), f) + Display::fmt(&self.to_canonical_biguint(), f) } } impl Debug for Secp256K1Base { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - Debug::fmt(&self.to_biguint(), f) + Debug::fmt(&self.to_canonical_biguint(), f) } } @@ -107,14 +107,6 @@ impl Field for Secp256K1Base { Some(self.exp_biguint(&(Self::order() - BigUint::one() - BigUint::one()))) } - fn to_biguint(&self) -> BigUint { - let mut result = biguint_from_array(self.0); - if result >= Self::order() { - result -= Self::order(); - } - result - } - fn from_biguint(val: BigUint) -> Self { Self( val.to_u64_digits() @@ -146,6 +138,16 @@ impl Field for Secp256K1Base { } } +impl PrimeField for Secp256K1Base { + fn to_canonical_biguint(&self) -> BigUint { + let mut result = biguint_from_array(self.0); + if result >= Self::order() { + result -= Self::order(); + } + result + } +} + impl Neg for Secp256K1Base { type Output = Self; @@ -154,7 +156,7 @@ impl Neg for Secp256K1Base { if self.is_zero() { Self::ZERO } else { - Self::from_biguint(Self::order() - self.to_biguint()) + Self::from_biguint(Self::order() - self.to_canonical_biguint()) } } } @@ -164,7 +166,7 @@ impl Add for Secp256K1Base { #[inline] fn add(self, rhs: Self) -> Self { - let mut result = self.to_biguint() + rhs.to_biguint(); + let mut result = self.to_canonical_biguint() + rhs.to_canonical_biguint(); if result >= Self::order() { result -= Self::order(); } @@ -207,7 +209,9 @@ impl Mul for Secp256K1Base { #[inline] fn mul(self, rhs: Self) -> Self { - Self::from_biguint((self.to_biguint() * rhs.to_biguint()).mod_floor(&Self::order())) + Self::from_biguint( + (self.to_canonical_biguint() * rhs.to_canonical_biguint()).mod_floor(&Self::order()), + ) } } diff --git a/field/src/secp256k1_scalar.rs b/field/src/secp256k1_scalar.rs index f10892af..1e506426 100644 --- a/field/src/secp256k1_scalar.rs +++ b/field/src/secp256k1_scalar.rs @@ -11,7 +11,7 @@ use num::{Integer, One}; use rand::Rng; use serde::{Deserialize, Serialize}; -use crate::field_types::Field; +use crate::field_types::{Field, PrimeField}; /// The base field of the secp256k1 elliptic curve. 
/// @@ -45,7 +45,7 @@ impl Default for Secp256K1Scalar { impl PartialEq for Secp256K1Scalar { fn eq(&self, other: &Self) -> bool { - self.to_biguint() == other.to_biguint() + self.to_canonical_biguint() == other.to_canonical_biguint() } } @@ -53,19 +53,19 @@ impl Eq for Secp256K1Scalar {} impl Hash for Secp256K1Scalar { fn hash(&self, state: &mut H) { - self.to_biguint().hash(state) + self.to_canonical_biguint().hash(state) } } impl Display for Secp256K1Scalar { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - Display::fmt(&self.to_biguint(), f) + Display::fmt(&self.to_canonical_biguint(), f) } } impl Debug for Secp256K1Scalar { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - Debug::fmt(&self.to_biguint(), f) + Debug::fmt(&self.to_canonical_biguint(), f) } } @@ -116,14 +116,6 @@ impl Field for Secp256K1Scalar { Some(self.exp_biguint(&(Self::order() - BigUint::one() - BigUint::one()))) } - fn to_biguint(&self) -> BigUint { - let mut result = biguint_from_array(self.0); - if result >= Self::order() { - result -= Self::order(); - } - result - } - fn from_biguint(val: BigUint) -> Self { Self( val.to_u64_digits() @@ -155,6 +147,16 @@ impl Field for Secp256K1Scalar { } } +impl PrimeField for Secp256K1Scalar { + fn to_canonical_biguint(&self) -> BigUint { + let mut result = biguint_from_array(self.0); + if result >= Self::order() { + result -= Self::order(); + } + result + } +} + impl Neg for Secp256K1Scalar { type Output = Self; @@ -163,7 +165,7 @@ impl Neg for Secp256K1Scalar { if self.is_zero() { Self::ZERO } else { - Self::from_biguint(Self::order() - self.to_biguint()) + Self::from_biguint(Self::order() - self.to_canonical_biguint()) } } } @@ -173,7 +175,7 @@ impl Add for Secp256K1Scalar { #[inline] fn add(self, rhs: Self) -> Self { - let mut result = self.to_biguint() + rhs.to_biguint(); + let mut result = self.to_canonical_biguint() + rhs.to_canonical_biguint(); if result >= Self::order() { result -= Self::order(); } @@ -216,7 +218,9 @@ impl Mul for Secp256K1Scalar { #[inline] fn mul(self, rhs: Self) -> Self { - Self::from_biguint((self.to_biguint() * rhs.to_biguint()).mod_floor(&Self::order())) + Self::from_biguint( + (self.to_canonical_biguint() * rhs.to_canonical_biguint()).mod_floor(&Self::order()), + ) } } diff --git a/plonky2/src/curve/curve_msm.rs b/plonky2/src/curve/curve_msm.rs index 388c0321..4c274c1c 100644 --- a/plonky2/src/curve/curve_msm.rs +++ b/plonky2/src/curve/curve_msm.rs @@ -1,5 +1,6 @@ use itertools::Itertools; use plonky2_field::field_types::Field; +use plonky2_field::field_types::PrimeField; use rayon::prelude::*; use crate::curve::curve_summation::affine_multisummation_best; @@ -160,7 +161,7 @@ pub(crate) fn to_digits(x: &C::ScalarField, w: usize) -> Vec { // Convert x to a bool array. 
let x_canonical: Vec<_> = x - .to_biguint() + .to_canonical_biguint() .to_u64_digits() .iter() .cloned() @@ -187,6 +188,7 @@ pub(crate) fn to_digits(x: &C::ScalarField, w: usize) -> Vec { mod tests { use num::BigUint; use plonky2_field::field_types::Field; + use plonky2_field::field_types::PrimeField; use plonky2_field::secp256k1_scalar::Secp256K1Scalar; use crate::curve::curve_msm::{msm_execute, msm_precompute, to_digits}; @@ -206,7 +208,7 @@ mod tests { 0b11111111111111111111111111111111, ]; let x = Secp256K1Scalar::from_biguint(BigUint::from_slice(&x_canonical)); - assert_eq!(x.to_biguint().to_u32_digits(), x_canonical); + assert_eq!(x.to_canonical_biguint().to_u32_digits(), x_canonical); assert_eq!( to_digits::(&x, 17), vec![ diff --git a/plonky2/src/curve/curve_multiplication.rs b/plonky2/src/curve/curve_multiplication.rs index 30da4973..c6fbbd83 100644 --- a/plonky2/src/curve/curve_multiplication.rs +++ b/plonky2/src/curve/curve_multiplication.rs @@ -1,6 +1,7 @@ use std::ops::Mul; use plonky2_field::field_types::Field; +use plonky2_field::field_types::PrimeField; use crate::curve::curve_types::{Curve, CurveScalar, ProjectivePoint}; @@ -88,7 +89,7 @@ fn to_digits(x: &C::ScalarField) -> Vec { ); let digits_per_u64 = 64 / WINDOW_BITS; let mut digits = Vec::with_capacity(digits_per_scalar::()); - for limb in x.to_biguint().to_u64_digits() { + for limb in x.to_canonical_biguint().to_u64_digits() { for j in 0..digits_per_u64 { digits.push((limb >> (j * WINDOW_BITS) as u64) % BASE as u64); } diff --git a/plonky2/src/curve/curve_types.rs b/plonky2/src/curve/curve_types.rs index 9599f6fe..0a9e8711 100644 --- a/plonky2/src/curve/curve_types.rs +++ b/plonky2/src/curve/curve_types.rs @@ -1,7 +1,7 @@ use std::fmt::Debug; use std::ops::Neg; -use plonky2_field::field_types::Field; +use plonky2_field::field_types::{Field, PrimeField}; use plonky2_field::ops::Square; // To avoid implementation conflicts from associated types, @@ -10,8 +10,8 @@ pub struct CurveScalar(pub ::ScalarField); /// A short Weierstrass curve. 
pub trait Curve: 'static + Sync + Sized + Copy + Debug { - type BaseField: Field; - type ScalarField: Field; + type BaseField: PrimeField; + type ScalarField: PrimeField; const A: Self::BaseField; const B: Self::BaseField; @@ -261,9 +261,9 @@ impl Neg for ProjectivePoint { } pub fn base_to_scalar(x: C::BaseField) -> C::ScalarField { - C::ScalarField::from_biguint(x.to_biguint()) + C::ScalarField::from_biguint(x.to_canonical_biguint()) } pub fn scalar_to_base(x: C::ScalarField) -> C::BaseField { - C::BaseField::from_biguint(x.to_biguint()) + C::BaseField::from_biguint(x.to_canonical_biguint()) } diff --git a/plonky2/src/curve/secp256k1.rs b/plonky2/src/curve/secp256k1.rs index d9039719..6a460735 100644 --- a/plonky2/src/curve/secp256k1.rs +++ b/plonky2/src/curve/secp256k1.rs @@ -40,6 +40,7 @@ const SECP256K1_GENERATOR_Y: Secp256K1Base = Secp256K1Base([ mod tests { use num::BigUint; use plonky2_field::field_types::Field; + use plonky2_field::field_types::PrimeField; use plonky2_field::secp256k1_scalar::Secp256K1Scalar; use crate::curve::curve_types::{AffinePoint, Curve, ProjectivePoint}; @@ -86,7 +87,7 @@ mod tests { ) -> ProjectivePoint { let mut g = rhs; let mut sum = ProjectivePoint::ZERO; - for limb in lhs.to_biguint().to_u64_digits().iter() { + for limb in lhs.to_canonical_biguint().to_u64_digits().iter() { for j in 0..64 { if (limb >> j & 1u64) != 0u64 { sum = sum + g; diff --git a/plonky2/src/gadgets/nonnative.rs b/plonky2/src/gadgets/nonnative.rs index 245b0403..3f8d29e8 100644 --- a/plonky2/src/gadgets/nonnative.rs +++ b/plonky2/src/gadgets/nonnative.rs @@ -1,6 +1,7 @@ use std::marker::PhantomData; use num::{BigUint, Integer, One, Zero}; +use plonky2_field::field_types::PrimeField; use plonky2_field::{extension_field::Extendable, field_types::Field}; use plonky2_util::ceil_div_usize; @@ -34,12 +35,12 @@ impl, const D: usize> CircuitBuilder { x.value.clone() } - pub fn constant_nonnative(&mut self, x: FF) -> NonNativeTarget { - let x_biguint = self.constant_biguint(&x.to_biguint()); + pub fn constant_nonnative(&mut self, x: FF) -> NonNativeTarget { + let x_biguint = self.constant_biguint(&x.to_canonical_biguint()); self.biguint_to_nonnative(&x_biguint) } - pub fn zero_nonnative(&mut self) -> NonNativeTarget { + pub fn zero_nonnative(&mut self) -> NonNativeTarget { self.constant_nonnative(FF::ZERO) } @@ -62,7 +63,7 @@ impl, const D: usize> CircuitBuilder { } } - pub fn add_nonnative( + pub fn add_nonnative( &mut self, a: &NonNativeTarget, b: &NonNativeTarget, @@ -105,7 +106,7 @@ impl, const D: usize> CircuitBuilder { } } - pub fn add_many_nonnative( + pub fn add_many_nonnative( &mut self, to_add: &[NonNativeTarget], ) -> NonNativeTarget { @@ -149,7 +150,7 @@ impl, const D: usize> CircuitBuilder { } // Subtract two `NonNativeTarget`s. 
- pub fn sub_nonnative( + pub fn sub_nonnative( &mut self, a: &NonNativeTarget, b: &NonNativeTarget, @@ -177,7 +178,7 @@ impl, const D: usize> CircuitBuilder { diff } - pub fn mul_nonnative( + pub fn mul_nonnative( &mut self, a: &NonNativeTarget, b: &NonNativeTarget, @@ -208,7 +209,7 @@ impl, const D: usize> CircuitBuilder { prod } - pub fn mul_many_nonnative( + pub fn mul_many_nonnative( &mut self, to_mul: &[NonNativeTarget], ) -> NonNativeTarget { @@ -223,14 +224,20 @@ impl, const D: usize> CircuitBuilder { accumulator } - pub fn neg_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { + pub fn neg_nonnative( + &mut self, + x: &NonNativeTarget, + ) -> NonNativeTarget { let zero_target = self.constant_biguint(&BigUint::zero()); let zero_ff = self.biguint_to_nonnative(&zero_target); self.sub_nonnative(&zero_ff, x) } - pub fn inv_nonnative(&mut self, x: &NonNativeTarget) -> NonNativeTarget { + pub fn inv_nonnative( + &mut self, + x: &NonNativeTarget, + ) -> NonNativeTarget { let num_limbs = x.value.num_limbs(); let inv_biguint = self.add_virtual_biguint_target(num_limbs); let div = self.add_virtual_biguint_target(num_limbs); @@ -307,7 +314,7 @@ impl, const D: usize> CircuitBuilder { } #[derive(Debug)] -struct NonNativeAdditionGenerator, const D: usize, FF: Field> { +struct NonNativeAdditionGenerator, const D: usize, FF: PrimeField> { a: NonNativeTarget, b: NonNativeTarget, sum: NonNativeTarget, @@ -315,7 +322,7 @@ struct NonNativeAdditionGenerator, const D: usize, _phantom: PhantomData, } -impl, const D: usize, FF: Field> SimpleGenerator +impl, const D: usize, FF: PrimeField> SimpleGenerator for NonNativeAdditionGenerator { fn dependencies(&self) -> Vec { @@ -332,8 +339,8 @@ impl, const D: usize, FF: Field> SimpleGenerator fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { let a = witness.get_nonnative_target(self.a.clone()); let b = witness.get_nonnative_target(self.b.clone()); - let a_biguint = a.to_biguint(); - let b_biguint = b.to_biguint(); + let a_biguint = a.to_canonical_biguint(); + let b_biguint = b.to_canonical_biguint(); let sum_biguint = a_biguint + b_biguint; let modulus = FF::order(); let (overflow, sum_reduced) = if sum_biguint > modulus { @@ -348,14 +355,15 @@ impl, const D: usize, FF: Field> SimpleGenerator } #[derive(Debug)] -struct NonNativeMultipleAddsGenerator, const D: usize, FF: Field> { +struct NonNativeMultipleAddsGenerator, const D: usize, FF: PrimeField> +{ summands: Vec>, sum: NonNativeTarget, overflow: U32Target, _phantom: PhantomData, } -impl, const D: usize, FF: Field> SimpleGenerator +impl, const D: usize, FF: PrimeField> SimpleGenerator for NonNativeMultipleAddsGenerator { fn dependencies(&self) -> Vec { @@ -373,7 +381,7 @@ impl, const D: usize, FF: Field> SimpleGenerator .collect(); let summand_biguints: Vec<_> = summands .iter() - .map(|summand| summand.to_biguint()) + .map(|summand| summand.to_canonical_biguint()) .collect(); let sum_biguint = summand_biguints @@ -398,7 +406,7 @@ struct NonNativeSubtractionGenerator, const D: usiz _phantom: PhantomData, } -impl, const D: usize, FF: Field> SimpleGenerator +impl, const D: usize, FF: PrimeField> SimpleGenerator for NonNativeSubtractionGenerator { fn dependencies(&self) -> Vec { @@ -415,8 +423,8 @@ impl, const D: usize, FF: Field> SimpleGenerator fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { let a = witness.get_nonnative_target(self.a.clone()); let b = witness.get_nonnative_target(self.b.clone()); - let a_biguint = a.to_biguint(); 
- let b_biguint = b.to_biguint(); + let a_biguint = a.to_canonical_biguint(); + let b_biguint = b.to_canonical_biguint(); let modulus = FF::order(); let (diff_biguint, overflow) = if a_biguint > b_biguint { @@ -439,7 +447,7 @@ struct NonNativeMultiplicationGenerator, const D: u _phantom: PhantomData, } -impl, const D: usize, FF: Field> SimpleGenerator +impl, const D: usize, FF: PrimeField> SimpleGenerator for NonNativeMultiplicationGenerator { fn dependencies(&self) -> Vec { @@ -456,8 +464,8 @@ impl, const D: usize, FF: Field> SimpleGenerator fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { let a = witness.get_nonnative_target(self.a.clone()); let b = witness.get_nonnative_target(self.b.clone()); - let a_biguint = a.to_biguint(); - let b_biguint = b.to_biguint(); + let a_biguint = a.to_canonical_biguint(); + let b_biguint = b.to_canonical_biguint(); let prod_biguint = a_biguint * b_biguint; @@ -470,14 +478,14 @@ impl, const D: usize, FF: Field> SimpleGenerator } #[derive(Debug)] -struct NonNativeInverseGenerator, const D: usize, FF: Field> { +struct NonNativeInverseGenerator, const D: usize, FF: PrimeField> { x: NonNativeTarget, inv: BigUintTarget, div: BigUintTarget, _phantom: PhantomData, } -impl, const D: usize, FF: Field> SimpleGenerator +impl, const D: usize, FF: PrimeField> SimpleGenerator for NonNativeInverseGenerator { fn dependencies(&self) -> Vec { @@ -488,8 +496,8 @@ impl, const D: usize, FF: Field> SimpleGenerator let x = witness.get_nonnative_target(self.x.clone()); let inv = x.inverse(); - let x_biguint = x.to_biguint(); - let inv_biguint = inv.to_biguint(); + let x_biguint = x.to_canonical_biguint(); + let inv_biguint = inv.to_canonical_biguint(); let prod = x_biguint * &inv_biguint; let modulus = FF::order(); let (div, _rem) = prod.div_rem(&modulus); @@ -502,7 +510,7 @@ impl, const D: usize, FF: Field> SimpleGenerator #[cfg(test)] mod tests { use anyhow::Result; - use plonky2_field::field_types::Field; + use plonky2_field::field_types::{Field, PrimeField}; use plonky2_field::secp256k1_base::Secp256K1Base; use crate::iop::witness::PartialWitness; @@ -587,7 +595,7 @@ mod tests { let x_ff = FF::rand(); let mut y_ff = FF::rand(); - while y_ff.to_biguint() > x_ff.to_biguint() { + while y_ff.to_canonical_biguint() > x_ff.to_canonical_biguint() { y_ff = FF::rand(); } let diff_ff = x_ff - y_ff; diff --git a/plonky2/src/gates/assert_le.rs b/plonky2/src/gates/assert_le.rs index c087a963..cec7274b 100644 --- a/plonky2/src/gates/assert_le.rs +++ b/plonky2/src/gates/assert_le.rs @@ -455,7 +455,8 @@ mod tests { use anyhow::Result; use plonky2_field::extension_field::quartic::QuarticExtension; - use plonky2_field::field_types::{Field, Field64}; + use plonky2_field::field_types::Field; + use plonky2_field::field_types::PrimeField64; use plonky2_field::goldilocks_field::GoldilocksField; use rand::Rng; diff --git a/plonky2/src/gates/comparison.rs b/plonky2/src/gates/comparison.rs index bc3e69b9..b1cf7b98 100644 --- a/plonky2/src/gates/comparison.rs +++ b/plonky2/src/gates/comparison.rs @@ -520,7 +520,8 @@ mod tests { use std::marker::PhantomData; use anyhow::Result; - use plonky2_field::field_types::{Field, Field64}; + use plonky2_field::field_types::Field; + use plonky2_field::field_types::PrimeField64; use plonky2_field::goldilocks_field::GoldilocksField; use rand::Rng; diff --git a/plonky2/src/gates/subtraction_u32.rs b/plonky2/src/gates/subtraction_u32.rs index 80bc03ed..b1e4d84f 100644 --- a/plonky2/src/gates/subtraction_u32.rs +++ 
b/plonky2/src/gates/subtraction_u32.rs @@ -338,7 +338,8 @@ mod tests { use anyhow::Result; use plonky2_field::extension_field::quartic::QuarticExtension; - use plonky2_field::field_types::{Field, Field64}; + use plonky2_field::field_types::Field; + use plonky2_field::field_types::PrimeField64; use plonky2_field::goldilocks_field::GoldilocksField; use rand::Rng; diff --git a/plonky2/src/hash/hash_types.rs b/plonky2/src/hash/hash_types.rs index ed6fca43..0a1cedd0 100644 --- a/plonky2/src/hash/hash_types.rs +++ b/plonky2/src/hash/hash_types.rs @@ -1,4 +1,4 @@ -use plonky2_field::field_types::{Field, Field64}; +use plonky2_field::field_types::{Field, PrimeField64}; use plonky2_field::goldilocks_field::GoldilocksField; use rand::Rng; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -8,7 +8,7 @@ use crate::iop::target::Target; use crate::plonk::config::GenericHashOut; /// A prime order field with the features we need to use it as a base field in our argument system. -pub trait RichField: Field64 + Poseidon {} +pub trait RichField: PrimeField64 + Poseidon {} impl RichField for GoldilocksField {} diff --git a/plonky2/src/hash/poseidon.rs b/plonky2/src/hash/poseidon.rs index 08c2851a..09c5d2fc 100644 --- a/plonky2/src/hash/poseidon.rs +++ b/plonky2/src/hash/poseidon.rs @@ -2,7 +2,7 @@ //! https://eprint.iacr.org/2019/458.pdf use plonky2_field::extension_field::{Extendable, FieldExtension}; -use plonky2_field::field_types::{Field, Field64}; +use plonky2_field::field_types::{Field, PrimeField64}; use unroll::unroll_for_loops; use crate::gates::gate::Gate; @@ -35,7 +35,7 @@ fn add_u160_u128((x_lo, x_hi): (u128, u32), y: u128) -> (u128, u32) { } #[inline(always)] -fn reduce_u160((n_lo, n_hi): (u128, u32)) -> F { +fn reduce_u160((n_lo, n_hi): (u128, u32)) -> F { let n_lo_hi = (n_lo >> 64) as u64; let n_lo_lo = n_lo as u64; let reduced_hi: u64 = F::from_noncanonical_u96((n_lo_hi, n_hi)).to_noncanonical_u64(); @@ -148,7 +148,7 @@ pub const ALL_ROUND_CONSTANTS: [u64; MAX_WIDTH * N_ROUNDS] = [ ]; const WIDTH: usize = SPONGE_WIDTH; -pub trait Poseidon: Field64 { +pub trait Poseidon: PrimeField64 { // Total number of round constants required: width of the input // times number of rounds. 
const N_ROUND_CONSTANTS: usize = WIDTH * N_ROUNDS; diff --git a/plonky2/src/hash/poseidon_goldilocks.rs b/plonky2/src/hash/poseidon_goldilocks.rs index ab886847..7b82bb01 100644 --- a/plonky2/src/hash/poseidon_goldilocks.rs +++ b/plonky2/src/hash/poseidon_goldilocks.rs @@ -270,7 +270,8 @@ impl Poseidon for GoldilocksField { #[cfg(test)] mod tests { - use plonky2_field::field_types::{Field, Field64}; + use plonky2_field::field_types::Field; + use plonky2_field::field_types::PrimeField64; use plonky2_field::goldilocks_field::GoldilocksField as F; use crate::hash::poseidon::test_helpers::{check_consistency, check_test_vectors}; diff --git a/plonky2/src/iop/generator.rs b/plonky2/src/iop/generator.rs index 73978f5c..1569e889 100644 --- a/plonky2/src/iop/generator.rs +++ b/plonky2/src/iop/generator.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; use num::BigUint; use plonky2_field::extension_field::{Extendable, FieldExtension}; -use plonky2_field::field_types::Field; +use plonky2_field::field_types::{Field, PrimeField}; use crate::gadgets::arithmetic_u32::U32Target; use crate::gadgets::biguint::BigUintTarget; @@ -180,8 +180,8 @@ impl GeneratedValues { } } - pub fn set_nonnative_target(&mut self, target: NonNativeTarget, value: FF) { - self.set_biguint_target(target.value, value.to_biguint()) + pub fn set_nonnative_target(&mut self, target: NonNativeTarget, value: FF) { + self.set_biguint_target(target.value, value.to_canonical_biguint()) } pub fn set_hash_target(&mut self, ht: HashOutTarget, value: HashOut) { diff --git a/plonky2/src/iop/witness.rs b/plonky2/src/iop/witness.rs index 43dc752d..e1bdf06e 100644 --- a/plonky2/src/iop/witness.rs +++ b/plonky2/src/iop/witness.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use itertools::Itertools; use num::{BigUint, FromPrimitive, Zero}; use plonky2_field::extension_field::{Extendable, FieldExtension}; -use plonky2_field::field_types::Field; +use plonky2_field::field_types::{Field, PrimeField}; use crate::fri::witness_util::set_fri_proof_target; use crate::gadgets::arithmetic_u32::U32Target; @@ -62,20 +62,26 @@ pub trait Witness { panic!("not a bool") } - fn get_biguint_target(&self, target: BigUintTarget) -> BigUint { + fn get_biguint_target(&self, target: BigUintTarget) -> BigUint + where + F: PrimeField, + { let mut result = BigUint::zero(); let limb_base = BigUint::from_u64(1 << 32u64).unwrap(); for i in (0..target.num_limbs()).rev() { let limb = target.get_limb(i); result *= &limb_base; - result += self.get_target(limb.0).to_biguint(); + result += self.get_target(limb.0).to_canonical_biguint(); } result } - fn get_nonnative_target(&self, target: NonNativeTarget) -> FF { + fn get_nonnative_target(&self, target: NonNativeTarget) -> FF + where + F: PrimeField, + { let val = self.get_biguint_target(target.value); FF::from_biguint(val) } diff --git a/plonky2/src/util/serialization.rs b/plonky2/src/util/serialization.rs index adc8baee..d0326073 100644 --- a/plonky2/src/util/serialization.rs +++ b/plonky2/src/util/serialization.rs @@ -3,7 +3,7 @@ use std::io::Cursor; use std::io::{Read, Result, Write}; use plonky2_field::extension_field::{Extendable, FieldExtension}; -use plonky2_field::field_types::Field64; +use plonky2_field::field_types::{Field64, PrimeField64}; use plonky2_field::polynomial::PolynomialCoeffs; use crate::fri::proof::{ @@ -53,7 +53,7 @@ impl Buffer { Ok(u32::from_le_bytes(buf)) } - fn write_field(&mut self, x: F) -> Result<()> { + fn write_field(&mut self, x: F) -> Result<()> { 
self.0.write_all(&x.to_canonical_u64().to_le_bytes()) } fn read_field(&mut self) -> Result { @@ -116,7 +116,7 @@ impl Buffer { )) } - pub fn write_field_vec(&mut self, v: &[F]) -> Result<()> { + pub fn write_field_vec(&mut self, v: &[F]) -> Result<()> { for &a in v { self.write_field(a)?; } diff --git a/waksman/src/sorting.rs b/waksman/src/sorting.rs index b154436e..286205b1 100644 --- a/waksman/src/sorting.rs +++ b/waksman/src/sorting.rs @@ -183,7 +183,7 @@ impl, const D: usize> SimpleGenerator #[cfg(test)] mod tests { use anyhow::Result; - use plonky2::field::field_types::{Field, Field64}; + use plonky2::field::field_types::{Field, PrimeField64}; use plonky2::iop::witness::PartialWitness; use plonky2::plonk::circuit_data::CircuitConfig; use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; From b2c747b171df954757d64a0fad4482d235e22a1d Mon Sep 17 00:00:00 2001 From: BGluth Date: Wed, 9 Feb 2022 23:34:26 -0700 Subject: [PATCH 133/143] Also did the same to the circuit version - And removed the `debug_assert!`. --- plonky2/src/hash/hash_types.rs | 14 ++++---------- plonky2/src/hash/hashing.rs | 4 ++-- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/plonky2/src/hash/hash_types.rs b/plonky2/src/hash/hash_types.rs index 8187979b..f7605306 100644 --- a/plonky2/src/hash/hash_types.rs +++ b/plonky2/src/hash/hash_types.rs @@ -32,8 +32,6 @@ impl HashOut { } pub fn from_partial(elements_in: &[F]) -> Self { - debug_assert!(elements_in.len() <= 4); - let mut elements = [F::ZERO; 4]; elements[0..elements_in.len()].copy_from_slice(elements_in); Self { elements } @@ -102,14 +100,10 @@ impl HashOutTarget { } } - pub fn from_partial(mut elements: Vec, zero: Target) -> Self { - debug_assert!(elements.len() <= 4); - while elements.len() < 4 { - elements.push(zero); - } - Self { - elements: [elements[0], elements[1], elements[2], elements[3]], - } + pub fn from_partial(elements_in: &[Target], zero: Target) -> Self { + let mut elements = [zero; 4]; + elements[0..elements_in.len()].copy_from_slice(elements_in); + Self { elements } } } diff --git a/plonky2/src/hash/hashing.rs b/plonky2/src/hash/hashing.rs index eb238e51..468bd1b8 100644 --- a/plonky2/src/hash/hashing.rs +++ b/plonky2/src/hash/hashing.rs @@ -12,7 +12,7 @@ pub(crate) const SPONGE_RATE: usize = 8; pub(crate) const SPONGE_CAPACITY: usize = 4; pub const SPONGE_WIDTH: usize = SPONGE_RATE + SPONGE_CAPACITY; -/// Hash the vector if necessary to reduce its length to ~256 bits. If it already fits, this is a +/// Hash the slice if necessary to reduce its length to ~256 bits. If it already fits, this is a /// no-op. pub fn hash_or_noop>(inputs: &[F]) -> HashOut { if inputs.len() <= 4 { @@ -26,7 +26,7 @@ impl, const D: usize> CircuitBuilder { pub fn hash_or_noop>(&mut self, inputs: Vec) -> HashOutTarget { let zero = self.zero(); if inputs.len() <= 4 { - HashOutTarget::from_partial(inputs, zero) + HashOutTarget::from_partial(&inputs, zero) } else { self.hash_n_to_hash_no_pad::(inputs) } From 645d45f227a2c1537529a544f625ede6ca964bc2 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Thu, 10 Feb 2022 12:05:04 -0800 Subject: [PATCH 134/143] Column definitions for addition, range checks & lookups (#477) * Column definitions for addition, range checks & lookups I implemented addition (unsigned for now) as an example of how the arithmetic unit can interact with the 16-bit range check unit. Range checks and lookups aren't implemented yet. 
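A standalone sketch of the limb layout (plain `u64` arithmetic rather than the trace types; the
32-bit bound on the inputs and the function names are assumptions for illustration): the sum of
the three inputs is written as three 16-bit output limbs, each of which the range-check unit can
verify, and the addition constraint only recombines them.

    /// Split the sum into three 16-bit limbs, mirroring `generate_addition`
    /// below. Assumes each input fits in 32 bits, so the sum fits in a u64.
    fn add_as_16_bit_limbs(in_1: u64, in_2: u64, in_3: u64) -> [u16; 3] {
        let sum = in_1 + in_2 + in_3;
        [sum as u16, (sum >> 16) as u16, (sum >> 32) as u16]
    }

    /// The identity checked by `eval_addition` (ignoring the `IS_ADD` filter):
    /// out_1 + out_2 * 2^16 + out_3 * 2^32 == in_1 + in_2 + in_3.
    /// With each limb range-checked to 16 bits, the left side stays below
    /// 2^48 and cannot wrap around the Goldilocks modulus.
    fn check_addition(in_1: u64, in_2: u64, in_3: u64, out: [u16; 3]) -> bool {
        let lhs = out[0] as u64 + ((out[1] as u64) << 16) + ((out[2] as u64) << 32);
        lhs == in_1 + in_2 + in_3
    }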
* Missing constraints * Tweaks to get tests passing * Reorg registers into files * Minor --- field/src/field_types.rs | 6 + plonky2/src/plonk/plonk_common.rs | 2 +- starky/src/lib.rs | 2 - system_zero/src/arithmetic/addition.rs | 70 ++++++++++++ system_zero/src/arithmetic/division.rs | 31 +++++ system_zero/src/arithmetic/mod.rs | 75 ++++++++++++ system_zero/src/arithmetic/multiplication.rs | 31 +++++ system_zero/src/arithmetic/subtraction.rs | 31 +++++ system_zero/src/column_layout.rs | 108 ------------------ system_zero/src/core_registers.rs | 39 +++++-- system_zero/src/lib.rs | 5 +- system_zero/src/permutation_unit.rs | 11 +- system_zero/src/registers/arithmetic.rs | 37 ++++++ system_zero/src/registers/boolean.rs | 10 ++ system_zero/src/registers/core.rs | 20 ++++ system_zero/src/registers/logic.rs | 3 + system_zero/src/registers/lookup.rs | 21 ++++ system_zero/src/registers/memory.rs | 3 + system_zero/src/registers/mod.rs | 20 ++++ system_zero/src/registers/permutation.rs | 57 +++++++++ system_zero/src/registers/range_check_16.rs | 11 ++ .../src/registers/range_check_degree.rs | 11 ++ system_zero/src/system_zero.rs | 20 +++- 23 files changed, 489 insertions(+), 135 deletions(-) create mode 100644 system_zero/src/arithmetic/addition.rs create mode 100644 system_zero/src/arithmetic/division.rs create mode 100644 system_zero/src/arithmetic/mod.rs create mode 100644 system_zero/src/arithmetic/multiplication.rs create mode 100644 system_zero/src/arithmetic/subtraction.rs delete mode 100644 system_zero/src/column_layout.rs create mode 100644 system_zero/src/registers/arithmetic.rs create mode 100644 system_zero/src/registers/boolean.rs create mode 100644 system_zero/src/registers/core.rs create mode 100644 system_zero/src/registers/logic.rs create mode 100644 system_zero/src/registers/lookup.rs create mode 100644 system_zero/src/registers/memory.rs create mode 100644 system_zero/src/registers/mod.rs create mode 100644 system_zero/src/registers/permutation.rs create mode 100644 system_zero/src/registers/range_check_16.rs create mode 100644 system_zero/src/registers/range_check_degree.rs diff --git a/field/src/field_types.rs b/field/src/field_types.rs index 95696475..83826b9f 100644 --- a/field/src/field_types.rs +++ b/field/src/field_types.rs @@ -278,6 +278,12 @@ pub trait Field: Self::from_canonical_u64(n as u64) } + /// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`. + // TODO: Should probably be unsafe. + fn from_canonical_u16(n: u16) -> Self { + Self::from_canonical_u64(n as u64) + } + /// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`. // TODO: Should probably be unsafe. fn from_canonical_usize(n: usize) -> Self { diff --git a/plonky2/src/plonk/plonk_common.rs b/plonky2/src/plonk/plonk_common.rs index 519593b3..09cf2652 100644 --- a/plonky2/src/plonk/plonk_common.rs +++ b/plonky2/src/plonk/plonk_common.rs @@ -138,7 +138,7 @@ where sum } -pub(crate) fn reduce_with_powers_ext_recursive, const D: usize>( +pub fn reduce_with_powers_ext_recursive, const D: usize>( builder: &mut CircuitBuilder, terms: &[ExtensionTarget], alpha: Target, diff --git a/starky/src/lib.rs b/starky/src/lib.rs index dc61e7e7..eefab529 100644 --- a/starky/src/lib.rs +++ b/starky/src/lib.rs @@ -1,8 +1,6 @@ // TODO: Remove these when crate is closer to being finished. 
#![allow(dead_code)] #![allow(unused_variables)] -#![allow(unreachable_code)] -#![allow(clippy::diverging_sub_expression)] #![allow(incomplete_features)] #![feature(generic_const_exprs)] diff --git a/system_zero/src/arithmetic/addition.rs b/system_zero/src/arithmetic/addition.rs new file mode 100644 index 00000000..653d533b --- /dev/null +++ b/system_zero/src/arithmetic/addition.rs @@ -0,0 +1,70 @@ +use plonky2::field::extension_field::Extendable; +use plonky2::field::field_types::Field; +use plonky2::field::packed_field::PackedField; +use plonky2::hash::hash_types::RichField; +use plonky2::iop::ext_target::ExtensionTarget; +use plonky2::plonk::circuit_builder::CircuitBuilder; +use plonky2::plonk::plonk_common::reduce_with_powers_ext_recursive; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; + +use crate::registers::arithmetic::*; +use crate::registers::NUM_COLUMNS; + +pub(crate) fn generate_addition(values: &mut [F; NUM_COLUMNS]) { + let in_1 = values[COL_ADD_INPUT_1].to_canonical_u64(); + let in_2 = values[COL_ADD_INPUT_2].to_canonical_u64(); + let in_3 = values[COL_ADD_INPUT_3].to_canonical_u64(); + let output = in_1 + in_2 + in_3; + + values[COL_ADD_OUTPUT_1] = F::from_canonical_u16(output as u16); + values[COL_ADD_OUTPUT_2] = F::from_canonical_u16((output >> 16) as u16); + values[COL_ADD_OUTPUT_3] = F::from_canonical_u16((output >> 32) as u16); +} + +pub(crate) fn eval_addition>( + local_values: &[P; NUM_COLUMNS], + yield_constr: &mut ConstraintConsumer
<P>
, +) { + let is_add = local_values[IS_ADD]; + let in_1 = local_values[COL_ADD_INPUT_1]; + let in_2 = local_values[COL_ADD_INPUT_2]; + let in_3 = local_values[COL_ADD_INPUT_3]; + let out_1 = local_values[COL_ADD_OUTPUT_1]; + let out_2 = local_values[COL_ADD_OUTPUT_2]; + let out_3 = local_values[COL_ADD_OUTPUT_3]; + + let weight_2 = F::from_canonical_u64(1 << 16); + let weight_3 = F::from_canonical_u64(1 << 32); + // Note that this can't overflow. Since each output limb has been range checked as 16-bits, + // this sum can be around 48 bits at most. + let out = out_1 + out_2 * weight_2 + out_3 * weight_3; + + let computed_out = in_1 + in_2 + in_3; + + yield_constr.constraint_wrapping(is_add * (out - computed_out)); +} + +pub(crate) fn eval_addition_recursively, const D: usize>( + builder: &mut CircuitBuilder, + local_values: &[ExtensionTarget; NUM_COLUMNS], + yield_constr: &mut RecursiveConstraintConsumer, +) { + let is_add = local_values[IS_ADD]; + let in_1 = local_values[COL_ADD_INPUT_1]; + let in_2 = local_values[COL_ADD_INPUT_2]; + let in_3 = local_values[COL_ADD_INPUT_3]; + let out_1 = local_values[COL_ADD_OUTPUT_1]; + let out_2 = local_values[COL_ADD_OUTPUT_2]; + let out_3 = local_values[COL_ADD_OUTPUT_3]; + + let limb_base = builder.constant(F::from_canonical_u64(1 << 16)); + // Note that this can't overflow. Since each output limb has been range checked as 16-bits, + // this sum can be around 48 bits at most. + let out = reduce_with_powers_ext_recursive(builder, &[out_1, out_2, out_3], limb_base); + + let computed_out = builder.add_many_extension(&[in_1, in_2, in_3]); + + let diff = builder.sub_extension(out, computed_out); + let filtered_diff = builder.mul_extension(is_add, diff); + yield_constr.constraint_wrapping(builder, filtered_diff); +} diff --git a/system_zero/src/arithmetic/division.rs b/system_zero/src/arithmetic/division.rs new file mode 100644 index 00000000..2f15b233 --- /dev/null +++ b/system_zero/src/arithmetic/division.rs @@ -0,0 +1,31 @@ +use plonky2::field::extension_field::Extendable; +use plonky2::field::field_types::Field; +use plonky2::field::packed_field::PackedField; +use plonky2::hash::hash_types::RichField; +use plonky2::iop::ext_target::ExtensionTarget; +use plonky2::plonk::circuit_builder::CircuitBuilder; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; + +use crate::registers::arithmetic::*; +use crate::registers::NUM_COLUMNS; + +pub(crate) fn generate_division(values: &mut [F; NUM_COLUMNS]) { + // TODO +} + +pub(crate) fn eval_division>( + local_values: &[P; NUM_COLUMNS], + yield_constr: &mut ConstraintConsumer
<P>
, +) { + let is_div = local_values[IS_DIV]; + // TODO +} + +pub(crate) fn eval_division_recursively, const D: usize>( + builder: &mut CircuitBuilder, + local_values: &[ExtensionTarget; NUM_COLUMNS], + yield_constr: &mut RecursiveConstraintConsumer, +) { + let is_div = local_values[IS_DIV]; + // TODO +} diff --git a/system_zero/src/arithmetic/mod.rs b/system_zero/src/arithmetic/mod.rs new file mode 100644 index 00000000..c635d58d --- /dev/null +++ b/system_zero/src/arithmetic/mod.rs @@ -0,0 +1,75 @@ +use plonky2::field::extension_field::Extendable; +use plonky2::field::field_types::Field; +use plonky2::field::packed_field::PackedField; +use plonky2::hash::hash_types::RichField; +use plonky2::plonk::circuit_builder::CircuitBuilder; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; +use starky::vars::StarkEvaluationTargets; +use starky::vars::StarkEvaluationVars; + +use crate::arithmetic::addition::{eval_addition, eval_addition_recursively, generate_addition}; +use crate::arithmetic::division::{eval_division, eval_division_recursively, generate_division}; +use crate::arithmetic::multiplication::{ + eval_multiplication, eval_multiplication_recursively, generate_multiplication, +}; +use crate::arithmetic::subtraction::{ + eval_subtraction, eval_subtraction_recursively, generate_subtraction, +}; +use crate::public_input_layout::NUM_PUBLIC_INPUTS; +use crate::registers::arithmetic::*; +use crate::registers::NUM_COLUMNS; + +mod addition; +mod division; +mod multiplication; +mod subtraction; + +pub(crate) fn generate_arithmetic_unit(values: &mut [F; NUM_COLUMNS]) { + if values[IS_ADD].is_one() { + generate_addition(values); + } else if values[IS_SUB].is_one() { + generate_subtraction(values); + } else if values[IS_MUL].is_one() { + generate_multiplication(values); + } else if values[IS_DIV].is_one() { + generate_division(values); + } +} + +pub(crate) fn eval_arithmetic_unit>( + vars: StarkEvaluationVars, + yield_constr: &mut ConstraintConsumer
<P>
, +) { + let local_values = &vars.local_values; + + // Check that the operation flag values are binary. + for col in [IS_ADD, IS_SUB, IS_MUL, IS_DIV] { + let val = local_values[col]; + yield_constr.constraint_wrapping(val * val - val); + } + + eval_addition(local_values, yield_constr); + eval_subtraction(local_values, yield_constr); + eval_multiplication(local_values, yield_constr); + eval_division(local_values, yield_constr); +} + +pub(crate) fn eval_arithmetic_unit_recursively, const D: usize>( + builder: &mut CircuitBuilder, + vars: StarkEvaluationTargets, + yield_constr: &mut RecursiveConstraintConsumer, +) { + let local_values = &vars.local_values; + + // Check that the operation flag values are binary. + for col in [IS_ADD, IS_SUB, IS_MUL, IS_DIV] { + let val = local_values[col]; + let constraint = builder.mul_add_extension(val, val, val); + yield_constr.constraint_wrapping(builder, constraint); + } + + eval_addition_recursively(builder, local_values, yield_constr); + eval_subtraction_recursively(builder, local_values, yield_constr); + eval_multiplication_recursively(builder, local_values, yield_constr); + eval_division_recursively(builder, local_values, yield_constr); +} diff --git a/system_zero/src/arithmetic/multiplication.rs b/system_zero/src/arithmetic/multiplication.rs new file mode 100644 index 00000000..2eefad38 --- /dev/null +++ b/system_zero/src/arithmetic/multiplication.rs @@ -0,0 +1,31 @@ +use plonky2::field::extension_field::Extendable; +use plonky2::field::field_types::Field; +use plonky2::field::packed_field::PackedField; +use plonky2::hash::hash_types::RichField; +use plonky2::iop::ext_target::ExtensionTarget; +use plonky2::plonk::circuit_builder::CircuitBuilder; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; + +use crate::registers::arithmetic::*; +use crate::registers::NUM_COLUMNS; + +pub(crate) fn generate_multiplication(values: &mut [F; NUM_COLUMNS]) { + // TODO +} + +pub(crate) fn eval_multiplication>( + local_values: &[P; NUM_COLUMNS], + yield_constr: &mut ConstraintConsumer
<P>
, +) { + let is_mul = local_values[IS_MUL]; + // TODO +} + +pub(crate) fn eval_multiplication_recursively, const D: usize>( + builder: &mut CircuitBuilder, + local_values: &[ExtensionTarget; NUM_COLUMNS], + yield_constr: &mut RecursiveConstraintConsumer, +) { + let is_mul = local_values[IS_MUL]; + // TODO +} diff --git a/system_zero/src/arithmetic/subtraction.rs b/system_zero/src/arithmetic/subtraction.rs new file mode 100644 index 00000000..3613dee6 --- /dev/null +++ b/system_zero/src/arithmetic/subtraction.rs @@ -0,0 +1,31 @@ +use plonky2::field::extension_field::Extendable; +use plonky2::field::field_types::Field; +use plonky2::field::packed_field::PackedField; +use plonky2::hash::hash_types::RichField; +use plonky2::iop::ext_target::ExtensionTarget; +use plonky2::plonk::circuit_builder::CircuitBuilder; +use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; + +use crate::registers::arithmetic::*; +use crate::registers::NUM_COLUMNS; + +pub(crate) fn generate_subtraction(values: &mut [F; NUM_COLUMNS]) { + // TODO +} + +pub(crate) fn eval_subtraction>( + local_values: &[P; NUM_COLUMNS], + yield_constr: &mut ConstraintConsumer
<P>
, +) { + let is_sub = local_values[IS_SUB]; + // TODO +} + +pub(crate) fn eval_subtraction_recursively, const D: usize>( + builder: &mut CircuitBuilder, + local_values: &[ExtensionTarget; NUM_COLUMNS], + yield_constr: &mut RecursiveConstraintConsumer, +) { + let is_sub = local_values[IS_SUB]; + // TODO +} diff --git a/system_zero/src/column_layout.rs b/system_zero/src/column_layout.rs deleted file mode 100644 index fa5d627a..00000000 --- a/system_zero/src/column_layout.rs +++ /dev/null @@ -1,108 +0,0 @@ -//// CORE REGISTERS - -/// A cycle counter. Starts at 0; increments by 1. -pub(crate) const COL_CLOCK: usize = 0; - -/// A column which contains the values `[0, ... 2^16 - 1]`, potentially with duplicates. Used for -/// 16-bit range checks. -/// -/// For ease of verification, we enforce that it must begin with 0 and end with `2^16 - 1`, and each -/// delta must be either 0 or 1. -pub(crate) const COL_RANGE_16: usize = COL_CLOCK + 1; - -/// Pointer to the current instruction. -pub(crate) const COL_INSTRUCTION_PTR: usize = COL_RANGE_16 + 1; -/// Pointer to the base of the current call's stack frame. -pub(crate) const COL_FRAME_PTR: usize = COL_INSTRUCTION_PTR + 1; -/// Pointer to the tip of the current call's stack frame. -pub(crate) const COL_STACK_PTR: usize = COL_FRAME_PTR + 1; - -//// PERMUTATION UNIT -pub(crate) mod permutation { - use plonky2::hash::hashing::SPONGE_WIDTH; - use plonky2::hash::poseidon; - - const START_UNIT: usize = super::COL_STACK_PTR + 1; - - const START_FULL_FIRST: usize = START_UNIT + SPONGE_WIDTH; - - pub const fn col_full_first_mid_sbox(round: usize, i: usize) -> usize { - debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS); - debug_assert!(i < SPONGE_WIDTH); - START_FULL_FIRST + 2 * round * SPONGE_WIDTH + i - } - - pub const fn col_full_first_after_mds(round: usize, i: usize) -> usize { - debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS); - debug_assert!(i < SPONGE_WIDTH); - START_FULL_FIRST + (2 * round + 1) * SPONGE_WIDTH + i - } - - const START_PARTIAL: usize = - col_full_first_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, SPONGE_WIDTH - 1) + 1; - - pub const fn col_partial_mid_sbox(round: usize) -> usize { - debug_assert!(round < poseidon::N_PARTIAL_ROUNDS); - START_PARTIAL + 2 * round - } - - pub const fn col_partial_after_sbox(round: usize) -> usize { - debug_assert!(round < poseidon::N_PARTIAL_ROUNDS); - START_PARTIAL + 2 * round + 1 - } - - const START_FULL_SECOND: usize = col_partial_after_sbox(poseidon::N_PARTIAL_ROUNDS - 1) + 1; - - pub const fn col_full_second_mid_sbox(round: usize, i: usize) -> usize { - debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS); - debug_assert!(i < SPONGE_WIDTH); - START_FULL_SECOND + 2 * round * SPONGE_WIDTH + i - } - - pub const fn col_full_second_after_mds(round: usize, i: usize) -> usize { - debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS); - debug_assert!(i < SPONGE_WIDTH); - START_FULL_SECOND + (2 * round + 1) * SPONGE_WIDTH + i - } - - pub const fn col_input(i: usize) -> usize { - debug_assert!(i < SPONGE_WIDTH); - START_UNIT + i - } - - pub const fn col_output(i: usize) -> usize { - debug_assert!(i < SPONGE_WIDTH); - col_full_second_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, i) - } - - pub(super) const END_UNIT: usize = col_output(SPONGE_WIDTH - 1); -} - -//// MEMORY UNITS - -//// DECOMPOSITION UNITS -pub(crate) mod decomposition { - - const START_UNITS: usize = super::permutation::END_UNIT + 1; - - const NUM_UNITS: usize = 4; - /// The number of bits associated with a single decomposition unit. 
- const UNIT_BITS: usize = 32; - /// One column for the value being decomposed, plus one column per bit. - const UNIT_COLS: usize = 1 + UNIT_BITS; - - pub const fn col_input(unit: usize) -> usize { - debug_assert!(unit < NUM_UNITS); - START_UNITS + unit * UNIT_COLS - } - - pub const fn col_bit(unit: usize, bit: usize) -> usize { - debug_assert!(unit < NUM_UNITS); - debug_assert!(bit < UNIT_BITS); - START_UNITS + unit * UNIT_COLS + 1 + bit - } - - pub(super) const END_UNITS: usize = START_UNITS + UNIT_COLS * NUM_UNITS; -} - -pub(crate) const NUM_COLUMNS: usize = decomposition::END_UNITS; diff --git a/system_zero/src/core_registers.rs b/system_zero/src/core_registers.rs index 21faa288..03e7fa04 100644 --- a/system_zero/src/core_registers.rs +++ b/system_zero/src/core_registers.rs @@ -6,10 +6,9 @@ use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsume use starky::vars::StarkEvaluationTargets; use starky::vars::StarkEvaluationVars; -use crate::column_layout::{ - COL_CLOCK, COL_FRAME_PTR, COL_INSTRUCTION_PTR, COL_RANGE_16, COL_STACK_PTR, NUM_COLUMNS, -}; use crate::public_input_layout::NUM_PUBLIC_INPUTS; +use crate::registers::core::*; +use crate::registers::NUM_COLUMNS; use crate::system_zero::SystemZero; impl, const D: usize> SystemZero { @@ -35,11 +34,11 @@ impl, const D: usize> SystemZero { let next_range_16 = (prev_range_16 + 1).min((1 << 16) - 1); next_values[COL_RANGE_16] = F::from_canonical_u64(next_range_16); - next_values[COL_INSTRUCTION_PTR] = todo!(); + // next_values[COL_INSTRUCTION_PTR] = todo!(); - next_values[COL_FRAME_PTR] = todo!(); + // next_values[COL_FRAME_PTR] = todo!(); - next_values[COL_STACK_PTR] = todo!(); + // next_values[COL_STACK_PTR] = todo!(); } #[inline] @@ -64,9 +63,9 @@ impl, const D: usize> SystemZero { let delta_range_16 = next_range_16 - local_range_16; yield_constr.constraint_first_row(local_range_16); yield_constr.constraint_last_row(local_range_16 - FE::from_canonical_u64((1 << 16) - 1)); - yield_constr.constraint(delta_range_16 * (delta_range_16 - FE::ONE)); + yield_constr.constraint(delta_range_16 * delta_range_16 - delta_range_16); - todo!() + // TODO constraints for stack etc. } pub(crate) fn eval_core_registers_recursively( @@ -75,6 +74,28 @@ impl, const D: usize> SystemZero { vars: StarkEvaluationTargets, yield_constr: &mut RecursiveConstraintConsumer, ) { - todo!() + let one_ext = builder.one_extension(); + let max_u16 = builder.constant(F::from_canonical_u64((1 << 16) - 1)); + let max_u16_ext = builder.convert_to_ext(max_u16); + + // The clock must start with 0, and increment by 1. + let local_clock = vars.local_values[COL_CLOCK]; + let next_clock = vars.next_values[COL_CLOCK]; + let delta_clock = builder.sub_extension(next_clock, local_clock); + yield_constr.constraint_first_row(builder, local_clock); + let constraint = builder.sub_extension(delta_clock, one_ext); + yield_constr.constraint(builder, constraint); + + // The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1. 
+ let local_range_16 = vars.local_values[COL_RANGE_16]; + let next_range_16 = vars.next_values[COL_RANGE_16]; + let delta_range_16 = builder.sub_extension(next_range_16, local_range_16); + yield_constr.constraint_first_row(builder, local_range_16); + let constraint = builder.sub_extension(local_range_16, max_u16_ext); + yield_constr.constraint_last_row(builder, constraint); + let constraint = builder.mul_add_extension(delta_range_16, delta_range_16, delta_range_16); + yield_constr.constraint(builder, constraint); + + // TODO constraints for stack etc. } } diff --git a/system_zero/src/lib.rs b/system_zero/src/lib.rs index 029c2abd..1c097573 100644 --- a/system_zero/src/lib.rs +++ b/system_zero/src/lib.rs @@ -1,12 +1,11 @@ // TODO: Remove these when crate is closer to being finished. #![allow(dead_code)] #![allow(unused_variables)] -#![allow(unreachable_code)] -#![allow(clippy::diverging_sub_expression)] -mod column_layout; +mod arithmetic; mod core_registers; mod memory; mod permutation_unit; mod public_input_layout; +mod registers; pub mod system_zero; diff --git a/system_zero/src/permutation_unit.rs b/system_zero/src/permutation_unit.rs index e15474e4..2681f2d9 100644 --- a/system_zero/src/permutation_unit.rs +++ b/system_zero/src/permutation_unit.rs @@ -8,12 +8,9 @@ use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsume use starky::vars::StarkEvaluationTargets; use starky::vars::StarkEvaluationVars; -use crate::column_layout::permutation::{ - col_full_first_after_mds, col_full_first_mid_sbox, col_full_second_after_mds, - col_full_second_mid_sbox, col_input, col_partial_after_sbox, col_partial_mid_sbox, -}; -use crate::column_layout::NUM_COLUMNS; use crate::public_input_layout::NUM_PUBLIC_INPUTS; +use crate::registers::permutation::*; +use crate::registers::NUM_COLUMNS; use crate::system_zero::SystemZero; fn constant_layer( @@ -272,10 +269,10 @@ mod tests { use starky::constraint_consumer::ConstraintConsumer; use starky::vars::StarkEvaluationVars; - use crate::column_layout::permutation::{col_input, col_output}; - use crate::column_layout::NUM_COLUMNS; use crate::permutation_unit::SPONGE_WIDTH; use crate::public_input_layout::NUM_PUBLIC_INPUTS; + use crate::registers::permutation::{col_input, col_output}; + use crate::registers::NUM_COLUMNS; use crate::system_zero::SystemZero; #[test] diff --git a/system_zero/src/registers/arithmetic.rs b/system_zero/src/registers/arithmetic.rs new file mode 100644 index 00000000..92c0d2c3 --- /dev/null +++ b/system_zero/src/registers/arithmetic.rs @@ -0,0 +1,37 @@ +//! Arithmetic unit. + +pub(crate) const IS_ADD: usize = super::START_ARITHMETIC; +pub(crate) const IS_SUB: usize = IS_ADD + 1; +pub(crate) const IS_MUL: usize = IS_SUB + 1; +pub(crate) const IS_DIV: usize = IS_MUL + 1; + +const START_SHARED_COLS: usize = IS_DIV + 1; + +/// Within the arithmetic unit, there are shared columns which can be used by any arithmetic +/// circuit, depending on which one is active this cycle. +// Can be increased as needed as other operations are implemented. +const NUM_SHARED_COLS: usize = 3; + +const fn shared_col(i: usize) -> usize { + debug_assert!(i < NUM_SHARED_COLS); + START_SHARED_COLS + i +} + +/// The first value to be added; treated as an unsigned u32. +pub(crate) const COL_ADD_INPUT_1: usize = shared_col(0); +/// The second value to be added; treated as an unsigned u32. +pub(crate) const COL_ADD_INPUT_2: usize = shared_col(1); +/// The third value to be added; treated as an unsigned u32. 
+pub(crate) const COL_ADD_INPUT_3: usize = shared_col(2); + +// Note: Addition outputs three 16-bit chunks, and since these values need to be range-checked +// anyway, we might as well use the range check unit's columns as our addition outputs. So the +// three proceeding columns are basically aliases, not columns owned by the arithmetic unit. +/// The first 16-bit chunk of the output, based on little-endian ordering. +pub(crate) const COL_ADD_OUTPUT_1: usize = super::range_check_16::col_rc_16_input(0); +/// The second 16-bit chunk of the output, based on little-endian ordering. +pub(crate) const COL_ADD_OUTPUT_2: usize = super::range_check_16::col_rc_16_input(1); +/// The third 16-bit chunk of the output, based on little-endian ordering. +pub(crate) const COL_ADD_OUTPUT_3: usize = super::range_check_16::col_rc_16_input(2); + +pub(super) const END: usize = super::START_ARITHMETIC + NUM_SHARED_COLS; diff --git a/system_zero/src/registers/boolean.rs b/system_zero/src/registers/boolean.rs new file mode 100644 index 00000000..c59af8d4 --- /dev/null +++ b/system_zero/src/registers/boolean.rs @@ -0,0 +1,10 @@ +//! Boolean unit. Contains columns whose values must be 0 or 1. + +const NUM_BITS: usize = 128; + +pub const fn col_bit(index: usize) -> usize { + debug_assert!(index < NUM_BITS); + super::START_BOOLEAN + index +} + +pub(super) const END: usize = super::START_BOOLEAN + NUM_BITS; diff --git a/system_zero/src/registers/core.rs b/system_zero/src/registers/core.rs new file mode 100644 index 00000000..3fafab55 --- /dev/null +++ b/system_zero/src/registers/core.rs @@ -0,0 +1,20 @@ +//! Core registers. + +/// A cycle counter. Starts at 0; increments by 1. +pub(crate) const COL_CLOCK: usize = super::START_CORE; + +/// A column which contains the values `[0, ... 2^16 - 1]`, potentially with duplicates. Used for +/// 16-bit range checks. +/// +/// For ease of verification, we enforce that it must begin with 0 and end with `2^16 - 1`, and each +/// delta must be either 0 or 1. +pub(crate) const COL_RANGE_16: usize = COL_CLOCK + 1; + +/// Pointer to the current instruction. +pub(crate) const COL_INSTRUCTION_PTR: usize = COL_RANGE_16 + 1; +/// Pointer to the base of the current call's stack frame. +pub(crate) const COL_FRAME_PTR: usize = COL_INSTRUCTION_PTR + 1; +/// Pointer to the tip of the current call's stack frame. +pub(crate) const COL_STACK_PTR: usize = COL_FRAME_PTR + 1; + +pub(super) const END: usize = COL_STACK_PTR + 1; diff --git a/system_zero/src/registers/logic.rs b/system_zero/src/registers/logic.rs new file mode 100644 index 00000000..07f3f0e0 --- /dev/null +++ b/system_zero/src/registers/logic.rs @@ -0,0 +1,3 @@ +//! Logic unit. + +pub(super) const END: usize = super::START_LOGIC; diff --git a/system_zero/src/registers/lookup.rs b/system_zero/src/registers/lookup.rs new file mode 100644 index 00000000..eb773acf --- /dev/null +++ b/system_zero/src/registers/lookup.rs @@ -0,0 +1,21 @@ +//! Lookup unit. +//! See https://zcash.github.io/halo2/design/proving-system/lookup.html + +const START_UNIT: usize = super::START_LOOKUP; + +const NUM_LOOKUPS: usize = + super::range_check_16::NUM_RANGE_CHECKS + super::range_check_degree::NUM_RANGE_CHECKS; + +/// This column contains a permutation of the input values. +const fn col_permuted_input(i: usize) -> usize { + debug_assert!(i < NUM_LOOKUPS); + START_UNIT + 2 * i +} + +/// This column contains a permutation of the table values. 
+const fn col_permuted_table(i: usize) -> usize { + debug_assert!(i < NUM_LOOKUPS); + START_UNIT + 2 * i + 1 +} + +pub(super) const END: usize = START_UNIT + NUM_LOOKUPS; diff --git a/system_zero/src/registers/memory.rs b/system_zero/src/registers/memory.rs new file mode 100644 index 00000000..1373d0d8 --- /dev/null +++ b/system_zero/src/registers/memory.rs @@ -0,0 +1,3 @@ +//! Memory unit. + +pub(super) const END: usize = super::START_MEMORY; diff --git a/system_zero/src/registers/mod.rs b/system_zero/src/registers/mod.rs new file mode 100644 index 00000000..134a28bf --- /dev/null +++ b/system_zero/src/registers/mod.rs @@ -0,0 +1,20 @@ +pub(crate) mod arithmetic; +pub(crate) mod boolean; +pub(crate) mod core; +pub(crate) mod logic; +pub(crate) mod lookup; +pub(crate) mod memory; +pub(crate) mod permutation; +pub(crate) mod range_check_16; +pub(crate) mod range_check_degree; + +const START_ARITHMETIC: usize = 0; +const START_BOOLEAN: usize = arithmetic::END; +const START_CORE: usize = boolean::END; +const START_LOGIC: usize = core::END; +const START_LOOKUP: usize = logic::END; +const START_MEMORY: usize = lookup::END; +const START_PERMUTATION: usize = memory::END; +const START_RANGE_CHECK_16: usize = permutation::END; +const START_RANGE_CHECK_DEGREE: usize = range_check_16::END; +pub(crate) const NUM_COLUMNS: usize = range_check_degree::END; diff --git a/system_zero/src/registers/permutation.rs b/system_zero/src/registers/permutation.rs new file mode 100644 index 00000000..cde76af2 --- /dev/null +++ b/system_zero/src/registers/permutation.rs @@ -0,0 +1,57 @@ +//! Permutation unit. + +use plonky2::hash::hashing::SPONGE_WIDTH; +use plonky2::hash::poseidon; + +const START_FULL_FIRST: usize = super::START_PERMUTATION + SPONGE_WIDTH; + +pub const fn col_full_first_mid_sbox(round: usize, i: usize) -> usize { + debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_FULL_FIRST + 2 * round * SPONGE_WIDTH + i +} + +pub const fn col_full_first_after_mds(round: usize, i: usize) -> usize { + debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_FULL_FIRST + (2 * round + 1) * SPONGE_WIDTH + i +} + +const START_PARTIAL: usize = + col_full_first_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, SPONGE_WIDTH - 1) + 1; + +pub const fn col_partial_mid_sbox(round: usize) -> usize { + debug_assert!(round < poseidon::N_PARTIAL_ROUNDS); + START_PARTIAL + 2 * round +} + +pub const fn col_partial_after_sbox(round: usize) -> usize { + debug_assert!(round < poseidon::N_PARTIAL_ROUNDS); + START_PARTIAL + 2 * round + 1 +} + +const START_FULL_SECOND: usize = col_partial_after_sbox(poseidon::N_PARTIAL_ROUNDS - 1) + 1; + +pub const fn col_full_second_mid_sbox(round: usize, i: usize) -> usize { + debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_FULL_SECOND + 2 * round * SPONGE_WIDTH + i +} + +pub const fn col_full_second_after_mds(round: usize, i: usize) -> usize { + debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS); + debug_assert!(i < SPONGE_WIDTH); + START_FULL_SECOND + (2 * round + 1) * SPONGE_WIDTH + i +} + +pub const fn col_input(i: usize) -> usize { + debug_assert!(i < SPONGE_WIDTH); + super::START_PERMUTATION + i +} + +pub const fn col_output(i: usize) -> usize { + debug_assert!(i < SPONGE_WIDTH); + col_full_second_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, i) +} + +pub(super) const END: usize = col_output(SPONGE_WIDTH - 1) + 1; diff --git a/system_zero/src/registers/range_check_16.rs 
b/system_zero/src/registers/range_check_16.rs new file mode 100644 index 00000000..c44db494 --- /dev/null +++ b/system_zero/src/registers/range_check_16.rs @@ -0,0 +1,11 @@ +//! Range check unit which checks that values are in `[0, 2^16)`. + +pub(super) const NUM_RANGE_CHECKS: usize = 5; + +/// The input of the `i`th range check, i.e. the value being range checked. +pub(crate) const fn col_rc_16_input(i: usize) -> usize { + debug_assert!(i < NUM_RANGE_CHECKS); + super::START_RANGE_CHECK_16 + i +} + +pub(super) const END: usize = super::START_RANGE_CHECK_16 + NUM_RANGE_CHECKS; diff --git a/system_zero/src/registers/range_check_degree.rs b/system_zero/src/registers/range_check_degree.rs new file mode 100644 index 00000000..6d61e6e2 --- /dev/null +++ b/system_zero/src/registers/range_check_degree.rs @@ -0,0 +1,11 @@ +//! Range check unit which checks that values are in `[0, degree)`. + +pub(super) const NUM_RANGE_CHECKS: usize = 5; + +/// The input of the `i`th range check, i.e. the value being range checked. +pub(crate) const fn col_rc_degree_input(i: usize) -> usize { + debug_assert!(i < NUM_RANGE_CHECKS); + super::START_RANGE_CHECK_DEGREE + i +} + +pub(super) const END: usize = super::START_RANGE_CHECK_DEGREE + NUM_RANGE_CHECKS; diff --git a/system_zero/src/system_zero.rs b/system_zero/src/system_zero.rs index 70d3bbca..780b1d38 100644 --- a/system_zero/src/system_zero.rs +++ b/system_zero/src/system_zero.rs @@ -9,9 +9,12 @@ use starky::stark::Stark; use starky::vars::StarkEvaluationTargets; use starky::vars::StarkEvaluationVars; -use crate::column_layout::NUM_COLUMNS; +use crate::arithmetic::{ + eval_arithmetic_unit, eval_arithmetic_unit_recursively, generate_arithmetic_unit, +}; use crate::memory::TransactionMemory; use crate::public_input_layout::NUM_PUBLIC_INPUTS; +use crate::registers::NUM_COLUMNS; /// We require at least 2^16 rows as it helps support efficient 16-bit range checks. const MIN_TRACE_ROWS: usize = 1 << 16; @@ -34,10 +37,16 @@ impl, const D: usize> SystemZero { loop { let mut next_row = [F::ZERO; NUM_COLUMNS]; self.generate_next_row_core_registers(&row, &mut next_row); + generate_arithmetic_unit(&mut next_row); Self::generate_permutation_unit(&mut next_row); trace.push(row); row = next_row; + + // TODO: Replace with proper termination condition. + if trace.len() == (1 << 16) - 1 { + break; + } } trace.push(row); @@ -66,8 +75,9 @@ impl, const D: usize> Stark for SystemZero, { self.eval_core_registers(vars, yield_constr); + eval_arithmetic_unit(vars, yield_constr); Self::eval_permutation_unit(vars, yield_constr); - todo!() + // TODO: Other units } fn eval_ext_recursively( @@ -77,8 +87,9 @@ impl, const D: usize> Stark for SystemZero, ) { self.eval_core_registers_recursively(builder, vars, yield_constr); + eval_arithmetic_unit_recursively(builder, vars, yield_constr); Self::eval_permutation_unit_recursively(builder, vars, yield_constr); - todo!() + // TODO: Other units } fn constraint_degree(&self) -> usize { @@ -103,7 +114,7 @@ mod tests { use crate::system_zero::SystemZero; #[test] - #[ignore] // TODO + #[ignore] // A bit slow. 
fn run() -> Result<()> { type F = GoldilocksField; type C = PoseidonGoldilocksConfig; @@ -121,7 +132,6 @@ mod tests { } #[test] - #[ignore] // TODO fn degree() -> Result<()> { type F = GoldilocksField; type C = PoseidonGoldilocksConfig; From 7c71eb66908260f163938dc1ad4b1b7851893aed Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 11 Feb 2022 10:25:51 +0100 Subject: [PATCH 135/143] Fix mul_add -> mul_sub typo --- system_zero/src/arithmetic/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system_zero/src/arithmetic/mod.rs b/system_zero/src/arithmetic/mod.rs index c635d58d..45a9f7d9 100644 --- a/system_zero/src/arithmetic/mod.rs +++ b/system_zero/src/arithmetic/mod.rs @@ -64,7 +64,7 @@ pub(crate) fn eval_arithmetic_unit_recursively, con // Check that the operation flag values are binary. for col in [IS_ADD, IS_SUB, IS_MUL, IS_DIV] { let val = local_values[col]; - let constraint = builder.mul_add_extension(val, val, val); + let constraint = builder.mul_sub_extension(val, val, val); yield_constr.constraint_wrapping(builder, constraint); } From 1d013b95ddfb02519c75cc8d5e3f64684a79b269 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 11 Feb 2022 16:22:57 +0100 Subject: [PATCH 136/143] Fix `hash_or_noop` in Merkle proof. --- plonky2/src/hash/merkle_proofs.rs | 2 +- plonky2/src/plonk/config.rs | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/plonky2/src/hash/merkle_proofs.rs b/plonky2/src/hash/merkle_proofs.rs index f90f0657..7ef81570 100644 --- a/plonky2/src/hash/merkle_proofs.rs +++ b/plonky2/src/hash/merkle_proofs.rs @@ -32,7 +32,7 @@ pub(crate) fn verify_merkle_proof>( proof: &MerkleProof, ) -> Result<()> { let mut index = leaf_index; - let mut current_digest = H::hash_no_pad(&leaf_data); + let mut current_digest = H::hash_or_noop(&leaf_data); for &sibling_digest in proof.siblings.iter() { let bit = index & 1; index >>= 1; diff --git a/plonky2/src/plonk/config.rs b/plonky2/src/plonk/config.rs index fdca7037..76891240 100644 --- a/plonky2/src/plonk/config.rs +++ b/plonky2/src/plonk/config.rs @@ -46,6 +46,17 @@ pub trait Hasher: Sized + Clone + Debug + Eq + PartialEq { Self::hash_no_pad(&padded_input) } + /// Hash the slice if necessary to reduce its length to ~256 bits. If it already fits, this is a + /// no-op. + fn hash_or_noop(inputs: &[F]) -> Self::Hash { + if inputs.len() <= 4 { + let inputs_bytes = HashOut::from_partial(inputs).to_bytes(); + Self::Hash::from_bytes(&inputs_bytes) + } else { + Self::hash_no_pad(inputs) + } + } + fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash; } From f7256a6efc361d206879b65c5240ba7fe25d7a3c Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Fri, 11 Feb 2022 16:41:44 +0100 Subject: [PATCH 137/143] Other fixes --- plonky2/src/hash/merkle_tree.rs | 4 ++-- plonky2/src/hash/path_compression.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plonky2/src/hash/merkle_tree.rs b/plonky2/src/hash/merkle_tree.rs index e9460c14..f9890aa5 100644 --- a/plonky2/src/hash/merkle_tree.rs +++ b/plonky2/src/hash/merkle_tree.rs @@ -63,7 +63,7 @@ fn fill_subtree>( ) -> H::Hash { assert_eq!(leaves.len(), digests_buf.len() / 2 + 1); if digests_buf.is_empty() { - H::hash_no_pad(&leaves[0]) + H::hash_or_noop(&leaves[0]) } else { // Layout is: left recursive output || left child digest // || right child digest || right recursive output. 
@@ -99,7 +99,7 @@ fn fill_digests_buf>( .par_iter_mut() .zip(leaves) .for_each(|(cap_buf, leaf)| { - cap_buf.write(H::hash_no_pad(leaf)); + cap_buf.write(H::hash_or_noop(leaf)); }); return; } diff --git a/plonky2/src/hash/path_compression.rs b/plonky2/src/hash/path_compression.rs index 56c355fd..fe7850f4 100644 --- a/plonky2/src/hash/path_compression.rs +++ b/plonky2/src/hash/path_compression.rs @@ -66,7 +66,7 @@ pub(crate) fn decompress_merkle_proofs>( for (&i, v) in leaves_indices.iter().zip(leaves_data) { // Observe the leaves. - seen.insert(i + num_leaves, H::hash_no_pad(v)); + seen.insert(i + num_leaves, H::hash_or_noop(v)); } // Iterators over the siblings. From 736b65b0a7d595b0e1417bd08607edcda859e548 Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Sat, 12 Feb 2022 15:18:20 +0100 Subject: [PATCH 138/143] PR feedback --- plonky2/src/hash/hashing.rs | 10 ---------- plonky2/src/plonk/config.rs | 9 +++++++-- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/plonky2/src/hash/hashing.rs b/plonky2/src/hash/hashing.rs index 468bd1b8..9d043ea3 100644 --- a/plonky2/src/hash/hashing.rs +++ b/plonky2/src/hash/hashing.rs @@ -12,16 +12,6 @@ pub(crate) const SPONGE_RATE: usize = 8; pub(crate) const SPONGE_CAPACITY: usize = 4; pub const SPONGE_WIDTH: usize = SPONGE_RATE + SPONGE_CAPACITY; -/// Hash the slice if necessary to reduce its length to ~256 bits. If it already fits, this is a -/// no-op. -pub fn hash_or_noop>(inputs: &[F]) -> HashOut { - if inputs.len() <= 4 { - HashOut::from_partial(inputs) - } else { - hash_n_to_hash_no_pad::(inputs) - } -} - impl, const D: usize> CircuitBuilder { pub fn hash_or_noop>(&mut self, inputs: Vec) -> HashOutTarget { let zero = self.zero(); diff --git a/plonky2/src/plonk/config.rs b/plonky2/src/plonk/config.rs index 76891240..40179c38 100644 --- a/plonky2/src/plonk/config.rs +++ b/plonky2/src/plonk/config.rs @@ -3,6 +3,7 @@ use std::fmt::Debug; use plonky2_field::extension_field::quadratic::QuadraticExtension; use plonky2_field::extension_field::{Extendable, FieldExtension}; use plonky2_field::goldilocks_field::GoldilocksField; +use plonky2_util::ceil_div_usize; use serde::{de::DeserializeOwned, Serialize}; use crate::hash::hash_types::HashOut; @@ -49,8 +50,12 @@ pub trait Hasher: Sized + Clone + Debug + Eq + PartialEq { /// Hash the slice if necessary to reduce its length to ~256 bits. If it already fits, this is a /// no-op. 
fn hash_or_noop(inputs: &[F]) -> Self::Hash { - if inputs.len() <= 4 { - let inputs_bytes = HashOut::from_partial(inputs).to_bytes(); + if inputs.len() * ceil_div_usize(F::BITS, 8) <= Self::HASH_SIZE { + let mut inputs_bytes = inputs + .iter() + .flat_map(|x| x.to_canonical_u64().to_le_bytes()) + .collect::>(); + inputs_bytes.resize(Self::HASH_SIZE, 0); Self::Hash::from_bytes(&inputs_bytes) } else { Self::hash_no_pad(inputs) From 7af2d05828240123e70f108dc0baf67a5338788c Mon Sep 17 00:00:00 2001 From: wborgeaud Date: Sun, 13 Feb 2022 15:04:40 +0100 Subject: [PATCH 139/143] Save allocation and add const generic bound --- plonky2/benches/merkle.rs | 7 +++++- plonky2/src/fri/oracle.rs | 17 ++++++++++---- plonky2/src/fri/proof.rs | 5 ++++- plonky2/src/fri/prover.rs | 10 +++++++-- plonky2/src/fri/verifier.rs | 21 ++++++++++------- plonky2/src/gates/gate_testing.rs | 7 ++++-- plonky2/src/hash/merkle_proofs.rs | 5 ++++- plonky2/src/hash/merkle_tree.rs | 25 +++++++++++++-------- plonky2/src/hash/path_compression.rs | 5 ++++- plonky2/src/plonk/circuit_builder.rs | 15 ++++++++++--- plonky2/src/plonk/circuit_data.rs | 30 ++++++++++++++++++++----- plonky2/src/plonk/config.rs | 18 ++++++++------- plonky2/src/plonk/proof.rs | 15 ++++++++++--- plonky2/src/plonk/prover.rs | 5 ++++- plonky2/src/plonk/recursive_verifier.rs | 15 ++++++++++--- plonky2/src/plonk/verifier.rs | 10 +++++++-- starky/src/prover.rs | 3 ++- starky/src/verifier.rs | 4 +++- 18 files changed, 160 insertions(+), 57 deletions(-) diff --git a/plonky2/benches/merkle.rs b/plonky2/benches/merkle.rs index 7445682b..8bc43730 100644 --- a/plonky2/benches/merkle.rs +++ b/plonky2/benches/merkle.rs @@ -1,3 +1,5 @@ +#![feature(generic_const_exprs)] + use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use plonky2::field::goldilocks_field::GoldilocksField; use plonky2::hash::hash_types::RichField; @@ -9,7 +11,10 @@ use tynm::type_name; const ELEMS_PER_LEAF: usize = 135; -pub(crate) fn bench_merkle_tree>(c: &mut Criterion) { +pub(crate) fn bench_merkle_tree>(c: &mut Criterion) +where + [(); H::HASH_SIZE]:, +{ let mut group = c.benchmark_group(&format!( "merkle-tree<{}, {}>", type_name::(), diff --git a/plonky2/src/fri/oracle.rs b/plonky2/src/fri/oracle.rs index 0922962a..bd1e9ac5 100644 --- a/plonky2/src/fri/oracle.rs +++ b/plonky2/src/fri/oracle.rs @@ -12,7 +12,7 @@ use crate::fri::FriParams; use crate::hash::hash_types::RichField; use crate::hash::merkle_tree::MerkleTree; use crate::iop::challenger::Challenger; -use crate::plonk::config::GenericConfig; +use crate::plonk::config::{GenericConfig, Hasher}; use crate::timed; use crate::util::reducing::ReducingFactor; use crate::util::reverse_bits; @@ -43,7 +43,10 @@ impl, C: GenericConfig, const D: usize> cap_height: usize, timing: &mut TimingTree, fft_root_table: Option<&FftRootTable>, - ) -> Self { + ) -> Self + where + [(); C::Hasher::HASH_SIZE]:, + { let coeffs = timed!( timing, "IFFT", @@ -68,7 +71,10 @@ impl, C: GenericConfig, const D: usize> cap_height: usize, timing: &mut TimingTree, fft_root_table: Option<&FftRootTable>, - ) -> Self { + ) -> Self + where + [(); C::Hasher::HASH_SIZE]:, + { let degree = polynomials[0].len(); let lde_values = timed!( timing, @@ -133,7 +139,10 @@ impl, C: GenericConfig, const D: usize> challenger: &mut Challenger, fri_params: &FriParams, timing: &mut TimingTree, - ) -> FriProof { + ) -> FriProof + where + [(); C::Hasher::HASH_SIZE]:, + { assert!(D > 1, "Not implemented for D=1."); let alpha = challenger.get_extension_challenge::(); let mut 
alpha = ReducingFactor::new(alpha); diff --git a/plonky2/src/fri/proof.rs b/plonky2/src/fri/proof.rs index 44f74cba..9c6961a4 100644 --- a/plonky2/src/fri/proof.rs +++ b/plonky2/src/fri/proof.rs @@ -245,7 +245,10 @@ impl, H: Hasher, const D: usize> CompressedFriPr challenges: &ProofChallenges, fri_inferred_elements: FriInferredElements, params: &FriParams, - ) -> FriProof { + ) -> FriProof + where + [(); H::HASH_SIZE]:, + { let CompressedFriProof { commit_phase_merkle_caps, query_round_proofs, diff --git a/plonky2/src/fri/prover.rs b/plonky2/src/fri/prover.rs index 5cd5fdf1..5a20ab9d 100644 --- a/plonky2/src/fri/prover.rs +++ b/plonky2/src/fri/prover.rs @@ -24,7 +24,10 @@ pub fn fri_proof, C: GenericConfig, const challenger: &mut Challenger, fri_params: &FriParams, timing: &mut TimingTree, -) -> FriProof { +) -> FriProof +where + [(); C::Hasher::HASH_SIZE]:, +{ let n = lde_polynomial_values.len(); assert_eq!(lde_polynomial_coeffs.len(), n); @@ -68,7 +71,10 @@ fn fri_committed_trees, C: GenericConfig, ) -> ( Vec>, PolynomialCoeffs, -) { +) +where + [(); C::Hasher::HASH_SIZE]:, +{ let mut trees = Vec::new(); let mut shift = F::MULTIPLICATIVE_GROUP_GENERATOR; diff --git a/plonky2/src/fri/verifier.rs b/plonky2/src/fri/verifier.rs index 49cfa053..2607ab0d 100644 --- a/plonky2/src/fri/verifier.rs +++ b/plonky2/src/fri/verifier.rs @@ -56,18 +56,17 @@ pub(crate) fn fri_verify_proof_of_work, const D: us Ok(()) } -pub fn verify_fri_proof< - F: RichField + Extendable, - C: GenericConfig, - const D: usize, ->( +pub fn verify_fri_proof, C: GenericConfig, const D: usize>( instance: &FriInstanceInfo, openings: &FriOpenings, challenges: &FriChallenges, initial_merkle_caps: &[MerkleCap], proof: &FriProof, params: &FriParams, -) -> Result<()> { +) -> Result<()> +where + [(); C::Hasher::HASH_SIZE]:, +{ ensure!( params.final_poly_len() == proof.final_poly.len(), "Final polynomial has wrong degree." @@ -112,7 +111,10 @@ fn fri_verify_initial_proof>( x_index: usize, proof: &FriInitialTreeProof, initial_merkle_caps: &[MerkleCap], -) -> Result<()> { +) -> Result<()> +where + [(); H::HASH_SIZE]:, +{ for ((evals, merkle_proof), cap) in proof.evals_proofs.iter().zip(initial_merkle_caps) { verify_merkle_proof::(evals.clone(), x_index, cap, merkle_proof)?; } @@ -177,7 +179,10 @@ fn fri_verifier_query_round< n: usize, round_proof: &FriQueryRound, params: &FriParams, -) -> Result<()> { +) -> Result<()> +where + [(); C::Hasher::HASH_SIZE]:, +{ fri_verify_initial_proof::( x_index, &round_proof.initial_trees_proof, diff --git a/plonky2/src/gates/gate_testing.rs b/plonky2/src/gates/gate_testing.rs index ea1ef9a4..51768ba8 100644 --- a/plonky2/src/gates/gate_testing.rs +++ b/plonky2/src/gates/gate_testing.rs @@ -10,7 +10,7 @@ use crate::hash::hash_types::RichField; use crate::iop::witness::{PartialWitness, Witness}; use crate::plonk::circuit_builder::CircuitBuilder; use crate::plonk::circuit_data::CircuitConfig; -use crate::plonk::config::GenericConfig; +use crate::plonk::config::{GenericConfig, Hasher}; use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBaseBatch}; use crate::plonk::verifier::verify; use crate::util::transpose; @@ -92,7 +92,10 @@ pub fn test_eval_fns< const D: usize, >( gate: G, -) -> Result<()> { +) -> Result<()> +where + [(); C::Hasher::HASH_SIZE]:, +{ // Test that `eval_unfiltered` and `eval_unfiltered_base` are coherent. 
let wires_base = F::rand_vec(gate.num_wires()); let constants_base = F::rand_vec(gate.num_constants()); diff --git a/plonky2/src/hash/merkle_proofs.rs b/plonky2/src/hash/merkle_proofs.rs index 7ef81570..c3ebf406 100644 --- a/plonky2/src/hash/merkle_proofs.rs +++ b/plonky2/src/hash/merkle_proofs.rs @@ -30,7 +30,10 @@ pub(crate) fn verify_merkle_proof>( leaf_index: usize, merkle_cap: &MerkleCap, proof: &MerkleProof, -) -> Result<()> { +) -> Result<()> +where + [(); H::HASH_SIZE]:, +{ let mut index = leaf_index; let mut current_digest = H::hash_or_noop(&leaf_data); for &sibling_digest in proof.siblings.iter() { diff --git a/plonky2/src/hash/merkle_tree.rs b/plonky2/src/hash/merkle_tree.rs index f9890aa5..5fbc441c 100644 --- a/plonky2/src/hash/merkle_tree.rs +++ b/plonky2/src/hash/merkle_tree.rs @@ -60,7 +60,10 @@ fn capacity_up_to_mut(v: &mut Vec, len: usize) -> &mut [MaybeUninit] { fn fill_subtree>( digests_buf: &mut [MaybeUninit], leaves: &[Vec], -) -> H::Hash { +) -> H::Hash +where + [(); H::HASH_SIZE]:, +{ assert_eq!(leaves.len(), digests_buf.len() / 2 + 1); if digests_buf.is_empty() { H::hash_or_noop(&leaves[0]) @@ -89,7 +92,9 @@ fn fill_digests_buf>( cap_buf: &mut [MaybeUninit], leaves: &[Vec], cap_height: usize, -) { +) where + [(); H::HASH_SIZE]:, +{ // Special case of a tree that's all cap. The usual case will panic because we'll try to split // an empty slice into chunks of `0`. (We would not need this if there was a way to split into // `blah` chunks as opposed to chunks _of_ `blah`.) @@ -121,7 +126,10 @@ fn fill_digests_buf>( } impl> MerkleTree { - pub fn new(leaves: Vec>, cap_height: usize) -> Self { + pub fn new(leaves: Vec>, cap_height: usize) -> Self + where + [(); H::HASH_SIZE]:, + { let log2_leaves_len = log2_strict(leaves.len()); assert!( cap_height <= log2_leaves_len, @@ -208,14 +216,13 @@ mod tests { (0..n).map(|_| F::rand_vec(k)).collect() } - fn verify_all_leaves< - F: RichField + Extendable, - C: GenericConfig, - const D: usize, - >( + fn verify_all_leaves, C: GenericConfig, const D: usize>( leaves: Vec>, cap_height: usize, - ) -> Result<()> { + ) -> Result<()> + where + [(); C::Hasher::HASH_SIZE]:, + { let tree = MerkleTree::::new(leaves.clone(), cap_height); for (i, leaf) in leaves.into_iter().enumerate() { let proof = tree.prove(i); diff --git a/plonky2/src/hash/path_compression.rs b/plonky2/src/hash/path_compression.rs index fe7850f4..6dae3d94 100644 --- a/plonky2/src/hash/path_compression.rs +++ b/plonky2/src/hash/path_compression.rs @@ -57,7 +57,10 @@ pub(crate) fn decompress_merkle_proofs>( compressed_proofs: &[MerkleProof], height: usize, cap_height: usize, -) -> Vec> { +) -> Vec> +where + [(); H::HASH_SIZE]:, +{ let num_leaves = 1 << height; let compressed_proofs = compressed_proofs.to_vec(); let mut decompressed_proofs = Vec::with_capacity(compressed_proofs.len()); diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index cf89bf1a..7811c0db 100644 --- a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -610,7 +610,10 @@ impl, const D: usize> CircuitBuilder { } /// Builds a "full circuit", with both prover and verifier data. 
- pub fn build>(mut self) -> CircuitData { + pub fn build>(mut self) -> CircuitData + where + [(); C::Hasher::HASH_SIZE]:, + { let mut timing = TimingTree::new("preprocess", Level::Trace); let start = Instant::now(); let rate_bits = self.config.fri_config.rate_bits; @@ -776,7 +779,10 @@ impl, const D: usize> CircuitBuilder { } /// Builds a "prover circuit", with data needed to generate proofs but not verify them. - pub fn build_prover>(self) -> ProverCircuitData { + pub fn build_prover>(self) -> ProverCircuitData + where + [(); C::Hasher::HASH_SIZE]:, + { // TODO: Can skip parts of this. let CircuitData { prover_only, @@ -790,7 +796,10 @@ impl, const D: usize> CircuitBuilder { } /// Builds a "verifier circuit", with data needed to verify proofs but not generate them. - pub fn build_verifier>(self) -> VerifierCircuitData { + pub fn build_verifier>(self) -> VerifierCircuitData + where + [(); C::Hasher::HASH_SIZE]:, + { // TODO: Can skip parts of this. let CircuitData { verifier_only, diff --git a/plonky2/src/plonk/circuit_data.rs b/plonky2/src/plonk/circuit_data.rs index 7e667b8d..3d4ee2df 100644 --- a/plonky2/src/plonk/circuit_data.rs +++ b/plonky2/src/plonk/circuit_data.rs @@ -104,7 +104,10 @@ pub struct CircuitData, C: GenericConfig, impl, C: GenericConfig, const D: usize> CircuitData { - pub fn prove(&self, inputs: PartialWitness) -> Result> { + pub fn prove(&self, inputs: PartialWitness) -> Result> + where + [(); C::Hasher::HASH_SIZE]:, + { prove( &self.prover_only, &self.common, @@ -113,14 +116,20 @@ impl, C: GenericConfig, const D: usize> ) } - pub fn verify(&self, proof_with_pis: ProofWithPublicInputs) -> Result<()> { + pub fn verify(&self, proof_with_pis: ProofWithPublicInputs) -> Result<()> + where + [(); C::Hasher::HASH_SIZE]:, + { verify(proof_with_pis, &self.verifier_only, &self.common) } pub fn verify_compressed( &self, compressed_proof_with_pis: CompressedProofWithPublicInputs, - ) -> Result<()> { + ) -> Result<()> + where + [(); C::Hasher::HASH_SIZE]:, + { compressed_proof_with_pis.verify(&self.verifier_only, &self.common) } } @@ -144,7 +153,10 @@ pub struct ProverCircuitData< impl, C: GenericConfig, const D: usize> ProverCircuitData { - pub fn prove(&self, inputs: PartialWitness) -> Result> { + pub fn prove(&self, inputs: PartialWitness) -> Result> + where + [(); C::Hasher::HASH_SIZE]:, + { prove( &self.prover_only, &self.common, @@ -168,14 +180,20 @@ pub struct VerifierCircuitData< impl, C: GenericConfig, const D: usize> VerifierCircuitData { - pub fn verify(&self, proof_with_pis: ProofWithPublicInputs) -> Result<()> { + pub fn verify(&self, proof_with_pis: ProofWithPublicInputs) -> Result<()> + where + [(); C::Hasher::HASH_SIZE]:, + { verify(proof_with_pis, &self.verifier_only, &self.common) } pub fn verify_compressed( &self, compressed_proof_with_pis: CompressedProofWithPublicInputs, - ) -> Result<()> { + ) -> Result<()> + where + [(); C::Hasher::HASH_SIZE]:, + { compressed_proof_with_pis.verify(&self.verifier_only, &self.common) } } diff --git a/plonky2/src/plonk/config.rs b/plonky2/src/plonk/config.rs index 40179c38..cb6d9a9b 100644 --- a/plonky2/src/plonk/config.rs +++ b/plonky2/src/plonk/config.rs @@ -3,7 +3,6 @@ use std::fmt::Debug; use plonky2_field::extension_field::quadratic::QuadraticExtension; use plonky2_field::extension_field::{Extendable, FieldExtension}; use plonky2_field::goldilocks_field::GoldilocksField; -use plonky2_util::ceil_div_usize; use serde::{de::DeserializeOwned, Serialize}; use crate::hash::hash_types::HashOut; @@ -49,13 +48,16 @@ pub trait 
Hasher: Sized + Clone + Debug + Eq + PartialEq { /// Hash the slice if necessary to reduce its length to ~256 bits. If it already fits, this is a /// no-op. - fn hash_or_noop(inputs: &[F]) -> Self::Hash { - if inputs.len() * ceil_div_usize(F::BITS, 8) <= Self::HASH_SIZE { - let mut inputs_bytes = inputs - .iter() - .flat_map(|x| x.to_canonical_u64().to_le_bytes()) - .collect::>(); - inputs_bytes.resize(Self::HASH_SIZE, 0); + fn hash_or_noop(inputs: &[F]) -> Self::Hash + where + [(); Self::HASH_SIZE]:, + { + if inputs.len() <= 4 { + let mut inputs_bytes = [0u8; Self::HASH_SIZE]; + for i in 0..inputs.len() { + inputs_bytes[i * 8..(i + 1) * 8] + .copy_from_slice(&inputs[i].to_canonical_u64().to_le_bytes()); + } Self::Hash::from_bytes(&inputs_bytes) } else { Self::hash_no_pad(inputs) diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index 3de608d4..145ef694 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -138,7 +138,10 @@ impl, C: GenericConfig, const D: usize> challenges: &ProofChallenges, fri_inferred_elements: FriInferredElements, params: &FriParams, - ) -> Proof { + ) -> Proof + where + [(); C::Hasher::HASH_SIZE]:, + { let CompressedProof { wires_cap, plonk_zs_partial_products_cap, @@ -174,7 +177,10 @@ impl, C: GenericConfig, const D: usize> pub fn decompress( self, common_data: &CommonCircuitData, - ) -> anyhow::Result> { + ) -> anyhow::Result> + where + [(); C::Hasher::HASH_SIZE]:, + { let challenges = self.get_challenges(self.get_public_inputs_hash(), common_data)?; let fri_inferred_elements = self.get_inferred_elements(&challenges, common_data); let decompressed_proof = @@ -190,7 +196,10 @@ impl, C: GenericConfig, const D: usize> self, verifier_data: &VerifierOnlyCircuitData, common_data: &CommonCircuitData, - ) -> anyhow::Result<()> { + ) -> anyhow::Result<()> + where + [(); C::Hasher::HASH_SIZE]:, + { ensure!( self.public_inputs.len() == common_data.num_public_inputs, "Number of public inputs doesn't match circuit data." 
diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index d49014f0..1d99b60a 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -31,7 +31,10 @@ pub(crate) fn prove, C: GenericConfig, co common_data: &CommonCircuitData, inputs: PartialWitness, timing: &mut TimingTree, -) -> Result> { +) -> Result> +where + [(); C::Hasher::HASH_SIZE]:, +{ let config = &common_data.config; let num_challenges = config.num_challenges; let quotient_degree = common_data.quotient_degree(); diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/plonk/recursive_verifier.rs index c91cbba2..6210bb29 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/plonk/recursive_verifier.rs @@ -187,7 +187,9 @@ mod tests { use crate::gates::noop::NoopGate; use crate::iop::witness::{PartialWitness, Witness}; use crate::plonk::circuit_data::{CircuitConfig, VerifierOnlyCircuitData}; - use crate::plonk::config::{GenericConfig, KeccakGoldilocksConfig, PoseidonGoldilocksConfig}; + use crate::plonk::config::{ + GenericConfig, Hasher, KeccakGoldilocksConfig, PoseidonGoldilocksConfig, + }; use crate::plonk::proof::{CompressedProofWithPublicInputs, ProofWithPublicInputs}; use crate::plonk::prover::prove; use crate::util::timing::TimingTree; @@ -322,7 +324,10 @@ mod tests { ProofWithPublicInputs, VerifierOnlyCircuitData, CommonCircuitData, - )> { + )> + where + [(); C::Hasher::HASH_SIZE]:, + { let mut builder = CircuitBuilder::::new(config.clone()); for _ in 0..num_dummy_gates { builder.add_gate(NoopGate, vec![]); @@ -356,6 +361,7 @@ mod tests { )> where InnerC::Hasher: AlgebraicHasher, + [(); C::Hasher::HASH_SIZE]:, { let mut builder = CircuitBuilder::::new(config.clone()); let mut pw = PartialWitness::new(); @@ -407,7 +413,10 @@ mod tests { >( proof: &ProofWithPublicInputs, cd: &CommonCircuitData, - ) -> Result<()> { + ) -> Result<()> + where + [(); C::Hasher::HASH_SIZE]:, + { let proof_bytes = proof.to_bytes()?; info!("Proof length: {} bytes", proof_bytes.len()); let proof_from_bytes = ProofWithPublicInputs::from_bytes(proof_bytes, cd)?; diff --git a/plonky2/src/plonk/verifier.rs b/plonky2/src/plonk/verifier.rs index 5d69dcb1..ee0e976f 100644 --- a/plonky2/src/plonk/verifier.rs +++ b/plonky2/src/plonk/verifier.rs @@ -15,7 +15,10 @@ pub(crate) fn verify, C: GenericConfig, c proof_with_pis: ProofWithPublicInputs, verifier_data: &VerifierOnlyCircuitData, common_data: &CommonCircuitData, -) -> Result<()> { +) -> Result<()> +where + [(); C::Hasher::HASH_SIZE]:, +{ ensure!( proof_with_pis.public_inputs.len() == common_data.num_public_inputs, "Number of public inputs doesn't match circuit data." 
@@ -42,7 +45,10 @@ pub(crate) fn verify_with_challenges< challenges: ProofChallenges, verifier_data: &VerifierOnlyCircuitData, common_data: &CommonCircuitData, -) -> Result<()> { +) -> Result<()> +where + [(); C::Hasher::HASH_SIZE]:, +{ let local_constants = &proof.openings.constants; let local_wires = &proof.openings.wires; let vars = EvaluationVars { diff --git a/starky/src/prover.rs b/starky/src/prover.rs index de97ecce..e88aa619 100644 --- a/starky/src/prover.rs +++ b/starky/src/prover.rs @@ -7,7 +7,7 @@ use plonky2::field::zero_poly_coset::ZeroPolyOnCoset; use plonky2::fri::oracle::PolynomialBatch; use plonky2::hash::hash_types::RichField; use plonky2::iop::challenger::Challenger; -use plonky2::plonk::config::GenericConfig; +use plonky2::plonk::config::{GenericConfig, Hasher}; use plonky2::timed; use plonky2::util::timing::TimingTree; use plonky2::util::transpose; @@ -33,6 +33,7 @@ where S: Stark, [(); S::COLUMNS]:, [(); S::PUBLIC_INPUTS]:, + [(); C::Hasher::HASH_SIZE]:, { let degree = trace.len(); let degree_bits = log2_strict(degree); diff --git a/starky/src/verifier.rs b/starky/src/verifier.rs index 91a51bed..8bf1faab 100644 --- a/starky/src/verifier.rs +++ b/starky/src/verifier.rs @@ -3,7 +3,7 @@ use plonky2::field::extension_field::{Extendable, FieldExtension}; use plonky2::field::field_types::Field; use plonky2::fri::verifier::verify_fri_proof; use plonky2::hash::hash_types::RichField; -use plonky2::plonk::config::GenericConfig; +use plonky2::plonk::config::{GenericConfig, Hasher}; use plonky2::plonk::plonk_common::reduce_with_powers; use plonky2_util::log2_strict; @@ -26,6 +26,7 @@ pub fn verify< where [(); S::COLUMNS]:, [(); S::PUBLIC_INPUTS]:, + [(); C::Hasher::HASH_SIZE]:, { let degree_bits = log2_strict(recover_degree(&proof_with_pis.proof, config)); let challenges = proof_with_pis.get_challenges(config, degree_bits)?; @@ -47,6 +48,7 @@ pub(crate) fn verify_with_challenges< where [(); S::COLUMNS]:, [(); S::PUBLIC_INPUTS]:, + [(); C::Hasher::HASH_SIZE]:, { let StarkProofWithPublicInputs { proof, From 55ca718a777fcdc98b54a37a8fd512b9efc5d022 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Sun, 13 Feb 2022 10:51:27 -0800 Subject: [PATCH 140/143] Test no longer ignored --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1db24c69..4dbd5906 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ in the Plonky2 directory. 
To see recursion performance, one can run this test, which generates a chain of three recursion proofs: ```sh -RUST_LOG=debug RUSTFLAGS=-Ctarget-cpu=native cargo test --release test_recursive_recursive_verifier -- --ignored +RUST_LOG=debug RUSTFLAGS=-Ctarget-cpu=native cargo test --release test_recursive_recursive_verifier ``` From c9171517a4ed57ca41c4cf831af09211e92d88d8 Mon Sep 17 00:00:00 2001 From: BGluth Date: Mon, 14 Feb 2022 10:53:20 -0700 Subject: [PATCH 141/143] Derived more traits for ecdsa types --- plonky2/src/curve/curve_types.rs | 3 ++- plonky2/src/curve/ecdsa.rs | 8 +++++--- plonky2/src/curve/secp256k1.rs | 3 ++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/plonky2/src/curve/curve_types.rs b/plonky2/src/curve/curve_types.rs index 0a9e8711..15f80bc6 100644 --- a/plonky2/src/curve/curve_types.rs +++ b/plonky2/src/curve/curve_types.rs @@ -3,6 +3,7 @@ use std::ops::Neg; use plonky2_field::field_types::{Field, PrimeField}; use plonky2_field::ops::Square; +use serde::{Deserialize, Serialize}; // To avoid implementation conflicts from associated types, // see https://github.com/rust-lang/rust/issues/20400 @@ -36,7 +37,7 @@ pub trait Curve: 'static + Sync + Sized + Copy + Debug { } /// A point on a short Weierstrass curve, represented in affine coordinates. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, Deserialize, Serialize)] pub struct AffinePoint { pub x: C::BaseField, pub y: C::BaseField, diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index 3a5d3c7a..11e05535 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -1,17 +1,19 @@ +use serde::{Deserialize, Serialize}; + use crate::curve::curve_msm::msm_parallel; use crate::curve::curve_types::{base_to_scalar, AffinePoint, Curve, CurveScalar}; use crate::field::field_types::Field; -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub struct ECDSASignature { pub r: C::ScalarField, pub s: C::ScalarField, } -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub struct ECDSASecretKey(pub C::ScalarField); -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct ECDSAPublicKey(pub AffinePoint); pub fn sign_message(msg: C::ScalarField, sk: ECDSASecretKey) -> ECDSASignature { diff --git a/plonky2/src/curve/secp256k1.rs b/plonky2/src/curve/secp256k1.rs index 6a460735..18040dae 100644 --- a/plonky2/src/curve/secp256k1.rs +++ b/plonky2/src/curve/secp256k1.rs @@ -1,10 +1,11 @@ use plonky2_field::field_types::Field; use plonky2_field::secp256k1_base::Secp256K1Base; use plonky2_field::secp256k1_scalar::Secp256K1Scalar; +use serde::{Deserialize, Serialize}; use crate::curve::curve_types::{AffinePoint, Curve}; -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, Deserialize, Eq, Hash, PartialEq, Serialize)] pub struct Secp256K1; impl Curve for Secp256K1 { From 1467732616868f43055f44012096477933959ec5 Mon Sep 17 00:00:00 2001 From: BGluth Date: Mon, 14 Feb 2022 12:41:24 -0700 Subject: [PATCH 142/143] Impled `Hash` for `AffinePoint` --- plonky2/src/curve/curve_types.rs | 12 ++++++++++++ plonky2/src/curve/ecdsa.rs | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/plonky2/src/curve/curve_types.rs b/plonky2/src/curve/curve_types.rs index 15f80bc6..264120c7 100644 --- a/plonky2/src/curve/curve_types.rs +++ b/plonky2/src/curve/curve_types.rs @@ -1,4 +1,5 @@ use std::fmt::Debug; +use 
std::hash::Hash; use std::ops::Neg; use plonky2_field::field_types::{Field, PrimeField}; @@ -120,6 +121,17 @@ impl PartialEq for AffinePoint { impl Eq for AffinePoint {} +impl Hash for AffinePoint { + fn hash(&self, state: &mut H) { + if self.zero { + self.zero.hash(state); + } else { + self.x.hash(state); + self.y.hash(state); + } + } +} + /// A point on a short Weierstrass curve, represented in projective coordinates. #[derive(Copy, Clone, Debug)] pub struct ProjectivePoint { diff --git a/plonky2/src/curve/ecdsa.rs b/plonky2/src/curve/ecdsa.rs index 11e05535..cabe038a 100644 --- a/plonky2/src/curve/ecdsa.rs +++ b/plonky2/src/curve/ecdsa.rs @@ -13,7 +13,7 @@ pub struct ECDSASignature { #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub struct ECDSASecretKey(pub C::ScalarField); -#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub struct ECDSAPublicKey(pub AffinePoint); pub fn sign_message(msg: C::ScalarField, sk: ECDSASecretKey) -> ECDSASignature { From 8d699edf21a1e7276aa465df0a88595b6df1656b Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Mon, 14 Feb 2022 13:47:33 -0800 Subject: [PATCH 143/143] Move some methods outside `impl System` (#484) I didn't really have a good reason for putting there; seems more idiomatic to make them global since they don't need `self`/`Self`. --- system_zero/src/arithmetic/addition.rs | 4 +- system_zero/src/arithmetic/division.rs | 4 +- system_zero/src/arithmetic/mod.rs | 4 +- system_zero/src/arithmetic/multiplication.rs | 4 +- system_zero/src/arithmetic/subtraction.rs | 4 +- system_zero/src/core_registers.rs | 170 +++++---- system_zero/src/permutation_unit.rs | 354 +++++++++---------- system_zero/src/system_zero.rs | 24 +- 8 files changed, 282 insertions(+), 286 deletions(-) diff --git a/system_zero/src/arithmetic/addition.rs b/system_zero/src/arithmetic/addition.rs index 653d533b..7aa0d81a 100644 --- a/system_zero/src/arithmetic/addition.rs +++ b/system_zero/src/arithmetic/addition.rs @@ -1,5 +1,5 @@ use plonky2::field::extension_field::Extendable; -use plonky2::field::field_types::Field; +use plonky2::field::field_types::{Field, PrimeField64}; use plonky2::field::packed_field::PackedField; use plonky2::hash::hash_types::RichField; use plonky2::iop::ext_target::ExtensionTarget; @@ -10,7 +10,7 @@ use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsume use crate::registers::arithmetic::*; use crate::registers::NUM_COLUMNS; -pub(crate) fn generate_addition(values: &mut [F; NUM_COLUMNS]) { +pub(crate) fn generate_addition(values: &mut [F; NUM_COLUMNS]) { let in_1 = values[COL_ADD_INPUT_1].to_canonical_u64(); let in_2 = values[COL_ADD_INPUT_2].to_canonical_u64(); let in_3 = values[COL_ADD_INPUT_3].to_canonical_u64(); diff --git a/system_zero/src/arithmetic/division.rs b/system_zero/src/arithmetic/division.rs index 2f15b233..e91288b9 100644 --- a/system_zero/src/arithmetic/division.rs +++ b/system_zero/src/arithmetic/division.rs @@ -1,5 +1,5 @@ use plonky2::field::extension_field::Extendable; -use plonky2::field::field_types::Field; +use plonky2::field::field_types::{Field, PrimeField64}; use plonky2::field::packed_field::PackedField; use plonky2::hash::hash_types::RichField; use plonky2::iop::ext_target::ExtensionTarget; @@ -9,7 +9,7 @@ use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsume use crate::registers::arithmetic::*; use crate::registers::NUM_COLUMNS; -pub(crate) fn 
generate_division(values: &mut [F; NUM_COLUMNS]) { +pub(crate) fn generate_division(values: &mut [F; NUM_COLUMNS]) { // TODO } diff --git a/system_zero/src/arithmetic/mod.rs b/system_zero/src/arithmetic/mod.rs index 45a9f7d9..a2b3a4f8 100644 --- a/system_zero/src/arithmetic/mod.rs +++ b/system_zero/src/arithmetic/mod.rs @@ -1,5 +1,5 @@ use plonky2::field::extension_field::Extendable; -use plonky2::field::field_types::Field; +use plonky2::field::field_types::{Field, PrimeField64}; use plonky2::field::packed_field::PackedField; use plonky2::hash::hash_types::RichField; use plonky2::plonk::circuit_builder::CircuitBuilder; @@ -24,7 +24,7 @@ mod division; mod multiplication; mod subtraction; -pub(crate) fn generate_arithmetic_unit(values: &mut [F; NUM_COLUMNS]) { +pub(crate) fn generate_arithmetic_unit(values: &mut [F; NUM_COLUMNS]) { if values[IS_ADD].is_one() { generate_addition(values); } else if values[IS_SUB].is_one() { diff --git a/system_zero/src/arithmetic/multiplication.rs b/system_zero/src/arithmetic/multiplication.rs index 2eefad38..70c181d8 100644 --- a/system_zero/src/arithmetic/multiplication.rs +++ b/system_zero/src/arithmetic/multiplication.rs @@ -1,5 +1,5 @@ use plonky2::field::extension_field::Extendable; -use plonky2::field::field_types::Field; +use plonky2::field::field_types::{Field, PrimeField64}; use plonky2::field::packed_field::PackedField; use plonky2::hash::hash_types::RichField; use plonky2::iop::ext_target::ExtensionTarget; @@ -9,7 +9,7 @@ use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsume use crate::registers::arithmetic::*; use crate::registers::NUM_COLUMNS; -pub(crate) fn generate_multiplication(values: &mut [F; NUM_COLUMNS]) { +pub(crate) fn generate_multiplication(values: &mut [F; NUM_COLUMNS]) { // TODO } diff --git a/system_zero/src/arithmetic/subtraction.rs b/system_zero/src/arithmetic/subtraction.rs index 3613dee6..267bac72 100644 --- a/system_zero/src/arithmetic/subtraction.rs +++ b/system_zero/src/arithmetic/subtraction.rs @@ -1,5 +1,5 @@ use plonky2::field::extension_field::Extendable; -use plonky2::field::field_types::Field; +use plonky2::field::field_types::{Field, PrimeField64}; use plonky2::field::packed_field::PackedField; use plonky2::hash::hash_types::RichField; use plonky2::iop::ext_target::ExtensionTarget; @@ -9,7 +9,7 @@ use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsume use crate::registers::arithmetic::*; use crate::registers::NUM_COLUMNS; -pub(crate) fn generate_subtraction(values: &mut [F; NUM_COLUMNS]) { +pub(crate) fn generate_subtraction(values: &mut [F; NUM_COLUMNS]) { // TODO } diff --git a/system_zero/src/core_registers.rs b/system_zero/src/core_registers.rs index 03e7fa04..c8c6533b 100644 --- a/system_zero/src/core_registers.rs +++ b/system_zero/src/core_registers.rs @@ -1,4 +1,5 @@ -use plonky2::field::extension_field::{Extendable, FieldExtension}; +use plonky2::field::extension_field::Extendable; +use plonky2::field::field_types::{Field, PrimeField64}; use plonky2::field::packed_field::PackedField; use plonky2::hash::hash_types::RichField; use plonky2::plonk::circuit_builder::CircuitBuilder; @@ -9,93 +10,84 @@ use starky::vars::StarkEvaluationVars; use crate::public_input_layout::NUM_PUBLIC_INPUTS; use crate::registers::core::*; use crate::registers::NUM_COLUMNS; -use crate::system_zero::SystemZero; -impl, const D: usize> SystemZero { - pub(crate) fn generate_first_row_core_registers(&self, first_values: &mut [F; NUM_COLUMNS]) { - first_values[COL_CLOCK] = F::ZERO; - 
first_values[COL_RANGE_16] = F::ZERO; - first_values[COL_INSTRUCTION_PTR] = F::ZERO; - first_values[COL_FRAME_PTR] = F::ZERO; - first_values[COL_STACK_PTR] = F::ZERO; - } - - pub(crate) fn generate_next_row_core_registers( - &self, - local_values: &[F; NUM_COLUMNS], - next_values: &mut [F; NUM_COLUMNS], - ) { - // We increment the clock by 1. - next_values[COL_CLOCK] = local_values[COL_CLOCK] + F::ONE; - - // We increment the 16-bit table by 1, unless we've reached the max value of 2^16 - 1, in - // which case we repeat that value. - let prev_range_16 = local_values[COL_RANGE_16].to_canonical_u64(); - let next_range_16 = (prev_range_16 + 1).min((1 << 16) - 1); - next_values[COL_RANGE_16] = F::from_canonical_u64(next_range_16); - - // next_values[COL_INSTRUCTION_PTR] = todo!(); - - // next_values[COL_FRAME_PTR] = todo!(); - - // next_values[COL_STACK_PTR] = todo!(); - } - - #[inline] - pub(crate) fn eval_core_registers( - &self, - vars: StarkEvaluationVars, - yield_constr: &mut ConstraintConsumer
, - ) where - FE: FieldExtension, - P: PackedField, - { - // The clock must start with 0, and increment by 1. - let local_clock = vars.local_values[COL_CLOCK]; - let next_clock = vars.next_values[COL_CLOCK]; - let delta_clock = next_clock - local_clock; - yield_constr.constraint_first_row(local_clock); - yield_constr.constraint(delta_clock - FE::ONE); - - // The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1. - let local_range_16 = vars.local_values[COL_RANGE_16]; - let next_range_16 = vars.next_values[COL_RANGE_16]; - let delta_range_16 = next_range_16 - local_range_16; - yield_constr.constraint_first_row(local_range_16); - yield_constr.constraint_last_row(local_range_16 - FE::from_canonical_u64((1 << 16) - 1)); - yield_constr.constraint(delta_range_16 * delta_range_16 - delta_range_16); - - // TODO constraints for stack etc. - } - - pub(crate) fn eval_core_registers_recursively( - &self, - builder: &mut CircuitBuilder, - vars: StarkEvaluationTargets, - yield_constr: &mut RecursiveConstraintConsumer, - ) { - let one_ext = builder.one_extension(); - let max_u16 = builder.constant(F::from_canonical_u64((1 << 16) - 1)); - let max_u16_ext = builder.convert_to_ext(max_u16); - - // The clock must start with 0, and increment by 1. - let local_clock = vars.local_values[COL_CLOCK]; - let next_clock = vars.next_values[COL_CLOCK]; - let delta_clock = builder.sub_extension(next_clock, local_clock); - yield_constr.constraint_first_row(builder, local_clock); - let constraint = builder.sub_extension(delta_clock, one_ext); - yield_constr.constraint(builder, constraint); - - // The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1. - let local_range_16 = vars.local_values[COL_RANGE_16]; - let next_range_16 = vars.next_values[COL_RANGE_16]; - let delta_range_16 = builder.sub_extension(next_range_16, local_range_16); - yield_constr.constraint_first_row(builder, local_range_16); - let constraint = builder.sub_extension(local_range_16, max_u16_ext); - yield_constr.constraint_last_row(builder, constraint); - let constraint = builder.mul_add_extension(delta_range_16, delta_range_16, delta_range_16); - yield_constr.constraint(builder, constraint); - - // TODO constraints for stack etc. - } +pub(crate) fn generate_first_row_core_registers(first_values: &mut [F; NUM_COLUMNS]) { + first_values[COL_CLOCK] = F::ZERO; + first_values[COL_RANGE_16] = F::ZERO; + first_values[COL_INSTRUCTION_PTR] = F::ZERO; + first_values[COL_FRAME_PTR] = F::ZERO; + first_values[COL_STACK_PTR] = F::ZERO; +} + +pub(crate) fn generate_next_row_core_registers( + local_values: &[F; NUM_COLUMNS], + next_values: &mut [F; NUM_COLUMNS], +) { + // We increment the clock by 1. + next_values[COL_CLOCK] = local_values[COL_CLOCK] + F::ONE; + + // We increment the 16-bit table by 1, unless we've reached the max value of 2^16 - 1, in + // which case we repeat that value. + let prev_range_16 = local_values[COL_RANGE_16].to_canonical_u64(); + let next_range_16 = (prev_range_16 + 1).min((1 << 16) - 1); + next_values[COL_RANGE_16] = F::from_canonical_u64(next_range_16); + + // next_values[COL_INSTRUCTION_PTR] = todo!(); + + // next_values[COL_FRAME_PTR] = todo!(); + + // next_values[COL_STACK_PTR] = todo!(); +} + +#[inline] +pub(crate) fn eval_core_registers>( + vars: StarkEvaluationVars, + yield_constr: &mut ConstraintConsumer
, +) { + // The clock must start with 0, and increment by 1. + let local_clock = vars.local_values[COL_CLOCK]; + let next_clock = vars.next_values[COL_CLOCK]; + let delta_clock = next_clock - local_clock; + yield_constr.constraint_first_row(local_clock); + yield_constr.constraint(delta_clock - F::ONE); + + // The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1. + let local_range_16 = vars.local_values[COL_RANGE_16]; + let next_range_16 = vars.next_values[COL_RANGE_16]; + let delta_range_16 = next_range_16 - local_range_16; + yield_constr.constraint_first_row(local_range_16); + yield_constr.constraint_last_row(local_range_16 - F::from_canonical_u64((1 << 16) - 1)); + yield_constr.constraint(delta_range_16 * delta_range_16 - delta_range_16); + + // TODO constraints for stack etc. +} + +pub(crate) fn eval_core_registers_recursively, const D: usize>( + builder: &mut CircuitBuilder, + vars: StarkEvaluationTargets, + yield_constr: &mut RecursiveConstraintConsumer, +) { + let one_ext = builder.one_extension(); + let max_u16 = builder.constant(F::from_canonical_u64((1 << 16) - 1)); + let max_u16_ext = builder.convert_to_ext(max_u16); + + // The clock must start with 0, and increment by 1. + let local_clock = vars.local_values[COL_CLOCK]; + let next_clock = vars.next_values[COL_CLOCK]; + let delta_clock = builder.sub_extension(next_clock, local_clock); + yield_constr.constraint_first_row(builder, local_clock); + let constraint = builder.sub_extension(delta_clock, one_ext); + yield_constr.constraint(builder, constraint); + + // The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1. + let local_range_16 = vars.local_values[COL_RANGE_16]; + let next_range_16 = vars.next_values[COL_RANGE_16]; + let delta_range_16 = builder.sub_extension(next_range_16, local_range_16); + yield_constr.constraint_first_row(builder, local_range_16); + let constraint = builder.sub_extension(local_range_16, max_u16_ext); + yield_constr.constraint_last_row(builder, constraint); + let constraint = builder.mul_add_extension(delta_range_16, delta_range_16, delta_range_16); + yield_constr.constraint(builder, constraint); + + // TODO constraints for stack etc. } diff --git a/system_zero/src/permutation_unit.rs b/system_zero/src/permutation_unit.rs index 2681f2d9..366cff65 100644 --- a/system_zero/src/permutation_unit.rs +++ b/system_zero/src/permutation_unit.rs @@ -2,7 +2,7 @@ use plonky2::field::extension_field::{Extendable, FieldExtension}; use plonky2::field::packed_field::PackedField; use plonky2::hash::hash_types::RichField; use plonky2::hash::hashing::SPONGE_WIDTH; -use plonky2::hash::poseidon::{HALF_N_FULL_ROUNDS, N_PARTIAL_ROUNDS}; +use plonky2::hash::poseidon::{Poseidon, HALF_N_FULL_ROUNDS, N_PARTIAL_ROUNDS}; use plonky2::plonk::circuit_builder::CircuitBuilder; use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; use starky::vars::StarkEvaluationTargets; @@ -11,15 +11,14 @@ use starky::vars::StarkEvaluationVars; use crate::public_input_layout::NUM_PUBLIC_INPUTS; use crate::registers::permutation::*; use crate::registers::NUM_COLUMNS; -use crate::system_zero::SystemZero; -fn constant_layer( +fn constant_layer( mut state: [P; SPONGE_WIDTH], round: usize, ) -> [P; SPONGE_WIDTH] where - F: RichField, - FE: FieldExtension, + F: Poseidon, + FE: FieldExtension, P: PackedField, { // One day I might actually vectorize this, but today is not that day. 
@@ -36,10 +35,10 @@ where state } -fn mds_layer(mut state: [P; SPONGE_WIDTH]) -> [P; SPONGE_WIDTH] +fn mds_layer(mut state: [P; SPONGE_WIDTH]) -> [P; SPONGE_WIDTH] where - F: RichField, - FE: FieldExtension, + F: Poseidon, + FE: FieldExtension, P: PackedField, { for i in 0..P::WIDTH { @@ -55,206 +54,204 @@ where state } -impl, const D: usize> SystemZero { - pub(crate) fn generate_permutation_unit(values: &mut [F; NUM_COLUMNS]) { - // Load inputs. - let mut state = [F::ZERO; SPONGE_WIDTH]; +pub(crate) fn generate_permutation_unit(values: &mut [F; NUM_COLUMNS]) { + // Load inputs. + let mut state = [F::ZERO; SPONGE_WIDTH]; + for i in 0..SPONGE_WIDTH { + state[i] = values[col_input(i)]; + } + + for r in 0..HALF_N_FULL_ROUNDS { + F::constant_layer(&mut state, r); + for i in 0..SPONGE_WIDTH { - state[i] = values[col_input(i)]; + let state_cubed = state[i].cube(); + values[col_full_first_mid_sbox(r, i)] = state_cubed; + state[i] *= state_cubed.square(); // Form state ** 7. } - for r in 0..HALF_N_FULL_ROUNDS { - F::constant_layer(&mut state, r); + state = F::mds_layer(&state); - for i in 0..SPONGE_WIDTH { - let state_cubed = state[i].cube(); - values[col_full_first_mid_sbox(r, i)] = state_cubed; - state[i] *= state_cubed.square(); // Form state ** 7. - } - - state = F::mds_layer(&state); - - for i in 0..SPONGE_WIDTH { - values[col_full_first_after_mds(r, i)] = state[i]; - } - } - - for r in 0..N_PARTIAL_ROUNDS { - F::constant_layer(&mut state, HALF_N_FULL_ROUNDS + r); - - let state0_cubed = state[0].cube(); - values[col_partial_mid_sbox(r)] = state0_cubed; - state[0] *= state0_cubed.square(); // Form state ** 7. - values[col_partial_after_sbox(r)] = state[0]; - - state = F::mds_layer(&state); - } - - for r in 0..HALF_N_FULL_ROUNDS { - F::constant_layer(&mut state, HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r); - - for i in 0..SPONGE_WIDTH { - let state_cubed = state[i].cube(); - values[col_full_second_mid_sbox(r, i)] = state_cubed; - state[i] *= state_cubed.square(); // Form state ** 7. - } - - state = F::mds_layer(&state); - - for i in 0..SPONGE_WIDTH { - values[col_full_second_after_mds(r, i)] = state[i]; - } + for i in 0..SPONGE_WIDTH { + values[col_full_first_after_mds(r, i)] = state[i]; } } - #[inline] - pub(crate) fn eval_permutation_unit( - vars: StarkEvaluationVars, - yield_constr: &mut ConstraintConsumer
, - ) where - FE: FieldExtension, - P: PackedField, - { - let local_values = &vars.local_values; + for r in 0..N_PARTIAL_ROUNDS { + F::constant_layer(&mut state, HALF_N_FULL_ROUNDS + r); + + let state0_cubed = state[0].cube(); + values[col_partial_mid_sbox(r)] = state0_cubed; + state[0] *= state0_cubed.square(); // Form state ** 7. + values[col_partial_after_sbox(r)] = state[0]; + + state = F::mds_layer(&state); + } + + for r in 0..HALF_N_FULL_ROUNDS { + F::constant_layer(&mut state, HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r); - // Load inputs. - let mut state = [P::ZEROS; SPONGE_WIDTH]; for i in 0..SPONGE_WIDTH { - state[i] = local_values[col_input(i)]; + let state_cubed = state[i].cube(); + values[col_full_second_mid_sbox(r, i)] = state_cubed; + state[i] *= state_cubed.square(); // Form state ** 7. } - for r in 0..HALF_N_FULL_ROUNDS { - state = constant_layer(state, r); + state = F::mds_layer(&state); - for i in 0..SPONGE_WIDTH { - let state_cubed = state[i] * state[i].square(); - yield_constr - .constraint_wrapping(state_cubed - local_values[col_full_first_mid_sbox(r, i)]); - let state_cubed = local_values[col_full_first_mid_sbox(r, i)]; - state[i] *= state_cubed.square(); // Form state ** 7. - } + for i in 0..SPONGE_WIDTH { + values[col_full_second_after_mds(r, i)] = state[i]; + } + } +} - state = mds_layer(state); +#[inline] +pub(crate) fn eval_permutation_unit( + vars: StarkEvaluationVars, + yield_constr: &mut ConstraintConsumer
, +) where + F: Poseidon, + FE: FieldExtension, + P: PackedField, +{ + let local_values = &vars.local_values; - for i in 0..SPONGE_WIDTH { - yield_constr - .constraint_wrapping(state[i] - local_values[col_full_first_after_mds(r, i)]); - state[i] = local_values[col_full_first_after_mds(r, i)]; - } + // Load inputs. + let mut state = [P::ZEROS; SPONGE_WIDTH]; + for i in 0..SPONGE_WIDTH { + state[i] = local_values[col_input(i)]; + } + + for r in 0..HALF_N_FULL_ROUNDS { + state = constant_layer(state, r); + + for i in 0..SPONGE_WIDTH { + let state_cubed = state[i] * state[i].square(); + yield_constr + .constraint_wrapping(state_cubed - local_values[col_full_first_mid_sbox(r, i)]); + let state_cubed = local_values[col_full_first_mid_sbox(r, i)]; + state[i] *= state_cubed.square(); // Form state ** 7. } - for r in 0..N_PARTIAL_ROUNDS { - state = constant_layer(state, HALF_N_FULL_ROUNDS + r); + state = mds_layer(state); - let state0_cubed = state[0] * state[0].square(); - yield_constr.constraint_wrapping(state0_cubed - local_values[col_partial_mid_sbox(r)]); - let state0_cubed = local_values[col_partial_mid_sbox(r)]; - state[0] *= state0_cubed.square(); // Form state ** 7. - yield_constr.constraint_wrapping(state[0] - local_values[col_partial_after_sbox(r)]); - state[0] = local_values[col_partial_after_sbox(r)]; - - state = mds_layer(state); - } - - for r in 0..HALF_N_FULL_ROUNDS { - state = constant_layer(state, HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r); - - for i in 0..SPONGE_WIDTH { - let state_cubed = state[i] * state[i].square(); - yield_constr.constraint_wrapping( - state_cubed - local_values[col_full_second_mid_sbox(r, i)], - ); - let state_cubed = local_values[col_full_second_mid_sbox(r, i)]; - state[i] *= state_cubed.square(); // Form state ** 7. - } - - state = mds_layer(state); - - for i in 0..SPONGE_WIDTH { - yield_constr - .constraint_wrapping(state[i] - local_values[col_full_second_after_mds(r, i)]); - state[i] = local_values[col_full_second_after_mds(r, i)]; - } + for i in 0..SPONGE_WIDTH { + yield_constr + .constraint_wrapping(state[i] - local_values[col_full_first_after_mds(r, i)]); + state[i] = local_values[col_full_first_after_mds(r, i)]; } } - pub(crate) fn eval_permutation_unit_recursively( - builder: &mut CircuitBuilder, - vars: StarkEvaluationTargets, - yield_constr: &mut RecursiveConstraintConsumer, - ) { - let zero = builder.zero_extension(); - let local_values = &vars.local_values; + for r in 0..N_PARTIAL_ROUNDS { + state = constant_layer(state, HALF_N_FULL_ROUNDS + r); + + let state0_cubed = state[0] * state[0].square(); + yield_constr.constraint_wrapping(state0_cubed - local_values[col_partial_mid_sbox(r)]); + let state0_cubed = local_values[col_partial_mid_sbox(r)]; + state[0] *= state0_cubed.square(); // Form state ** 7. + yield_constr.constraint_wrapping(state[0] - local_values[col_partial_after_sbox(r)]); + state[0] = local_values[col_partial_after_sbox(r)]; + + state = mds_layer(state); + } + + for r in 0..HALF_N_FULL_ROUNDS { + state = constant_layer(state, HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r); - // Load inputs. - let mut state = [zero; SPONGE_WIDTH]; for i in 0..SPONGE_WIDTH { - state[i] = local_values[col_input(i)]; + let state_cubed = state[i] * state[i].square(); + yield_constr + .constraint_wrapping(state_cubed - local_values[col_full_second_mid_sbox(r, i)]); + let state_cubed = local_values[col_full_second_mid_sbox(r, i)]; + state[i] *= state_cubed.square(); // Form state ** 7. 
} - for r in 0..HALF_N_FULL_ROUNDS { - F::constant_layer_recursive(builder, &mut state, r); + state = mds_layer(state); - for i in 0..SPONGE_WIDTH { - let state_cubed = builder.cube_extension(state[i]); - let diff = - builder.sub_extension(state_cubed, local_values[col_full_first_mid_sbox(r, i)]); - yield_constr.constraint_wrapping(builder, diff); - let state_cubed = local_values[col_full_first_mid_sbox(r, i)]; - state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]); - // Form state ** 7. - } - - state = F::mds_layer_recursive(builder, &state); - - for i in 0..SPONGE_WIDTH { - let diff = - builder.sub_extension(state[i], local_values[col_full_first_after_mds(r, i)]); - yield_constr.constraint_wrapping(builder, diff); - state[i] = local_values[col_full_first_after_mds(r, i)]; - } + for i in 0..SPONGE_WIDTH { + yield_constr + .constraint_wrapping(state[i] - local_values[col_full_second_after_mds(r, i)]); + state[i] = local_values[col_full_second_after_mds(r, i)]; } + } +} - for r in 0..N_PARTIAL_ROUNDS { - F::constant_layer_recursive(builder, &mut state, HALF_N_FULL_ROUNDS + r); +pub(crate) fn eval_permutation_unit_recursively, const D: usize>( + builder: &mut CircuitBuilder, + vars: StarkEvaluationTargets, + yield_constr: &mut RecursiveConstraintConsumer, +) { + let zero = builder.zero_extension(); + let local_values = &vars.local_values; - let state0_cubed = builder.cube_extension(state[0]); - let diff = builder.sub_extension(state0_cubed, local_values[col_partial_mid_sbox(r)]); + // Load inputs. + let mut state = [zero; SPONGE_WIDTH]; + for i in 0..SPONGE_WIDTH { + state[i] = local_values[col_input(i)]; + } + + for r in 0..HALF_N_FULL_ROUNDS { + F::constant_layer_recursive(builder, &mut state, r); + + for i in 0..SPONGE_WIDTH { + let state_cubed = builder.cube_extension(state[i]); + let diff = + builder.sub_extension(state_cubed, local_values[col_full_first_mid_sbox(r, i)]); yield_constr.constraint_wrapping(builder, diff); - let state0_cubed = local_values[col_partial_mid_sbox(r)]; - state[0] = builder.mul_many_extension(&[state[0], state0_cubed, state0_cubed]); // Form state ** 7. - let diff = builder.sub_extension(state[0], local_values[col_partial_after_sbox(r)]); - yield_constr.constraint_wrapping(builder, diff); - state[0] = local_values[col_partial_after_sbox(r)]; - - state = F::mds_layer_recursive(builder, &state); + let state_cubed = local_values[col_full_first_mid_sbox(r, i)]; + state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]); + // Form state ** 7. } - for r in 0..HALF_N_FULL_ROUNDS { - F::constant_layer_recursive( - builder, - &mut state, - HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r, - ); + state = F::mds_layer_recursive(builder, &state); - for i in 0..SPONGE_WIDTH { - let state_cubed = builder.cube_extension(state[i]); - let diff = builder - .sub_extension(state_cubed, local_values[col_full_second_mid_sbox(r, i)]); - yield_constr.constraint_wrapping(builder, diff); - let state_cubed = local_values[col_full_second_mid_sbox(r, i)]; - state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]); - // Form state ** 7. 
- } + for i in 0..SPONGE_WIDTH { + let diff = + builder.sub_extension(state[i], local_values[col_full_first_after_mds(r, i)]); + yield_constr.constraint_wrapping(builder, diff); + state[i] = local_values[col_full_first_after_mds(r, i)]; + } + } - state = F::mds_layer_recursive(builder, &state); + for r in 0..N_PARTIAL_ROUNDS { + F::constant_layer_recursive(builder, &mut state, HALF_N_FULL_ROUNDS + r); - for i in 0..SPONGE_WIDTH { - let diff = - builder.sub_extension(state[i], local_values[col_full_second_after_mds(r, i)]); - yield_constr.constraint_wrapping(builder, diff); - state[i] = local_values[col_full_second_after_mds(r, i)]; - } + let state0_cubed = builder.cube_extension(state[0]); + let diff = builder.sub_extension(state0_cubed, local_values[col_partial_mid_sbox(r)]); + yield_constr.constraint_wrapping(builder, diff); + let state0_cubed = local_values[col_partial_mid_sbox(r)]; + state[0] = builder.mul_many_extension(&[state[0], state0_cubed, state0_cubed]); // Form state ** 7. + let diff = builder.sub_extension(state[0], local_values[col_partial_after_sbox(r)]); + yield_constr.constraint_wrapping(builder, diff); + state[0] = local_values[col_partial_after_sbox(r)]; + + state = F::mds_layer_recursive(builder, &state); + } + + for r in 0..HALF_N_FULL_ROUNDS { + F::constant_layer_recursive( + builder, + &mut state, + HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r, + ); + + for i in 0..SPONGE_WIDTH { + let state_cubed = builder.cube_extension(state[i]); + let diff = + builder.sub_extension(state_cubed, local_values[col_full_second_mid_sbox(r, i)]); + yield_constr.constraint_wrapping(builder, diff); + let state_cubed = local_values[col_full_second_mid_sbox(r, i)]; + state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]); + // Form state ** 7. 
+ } + + state = F::mds_layer_recursive(builder, &state); + + for i in 0..SPONGE_WIDTH { + let diff = + builder.sub_extension(state[i], local_values[col_full_second_after_mds(r, i)]); + yield_constr.constraint_wrapping(builder, diff); + state[i] = local_values[col_full_second_after_mds(r, i)]; } } } @@ -269,11 +266,10 @@ mod tests { use starky::constraint_consumer::ConstraintConsumer; use starky::vars::StarkEvaluationVars; - use crate::permutation_unit::SPONGE_WIDTH; + use crate::permutation_unit::{eval_permutation_unit, generate_permutation_unit, SPONGE_WIDTH}; use crate::public_input_layout::NUM_PUBLIC_INPUTS; use crate::registers::permutation::{col_input, col_output}; use crate::registers::NUM_COLUMNS; - use crate::system_zero::SystemZero; #[test] fn generate_eval_consistency() { @@ -281,7 +277,7 @@ mod tests { type F = GoldilocksField; let mut values = [F::default(); NUM_COLUMNS]; - SystemZero::::generate_permutation_unit(&mut values); + generate_permutation_unit(&mut values); let vars = StarkEvaluationVars { local_values: &values, @@ -295,7 +291,7 @@ mod tests { GoldilocksField::ONE, GoldilocksField::ONE, ); - SystemZero::::eval_permutation_unit(vars, &mut constrant_consumer); + eval_permutation_unit(vars, &mut constrant_consumer); for &acc in &constrant_consumer.constraint_accs { assert_eq!(acc, GoldilocksField::ZERO); } @@ -318,7 +314,7 @@ mod tests { for i in 0..SPONGE_WIDTH { values[col_input(i)] = state[i]; } - SystemZero::::generate_permutation_unit(&mut values); + generate_permutation_unit(&mut values); let mut result = [F::default(); SPONGE_WIDTH]; for i in 0..SPONGE_WIDTH { result[i] = values[col_output(i)]; diff --git a/system_zero/src/system_zero.rs b/system_zero/src/system_zero.rs index 780b1d38..2eeb4697 100644 --- a/system_zero/src/system_zero.rs +++ b/system_zero/src/system_zero.rs @@ -12,7 +12,14 @@ use starky::vars::StarkEvaluationVars; use crate::arithmetic::{ eval_arithmetic_unit, eval_arithmetic_unit_recursively, generate_arithmetic_unit, }; +use crate::core_registers::{ + eval_core_registers, eval_core_registers_recursively, generate_first_row_core_registers, + generate_next_row_core_registers, +}; use crate::memory::TransactionMemory; +use crate::permutation_unit::{ + eval_permutation_unit, eval_permutation_unit_recursively, generate_permutation_unit, +}; use crate::public_input_layout::NUM_PUBLIC_INPUTS; use crate::registers::NUM_COLUMNS; @@ -29,16 +36,17 @@ impl, const D: usize> SystemZero { let memory = TransactionMemory::default(); let mut row = [F::ZERO; NUM_COLUMNS]; - self.generate_first_row_core_registers(&mut row); - Self::generate_permutation_unit(&mut row); + generate_first_row_core_registers(&mut row); + generate_arithmetic_unit(&mut row); + generate_permutation_unit(&mut row); let mut trace = Vec::with_capacity(MIN_TRACE_ROWS); loop { let mut next_row = [F::ZERO; NUM_COLUMNS]; - self.generate_next_row_core_registers(&row, &mut next_row); + generate_next_row_core_registers(&row, &mut next_row); generate_arithmetic_unit(&mut next_row); - Self::generate_permutation_unit(&mut next_row); + generate_permutation_unit(&mut next_row); trace.push(row); row = next_row; @@ -74,9 +82,9 @@ impl, const D: usize> Stark for SystemZero, P: PackedField, { - self.eval_core_registers(vars, yield_constr); + eval_core_registers(vars, yield_constr); eval_arithmetic_unit(vars, yield_constr); - Self::eval_permutation_unit(vars, yield_constr); + eval_permutation_unit::(vars, yield_constr); // TODO: Other units } @@ -86,9 +94,9 @@ impl, const D: usize> Stark for SystemZero, 
yield_constr: &mut RecursiveConstraintConsumer, ) { - self.eval_core_registers_recursively(builder, vars, yield_constr); + eval_core_registers_recursively(builder, vars, yield_constr); eval_arithmetic_unit_recursively(builder, vars, yield_constr); - Self::eval_permutation_unit_recursively(builder, vars, yield_constr); + eval_permutation_unit_recursively(builder, vars, yield_constr); // TODO: Other units }
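The final patch above hoists associated functions out of `impl SystemZero` into free generic functions because they never touch `self` or `Self`; the generic bound then lives on each function and can be narrowed to exactly what its body needs. Below is a minimal, self-contained sketch of that refactor pattern only; `Unit`, `generate_row`, and the `Copy + Default` bound are hypothetical stand-ins for illustration, not types or bounds from the plonky2 crates.

```rust
// Sketch of the "move it out of the impl" refactor, with made-up names.
struct Unit;

impl Unit {
    // Before: an associated function that never uses `self`/`Self`,
    // so the impl block adds nothing but a `Unit::` prefix at call sites.
    fn generate_row<F: Copy + Default>(values: &mut [F; 4]) {
        values[0] = F::default();
    }
}

// After: the same body as a free function; the trait bound now sits on the
// function itself rather than being inherited from the surrounding impl.
fn generate_row<F: Copy + Default>(values: &mut [F; 4]) {
    values[0] = F::default();
}

fn main() {
    let mut row = [0u64; 4];
    Unit::generate_row(&mut row); // old style: called through the type
    generate_row(&mut row); // new style: plain free-function call
}
```

Keeping such helpers free also means callers no longer have to thread a receiver through code that is purely a function of the trace row, which is the idiom the commit message is pointing at.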