mirror of https://github.com/logos-storage/plonky2.git
synced 2026-01-08 08:43:06 +00:00

Merge branch 'main' into recursive_starks

commit 14d8cf2ceb
@@ -26,7 +26,7 @@ in the Plonky2 directory.

To see recursion performance, one can run this test, which generates a chain of three recursion proofs:

```sh
RUST_LOG=debug RUSTFLAGS=-Ctarget-cpu=native cargo test --release test_recursive_recursive_verifier -- --ignored
RUST_LOG=debug RUSTFLAGS=-Ctarget-cpu=native cargo test --release test_recursive_recursive_verifier
```
@@ -95,10 +95,6 @@ impl<F: Extendable<2>> Field for QuadraticExtension<F> {
        Self([F::from_biguint(low), F::from_biguint(high)])
    }

    fn to_biguint(&self) -> BigUint {
        self.0[0].to_biguint() + F::order() * self.0[1].to_biguint()
    }

    fn from_canonical_u64(n: u64) -> Self {
        F::from_canonical_u64(n).into()
    }

@@ -107,14 +107,6 @@ impl<F: Extendable<4>> Field for QuarticExtension<F> {
        ])
    }

    fn to_biguint(&self) -> BigUint {
        let mut result = self.0[3].to_biguint();
        result = result * F::order() + self.0[2].to_biguint();
        result = result * F::order() + self.0[1].to_biguint();
        result = result * F::order() + self.0[0].to_biguint();
        result
    }

    fn from_canonical_u64(n: u64) -> Self {
        F::from_canonical_u64(n).into()
    }

@@ -268,9 +268,6 @@ pub trait Field:
    // Rename to `from_noncanonical_biguint` and have it return `n % Self::characteristic()`.
    fn from_biguint(n: BigUint) -> Self;

    // TODO: Move to a new `PrimeField` trait.
    fn to_biguint(&self) -> BigUint;

    /// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`.
    // TODO: Should probably be unsafe.
    fn from_canonical_u64(n: u64) -> Self;

@@ -281,6 +278,12 @@ pub trait Field:
        Self::from_canonical_u64(n as u64)
    }

    /// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`.
    // TODO: Should probably be unsafe.
    fn from_canonical_u16(n: u16) -> Self {
        Self::from_canonical_u64(n as u64)
    }

    /// Returns `n`. Assumes that `n` is already in canonical form, i.e. `n < Self::order()`.
    // TODO: Should probably be unsafe.
    fn from_canonical_usize(n: usize) -> Self {

@@ -407,16 +410,14 @@ pub trait Field:
    }
}

pub trait PrimeField: Field {
    fn to_canonical_biguint(&self) -> BigUint;
}

/// A finite field of order less than 2^64.
pub trait Field64: Field {
    const ORDER: u64;

    // TODO: Only well-defined for prime 64-bit fields. Move to a new PrimeField64 trait?
    fn to_canonical_u64(&self) -> u64;

    // TODO: Only well-defined for prime 64-bit fields. Move to a new PrimeField64 trait?
    fn to_noncanonical_u64(&self) -> u64;

    /// Returns `x % Self::CHARACTERISTIC`.
    // TODO: Move to `Field`.
    fn from_noncanonical_u64(n: u64) -> Self;

@@ -456,6 +457,13 @@ pub trait Field64: Field {
    }
}

/// A finite field of prime order less than 2^64.
pub trait PrimeField64: PrimeField + Field64 {
    fn to_canonical_u64(&self) -> u64;

    fn to_noncanonical_u64(&self) -> u64;
}

/// An iterator over the powers of a certain base element `b`: `b^0, b^1, b^2, ...`.
#[derive(Clone)]
pub struct Powers<F: Field> {
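The hunks above split the field trait hierarchy: `to_biguint` leaves `Field` and reappears on the new `PrimeField` trait as `to_canonical_biguint`, while the prime-only `u64` conversions get a dedicated `PrimeField64: PrimeField + Field64` trait. A minimal sketch of code written against the new hierarchy, assuming the module paths that appear elsewhere in this diff (`plonky2_field::field_types`, `plonky2_field::goldilocks_field`); the function and values are illustrative, not part of the commit:

```rust
use plonky2_field::field_types::{Field, PrimeField64};
use plonky2_field::goldilocks_field::GoldilocksField;

/// Serialize a prime-field element of order < 2^64. Extension fields such as
/// `QuadraticExtension<F>` implement `Field` but not `PrimeField64`, so they
/// can no longer call this by accident.
fn canonical_bytes<F: PrimeField64>(x: F) -> [u8; 8] {
    // `to_canonical_u64` is only well-defined for prime 64-bit fields,
    // which is why it now lives on `PrimeField64`.
    x.to_canonical_u64().to_le_bytes()
}

fn main() {
    let x = GoldilocksField::from_canonical_u64(42);
    assert_eq!(canonical_bytes(x), 42u64.to_le_bytes());
}
```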
@@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize};
use crate::extension_field::quadratic::QuadraticExtension;
use crate::extension_field::quartic::QuarticExtension;
use crate::extension_field::{Extendable, Frobenius};
use crate::field_types::{Field, Field64};
use crate::field_types::{Field, Field64, PrimeField, PrimeField64};
use crate::inversion::try_inverse_u64;

const EPSILON: u64 = (1 << 32) - 1;

@@ -98,10 +98,6 @@ impl Field for GoldilocksField {
        Self(n.mod_floor(&Self::order()).to_u64_digits()[0])
    }

    fn to_biguint(&self) -> BigUint {
        self.to_canonical_u64().into()
    }

    #[inline]
    fn from_canonical_u64(n: u64) -> Self {
        debug_assert!(n < Self::ORDER);

@@ -123,23 +119,15 @@ impl Field for GoldilocksField {
    }
}

impl PrimeField for GoldilocksField {
    fn to_canonical_biguint(&self) -> BigUint {
        self.to_canonical_u64().into()
    }
}

impl Field64 for GoldilocksField {
    const ORDER: u64 = 0xFFFFFFFF00000001;

    #[inline]
    fn to_canonical_u64(&self) -> u64 {
        let mut c = self.0;
        // We only need one condition subtraction, since 2 * ORDER would not fit in a u64.
        if c >= Self::ORDER {
            c -= Self::ORDER;
        }
        c
    }

    fn to_noncanonical_u64(&self) -> u64 {
        self.0
    }

    #[inline]
    fn from_noncanonical_u64(n: u64) -> Self {
        Self(n)

@@ -160,6 +148,22 @@ impl Field64 for GoldilocksField {
    }
}

impl PrimeField64 for GoldilocksField {
    #[inline]
    fn to_canonical_u64(&self) -> u64 {
        let mut c = self.0;
        // We only need one condition subtraction, since 2 * ORDER would not fit in a u64.
        if c >= Self::ORDER {
            c -= Self::ORDER;
        }
        c
    }

    fn to_noncanonical_u64(&self) -> u64 {
        self.0
    }
}

impl Neg for GoldilocksField {
    type Output = Self;
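With the hunks above, `GoldilocksField` gains `PrimeField` and `PrimeField64` impls alongside `Field64`. A short hedged sketch of the difference between the canonical and noncanonical `u64` views, assuming (as the hunks suggest) that the `u64` conversions end up on `PrimeField64`; the value is chosen purely for illustration:

```rust
use plonky2_field::field_types::{Field64, PrimeField64};
use plonky2_field::goldilocks_field::GoldilocksField;

fn main() {
    // `from_noncanonical_u64` stores the raw limb, so ORDER itself is accepted
    // even though it is not a canonical element.
    let x = GoldilocksField::from_noncanonical_u64(GoldilocksField::ORDER);

    // The internal representation is returned unchanged...
    assert_eq!(x.to_noncanonical_u64(), GoldilocksField::ORDER);

    // ...while the canonical view reduces it with a single conditional subtraction.
    assert_eq!(x.to_canonical_u64(), 0);
}
```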
@@ -1,4 +1,4 @@
use crate::field_types::Field64;
use crate::field_types::PrimeField64;

/// This is a 'safe' iteration for the modular inversion algorithm. It
/// is safe in the sense that it will produce the right answer even

@@ -63,7 +63,7 @@ unsafe fn unsafe_iteration(f: &mut u64, g: &mut u64, c: &mut i128, d: &mut i128,
/// Elliptic and Hyperelliptic Cryptography, Algorithms 11.6
/// and 11.12.
#[allow(clippy::many_single_char_names)]
pub(crate) fn try_inverse_u64<F: Field64>(x: &F) -> Option<F> {
pub(crate) fn try_inverse_u64<F: PrimeField64>(x: &F) -> Option<F> {
    let mut f = x.to_noncanonical_u64();
    let mut g = F::ORDER;
    // NB: These two are very rarely such that their absolute
@@ -1,4 +1,4 @@
use crate::field_types::Field64;
use crate::field_types::PrimeField64;

/// Generates a series of non-negative integers less than `modulus` which cover a range of
/// interesting test values.

@@ -19,7 +19,7 @@ pub fn test_inputs(modulus: u64) -> Vec<u64> {
/// word_bits)` and panic if the two resulting vectors differ.
pub fn run_unaryop_test_cases<F, UnaryOp, ExpectedOp>(op: UnaryOp, expected_op: ExpectedOp)
where
    F: Field64,
    F: PrimeField64,
    UnaryOp: Fn(F) -> F,
    ExpectedOp: Fn(u64) -> u64,
{

@@ -43,7 +43,7 @@ where
/// Apply the binary functions `op` and `expected_op` to each pair of inputs.
pub fn run_binaryop_test_cases<F, BinaryOp, ExpectedOp>(op: BinaryOp, expected_op: ExpectedOp)
where
    F: Field64,
    F: PrimeField64,
    BinaryOp: Fn(F, F) -> F,
    ExpectedOp: Fn(u64, u64) -> u64,
{
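These field test helpers now bound `F` by `PrimeField64` so that results can be checked against a plain `u64` model of the field. A hedged usage sketch; the import path (`prime_field_testing`) and the negation closure are illustrative assumptions, not taken from this diff:

```rust
use plonky2_field::field_types::Field64;
use plonky2_field::goldilocks_field::GoldilocksField;
// Hypothetical import path for the helper shown in the hunk above.
use plonky2_field::prime_field_testing::run_unaryop_test_cases;

fn main() {
    // Check field negation against its u64 model: -x == ORDER - x, and -0 == 0.
    run_unaryop_test_cases::<GoldilocksField, _, _>(
        |x| -x,
        |x| if x == 0 { 0 } else { GoldilocksField::ORDER - x },
    );
}
```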
@ -10,7 +10,7 @@ use num::{Integer, One};
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::field_types::Field;
|
||||
use crate::field_types::{Field, PrimeField};
|
||||
|
||||
/// The base field of the secp256k1 elliptic curve.
|
||||
///
|
||||
@ -42,7 +42,7 @@ impl Default for Secp256K1Base {
|
||||
|
||||
impl PartialEq for Secp256K1Base {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.to_biguint() == other.to_biguint()
|
||||
self.to_canonical_biguint() == other.to_canonical_biguint()
|
||||
}
|
||||
}
|
||||
|
||||
@ -50,19 +50,19 @@ impl Eq for Secp256K1Base {}
|
||||
|
||||
impl Hash for Secp256K1Base {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.to_biguint().hash(state)
|
||||
self.to_canonical_biguint().hash(state)
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Secp256K1Base {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
Display::fmt(&self.to_biguint(), f)
|
||||
Display::fmt(&self.to_canonical_biguint(), f)
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for Secp256K1Base {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
Debug::fmt(&self.to_biguint(), f)
|
||||
Debug::fmt(&self.to_canonical_biguint(), f)
|
||||
}
|
||||
}
|
||||
|
||||
@ -107,14 +107,6 @@ impl Field for Secp256K1Base {
|
||||
Some(self.exp_biguint(&(Self::order() - BigUint::one() - BigUint::one())))
|
||||
}
|
||||
|
||||
fn to_biguint(&self) -> BigUint {
|
||||
let mut result = biguint_from_array(self.0);
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
fn from_biguint(val: BigUint) -> Self {
|
||||
Self(
|
||||
val.to_u64_digits()
|
||||
@ -146,6 +138,16 @@ impl Field for Secp256K1Base {
|
||||
}
|
||||
}
|
||||
|
||||
impl PrimeField for Secp256K1Base {
|
||||
fn to_canonical_biguint(&self) -> BigUint {
|
||||
let mut result = biguint_from_array(self.0);
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
impl Neg for Secp256K1Base {
|
||||
type Output = Self;
|
||||
|
||||
@ -154,7 +156,7 @@ impl Neg for Secp256K1Base {
|
||||
if self.is_zero() {
|
||||
Self::ZERO
|
||||
} else {
|
||||
Self::from_biguint(Self::order() - self.to_biguint())
|
||||
Self::from_biguint(Self::order() - self.to_canonical_biguint())
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -164,7 +166,7 @@ impl Add for Secp256K1Base {
|
||||
|
||||
#[inline]
|
||||
fn add(self, rhs: Self) -> Self {
|
||||
let mut result = self.to_biguint() + rhs.to_biguint();
|
||||
let mut result = self.to_canonical_biguint() + rhs.to_canonical_biguint();
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
@ -207,7 +209,9 @@ impl Mul for Secp256K1Base {
|
||||
|
||||
#[inline]
|
||||
fn mul(self, rhs: Self) -> Self {
|
||||
Self::from_biguint((self.to_biguint() * rhs.to_biguint()).mod_floor(&Self::order()))
|
||||
Self::from_biguint(
|
||||
(self.to_canonical_biguint() * rhs.to_canonical_biguint()).mod_floor(&Self::order()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -11,7 +11,7 @@ use num::{Integer, One};
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::field_types::Field;
|
||||
use crate::field_types::{Field, PrimeField};
|
||||
|
||||
/// The base field of the secp256k1 elliptic curve.
|
||||
///
|
||||
@ -45,7 +45,7 @@ impl Default for Secp256K1Scalar {
|
||||
|
||||
impl PartialEq for Secp256K1Scalar {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.to_biguint() == other.to_biguint()
|
||||
self.to_canonical_biguint() == other.to_canonical_biguint()
|
||||
}
|
||||
}
|
||||
|
||||
@ -53,19 +53,19 @@ impl Eq for Secp256K1Scalar {}
|
||||
|
||||
impl Hash for Secp256K1Scalar {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.to_biguint().hash(state)
|
||||
self.to_canonical_biguint().hash(state)
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Secp256K1Scalar {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
Display::fmt(&self.to_biguint(), f)
|
||||
Display::fmt(&self.to_canonical_biguint(), f)
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for Secp256K1Scalar {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
Debug::fmt(&self.to_biguint(), f)
|
||||
Debug::fmt(&self.to_canonical_biguint(), f)
|
||||
}
|
||||
}
|
||||
|
||||
@ -116,14 +116,6 @@ impl Field for Secp256K1Scalar {
|
||||
Some(self.exp_biguint(&(Self::order() - BigUint::one() - BigUint::one())))
|
||||
}
|
||||
|
||||
fn to_biguint(&self) -> BigUint {
|
||||
let mut result = biguint_from_array(self.0);
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
fn from_biguint(val: BigUint) -> Self {
|
||||
Self(
|
||||
val.to_u64_digits()
|
||||
@ -155,6 +147,16 @@ impl Field for Secp256K1Scalar {
|
||||
}
|
||||
}
|
||||
|
||||
impl PrimeField for Secp256K1Scalar {
|
||||
fn to_canonical_biguint(&self) -> BigUint {
|
||||
let mut result = biguint_from_array(self.0);
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
impl Neg for Secp256K1Scalar {
|
||||
type Output = Self;
|
||||
|
||||
@ -163,7 +165,7 @@ impl Neg for Secp256K1Scalar {
|
||||
if self.is_zero() {
|
||||
Self::ZERO
|
||||
} else {
|
||||
Self::from_biguint(Self::order() - self.to_biguint())
|
||||
Self::from_biguint(Self::order() - self.to_canonical_biguint())
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -173,7 +175,7 @@ impl Add for Secp256K1Scalar {
|
||||
|
||||
#[inline]
|
||||
fn add(self, rhs: Self) -> Self {
|
||||
let mut result = self.to_biguint() + rhs.to_biguint();
|
||||
let mut result = self.to_canonical_biguint() + rhs.to_canonical_biguint();
|
||||
if result >= Self::order() {
|
||||
result -= Self::order();
|
||||
}
|
||||
@ -216,7 +218,9 @@ impl Mul for Secp256K1Scalar {
|
||||
|
||||
#[inline]
|
||||
fn mul(self, rhs: Self) -> Self {
|
||||
Self::from_biguint((self.to_biguint() * rhs.to_biguint()).mod_floor(&Self::order()))
|
||||
Self::from_biguint(
|
||||
(self.to_canonical_biguint() * rhs.to_canonical_biguint()).mod_floor(&Self::order()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -1,3 +1,5 @@
|
||||
#![feature(generic_const_exprs)]
|
||||
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
use plonky2::field::goldilocks_field::GoldilocksField;
|
||||
use plonky2::hash::hash_types::RichField;
|
||||
@ -9,7 +11,10 @@ use tynm::type_name;
|
||||
|
||||
const ELEMS_PER_LEAF: usize = 135;
|
||||
|
||||
pub(crate) fn bench_merkle_tree<F: RichField, H: Hasher<F>>(c: &mut Criterion) {
|
||||
pub(crate) fn bench_merkle_tree<F: RichField, H: Hasher<F>>(c: &mut Criterion)
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
let mut group = c.benchmark_group(&format!(
|
||||
"merkle-tree<{}, {}>",
|
||||
type_name::<F>(),
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
use itertools::Itertools;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
use rayon::prelude::*;
|
||||
|
||||
use crate::curve::curve_summation::affine_multisummation_best;
|
||||
@ -160,7 +161,7 @@ pub(crate) fn to_digits<C: Curve>(x: &C::ScalarField, w: usize) -> Vec<usize> {
|
||||
|
||||
// Convert x to a bool array.
|
||||
let x_canonical: Vec<_> = x
|
||||
.to_biguint()
|
||||
.to_canonical_biguint()
|
||||
.to_u64_digits()
|
||||
.iter()
|
||||
.cloned()
|
||||
@ -187,6 +188,7 @@ pub(crate) fn to_digits<C: Curve>(x: &C::ScalarField, w: usize) -> Vec<usize> {
|
||||
mod tests {
|
||||
use num::BigUint;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
use plonky2_field::secp256k1_scalar::Secp256K1Scalar;
|
||||
|
||||
use crate::curve::curve_msm::{msm_execute, msm_precompute, to_digits};
|
||||
@ -206,7 +208,7 @@ mod tests {
|
||||
0b11111111111111111111111111111111,
|
||||
];
|
||||
let x = Secp256K1Scalar::from_biguint(BigUint::from_slice(&x_canonical));
|
||||
assert_eq!(x.to_biguint().to_u32_digits(), x_canonical);
|
||||
assert_eq!(x.to_canonical_biguint().to_u32_digits(), x_canonical);
|
||||
assert_eq!(
|
||||
to_digits::<Secp256K1>(&x, 17),
|
||||
vec![
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
use std::ops::Mul;
|
||||
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
|
||||
use crate::curve::curve_types::{Curve, CurveScalar, ProjectivePoint};
|
||||
|
||||
@ -88,7 +89,7 @@ fn to_digits<C: Curve>(x: &C::ScalarField) -> Vec<u64> {
|
||||
);
|
||||
let digits_per_u64 = 64 / WINDOW_BITS;
|
||||
let mut digits = Vec::with_capacity(digits_per_scalar::<C>());
|
||||
for limb in x.to_biguint().to_u64_digits() {
|
||||
for limb in x.to_canonical_biguint().to_u64_digits() {
|
||||
for j in 0..digits_per_u64 {
|
||||
digits.push((limb >> (j * WINDOW_BITS) as u64) % BASE as u64);
|
||||
}
|
||||
|
||||
@ -1,8 +1,10 @@
|
||||
use std::fmt::Debug;
|
||||
use std::hash::Hash;
|
||||
use std::ops::Neg;
|
||||
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::ops::Square;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
// To avoid implementation conflicts from associated types,
|
||||
// see https://github.com/rust-lang/rust/issues/20400
|
||||
@ -10,8 +12,8 @@ pub struct CurveScalar<C: Curve>(pub <C as Curve>::ScalarField);
|
||||
|
||||
/// A short Weierstrass curve.
|
||||
pub trait Curve: 'static + Sync + Sized + Copy + Debug {
|
||||
type BaseField: Field;
|
||||
type ScalarField: Field;
|
||||
type BaseField: PrimeField;
|
||||
type ScalarField: PrimeField;
|
||||
|
||||
const A: Self::BaseField;
|
||||
const B: Self::BaseField;
|
||||
@ -36,7 +38,7 @@ pub trait Curve: 'static + Sync + Sized + Copy + Debug {
|
||||
}
|
||||
|
||||
/// A point on a short Weierstrass curve, represented in affine coordinates.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
|
||||
pub struct AffinePoint<C: Curve> {
|
||||
pub x: C::BaseField,
|
||||
pub y: C::BaseField,
|
||||
@ -119,6 +121,17 @@ impl<C: Curve> PartialEq for AffinePoint<C> {
|
||||
|
||||
impl<C: Curve> Eq for AffinePoint<C> {}
|
||||
|
||||
impl<C: Curve> Hash for AffinePoint<C> {
|
||||
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
|
||||
if self.zero {
|
||||
self.zero.hash(state);
|
||||
} else {
|
||||
self.x.hash(state);
|
||||
self.y.hash(state);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A point on a short Weierstrass curve, represented in projective coordinates.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct ProjectivePoint<C: Curve> {
|
||||
@ -261,9 +274,9 @@ impl<C: Curve> Neg for ProjectivePoint<C> {
|
||||
}
|
||||
|
||||
pub fn base_to_scalar<C: Curve>(x: C::BaseField) -> C::ScalarField {
|
||||
C::ScalarField::from_biguint(x.to_biguint())
|
||||
C::ScalarField::from_biguint(x.to_canonical_biguint())
|
||||
}
|
||||
|
||||
pub fn scalar_to_base<C: Curve>(x: C::ScalarField) -> C::BaseField {
|
||||
C::BaseField::from_biguint(x.to_biguint())
|
||||
C::BaseField::from_biguint(x.to_canonical_biguint())
|
||||
}
|
||||
|
||||
@ -1,13 +1,19 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::curve::curve_msm::msm_parallel;
|
||||
use crate::curve::curve_types::{base_to_scalar, AffinePoint, Curve, CurveScalar};
|
||||
use crate::field::field_types::Field;
|
||||
|
||||
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
|
||||
pub struct ECDSASignature<C: Curve> {
|
||||
pub r: C::ScalarField,
|
||||
pub s: C::ScalarField,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
|
||||
pub struct ECDSASecretKey<C: Curve>(pub C::ScalarField);
|
||||
|
||||
#[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
|
||||
pub struct ECDSAPublicKey<C: Curve>(pub AffinePoint<C>);
|
||||
|
||||
pub fn sign_message<C: Curve>(msg: C::ScalarField, sk: ECDSASecretKey<C>) -> ECDSASignature<C> {
|
||||
|
||||
@ -1,10 +1,11 @@
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::secp256k1_base::Secp256K1Base;
|
||||
use plonky2_field::secp256k1_scalar::Secp256K1Scalar;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::curve::curve_types::{AffinePoint, Curve};
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
#[derive(Debug, Copy, Clone, Deserialize, Eq, Hash, PartialEq, Serialize)]
|
||||
pub struct Secp256K1;
|
||||
|
||||
impl Curve for Secp256K1 {
|
||||
@ -40,6 +41,7 @@ const SECP256K1_GENERATOR_Y: Secp256K1Base = Secp256K1Base([
|
||||
mod tests {
|
||||
use num::BigUint;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
use plonky2_field::secp256k1_scalar::Secp256K1Scalar;
|
||||
|
||||
use crate::curve::curve_types::{AffinePoint, Curve, ProjectivePoint};
|
||||
@ -86,7 +88,7 @@ mod tests {
|
||||
) -> ProjectivePoint<Secp256K1> {
|
||||
let mut g = rhs;
|
||||
let mut sum = ProjectivePoint::ZERO;
|
||||
for limb in lhs.to_biguint().to_u64_digits().iter() {
|
||||
for limb in lhs.to_canonical_biguint().to_u64_digits().iter() {
|
||||
for j in 0..64 {
|
||||
if (limb >> j & 1u64) != 0u64 {
|
||||
sum = sum + g;
|
||||
|
||||
@ -12,7 +12,7 @@ use crate::fri::FriParams;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::hash::merkle_tree::MerkleTree;
|
||||
use crate::iop::challenger::Challenger;
|
||||
use crate::plonk::config::GenericConfig;
|
||||
use crate::plonk::config::{GenericConfig, Hasher};
|
||||
use crate::timed;
|
||||
use crate::util::reducing::ReducingFactor;
|
||||
use crate::util::reverse_bits;
|
||||
@ -43,7 +43,10 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
cap_height: usize,
|
||||
timing: &mut TimingTree,
|
||||
fft_root_table: Option<&FftRootTable<F>>,
|
||||
) -> Self {
|
||||
) -> Self
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let coeffs = timed!(
|
||||
timing,
|
||||
"IFFT",
|
||||
@ -68,7 +71,10 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
cap_height: usize,
|
||||
timing: &mut TimingTree,
|
||||
fft_root_table: Option<&FftRootTable<F>>,
|
||||
) -> Self {
|
||||
) -> Self
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let degree = polynomials[0].len();
|
||||
let lde_values = timed!(
|
||||
timing,
|
||||
@ -133,7 +139,10 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
challenger: &mut Challenger<F, C::Hasher>,
|
||||
fri_params: &FriParams,
|
||||
timing: &mut TimingTree,
|
||||
) -> FriProof<F, C::Hasher, D> {
|
||||
) -> FriProof<F, C::Hasher, D>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
assert!(D > 1, "Not implemented for D=1.");
|
||||
let alpha = challenger.get_extension_challenge::<D>();
|
||||
let mut alpha = ReducingFactor::new(alpha);
|
||||
|
||||
@ -245,7 +245,10 @@ impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> CompressedFriPr
|
||||
challenges: &ProofChallenges<F, D>,
|
||||
fri_inferred_elements: FriInferredElements<F, D>,
|
||||
params: &FriParams,
|
||||
) -> FriProof<F, H, D> {
|
||||
) -> FriProof<F, H, D>
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
let CompressedFriProof {
|
||||
commit_phase_merkle_caps,
|
||||
query_round_proofs,
|
||||
|
||||
@ -24,7 +24,10 @@ pub fn fri_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const
|
||||
challenger: &mut Challenger<F, C::Hasher>,
|
||||
fri_params: &FriParams,
|
||||
timing: &mut TimingTree,
|
||||
) -> FriProof<F, C::Hasher, D> {
|
||||
) -> FriProof<F, C::Hasher, D>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let n = lde_polynomial_values.len();
|
||||
assert_eq!(lde_polynomial_coeffs.len(), n);
|
||||
|
||||
@ -68,7 +71,10 @@ fn fri_committed_trees<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>,
|
||||
) -> (
|
||||
Vec<MerkleTree<F, C::Hasher>>,
|
||||
PolynomialCoeffs<F::Extension>,
|
||||
) {
|
||||
)
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let mut trees = Vec::new();
|
||||
|
||||
let mut shift = F::MULTIPLICATIVE_GROUP_GENERATOR;
|
||||
|
||||
@ -56,18 +56,17 @@ pub(crate) fn fri_verify_proof_of_work<F: RichField + Extendable<D>, const D: us
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn verify_fri_proof<
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
const D: usize,
|
||||
>(
|
||||
pub fn verify_fri_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
|
||||
instance: &FriInstanceInfo<F, D>,
|
||||
openings: &FriOpenings<F, D>,
|
||||
challenges: &FriChallenges<F, D>,
|
||||
initial_merkle_caps: &[MerkleCap<F, C::Hasher>],
|
||||
proof: &FriProof<F, C::Hasher, D>,
|
||||
params: &FriParams,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
ensure!(
|
||||
params.final_poly_len() == proof.final_poly.len(),
|
||||
"Final polynomial has wrong degree."
|
||||
@ -112,7 +111,10 @@ fn fri_verify_initial_proof<F: RichField, H: Hasher<F>>(
|
||||
x_index: usize,
|
||||
proof: &FriInitialTreeProof<F, H>,
|
||||
initial_merkle_caps: &[MerkleCap<F, H>],
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
for ((evals, merkle_proof), cap) in proof.evals_proofs.iter().zip(initial_merkle_caps) {
|
||||
verify_merkle_proof::<F, H>(evals.clone(), x_index, cap, merkle_proof)?;
|
||||
}
|
||||
@ -177,7 +179,10 @@ fn fri_verifier_query_round<
|
||||
n: usize,
|
||||
round_proof: &FriQueryRound<F, C::Hasher, D>,
|
||||
params: &FriParams,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
fri_verify_initial_proof::<F, C::Hasher>(
|
||||
x_index,
|
||||
&round_proof.initial_trees_proof,
|
||||
|
||||
@ -7,9 +7,13 @@ use crate::gadgets::nonnative::NonNativeTarget;
|
||||
use crate::hash::hash_types::RichField;
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ECDSASecretKeyTarget<C: Curve>(NonNativeTarget<C::ScalarField>);
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ECDSAPublicKeyTarget<C: Curve>(AffinePointTarget<C>);
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ECDSASignatureTarget<C: Curve> {
|
||||
pub r: NonNativeTarget<C::ScalarField>,
|
||||
pub s: NonNativeTarget<C::ScalarField>,
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use num::{BigUint, Integer, One, Zero};
|
||||
use plonky2_field::field_types::PrimeField;
|
||||
use plonky2_field::{extension_field::Extendable, field_types::Field};
|
||||
use plonky2_util::ceil_div_usize;
|
||||
|
||||
@ -34,12 +35,12 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
x.value.clone()
|
||||
}
|
||||
|
||||
pub fn constant_nonnative<FF: Field>(&mut self, x: FF) -> NonNativeTarget<FF> {
|
||||
let x_biguint = self.constant_biguint(&x.to_biguint());
|
||||
pub fn constant_nonnative<FF: PrimeField>(&mut self, x: FF) -> NonNativeTarget<FF> {
|
||||
let x_biguint = self.constant_biguint(&x.to_canonical_biguint());
|
||||
self.biguint_to_nonnative(&x_biguint)
|
||||
}
|
||||
|
||||
pub fn zero_nonnative<FF: Field>(&mut self) -> NonNativeTarget<FF> {
|
||||
pub fn zero_nonnative<FF: PrimeField>(&mut self) -> NonNativeTarget<FF> {
|
||||
self.constant_nonnative(FF::ZERO)
|
||||
}
|
||||
|
||||
@ -62,7 +63,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_nonnative<FF: Field>(
|
||||
pub fn add_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
a: &NonNativeTarget<FF>,
|
||||
b: &NonNativeTarget<FF>,
|
||||
@ -105,7 +106,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_many_nonnative<FF: Field>(
|
||||
pub fn add_many_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
to_add: &[NonNativeTarget<FF>],
|
||||
) -> NonNativeTarget<FF> {
|
||||
@ -149,7 +150,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
// Subtract two `NonNativeTarget`s.
|
||||
pub fn sub_nonnative<FF: Field>(
|
||||
pub fn sub_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
a: &NonNativeTarget<FF>,
|
||||
b: &NonNativeTarget<FF>,
|
||||
@ -177,7 +178,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
diff
|
||||
}
|
||||
|
||||
pub fn mul_nonnative<FF: Field>(
|
||||
pub fn mul_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
a: &NonNativeTarget<FF>,
|
||||
b: &NonNativeTarget<FF>,
|
||||
@ -208,7 +209,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
prod
|
||||
}
|
||||
|
||||
pub fn mul_many_nonnative<FF: Field>(
|
||||
pub fn mul_many_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
to_mul: &[NonNativeTarget<FF>],
|
||||
) -> NonNativeTarget<FF> {
|
||||
@ -223,14 +224,20 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
accumulator
|
||||
}
|
||||
|
||||
pub fn neg_nonnative<FF: Field>(&mut self, x: &NonNativeTarget<FF>) -> NonNativeTarget<FF> {
|
||||
pub fn neg_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
x: &NonNativeTarget<FF>,
|
||||
) -> NonNativeTarget<FF> {
|
||||
let zero_target = self.constant_biguint(&BigUint::zero());
|
||||
let zero_ff = self.biguint_to_nonnative(&zero_target);
|
||||
|
||||
self.sub_nonnative(&zero_ff, x)
|
||||
}
|
||||
|
||||
pub fn inv_nonnative<FF: Field>(&mut self, x: &NonNativeTarget<FF>) -> NonNativeTarget<FF> {
|
||||
pub fn inv_nonnative<FF: PrimeField>(
|
||||
&mut self,
|
||||
x: &NonNativeTarget<FF>,
|
||||
) -> NonNativeTarget<FF> {
|
||||
let num_limbs = x.value.num_limbs();
|
||||
let inv_biguint = self.add_virtual_biguint_target(num_limbs);
|
||||
let div = self.add_virtual_biguint_target(num_limbs);
|
||||
@ -307,7 +314,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct NonNativeAdditionGenerator<F: RichField + Extendable<D>, const D: usize, FF: Field> {
|
||||
struct NonNativeAdditionGenerator<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> {
|
||||
a: NonNativeTarget<FF>,
|
||||
b: NonNativeTarget<FF>,
|
||||
sum: NonNativeTarget<FF>,
|
||||
@ -315,7 +322,7 @@ struct NonNativeAdditionGenerator<F: RichField + Extendable<D>, const D: usize,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerator<F>
|
||||
for NonNativeAdditionGenerator<F, D, FF>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
@ -332,8 +339,8 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
|
||||
let a = witness.get_nonnative_target(self.a.clone());
|
||||
let b = witness.get_nonnative_target(self.b.clone());
|
||||
let a_biguint = a.to_biguint();
|
||||
let b_biguint = b.to_biguint();
|
||||
let a_biguint = a.to_canonical_biguint();
|
||||
let b_biguint = b.to_canonical_biguint();
|
||||
let sum_biguint = a_biguint + b_biguint;
|
||||
let modulus = FF::order();
|
||||
let (overflow, sum_reduced) = if sum_biguint > modulus {
|
||||
@ -348,14 +355,15 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct NonNativeMultipleAddsGenerator<F: RichField + Extendable<D>, const D: usize, FF: Field> {
|
||||
struct NonNativeMultipleAddsGenerator<F: RichField + Extendable<D>, const D: usize, FF: PrimeField>
|
||||
{
|
||||
summands: Vec<NonNativeTarget<FF>>,
|
||||
sum: NonNativeTarget<FF>,
|
||||
overflow: U32Target,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerator<F>
|
||||
for NonNativeMultipleAddsGenerator<F, D, FF>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
@ -373,7 +381,7 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
.collect();
|
||||
let summand_biguints: Vec<_> = summands
|
||||
.iter()
|
||||
.map(|summand| summand.to_biguint())
|
||||
.map(|summand| summand.to_canonical_biguint())
|
||||
.collect();
|
||||
|
||||
let sum_biguint = summand_biguints
|
||||
@ -398,7 +406,7 @@ struct NonNativeSubtractionGenerator<F: RichField + Extendable<D>, const D: usiz
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerator<F>
|
||||
for NonNativeSubtractionGenerator<F, D, FF>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
@ -415,8 +423,8 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
|
||||
let a = witness.get_nonnative_target(self.a.clone());
|
||||
let b = witness.get_nonnative_target(self.b.clone());
|
||||
let a_biguint = a.to_biguint();
|
||||
let b_biguint = b.to_biguint();
|
||||
let a_biguint = a.to_canonical_biguint();
|
||||
let b_biguint = b.to_canonical_biguint();
|
||||
|
||||
let modulus = FF::order();
|
||||
let (diff_biguint, overflow) = if a_biguint > b_biguint {
|
||||
@ -439,7 +447,7 @@ struct NonNativeMultiplicationGenerator<F: RichField + Extendable<D>, const D: u
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerator<F>
|
||||
for NonNativeMultiplicationGenerator<F, D, FF>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
@ -456,8 +464,8 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
|
||||
let a = witness.get_nonnative_target(self.a.clone());
|
||||
let b = witness.get_nonnative_target(self.b.clone());
|
||||
let a_biguint = a.to_biguint();
|
||||
let b_biguint = b.to_biguint();
|
||||
let a_biguint = a.to_canonical_biguint();
|
||||
let b_biguint = b.to_canonical_biguint();
|
||||
|
||||
let prod_biguint = a_biguint * b_biguint;
|
||||
|
||||
@ -470,14 +478,14 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct NonNativeInverseGenerator<F: RichField + Extendable<D>, const D: usize, FF: Field> {
|
||||
struct NonNativeInverseGenerator<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> {
|
||||
x: NonNativeTarget<FF>,
|
||||
inv: BigUintTarget,
|
||||
div: BigUintTarget,
|
||||
_phantom: PhantomData<F>,
|
||||
}
|
||||
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
impl<F: RichField + Extendable<D>, const D: usize, FF: PrimeField> SimpleGenerator<F>
|
||||
for NonNativeInverseGenerator<F, D, FF>
|
||||
{
|
||||
fn dependencies(&self) -> Vec<Target> {
|
||||
@ -488,8 +496,8 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
let x = witness.get_nonnative_target(self.x.clone());
|
||||
let inv = x.inverse();
|
||||
|
||||
let x_biguint = x.to_biguint();
|
||||
let inv_biguint = inv.to_biguint();
|
||||
let x_biguint = x.to_canonical_biguint();
|
||||
let inv_biguint = inv.to_canonical_biguint();
|
||||
let prod = x_biguint * &inv_biguint;
|
||||
let modulus = FF::order();
|
||||
let (div, _rem) = prod.div_rem(&modulus);
|
||||
@ -502,7 +510,7 @@ impl<F: RichField + Extendable<D>, const D: usize, FF: Field> SimpleGenerator<F>
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use anyhow::Result;
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
use plonky2_field::secp256k1_base::Secp256K1Base;
|
||||
|
||||
use crate::iop::witness::PartialWitness;
|
||||
@ -587,7 +595,7 @@ mod tests {
|
||||
|
||||
let x_ff = FF::rand();
|
||||
let mut y_ff = FF::rand();
|
||||
while y_ff.to_biguint() > x_ff.to_biguint() {
|
||||
while y_ff.to_canonical_biguint() > x_ff.to_canonical_biguint() {
|
||||
y_ff = FF::rand();
|
||||
}
|
||||
let diff_ff = x_ff - y_ff;
|
||||
|
||||
@ -455,7 +455,8 @@ mod tests {
|
||||
|
||||
use anyhow::Result;
|
||||
use plonky2_field::extension_field::quartic::QuarticExtension;
|
||||
use plonky2_field::field_types::{Field, Field64};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField64;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use rand::Rng;
|
||||
|
||||
|
||||
@ -520,7 +520,8 @@ mod tests {
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use anyhow::Result;
|
||||
use plonky2_field::field_types::{Field, Field64};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField64;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use rand::Rng;
|
||||
|
||||
|
||||
@ -10,7 +10,7 @@ use crate::hash::hash_types::RichField;
|
||||
use crate::iop::witness::{PartialWitness, Witness};
|
||||
use crate::plonk::circuit_builder::CircuitBuilder;
|
||||
use crate::plonk::circuit_data::CircuitConfig;
|
||||
use crate::plonk::config::GenericConfig;
|
||||
use crate::plonk::config::{GenericConfig, Hasher};
|
||||
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBaseBatch};
|
||||
use crate::plonk::verifier::verify;
|
||||
use crate::util::transpose;
|
||||
@ -92,7 +92,10 @@ pub fn test_eval_fns<
|
||||
const D: usize,
|
||||
>(
|
||||
gate: G,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
// Test that `eval_unfiltered` and `eval_unfiltered_base` are coherent.
|
||||
let wires_base = F::rand_vec(gate.num_wires());
|
||||
let constants_base = F::rand_vec(gate.num_constants());
|
||||
|
||||
@ -338,7 +338,8 @@ mod tests {
|
||||
|
||||
use anyhow::Result;
|
||||
use plonky2_field::extension_field::quartic::QuarticExtension;
|
||||
use plonky2_field::field_types::{Field, Field64};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField64;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField;
|
||||
use rand::Rng;
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
use plonky2_field::field_types::{Field, Field64};
use plonky2_field::field_types::{Field, PrimeField64};
use plonky2_field::goldilocks_field::GoldilocksField;
use rand::Rng;
use serde::{Deserialize, Deserializer, Serialize, Serializer};

@@ -8,7 +8,7 @@ use crate::iop::target::Target;
use crate::plonk::config::GenericHashOut;

/// A prime order field with the features we need to use it as a base field in our argument system.
pub trait RichField: Field64 + Poseidon {}
pub trait RichField: PrimeField64 + Poseidon {}

impl RichField for GoldilocksField {}

@@ -31,14 +31,10 @@ impl<F: Field> HashOut<F> {
        }
    }

    pub fn from_partial(mut elements: Vec<F>) -> Self {
        debug_assert!(elements.len() <= 4);
        while elements.len() < 4 {
            elements.push(F::ZERO);
        }
        Self {
            elements: [elements[0], elements[1], elements[2], elements[3]],
        }
    pub fn from_partial(elements_in: &[F]) -> Self {
        let mut elements = [F::ZERO; 4];
        elements[0..elements_in.len()].copy_from_slice(elements_in);
        Self { elements }
    }

    pub fn rand_from_rng<R: Rng>(rng: &mut R) -> Self {

@@ -104,14 +100,10 @@ impl HashOutTarget {
        }
    }

    pub fn from_partial(mut elements: Vec<Target>, zero: Target) -> Self {
        debug_assert!(elements.len() <= 4);
        while elements.len() < 4 {
            elements.push(zero);
        }
        Self {
            elements: [elements[0], elements[1], elements[2], elements[3]],
        }
    pub fn from_partial(elements_in: &[Target], zero: Target) -> Self {
        let mut elements = [zero; 4];
        elements[0..elements_in.len()].copy_from_slice(elements_in);
        Self { elements }
    }
}
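`HashOut::from_partial` and `HashOutTarget::from_partial` switch from taking an owned `Vec` padded in place to taking a slice that is copied into a zero-initialized array. A minimal sketch of the new call shape, assuming the public path `plonky2::hash::hash_types::HashOut` and a public `elements` field as used elsewhere in the codebase; the concrete values are illustrative:

```rust
use plonky2::field::field_types::Field;
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::hash::hash_types::HashOut;

fn main() {
    type F = GoldilocksField;
    // Up to four elements may be supplied; the remaining slots are filled with F::ZERO.
    let h = HashOut::<F>::from_partial(&[F::ONE, F::TWO]);
    assert_eq!(h.elements, [F::ONE, F::TWO, F::ZERO, F::ZERO]);
}
```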
@@ -12,21 +12,11 @@ pub(crate) const SPONGE_RATE: usize = 8;
pub(crate) const SPONGE_CAPACITY: usize = 4;
pub const SPONGE_WIDTH: usize = SPONGE_RATE + SPONGE_CAPACITY;

/// Hash the vector if necessary to reduce its length to ~256 bits. If it already fits, this is a
/// no-op.
pub fn hash_or_noop<F: RichField, P: PlonkyPermutation<F>>(inputs: Vec<F>) -> HashOut<F> {
    if inputs.len() <= 4 {
        HashOut::from_partial(inputs)
    } else {
        hash_n_to_hash_no_pad::<F, P>(&inputs)
    }
}

impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
    pub fn hash_or_noop<H: AlgebraicHasher<F>>(&mut self, inputs: Vec<Target>) -> HashOutTarget {
        let zero = self.zero();
        if inputs.len() <= 4 {
            HashOutTarget::from_partial(inputs, zero)
            HashOutTarget::from_partial(&inputs, zero)
        } else {
            self.hash_n_to_hash_no_pad::<H>(inputs)
        }
@ -30,9 +30,12 @@ pub(crate) fn verify_merkle_proof<F: RichField, H: Hasher<F>>(
|
||||
leaf_index: usize,
|
||||
merkle_cap: &MerkleCap<F, H>,
|
||||
proof: &MerkleProof<F, H>,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
let mut index = leaf_index;
|
||||
let mut current_digest = H::hash_no_pad(&leaf_data);
|
||||
let mut current_digest = H::hash_or_noop(&leaf_data);
|
||||
for &sibling_digest in proof.siblings.iter() {
|
||||
let bit = index & 1;
|
||||
index >>= 1;
|
||||
|
||||
@ -60,10 +60,13 @@ fn capacity_up_to_mut<T>(v: &mut Vec<T>, len: usize) -> &mut [MaybeUninit<T>] {
|
||||
fn fill_subtree<F: RichField, H: Hasher<F>>(
|
||||
digests_buf: &mut [MaybeUninit<H::Hash>],
|
||||
leaves: &[Vec<F>],
|
||||
) -> H::Hash {
|
||||
) -> H::Hash
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
assert_eq!(leaves.len(), digests_buf.len() / 2 + 1);
|
||||
if digests_buf.is_empty() {
|
||||
H::hash_no_pad(&leaves[0])
|
||||
H::hash_or_noop(&leaves[0])
|
||||
} else {
|
||||
// Layout is: left recursive output || left child digest
|
||||
// || right child digest || right recursive output.
|
||||
@ -89,7 +92,9 @@ fn fill_digests_buf<F: RichField, H: Hasher<F>>(
|
||||
cap_buf: &mut [MaybeUninit<H::Hash>],
|
||||
leaves: &[Vec<F>],
|
||||
cap_height: usize,
|
||||
) {
|
||||
) where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
// Special case of a tree that's all cap. The usual case will panic because we'll try to split
|
||||
// an empty slice into chunks of `0`. (We would not need this if there was a way to split into
|
||||
// `blah` chunks as opposed to chunks _of_ `blah`.)
|
||||
@ -99,7 +104,7 @@ fn fill_digests_buf<F: RichField, H: Hasher<F>>(
|
||||
.par_iter_mut()
|
||||
.zip(leaves)
|
||||
.for_each(|(cap_buf, leaf)| {
|
||||
cap_buf.write(H::hash_no_pad(leaf));
|
||||
cap_buf.write(H::hash_or_noop(leaf));
|
||||
});
|
||||
return;
|
||||
}
|
||||
@ -121,7 +126,10 @@ fn fill_digests_buf<F: RichField, H: Hasher<F>>(
|
||||
}
|
||||
|
||||
impl<F: RichField, H: Hasher<F>> MerkleTree<F, H> {
|
||||
pub fn new(leaves: Vec<Vec<F>>, cap_height: usize) -> Self {
|
||||
pub fn new(leaves: Vec<Vec<F>>, cap_height: usize) -> Self
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
let log2_leaves_len = log2_strict(leaves.len());
|
||||
assert!(
|
||||
cap_height <= log2_leaves_len,
|
||||
@ -208,14 +216,13 @@ mod tests {
|
||||
(0..n).map(|_| F::rand_vec(k)).collect()
|
||||
}
|
||||
|
||||
fn verify_all_leaves<
|
||||
F: RichField + Extendable<D>,
|
||||
C: GenericConfig<D, F = F>,
|
||||
const D: usize,
|
||||
>(
|
||||
fn verify_all_leaves<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
|
||||
leaves: Vec<Vec<F>>,
|
||||
cap_height: usize,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let tree = MerkleTree::<F, C::Hasher>::new(leaves.clone(), cap_height);
|
||||
for (i, leaf) in leaves.into_iter().enumerate() {
|
||||
let proof = tree.prove(i);
|
||||
|
||||
@ -57,7 +57,10 @@ pub(crate) fn decompress_merkle_proofs<F: RichField, H: Hasher<F>>(
|
||||
compressed_proofs: &[MerkleProof<F, H>],
|
||||
height: usize,
|
||||
cap_height: usize,
|
||||
) -> Vec<MerkleProof<F, H>> {
|
||||
) -> Vec<MerkleProof<F, H>>
|
||||
where
|
||||
[(); H::HASH_SIZE]:,
|
||||
{
|
||||
let num_leaves = 1 << height;
|
||||
let compressed_proofs = compressed_proofs.to_vec();
|
||||
let mut decompressed_proofs = Vec::with_capacity(compressed_proofs.len());
|
||||
@ -66,7 +69,7 @@ pub(crate) fn decompress_merkle_proofs<F: RichField, H: Hasher<F>>(
|
||||
|
||||
for (&i, v) in leaves_indices.iter().zip(leaves_data) {
|
||||
// Observe the leaves.
|
||||
seen.insert(i + num_leaves, H::hash_no_pad(v));
|
||||
seen.insert(i + num_leaves, H::hash_or_noop(v));
|
||||
}
|
||||
|
||||
// Iterators over the siblings.
|
||||
|
||||
@ -2,7 +2,7 @@
|
||||
//! https://eprint.iacr.org/2019/458.pdf
|
||||
|
||||
use plonky2_field::extension_field::{Extendable, FieldExtension};
|
||||
use plonky2_field::field_types::{Field, Field64};
|
||||
use plonky2_field::field_types::{Field, PrimeField64};
|
||||
use unroll::unroll_for_loops;
|
||||
|
||||
use crate::gates::gate::Gate;
|
||||
@ -35,7 +35,7 @@ fn add_u160_u128((x_lo, x_hi): (u128, u32), y: u128) -> (u128, u32) {
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn reduce_u160<F: Field64>((n_lo, n_hi): (u128, u32)) -> F {
|
||||
fn reduce_u160<F: PrimeField64>((n_lo, n_hi): (u128, u32)) -> F {
|
||||
let n_lo_hi = (n_lo >> 64) as u64;
|
||||
let n_lo_lo = n_lo as u64;
|
||||
let reduced_hi: u64 = F::from_noncanonical_u96((n_lo_hi, n_hi)).to_noncanonical_u64();
|
||||
@ -148,7 +148,7 @@ pub const ALL_ROUND_CONSTANTS: [u64; MAX_WIDTH * N_ROUNDS] = [
|
||||
];
|
||||
|
||||
const WIDTH: usize = SPONGE_WIDTH;
|
||||
pub trait Poseidon: Field64 {
|
||||
pub trait Poseidon: PrimeField64 {
|
||||
// Total number of round constants required: width of the input
|
||||
// times number of rounds.
|
||||
const N_ROUND_CONSTANTS: usize = WIDTH * N_ROUNDS;
|
||||
|
||||
@ -270,7 +270,8 @@ impl Poseidon for GoldilocksField {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use plonky2_field::field_types::{Field, Field64};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::PrimeField64;
|
||||
use plonky2_field::goldilocks_field::GoldilocksField as F;
|
||||
|
||||
use crate::hash::poseidon::test_helpers::{check_consistency, check_test_vectors};
|
||||
|
||||
@ -3,7 +3,7 @@ use std::marker::PhantomData;
|
||||
|
||||
use num::BigUint;
|
||||
use plonky2_field::extension_field::{Extendable, FieldExtension};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
|
||||
use crate::gadgets::arithmetic_u32::U32Target;
|
||||
use crate::gadgets::biguint::BigUintTarget;
|
||||
@ -180,8 +180,8 @@ impl<F: Field> GeneratedValues<F> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_nonnative_target<FF: Field>(&mut self, target: NonNativeTarget<FF>, value: FF) {
|
||||
self.set_biguint_target(target.value, value.to_biguint())
|
||||
pub fn set_nonnative_target<FF: PrimeField>(&mut self, target: NonNativeTarget<FF>, value: FF) {
|
||||
self.set_biguint_target(target.value, value.to_canonical_biguint())
|
||||
}
|
||||
|
||||
pub fn set_hash_target(&mut self, ht: HashOutTarget, value: HashOut<F>) {
|
||||
|
||||
@ -3,7 +3,7 @@ use std::collections::HashMap;
|
||||
use itertools::Itertools;
|
||||
use num::{BigUint, FromPrimitive, Zero};
|
||||
use plonky2_field::extension_field::{Extendable, FieldExtension};
|
||||
use plonky2_field::field_types::Field;
|
||||
use plonky2_field::field_types::{Field, PrimeField};
|
||||
|
||||
use crate::fri::structure::{FriOpenings, FriOpeningsTarget};
|
||||
use crate::fri::witness_util::set_fri_proof_target;
|
||||
@ -63,20 +63,26 @@ pub trait Witness<F: Field> {
|
||||
panic!("not a bool")
|
||||
}
|
||||
|
||||
fn get_biguint_target(&self, target: BigUintTarget) -> BigUint {
|
||||
fn get_biguint_target(&self, target: BigUintTarget) -> BigUint
|
||||
where
|
||||
F: PrimeField,
|
||||
{
|
||||
let mut result = BigUint::zero();
|
||||
|
||||
let limb_base = BigUint::from_u64(1 << 32u64).unwrap();
|
||||
for i in (0..target.num_limbs()).rev() {
|
||||
let limb = target.get_limb(i);
|
||||
result *= &limb_base;
|
||||
result += self.get_target(limb.0).to_biguint();
|
||||
result += self.get_target(limb.0).to_canonical_biguint();
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
fn get_nonnative_target<FF: Field>(&self, target: NonNativeTarget<FF>) -> FF {
|
||||
fn get_nonnative_target<FF: PrimeField>(&self, target: NonNativeTarget<FF>) -> FF
|
||||
where
|
||||
F: PrimeField,
|
||||
{
|
||||
let val = self.get_biguint_target(target.value);
|
||||
FF::from_biguint(val)
|
||||
}
|
||||
|
||||
@ -601,7 +601,10 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
/// Builds a "full circuit", with both prover and verifier data.
|
||||
pub fn build<C: GenericConfig<D, F = F>>(mut self) -> CircuitData<F, C, D> {
|
||||
pub fn build<C: GenericConfig<D, F = F>>(mut self) -> CircuitData<F, C, D>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let mut timing = TimingTree::new("preprocess", Level::Trace);
|
||||
let start = Instant::now();
|
||||
let rate_bits = self.config.fri_config.rate_bits;
|
||||
@ -767,7 +770,10 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
/// Builds a "prover circuit", with data needed to generate proofs but not verify them.
|
||||
pub fn build_prover<C: GenericConfig<D, F = F>>(self) -> ProverCircuitData<F, C, D> {
|
||||
pub fn build_prover<C: GenericConfig<D, F = F>>(self) -> ProverCircuitData<F, C, D>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
// TODO: Can skip parts of this.
|
||||
let CircuitData {
|
||||
prover_only,
|
||||
@ -781,7 +787,10 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
|
||||
}
|
||||
|
||||
/// Builds a "verifier circuit", with data needed to verify proofs but not generate them.
|
||||
pub fn build_verifier<C: GenericConfig<D, F = F>>(self) -> VerifierCircuitData<F, C, D> {
|
||||
pub fn build_verifier<C: GenericConfig<D, F = F>>(self) -> VerifierCircuitData<F, C, D>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
// TODO: Can skip parts of this.
|
||||
let CircuitData {
|
||||
verifier_only,
|
||||
|
||||
@ -104,7 +104,10 @@ pub struct CircuitData<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>,
|
||||
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
CircuitData<F, C, D>
|
||||
{
|
||||
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>> {
|
||||
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
prove(
|
||||
&self.prover_only,
|
||||
&self.common,
|
||||
@ -113,14 +116,20 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
)
|
||||
}
|
||||
|
||||
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()> {
|
||||
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
verify(proof_with_pis, &self.verifier_only, &self.common)
|
||||
}
|
||||
|
||||
pub fn verify_compressed(
|
||||
&self,
|
||||
compressed_proof_with_pis: CompressedProofWithPublicInputs<F, C, D>,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
compressed_proof_with_pis.verify(&self.verifier_only, &self.common)
|
||||
}
|
||||
}
|
||||
@ -144,7 +153,10 @@ pub struct ProverCircuitData<
|
||||
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
ProverCircuitData<F, C, D>
|
||||
{
|
||||
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>> {
|
||||
pub fn prove(&self, inputs: PartialWitness<F>) -> Result<ProofWithPublicInputs<F, C, D>>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
prove(
|
||||
&self.prover_only,
|
||||
&self.common,
|
||||
@ -168,14 +180,20 @@ pub struct VerifierCircuitData<
|
||||
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
VerifierCircuitData<F, C, D>
|
||||
{
|
||||
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()> {
|
||||
pub fn verify(&self, proof_with_pis: ProofWithPublicInputs<F, C, D>) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
verify(proof_with_pis, &self.verifier_only, &self.common)
|
||||
}
|
||||
|
||||
pub fn verify_compressed(
|
||||
&self,
|
||||
compressed_proof_with_pis: CompressedProofWithPublicInputs<F, C, D>,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
compressed_proof_with_pis.verify(&self.verifier_only, &self.common)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,6 +46,24 @@ pub trait Hasher<F: RichField>: Sized + Clone + Debug + Eq + PartialEq {
        Self::hash_no_pad(&padded_input)
    }

    /// Hash the slice if necessary to reduce its length to ~256 bits. If it already fits, this is a
    /// no-op.
    fn hash_or_noop(inputs: &[F]) -> Self::Hash
    where
        [(); Self::HASH_SIZE]:,
    {
        if inputs.len() <= 4 {
            let mut inputs_bytes = [0u8; Self::HASH_SIZE];
            for i in 0..inputs.len() {
                inputs_bytes[i * 8..(i + 1) * 8]
                    .copy_from_slice(&inputs[i].to_canonical_u64().to_le_bytes());
            }
            Self::Hash::from_bytes(&inputs_bytes)
        } else {
            Self::hash_no_pad(inputs)
        }
    }

    fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash;
}
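This new default `hash_or_noop` on the `Hasher` trait replaces the free function removed from the hashing module above: inputs of at most four field elements are packed directly into a `[u8; Self::HASH_SIZE]` buffer instead of being hashed, which is why callers throughout this diff pick up a `[(); H::HASH_SIZE]:` bound (with `generic_const_exprs` enabled, as in the benchmark hunk). A hedged sketch of a caller written against the trait as shown; the helper function and its name are illustrative:

```rust
#![feature(generic_const_exprs)]

use plonky2::hash::hash_types::RichField;
use plonky2::plonk::config::Hasher;

/// Digest one Merkle leaf: short leaves are packed as-is, longer ones are hashed.
fn leaf_digest<F: RichField, H: Hasher<F>>(leaf: &[F]) -> H::Hash
where
    // Required because the default `hash_or_noop` builds a `[u8; Self::HASH_SIZE]` buffer.
    [(); H::HASH_SIZE]:,
{
    H::hash_or_noop(leaf)
}
```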
@ -138,7 +138,7 @@ where
|
||||
sum
|
||||
}
|
||||
|
||||
pub(crate) fn reduce_with_powers_ext_recursive<F: RichField + Extendable<D>, const D: usize>(
|
||||
pub fn reduce_with_powers_ext_recursive<F: RichField + Extendable<D>, const D: usize>(
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
terms: &[ExtensionTarget<D>],
|
||||
alpha: Target,
|
||||
|
||||
@ -138,7 +138,10 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
challenges: &ProofChallenges<F, D>,
|
||||
fri_inferred_elements: FriInferredElements<F, D>,
|
||||
params: &FriParams,
|
||||
) -> Proof<F, C, D> {
|
||||
) -> Proof<F, C, D>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let CompressedProof {
|
||||
wires_cap,
|
||||
plonk_zs_partial_products_cap,
|
||||
@ -174,7 +177,10 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
pub fn decompress(
|
||||
self,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> anyhow::Result<ProofWithPublicInputs<F, C, D>> {
|
||||
) -> anyhow::Result<ProofWithPublicInputs<F, C, D>>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
let challenges = self.get_challenges(self.get_public_inputs_hash(), common_data)?;
|
||||
let fri_inferred_elements = self.get_inferred_elements(&challenges, common_data);
|
||||
let decompressed_proof =
|
||||
@ -190,7 +196,10 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
|
||||
self,
|
||||
verifier_data: &VerifierOnlyCircuitData<C, D>,
|
||||
common_data: &CommonCircuitData<F, C, D>,
|
||||
) -> anyhow::Result<()> {
|
||||
) -> anyhow::Result<()>
|
||||
where
|
||||
[(); C::Hasher::HASH_SIZE]:,
|
||||
{
|
||||
ensure!(
|
||||
self.public_inputs.len() == common_data.num_public_inputs,
|
||||
"Number of public inputs doesn't match circuit data."
|
||||
|
||||
@ -31,7 +31,10 @@ pub(crate) fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, co
    common_data: &CommonCircuitData<F, C, D>,
    inputs: PartialWitness<F>,
    timing: &mut TimingTree,
) -> Result<ProofWithPublicInputs<F, C, D>> {
) -> Result<ProofWithPublicInputs<F, C, D>>
where
    [(); C::Hasher::HASH_SIZE]:,
{
    let config = &common_data.config;
    let num_challenges = config.num_challenges;
    let quotient_degree = common_data.quotient_degree();

@ -187,7 +187,9 @@ mod tests {
    use crate::gates::noop::NoopGate;
    use crate::iop::witness::{PartialWitness, Witness};
    use crate::plonk::circuit_data::{CircuitConfig, VerifierOnlyCircuitData};
    use crate::plonk::config::{GenericConfig, KeccakGoldilocksConfig, PoseidonGoldilocksConfig};
    use crate::plonk::config::{
        GenericConfig, Hasher, KeccakGoldilocksConfig, PoseidonGoldilocksConfig,
    };
    use crate::plonk::proof::{CompressedProofWithPublicInputs, ProofWithPublicInputs};
    use crate::plonk::prover::prove;
    use crate::util::timing::TimingTree;
@ -322,7 +324,10 @@ mod tests {
        ProofWithPublicInputs<F, C, D>,
        VerifierOnlyCircuitData<C, D>,
        CommonCircuitData<F, C, D>,
    )> {
    )>
    where
        [(); C::Hasher::HASH_SIZE]:,
    {
        let mut builder = CircuitBuilder::<F, D>::new(config.clone());
        for _ in 0..num_dummy_gates {
            builder.add_gate(NoopGate, vec![]);
@ -356,6 +361,7 @@ mod tests {
    )>
    where
        InnerC::Hasher: AlgebraicHasher<F>,
        [(); C::Hasher::HASH_SIZE]:,
    {
        let mut builder = CircuitBuilder::<F, D>::new(config.clone());
        let mut pw = PartialWitness::new();
@ -407,7 +413,10 @@ mod tests {
    >(
        proof: &ProofWithPublicInputs<F, C, D>,
        cd: &CommonCircuitData<F, C, D>,
    ) -> Result<()> {
    ) -> Result<()>
    where
        [(); C::Hasher::HASH_SIZE]:,
    {
        let proof_bytes = proof.to_bytes()?;
        info!("Proof length: {} bytes", proof_bytes.len());
        let proof_from_bytes = ProofWithPublicInputs::from_bytes(proof_bytes, cd)?;

@ -15,7 +15,10 @@ pub(crate) fn verify<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, c
    proof_with_pis: ProofWithPublicInputs<F, C, D>,
    verifier_data: &VerifierOnlyCircuitData<C, D>,
    common_data: &CommonCircuitData<F, C, D>,
) -> Result<()> {
) -> Result<()>
where
    [(); C::Hasher::HASH_SIZE]:,
{
    ensure!(
        proof_with_pis.public_inputs.len() == common_data.num_public_inputs,
        "Number of public inputs doesn't match circuit data."
@ -42,7 +45,10 @@ pub(crate) fn verify_with_challenges<
    challenges: ProofChallenges<F, D>,
    verifier_data: &VerifierOnlyCircuitData<C, D>,
    common_data: &CommonCircuitData<F, C, D>,
) -> Result<()> {
) -> Result<()>
where
    [(); C::Hasher::HASH_SIZE]:,
{
    let local_constants = &proof.openings.constants;
    let local_wires = &proof.openings.wires;
    let vars = EvaluationVars {

@ -3,7 +3,7 @@ use std::io::Cursor;
use std::io::{Read, Result, Write};

use plonky2_field::extension_field::{Extendable, FieldExtension};
use plonky2_field::field_types::Field64;
use plonky2_field::field_types::{Field64, PrimeField64};
use plonky2_field::polynomial::PolynomialCoeffs;

use crate::fri::proof::{
@ -53,7 +53,7 @@ impl Buffer {
        Ok(u32::from_le_bytes(buf))
    }

    fn write_field<F: Field64>(&mut self, x: F) -> Result<()> {
    fn write_field<F: PrimeField64>(&mut self, x: F) -> Result<()> {
        self.0.write_all(&x.to_canonical_u64().to_le_bytes())
    }
    fn read_field<F: Field64>(&mut self) -> Result<F> {
@ -116,7 +116,7 @@ impl Buffer {
        ))
    }

    pub fn write_field_vec<F: Field64>(&mut self, v: &[F]) -> Result<()> {
    pub fn write_field_vec<F: PrimeField64>(&mut self, v: &[F]) -> Result<()> {
        for &a in v {
            self.write_field(a)?;
        }

@ -1,8 +1,6 @@
// TODO: Remove these when crate is closer to being finished.
#![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unreachable_code)]
#![allow(clippy::diverging_sub_expression)]
#![allow(incomplete_features)]
#![feature(generic_const_exprs)]

@ -7,7 +7,7 @@ use plonky2::field::zero_poly_coset::ZeroPolyOnCoset;
use plonky2::fri::oracle::PolynomialBatch;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::challenger::Challenger;
use plonky2::plonk::config::GenericConfig;
use plonky2::plonk::config::{GenericConfig, Hasher};
use plonky2::timed;
use plonky2::util::timing::TimingTree;
use plonky2::util::transpose;
@ -33,6 +33,7 @@ where
    S: Stark<F, D>,
    [(); S::COLUMNS]:,
    [(); S::PUBLIC_INPUTS]:,
    [(); C::Hasher::HASH_SIZE]:,
{
    let degree = trace.len();
    let degree_bits = log2_strict(degree);

@ -3,7 +3,7 @@ use plonky2::field::extension_field::{Extendable, FieldExtension};
use plonky2::field::field_types::Field;
use plonky2::fri::verifier::verify_fri_proof;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::config::GenericConfig;
use plonky2::plonk::config::{GenericConfig, Hasher};
use plonky2::plonk::plonk_common::reduce_with_powers;

use crate::config::StarkConfig;
@ -25,6 +25,7 @@ pub fn verify<
where
    [(); S::COLUMNS]:,
    [(); S::PUBLIC_INPUTS]:,
    [(); C::Hasher::HASH_SIZE]:,
{
    ensure!(proof_with_pis.public_inputs.len() == S::PUBLIC_INPUTS);
    let degree_bits = proof_with_pis.proof.recover_degree_bits(config);
@ -47,6 +48,7 @@ pub(crate) fn verify_with_challenges<
where
    [(); S::COLUMNS]:,
    [(); S::PUBLIC_INPUTS]:,
    [(); C::Hasher::HASH_SIZE]:,
{
    let StarkProofWithPublicInputs {
        proof,

70 system_zero/src/arithmetic/addition.rs Normal file
@ -0,0 +1,70 @@
use plonky2::field::extension_field::Extendable;
use plonky2::field::field_types::{Field, PrimeField64};
use plonky2::field::packed_field::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::plonk_common::reduce_with_powers_ext_recursive;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};

use crate::registers::arithmetic::*;
use crate::registers::NUM_COLUMNS;

pub(crate) fn generate_addition<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
    let in_1 = values[COL_ADD_INPUT_1].to_canonical_u64();
    let in_2 = values[COL_ADD_INPUT_2].to_canonical_u64();
    let in_3 = values[COL_ADD_INPUT_3].to_canonical_u64();
    let output = in_1 + in_2 + in_3;

    values[COL_ADD_OUTPUT_1] = F::from_canonical_u16(output as u16);
    values[COL_ADD_OUTPUT_2] = F::from_canonical_u16((output >> 16) as u16);
    values[COL_ADD_OUTPUT_3] = F::from_canonical_u16((output >> 32) as u16);
}

pub(crate) fn eval_addition<F: Field, P: PackedField<Scalar = F>>(
    local_values: &[P; NUM_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let is_add = local_values[IS_ADD];
    let in_1 = local_values[COL_ADD_INPUT_1];
    let in_2 = local_values[COL_ADD_INPUT_2];
    let in_3 = local_values[COL_ADD_INPUT_3];
    let out_1 = local_values[COL_ADD_OUTPUT_1];
    let out_2 = local_values[COL_ADD_OUTPUT_2];
    let out_3 = local_values[COL_ADD_OUTPUT_3];

    let weight_2 = F::from_canonical_u64(1 << 16);
    let weight_3 = F::from_canonical_u64(1 << 32);
    // Note that this can't overflow. Since each output limb has been range checked as 16-bits,
    // this sum can be around 48 bits at most.
    let out = out_1 + out_2 * weight_2 + out_3 * weight_3;

    let computed_out = in_1 + in_2 + in_3;

    yield_constr.constraint_wrapping(is_add * (out - computed_out));
}

pub(crate) fn eval_addition_recursively<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    local_values: &[ExtensionTarget<D>; NUM_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let is_add = local_values[IS_ADD];
    let in_1 = local_values[COL_ADD_INPUT_1];
    let in_2 = local_values[COL_ADD_INPUT_2];
    let in_3 = local_values[COL_ADD_INPUT_3];
    let out_1 = local_values[COL_ADD_OUTPUT_1];
    let out_2 = local_values[COL_ADD_OUTPUT_2];
    let out_3 = local_values[COL_ADD_OUTPUT_3];

    let limb_base = builder.constant(F::from_canonical_u64(1 << 16));
    // Note that this can't overflow. Since each output limb has been range checked as 16-bits,
    // this sum can be around 48 bits at most.
    let out = reduce_with_powers_ext_recursive(builder, &[out_1, out_2, out_3], limb_base);

    let computed_out = builder.add_many_extension(&[in_1, in_2, in_3]);

    let diff = builder.sub_extension(out, computed_out);
    let filtered_diff = builder.mul_extension(is_add, diff);
    yield_constr.constraint_wrapping(builder, filtered_diff);
}

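The addition unit above splits the sum of three u32 inputs into three 16-bit limbs and constrains their weighted recombination. A standalone sketch of the same bookkeeping on plain integers, outside the STARK machinery; `limb_recombination_matches` is a made-up name for illustration:

```rust
// Recombine the three 16-bit output limbs of a three-way u32 addition and check that the
// weighted sum matches the true sum, mirroring the constraint in eval_addition.
fn limb_recombination_matches(in_1: u32, in_2: u32, in_3: u32) -> bool {
    let sum = in_1 as u64 + in_2 as u64 + in_3 as u64; // < 3 * 2^32, comfortably inside u64
    let out_1 = sum & 0xffff;
    let out_2 = (sum >> 16) & 0xffff;
    let out_3 = (sum >> 32) & 0xffff;
    // `out_1 + out_2 * 2^16 + out_3 * 2^32 == in_1 + in_2 + in_3`; the left-hand side stays
    // well below 2^48, so nothing wraps.
    out_1 + (out_2 << 16) + (out_3 << 32) == sum
}

fn main() {
    assert!(limb_recombination_matches(u32::MAX, u32::MAX, u32::MAX));
    assert!(limb_recombination_matches(1, 2, 3));
}
```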
31 system_zero/src/arithmetic/division.rs Normal file
@ -0,0 +1,31 @@
use plonky2::field::extension_field::Extendable;
use plonky2::field::field_types::{Field, PrimeField64};
use plonky2::field::packed_field::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};

use crate::registers::arithmetic::*;
use crate::registers::NUM_COLUMNS;

pub(crate) fn generate_division<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
    // TODO
}

pub(crate) fn eval_division<F: Field, P: PackedField<Scalar = F>>(
    local_values: &[P; NUM_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let is_div = local_values[IS_DIV];
    // TODO
}

pub(crate) fn eval_division_recursively<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    local_values: &[ExtensionTarget<D>; NUM_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let is_div = local_values[IS_DIV];
    // TODO
}

75 system_zero/src/arithmetic/mod.rs Normal file
@ -0,0 +1,75 @@
use plonky2::field::extension_field::Extendable;
use plonky2::field::field_types::{Field, PrimeField64};
use plonky2::field::packed_field::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use starky::vars::StarkEvaluationTargets;
use starky::vars::StarkEvaluationVars;

use crate::arithmetic::addition::{eval_addition, eval_addition_recursively, generate_addition};
use crate::arithmetic::division::{eval_division, eval_division_recursively, generate_division};
use crate::arithmetic::multiplication::{
    eval_multiplication, eval_multiplication_recursively, generate_multiplication,
};
use crate::arithmetic::subtraction::{
    eval_subtraction, eval_subtraction_recursively, generate_subtraction,
};
use crate::public_input_layout::NUM_PUBLIC_INPUTS;
use crate::registers::arithmetic::*;
use crate::registers::NUM_COLUMNS;

mod addition;
mod division;
mod multiplication;
mod subtraction;

pub(crate) fn generate_arithmetic_unit<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
    if values[IS_ADD].is_one() {
        generate_addition(values);
    } else if values[IS_SUB].is_one() {
        generate_subtraction(values);
    } else if values[IS_MUL].is_one() {
        generate_multiplication(values);
    } else if values[IS_DIV].is_one() {
        generate_division(values);
    }
}

pub(crate) fn eval_arithmetic_unit<F: Field, P: PackedField<Scalar = F>>(
    vars: StarkEvaluationVars<F, P, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let local_values = &vars.local_values;

    // Check that the operation flag values are binary.
    for col in [IS_ADD, IS_SUB, IS_MUL, IS_DIV] {
        let val = local_values[col];
        yield_constr.constraint_wrapping(val * val - val);
    }

    eval_addition(local_values, yield_constr);
    eval_subtraction(local_values, yield_constr);
    eval_multiplication(local_values, yield_constr);
    eval_division(local_values, yield_constr);
}

pub(crate) fn eval_arithmetic_unit_recursively<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    vars: StarkEvaluationTargets<D, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let local_values = &vars.local_values;

    // Check that the operation flag values are binary.
    for col in [IS_ADD, IS_SUB, IS_MUL, IS_DIV] {
        let val = local_values[col];
        let constraint = builder.mul_sub_extension(val, val, val);
        yield_constr.constraint_wrapping(builder, constraint);
    }

    eval_addition_recursively(builder, local_values, yield_constr);
    eval_subtraction_recursively(builder, local_values, yield_constr);
    eval_multiplication_recursively(builder, local_values, yield_constr);
    eval_division_recursively(builder, local_values, yield_constr);
}

31 system_zero/src/arithmetic/multiplication.rs Normal file
@ -0,0 +1,31 @@
use plonky2::field::extension_field::Extendable;
use plonky2::field::field_types::{Field, PrimeField64};
use plonky2::field::packed_field::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};

use crate::registers::arithmetic::*;
use crate::registers::NUM_COLUMNS;

pub(crate) fn generate_multiplication<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
    // TODO
}

pub(crate) fn eval_multiplication<F: Field, P: PackedField<Scalar = F>>(
    local_values: &[P; NUM_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let is_mul = local_values[IS_MUL];
    // TODO
}

pub(crate) fn eval_multiplication_recursively<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    local_values: &[ExtensionTarget<D>; NUM_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let is_mul = local_values[IS_MUL];
    // TODO
}

31 system_zero/src/arithmetic/subtraction.rs Normal file
@ -0,0 +1,31 @@
use plonky2::field::extension_field::Extendable;
use plonky2::field::field_types::{Field, PrimeField64};
use plonky2::field::packed_field::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};

use crate::registers::arithmetic::*;
use crate::registers::NUM_COLUMNS;

pub(crate) fn generate_subtraction<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
    // TODO
}

pub(crate) fn eval_subtraction<F: Field, P: PackedField<Scalar = F>>(
    local_values: &[P; NUM_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let is_sub = local_values[IS_SUB];
    // TODO
}

pub(crate) fn eval_subtraction_recursively<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    local_values: &[ExtensionTarget<D>; NUM_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let is_sub = local_values[IS_SUB];
    // TODO
}

@ -1,107 +0,0 @@
use plonky2::hash::hashing::SPONGE_WIDTH;
use plonky2::hash::poseidon;

//// CORE REGISTERS

/// A cycle counter. Starts at 0; increments by 1.
pub(crate) const COL_CLOCK: usize = 0;

/// A column which contains the values `[0, ... 2^16 - 1]`, potentially with duplicates. Used for
/// 16-bit range checks.
///
/// For ease of verification, we enforce that it must begin with 0 and end with `2^16 - 1`, and each
/// delta must be either 0 or 1.
pub(crate) const COL_RANGE_16: usize = COL_CLOCK + 1;

/// Pointer to the current instruction.
pub(crate) const COL_INSTRUCTION_PTR: usize = COL_RANGE_16 + 1;
/// Pointer to the base of the current call's stack frame.
pub(crate) const COL_FRAME_PTR: usize = COL_INSTRUCTION_PTR + 1;
/// Pointer to the tip of the current call's stack frame.
pub(crate) const COL_STACK_PTR: usize = COL_FRAME_PTR + 1;

//// PERMUTATION UNIT

const START_PERMUTATION_UNIT: usize = COL_STACK_PTR + 1;

const START_PERMUTATION_FULL_FIRST: usize = START_PERMUTATION_UNIT + SPONGE_WIDTH;

pub(crate) const fn col_permutation_full_first_mid_sbox(round: usize, i: usize) -> usize {
    debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_PERMUTATION_FULL_FIRST + 2 * round * SPONGE_WIDTH + i
}

pub(crate) const fn col_permutation_full_first_after_mds(round: usize, i: usize) -> usize {
    debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_PERMUTATION_FULL_FIRST + (2 * round + 1) * SPONGE_WIDTH + i
}

const START_PERMUTATION_PARTIAL: usize =
    col_permutation_full_first_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, SPONGE_WIDTH - 1) + 1;

pub(crate) const fn col_permutation_partial_mid_sbox(round: usize) -> usize {
    debug_assert!(round < poseidon::N_PARTIAL_ROUNDS);
    START_PERMUTATION_PARTIAL + 2 * round
}

pub(crate) const fn col_permutation_partial_after_sbox(round: usize) -> usize {
    debug_assert!(round < poseidon::N_PARTIAL_ROUNDS);
    START_PERMUTATION_PARTIAL + 2 * round + 1
}

const START_PERMUTATION_FULL_SECOND: usize =
    col_permutation_partial_after_sbox(poseidon::N_PARTIAL_ROUNDS - 1) + 1;

pub(crate) const fn col_permutation_full_second_mid_sbox(round: usize, i: usize) -> usize {
    debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_PERMUTATION_FULL_SECOND + 2 * round * SPONGE_WIDTH + i
}

pub(crate) const fn col_permutation_full_second_after_mds(round: usize, i: usize) -> usize {
    debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_PERMUTATION_FULL_SECOND + (2 * round + 1) * SPONGE_WIDTH + i
}

pub(crate) const fn col_permutation_input(i: usize) -> usize {
    debug_assert!(i < SPONGE_WIDTH);
    START_PERMUTATION_UNIT + i
}

pub(crate) const fn col_permutation_output(i: usize) -> usize {
    debug_assert!(i < SPONGE_WIDTH);
    col_permutation_full_second_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, i)
}

const END_PERMUTATION_UNIT: usize = col_permutation_output(SPONGE_WIDTH - 1);

//// MEMORY UNITS

//// DECOMPOSITION UNITS

const START_DECOMPOSITION_UNITS: usize = END_PERMUTATION_UNIT + 1;

const NUM_DECOMPOSITION_UNITS: usize = 4;
/// The number of bits associated with a single decomposition unit.
const DECOMPOSITION_UNIT_BITS: usize = 32;
/// One column for the value being decomposed, plus one column per bit.
const DECOMPOSITION_UNIT_COLS: usize = 1 + DECOMPOSITION_UNIT_BITS;

pub(crate) const fn col_decomposition_input(unit: usize) -> usize {
    debug_assert!(unit < NUM_DECOMPOSITION_UNITS);
    START_DECOMPOSITION_UNITS + unit * DECOMPOSITION_UNIT_COLS
}

pub(crate) const fn col_decomposition_bit(unit: usize, bit: usize) -> usize {
    debug_assert!(unit < NUM_DECOMPOSITION_UNITS);
    debug_assert!(bit < DECOMPOSITION_UNIT_BITS);
    START_DECOMPOSITION_UNITS + unit * DECOMPOSITION_UNIT_COLS + 1 + bit
}

const END_DECOMPOSITION_UNITS: usize =
    START_DECOMPOSITION_UNITS + DECOMPOSITION_UNIT_COLS * NUM_DECOMPOSITION_UNITS;

pub(crate) const NUM_COLUMNS: usize = END_DECOMPOSITION_UNITS;

@ -1,4 +1,5 @@
use plonky2::field::extension_field::{Extendable, FieldExtension};
use plonky2::field::extension_field::Extendable;
use plonky2::field::field_types::{Field, PrimeField64};
use plonky2::field::packed_field::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::circuit_builder::CircuitBuilder;
@ -6,75 +7,87 @@ use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsume
use starky::vars::StarkEvaluationTargets;
use starky::vars::StarkEvaluationVars;

use crate::column_layout::{
    COL_CLOCK, COL_FRAME_PTR, COL_INSTRUCTION_PTR, COL_RANGE_16, COL_STACK_PTR, NUM_COLUMNS,
};
use crate::public_input_layout::NUM_PUBLIC_INPUTS;
use crate::system_zero::SystemZero;
use crate::registers::core::*;
use crate::registers::NUM_COLUMNS;

impl<F: RichField + Extendable<D>, const D: usize> SystemZero<F, D> {
    pub(crate) fn generate_first_row_core_registers(&self, first_values: &mut [F; NUM_COLUMNS]) {
        first_values[COL_CLOCK] = F::ZERO;
        first_values[COL_RANGE_16] = F::ZERO;
        first_values[COL_INSTRUCTION_PTR] = F::ZERO;
        first_values[COL_FRAME_PTR] = F::ZERO;
        first_values[COL_STACK_PTR] = F::ZERO;
    }

    pub(crate) fn generate_next_row_core_registers(
        &self,
        local_values: &[F; NUM_COLUMNS],
        next_values: &mut [F; NUM_COLUMNS],
    ) {
        // We increment the clock by 1.
        next_values[COL_CLOCK] = local_values[COL_CLOCK] + F::ONE;

        // We increment the 16-bit table by 1, unless we've reached the max value of 2^16 - 1, in
        // which case we repeat that value.
        let prev_range_16 = local_values[COL_RANGE_16].to_canonical_u64();
        let next_range_16 = (prev_range_16 + 1).min((1 << 16) - 1);
        next_values[COL_RANGE_16] = F::from_canonical_u64(next_range_16);

        next_values[COL_INSTRUCTION_PTR] = todo!();

        next_values[COL_FRAME_PTR] = todo!();

        next_values[COL_STACK_PTR] = todo!();
    }

    #[inline]
    pub(crate) fn eval_core_registers<FE, P, const D2: usize>(
        &self,
        vars: StarkEvaluationVars<FE, P, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
        yield_constr: &mut ConstraintConsumer<P>,
    ) where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        // The clock must start with 0, and increment by 1.
        let local_clock = vars.local_values[COL_CLOCK];
        let next_clock = vars.next_values[COL_CLOCK];
        let delta_clock = next_clock - local_clock;
        yield_constr.constraint_first_row(local_clock);
        yield_constr.constraint(delta_clock - FE::ONE);

        // The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1.
        let local_range_16 = vars.local_values[COL_RANGE_16];
        let next_range_16 = vars.next_values[COL_RANGE_16];
        let delta_range_16 = next_range_16 - local_range_16;
        yield_constr.constraint_first_row(local_range_16);
        yield_constr.constraint_last_row(local_range_16 - FE::from_canonical_u64((1 << 16) - 1));
        yield_constr.constraint(delta_range_16 * (delta_range_16 - FE::ONE));

        todo!()
    }

    pub(crate) fn eval_core_registers_recursively(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        vars: StarkEvaluationTargets<D, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
        yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    ) {
        todo!()
    }
pub(crate) fn generate_first_row_core_registers<F: Field>(first_values: &mut [F; NUM_COLUMNS]) {
    first_values[COL_CLOCK] = F::ZERO;
    first_values[COL_RANGE_16] = F::ZERO;
    first_values[COL_INSTRUCTION_PTR] = F::ZERO;
    first_values[COL_FRAME_PTR] = F::ZERO;
    first_values[COL_STACK_PTR] = F::ZERO;
}

pub(crate) fn generate_next_row_core_registers<F: PrimeField64>(
    local_values: &[F; NUM_COLUMNS],
    next_values: &mut [F; NUM_COLUMNS],
) {
    // We increment the clock by 1.
    next_values[COL_CLOCK] = local_values[COL_CLOCK] + F::ONE;

    // We increment the 16-bit table by 1, unless we've reached the max value of 2^16 - 1, in
    // which case we repeat that value.
    let prev_range_16 = local_values[COL_RANGE_16].to_canonical_u64();
    let next_range_16 = (prev_range_16 + 1).min((1 << 16) - 1);
    next_values[COL_RANGE_16] = F::from_canonical_u64(next_range_16);

    // next_values[COL_INSTRUCTION_PTR] = todo!();

    // next_values[COL_FRAME_PTR] = todo!();

    // next_values[COL_STACK_PTR] = todo!();
}

#[inline]
pub(crate) fn eval_core_registers<F: Field, P: PackedField<Scalar = F>>(
    vars: StarkEvaluationVars<F, P, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // The clock must start with 0, and increment by 1.
    let local_clock = vars.local_values[COL_CLOCK];
    let next_clock = vars.next_values[COL_CLOCK];
    let delta_clock = next_clock - local_clock;
    yield_constr.constraint_first_row(local_clock);
    yield_constr.constraint(delta_clock - F::ONE);

    // The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1.
    let local_range_16 = vars.local_values[COL_RANGE_16];
    let next_range_16 = vars.next_values[COL_RANGE_16];
    let delta_range_16 = next_range_16 - local_range_16;
    yield_constr.constraint_first_row(local_range_16);
    yield_constr.constraint_last_row(local_range_16 - F::from_canonical_u64((1 << 16) - 1));
    yield_constr.constraint(delta_range_16 * delta_range_16 - delta_range_16);

    // TODO constraints for stack etc.
}

pub(crate) fn eval_core_registers_recursively<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    vars: StarkEvaluationTargets<D, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let one_ext = builder.one_extension();
    let max_u16 = builder.constant(F::from_canonical_u64((1 << 16) - 1));
    let max_u16_ext = builder.convert_to_ext(max_u16);

    // The clock must start with 0, and increment by 1.
    let local_clock = vars.local_values[COL_CLOCK];
    let next_clock = vars.next_values[COL_CLOCK];
    let delta_clock = builder.sub_extension(next_clock, local_clock);
    yield_constr.constraint_first_row(builder, local_clock);
    let constraint = builder.sub_extension(delta_clock, one_ext);
    yield_constr.constraint(builder, constraint);

    // The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1.
    let local_range_16 = vars.local_values[COL_RANGE_16];
    let next_range_16 = vars.next_values[COL_RANGE_16];
    let delta_range_16 = builder.sub_extension(next_range_16, local_range_16);
    yield_constr.constraint_first_row(builder, local_range_16);
    let constraint = builder.sub_extension(local_range_16, max_u16_ext);
    yield_constr.constraint_last_row(builder, constraint);
    // The binary-delta check is delta^2 - delta, matching the packed evaluation above.
    let constraint = builder.mul_sub_extension(delta_range_16, delta_range_16, delta_range_16);
    yield_constr.constraint(builder, constraint);

    // TODO constraints for stack etc.
}

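The constraints above pin down the `COL_RANGE_16` column: it starts at 0, ends at `2^16 - 1`, and each step increases by 0 or 1, which is what makes it usable as a 16-bit range-check table. A plain-integer sketch of the same discipline (illustrative names only, no field or packing types):

```rust
fn check_range_16_column(column: &[u64]) -> bool {
    let first_ok = column.first() == Some(&0);
    let last_ok = column.last() == Some(&((1 << 16) - 1));
    // Each delta must be 0 or 1, mirroring the `delta * delta - delta == 0` constraint.
    let deltas_ok = column.windows(2).all(|w| w[1] == w[0] || w[1] == w[0] + 1);
    first_ok && last_ok && deltas_ok
}

fn main() {
    // Mirrors generate_next_row_core_registers: increment until 2^16 - 1, then repeat it.
    let column: Vec<u64> = (0u64..(1 << 16) + 8).map(|i| i.min((1 << 16) - 1)).collect();
    assert!(check_range_16_column(&column));
}
```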
@ -1,12 +1,11 @@
// TODO: Remove these when crate is closer to being finished.
#![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unreachable_code)]
#![allow(clippy::diverging_sub_expression)]

mod column_layout;
mod arithmetic;
mod core_registers;
mod memory;
mod permutation_unit;
mod public_input_layout;
mod registers;
pub mod system_zero;

@ -2,31 +2,23 @@ use plonky2::field::extension_field::{Extendable, FieldExtension};
use plonky2::field::packed_field::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::SPONGE_WIDTH;
use plonky2::hash::poseidon::{HALF_N_FULL_ROUNDS, N_PARTIAL_ROUNDS};
use plonky2::hash::poseidon::{Poseidon, HALF_N_FULL_ROUNDS, N_PARTIAL_ROUNDS};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use starky::vars::StarkEvaluationTargets;
use starky::vars::StarkEvaluationVars;

use crate::column_layout::{
    col_permutation_full_first_after_mds as col_full_1st_after_mds,
    col_permutation_full_first_mid_sbox as col_full_1st_mid_sbox,
    col_permutation_full_second_after_mds as col_full_2nd_after_mds,
    col_permutation_full_second_mid_sbox as col_full_2nd_mid_sbox,
    col_permutation_input as col_input,
    col_permutation_partial_after_sbox as col_partial_after_sbox,
    col_permutation_partial_mid_sbox as col_partial_mid_sbox, NUM_COLUMNS,
};
use crate::public_input_layout::NUM_PUBLIC_INPUTS;
use crate::system_zero::SystemZero;
use crate::registers::permutation::*;
use crate::registers::NUM_COLUMNS;

fn constant_layer<F, FE, P, const D2: usize>(
fn constant_layer<F, FE, P, const D: usize>(
    mut state: [P; SPONGE_WIDTH],
    round: usize,
) -> [P; SPONGE_WIDTH]
where
    F: RichField,
    FE: FieldExtension<D2, BaseField = F>,
    F: Poseidon,
    FE: FieldExtension<D, BaseField = F>,
    P: PackedField<Scalar = FE>,
{
    // One day I might actually vectorize this, but today is not that day.
@ -43,10 +35,10 @@ where
    state
}

fn mds_layer<F, FE, P, const D2: usize>(mut state: [P; SPONGE_WIDTH]) -> [P; SPONGE_WIDTH]
fn mds_layer<F, FE, P, const D: usize>(mut state: [P; SPONGE_WIDTH]) -> [P; SPONGE_WIDTH]
where
    F: RichField,
    FE: FieldExtension<D2, BaseField = F>,
    F: Poseidon,
    FE: FieldExtension<D, BaseField = F>,
    P: PackedField<Scalar = FE>,
{
    for i in 0..P::WIDTH {
@ -62,205 +54,204 @@ where
    state
}

impl<F: RichField + Extendable<D>, const D: usize> SystemZero<F, D> {
|
||||
pub(crate) fn generate_permutation_unit(values: &mut [F; NUM_COLUMNS]) {
|
||||
// Load inputs.
|
||||
let mut state = [F::ZERO; SPONGE_WIDTH];
|
||||
pub(crate) fn generate_permutation_unit<F: Poseidon>(values: &mut [F; NUM_COLUMNS]) {
|
||||
// Load inputs.
|
||||
let mut state = [F::ZERO; SPONGE_WIDTH];
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
state[i] = values[col_input(i)];
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
F::constant_layer(&mut state, r);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
state[i] = values[col_input(i)];
|
||||
let state_cubed = state[i].cube();
|
||||
values[col_full_first_mid_sbox(r, i)] = state_cubed;
|
||||
state[i] *= state_cubed.square(); // Form state ** 7.
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
F::constant_layer(&mut state, r);
|
||||
state = F::mds_layer(&state);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let state_cubed = state[i].cube();
|
||||
values[col_full_1st_mid_sbox(r, i)] = state_cubed;
|
||||
state[i] *= state_cubed.square(); // Form state ** 7.
|
||||
}
|
||||
|
||||
state = F::mds_layer(&state);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
values[col_full_1st_after_mds(r, i)] = state[i];
|
||||
}
|
||||
}
|
||||
|
||||
for r in 0..N_PARTIAL_ROUNDS {
|
||||
F::constant_layer(&mut state, HALF_N_FULL_ROUNDS + r);
|
||||
|
||||
let state0_cubed = state[0].cube();
|
||||
values[col_partial_mid_sbox(r)] = state0_cubed;
|
||||
state[0] *= state0_cubed.square(); // Form state ** 7.
|
||||
values[col_partial_after_sbox(r)] = state[0];
|
||||
|
||||
state = F::mds_layer(&state);
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
F::constant_layer(&mut state, HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let state_cubed = state[i].cube();
|
||||
values[col_full_2nd_mid_sbox(r, i)] = state_cubed;
|
||||
state[i] *= state_cubed.square(); // Form state ** 7.
|
||||
}
|
||||
|
||||
state = F::mds_layer(&state);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
values[col_full_2nd_after_mds(r, i)] = state[i];
|
||||
}
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
values[col_full_first_after_mds(r, i)] = state[i];
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn eval_permutation_unit<FE, P, const D2: usize>(
|
||||
vars: StarkEvaluationVars<FE, P, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
|
||||
yield_constr: &mut ConstraintConsumer<P>,
|
||||
) where
|
||||
FE: FieldExtension<D2, BaseField = F>,
|
||||
P: PackedField<Scalar = FE>,
|
||||
{
|
||||
let local_values = &vars.local_values;
|
||||
for r in 0..N_PARTIAL_ROUNDS {
|
||||
F::constant_layer(&mut state, HALF_N_FULL_ROUNDS + r);
|
||||
|
||||
let state0_cubed = state[0].cube();
|
||||
values[col_partial_mid_sbox(r)] = state0_cubed;
|
||||
state[0] *= state0_cubed.square(); // Form state ** 7.
|
||||
values[col_partial_after_sbox(r)] = state[0];
|
||||
|
||||
state = F::mds_layer(&state);
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
F::constant_layer(&mut state, HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r);
|
||||
|
||||
// Load inputs.
|
||||
let mut state = [P::ZEROS; SPONGE_WIDTH];
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
state[i] = local_values[col_input(i)];
|
||||
let state_cubed = state[i].cube();
|
||||
values[col_full_second_mid_sbox(r, i)] = state_cubed;
|
||||
state[i] *= state_cubed.square(); // Form state ** 7.
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
state = constant_layer(state, r);
|
||||
state = F::mds_layer(&state);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let state_cubed = state[i] * state[i].square();
|
||||
yield_constr
|
||||
.constraint_wrapping(state_cubed - local_values[col_full_1st_mid_sbox(r, i)]);
|
||||
let state_cubed = local_values[col_full_1st_mid_sbox(r, i)];
|
||||
state[i] *= state_cubed.square(); // Form state ** 7.
|
||||
}
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
values[col_full_second_after_mds(r, i)] = state[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
state = mds_layer(state);
|
||||
#[inline]
|
||||
pub(crate) fn eval_permutation_unit<F, FE, P, const D: usize>(
|
||||
vars: StarkEvaluationVars<FE, P, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
|
||||
yield_constr: &mut ConstraintConsumer<P>,
|
||||
) where
|
||||
F: Poseidon,
|
||||
FE: FieldExtension<D, BaseField = F>,
|
||||
P: PackedField<Scalar = FE>,
|
||||
{
|
||||
let local_values = &vars.local_values;
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
yield_constr
|
||||
.constraint_wrapping(state[i] - local_values[col_full_1st_after_mds(r, i)]);
|
||||
state[i] = local_values[col_full_1st_after_mds(r, i)];
|
||||
}
|
||||
// Load inputs.
|
||||
let mut state = [P::ZEROS; SPONGE_WIDTH];
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
state[i] = local_values[col_input(i)];
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
state = constant_layer(state, r);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let state_cubed = state[i] * state[i].square();
|
||||
yield_constr
|
||||
.constraint_wrapping(state_cubed - local_values[col_full_first_mid_sbox(r, i)]);
|
||||
let state_cubed = local_values[col_full_first_mid_sbox(r, i)];
|
||||
state[i] *= state_cubed.square(); // Form state ** 7.
|
||||
}
|
||||
|
||||
for r in 0..N_PARTIAL_ROUNDS {
|
||||
state = constant_layer(state, HALF_N_FULL_ROUNDS + r);
|
||||
state = mds_layer(state);
|
||||
|
||||
let state0_cubed = state[0] * state[0].square();
|
||||
yield_constr.constraint_wrapping(state0_cubed - local_values[col_partial_mid_sbox(r)]);
|
||||
let state0_cubed = local_values[col_partial_mid_sbox(r)];
|
||||
state[0] *= state0_cubed.square(); // Form state ** 7.
|
||||
yield_constr.constraint_wrapping(state[0] - local_values[col_partial_after_sbox(r)]);
|
||||
state[0] = local_values[col_partial_after_sbox(r)];
|
||||
|
||||
state = mds_layer(state);
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
state = constant_layer(state, HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let state_cubed = state[i] * state[i].square();
|
||||
yield_constr
|
||||
.constraint_wrapping(state_cubed - local_values[col_full_2nd_mid_sbox(r, i)]);
|
||||
let state_cubed = local_values[col_full_2nd_mid_sbox(r, i)];
|
||||
state[i] *= state_cubed.square(); // Form state ** 7.
|
||||
}
|
||||
|
||||
state = mds_layer(state);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
yield_constr
|
||||
.constraint_wrapping(state[i] - local_values[col_full_2nd_after_mds(r, i)]);
|
||||
state[i] = local_values[col_full_2nd_after_mds(r, i)];
|
||||
}
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
yield_constr
|
||||
.constraint_wrapping(state[i] - local_values[col_full_first_after_mds(r, i)]);
|
||||
state[i] = local_values[col_full_first_after_mds(r, i)];
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn eval_permutation_unit_recursively(
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
vars: StarkEvaluationTargets<D, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
|
||||
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
|
||||
) {
|
||||
let zero = builder.zero_extension();
|
||||
let local_values = &vars.local_values;
|
||||
for r in 0..N_PARTIAL_ROUNDS {
|
||||
state = constant_layer(state, HALF_N_FULL_ROUNDS + r);
|
||||
|
||||
let state0_cubed = state[0] * state[0].square();
|
||||
yield_constr.constraint_wrapping(state0_cubed - local_values[col_partial_mid_sbox(r)]);
|
||||
let state0_cubed = local_values[col_partial_mid_sbox(r)];
|
||||
state[0] *= state0_cubed.square(); // Form state ** 7.
|
||||
yield_constr.constraint_wrapping(state[0] - local_values[col_partial_after_sbox(r)]);
|
||||
state[0] = local_values[col_partial_after_sbox(r)];
|
||||
|
||||
state = mds_layer(state);
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
state = constant_layer(state, HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r);
|
||||
|
||||
// Load inputs.
|
||||
let mut state = [zero; SPONGE_WIDTH];
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
state[i] = local_values[col_input(i)];
|
||||
let state_cubed = state[i] * state[i].square();
|
||||
yield_constr
|
||||
.constraint_wrapping(state_cubed - local_values[col_full_second_mid_sbox(r, i)]);
|
||||
let state_cubed = local_values[col_full_second_mid_sbox(r, i)];
|
||||
state[i] *= state_cubed.square(); // Form state ** 7.
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
F::constant_layer_recursive(builder, &mut state, r);
|
||||
state = mds_layer(state);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let state_cubed = builder.cube_extension(state[i]);
|
||||
let diff =
|
||||
builder.sub_extension(state_cubed, local_values[col_full_1st_mid_sbox(r, i)]);
|
||||
yield_constr.constraint_wrapping(builder, diff);
|
||||
let state_cubed = local_values[col_full_1st_mid_sbox(r, i)];
|
||||
state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]);
|
||||
// Form state ** 7.
|
||||
}
|
||||
|
||||
state = F::mds_layer_recursive(builder, &state);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let diff =
|
||||
builder.sub_extension(state[i], local_values[col_full_1st_after_mds(r, i)]);
|
||||
yield_constr.constraint_wrapping(builder, diff);
|
||||
state[i] = local_values[col_full_1st_after_mds(r, i)];
|
||||
}
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
yield_constr
|
||||
.constraint_wrapping(state[i] - local_values[col_full_second_after_mds(r, i)]);
|
||||
state[i] = local_values[col_full_second_after_mds(r, i)];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for r in 0..N_PARTIAL_ROUNDS {
|
||||
F::constant_layer_recursive(builder, &mut state, HALF_N_FULL_ROUNDS + r);
|
||||
pub(crate) fn eval_permutation_unit_recursively<F: RichField + Extendable<D>, const D: usize>(
|
||||
builder: &mut CircuitBuilder<F, D>,
|
||||
vars: StarkEvaluationTargets<D, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
|
||||
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
|
||||
) {
|
||||
let zero = builder.zero_extension();
|
||||
let local_values = &vars.local_values;
|
||||
|
||||
let state0_cubed = builder.cube_extension(state[0]);
|
||||
let diff = builder.sub_extension(state0_cubed, local_values[col_partial_mid_sbox(r)]);
|
||||
// Load inputs.
|
||||
let mut state = [zero; SPONGE_WIDTH];
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
state[i] = local_values[col_input(i)];
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
F::constant_layer_recursive(builder, &mut state, r);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let state_cubed = builder.cube_extension(state[i]);
|
||||
let diff =
|
||||
builder.sub_extension(state_cubed, local_values[col_full_first_mid_sbox(r, i)]);
|
||||
yield_constr.constraint_wrapping(builder, diff);
|
||||
let state0_cubed = local_values[col_partial_mid_sbox(r)];
|
||||
state[0] = builder.mul_many_extension(&[state[0], state0_cubed, state0_cubed]); // Form state ** 7.
|
||||
let diff = builder.sub_extension(state[0], local_values[col_partial_after_sbox(r)]);
|
||||
yield_constr.constraint_wrapping(builder, diff);
|
||||
state[0] = local_values[col_partial_after_sbox(r)];
|
||||
|
||||
state = F::mds_layer_recursive(builder, &state);
|
||||
let state_cubed = local_values[col_full_first_mid_sbox(r, i)];
|
||||
state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]);
|
||||
// Form state ** 7.
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
F::constant_layer_recursive(
|
||||
builder,
|
||||
&mut state,
|
||||
HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r,
|
||||
);
|
||||
state = F::mds_layer_recursive(builder, &state);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let state_cubed = builder.cube_extension(state[i]);
|
||||
let diff =
|
||||
builder.sub_extension(state_cubed, local_values[col_full_2nd_mid_sbox(r, i)]);
|
||||
yield_constr.constraint_wrapping(builder, diff);
|
||||
let state_cubed = local_values[col_full_2nd_mid_sbox(r, i)];
|
||||
state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]);
|
||||
// Form state ** 7.
|
||||
}
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let diff =
|
||||
builder.sub_extension(state[i], local_values[col_full_first_after_mds(r, i)]);
|
||||
yield_constr.constraint_wrapping(builder, diff);
|
||||
state[i] = local_values[col_full_first_after_mds(r, i)];
|
||||
}
|
||||
}
|
||||
|
||||
state = F::mds_layer_recursive(builder, &state);
|
||||
for r in 0..N_PARTIAL_ROUNDS {
|
||||
F::constant_layer_recursive(builder, &mut state, HALF_N_FULL_ROUNDS + r);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let diff =
|
||||
builder.sub_extension(state[i], local_values[col_full_2nd_after_mds(r, i)]);
|
||||
yield_constr.constraint_wrapping(builder, diff);
|
||||
state[i] = local_values[col_full_2nd_after_mds(r, i)];
|
||||
}
|
||||
let state0_cubed = builder.cube_extension(state[0]);
|
||||
let diff = builder.sub_extension(state0_cubed, local_values[col_partial_mid_sbox(r)]);
|
||||
yield_constr.constraint_wrapping(builder, diff);
|
||||
let state0_cubed = local_values[col_partial_mid_sbox(r)];
|
||||
state[0] = builder.mul_many_extension(&[state[0], state0_cubed, state0_cubed]); // Form state ** 7.
|
||||
let diff = builder.sub_extension(state[0], local_values[col_partial_after_sbox(r)]);
|
||||
yield_constr.constraint_wrapping(builder, diff);
|
||||
state[0] = local_values[col_partial_after_sbox(r)];
|
||||
|
||||
state = F::mds_layer_recursive(builder, &state);
|
||||
}
|
||||
|
||||
for r in 0..HALF_N_FULL_ROUNDS {
|
||||
F::constant_layer_recursive(
|
||||
builder,
|
||||
&mut state,
|
||||
HALF_N_FULL_ROUNDS + N_PARTIAL_ROUNDS + r,
|
||||
);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let state_cubed = builder.cube_extension(state[i]);
|
||||
let diff =
|
||||
builder.sub_extension(state_cubed, local_values[col_full_second_mid_sbox(r, i)]);
|
||||
yield_constr.constraint_wrapping(builder, diff);
|
||||
let state_cubed = local_values[col_full_second_mid_sbox(r, i)];
|
||||
state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]);
|
||||
// Form state ** 7.
|
||||
}
|
||||
|
||||
state = F::mds_layer_recursive(builder, &state);
|
||||
|
||||
for i in 0..SPONGE_WIDTH {
|
||||
let diff =
|
||||
builder.sub_extension(state[i], local_values[col_full_second_after_mds(r, i)]);
|
||||
yield_constr.constraint_wrapping(builder, diff);
|
||||
state[i] = local_values[col_full_second_after_mds(r, i)];
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -275,12 +266,10 @@ mod tests {
    use starky::constraint_consumer::ConstraintConsumer;
    use starky::vars::StarkEvaluationVars;

    use crate::column_layout::{
        col_permutation_input as col_input, col_permutation_output as col_output, NUM_COLUMNS,
    };
    use crate::permutation_unit::SPONGE_WIDTH;
    use crate::permutation_unit::{eval_permutation_unit, generate_permutation_unit, SPONGE_WIDTH};
    use crate::public_input_layout::NUM_PUBLIC_INPUTS;
    use crate::system_zero::SystemZero;
    use crate::registers::permutation::{col_input, col_output};
    use crate::registers::NUM_COLUMNS;

    #[test]
    fn generate_eval_consistency() {
@ -288,7 +277,7 @@ mod tests {
        type F = GoldilocksField;

        let mut values = [F::default(); NUM_COLUMNS];
        SystemZero::<F, D>::generate_permutation_unit(&mut values);
        generate_permutation_unit(&mut values);

        let vars = StarkEvaluationVars {
            local_values: &values,
@ -302,7 +291,7 @@ mod tests {
            GoldilocksField::ONE,
            GoldilocksField::ONE,
        );
        SystemZero::<F, D>::eval_permutation_unit(vars, &mut constrant_consumer);
        eval_permutation_unit(vars, &mut constrant_consumer);
        for &acc in &constrant_consumer.constraint_accs {
            assert_eq!(acc, GoldilocksField::ZERO);
        }
@ -325,7 +314,7 @@ mod tests {
        for i in 0..SPONGE_WIDTH {
            values[col_input(i)] = state[i];
        }
        SystemZero::<F, D>::generate_permutation_unit(&mut values);
        generate_permutation_unit(&mut values);
        let mut result = [F::default(); SPONGE_WIDTH];
        for i in 0..SPONGE_WIDTH {
            result[i] = values[col_output(i)];

37 system_zero/src/registers/arithmetic.rs Normal file
@ -0,0 +1,37 @@
//! Arithmetic unit.

pub(crate) const IS_ADD: usize = super::START_ARITHMETIC;
pub(crate) const IS_SUB: usize = IS_ADD + 1;
pub(crate) const IS_MUL: usize = IS_SUB + 1;
pub(crate) const IS_DIV: usize = IS_MUL + 1;

const START_SHARED_COLS: usize = IS_DIV + 1;

/// Within the arithmetic unit, there are shared columns which can be used by any arithmetic
/// circuit, depending on which one is active this cycle.
// Can be increased as needed as other operations are implemented.
const NUM_SHARED_COLS: usize = 3;

const fn shared_col(i: usize) -> usize {
    debug_assert!(i < NUM_SHARED_COLS);
    START_SHARED_COLS + i
}

/// The first value to be added; treated as an unsigned u32.
pub(crate) const COL_ADD_INPUT_1: usize = shared_col(0);
/// The second value to be added; treated as an unsigned u32.
pub(crate) const COL_ADD_INPUT_2: usize = shared_col(1);
/// The third value to be added; treated as an unsigned u32.
pub(crate) const COL_ADD_INPUT_3: usize = shared_col(2);

// Note: Addition outputs three 16-bit chunks, and since these values need to be range-checked
// anyway, we might as well use the range check unit's columns as our addition outputs. So the
// three following columns are basically aliases, not columns owned by the arithmetic unit.
/// The first 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_ADD_OUTPUT_1: usize = super::range_check_16::col_rc_16_input(0);
/// The second 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_ADD_OUTPUT_2: usize = super::range_check_16::col_rc_16_input(1);
/// The third 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_ADD_OUTPUT_3: usize = super::range_check_16::col_rc_16_input(2);

// The unit owns the four operation flags plus the shared columns.
pub(super) const END: usize = START_SHARED_COLS + NUM_SHARED_COLS;

10 system_zero/src/registers/boolean.rs Normal file
@ -0,0 +1,10 @@
//! Boolean unit. Contains columns whose values must be 0 or 1.

const NUM_BITS: usize = 128;

pub const fn col_bit(index: usize) -> usize {
    debug_assert!(index < NUM_BITS);
    super::START_BOOLEAN + index
}

pub(super) const END: usize = super::START_BOOLEAN + NUM_BITS;

20 system_zero/src/registers/core.rs Normal file
@ -0,0 +1,20 @@
//! Core registers.

/// A cycle counter. Starts at 0; increments by 1.
pub(crate) const COL_CLOCK: usize = super::START_CORE;

/// A column which contains the values `[0, ... 2^16 - 1]`, potentially with duplicates. Used for
/// 16-bit range checks.
///
/// For ease of verification, we enforce that it must begin with 0 and end with `2^16 - 1`, and each
/// delta must be either 0 or 1.
pub(crate) const COL_RANGE_16: usize = COL_CLOCK + 1;

/// Pointer to the current instruction.
pub(crate) const COL_INSTRUCTION_PTR: usize = COL_RANGE_16 + 1;
/// Pointer to the base of the current call's stack frame.
pub(crate) const COL_FRAME_PTR: usize = COL_INSTRUCTION_PTR + 1;
/// Pointer to the tip of the current call's stack frame.
pub(crate) const COL_STACK_PTR: usize = COL_FRAME_PTR + 1;

pub(super) const END: usize = COL_STACK_PTR + 1;

3 system_zero/src/registers/logic.rs Normal file
@ -0,0 +1,3 @@
//! Logic unit.

pub(super) const END: usize = super::START_LOGIC;

21 system_zero/src/registers/lookup.rs Normal file
@ -0,0 +1,21 @@
//! Lookup unit.
//! See https://zcash.github.io/halo2/design/proving-system/lookup.html

const START_UNIT: usize = super::START_LOOKUP;

const NUM_LOOKUPS: usize =
    super::range_check_16::NUM_RANGE_CHECKS + super::range_check_degree::NUM_RANGE_CHECKS;

/// This column contains a permutation of the input values.
const fn col_permuted_input(i: usize) -> usize {
    debug_assert!(i < NUM_LOOKUPS);
    START_UNIT + 2 * i
}

/// This column contains a permutation of the table values.
const fn col_permuted_table(i: usize) -> usize {
    debug_assert!(i < NUM_LOOKUPS);
    START_UNIT + 2 * i + 1
}

// Each lookup uses two columns: a permuted copy of the inputs and a permuted copy of the table.
pub(super) const END: usize = START_UNIT + 2 * NUM_LOOKUPS;

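The lookup unit follows the halo2-style argument linked above: each lookup keeps a permuted copy of the input column next to a permuted copy of the table column, and the per-row condition is that an input either repeats the previous permuted input or matches the permuted table value on the same row. A rough sketch of that check on plain values, with illustrative names rather than the starky/system_zero API:

```rust
// Check the halo2-style lookup condition on already-permuted (sorted) copies of the columns.
fn lookup_rows_consistent(permuted_inputs: &[u64], permuted_table: &[u64]) -> bool {
    permuted_inputs.iter().enumerate().all(|(i, &inp)| {
        let repeats_previous = i > 0 && inp == permuted_inputs[i - 1];
        let matches_table = inp == permuted_table[i];
        repeats_previous || matches_table
    })
}

fn main() {
    // Inputs {1, 1, 3} against table rows {1, 2, 3}: duplicates are fine, missing values are not.
    assert!(lookup_rows_consistent(&[1, 1, 3], &[1, 2, 3]));
    assert!(!lookup_rows_consistent(&[1, 4, 3], &[1, 2, 3]));
}
```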
3 system_zero/src/registers/memory.rs Normal file
@ -0,0 +1,3 @@
//! Memory unit.

pub(super) const END: usize = super::START_MEMORY;

20 system_zero/src/registers/mod.rs Normal file
@ -0,0 +1,20 @@
pub(crate) mod arithmetic;
pub(crate) mod boolean;
pub(crate) mod core;
pub(crate) mod logic;
pub(crate) mod lookup;
pub(crate) mod memory;
pub(crate) mod permutation;
pub(crate) mod range_check_16;
pub(crate) mod range_check_degree;

const START_ARITHMETIC: usize = 0;
const START_BOOLEAN: usize = arithmetic::END;
const START_CORE: usize = boolean::END;
const START_LOGIC: usize = core::END;
const START_LOOKUP: usize = logic::END;
const START_MEMORY: usize = lookup::END;
const START_PERMUTATION: usize = memory::END;
const START_RANGE_CHECK_16: usize = permutation::END;
const START_RANGE_CHECK_DEGREE: usize = range_check_16::END;
pub(crate) const NUM_COLUMNS: usize = range_check_degree::END;

57 system_zero/src/registers/permutation.rs Normal file
@ -0,0 +1,57 @@
//! Permutation unit.

use plonky2::hash::hashing::SPONGE_WIDTH;
use plonky2::hash::poseidon;

const START_FULL_FIRST: usize = super::START_PERMUTATION + SPONGE_WIDTH;

pub const fn col_full_first_mid_sbox(round: usize, i: usize) -> usize {
    debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_FULL_FIRST + 2 * round * SPONGE_WIDTH + i
}

pub const fn col_full_first_after_mds(round: usize, i: usize) -> usize {
    debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_FULL_FIRST + (2 * round + 1) * SPONGE_WIDTH + i
}

const START_PARTIAL: usize =
    col_full_first_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, SPONGE_WIDTH - 1) + 1;

pub const fn col_partial_mid_sbox(round: usize) -> usize {
    debug_assert!(round < poseidon::N_PARTIAL_ROUNDS);
    START_PARTIAL + 2 * round
}

pub const fn col_partial_after_sbox(round: usize) -> usize {
    debug_assert!(round < poseidon::N_PARTIAL_ROUNDS);
    START_PARTIAL + 2 * round + 1
}

const START_FULL_SECOND: usize = col_partial_after_sbox(poseidon::N_PARTIAL_ROUNDS - 1) + 1;

pub const fn col_full_second_mid_sbox(round: usize, i: usize) -> usize {
    debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_FULL_SECOND + 2 * round * SPONGE_WIDTH + i
}

pub const fn col_full_second_after_mds(round: usize, i: usize) -> usize {
    debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_FULL_SECOND + (2 * round + 1) * SPONGE_WIDTH + i
}

pub const fn col_input(i: usize) -> usize {
    debug_assert!(i < SPONGE_WIDTH);
    super::START_PERMUTATION + i
}

pub const fn col_output(i: usize) -> usize {
    debug_assert!(i < SPONGE_WIDTH);
    col_full_second_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, i)
}

pub(super) const END: usize = col_output(SPONGE_WIDTH - 1) + 1;

11
system_zero/src/registers/range_check_16.rs
Normal file
11
system_zero/src/registers/range_check_16.rs
Normal file
@ -0,0 +1,11 @@
//! Range check unit which checks that values are in `[0, 2^16)`.

pub(super) const NUM_RANGE_CHECKS: usize = 5;

/// The input of the `i`th range check, i.e. the value being range checked.
pub(crate) const fn col_rc_16_input(i: usize) -> usize {
    debug_assert!(i < NUM_RANGE_CHECKS);
    super::START_RANGE_CHECK_16 + i
}

pub(super) const END: usize = super::START_RANGE_CHECK_16 + NUM_RANGE_CHECKS;
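Each row therefore carries five independent 16-bit range-check slots, addressed through `col_rc_16_input`. A standalone sketch of how a generator might fill them (the start offset is a placeholder; in the real layout it follows the permutation unit):

```rust
// Sketch only: the real start column is super::START_RANGE_CHECK_16.
const START_RANGE_CHECK_16: usize = 0; // placeholder offset
const NUM_RANGE_CHECKS: usize = 5;

const fn col_rc_16_input(i: usize) -> usize {
    START_RANGE_CHECK_16 + i
}

fn main() {
    let values = [3u64, 65_535, 42, 0, 1];
    let mut row = [0u64; NUM_RANGE_CHECKS];
    for (i, &v) in values.iter().enumerate() {
        assert!(v < (1 << 16), "each slot must hold a 16-bit value");
        row[col_rc_16_input(i)] = v;
    }
    println!("{row:?}");
}
```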
system_zero/src/registers/range_check_degree.rs (new file)
@ -0,0 +1,11 @@
//! Range check unit which checks that values are in `[0, degree)`.

pub(super) const NUM_RANGE_CHECKS: usize = 5;

/// The input of the `i`th range check, i.e. the value being range checked.
pub(crate) const fn col_rc_degree_input(i: usize) -> usize {
    debug_assert!(i < NUM_RANGE_CHECKS);
    super::START_RANGE_CHECK_DEGREE + i
}

pub(super) const END: usize = super::START_RANGE_CHECK_DEGREE + NUM_RANGE_CHECKS;
@ -9,9 +9,19 @@ use starky::stark::Stark;
use starky::vars::StarkEvaluationTargets;
use starky::vars::StarkEvaluationVars;

use crate::column_layout::NUM_COLUMNS;
use crate::arithmetic::{
    eval_arithmetic_unit, eval_arithmetic_unit_recursively, generate_arithmetic_unit,
};
use crate::core_registers::{
    eval_core_registers, eval_core_registers_recursively, generate_first_row_core_registers,
    generate_next_row_core_registers,
};
use crate::memory::TransactionMemory;
use crate::permutation_unit::{
    eval_permutation_unit, eval_permutation_unit_recursively, generate_permutation_unit,
};
use crate::public_input_layout::NUM_PUBLIC_INPUTS;
use crate::registers::NUM_COLUMNS;

/// We require at least 2^16 rows as it helps support efficient 16-bit range checks.
const MIN_TRACE_ROWS: usize = 1 << 16;
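The 2^16-row minimum pairs naturally with 16-bit range checks: a trace that long can dedicate one column to the values 0..2^16 - 1, so checking that a value fits in 16 bits reduces to matching it against that column. The diff does not show the mechanism itself, so the sketch below only illustrates that general idea, not the crate's actual implementation:

```rust
// Illustration only (not the crate's actual mechanism): a column enumerating
// all 16-bit values, against which range-checked values can be matched.
const MIN_TRACE_ROWS: usize = 1 << 16;

fn main() {
    // One trace column holding 0, 1, ..., 2^16 - 1.
    let counter: Vec<u64> = (0..MIN_TRACE_ROWS as u64).collect();

    // A value fits in 16 bits iff it appears somewhere in that column.
    let passes = |x: u64| counter.binary_search(&x).is_ok();
    assert!(passes(65_535));
    assert!(!passes(65_536));
}
```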
@ -26,18 +36,25 @@ impl<F: RichField + Extendable<D>, const D: usize> SystemZero<F, D> {
        let memory = TransactionMemory::default();

        let mut row = [F::ZERO; NUM_COLUMNS];
        self.generate_first_row_core_registers(&mut row);
        Self::generate_permutation_unit(&mut row);
        generate_first_row_core_registers(&mut row);
        generate_arithmetic_unit(&mut row);
        generate_permutation_unit(&mut row);

        let mut trace = Vec::with_capacity(MIN_TRACE_ROWS);

        loop {
            let mut next_row = [F::ZERO; NUM_COLUMNS];
            self.generate_next_row_core_registers(&row, &mut next_row);
            Self::generate_permutation_unit(&mut next_row);
            generate_next_row_core_registers(&row, &mut next_row);
            generate_arithmetic_unit(&mut next_row);
            generate_permutation_unit(&mut next_row);

            trace.push(row);
            row = next_row;

            // TODO: Replace with proper termination condition.
            if trace.len() == (1 << 16) - 1 {
                break;
            }
        }

        trace.push(row);
@ -65,9 +82,10 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for SystemZero<F,
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        self.eval_core_registers(vars, yield_constr);
        Self::eval_permutation_unit(vars, yield_constr);
        todo!()
        eval_core_registers(vars, yield_constr);
        eval_arithmetic_unit(vars, yield_constr);
        eval_permutation_unit::<F, FE, P, D2>(vars, yield_constr);
        // TODO: Other units
    }

    fn eval_ext_recursively(
@ -76,9 +94,10 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for SystemZero<F,
        vars: StarkEvaluationTargets<D, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
        yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    ) {
        self.eval_core_registers_recursively(builder, vars, yield_constr);
        Self::eval_permutation_unit_recursively(builder, vars, yield_constr);
        todo!()
        eval_core_registers_recursively(builder, vars, yield_constr);
        eval_arithmetic_unit_recursively(builder, vars, yield_constr);
        eval_permutation_unit_recursively(builder, vars, yield_constr);
        // TODO: Other units
    }

    fn constraint_degree(&self) -> usize {
@ -103,7 +122,7 @@ mod tests {
    use crate::system_zero::SystemZero;

    #[test]
    #[ignore] // TODO
    #[ignore] // A bit slow.
    fn run() -> Result<()> {
        type F = GoldilocksField;
        type C = PoseidonGoldilocksConfig;
@ -121,7 +140,6 @@
    }

    #[test]
    #[ignore] // TODO
    fn degree() -> Result<()> {
        type F = GoldilocksField;
        type C = PoseidonGoldilocksConfig;

@ -183,7 +183,7 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
#[cfg(test)]
mod tests {
    use anyhow::Result;
    use plonky2::field::field_types::{Field, Field64};
    use plonky2::field::field_types::{Field, PrimeField64};
    use plonky2::iop::witness::PartialWitness;
    use plonky2::plonk::circuit_data::CircuitConfig;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};