Merge branch 'main' of github.com:mir-protocol/plonky2 into Fp12

Dmitry Vagner 2022-10-28 02:03:47 -07:00
commit fa05a33040
77 changed files with 1078 additions and 284 deletions

View File

@ -55,8 +55,8 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilderGlv<F, D>
) {
let k1 = self.add_virtual_nonnative_target_sized::<Secp256K1Scalar>(4);
let k2 = self.add_virtual_nonnative_target_sized::<Secp256K1Scalar>(4);
let k1_neg = self.add_virtual_bool_target();
let k2_neg = self.add_virtual_bool_target();
let k1_neg = self.add_virtual_bool_target_unsafe();
let k2_neg = self.add_virtual_bool_target_unsafe();
self.add_simple_generator(GLVDecompositionGenerator::<F, D> {
k: k.clone(),

View File

@ -183,7 +183,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilderNonNative<F, D>
b: &NonNativeTarget<FF>,
) -> NonNativeTarget<FF> {
let sum = self.add_virtual_nonnative_target::<FF>();
let overflow = self.add_virtual_bool_target();
let overflow = self.add_virtual_bool_target_unsafe();
self.add_simple_generator(NonNativeAdditionGenerator::<F, D, FF> {
a: a.clone(),
@ -282,7 +282,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilderNonNative<F, D>
b: &NonNativeTarget<FF>,
) -> NonNativeTarget<FF> {
let diff = self.add_virtual_nonnative_target::<FF>();
let overflow = self.add_virtual_bool_target();
let overflow = self.add_virtual_bool_target_unsafe();
self.add_simple_generator(NonNativeSubtractionGenerator::<F, D, FF> {
a: a.clone(),

View File

@ -53,10 +53,14 @@ impl<F: RichField, const D: usize> ArithmeticStark<F, D> {
compare::generate(local_values, columns::IS_GT);
} else if local_values[columns::IS_ADDMOD].is_one() {
modular::generate(local_values, columns::IS_ADDMOD);
} else if local_values[columns::IS_SUBMOD].is_one() {
modular::generate(local_values, columns::IS_SUBMOD);
} else if local_values[columns::IS_MULMOD].is_one() {
modular::generate(local_values, columns::IS_MULMOD);
} else if local_values[columns::IS_MOD].is_one() {
modular::generate(local_values, columns::IS_MOD);
} else if local_values[columns::IS_DIV].is_one() {
modular::generate(local_values, columns::IS_DIV);
} else {
todo!("the requested operation has not yet been implemented");
}

View File

@ -26,7 +26,8 @@ pub const IS_SDIV: usize = IS_DIV + 1;
pub const IS_MOD: usize = IS_SDIV + 1;
pub const IS_SMOD: usize = IS_MOD + 1;
pub const IS_ADDMOD: usize = IS_SMOD + 1;
pub const IS_MULMOD: usize = IS_ADDMOD + 1;
pub const IS_SUBMOD: usize = IS_ADDMOD + 1;
pub const IS_MULMOD: usize = IS_SUBMOD + 1;
pub const IS_LT: usize = IS_MULMOD + 1;
pub const IS_GT: usize = IS_LT + 1;
pub const IS_SLT: usize = IS_GT + 1;
@ -37,9 +38,9 @@ pub const IS_SAR: usize = IS_SHR + 1;
const START_SHARED_COLS: usize = IS_SAR + 1;
pub(crate) const ALL_OPERATIONS: [usize; 16] = [
IS_ADD, IS_MUL, IS_SUB, IS_DIV, IS_SDIV, IS_MOD, IS_SMOD, IS_ADDMOD, IS_MULMOD, IS_LT, IS_GT,
IS_SLT, IS_SGT, IS_SHL, IS_SHR, IS_SAR,
pub(crate) const ALL_OPERATIONS: [usize; 17] = [
IS_ADD, IS_MUL, IS_SUB, IS_DIV, IS_SDIV, IS_MOD, IS_SMOD, IS_ADDMOD, IS_SUBMOD, IS_MULMOD,
IS_LT, IS_GT, IS_SLT, IS_SGT, IS_SHL, IS_SHR, IS_SAR,
];
/// Within the Arithmetic Unit, there are shared columns which can be
@ -84,4 +85,11 @@ pub(crate) const MODULAR_AUX_INPUT: Range<usize> = AUX_INPUT_1;
pub(crate) const MODULAR_MOD_IS_ZERO: usize = AUX_INPUT_1.end - 1;
pub(crate) const MODULAR_OUT_AUX_RED: Range<usize> = AUX_INPUT_2;
#[allow(unused)] // TODO: Will be used when hooking into the CPU
pub(crate) const DIV_NUMERATOR: Range<usize> = MODULAR_INPUT_0;
#[allow(unused)] // TODO: Will be used when hooking into the CPU
pub(crate) const DIV_DENOMINATOR: Range<usize> = MODULAR_MODULUS;
#[allow(unused)] // TODO: Will be used when hooking into the CPU
pub(crate) const DIV_OUTPUT: Range<usize> = MODULAR_QUO_INPUT.start..MODULAR_QUO_INPUT.start + 16;
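// Illustrative summary: DIV reuses the modular layout above. The numerator
// and denominator live in the input and modulus columns, the DIV result is
// the first 16 quotient limbs, and MODULAR_OUTPUT holds the remainder, which
// the constraints still require to be reduced.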
pub const NUM_ARITH_COLUMNS: usize = START_SHARED_COLS + NUM_SHARED_COLS;

View File

@ -1,4 +1,5 @@
//! Support for the EVM modular instructions ADDMOD, MULMOD and MOD.
//! Support for the EVM modular instructions ADDMOD, MULMOD and MOD,
//! as well as DIV.
//!
//! This crate verifies an EVM modular instruction, which takes three
//! 256-bit inputs A, B and M, and produces a 256-bit output C satisfying
@ -82,8 +83,11 @@
//! - if modulus is non-zero, correct output is obtained
//! - if modulus is 0, then the check `output < modulus`, which verifies
//! that the output is reduced, will fail, because output is non-negative.
//!
//! In the case of DIV, we do something similar, except that we "replace"
//! the modulus with "2^256" to force the quotient to be zero.
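//! As a reference point, here is a minimal num-bigint sketch (illustrative
//! only) of the EVM conventions that these zero-modulus substitutions encode:
//!
//!     use num::{BigInt, One, Zero};
//!
//!     /// EVM defines x mod 0 = 0; substituting modulus = 1 yields exactly that.
//!     fn evm_mod(input: &BigInt, modulus: &BigInt) -> BigInt {
//!         let m = if modulus.is_zero() { BigInt::one() } else { modulus.clone() };
//!         input % m
//!     }
//!
//!     /// EVM defines x / 0 = 0; substituting modulus = 2^256 makes the quotient
//!     /// of any 256-bit numerator zero, which matches this rule.
//!     fn evm_div(numerator: &BigInt, denominator: &BigInt) -> BigInt {
//!         if denominator.is_zero() { BigInt::zero() } else { numerator / denominator }
//!     }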
use num::{BigUint, Zero};
use num::{bigint::Sign, BigInt, One, Zero};
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
@ -98,55 +102,65 @@ use crate::arithmetic::utils::*;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::range_check_error;
/// Convert the base-2^16 representation of a number into a BigUint.
/// Convert the base-2^16 representation of a number into a BigInt.
///
/// Given `N` unsigned 16-bit values in `limbs`, return the BigUint
/// Given `N` signed (16 + ε)-bit values in `limbs`, return the BigInt
///
/// \sum_{i=0}^{N-1} limbs[i] * β^i.
///
fn columns_to_biguint<const N: usize>(limbs: &[i64; N]) -> BigUint {
/// This is basically "evaluate the given polynomial at β". Although
/// the input type is i64, the values must always be in (-2^16 - ε,
/// 2^16 + ε) because of the caller's range check on the inputs (the ε
/// allows us to convert calculated output, which can be bigger than
/// 2^16).
fn columns_to_bigint<const N: usize>(limbs: &[i64; N]) -> BigInt {
const BASE: i64 = 1i64 << LIMB_BITS;
// Although the input type is i64, the values must always be in
// [0, 2^16 + ε) because of the caller's range check on the inputs
// (the ε allows us to convert calculated output, which can be
// bigger than 2^16).
debug_assert!(limbs.iter().all(|&x| x >= 0));
let mut limbs_u32 = Vec::with_capacity(N / 2 + 1);
let mut pos_limbs_u32 = Vec::with_capacity(N / 2 + 1);
let mut neg_limbs_u32 = Vec::with_capacity(N / 2 + 1);
let mut cy = 0i64; // cy is necessary to handle ε > 0
for i in 0..(N / 2) {
let t = cy + limbs[2 * i] + BASE * limbs[2 * i + 1];
limbs_u32.push(t as u32);
cy = t >> 32;
pos_limbs_u32.push(if t > 0 { t as u32 } else { 0u32 });
neg_limbs_u32.push(if t < 0 { -t as u32 } else { 0u32 });
cy = t / (1i64 << 32);
}
if N & 1 != 0 {
// If N is odd we need to add the last limb on its own
let t = cy + limbs[N - 1];
limbs_u32.push(t as u32);
cy = t >> 32;
pos_limbs_u32.push(if t > 0 { t as u32 } else { 0u32 });
neg_limbs_u32.push(if t < 0 { -t as u32 } else { 0u32 });
cy = t / (1i64 << 32);
}
limbs_u32.push(cy as u32);
pos_limbs_u32.push(if cy > 0 { cy as u32 } else { 0u32 });
neg_limbs_u32.push(if cy < 0 { -cy as u32 } else { 0u32 });
BigUint::from_slice(&limbs_u32)
let pos = BigInt::from_slice(Sign::Plus, &pos_limbs_u32);
let neg = BigInt::from_slice(Sign::Plus, &neg_limbs_u32);
pos - neg
}
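// Worked example (illustrative): with β = 2^16, the limbs [1, 2, -1] encode
//     1 + 2*β - β^2 = 131073 - 4294967296 = -4294836223;
// the first pair combines into pos_limbs_u32 and the lone negative limb
// lands in neg_limbs_u32 before the final subtraction.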
/// Convert a BigUint into a base-2^16 representation.
/// Convert a BigInt into a base-2^16 representation.
///
/// Given a BigUint `num`, return an array of `N` unsigned 16-bit
/// Given a BigInt `num`, return an array of `N` signed 16-bit
/// values, say `limbs`, such that
///
/// num = \sum_{i=0}^{N-1} limbs[i] * β^i.
///
/// Note that `N` must be at least ceil(log2(num)/16) in order to be
/// big enough to hold `num`.
fn biguint_to_columns<const N: usize>(num: &BigUint) -> [i64; N] {
fn bigint_to_columns<const N: usize>(num: &BigInt) -> [i64; N] {
assert!(num.bits() <= 16 * N as u64);
let mut output = [0i64; N];
for (i, limb) in num.iter_u32_digits().enumerate() {
output[2 * i] = limb as u16 as i64;
output[2 * i + 1] = (limb >> LIMB_BITS) as i64;
}
if num.sign() == Sign::Minus {
for c in output.iter_mut() {
*c = -*c;
}
}
output
}
@ -156,6 +170,7 @@ fn biguint_to_columns<const N: usize>(num: &BigUint) -> [i64; N] {
/// zero if they are not used.
fn generate_modular_op<F: RichField>(
lv: &mut [F; NUM_ARITH_COLUMNS],
filter: usize,
operation: fn([i64; N_LIMBS], [i64; N_LIMBS]) -> [i64; 2 * N_LIMBS - 1],
) {
// Inputs are all range-checked in [0, 2^16), so the "as i64"
@ -164,38 +179,54 @@ fn generate_modular_op<F: RichField>(
let input1_limbs = read_value_i64_limbs(lv, MODULAR_INPUT_1);
let mut modulus_limbs = read_value_i64_limbs(lv, MODULAR_MODULUS);
// The use of BigUints is just to avoid having to implement
// modular reduction.
let mut modulus = columns_to_biguint(&modulus_limbs);
// BigInts are just used to avoid having to implement modular
// reduction.
let mut modulus = columns_to_bigint(&modulus_limbs);
// constr_poly is initialised to the calculated input, and is
// used as such for the BigUint reduction; later, other values are
// used as such for the BigInt reduction; later, other values are
// added/subtracted, which is where its meaning as the "constraint
// polynomial" comes in.
let mut constr_poly = [0i64; 2 * N_LIMBS];
constr_poly[..2 * N_LIMBS - 1].copy_from_slice(&operation(input0_limbs, input1_limbs));
// two_exp_256 == 2^256
let two_exp_256 = {
let mut t = BigInt::zero();
t.set_bit(256, true);
t
};
let mut mod_is_zero = F::ZERO;
if modulus.is_zero() {
modulus += 1u32;
modulus_limbs[0] += 1i64;
if filter == columns::IS_DIV {
// set modulus = 2^256
modulus = two_exp_256.clone();
// modulus_limbs don't play a role below
} else {
// set modulus = 1
modulus = BigInt::one();
modulus_limbs[0] = 1i64;
}
mod_is_zero = F::ONE;
}
let input = columns_to_biguint(&constr_poly);
let input = columns_to_bigint(&constr_poly);
// modulus != 0 here, because, if the given modulus was zero, then
// we added 1 to it above.
let output = &input % &modulus;
let output_limbs = biguint_to_columns::<N_LIMBS>(&output);
let quot = (&input - &output) / &modulus; // exact division
let quot_limbs = biguint_to_columns::<{ 2 * N_LIMBS }>(&quot);
// it was set to 1 or 2^256 above
let mut output = &input % &modulus;
// output will be -ve (but > -modulus) if input was -ve, so we can
// add modulus to obtain a "canonical" +ve output.
if output.sign() == Sign::Minus {
output += &modulus;
}
let output_limbs = bigint_to_columns::<N_LIMBS>(&output);
let quot = (&input - &output) / &modulus; // exact division; can be -ve
let quot_limbs = bigint_to_columns::<{ 2 * N_LIMBS }>(&quot);
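// Concrete check (illustrative): for SUBMOD with a < b and modulus m,
// input = a - b < 0, so output = (a - b) + m and quot = -1, since
// (input - output) / m = ((a - b) - (a - b + m)) / m = -1.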
// two_exp_256 == 2^256
let mut two_exp_256 = BigUint::zero();
two_exp_256.set_bit(256, true);
// output < modulus here, so the proof requires (output - modulus) % 2^256:
let out_aux_red = biguint_to_columns::<N_LIMBS>(&(two_exp_256 + output - modulus));
let out_aux_red = bigint_to_columns::<N_LIMBS>(&(two_exp_256 + output - modulus));
// constr_poly is the array of coefficients of the polynomial
//
@ -215,7 +246,7 @@ fn generate_modular_op<F: RichField>(
lv[MODULAR_OUTPUT].copy_from_slice(&output_limbs.map(|c| F::from_canonical_i64(c)));
lv[MODULAR_OUT_AUX_RED].copy_from_slice(&out_aux_red.map(|c| F::from_canonical_i64(c)));
lv[MODULAR_QUO_INPUT].copy_from_slice(&quot_limbs.map(|c| F::from_canonical_i64(c)));
lv[MODULAR_QUO_INPUT].copy_from_slice(&quot_limbs.map(|c| F::from_noncanonical_i64(c)));
lv[MODULAR_AUX_INPUT].copy_from_slice(&aux_limbs.map(|c| F::from_noncanonical_i64(c)));
lv[MODULAR_MOD_IS_ZERO] = mod_is_zero;
}
@ -225,9 +256,10 @@ fn generate_modular_op<F: RichField>(
/// `filter` must be one of `columns::IS_{ADDMOD,SUBMOD,MULMOD,MOD,DIV}`.
pub(crate) fn generate<F: RichField>(lv: &mut [F; NUM_ARITH_COLUMNS], filter: usize) {
match filter {
columns::IS_ADDMOD => generate_modular_op(lv, pol_add),
columns::IS_MULMOD => generate_modular_op(lv, pol_mul_wide),
columns::IS_MOD => generate_modular_op(lv, |a, _| pol_extend(a)),
columns::IS_ADDMOD => generate_modular_op(lv, filter, pol_add),
columns::IS_SUBMOD => generate_modular_op(lv, filter, pol_sub),
columns::IS_MULMOD => generate_modular_op(lv, filter, pol_mul_wide),
columns::IS_MOD | columns::IS_DIV => generate_modular_op(lv, filter, |a, _| pol_extend(a)),
_ => panic!("generate modular operation called with unknown opcode"),
}
}
@ -240,7 +272,6 @@ pub(crate) fn generate<F: RichField>(lv: &mut [F; NUM_ARITH_COLUMNS], filter: us
/// c(x) + q(x) * m(x) + (x - β) * s(x)
///
/// and check consistency when m = 0, and that c is reduced.
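// (Illustrative restatement: evaluating the identity
// a(x) = c(x) + q(x) * m(x) + (x - β) * s(x) at x = β = 2^16 gives
// a(β) = c(β) + q(β) * m(β), i.e. input = output + quotient * modulus over
// the integers, which together with output < modulus is exactly modular
// reduction.)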
#[allow(clippy::needless_range_loop)]
fn modular_constr_poly<P: PackedField>(
lv: &[P; NUM_ARITH_COLUMNS],
yield_constr: &mut ConstraintConsumer<P>,
@ -268,19 +299,33 @@ fn modular_constr_poly<P: PackedField>(
// modulus = 0.
modulus[0] += mod_is_zero;
let output = &lv[MODULAR_OUTPUT];
let mut output = read_value::<N_LIMBS, _>(lv, MODULAR_OUTPUT);
// Needed to compensate for adding mod_is_zero to modulus above,
// since the call to eval_packed_generic_lt() below subtracts the modulus
// when verifying in the case of a DIV.
output[0] += mod_is_zero * lv[IS_DIV];
// Verify that the output is reduced, i.e. output < modulus.
let out_aux_red = &lv[MODULAR_OUT_AUX_RED];
let is_less_than = P::ONES;
// this sets is_less_than to 1 unless we get mod_is_zero when
// doing a DIV; in that case, we need is_less_than=0, since the
// function checks
//
// output - modulus == out_aux_red + is_less_than*2^256
//
// and we were given output = out_aux_red
let is_less_than = P::ONES - mod_is_zero * lv[IS_DIV];
eval_packed_generic_lt(
yield_constr,
filter,
output,
&output,
&modulus,
out_aux_red,
is_less_than,
);
// restore output[0]
output[0] -= mod_is_zero * lv[IS_DIV];
// prod = q(x) * m(x)
let quot = read_value::<{ 2 * N_LIMBS }, _>(lv, MODULAR_QUO_INPUT);
@ -292,7 +337,7 @@ fn modular_constr_poly<P: PackedField>(
// constr_poly = c(x) + q(x) * m(x)
let mut constr_poly: [_; 2 * N_LIMBS] = prod[0..2 * N_LIMBS].try_into().unwrap();
pol_add_assign(&mut constr_poly, output);
pol_add_assign(&mut constr_poly, &output);
// constr_poly = c(x) + q(x) * m(x) + (x - β) * s(x)
let mut aux = read_value::<{ 2 * N_LIMBS }, _>(lv, MODULAR_AUX_INPUT);
@ -310,7 +355,11 @@ pub(crate) fn eval_packed_generic<P: PackedField>(
) {
// NB: The CTL code guarantees that filter is 0 or 1, i.e. that
// only one of the operations below is "live".
let filter = lv[columns::IS_ADDMOD] + lv[columns::IS_MULMOD] + lv[columns::IS_MOD];
let filter = lv[columns::IS_ADDMOD]
+ lv[columns::IS_MULMOD]
+ lv[columns::IS_MOD]
+ lv[columns::IS_SUBMOD]
+ lv[columns::IS_DIV];
// constr_poly has 2*N_LIMBS limbs
let constr_poly = modular_constr_poly(lv, yield_constr, filter);
@ -319,13 +368,15 @@ pub(crate) fn eval_packed_generic<P: PackedField>(
let input1 = read_value(lv, MODULAR_INPUT_1);
let add_input = pol_add(input0, input1);
let sub_input = pol_sub(input0, input1);
let mul_input = pol_mul_wide(input0, input1);
let mod_input = pol_extend(input0);
for (input, &filter) in [
(&add_input, &lv[columns::IS_ADDMOD]),
(&sub_input, &lv[columns::IS_SUBMOD]),
(&mul_input, &lv[columns::IS_MULMOD]),
(&mod_input, &lv[columns::IS_MOD]),
(&mod_input, &(lv[columns::IS_MOD] + lv[columns::IS_DIV])),
] {
// Need constr_poly_copy to be the first argument to
// pol_sub_assign, since it is the longer of the two
@ -367,18 +418,25 @@ fn modular_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>
modulus[0] = builder.add_extension(modulus[0], mod_is_zero);
let output = &lv[MODULAR_OUTPUT];
let mut output = read_value::<N_LIMBS, _>(lv, MODULAR_OUTPUT);
output[0] = builder.mul_add_extension(mod_is_zero, lv[IS_DIV], output[0]);
let out_aux_red = &lv[MODULAR_OUT_AUX_RED];
let is_less_than = builder.one_extension();
let one = builder.one_extension();
let is_less_than =
builder.arithmetic_extension(F::NEG_ONE, F::ONE, mod_is_zero, lv[IS_DIV], one);
eval_ext_circuit_lt(
builder,
yield_constr,
filter,
output,
&output,
&modulus,
out_aux_red,
is_less_than,
);
output[0] =
builder.arithmetic_extension(F::NEG_ONE, F::ONE, mod_is_zero, lv[IS_DIV], output[0]);
let quot = read_value::<{ 2 * N_LIMBS }, _>(lv, MODULAR_QUO_INPUT);
let prod = pol_mul_wide2_ext_circuit(builder, quot, modulus);
@ -388,7 +446,7 @@ fn modular_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>
}
let mut constr_poly: [_; 2 * N_LIMBS] = prod[0..2 * N_LIMBS].try_into().unwrap();
pol_add_assign_ext_circuit(builder, &mut constr_poly, output);
pol_add_assign_ext_circuit(builder, &mut constr_poly, &output);
let mut aux = read_value::<{ 2 * N_LIMBS }, _>(lv, MODULAR_AUX_INPUT);
aux[2 * N_LIMBS - 1] = builder.zero_extension();
@ -406,8 +464,10 @@ pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
) {
let filter = builder.add_many_extension([
lv[columns::IS_ADDMOD],
lv[columns::IS_SUBMOD],
lv[columns::IS_MULMOD],
lv[columns::IS_MOD],
lv[columns::IS_DIV],
]);
let constr_poly = modular_constr_poly_ext_circuit(lv, builder, yield_constr, filter);
@ -416,13 +476,16 @@ pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
let input1 = read_value(lv, MODULAR_INPUT_1);
let add_input = pol_add_ext_circuit(builder, input0, input1);
let sub_input = pol_sub_ext_circuit(builder, input0, input1);
let mul_input = pol_mul_wide_ext_circuit(builder, input0, input1);
let mod_input = pol_extend_ext_circuit(builder, input0);
let mod_div_filter = builder.add_extension(lv[columns::IS_MOD], lv[columns::IS_DIV]);
for (input, &filter) in [
(&add_input, &lv[columns::IS_ADDMOD]),
(&sub_input, &lv[columns::IS_SUBMOD]),
(&mul_input, &lv[columns::IS_MULMOD]),
(&mod_input, &lv[columns::IS_MOD]),
(&mod_input, &mod_div_filter),
] {
let mut constr_poly_copy = constr_poly;
pol_sub_assign_ext_circuit(builder, &mut constr_poly_copy, input);
@ -458,8 +521,10 @@ mod tests {
// if `IS_ADDMOD == 0`, then the constraints should be met even
// if all values are garbage.
lv[IS_ADDMOD] = F::ZERO;
lv[IS_SUBMOD] = F::ZERO;
lv[IS_MULMOD] = F::ZERO;
lv[IS_MOD] = F::ZERO;
lv[IS_DIV] = F::ZERO;
let mut constraint_consumer = ConstraintConsumer::new(
vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
@ -480,11 +545,13 @@ mod tests {
let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::rand_from_rng(&mut rng));
for op_filter in [IS_ADDMOD, IS_MOD, IS_MULMOD] {
for op_filter in [IS_ADDMOD, IS_DIV, IS_SUBMOD, IS_MOD, IS_MULMOD] {
// Reset operation columns, then select one
lv[IS_ADDMOD] = F::ZERO;
lv[IS_SUBMOD] = F::ZERO;
lv[IS_MULMOD] = F::ZERO;
lv[IS_MOD] = F::ZERO;
lv[IS_DIV] = F::ZERO;
lv[op_filter] = F::ONE;
for i in 0..N_RND_TESTS {
@ -529,11 +596,13 @@ mod tests {
let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::rand_from_rng(&mut rng));
for op_filter in [IS_ADDMOD, IS_MOD, IS_MULMOD] {
for op_filter in [IS_ADDMOD, IS_SUBMOD, IS_DIV, IS_MOD, IS_MULMOD] {
// Reset operation columns, then select one
lv[IS_ADDMOD] = F::ZERO;
lv[IS_SUBMOD] = F::ZERO;
lv[IS_MULMOD] = F::ZERO;
lv[IS_MOD] = F::ZERO;
lv[IS_DIV] = F::ZERO;
lv[op_filter] = F::ONE;
for _i in 0..N_RND_TESTS {
@ -548,7 +617,11 @@ mod tests {
generate(&mut lv, op_filter);
// check that the correct output was generated
assert!(lv[MODULAR_OUTPUT].iter().all(|&c| c == F::ZERO));
if op_filter == IS_DIV {
assert!(lv[DIV_OUTPUT].iter().all(|&c| c == F::ZERO));
} else {
assert!(lv[MODULAR_OUTPUT].iter().all(|&c| c == F::ZERO));
}
let mut constraint_consumer = ConstraintConsumer::new(
vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
@ -563,7 +636,11 @@ mod tests {
.all(|&acc| acc == F::ZERO));
// Corrupt one output limb by setting it to a non-zero value
let random_oi = MODULAR_OUTPUT.start + rng.gen::<usize>() % N_LIMBS;
let random_oi = if op_filter == IS_DIV {
DIV_OUTPUT.start + rng.gen::<usize>() % N_LIMBS
} else {
MODULAR_OUTPUT.start + rng.gen::<usize>() % N_LIMBS
};
lv[random_oi] = F::from_canonical_u16(rng.gen_range(1..u16::MAX));
eval_packed_generic(&lv, &mut constraint_consumer);

View File

@ -118,6 +118,32 @@ pub(crate) fn pol_add_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
sum
}
/// Return a(x) - b(x); the returned array is bigger than necessary, to
/// keep the interface consistent with `pol_mul_wide`.
pub(crate) fn pol_sub<T>(a: [T; N_LIMBS], b: [T; N_LIMBS]) -> [T; 2 * N_LIMBS - 1]
where
T: Sub<Output = T> + Copy + Default,
{
let mut diff = pol_zero();
for i in 0..N_LIMBS {
diff[i] = a[i] - b[i];
}
diff
}
/// Circuit version of `pol_sub`; returns a(x) - b(x), padded like `pol_sub`.
pub(crate) fn pol_sub_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
a: [ExtensionTarget<D>; N_LIMBS],
b: [ExtensionTarget<D>; N_LIMBS],
) -> [ExtensionTarget<D>; 2 * N_LIMBS - 1] {
let zero = builder.zero_extension();
let mut diff = [zero; 2 * N_LIMBS - 1];
for i in 0..N_LIMBS {
diff[i] = builder.sub_extension(a[i], b[i]);
}
diff
}
/// a(x) -= b(x), but must have deg(a) >= deg(b).
pub(crate) fn pol_sub_assign<T>(a: &mut [T], b: &[T])
where

View File

@ -9,5 +9,5 @@ fn main() {
args.next();
let file_contents: Vec<_> = args.map(|path| fs::read_to_string(path).unwrap()).collect();
let assembled = assemble_to_bytes(&file_contents[..]);
println!("{}", encode(&assembled));
println!("{}", encode(assembled));
}

View File

@ -42,6 +42,7 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/memory/metadata.asm"),
include_str!("asm/memory/packing.asm"),
include_str!("asm/memory/txn_fields.asm"),
include_str!("asm/mpt/accounts.asm"),
include_str!("asm/mpt/delete/delete.asm"),
include_str!("asm/mpt/hash/hash.asm"),
include_str!("asm/mpt/hash/hash_trie_specific.asm"),
@ -53,8 +54,8 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/mpt/load/load.asm"),
include_str!("asm/mpt/load/load_trie_specific.asm"),
include_str!("asm/mpt/read.asm"),
include_str!("asm/mpt/storage_read.asm"),
include_str!("asm/mpt/storage_write.asm"),
include_str!("asm/mpt/storage/storage_read.asm"),
include_str!("asm/mpt/storage/storage_write.asm"),
include_str!("asm/mpt/util.asm"),
include_str!("asm/ripemd/box.asm"),
include_str!("asm/ripemd/compression.asm"),
@ -80,6 +81,7 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/transactions/type_2.asm"),
include_str!("asm/util/assertions.asm"),
include_str!("asm/util/basic_macros.asm"),
include_str!("asm/util/keccak.asm"),
];
let parsed_files = files.iter().map(|f| parse(f)).collect_vec();

View File

@ -393,3 +393,21 @@
%mstore_kernel_general_2
// stack: (empty)
%endmacro
%macro mload_main
// stack: offset
DUP1
// stack: offset, offset
%update_msize
// stack: offset
%mload_current(@SEGMENT_MAIN_MEMORY)
%endmacro
%macro mstore_main
// stack: offset, value
DUP1
// stack: offset, offset, value
%update_msize
// stack: offset, value
%mstore_current(@SEGMENT_MAIN_MEMORY)
%endmacro

View File

@ -45,3 +45,23 @@
%macro callvalue
%mload_context_metadata(@CTX_METADATA_CALL_VALUE)
%endmacro
%macro msize
%mload_context_metadata(@CTX_METADATA_MSIZE)
%endmacro
%macro update_msize
// stack: offset
%add_const(32)
// stack: 32 + offset
%div_const(32)
// stack: (offset+32)/32 = ceil_div_usize(offset+1, 32)
%mul_const(32)
// stack: ceil_div_usize(offset+1, 32) * 32
%msize
// stack: current_msize, ceil_div_usize(offset+1, 32) * 32
%max
// stack: new_msize
%mstore_context_metadata(@CTX_METADATA_MSIZE)
%endmacro
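// For reference, a small Rust sketch (illustrative, not kernel code) of the
// rounding rule that %update_msize implements:
//
//     /// Round the highest touched byte offset up to a 32-byte word boundary
//     /// and keep the running maximum.
//     fn update_msize(current_msize: u64, offset: u64) -> u64 {
//         let rounded = (offset + 32) / 32 * 32; // ceil_div(offset + 1, 32) * 32
//         current_msize.max(rounded)
//     }
//
// For example, touching offset 65 gives rounded = 96, so msize becomes at least 96.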

View File

@ -0,0 +1,53 @@
// Return a pointer to the current account's data in the state trie.
%macro current_account_data
ADDRESS %mpt_read_state_trie
// stack: account_ptr
// account_ptr should be non-null as long as the prover provided the proper
// Merkle data. But a bad prover may not have, and we don't want to return a
// null pointer for security reasons.
DUP1 ISZERO %jumpi(panic)
// stack: account_ptr
%endmacro
// Returns a pointer to the root of the storage trie associated with the current account.
%macro current_storage_trie
// stack: (empty)
%current_account_data
// stack: account_ptr
%add_const(2)
// stack: storage_root_ptr_ptr
%mload_trie_data
// stack: storage_root_ptr
%endmacro
global make_default_account:
PANIC // TODO
// Create a copy of the given account. The copy can then safely be mutated as
// needed, while leaving the original account data untouched.
//
// This writes the new account's data to MPT data, but does not register the new
// account in the state trie.
//
// Pre stack: old_account_ptr, retdest
// Post stack: new_account_ptr
global make_account_copy:
// stack: old_account_ptr, retdest
%get_trie_data_size // pointer to new account we're about to create
// stack: new_account_ptr, old_account_ptr, retdest
// Copy the account's four fields: nonce (+0), balance (+1),
// storage_root_ptr (+2), code_hash (+3).
DUP2 %mload_trie_data %append_to_trie_data
DUP2 %add_const(1) %mload_trie_data %append_to_trie_data
DUP2 %add_const(2) %mload_trie_data %append_to_trie_data
SWAP1 %add_const(3) %mload_trie_data %append_to_trie_data
// stack: new_account_ptr, retdest
SWAP1
JUMP
// Convenience macro to call make_account_copy and return where we left off.
%macro make_account_copy
%stack (old_account_ptr) -> (old_account_ptr, %%after)
%jump(make_account_copy)
%%after:
%endmacro

View File

@ -95,4 +95,4 @@ encode_receipt:
PANIC // TODO
encode_storage_value:
PANIC // TODO
PANIC // TODO: RLP encode as variable-len scalar?

View File

@ -2,9 +2,10 @@
// Mutate the state trie, inserting the given key-value pair.
global mpt_insert_state_trie:
// stack: num_nibbles, key, value_ptr, retdest
%stack (num_nibbles, key, value_ptr)
-> (num_nibbles, key, value_ptr, mpt_insert_state_trie_save)
// stack: key, value_ptr, retdest
%stack (key, value_ptr)
-> (key, value_ptr, mpt_insert_state_trie_save)
PUSH 64 // num_nibbles
%mload_global_metadata(@GLOBAL_METADATA_STATE_TRIE_ROOT)
// stack: state_root_ptr, num_nibbles, key, value_ptr, mpt_insert_state_trie_save, retdest
%jump(mpt_insert)

View File

@ -3,26 +3,26 @@
// state trie. Returns null if the address is not found.
global mpt_read_state_trie:
// stack: addr, retdest
// The key is the hash of the address. Since KECCAK_GENERAL takes input from
// memory, we will write addr bytes to SEGMENT_KERNEL_GENERAL[0..20] first.
%stack (addr) -> (0, @SEGMENT_KERNEL_GENERAL, 0, addr, 20, mpt_read_state_trie_after_mstore)
%jump(mstore_unpacking)
mpt_read_state_trie_after_mstore:
// stack: retdest
%stack () -> (0, @SEGMENT_KERNEL_GENERAL, 0, 20) // context, segment, offset, len
KECCAK_GENERAL
%addr_to_state_key
// stack: key, retdest
PUSH 64 // num_nibbles
%mload_global_metadata(@GLOBAL_METADATA_STATE_TRIE_ROOT) // node_ptr
// stack: node_ptr, num_nibbles, key, retdest
%jump(mpt_read)
// Convenience macro to call mpt_read_state_trie and return where we left off.
%macro mpt_read_state_trie
%stack (addr) -> (addr, %%after)
%jump(mpt_read_state_trie)
%%after:
%endmacro
// Read a value from a MPT.
//
// Arguments:
// - the virtual address of the trie to search in
// - the key, as a U256
// - the number of nibbles in the key (should start at 64)
// - the key, as a U256
//
// This function returns a pointer to the value, or 0 if the key is not found.
global mpt_read:

View File

@ -0,0 +1,30 @@
// Read a word from the current account's storage trie.
//
// Pre stack: slot, retdest
// Post stack: value
global storage_read:
// stack: slot, retdest
%stack (slot) -> (slot, after_storage_read)
%slot_to_storage_key
// stack: storage_key, after_storage_read, retdest
PUSH 64 // storage_key has 64 nibbles
%current_storage_trie
// stack: storage_root_ptr, 64, storage_key, after_storage_read, retdest
%jump(mpt_read)
after_storage_read:
// stack: value_ptr, retdest
DUP1 %jumpi(storage_key_exists)
// Storage key not found. Return default value_ptr = 0,
// which derefs to 0 since @SEGMENT_TRIE_DATA[0] = 0.
%stack (value_ptr, retdest) -> (retdest, 0)
JUMP
storage_key_exists:
// stack: value_ptr, retdest
%mload_trie_data
// stack: value, retdest
SWAP1
JUMP
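// A self-contained Rust model of the read path's semantics (illustrative
// only; a HashMap stands in for the storage trie):
//
//     use std::collections::HashMap;
//
//     /// Absent keys read as zero, mirroring the null value_ptr branch above
//     /// (value_ptr = 0 dereferences to @SEGMENT_TRIE_DATA[0] = 0).
//     fn storage_read(trie: &HashMap<[u8; 32], u64>, storage_key: [u8; 32]) -> u64 {
//         *trie.get(&storage_key).unwrap_or(&0)
//     }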

View File

@ -0,0 +1,44 @@
// Write a word to the current account's storage trie.
//
// Pre stack: slot, value, retdest
// Post stack: (empty)
global storage_write:
// TODO: If value = 0, delete the key instead of inserting 0.
// stack: slot, value, retdest
// First we write the value to MPT data, and get a pointer to it.
%get_trie_data_size
// stack: value_ptr, slot, value, retdest
SWAP2
// stack: value, slot, value_ptr, retdest
%append_to_trie_data
// stack: slot, value_ptr, retdest
// Next, call mpt_insert on the current account's storage root.
%stack (slot, value_ptr) -> (slot, value_ptr, after_storage_insert)
%slot_to_storage_key
// stack: storage_key, value_ptr, after_storage_insert, retdest
PUSH 64 // storage_key has 64 nibbles
%current_storage_trie
// stack: storage_root_ptr, 64, storage_key, value_ptr, after_storage_insert, retdest
%jump(mpt_insert)
after_storage_insert:
// stack: new_storage_root_ptr, retdest
%current_account_data
// stack: old_account_ptr, new_storage_root_ptr, retdest
%make_account_copy
// stack: new_account_ptr, new_storage_root_ptr, retdest
// Update the copied account with our new storage root pointer.
%stack (new_account_ptr, new_storage_root_ptr) -> (new_account_ptr, new_storage_root_ptr, new_account_ptr)
%add_const(2)
// stack: new_account_storage_root_ptr_ptr, new_storage_root_ptr, new_account_ptr, retdest
%mstore_trie_data
// stack: new_account_ptr, retdest
// Save this updated account to the state trie.
ADDRESS %addr_to_state_key
// stack: state_key, new_account_ptr, retdest
%jump(mpt_insert_state_trie)
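// The copy-on-write flow above as a toy Rust model (illustrative; real
// account data lives in the MPT, not in HashMaps):
//
//     use std::collections::HashMap;
//
//     #[derive(Clone)]
//     struct Account {
//         storage: HashMap<[u8; 32], u64>,
//     }
//
//     /// Clone the account, mutate the clone's storage, then re-insert the
//     /// clone into the state trie, leaving the original data untouched.
//     fn storage_write(
//         state: &mut HashMap<[u8; 32], Account>,
//         state_key: [u8; 32],
//         storage_key: [u8; 32],
//         value: u64,
//     ) {
//         let mut account = state[&state_key].clone(); // %make_account_copy
//         account.storage.insert(storage_key, value);  // mpt_insert
//         state.insert(state_key, account);            // mpt_insert_state_trie
//     }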

View File

@ -1,2 +0,0 @@
global storage_read:
// TODO

View File

@ -1,2 +0,0 @@
global storage_write:
// TODO

View File

@ -165,3 +165,14 @@
SWAP4 %div_const(4) SWAP4 // bits_2 -> len_2 (in nibbles)
// stack: len_common, key_common, len_1, key_1, len_2, key_2
%endmacro
// Computes state_key = Keccak256(addr). Clobbers @SEGMENT_KERNEL_GENERAL.
%macro addr_to_state_key
%keccak256_word(20)
%endmacro
// Given a storage slot (a 256-bit integer), computes storage_key = Keccak256(slot).
// Clobbers @SEGMENT_KERNEL_GENERAL.
%macro slot_to_storage_key
%keccak256_word(32)
%endmacro
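// In Rust terms (sketch; `keccak256` is an assumed stand-in for a Keccak-256
// helper, not a function defined here):
//
//     fn addr_to_state_key(addr: [u8; 20]) -> [u8; 32] {
//         keccak256(&addr) // state trie key = Keccak256(address bytes)
//     }
//
//     fn slot_to_storage_key(slot: [u8; 32]) -> [u8; 32] {
//         keccak256(&slot) // storage trie key = Keccak256(32-byte slot)
//     }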

View File

@ -0,0 +1,14 @@
// Computes Keccak256(input_word). Clobbers @SEGMENT_KERNEL_GENERAL.
//
// Pre stack: input_word
// Post stack: hash
%macro keccak256_word(num_bytes)
// Since KECCAK_GENERAL takes its input from memory, we will first write
// input_word's bytes to @SEGMENT_KERNEL_GENERAL[0..$num_bytes].
%stack (word) -> (0, @SEGMENT_KERNEL_GENERAL, 0, word, $num_bytes, %%after_mstore)
%jump(mstore_unpacking)
%%after_mstore:
// stack: offset
%stack (offset) -> (0, @SEGMENT_KERNEL_GENERAL, 0, $num_bytes) // context, segment, offset, len
KECCAK_GENERAL
%endmacro

View File

@ -83,7 +83,7 @@ impl Macro {
self.params
.iter()
.position(|p| p == param)
.unwrap_or_else(|| panic!("No such param: {} {:?}", param, &self.params))
.unwrap_or_else(|| panic!("No such param: {param} {:?}", &self.params))
}
}
@ -140,7 +140,7 @@ fn find_macros(files: &[File]) -> HashMap<MacroSignature, Macro> {
items: items.clone(),
};
let old = macros.insert(signature.clone(), macro_);
assert!(old.is_none(), "Duplicate macro signature: {:?}", signature);
assert!(old.is_none(), "Duplicate macro signature: {signature:?}");
}
}
}
@ -186,9 +186,9 @@ fn expand_macro_call(
};
let macro_ = macros
.get(&signature)
.unwrap_or_else(|| panic!("No such macro: {:?}", signature));
.unwrap_or_else(|| panic!("No such macro: {signature:?}"));
let get_actual_label = |macro_label| format!("@{}.{}", macro_counter, macro_label);
let get_actual_label = |macro_label| format!("@{macro_counter}.{macro_label}");
let get_arg = |var| {
let param_index = macro_.get_param_index(var);
@ -242,7 +242,7 @@ fn inline_constants(body: Vec<Item>, constants: &HashMap<String, U256>) -> Vec<I
let resolve_const = |c| {
*constants
.get(&c)
.unwrap_or_else(|| panic!("No such constant: {}", c))
.unwrap_or_else(|| panic!("No such constant: {c}"))
};
body.into_iter()
@ -283,15 +283,15 @@ fn find_labels(
| Item::Repeat(_, _)
| Item::StackManipulation(_, _)
| Item::MacroLabelDeclaration(_) => {
panic!("Item should have been expanded already: {:?}", item);
panic!("Item should have been expanded already: {item:?}");
}
Item::GlobalLabelDeclaration(label) => {
let old = global_labels.insert(label.clone(), *offset);
assert!(old.is_none(), "Duplicate global label: {}", label);
assert!(old.is_none(), "Duplicate global label: {label}");
}
Item::LocalLabelDeclaration(label) => {
let old = local_labels.insert(label.clone(), *offset);
assert!(old.is_none(), "Duplicate local label: {}", label);
assert!(old.is_none(), "Duplicate local label: {label}");
}
Item::Push(target) => *offset += 1 + push_target_size(target) as usize,
Item::ProverInput(prover_input_fn) => {
@ -319,7 +319,7 @@ fn assemble_file(
| Item::Repeat(_, _)
| Item::StackManipulation(_, _)
| Item::MacroLabelDeclaration(_) => {
panic!("Item should have been expanded already: {:?}", item);
panic!("Item should have been expanded already: {item:?}");
}
Item::GlobalLabelDeclaration(_) | Item::LocalLabelDeclaration(_) => {
// Nothing to do; we processed labels in the prior phase.
@ -331,7 +331,7 @@ fn assemble_file(
let offset = local_labels
.get(&label)
.or_else(|| global_labels.get(&label))
.unwrap_or_else(|| panic!("No such label: {}", label));
.unwrap_or_else(|| panic!("No such label: {label}"));
// We want the BYTES_PER_OFFSET least significant bytes in BE order.
// It's easiest to rev the first BYTES_PER_OFFSET bytes of the LE encoding.
(0..BYTES_PER_OFFSET)
@ -339,9 +339,9 @@ fn assemble_file(
.map(|i| offset.to_le_bytes()[i as usize])
.collect()
}
PushTarget::MacroLabel(v) => panic!("Macro label not in a macro: {}", v),
PushTarget::MacroVar(v) => panic!("Variable not in a macro: {}", v),
PushTarget::Constant(c) => panic!("Constant wasn't inlined: {}", c),
PushTarget::MacroLabel(v) => panic!("Macro label not in a macro: {v}"),
PushTarget::MacroVar(v) => panic!("Variable not in a macro: {v}"),
PushTarget::Constant(c) => panic!("Constant wasn't inlined: {c}"),
};
code.push(get_push_opcode(target_bytes.len() as u8));
code.extend(target_bytes);
@ -362,9 +362,9 @@ fn push_target_size(target: &PushTarget) -> u8 {
match target {
PushTarget::Literal(n) => u256_to_trimmed_be_bytes(n).len() as u8,
PushTarget::Label(_) => BYTES_PER_OFFSET,
PushTarget::MacroLabel(v) => panic!("Macro label not in a macro: {}", v),
PushTarget::MacroVar(v) => panic!("Variable not in a macro: {}", v),
PushTarget::Constant(c) => panic!("Constant wasn't inlined: {}", c),
PushTarget::MacroLabel(v) => panic!("Macro label not in a macro: {v}"),
PushTarget::MacroVar(v) => panic!("Variable not in a macro: {v}"),
PushTarget::Constant(c) => panic!("Constant wasn't inlined: {c}"),
}
}

View File

@ -23,10 +23,12 @@ pub(crate) enum ContextMetadata {
/// Pointer to the initial version of the state trie, at the creation of this context. Used when
/// we need to revert a context.
StateTrieCheckpointPointer = 9,
/// Size of the active main memory.
MSize = 10,
}
impl ContextMetadata {
pub(crate) const COUNT: usize = 10;
pub(crate) const COUNT: usize = 11;
pub(crate) fn all() -> [Self; Self::COUNT] {
[
@ -40,6 +42,7 @@ impl ContextMetadata {
Self::CallValue,
Self::Static,
Self::StateTrieCheckpointPointer,
Self::MSize,
]
}
@ -56,6 +59,7 @@ impl ContextMetadata {
ContextMetadata::CallValue => "CTX_METADATA_CALL_VALUE",
ContextMetadata::Static => "CTX_METADATA_STATIC",
ContextMetadata::StateTrieCheckpointPointer => "CTX_METADATA_STATE_TRIE_CHECKPOINT_PTR",
ContextMetadata::MSize => "CTX_METADATA_MSIZE",
}
}
}

View File

@ -21,7 +21,7 @@ fn cost_estimate_item(item: &Item) -> u32 {
Push(Label(_)) => cost_estimate_push(BYTES_PER_OFFSET as usize),
ProverInput(_) => 1,
StandardOp(op) => cost_estimate_standard_op(op.as_str()),
_ => panic!("Unexpected item: {:?}", item),
_ => panic!("Unexpected item: {item:?}"),
}
}

View File

@ -9,6 +9,7 @@ use plonky2::field::goldilocks_field::GoldilocksField;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::assembler::Kernel;
use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::constants::txn_fields::NormalizedTxnField;
use crate::generation::memory::{MemoryContextState, MemorySegmentState};
@ -45,9 +46,7 @@ impl InterpreterMemory {
mem
}
}
impl InterpreterMemory {
fn mload_general(&self, context: usize, segment: Segment, offset: usize) -> U256 {
let value = self.context_memory[context].segments[segment as usize].get(offset);
assert!(
@ -328,6 +327,8 @@ impl<'a> Interpreter<'a> {
if self.debug_offsets.contains(&self.offset) {
println!("At {}, stack={:?}", self.offset_name(), self.stack());
} else if let Some(label) = self.offset_label() {
println!("At {label}");
}
Ok(())
@ -335,12 +336,16 @@ impl<'a> Interpreter<'a> {
/// Get a string representation of the current offset for debugging purposes.
fn offset_name(&self) -> String {
self.offset_label()
.unwrap_or_else(|| self.offset.to_string())
}
fn offset_label(&self) -> Option<String> {
// TODO: Not sure we should use KERNEL? Interpreter is more general in other places.
let label = KERNEL
KERNEL
.global_labels
.iter()
.find_map(|(k, v)| (*v == self.offset).then(|| k.clone()));
label.unwrap_or_else(|| self.offset.to_string())
.find_map(|(k, v)| (*v == self.offset).then(|| k.clone()))
}
fn run_stop(&mut self) {
@ -511,6 +516,8 @@ impl<'a> Interpreter<'a> {
fn run_keccak_general(&mut self) {
let context = self.pop().as_usize();
let segment = Segment::all()[self.pop().as_usize()];
// Not strictly needed but here to avoid surprises with MSIZE.
assert_ne!(segment, Segment::MainMemory, "Call KECCAK256 instead.");
let offset = self.pop().as_usize();
let size = self.pop().as_usize();
let bytes = (offset..offset + size)
@ -590,11 +597,10 @@ impl<'a> Interpreter<'a> {
}
fn run_msize(&mut self) {
let num_bytes = self.memory.context_memory[self.context].segments
[Segment::MainMemory as usize]
.content
.len();
self.push(U256::from(num_bytes));
self.push(
self.memory.context_memory[self.context].segments[Segment::ContextMetadata as usize]
.get(ContextMetadata::MSize as usize),
)
}
fn run_jumpdest(&mut self) {

View File

@ -134,6 +134,6 @@ pub(crate) fn get_opcode(mnemonic: &str) -> u8 {
"REVERT" => 0xfd,
"INVALID" => 0xfe,
"SELFDESTRUCT" => 0xff,
_ => panic!("Unrecognized mnemonic {}", mnemonic),
_ => panic!("Unrecognized mnemonic {mnemonic}"),
}
}

View File

@ -35,7 +35,7 @@ fn expand(names: Vec<StackPlaceholder>, replacements: Vec<StackReplacement>) ->
stack_blocks.insert(name.clone(), n);
(0..n)
.map(|i| {
let literal_name = format!("@{}.{}", name, i);
let literal_name = format!("@{name}.{i}");
StackItem::NamedItem(literal_name)
})
.collect_vec()
@ -52,7 +52,7 @@ fn expand(names: Vec<StackPlaceholder>, replacements: Vec<StackReplacement>) ->
let n = *stack_blocks.get(&name).unwrap();
(0..n)
.map(|i| {
let literal_name = format!("@{}.{}", name, i);
let literal_name = format!("@{name}.{i}");
StackItem::NamedItem(literal_name)
})
.collect_vec()
@ -64,7 +64,7 @@ fn expand(names: Vec<StackPlaceholder>, replacements: Vec<StackReplacement>) ->
StackReplacement::MacroLabel(_)
| StackReplacement::MacroVar(_)
| StackReplacement::Constant(_) => {
panic!("Should have been expanded already: {:?}", item)
panic!("Should have been expanded already: {item:?}")
}
})
.collect_vec();
@ -157,7 +157,7 @@ fn shortest_path(
}
}
panic!("No path found from {:?} to {:?}", src, dst)
panic!("No path found from {src:?} to {dst:?}")
}
/// A node in the priority queue used by Dijkstra's algorithm.
@ -279,7 +279,7 @@ impl StackOp {
PushTarget::MacroLabel(_)
| PushTarget::MacroVar(_)
| PushTarget::Constant(_) => {
panic!("Target should have been expanded already: {:?}", target)
panic!("Target should have been expanded already: {target:?}")
}
};
// This is just a rough estimate; we can update it after implementing PUSH.
@ -326,8 +326,8 @@ impl StackOp {
match self {
StackOp::Push(target) => Item::Push(target),
Pop => Item::StandardOp("POP".into()),
StackOp::Dup(n) => Item::StandardOp(format!("DUP{}", n)),
StackOp::Swap(n) => Item::StandardOp(format!("SWAP{}", n)),
StackOp::Dup(n) => Item::StandardOp(format!("DUP{n}")),
StackOp::Swap(n) => Item::StandardOp(format!("SWAP{n}")),
}
}
}

View File

@ -150,7 +150,7 @@ mod bn {
assert_eq!(stack, vec![U256::MAX, U256::MAX]);
// Multiple calls
let ec_mul_hex = format!("0x{:x}", ec_mul);
let ec_mul_hex = format!("0x{ec_mul:x}");
let initial_stack = u256ify([
"0xdeadbeef",
s,
@ -288,7 +288,7 @@ mod secp {
assert_eq!(stack, u256ify([identity.1, identity.0])?);
// Multiple calls
let ec_mul_hex = format!("0x{:x}", ec_mul);
let ec_mul_hex = format!("0x{ec_mul:x}");
let initial_stack = u256ify([
"0xdeadbeef",
s,

View File

@ -6,18 +6,20 @@ use super::nibbles;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::mpt::{test_account_1_rlp, test_account_2};
use crate::cpu::kernel::tests::mpt::{
nibbles_64, nibbles_count, test_account_1_rlp, test_account_2,
};
use crate::generation::mpt::{all_mpt_prover_inputs_reversed, AccountRlp};
use crate::generation::TrieInputs;
#[test]
fn mpt_insert_empty() -> Result<()> {
test_state_trie(Default::default(), nibbles(0xABC), test_account_2())
test_state_trie(Default::default(), nibbles_64(0xABC), test_account_2())
}
#[test]
fn mpt_insert_leaf_identical_keys() -> Result<()> {
let key = nibbles(0xABC);
let key = nibbles_64(0xABC);
let state_trie = PartialTrie::Leaf {
nibbles: key,
value: test_account_1_rlp(),
@ -28,37 +30,39 @@ fn mpt_insert_leaf_identical_keys() -> Result<()> {
#[test]
fn mpt_insert_leaf_nonoverlapping_keys() -> Result<()> {
let state_trie = PartialTrie::Leaf {
nibbles: nibbles(0xABC),
nibbles: nibbles_64(0xABC),
value: test_account_1_rlp(),
};
test_state_trie(state_trie, nibbles(0x123), test_account_2())
test_state_trie(state_trie, nibbles_64(0x123), test_account_2())
}
#[test]
fn mpt_insert_leaf_overlapping_keys() -> Result<()> {
let state_trie = PartialTrie::Leaf {
nibbles: nibbles(0xABC),
nibbles: nibbles_64(0xABC),
value: test_account_1_rlp(),
};
test_state_trie(state_trie, nibbles(0xADE), test_account_2())
test_state_trie(state_trie, nibbles_64(0xADE), test_account_2())
}
#[test]
#[ignore] // TODO: Not valid for state trie, all keys have same len.
fn mpt_insert_leaf_insert_key_extends_leaf_key() -> Result<()> {
let state_trie = PartialTrie::Leaf {
nibbles: nibbles(0xABC),
value: test_account_1_rlp(),
};
test_state_trie(state_trie, nibbles(0xABCDE), test_account_2())
test_state_trie(state_trie, nibbles_64(0xABCDE), test_account_2())
}
#[test]
#[ignore] // TODO: Not valid for state trie, all keys have same len.
fn mpt_insert_leaf_leaf_key_extends_insert_key() -> Result<()> {
let state_trie = PartialTrie::Leaf {
nibbles: nibbles(0xABCDE),
value: test_account_1_rlp(),
};
test_state_trie(state_trie, nibbles(0xABC), test_account_2())
test_state_trie(state_trie, nibbles_64(0xABC), test_account_2())
}
#[test]
@ -69,7 +73,7 @@ fn mpt_insert_branch_replacing_empty_child() -> Result<()> {
value: vec![],
};
test_state_trie(state_trie, nibbles(0xABC), test_account_2())
test_state_trie(state_trie, nibbles_64(0xABC), test_account_2())
}
#[test]
@ -92,7 +96,7 @@ fn mpt_insert_extension_nonoverlapping_keys() -> Result<()> {
}
.into(),
};
test_state_trie(state_trie, nibbles(0x12345), test_account_2())
test_state_trie(state_trie, nibbles_64(0x12345), test_account_2())
}
#[test]
@ -115,29 +119,33 @@ fn mpt_insert_extension_insert_key_extends_node_key() -> Result<()> {
}
.into(),
};
test_state_trie(state_trie, nibbles(0xABCDEF), test_account_2())
test_state_trie(state_trie, nibbles_64(0xABCDEF), test_account_2())
}
#[test]
fn mpt_insert_branch_to_leaf_same_key() -> Result<()> {
let leaf = PartialTrie::Leaf {
nibbles: nibbles(0xBCD),
nibbles: nibbles_count(0xBCD, 63),
value: test_account_1_rlp(),
}
.into();
let mut children = std::array::from_fn(|_| PartialTrie::Empty.into());
children[0xA] = leaf;
children[0] = leaf;
let state_trie = PartialTrie::Branch {
children,
value: vec![],
};
test_state_trie(state_trie, nibbles(0xABCD), test_account_2())
test_state_trie(state_trie, nibbles_64(0xABCD), test_account_2())
}
/// Note: The account's storage_root is ignored, as we can't insert a new storage_root without the
/// accompanying trie data. An empty trie's storage_root is used instead.
fn test_state_trie(state_trie: PartialTrie, k: Nibbles, mut account: AccountRlp) -> Result<()> {
assert_eq!(k.count, 64);
// Ignore any storage_root; see documentation note.
account.storage_root = PartialTrie::Empty.calc_hash();
let trie_inputs = TrieInputs {
@ -177,7 +185,6 @@ fn test_state_trie(state_trie: PartialTrie, k: Nibbles, mut account: AccountRlp)
interpreter.push(0xDEADBEEFu32.into());
interpreter.push(value_ptr.into()); // value_ptr
interpreter.push(k.packed); // key
interpreter.push(k.count.into()); // num_nibbles
interpreter.run()?;
assert_eq!(

View File

@ -13,13 +13,22 @@ mod read;
/// Note that this preserves all nibbles (e.g. `0x123` is not interpreted as `0x0123`).
pub(crate) fn nibbles<T: Into<U256>>(v: T) -> Nibbles {
let packed = v.into();
Nibbles {
count: Nibbles::get_num_nibbles_in_key(&packed),
packed,
}
}
pub(crate) fn nibbles_64<T: Into<U256>>(v: T) -> Nibbles {
let packed = v.into();
Nibbles { count: 64, packed }
}
pub(crate) fn nibbles_count<T: Into<U256>>(v: T, count: usize) -> Nibbles {
let packed = v.into();
Nibbles { count, packed }
}
pub(crate) fn test_account_1() -> AccountRlp {
AccountRlp {
nonce: U256::from(1111),

View File

@ -68,11 +68,10 @@ pub(crate) fn mpt_prover_inputs<F>(
PartialTrie::Hash(h) => prover_inputs.push(U256::from_big_endian(h.as_bytes())),
PartialTrie::Branch { children, value } => {
if value.is_empty() {
// There's no value, so value_len = 0.
prover_inputs.push(U256::zero());
prover_inputs.push(U256::zero()); // value_present = 0
} else {
let parsed_value = parse_value(value);
prover_inputs.push(parsed_value.len().into());
prover_inputs.push(U256::one()); // value_present = 1
prover_inputs.extend(parsed_value);
}
for child in children {
@ -107,8 +106,7 @@ pub(crate) fn mpt_prover_inputs_state_trie(
PartialTrie::Hash(h) => prover_inputs.push(U256::from_big_endian(h.as_bytes())),
PartialTrie::Branch { children, value } => {
assert!(value.is_empty(), "State trie should not have branch values");
// There's no value, so value_len = 0.
prover_inputs.push(U256::zero());
prover_inputs.push(U256::zero()); // value_present = 0
for (i, child) in children.iter().enumerate() {
let extended_key = key.merge(&Nibbles {

View File

@ -235,7 +235,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
.zip(verifier_data_target)
.enumerate()
{
builder.verify_proof(
builder.verify_proof::<C>(
recursive_proof,
&verifier_data_target,
&verifier_data[i].common,
@ -579,7 +579,7 @@ where
{
let recursive_proofs = std::array::from_fn(|i| {
let verifier_data = &verifier_data[i];
builder.add_virtual_proof_with_pis(&verifier_data.common)
builder.add_virtual_proof_with_pis::<C>(&verifier_data.common)
});
let verifier_data = std::array::from_fn(|i| {
let verifier_data = &verifier_data[i];

View File

@ -45,7 +45,7 @@ impl<F: OEF<D>, const D: usize> Display for ExtensionAlgebra<F, D> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "({})", self.0[0])?;
for i in 1..D {
write!(f, " + ({})*b^{}", self.0[i], i)?;
write!(f, " + ({})*b^{i}", self.0[i])?;
}
Ok(())
}

View File

@ -73,7 +73,7 @@ impl<F: RichField + Extendable<D>, const D: usize> InsertionGate<F, D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for InsertionGate<F, D> {
fn id(&self) -> String {
format!("{:?}<D={}>", self, D)
format!("{self:?}<D={D}>")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -24,7 +24,7 @@ pub(crate) fn bench_keccak<F: RichField>(c: &mut Criterion) {
pub(crate) fn bench_poseidon<F: Poseidon>(c: &mut Criterion) {
c.bench_function(
&format!("poseidon<{}, {}>", type_name::<F>(), SPONGE_WIDTH),
&format!("poseidon<{}, {SPONGE_WIDTH}>", type_name::<F>()),
|b| {
b.iter_batched(
|| F::rand_arr::<SPONGE_WIDTH>(),

View File

@ -32,7 +32,7 @@ use structopt::StructOpt;
type ProofTuple<F, C, const D: usize> = (
ProofWithPublicInputs<F, C, D>,
VerifierOnlyCircuitData<C, D>,
CommonCircuitData<F, C, D>,
CommonCircuitData<F, D>,
);
#[derive(Clone, StructOpt, Debug)]
@ -112,7 +112,7 @@ where
let (inner_proof, inner_vd, inner_cd) = inner;
let mut builder = CircuitBuilder::<F, D>::new(config.clone());
let mut pw = PartialWitness::new();
let pt = builder.add_virtual_proof_with_pis(inner_cd);
let pt = builder.add_virtual_proof_with_pis::<InnerC>(inner_cd);
pw.set_proof_with_pis_target(&pt, inner_proof);
let inner_data = VerifierCircuitTarget {
@ -121,7 +121,7 @@ where
};
pw.set_verifier_data_target(&inner_data, inner_vd);
builder.verify_proof(pt, &inner_data, inner_cd);
builder.verify_proof::<InnerC>(pt, &inner_data, inner_cd);
builder.print_gate_counts(0);
if let Some(min_degree_bits) = min_degree_bits {
@ -150,7 +150,7 @@ where
fn test_serialization<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
proof: &ProofWithPublicInputs<F, C, D>,
vd: &VerifierOnlyCircuitData<C, D>,
cd: &CommonCircuitData<F, C, D>,
cd: &CommonCircuitData<F, D>,
) -> Result<()>
where
[(); C::Hasher::HASH_SIZE]:,

View File

@ -31,7 +31,7 @@ impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
let x_squared = witness.get_target(self.x_squared);
let x = x_squared.sqrt().unwrap();
println!("Square root: {}", x);
println!("Square root: {x}");
out_buffer.set_target(self.x, x);
}
@ -75,7 +75,7 @@ fn main() -> Result<()> {
let proof = data.prove(pw.clone())?;
let x_squared_actual = proof.public_inputs[0];
println!("Field element (square): {}", x_squared_actual);
println!("Field element (square): {x_squared_actual}");
data.verify(proof)
}

View File

@ -21,7 +21,7 @@ pub(crate) fn main() {
// Print the constants in the format we prefer in our code.
for chunk in constants.chunks(4) {
for (i, c) in chunk.iter().enumerate() {
print!("{:#018x},", c);
print!("{c:#018x},");
if i != chunk.len() - 1 {
print!(" ");
}

View File

@ -54,7 +54,7 @@ impl FriConfig {
/// FRI parameters, including generated parameters which are specific to an instance size, in
/// contrast to `FriConfig` which is user-specified and independent of instance size.
#[derive(Debug, Eq, PartialEq)]
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct FriParams {
/// User-specified FRI configuration.
pub config: FriConfig,

View File

@ -176,7 +176,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
with_context!(
self,
level,
&format!("verify one (of {}) query rounds", num_queries),
&format!("verify one (of {num_queries}) query rounds"),
self.fri_verifier_query_round::<C>(
instance,
challenges,
@ -207,7 +207,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
{
with_context!(
self,
&format!("verify {}'th initial Merkle proof", i),
&format!("verify {i}'th initial Merkle proof"),
self.verify_merkle_proof_to_cap_with_cap_index::<H>(
evals.clone(),
x_index_bits,

View File

@ -345,7 +345,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
pub fn is_equal(&mut self, x: Target, y: Target) -> BoolTarget {
let zero = self.zero();
let equal = self.add_virtual_bool_target();
let equal = self.add_virtual_bool_target_unsafe();
let not_equal = self.not(equal);
let inv = self.add_virtual_target();
self.add_simple_generator(EqualityGenerator { x, y, equal, inv });

View File

@ -53,7 +53,7 @@ impl ArithmeticGate {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ArithmeticGate {
fn id(&self) -> String {
format!("{:?}", self)
format!("{self:?}")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -51,7 +51,7 @@ impl<const D: usize> ArithmeticExtensionGate<D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ArithmeticExtensionGate<D> {
fn id(&self) -> String {
format!("{:?}", self)
format!("{self:?}")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -49,7 +49,7 @@ impl<const B: usize> BaseSumGate<B> {
impl<F: RichField + Extendable<D>, const D: usize, const B: usize> Gate<F, D> for BaseSumGate<B> {
fn id(&self) -> String {
format!("{:?} + Base: {}", self, B)
format!("{self:?} + Base: {B}")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -33,7 +33,7 @@ impl ConstantGate {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ConstantGate {
fn id(&self) -> String {
format!("{:?}", self)
format!("{self:?}")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -70,7 +70,7 @@ impl<F: RichField + Extendable<D>, const D: usize> ExponentiationGate<F, D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ExponentiationGate<F, D> {
fn id(&self) -> String {
format!("{:?}<D={}>", self, D)
format!("{self:?}<D={D}>")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -87,7 +87,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D>
for HighDegreeInterpolationGate<F, D>
{
fn id(&self) -> String {
format!("{:?}<D={}>", self, D)
format!("{self:?}<D={D}>")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -80,7 +80,7 @@ impl<F: RichField + Extendable<D>, const D: usize> LowDegreeInterpolationGate<F,
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for LowDegreeInterpolationGate<F, D> {
fn id(&self) -> String {
format!("{:?}<D={}>", self, D)
format!("{self:?}<D={D}>")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -48,7 +48,7 @@ impl<const D: usize> MulExtensionGate<D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for MulExtensionGate<D> {
fn id(&self) -> String {
format!("{:?}", self)
format!("{self:?}")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -98,7 +98,7 @@ impl<F: RichField + Extendable<D>, const D: usize> PoseidonGate<F, D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for PoseidonGate<F, D> {
fn id(&self) -> String {
format!("{:?}<WIDTH={}>", self, SPONGE_WIDTH)
format!("{self:?}<WIDTH={SPONGE_WIDTH}>")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -117,7 +117,7 @@ impl<F: RichField + Extendable<D> + Poseidon, const D: usize> PoseidonMdsGate<F,
impl<F: RichField + Extendable<D> + Poseidon, const D: usize> Gate<F, D> for PoseidonMdsGate<F, D> {
fn id(&self) -> String {
format!("{:?}<WIDTH={}>", self, SPONGE_WIDTH)
format!("{self:?}<WIDTH={SPONGE_WIDTH}>")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -115,7 +115,7 @@ impl<F: RichField + Extendable<D>, const D: usize> RandomAccessGate<F, D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for RandomAccessGate<F, D> {
fn id(&self) -> String {
format!("{:?}<D={}>", self, D)
format!("{self:?}<D={D}>")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -55,7 +55,7 @@ impl<const D: usize> ReducingGate<D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ReducingGate<D> {
fn id(&self) -> String {
format!("{:?}", self)
format!("{self:?}")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -58,7 +58,7 @@ impl<const D: usize> ReducingExtensionGate<D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ReducingExtensionGate<D> {
fn id(&self) -> String {
format!("{:?}", self)
format!("{self:?}")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -22,7 +22,7 @@ pub(crate) fn generate_partial_witness<
>(
inputs: PartialWitness<F>,
prover_data: &'a ProverOnlyCircuitData<F, C, D>,
common_data: &'a CommonCircuitData<F, C, D>,
common_data: &'a CommonCircuitData<F, D>,
) -> PartitionWitness<'a, F> {
let config = &common_data.config;
let generators = &prover_data.generators;

View File

@ -18,4 +18,5 @@ pub mod gates;
pub mod hash;
pub mod iop;
pub mod plonk;
pub mod recursion;
pub mod util;

View File

@ -34,7 +34,7 @@ use crate::iop::target::{BoolTarget, Target};
use crate::iop::wire::Wire;
use crate::plonk::circuit_data::{
CircuitConfig, CircuitData, CommonCircuitData, ProverCircuitData, ProverOnlyCircuitData,
VerifierCircuitData, VerifierOnlyCircuitData,
VerifierCircuitData, VerifierCircuitTarget, VerifierOnlyCircuitData,
};
use crate::plonk::config::{GenericConfig, Hasher};
use crate::plonk::copy_constraint::CopyConstraint;
@ -83,6 +83,15 @@ pub struct CircuitBuilder<F: RichField + Extendable<D>, const D: usize> {
/// List of constant generators used to fill the constant wires.
constant_generators: Vec<ConstantGenerator<F>>,
/// Optional common data. When it is `Some(goal_data)`, the `build` function panics if the resulting
/// common data doesn't equal `goal_data`.
/// This is used in cyclic recursion.
pub(crate) goal_common_data: Option<CommonCircuitData<F, D>>,
/// Optional verifier data that is registered as public inputs.
/// This is used in cyclic recursion to hold the circuit's own verifier key.
pub(crate) verifier_data_public_input: Option<VerifierCircuitTarget>,
}
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
@ -102,6 +111,8 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
arithmetic_results: HashMap::new(),
current_slots: HashMap::new(),
constant_generators: Vec::new(),
goal_common_data: None,
verifier_data_public_input: None,
};
builder.check_config();
builder
@ -144,6 +155,10 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
targets.iter().for_each(|&t| self.register_public_input(t));
}
pub fn num_public_inputs(&self) -> usize {
self.public_inputs.len()
}
/// Adds a new "virtual" target. This is not an actual wire in the witness, but just a target
/// that helps facilitate witness generation. In particular, a generator can assign a value to a
/// virtual target, which can then be copied to other (virtual or concrete) targets. When we
@ -198,8 +213,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
PolynomialCoeffsExtTarget(coeffs)
}
// TODO: Unsafe
pub fn add_virtual_bool_target(&mut self) -> BoolTarget {
pub fn add_virtual_bool_target_unsafe(&mut self) -> BoolTarget {
BoolTarget::new_unsafe(self.add_virtual_target())
}
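    // The `_unsafe` suffix flags that nothing constrains the underlying target to
    // 0 or 1. A safe counterpart, `add_virtual_bool_target_safe`, appears in the
    // cyclic recursion test below; a minimal sketch of such a variant, assuming
    // the builder's existing `assert_bool` constraint:
    pub fn add_virtual_bool_target_safe(&mut self) -> BoolTarget {
        let b = BoolTarget::new_unsafe(self.add_virtual_target());
        self.assert_bool(b); // Enforces b * b = b, so the witness value is 0 or 1.
        b
    }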
@ -215,6 +229,21 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
self.register_public_input(t);
t
}
/// Add virtual verifier data, register it as public inputs, and store it in `self.verifier_data_public_input`.
/// WARNING: Do not register any public input after calling this! TODO: relax this
pub(crate) fn add_verifier_data_public_input(&mut self) {
let verifier_data = VerifierCircuitTarget {
constants_sigmas_cap: self.add_virtual_cap(self.config.fri_config.cap_height),
circuit_digest: self.add_virtual_hash(),
};
// The verifier data are public inputs.
self.register_public_inputs(&verifier_data.circuit_digest.elements);
for i in 0..self.config.fri_config.num_cap_elements() {
self.register_public_inputs(&verifier_data.constants_sigmas_cap.0[i].elements);
}
self.verifier_data_public_input = Some(verifier_data);
}
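    // For reference: with `cap_height = h`, the cap holds 2^h hashes of four field
    // elements each, so this call appends 4 + 4 * 2^h public inputs at the end of
    // the list, in the layout that `from_slice` in `cyclic_recursion.rs` unpacks:
    //
    //     public_inputs[.. len - 4 - 4*2^h]              application inputs
    //     public_inputs[len - 4 - 4*2^h .. len - 4*2^h]  circuit_digest (4 elements)
    //     public_inputs[len - 4*2^h ..]                  constants_sigmas_cap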
/// Adds a gate to the circuit, and returns its index.
pub fn add_gate<G: Gate<F, D>>(&mut self, gate_type: G, mut constants: Vec<F>) -> usize {
@ -827,6 +856,9 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
k_is,
num_partial_products,
};
if let Some(goal_data) = self.goal_common_data {
assert_eq!(goal_data, common);
}
let prover_only = ProverOnlyCircuitData {
generators: self.generators,

View File

@ -106,7 +106,7 @@ impl CircuitConfig {
pub struct CircuitData<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> {
pub prover_only: ProverOnlyCircuitData<F, C, D>,
pub verifier_only: VerifierOnlyCircuitData<C, D>,
pub common: CommonCircuitData<F, C, D>,
pub common: CommonCircuitData<F, D>,
}
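// A note on the signature churn in the rest of this diff: `CommonCircuitData`
// loses its `C: GenericConfig` parameter. Since `C::F = F`, the config only ever
// contributed its field type (`gates: Vec<GateRef<C::F, D>>`, `k_is: Vec<C::F>`),
// and the config-free form now derives `Clone`, which `cyclic_recursion` uses to
// stash a `goal_common_data` that `build` later compares against. The cost is
// that call sites which used to infer `C` from a `common_data` argument now need
// a turbofish; a representative before/after from `vanishing_poly.rs`:
//
//     // Before: `C` was inferred from `common_data: &CommonCircuitData<F, C, D>`.
//     let constraint_terms = evaluate_gate_constraints(common_data, vars);
//     // After: the type no longer mentions `C`, so it must be named explicitly.
//     let constraint_terms = evaluate_gate_constraints::<F, C, D>(common_data, vars);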
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
@ -196,7 +196,7 @@ pub struct ProverCircuitData<
const D: usize,
> {
pub prover_only: ProverOnlyCircuitData<F, C, D>,
pub common: CommonCircuitData<F, C, D>,
pub common: CommonCircuitData<F, D>,
}
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
@ -223,7 +223,7 @@ pub struct VerifierCircuitData<
const D: usize,
> {
pub verifier_only: VerifierOnlyCircuitData<C, D>,
pub common: CommonCircuitData<F, C, D>,
pub common: CommonCircuitData<F, D>,
}
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
@ -276,7 +276,7 @@ pub struct ProverOnlyCircuitData<
}
/// Circuit data required by the verifier, but not the prover.
#[derive(Debug)]
#[derive(Debug, Eq, PartialEq)]
pub struct VerifierOnlyCircuitData<C: GenericConfig<D>, const D: usize> {
/// A commitment to each constant polynomial and each permutation polynomial.
pub constants_sigmas_cap: MerkleCap<C::F, C::Hasher>,
@ -286,18 +286,14 @@ pub struct VerifierOnlyCircuitData<C: GenericConfig<D>, const D: usize> {
}
/// Circuit data required by both the prover and the verifier.
#[derive(Debug, Eq, PartialEq)]
pub struct CommonCircuitData<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
> {
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct CommonCircuitData<F: RichField + Extendable<D>, const D: usize> {
pub config: CircuitConfig,
pub(crate) fri_params: FriParams,
/// The types of gates used in this circuit, along with their prefixes.
pub(crate) gates: Vec<GateRef<C::F, D>>,
pub(crate) gates: Vec<GateRef<F, D>>,
/// Information on the circuit's selector polynomials.
pub(crate) selectors_info: SelectorsInfo,
@ -314,15 +310,13 @@ pub struct CommonCircuitData<
pub(crate) num_public_inputs: usize,
/// The `{k_i}` values used in `S_ID_i` in Plonk's permutation argument.
pub(crate) k_is: Vec<C::F>,
pub(crate) k_is: Vec<F>,
/// The number of partial products needed to compute the `Z` polynomials.
pub(crate) num_partial_products: usize,
}
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
CommonCircuitData<F, C, D>
{
impl<F: RichField + Extendable<D>, const D: usize> CommonCircuitData<F, D> {
pub const fn degree_bits(&self) -> usize {
self.fri_params.degree_bits
}
@ -494,6 +488,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
/// is intentionally missing certain fields, such as `CircuitConfig`, because we support only a
/// limited form of dynamic inner circuits. We can't practically make things like the wire count
/// dynamic, at least not without setting a maximum wire count and paying for the worst case.
#[derive(Clone)]
pub struct VerifierCircuitTarget {
/// A commitment to each constant polynomial and each permutation polynomial.
pub constants_sigmas_cap: MerkleCapTarget,

View File

@ -30,7 +30,7 @@ fn get_challenges<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, cons
final_poly: &PolynomialCoeffs<F::Extension>,
pow_witness: F,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<ProofChallenges<F, D>> {
let config = &common_data.config;
let num_challenges = config.num_challenges;
@ -74,7 +74,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
pub(crate) fn fri_query_indices(
&self,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<Vec<usize>> {
Ok(self
.get_challenges(self.get_public_inputs_hash(), circuit_digest, common_data)?
@ -87,7 +87,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
&self,
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<ProofChallenges<F, D>> {
let Proof {
wires_cap,
@ -103,7 +103,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
},
} = &self.proof;
get_challenges(
get_challenges::<F, C, D>(
public_inputs_hash,
wires_cap,
plonk_zs_partial_products_cap,
@ -126,7 +126,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
&self,
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<ProofChallenges<F, D>> {
let CompressedProof {
wires_cap,
@ -142,7 +142,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
},
} = &self.proof;
get_challenges(
get_challenges::<F, C, D>(
public_inputs_hash,
wires_cap,
plonk_zs_partial_products_cap,
@ -160,7 +160,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
pub(crate) fn get_inferred_elements(
&self,
challenges: &ProofChallenges<F, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> FriInferredElements<F, D> {
let ProofChallenges {
plonk_zeta,
@ -244,7 +244,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
final_poly: &PolynomialCoeffsExtTarget<D>,
pow_witness: Target,
inner_circuit_digest: HashOutTarget,
inner_common_data: &CommonCircuitData<F, C, D>,
inner_common_data: &CommonCircuitData<F, D>,
) -> ProofChallengesTarget<D>
where
C::Hasher: AlgebraicHasher<F>,
@ -292,7 +292,7 @@ impl<const D: usize> ProofWithPublicInputsTarget<D> {
builder: &mut CircuitBuilder<F, D>,
public_inputs_hash: HashOutTarget,
inner_circuit_digest: HashOutTarget,
inner_common_data: &CommonCircuitData<F, C, D>,
inner_common_data: &CommonCircuitData<F, D>,
) -> ProofChallengesTarget<D>
where
C::Hasher: AlgebraicHasher<F>,
@ -311,7 +311,7 @@ impl<const D: usize> ProofWithPublicInputsTarget<D> {
},
} = &self.proof;
builder.get_challenges(
builder.get_challenges::<C>(
public_inputs_hash,
wires_cap,
plonk_zs_partial_products_cap,

View File

@ -1,6 +1,5 @@
pub mod circuit_builder;
pub mod circuit_data;
pub mod conditional_recursive_verifier;
pub mod config;
pub(crate) mod copy_constraint;
mod get_challenges;
@ -8,7 +7,6 @@ pub(crate) mod permutation_argument;
pub mod plonk_common;
pub mod proof;
pub mod prover;
pub mod recursive_verifier;
mod validate_shape;
pub(crate) mod vanishing_poly;
pub mod vars;

View File

@ -82,7 +82,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
pub fn compress(
self,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<CompressedProofWithPublicInputs<F, C, D>> {
let indices = self.fri_query_indices(circuit_digest, common_data)?;
let compressed_proof = self.proof.compress(&indices, &common_data.fri_params);
@ -106,7 +106,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
pub fn from_bytes(
bytes: Vec<u8>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<Self> {
let mut buffer = Buffer::new(bytes);
let proof = buffer.read_proof_with_public_inputs(common_data)?;
@ -178,7 +178,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
pub fn decompress(
self,
circuit_digest: &<<C as GenericConfig<D>>::Hasher as Hasher<C::F>>::Hash,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<ProofWithPublicInputs<F, C, D>>
where
[(); C::Hasher::HASH_SIZE]:,
@ -198,7 +198,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
pub(crate) fn verify(
self,
verifier_data: &VerifierOnlyCircuitData<C, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<()>
where
[(); C::Hasher::HASH_SIZE]:,
@ -240,7 +240,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
pub fn from_bytes(
bytes: Vec<u8>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<Self> {
let mut buffer = Buffer::new(bytes);
let proof = buffer.read_compressed_proof_with_public_inputs(common_data)?;
@ -303,7 +303,7 @@ impl<F: RichField + Extendable<D>, const D: usize> OpeningSet<F, D> {
wires_commitment: &PolynomialBatch<F, C, D>,
zs_partial_products_commitment: &PolynomialBatch<F, C, D>,
quotient_polys_commitment: &PolynomialBatch<F, C, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Self {
let eval_commitment = |z: F::Extension, c: &PolynomialBatch<F, C, D>| {
c.polynomials

View File

@ -28,7 +28,7 @@ use crate::util::transpose;
pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
prover_data: &ProverOnlyCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
inputs: PartialWitness<F>,
timing: &mut TimingTree,
) -> Result<ProofWithPublicInputs<F, C, D>>
@ -233,7 +233,7 @@ fn all_wires_permutation_partial_products<
betas: &[F],
gammas: &[F],
prover_data: &ProverOnlyCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Vec<Vec<PolynomialValues<F>>> {
(0..common_data.config.num_challenges)
.map(|i| {
@ -260,7 +260,7 @@ fn wires_permutation_partial_products_and_zs<
beta: F,
gamma: F,
prover_data: &ProverOnlyCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Vec<PolynomialValues<F>> {
let degree = common_data.quotient_degree_factor;
let subgroup = &prover_data.subgroup;
@ -318,7 +318,7 @@ fn compute_quotient_polys<
C: GenericConfig<D, F = F>,
const D: usize,
>(
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
prover_data: &'a ProverOnlyCircuitData<F, C, D>,
public_inputs_hash: &<<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash,
wires_commitment: &'a PolynomialBatch<F, C, D>,
@ -424,7 +424,7 @@ fn compute_quotient_polys<
public_inputs_hash,
);
let mut quotient_values_batch = eval_vanishing_poly_base_batch(
let mut quotient_values_batch = eval_vanishing_poly_base_batch::<F, C, D>(
common_data,
&indices_batch,
&shifted_xs_batch,

View File

@ -8,7 +8,7 @@ use crate::plonk::proof::{OpeningSet, Proof, ProofWithPublicInputs};
pub(crate) fn validate_proof_with_pis_shape<F, C, const D: usize>(
proof_with_pis: &ProofWithPublicInputs<F, C, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<()>
where
F: RichField + Extendable<D>,
@ -32,7 +32,7 @@ where
fn validate_proof_shape<F, C, const D: usize>(
proof: &Proof<F, C, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<()>
where
F: RichField + Extendable<D>,

View File

@ -25,7 +25,7 @@ pub(crate) fn eval_vanishing_poly<
C: GenericConfig<D, F = F>,
const D: usize,
>(
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
x: F::Extension,
vars: EvaluationVars<F, D>,
local_zs: &[F::Extension],
@ -39,7 +39,7 @@ pub(crate) fn eval_vanishing_poly<
let max_degree = common_data.quotient_degree_factor;
let num_prods = common_data.num_partial_products;
let constraint_terms = evaluate_gate_constraints(common_data, vars);
let constraint_terms = evaluate_gate_constraints::<F, C, D>(common_data, vars);
// The L_0(x) (Z(x) - 1) vanishing terms.
let mut vanishing_z_1_terms = Vec::new();
@ -100,7 +100,7 @@ pub(crate) fn eval_vanishing_poly_base_batch<
C: GenericConfig<D, F = F>,
const D: usize,
>(
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
indices_batch: &[usize],
xs_batch: &[F],
vars_batch: EvaluationVarsBaseBatch<F>,
@ -126,7 +126,8 @@ pub(crate) fn eval_vanishing_poly_base_batch<
let num_gate_constraints = common_data.num_gate_constraints;
let constraint_terms_batch = evaluate_gate_constraints_base_batch(common_data, vars_batch);
let constraint_terms_batch =
evaluate_gate_constraints_base_batch::<F, C, D>(common_data, vars_batch);
debug_assert!(constraint_terms_batch.len() == n * num_gate_constraints);
let num_challenges = common_data.config.num_challenges;
@ -210,7 +211,7 @@ pub fn evaluate_gate_constraints<
C: GenericConfig<D, F = F>,
const D: usize,
>(
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
vars: EvaluationVars<F, D>,
) -> Vec<F::Extension> {
let mut constraints = vec![F::Extension::ZERO; common_data.num_gate_constraints];
@ -244,7 +245,7 @@ pub fn evaluate_gate_constraints_base_batch<
C: GenericConfig<D, F = F>,
const D: usize,
>(
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
vars_batch: EvaluationVarsBaseBatch<F>,
) -> Vec<F> {
let mut constraints_batch = vec![F::ZERO; common_data.num_gate_constraints * vars_batch.len()];
@ -276,7 +277,7 @@ pub fn evaluate_gate_constraints_circuit<
const D: usize,
>(
builder: &mut CircuitBuilder<F, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
vars: EvaluationTargets<D>,
) -> Vec<ExtensionTarget<D>> {
let mut all_gate_constraints = vec![builder.zero_extension(); common_data.num_gate_constraints];
@ -311,7 +312,7 @@ pub(crate) fn eval_vanishing_poly_circuit<
const D: usize,
>(
builder: &mut CircuitBuilder<F, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
x: ExtensionTarget<D>,
x_pow_deg: ExtensionTarget<D>,
vars: EvaluationTargets<D>,
@ -329,7 +330,7 @@ pub(crate) fn eval_vanishing_poly_circuit<
let constraint_terms = with_context!(
builder,
"evaluate gate constraints",
evaluate_gate_constraints_circuit(builder, common_data, vars,)
evaluate_gate_constraints_circuit::<F, C, D>(builder, common_data, vars,)
);
// The L_0(x) (Z(x) - 1) vanishing terms.

View File

@ -15,7 +15,7 @@ use crate::plonk::vars::EvaluationVars;
pub(crate) fn verify<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
proof_with_pis: ProofWithPublicInputs<F, C, D>,
verifier_data: &VerifierOnlyCircuitData<C, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<()>
where
[(); C::Hasher::HASH_SIZE]:,
@ -47,7 +47,7 @@ pub(crate) fn verify_with_challenges<
public_inputs_hash: <<C as GenericConfig<D>>::InnerHasher as Hasher<F>>::Hash,
challenges: ProofChallenges<F, D>,
verifier_data: &VerifierOnlyCircuitData<C, D>,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<()>
where
[(); C::Hasher::HASH_SIZE]:,
@ -65,7 +65,7 @@ where
let partial_products = &proof.openings.partial_products;
// Evaluate the vanishing polynomial at our challenge point, zeta.
let vanishing_polys_zeta = eval_vanishing_poly(
let vanishing_polys_zeta = eval_vanishing_poly::<F, C, D>(
common_data,
challenges.plonk_zeta,
vars,

View File

@ -24,13 +24,12 @@ use crate::plonk::proof::{
use crate::with_context;
/// Generate a dummy proof whose shape matches a given `CommonCircuitData`.
#[allow(unused)] // TODO: should be used soon.
pub(crate) fn dummy_proof<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<(
ProofWithPublicInputs<F, C, D>,
VerifierOnlyCircuitData<C, D>,
@ -80,7 +79,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
inner_verifier_data0: &VerifierCircuitTarget,
proof_with_pis1: &ProofWithPublicInputsTarget<D>,
inner_verifier_data1: &VerifierCircuitTarget,
inner_common_data: &CommonCircuitData<F, C, D>,
inner_common_data: &CommonCircuitData<F, D>,
) where
C::Hasher: AlgebraicHasher<F>,
{
@ -144,7 +143,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
),
};
self.verify_proof(selected_proof, &selected_verifier_data, inner_common_data);
self.verify_proof::<C>(selected_proof, &selected_verifier_data, inner_common_data);
}
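    // Under the hood, the conditional verifier selects, target by target, between
    // the real and dummy artifacts, then runs a single `verify_proof` on the
    // winner. The helper `select_hash` (made `pub(crate)` below) is not shown in
    // full here; a plausible sketch, assuming it returns `h0` when `b` is true,
    // consistent with its use in the cyclic recursion test:
    pub(crate) fn select_hash(
        &mut self,
        b: BoolTarget,
        h0: HashOutTarget,
        h1: HashOutTarget,
    ) -> HashOutTarget {
        HashOutTarget {
            // `select(b, x, y)` returns `x` when `b` is true, else `y`.
            elements: std::array::from_fn(|i| self.select(b, h0.elements[i], h1.elements[i])),
        }
    }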
/// Conditionally verify a proof with a newly generated dummy proof.
@ -153,18 +152,18 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
condition: BoolTarget,
proof_with_pis: &ProofWithPublicInputsTarget<D>,
inner_verifier_data: &VerifierCircuitTarget,
inner_common_data: &CommonCircuitData<F, C, D>,
inner_common_data: &CommonCircuitData<F, D>,
) -> (ProofWithPublicInputsTarget<D>, VerifierCircuitTarget)
where
C::Hasher: AlgebraicHasher<F>,
{
let dummy_proof = self.add_virtual_proof_with_pis(inner_common_data);
let dummy_proof = self.add_virtual_proof_with_pis::<C>(inner_common_data);
let dummy_verifier_data = VerifierCircuitTarget {
constants_sigmas_cap: self
.add_virtual_cap(inner_common_data.config.fri_config.cap_height),
circuit_digest: self.add_virtual_hash(),
};
self.conditionally_verify_proof(
self.conditionally_verify_proof::<C>(
condition,
proof_with_pis,
inner_verifier_data,
@ -183,7 +182,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
.collect()
}
fn select_hash(
pub(crate) fn select_hash(
&mut self,
b: BoolTarget,
h0: HashOutTarget,
@ -406,10 +405,10 @@ mod tests {
// Conditionally verify the two proofs.
let mut builder = CircuitBuilder::<F, D>::new(config);
let mut pw = PartialWitness::new();
let pt = builder.add_virtual_proof_with_pis(&data.common);
let pt = builder.add_virtual_proof_with_pis::<C>(&data.common);
pw.set_proof_with_pis_target(&pt, &proof);
let dummy_pt = builder.add_virtual_proof_with_pis(&data.common);
pw.set_proof_with_pis_target(&dummy_pt, &dummy_proof);
let dummy_pt = builder.add_virtual_proof_with_pis::<C>(&data.common);
pw.set_proof_with_pis_target::<C, D>(&dummy_pt, &dummy_proof);
let inner_data = VerifierCircuitTarget {
constants_sigmas_cap: builder.add_virtual_cap(data.common.config.fri_config.cap_height),
circuit_digest: builder.add_virtual_hash(),
@ -421,7 +420,7 @@ mod tests {
};
pw.set_verifier_data_target(&dummy_inner_data, &dummy_data);
let b = builder.constant_bool(F::rand().0 % 2 == 0);
builder.conditionally_verify_proof(
builder.conditionally_verify_proof::<C>(
b,
&pt,
&inner_data,

View File

@ -0,0 +1,437 @@
#![allow(clippy::int_plus_one)] // Makes more sense for some inequalities below.
use anyhow::{ensure, Result};
use itertools::Itertools;
use plonky2_field::extension::Extendable;
use crate::gates::noop::NoopGate;
use crate::hash::hash_types::{HashOut, HashOutTarget, MerkleCapTarget, RichField};
use crate::hash::merkle_tree::MerkleCap;
use crate::iop::target::{BoolTarget, Target};
use crate::iop::witness::{PartialWitness, Witness};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::{
CommonCircuitData, VerifierCircuitTarget, VerifierOnlyCircuitData,
};
use crate::plonk::config::Hasher;
use crate::plonk::config::{AlgebraicHasher, GenericConfig};
use crate::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
use crate::recursion::conditional_recursive_verifier::dummy_proof;
pub struct CyclicRecursionData<
'a,
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
> {
proof: &'a Option<ProofWithPublicInputs<F, C, D>>,
verifier_data: &'a VerifierOnlyCircuitData<C, D>,
common_data: &'a CommonCircuitData<F, D>,
}
pub struct CyclicRecursionTarget<const D: usize> {
pub proof: ProofWithPublicInputsTarget<D>,
pub verifier_data: VerifierCircuitTarget,
pub dummy_proof: ProofWithPublicInputsTarget<D>,
pub dummy_verifier_data: VerifierCircuitTarget,
pub base_case: BoolTarget,
}
impl<C: GenericConfig<D>, const D: usize> VerifierOnlyCircuitData<C, D> {
fn from_slice(slice: &[C::F], common_data: &CommonCircuitData<C::F, D>) -> Result<Self>
where
C::Hasher: AlgebraicHasher<C::F>,
{
// The structure of the public inputs is `[..., circuit_digest, constants_sigmas_cap]`.
let cap_len = common_data.config.fri_config.num_cap_elements();
let len = slice.len();
ensure!(len >= 4 + 4 * cap_len, "Not enough public inputs");
let constants_sigmas_cap = MerkleCap(
(0..cap_len)
.map(|i| HashOut {
elements: std::array::from_fn(|j| slice[len - 4 * (cap_len - i) + j]),
})
.collect(),
);
let circuit_digest =
HashOut::from_partial(&slice[len - 4 - 4 * cap_len..len - 4 * cap_len]);
Ok(Self {
circuit_digest,
constants_sigmas_cap,
})
}
}
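// To sanity-check the indexing above, a worked instance with `cap_len = 2` and
// `len = L`:
//
//     circuit_digest = slice[L - 12 .. L - 8]   // len - 4 - 4*cap_len .. len - 4*cap_len
//     cap[0]         = slice[L - 8  .. L - 4]   // base index len - 4*(cap_len - 0)
//     cap[1]         = slice[L - 4  .. L]       // base index len - 4*(cap_len - 1)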
impl VerifierCircuitTarget {
fn from_slice<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
slice: &[Target],
common_data: &CommonCircuitData<F, D>,
) -> Result<Self> {
let cap_len = common_data.config.fri_config.num_cap_elements();
let len = slice.len();
ensure!(len >= 4 + 4 * cap_len, "Not enough public inputs");
let constants_sigmas_cap = MerkleCapTarget(
(0..cap_len)
.map(|i| HashOutTarget {
elements: std::array::from_fn(|j| slice[len - 4 * (cap_len - i) + j]),
})
.collect(),
);
let circuit_digest = HashOutTarget {
elements: std::array::from_fn(|i| slice[len - 4 - 4 * cap_len + i]),
};
Ok(Self {
circuit_digest,
constants_sigmas_cap,
})
}
}
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
/// Cyclic recursion gadget.
/// WARNING: Do not register any public input after calling this! TODO: relax this
pub fn cyclic_recursion<C: GenericConfig<D, F = F>>(
&mut self,
        // Flag that is true in the base case, where we verify a dummy proof to bootstrap the cycle, and false otherwise.
base_case: BoolTarget,
previous_virtual_public_inputs: &[Target],
common_data: &mut CommonCircuitData<F, D>,
) -> Result<CyclicRecursionTarget<D>>
where
C::Hasher: AlgebraicHasher<F>,
[(); C::Hasher::HASH_SIZE]:,
{
if self.verifier_data_public_input.is_none() {
self.add_verifier_data_public_input();
}
let verifier_data = self.verifier_data_public_input.clone().unwrap();
common_data.num_public_inputs = self.num_public_inputs();
self.goal_common_data = Some(common_data.clone());
let dummy_verifier_data = VerifierCircuitTarget {
constants_sigmas_cap: self.add_virtual_cap(self.config.fri_config.cap_height),
circuit_digest: self.add_virtual_hash(),
};
let proof = self.add_virtual_proof_with_pis::<C>(common_data);
let dummy_proof = self.add_virtual_proof_with_pis::<C>(common_data);
let pis = VerifierCircuitTarget::from_slice::<F, C, D>(&proof.public_inputs, common_data)?;
// Connect previous verifier data to current one. This guarantees that every proof in the cycle uses the same verifier data.
self.connect_hashes(pis.circuit_digest, verifier_data.circuit_digest);
for (h0, h1) in pis
.constants_sigmas_cap
.0
.iter()
.zip_eq(&verifier_data.constants_sigmas_cap.0)
{
self.connect_hashes(*h0, *h1);
}
for (x, y) in previous_virtual_public_inputs
.iter()
.zip(&proof.public_inputs)
{
self.connect(*x, *y);
}
// Verify the dummy proof if `base_case` is set to true, otherwise verify the "real" proof.
self.conditionally_verify_proof::<C>(
base_case,
&dummy_proof,
&dummy_verifier_data,
&proof,
&verifier_data,
common_data,
);
// Make sure we have enough gates to match `common_data`.
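        // (Padding to half the target degree means the builder's rounding to the
        // next power of two lands on exactly `common_data.degree()` rows; this
        // rationale is inferred, not stated here.)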
while self.num_gates() < (common_data.degree() / 2) {
self.add_gate(NoopGate, vec![]);
}
        // Make sure the circuit's gate set includes every gate in `common_data`.
for g in &common_data.gates {
self.add_gate_to_gate_set(g.clone());
}
Ok(CyclicRecursionTarget {
proof,
verifier_data: verifier_data.clone(),
dummy_proof,
dummy_verifier_data,
base_case,
})
}
}
/// Set the targets in a `CyclicRecursionTarget` to their corresponding values in a `CyclicRecursionData`.
pub fn set_cyclic_recursion_data_target<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
pw: &mut PartialWitness<F>,
cyclic_recursion_data_target: &CyclicRecursionTarget<D>,
cyclic_recursion_data: &CyclicRecursionData<F, C, D>,
// Public inputs to set in the base case to seed some initial data.
public_inputs: &[F],
) -> Result<()>
where
C::Hasher: AlgebraicHasher<F>,
[(); C::Hasher::HASH_SIZE]:,
{
if let Some(proof) = cyclic_recursion_data.proof {
pw.set_bool_target(cyclic_recursion_data_target.base_case, false);
pw.set_proof_with_pis_target(&cyclic_recursion_data_target.proof, proof);
pw.set_verifier_data_target(
&cyclic_recursion_data_target.verifier_data,
cyclic_recursion_data.verifier_data,
);
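        // The dummy slots are unused when a real proof is supplied, but they still
        // need well-formed witness values, so the real proof and verifier data are
        // reused to fill them.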
pw.set_proof_with_pis_target(&cyclic_recursion_data_target.dummy_proof, proof);
pw.set_verifier_data_target(
&cyclic_recursion_data_target.dummy_verifier_data,
cyclic_recursion_data.verifier_data,
);
} else {
let (dummy_proof, dummy_data) = dummy_proof::<F, C, D>(cyclic_recursion_data.common_data)?;
pw.set_bool_target(cyclic_recursion_data_target.base_case, true);
let mut proof = dummy_proof.clone();
proof.public_inputs[0..public_inputs.len()].copy_from_slice(public_inputs);
let pis_len = proof.public_inputs.len();
// The circuit checks that the verifier data is the same throughout the cycle, so
// we set the verifier data to the "real" verifier data even though it's unused in the base case.
let num_cap = cyclic_recursion_data
.common_data
.config
.fri_config
.num_cap_elements();
let s = pis_len - 4 - 4 * num_cap;
proof.public_inputs[s..s + 4]
.copy_from_slice(&cyclic_recursion_data.verifier_data.circuit_digest.elements);
for i in 0..num_cap {
proof.public_inputs[s + 4 * (1 + i)..s + 4 * (2 + i)].copy_from_slice(
&cyclic_recursion_data.verifier_data.constants_sigmas_cap.0[i].elements,
);
}
pw.set_proof_with_pis_target(&cyclic_recursion_data_target.proof, &proof);
pw.set_verifier_data_target(
&cyclic_recursion_data_target.verifier_data,
cyclic_recursion_data.verifier_data,
);
pw.set_proof_with_pis_target(&cyclic_recursion_data_target.dummy_proof, &dummy_proof);
pw.set_verifier_data_target(
&cyclic_recursion_data_target.dummy_verifier_data,
&dummy_data,
);
}
Ok(())
}
/// An additional check to perform on a cyclic recursive proof, on top of verifying the proof itself:
/// ensures that the purported verifier data in the public inputs match the real verifier data.
pub fn check_cyclic_proof_verifier_data<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
proof: &ProofWithPublicInputs<F, C, D>,
verifier_data: &VerifierOnlyCircuitData<C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<()>
where
C::Hasher: AlgebraicHasher<F>,
{
let pis = VerifierOnlyCircuitData::<C, D>::from_slice(&proof.public_inputs, common_data)?;
ensure!(verifier_data.constants_sigmas_cap == pis.constants_sigmas_cap);
ensure!(verifier_data.circuit_digest == pis.circuit_digest);
Ok(())
}
#[cfg(test)]
mod tests {
use anyhow::Result;
use plonky2_field::extension::Extendable;
use plonky2_field::types::PrimeField64;
use crate::field::types::Field;
use crate::gates::noop::NoopGate;
use crate::hash::hash_types::RichField;
use crate::hash::hashing::hash_n_to_hash_no_pad;
use crate::hash::poseidon::{PoseidonHash, PoseidonPermutation};
use crate::iop::witness::PartialWitness;
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::{CircuitConfig, CommonCircuitData, VerifierCircuitTarget};
use crate::plonk::config::{AlgebraicHasher, GenericConfig, Hasher, PoseidonGoldilocksConfig};
use crate::recursion::cyclic_recursion::{
check_cyclic_proof_verifier_data, set_cyclic_recursion_data_target, CyclicRecursionData,
};
// Generates `CommonCircuitData` usable for recursion.
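    // It builds a circuit, then a circuit verifying that circuit, twice over, so
    // that the gate set and FRI parameters reach the fixed point shared by every
    // layer of the cycle, then pads to 2^12 gates to pin down the degree.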
fn common_data_for_recursion<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>() -> CommonCircuitData<F, D>
where
C::Hasher: AlgebraicHasher<F>,
[(); C::Hasher::HASH_SIZE]:,
{
let config = CircuitConfig::standard_recursion_config();
let builder = CircuitBuilder::<F, D>::new(config);
let data = builder.build::<C>();
let config = CircuitConfig::standard_recursion_config();
let mut builder = CircuitBuilder::<F, D>::new(config);
let proof = builder.add_virtual_proof_with_pis::<C>(&data.common);
let verifier_data = VerifierCircuitTarget {
constants_sigmas_cap: builder.add_virtual_cap(data.common.config.fri_config.cap_height),
circuit_digest: builder.add_virtual_hash(),
};
builder.verify_proof::<C>(proof, &verifier_data, &data.common);
let data = builder.build::<C>();
let config = CircuitConfig::standard_recursion_config();
let mut builder = CircuitBuilder::<F, D>::new(config);
let proof = builder.add_virtual_proof_with_pis::<C>(&data.common);
let verifier_data = VerifierCircuitTarget {
constants_sigmas_cap: builder.add_virtual_cap(data.common.config.fri_config.cap_height),
circuit_digest: builder.add_virtual_hash(),
};
builder.verify_proof::<C>(proof, &verifier_data, &data.common);
while builder.num_gates() < 1 << 12 {
builder.add_gate(NoopGate, vec![]);
}
builder.build::<C>().common
}
#[test]
fn test_cyclic_recursion() -> Result<()> {
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
let config = CircuitConfig::standard_recursion_config();
let mut pw = PartialWitness::new();
let mut builder = CircuitBuilder::<F, D>::new(config);
// Circuit that computes a repeated hash.
let initial_hash = builder.add_virtual_hash();
builder.register_public_inputs(&initial_hash.elements);
// Hash from the previous proof.
let old_hash = builder.add_virtual_hash();
// The input hash is either the previous hash or the initial hash depending on whether
// the last proof was a base case.
let input_hash = builder.add_virtual_hash();
let h = builder.hash_n_to_hash_no_pad::<PoseidonHash>(input_hash.elements.to_vec());
builder.register_public_inputs(&h.elements);
// Previous counter.
let old_counter = builder.add_virtual_target();
let one = builder.one();
let new_counter = builder.add_virtual_public_input();
let old_pis = [
initial_hash.elements.as_slice(),
old_hash.elements.as_slice(),
[old_counter].as_slice(),
]
.concat();
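        // `old_pis` mirrors the order in which this circuit registers its own
        // public inputs (initial_hash, new hash, counter), so the previous proof's
        // outputs line up with them positionally in `cyclic_recursion`.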
let mut common_data = common_data_for_recursion::<F, C, D>();
let base_case = builder.add_virtual_bool_target_safe();
// Add cyclic recursion gadget.
let cyclic_data_target =
builder.cyclic_recursion::<C>(base_case, &old_pis, &mut common_data)?;
let input_hash_bis =
builder.select_hash(cyclic_data_target.base_case, initial_hash, old_hash);
builder.connect_hashes(input_hash, input_hash_bis);
let not_base_case = builder.sub(one, cyclic_data_target.base_case.target);
// New counter is the previous counter +1 if the previous proof wasn't a base case.
let new_counter_bis = builder.add(old_counter, not_base_case);
builder.connect(new_counter, new_counter_bis);
let cyclic_circuit_data = builder.build::<C>();
let cyclic_recursion_data = CyclicRecursionData {
proof: &None, // Base case: We don't have a proof to put here yet.
verifier_data: &cyclic_circuit_data.verifier_only,
common_data: &cyclic_circuit_data.common,
};
let initial_hash = [F::ZERO, F::ONE, F::TWO, F::from_canonical_usize(3)];
set_cyclic_recursion_data_target(
&mut pw,
&cyclic_data_target,
&cyclic_recursion_data,
&initial_hash,
)?;
let proof = cyclic_circuit_data.prove(pw)?;
check_cyclic_proof_verifier_data(
&proof,
cyclic_recursion_data.verifier_data,
cyclic_recursion_data.common_data,
)?;
cyclic_circuit_data.verify(proof.clone())?;
// 1st recursive layer.
let mut pw = PartialWitness::new();
let cyclic_recursion_data = CyclicRecursionData {
proof: &Some(proof), // Input previous proof.
verifier_data: &cyclic_circuit_data.verifier_only,
common_data: &cyclic_circuit_data.common,
};
set_cyclic_recursion_data_target(
&mut pw,
&cyclic_data_target,
&cyclic_recursion_data,
&[],
)?;
let proof = cyclic_circuit_data.prove(pw)?;
check_cyclic_proof_verifier_data(
&proof,
cyclic_recursion_data.verifier_data,
cyclic_recursion_data.common_data,
)?;
cyclic_circuit_data.verify(proof.clone())?;
// 2nd recursive layer.
let mut pw = PartialWitness::new();
let cyclic_recursion_data = CyclicRecursionData {
proof: &Some(proof), // Input previous proof.
verifier_data: &cyclic_circuit_data.verifier_only,
common_data: &cyclic_circuit_data.common,
};
set_cyclic_recursion_data_target(
&mut pw,
&cyclic_data_target,
&cyclic_recursion_data,
&[],
)?;
let proof = cyclic_circuit_data.prove(pw)?;
check_cyclic_proof_verifier_data(
&proof,
cyclic_recursion_data.verifier_data,
cyclic_recursion_data.common_data,
)?;
// Verify that the proof correctly computes a repeated hash.
let initial_hash = &proof.public_inputs[..4];
let hash = &proof.public_inputs[4..8];
let counter = proof.public_inputs[8];
let mut h: [F; 4] = initial_hash.try_into().unwrap();
assert_eq!(
hash,
std::iter::repeat_with(|| {
h = hash_n_to_hash_no_pad::<F, PoseidonPermutation>(&h).elements;
h
})
.nth(counter.to_canonical_u64() as usize)
.unwrap()
);
cyclic_circuit_data.verify(proof)
}
}

View File

@ -0,0 +1,3 @@
pub mod conditional_recursive_verifier;
pub mod cyclic_recursion;
pub mod recursive_verifier;

View File

@ -19,7 +19,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
&mut self,
proof_with_pis: ProofWithPublicInputsTarget<D>,
inner_verifier_data: &VerifierCircuitTarget,
inner_common_data: &CommonCircuitData<F, C, D>,
inner_common_data: &CommonCircuitData<F, D>,
) where
C::Hasher: AlgebraicHasher<F>,
{
@ -29,14 +29,14 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
);
let public_inputs_hash =
self.hash_n_to_hash_no_pad::<C::InnerHasher>(proof_with_pis.public_inputs.clone());
let challenges = proof_with_pis.get_challenges(
let challenges = proof_with_pis.get_challenges::<F, C>(
self,
public_inputs_hash,
inner_verifier_data.circuit_digest,
inner_common_data,
);
self.verify_proof_with_challenges(
self.verify_proof_with_challenges::<C>(
proof_with_pis.proof,
public_inputs_hash,
challenges,
@ -52,7 +52,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
public_inputs_hash: HashOutTarget,
challenges: ProofChallengesTarget<D>,
inner_verifier_data: &VerifierCircuitTarget,
inner_common_data: &CommonCircuitData<F, C, D>,
inner_common_data: &CommonCircuitData<F, D>,
) where
C::Hasher: AlgebraicHasher<F>,
{
@ -75,7 +75,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
let vanishing_polys_zeta = with_context!(
self,
"evaluate the vanishing polynomial at our challenge point, zeta.",
eval_vanishing_poly_circuit(
eval_vanishing_poly_circuit::<F, C, D>(
self,
inner_common_data,
challenges.plonk_zeta,
@ -129,9 +129,9 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
pub fn add_virtual_proof_with_pis<InnerC: GenericConfig<D, F = F>>(
&mut self,
common_data: &CommonCircuitData<F, InnerC, D>,
common_data: &CommonCircuitData<F, D>,
) -> ProofWithPublicInputsTarget<D> {
let proof = self.add_virtual_proof(common_data);
let proof = self.add_virtual_proof::<InnerC>(common_data);
let public_inputs = self.add_virtual_targets(common_data.num_public_inputs);
ProofWithPublicInputsTarget {
proof,
@ -141,7 +141,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
fn add_virtual_proof<InnerC: GenericConfig<D, F = F>>(
&mut self,
common_data: &CommonCircuitData<F, InnerC, D>,
common_data: &CommonCircuitData<F, D>,
) -> ProofTarget<D> {
let config = &common_data.config;
let fri_params = &common_data.fri_params;
@ -159,14 +159,14 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
wires_cap: self.add_virtual_cap(cap_height),
plonk_zs_partial_products_cap: self.add_virtual_cap(cap_height),
quotient_polys_cap: self.add_virtual_cap(cap_height),
openings: self.add_opening_set(common_data),
openings: self.add_opening_set::<InnerC>(common_data),
opening_proof: self.add_virtual_fri_proof(num_leaves_per_oracle, fri_params),
}
}
fn add_opening_set<InnerC: GenericConfig<D, F = F>>(
&mut self,
common_data: &CommonCircuitData<F, InnerC, D>,
common_data: &CommonCircuitData<F, D>,
) -> OpeningSetTarget<D> {
let config = &common_data.config;
let num_challenges = config.num_challenges;
@ -330,7 +330,7 @@ mod tests {
) -> Result<(
ProofWithPublicInputs<F, C, D>,
VerifierOnlyCircuitData<C, D>,
CommonCircuitData<F, C, D>,
CommonCircuitData<F, D>,
)>
where
[(); C::Hasher::HASH_SIZE]:,
@ -356,7 +356,7 @@ mod tests {
>(
inner_proof: ProofWithPublicInputs<F, InnerC, D>,
inner_vd: VerifierOnlyCircuitData<InnerC, D>,
inner_cd: CommonCircuitData<F, InnerC, D>,
inner_cd: CommonCircuitData<F, D>,
config: &CircuitConfig,
min_degree_bits: Option<usize>,
print_gate_counts: bool,
@ -364,7 +364,7 @@ mod tests {
) -> Result<(
ProofWithPublicInputs<F, C, D>,
VerifierOnlyCircuitData<C, D>,
CommonCircuitData<F, C, D>,
CommonCircuitData<F, D>,
)>
where
InnerC::Hasher: AlgebraicHasher<F>,
@ -372,7 +372,7 @@ mod tests {
{
let mut builder = CircuitBuilder::<F, D>::new(config.clone());
let mut pw = PartialWitness::new();
let pt = builder.add_virtual_proof_with_pis(&inner_cd);
let pt = builder.add_virtual_proof_with_pis::<InnerC>(&inner_cd);
pw.set_proof_with_pis_target(&pt, &inner_proof);
let inner_data = VerifierCircuitTarget {
@ -385,7 +385,7 @@ mod tests {
);
pw.set_hash_target(inner_data.circuit_digest, inner_vd.circuit_digest);
builder.verify_proof(pt, &inner_data, &inner_cd);
builder.verify_proof::<InnerC>(pt, &inner_data, &inner_cd);
if print_gate_counts {
builder.print_gate_counts(0);
@ -422,7 +422,7 @@ mod tests {
>(
proof: &ProofWithPublicInputs<F, C, D>,
vd: &VerifierOnlyCircuitData<C, D>,
cd: &CommonCircuitData<F, C, D>,
cd: &CommonCircuitData<F, D>,
) -> Result<()>
where
[(); C::Hasher::HASH_SIZE]:,

View File

@ -165,7 +165,7 @@ impl Buffer {
const D: usize,
>(
&mut self,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<OpeningSet<F, D>> {
let config = &common_data.config;
let constants = self.read_field_ext_vec::<F, D>(common_data.num_constants)?;
@ -233,7 +233,7 @@ impl Buffer {
const D: usize,
>(
&mut self,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<FriInitialTreeProof<F, C::Hasher>> {
let config = &common_data.config;
let salt = salt_size(common_data.fri_params.hiding);
@ -312,12 +312,12 @@ impl Buffer {
const D: usize,
>(
&mut self,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<Vec<FriQueryRound<F, C::Hasher, D>>> {
let config = &common_data.config;
let mut fqrs = Vec::with_capacity(config.fri_config.num_query_rounds);
for _ in 0..config.fri_config.num_query_rounds {
let initial_trees_proof = self.read_fri_initial_proof(common_data)?;
let initial_trees_proof = self.read_fri_initial_proof::<F, C, D>(common_data)?;
let steps = common_data
.fri_params
.reduction_arity_bits
@ -345,13 +345,13 @@ impl Buffer {
}
fn read_fri_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
&mut self,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<FriProof<F, C::Hasher, D>> {
let config = &common_data.config;
let commit_phase_merkle_caps = (0..common_data.fri_params.reduction_arity_bits.len())
.map(|_| self.read_merkle_cap(config.fri_config.cap_height))
.collect::<Result<Vec<_>>>()?;
let query_round_proofs = self.read_fri_query_rounds(common_data)?;
let query_round_proofs = self.read_fri_query_rounds::<F, C, D>(common_data)?;
let final_poly = PolynomialCoeffs::new(
self.read_field_ext_vec::<F, D>(common_data.fri_params.final_poly_len())?,
);
@ -376,14 +376,14 @@ impl Buffer {
}
pub fn read_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
&mut self,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<Proof<F, C, D>> {
let config = &common_data.config;
let wires_cap = self.read_merkle_cap(config.fri_config.cap_height)?;
let plonk_zs_partial_products_cap = self.read_merkle_cap(config.fri_config.cap_height)?;
let quotient_polys_cap = self.read_merkle_cap(config.fri_config.cap_height)?;
let openings = self.read_opening_set(common_data)?;
let opening_proof = self.read_fri_proof(common_data)?;
let openings = self.read_opening_set::<F, C, D>(common_data)?;
let opening_proof = self.read_fri_proof::<F, C, D>(common_data)?;
Ok(Proof {
wires_cap,
@ -415,7 +415,7 @@ impl Buffer {
const D: usize,
>(
&mut self,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<ProofWithPublicInputs<F, C, D>> {
let proof = self.read_proof(common_data)?;
let public_inputs = self.read_field_vec(
@ -460,7 +460,7 @@ impl Buffer {
const D: usize,
>(
&mut self,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<CompressedFriQueryRounds<F, C::Hasher, D>> {
let config = &common_data.config;
let original_indices = (0..config.fri_config.num_query_rounds)
@ -471,7 +471,7 @@ impl Buffer {
indices.dedup();
let mut pairs = Vec::new();
for &i in &indices {
pairs.push((i, self.read_fri_initial_proof(common_data)?));
pairs.push((i, self.read_fri_initial_proof::<F, C, D>(common_data)?));
}
let initial_trees_proofs = HashMap::from_iter(pairs);
@ -521,13 +521,13 @@ impl Buffer {
const D: usize,
>(
&mut self,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<CompressedFriProof<F, C::Hasher, D>> {
let config = &common_data.config;
let commit_phase_merkle_caps = (0..common_data.fri_params.reduction_arity_bits.len())
.map(|_| self.read_merkle_cap(config.fri_config.cap_height))
.collect::<Result<Vec<_>>>()?;
let query_round_proofs = self.read_compressed_fri_query_rounds(common_data)?;
let query_round_proofs = self.read_compressed_fri_query_rounds::<F, C, D>(common_data)?;
let final_poly = PolynomialCoeffs::new(
self.read_field_ext_vec::<F, D>(common_data.fri_params.final_poly_len())?,
);
@ -560,14 +560,14 @@ impl Buffer {
const D: usize,
>(
&mut self,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<CompressedProof<F, C, D>> {
let config = &common_data.config;
let wires_cap = self.read_merkle_cap(config.fri_config.cap_height)?;
let plonk_zs_partial_products_cap = self.read_merkle_cap(config.fri_config.cap_height)?;
let quotient_polys_cap = self.read_merkle_cap(config.fri_config.cap_height)?;
let openings = self.read_opening_set(common_data)?;
let opening_proof = self.read_compressed_fri_proof(common_data)?;
let openings = self.read_opening_set::<F, C, D>(common_data)?;
let opening_proof = self.read_compressed_fri_proof::<F, C, D>(common_data)?;
Ok(CompressedProof {
wires_cap,
@ -599,7 +599,7 @@ impl Buffer {
const D: usize,
>(
&mut self,
common_data: &CommonCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<CompressedProofWithPublicInputs<F, C, D>> {
let proof = self.read_compressed_proof(common_data)?;
let public_inputs = self.read_field_vec(

View File

@ -84,7 +84,7 @@ impl<F: RichField + Extendable<D>, const D: usize> U32AddManyGate<F, D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for U32AddManyGate<F, D> {
fn id(&self) -> String {
format!("{:?}", self)
format!("{self:?}")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -86,7 +86,7 @@ impl<F: RichField + Extendable<D>, const D: usize> U32ArithmeticGate<F, D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for U32ArithmeticGate<F, D> {
fn id(&self) -> String {
format!("{:?}", self)
format!("{self:?}")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -91,7 +91,7 @@ impl<F: RichField + Extendable<D>, const D: usize> ComparisonGate<F, D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for ComparisonGate<F, D> {
fn id(&self) -> String {
format!("{:?}<D={}>", self, D)
format!("{self:?}<D={D}>")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -48,7 +48,7 @@ impl<F: RichField + Extendable<D>, const D: usize> U32RangeCheckGate<F, D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for U32RangeCheckGate<F, D> {
fn id(&self) -> String {
format!("{:?}", self)
format!("{self:?}")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -80,7 +80,7 @@ impl<F: RichField + Extendable<D>, const D: usize> U32SubtractionGate<F, D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for U32SubtractionGate<F, D> {
fn id(&self) -> String {
format!("{:?}", self)
format!("{self:?}")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -31,7 +31,7 @@ pub fn log2_ceil(n: usize) -> usize {
/// Computes `log_2(n)`, panicking if `n` is not a power of two.
pub fn log2_strict(n: usize) -> usize {
let res = n.trailing_zeros();
assert!(n.wrapping_shr(res) == 1, "Not a power of two: {}", n);
assert!(n.wrapping_shr(res) == 1, "Not a power of two: {n}");
    // Tell the optimizer about the semantics of `log2_strict`, i.e. it can replace `n` with
// `1 << res` and vice versa.
assume(n == 1 << res);
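    // A quick worked instance of the contract:
    //
    //     assert_eq!(log2_strict(8), 3); // 8.trailing_zeros() == 3 and 8 >> 3 == 1
    //     // log2_strict(6) panics: 6.trailing_zeros() == 1, but 6 >> 1 == 3 != 1.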

View File

@ -84,7 +84,7 @@ impl<F: RichField + Extendable<D>, const D: usize> AssertLessThanGate<F, D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for AssertLessThanGate<F, D> {
fn id(&self) -> String {
format!("{:?}<D={}>", self, D)
format!("{self:?}<D={D}>")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {

View File

@ -74,7 +74,7 @@ impl<F: RichField + Extendable<D>, const D: usize> SwitchGate<F, D> {
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for SwitchGate<F, D> {
fn id(&self) -> String {
format!("{:?}<D={}>", self, D)
format!("{self:?}<D={D}>")
}
fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {