diff --git a/ecdsa/src/gadgets/glv.rs b/ecdsa/src/gadgets/glv.rs index 4302023e..539b5de3 100644 --- a/ecdsa/src/gadgets/glv.rs +++ b/ecdsa/src/gadgets/glv.rs @@ -55,8 +55,8 @@ impl, const D: usize> CircuitBuilderGlv ) { let k1 = self.add_virtual_nonnative_target_sized::(4); let k2 = self.add_virtual_nonnative_target_sized::(4); - let k1_neg = self.add_virtual_bool_target(); - let k2_neg = self.add_virtual_bool_target(); + let k1_neg = self.add_virtual_bool_target_unsafe(); + let k2_neg = self.add_virtual_bool_target_unsafe(); self.add_simple_generator(GLVDecompositionGenerator:: { k: k.clone(), diff --git a/ecdsa/src/gadgets/nonnative.rs b/ecdsa/src/gadgets/nonnative.rs index c6ff4753..29520bed 100644 --- a/ecdsa/src/gadgets/nonnative.rs +++ b/ecdsa/src/gadgets/nonnative.rs @@ -183,7 +183,7 @@ impl, const D: usize> CircuitBuilderNonNative b: &NonNativeTarget, ) -> NonNativeTarget { let sum = self.add_virtual_nonnative_target::(); - let overflow = self.add_virtual_bool_target(); + let overflow = self.add_virtual_bool_target_unsafe(); self.add_simple_generator(NonNativeAdditionGenerator:: { a: a.clone(), @@ -282,7 +282,7 @@ impl, const D: usize> CircuitBuilderNonNative b: &NonNativeTarget, ) -> NonNativeTarget { let diff = self.add_virtual_nonnative_target::(); - let overflow = self.add_virtual_bool_target(); + let overflow = self.add_virtual_bool_target_unsafe(); self.add_simple_generator(NonNativeSubtractionGenerator:: { a: a.clone(), diff --git a/evm/src/arithmetic/arithmetic_stark.rs b/evm/src/arithmetic/arithmetic_stark.rs index fc168cee..08813a3b 100644 --- a/evm/src/arithmetic/arithmetic_stark.rs +++ b/evm/src/arithmetic/arithmetic_stark.rs @@ -53,10 +53,14 @@ impl ArithmeticStark { compare::generate(local_values, columns::IS_GT); } else if local_values[columns::IS_ADDMOD].is_one() { modular::generate(local_values, columns::IS_ADDMOD); + } else if local_values[columns::IS_SUBMOD].is_one() { + modular::generate(local_values, columns::IS_SUBMOD); } else if 
local_values[columns::IS_MULMOD].is_one() { modular::generate(local_values, columns::IS_MULMOD); } else if local_values[columns::IS_MOD].is_one() { modular::generate(local_values, columns::IS_MOD); + } else if local_values[columns::IS_DIV].is_one() { + modular::generate(local_values, columns::IS_DIV); } else { todo!("the requested operation has not yet been implemented"); } diff --git a/evm/src/arithmetic/columns.rs b/evm/src/arithmetic/columns.rs index ee73f223..10bf72d9 100644 --- a/evm/src/arithmetic/columns.rs +++ b/evm/src/arithmetic/columns.rs @@ -26,7 +26,8 @@ pub const IS_SDIV: usize = IS_DIV + 1; pub const IS_MOD: usize = IS_SDIV + 1; pub const IS_SMOD: usize = IS_MOD + 1; pub const IS_ADDMOD: usize = IS_SMOD + 1; -pub const IS_MULMOD: usize = IS_ADDMOD + 1; +pub const IS_SUBMOD: usize = IS_ADDMOD + 1; +pub const IS_MULMOD: usize = IS_SUBMOD + 1; pub const IS_LT: usize = IS_MULMOD + 1; pub const IS_GT: usize = IS_LT + 1; pub const IS_SLT: usize = IS_GT + 1; @@ -37,9 +38,9 @@ pub const IS_SAR: usize = IS_SHR + 1; const START_SHARED_COLS: usize = IS_SAR + 1; -pub(crate) const ALL_OPERATIONS: [usize; 16] = [ - IS_ADD, IS_MUL, IS_SUB, IS_DIV, IS_SDIV, IS_MOD, IS_SMOD, IS_ADDMOD, IS_MULMOD, IS_LT, IS_GT, - IS_SLT, IS_SGT, IS_SHL, IS_SHR, IS_SAR, +pub(crate) const ALL_OPERATIONS: [usize; 17] = [ + IS_ADD, IS_MUL, IS_SUB, IS_DIV, IS_SDIV, IS_MOD, IS_SMOD, IS_ADDMOD, IS_SUBMOD, IS_MULMOD, + IS_LT, IS_GT, IS_SLT, IS_SGT, IS_SHL, IS_SHR, IS_SAR, ]; /// Within the Arithmetic Unit, there are shared columns which can be @@ -84,4 +85,11 @@ pub(crate) const MODULAR_AUX_INPUT: Range = AUX_INPUT_1; pub(crate) const MODULAR_MOD_IS_ZERO: usize = AUX_INPUT_1.end - 1; pub(crate) const MODULAR_OUT_AUX_RED: Range = AUX_INPUT_2; +#[allow(unused)] // TODO: Will be used when hooking into the CPU +pub(crate) const DIV_NUMERATOR: Range = MODULAR_INPUT_0; +#[allow(unused)] // TODO: Will be used when hooking into the CPU +pub(crate) const DIV_DENOMINATOR: Range = MODULAR_MODULUS; 
+#[allow(unused)] // TODO: Will be used when hooking into the CPU +pub(crate) const DIV_OUTPUT: Range = MODULAR_QUO_INPUT.start..MODULAR_QUO_INPUT.start + 16; + pub const NUM_ARITH_COLUMNS: usize = START_SHARED_COLS + NUM_SHARED_COLS; diff --git a/evm/src/arithmetic/modular.rs b/evm/src/arithmetic/modular.rs index 53051cda..d0020166 100644 --- a/evm/src/arithmetic/modular.rs +++ b/evm/src/arithmetic/modular.rs @@ -1,4 +1,5 @@ -//! Support for the EVM modular instructions ADDMOD, MULMOD and MOD. +//! Support for the EVM modular instructions ADDMOD, MULMOD and MOD, +//! as well as DIV. //! //! This crate verifies an EVM modular instruction, which takes three //! 256-bit inputs A, B and M, and produces a 256-bit output C satisfying @@ -82,8 +83,11 @@ //! - if modulus is non-zero, correct output is obtained //! - if modulus is 0, then the test output < modulus, checking that //! the output is reduced, will fail, because output is non-negative. +//! +//! In the case of DIV, we do something similar, except that we "replace" +//! the modulus with "2^256" to force the quotient to be zero. -use num::{BigUint, Zero}; +use num::{bigint::Sign, BigInt, One, Zero}; use plonky2::field::extension::Extendable; use plonky2::field::packed::PackedField; use plonky2::field::types::Field; @@ -98,55 +102,65 @@ use crate::arithmetic::utils::*; use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; use crate::range_check_error; -/// Convert the base-2^16 representation of a number into a BigUint. +/// Convert the base-2^16 representation of a number into a BigInt. /// -/// Given `N` unsigned 16-bit values in `limbs`, return the BigUint +/// Given `N` signed (16 + ε)-bit values in `limbs`, return the BigInt /// /// \sum_{i=0}^{N-1} limbs[i] * β^i. /// -fn columns_to_biguint(limbs: &[i64; N]) -> BigUint { +/// This is basically "evaluate the given polynomial at β". 
Although +/// the input type is i64, the values must always be in (-2^16 - ε, +/// 2^16 + ε) because of the caller's range check on the inputs (the ε +/// allows us to convert calculated output, which can be bigger than +/// 2^16). +fn columns_to_bigint(limbs: &[i64; N]) -> BigInt { const BASE: i64 = 1i64 << LIMB_BITS; - // Although the input type is i64, the values must always be in - // [0, 2^16 + ε) because of the caller's range check on the inputs - // (the ε allows us to convert calculated output, which can be - // bigger than 2^16). - debug_assert!(limbs.iter().all(|&x| x >= 0)); - - let mut limbs_u32 = Vec::with_capacity(N / 2 + 1); + let mut pos_limbs_u32 = Vec::with_capacity(N / 2 + 1); + let mut neg_limbs_u32 = Vec::with_capacity(N / 2 + 1); let mut cy = 0i64; // cy is necessary to handle ε > 0 for i in 0..(N / 2) { let t = cy + limbs[2 * i] + BASE * limbs[2 * i + 1]; - limbs_u32.push(t as u32); - cy = t >> 32; + pos_limbs_u32.push(if t > 0 { t as u32 } else { 0u32 }); + neg_limbs_u32.push(if t < 0 { -t as u32 } else { 0u32 }); + cy = t / (1i64 << 32); } if N & 1 != 0 { // If N is odd we need to add the last limb on its own let t = cy + limbs[N - 1]; - limbs_u32.push(t as u32); - cy = t >> 32; + pos_limbs_u32.push(if t > 0 { t as u32 } else { 0u32 }); + neg_limbs_u32.push(if t < 0 { -t as u32 } else { 0u32 }); + cy = t / (1i64 << 32); } - limbs_u32.push(cy as u32); + pos_limbs_u32.push(if cy > 0 { cy as u32 } else { 0u32 }); + neg_limbs_u32.push(if cy < 0 { -cy as u32 } else { 0u32 }); - BigUint::from_slice(&limbs_u32) + let pos = BigInt::from_slice(Sign::Plus, &pos_limbs_u32); + let neg = BigInt::from_slice(Sign::Plus, &neg_limbs_u32); + pos - neg } -/// Convert a BigUint into a base-2^16 representation. +/// Convert a BigInt into a base-2^16 representation. 
/// -/// Given a BigUint `num`, return an array of `N` unsigned 16-bit +/// Given a BigInt `num`, return an array of `N` signed 16-bit /// values, say `limbs`, such that /// /// num = \sum_{i=0}^{N-1} limbs[i] * β^i. /// /// Note that `N` must be at least ceil(log2(num)/16) in order to be /// big enough to hold `num`. -fn biguint_to_columns(num: &BigUint) -> [i64; N] { +fn bigint_to_columns(num: &BigInt) -> [i64; N] { assert!(num.bits() <= 16 * N as u64); let mut output = [0i64; N]; for (i, limb) in num.iter_u32_digits().enumerate() { output[2 * i] = limb as u16 as i64; output[2 * i + 1] = (limb >> LIMB_BITS) as i64; } + if num.sign() == Sign::Minus { + for c in output.iter_mut() { + *c = -*c; + } + } output } @@ -156,6 +170,7 @@ fn biguint_to_columns(num: &BigUint) -> [i64; N] { /// zero if they are not used. fn generate_modular_op( lv: &mut [F; NUM_ARITH_COLUMNS], + filter: usize, operation: fn([i64; N_LIMBS], [i64; N_LIMBS]) -> [i64; 2 * N_LIMBS - 1], ) { // Inputs are all range-checked in [0, 2^16), so the "as i64" @@ -164,38 +179,54 @@ fn generate_modular_op( let input1_limbs = read_value_i64_limbs(lv, MODULAR_INPUT_1); let mut modulus_limbs = read_value_i64_limbs(lv, MODULAR_MODULUS); - // The use of BigUints is just to avoid having to implement - // modular reduction. - let mut modulus = columns_to_biguint(&modulus_limbs); + // BigInts are just used to avoid having to implement modular + // reduction. + let mut modulus = columns_to_bigint(&modulus_limbs); // constr_poly is initialised to the calculated input, and is - // used as such for the BigUint reduction; later, other values are + // used as such for the BigInt reduction; later, other values are // added/subtracted, which is where its meaning as the "constraint // polynomial" comes in. 
let mut constr_poly = [0i64; 2 * N_LIMBS]; constr_poly[..2 * N_LIMBS - 1].copy_from_slice(&operation(input0_limbs, input1_limbs)); + // two_exp_256 == 2^256 + let two_exp_256 = { + let mut t = BigInt::zero(); + t.set_bit(256, true); + t + }; + let mut mod_is_zero = F::ZERO; if modulus.is_zero() { - modulus += 1u32; - modulus_limbs[0] += 1i64; + if filter == columns::IS_DIV { + // set modulus = 2^256 + modulus = two_exp_256.clone(); + // modulus_limbs don't play a role below + } else { + // set modulus = 1 + modulus = BigInt::one(); + modulus_limbs[0] = 1i64; + } mod_is_zero = F::ONE; } - let input = columns_to_biguint(&constr_poly); + let input = columns_to_bigint(&constr_poly); // modulus != 0 here, because, if the given modulus was zero, then - // we added 1 to it above. - let output = &input % &modulus; - let output_limbs = biguint_to_columns::(&output); - let quot = (&input - &output) / &modulus; // exact division - let quot_limbs = biguint_to_columns::<{ 2 * N_LIMBS }>("); + // it was set to 1 or 2^256 above + let mut output = &input % &modulus; + // output will be -ve (but > -modulus) if input was -ve, so we can + // add modulus to obtain a "canonical" +ve output. 
+ if output.sign() == Sign::Minus { + output += &modulus; + } + let output_limbs = bigint_to_columns::(&output); + let quot = (&input - &output) / &modulus; // exact division; can be -ve + let quot_limbs = bigint_to_columns::<{ 2 * N_LIMBS }>("); - // two_exp_256 == 2^256 - let mut two_exp_256 = BigUint::zero(); - two_exp_256.set_bit(256, true); // output < modulus here, so the proof requires (output - modulus) % 2^256: - let out_aux_red = biguint_to_columns::(&(two_exp_256 + output - modulus)); + let out_aux_red = bigint_to_columns::(&(two_exp_256 + output - modulus)); // constr_poly is the array of coefficients of the polynomial // @@ -215,7 +246,7 @@ fn generate_modular_op( lv[MODULAR_OUTPUT].copy_from_slice(&output_limbs.map(|c| F::from_canonical_i64(c))); lv[MODULAR_OUT_AUX_RED].copy_from_slice(&out_aux_red.map(|c| F::from_canonical_i64(c))); - lv[MODULAR_QUO_INPUT].copy_from_slice("_limbs.map(|c| F::from_canonical_i64(c))); + lv[MODULAR_QUO_INPUT].copy_from_slice("_limbs.map(|c| F::from_noncanonical_i64(c))); lv[MODULAR_AUX_INPUT].copy_from_slice(&aux_limbs.map(|c| F::from_noncanonical_i64(c))); lv[MODULAR_MOD_IS_ZERO] = mod_is_zero; } @@ -225,9 +256,10 @@ fn generate_modular_op( /// `filter` must be one of `columns::IS_{ADDMOD,MULMOD,MOD}`. 
pub(crate) fn generate(lv: &mut [F; NUM_ARITH_COLUMNS], filter: usize) { match filter { - columns::IS_ADDMOD => generate_modular_op(lv, pol_add), - columns::IS_MULMOD => generate_modular_op(lv, pol_mul_wide), - columns::IS_MOD => generate_modular_op(lv, |a, _| pol_extend(a)), + columns::IS_ADDMOD => generate_modular_op(lv, filter, pol_add), + columns::IS_SUBMOD => generate_modular_op(lv, filter, pol_sub), + columns::IS_MULMOD => generate_modular_op(lv, filter, pol_mul_wide), + columns::IS_MOD | columns::IS_DIV => generate_modular_op(lv, filter, |a, _| pol_extend(a)), _ => panic!("generate modular operation called with unknown opcode"), } } @@ -240,7 +272,6 @@ pub(crate) fn generate(lv: &mut [F; NUM_ARITH_COLUMNS], filter: us /// c(x) + q(x) * m(x) + (x - β) * s(x) /// /// and check consistency when m = 0, and that c is reduced. -#[allow(clippy::needless_range_loop)] fn modular_constr_poly( lv: &[P; NUM_ARITH_COLUMNS], yield_constr: &mut ConstraintConsumer

, @@ -268,19 +299,33 @@ fn modular_constr_poly( // modulus = 0. modulus[0] += mod_is_zero; - let output = &lv[MODULAR_OUTPUT]; + let mut output = read_value::(lv, MODULAR_OUTPUT); + + // Needed to compensate for adding mod_is_zero to modulus above, + // since the call eval_packed_generic_lt() below subtracts modulus + // verify in the case of a DIV. + output[0] += mod_is_zero * lv[IS_DIV]; // Verify that the output is reduced, i.e. output < modulus. let out_aux_red = &lv[MODULAR_OUT_AUX_RED]; - let is_less_than = P::ONES; + // this sets is_less_than to 1 unless we get mod_is_zero when + // doing a DIV; in that case, we need is_less_than=0, since the + // function checks + // + // output - modulus == out_aux_red + is_less_than*2^256 + // + // and we were given output = out_aux_red + let is_less_than = P::ONES - mod_is_zero * lv[IS_DIV]; eval_packed_generic_lt( yield_constr, filter, - output, + &output, &modulus, out_aux_red, is_less_than, ); + // restore output[0] + output[0] -= mod_is_zero * lv[IS_DIV]; // prod = q(x) * m(x) let quot = read_value::<{ 2 * N_LIMBS }, _>(lv, MODULAR_QUO_INPUT); @@ -292,7 +337,7 @@ fn modular_constr_poly( // constr_poly = c(x) + q(x) * m(x) let mut constr_poly: [_; 2 * N_LIMBS] = prod[0..2 * N_LIMBS].try_into().unwrap(); - pol_add_assign(&mut constr_poly, output); + pol_add_assign(&mut constr_poly, &output); // constr_poly = c(x) + q(x) * m(x) + (x - β) * s(x) let mut aux = read_value::<{ 2 * N_LIMBS }, _>(lv, MODULAR_AUX_INPUT); @@ -310,7 +355,11 @@ pub(crate) fn eval_packed_generic( ) { // NB: The CTL code guarantees that filter is 0 or 1, i.e. that // only one of the operations below is "live". 
- let filter = lv[columns::IS_ADDMOD] + lv[columns::IS_MULMOD] + lv[columns::IS_MOD]; + let filter = lv[columns::IS_ADDMOD] + + lv[columns::IS_MULMOD] + + lv[columns::IS_MOD] + + lv[columns::IS_SUBMOD] + + lv[columns::IS_DIV]; // constr_poly has 2*N_LIMBS limbs let constr_poly = modular_constr_poly(lv, yield_constr, filter); @@ -319,13 +368,15 @@ pub(crate) fn eval_packed_generic( let input1 = read_value(lv, MODULAR_INPUT_1); let add_input = pol_add(input0, input1); + let sub_input = pol_sub(input0, input1); let mul_input = pol_mul_wide(input0, input1); let mod_input = pol_extend(input0); for (input, &filter) in [ (&add_input, &lv[columns::IS_ADDMOD]), + (&sub_input, &lv[columns::IS_SUBMOD]), (&mul_input, &lv[columns::IS_MULMOD]), - (&mod_input, &lv[columns::IS_MOD]), + (&mod_input, &(lv[columns::IS_MOD] + lv[columns::IS_DIV])), ] { // Need constr_poly_copy to be the first argument to // pol_sub_assign, since it is the longer of the two @@ -367,18 +418,25 @@ fn modular_constr_poly_ext_circuit, const D: usize> modulus[0] = builder.add_extension(modulus[0], mod_is_zero); - let output = &lv[MODULAR_OUTPUT]; + let mut output = read_value::(lv, MODULAR_OUTPUT); + output[0] = builder.mul_add_extension(mod_is_zero, lv[IS_DIV], output[0]); + let out_aux_red = &lv[MODULAR_OUT_AUX_RED]; - let is_less_than = builder.one_extension(); + let one = builder.one_extension(); + let is_less_than = + builder.arithmetic_extension(F::NEG_ONE, F::ONE, mod_is_zero, lv[IS_DIV], one); + eval_ext_circuit_lt( builder, yield_constr, filter, - output, + &output, &modulus, out_aux_red, is_less_than, ); + output[0] = + builder.arithmetic_extension(F::NEG_ONE, F::ONE, mod_is_zero, lv[IS_DIV], output[0]); let quot = read_value::<{ 2 * N_LIMBS }, _>(lv, MODULAR_QUO_INPUT); let prod = pol_mul_wide2_ext_circuit(builder, quot, modulus); @@ -388,7 +446,7 @@ fn modular_constr_poly_ext_circuit, const D: usize> } let mut constr_poly: [_; 2 * N_LIMBS] = prod[0..2 * N_LIMBS].try_into().unwrap(); - 
pol_add_assign_ext_circuit(builder, &mut constr_poly, output); + pol_add_assign_ext_circuit(builder, &mut constr_poly, &output); let mut aux = read_value::<{ 2 * N_LIMBS }, _>(lv, MODULAR_AUX_INPUT); aux[2 * N_LIMBS - 1] = builder.zero_extension(); @@ -406,8 +464,10 @@ pub(crate) fn eval_ext_circuit, const D: usize>( ) { let filter = builder.add_many_extension([ lv[columns::IS_ADDMOD], + lv[columns::IS_SUBMOD], lv[columns::IS_MULMOD], lv[columns::IS_MOD], + lv[columns::IS_DIV], ]); let constr_poly = modular_constr_poly_ext_circuit(lv, builder, yield_constr, filter); @@ -416,13 +476,16 @@ pub(crate) fn eval_ext_circuit, const D: usize>( let input1 = read_value(lv, MODULAR_INPUT_1); let add_input = pol_add_ext_circuit(builder, input0, input1); + let sub_input = pol_sub_ext_circuit(builder, input0, input1); let mul_input = pol_mul_wide_ext_circuit(builder, input0, input1); let mod_input = pol_extend_ext_circuit(builder, input0); + let mod_div_filter = builder.add_extension(lv[columns::IS_MOD], lv[columns::IS_DIV]); for (input, &filter) in [ (&add_input, &lv[columns::IS_ADDMOD]), + (&sub_input, &lv[columns::IS_SUBMOD]), (&mul_input, &lv[columns::IS_MULMOD]), - (&mod_input, &lv[columns::IS_MOD]), + (&mod_input, &mod_div_filter), ] { let mut constr_poly_copy = constr_poly; pol_sub_assign_ext_circuit(builder, &mut constr_poly_copy, input); @@ -458,8 +521,10 @@ mod tests { // if `IS_ADDMOD == 0`, then the constraints should be met even // if all values are garbage. 
lv[IS_ADDMOD] = F::ZERO; + lv[IS_SUBMOD] = F::ZERO; lv[IS_MULMOD] = F::ZERO; lv[IS_MOD] = F::ZERO; + lv[IS_DIV] = F::ZERO; let mut constraint_consumer = ConstraintConsumer::new( vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)], @@ -480,11 +545,13 @@ mod tests { let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25); let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::rand_from_rng(&mut rng)); - for op_filter in [IS_ADDMOD, IS_MOD, IS_MULMOD] { + for op_filter in [IS_ADDMOD, IS_DIV, IS_SUBMOD, IS_MOD, IS_MULMOD] { // Reset operation columns, then select one lv[IS_ADDMOD] = F::ZERO; + lv[IS_SUBMOD] = F::ZERO; lv[IS_MULMOD] = F::ZERO; lv[IS_MOD] = F::ZERO; + lv[IS_DIV] = F::ZERO; lv[op_filter] = F::ONE; for i in 0..N_RND_TESTS { @@ -529,11 +596,13 @@ mod tests { let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25); let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::rand_from_rng(&mut rng)); - for op_filter in [IS_ADDMOD, IS_MOD, IS_MULMOD] { + for op_filter in [IS_ADDMOD, IS_SUBMOD, IS_DIV, IS_MOD, IS_MULMOD] { // Reset operation columns, then select one lv[IS_ADDMOD] = F::ZERO; + lv[IS_SUBMOD] = F::ZERO; lv[IS_MULMOD] = F::ZERO; lv[IS_MOD] = F::ZERO; + lv[IS_DIV] = F::ZERO; lv[op_filter] = F::ONE; for _i in 0..N_RND_TESTS { @@ -548,7 +617,11 @@ mod tests { generate(&mut lv, op_filter); // check that the correct output was generated - assert!(lv[MODULAR_OUTPUT].iter().all(|&c| c == F::ZERO)); + if op_filter == IS_DIV { + assert!(lv[DIV_OUTPUT].iter().all(|&c| c == F::ZERO)); + } else { + assert!(lv[MODULAR_OUTPUT].iter().all(|&c| c == F::ZERO)); + } let mut constraint_consumer = ConstraintConsumer::new( vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)], @@ -563,7 +636,11 @@ mod tests { .all(|&acc| acc == F::ZERO)); // Corrupt one output limb by setting it to a non-zero value - let random_oi = MODULAR_OUTPUT.start + rng.gen::() % N_LIMBS; + let random_oi = if op_filter == IS_DIV { + DIV_OUTPUT.start + rng.gen::() 
% N_LIMBS + } else { + MODULAR_OUTPUT.start + rng.gen::() % N_LIMBS + }; lv[random_oi] = F::from_canonical_u16(rng.gen_range(1..u16::MAX)); eval_packed_generic(&lv, &mut constraint_consumer); diff --git a/evm/src/arithmetic/utils.rs b/evm/src/arithmetic/utils.rs index 871a9646..74999ab4 100644 --- a/evm/src/arithmetic/utils.rs +++ b/evm/src/arithmetic/utils.rs @@ -118,6 +118,32 @@ pub(crate) fn pol_add_ext_circuit, const D: usize>( sum } +/// Return a(x) - b(x); returned array is bigger than necessary to +/// make the interface consistent with `pol_mul_wide`. +pub(crate) fn pol_sub(a: [T; N_LIMBS], b: [T; N_LIMBS]) -> [T; 2 * N_LIMBS - 1] +where + T: Sub + Copy + Default, +{ + let mut diff = pol_zero(); + for i in 0..N_LIMBS { + diff[i] = a[i] - b[i]; + } + diff +} + +pub(crate) fn pol_sub_ext_circuit, const D: usize>( + builder: &mut CircuitBuilder, + a: [ExtensionTarget; N_LIMBS], + b: [ExtensionTarget; N_LIMBS], +) -> [ExtensionTarget; 2 * N_LIMBS - 1] { + let zero = builder.zero_extension(); + let mut sum = [zero; 2 * N_LIMBS - 1]; + for i in 0..N_LIMBS { + sum[i] = builder.sub_extension(a[i], b[i]); + } + sum +} + /// a(x) -= b(x), but must have deg(a) >= deg(b). 
pub(crate) fn pol_sub_assign(a: &mut [T], b: &[T]) where diff --git a/evm/src/bin/assemble.rs b/evm/src/bin/assemble.rs index 68eeadf8..1cf3a67c 100644 --- a/evm/src/bin/assemble.rs +++ b/evm/src/bin/assemble.rs @@ -9,5 +9,5 @@ fn main() { args.next(); let file_contents: Vec<_> = args.map(|path| fs::read_to_string(path).unwrap()).collect(); let assembled = assemble_to_bytes(&file_contents[..]); - println!("{}", encode(&assembled)); + println!("{}", encode(assembled)); } diff --git a/evm/src/cpu/kernel/aggregator.rs b/evm/src/cpu/kernel/aggregator.rs index 5488ab67..194a93c8 100644 --- a/evm/src/cpu/kernel/aggregator.rs +++ b/evm/src/cpu/kernel/aggregator.rs @@ -42,6 +42,7 @@ pub(crate) fn combined_kernel() -> Kernel { include_str!("asm/memory/metadata.asm"), include_str!("asm/memory/packing.asm"), include_str!("asm/memory/txn_fields.asm"), + include_str!("asm/mpt/accounts.asm"), include_str!("asm/mpt/delete/delete.asm"), include_str!("asm/mpt/hash/hash.asm"), include_str!("asm/mpt/hash/hash_trie_specific.asm"), @@ -53,8 +54,8 @@ pub(crate) fn combined_kernel() -> Kernel { include_str!("asm/mpt/load/load.asm"), include_str!("asm/mpt/load/load_trie_specific.asm"), include_str!("asm/mpt/read.asm"), - include_str!("asm/mpt/storage_read.asm"), - include_str!("asm/mpt/storage_write.asm"), + include_str!("asm/mpt/storage/storage_read.asm"), + include_str!("asm/mpt/storage/storage_write.asm"), include_str!("asm/mpt/util.asm"), include_str!("asm/ripemd/box.asm"), include_str!("asm/ripemd/compression.asm"), @@ -80,6 +81,7 @@ pub(crate) fn combined_kernel() -> Kernel { include_str!("asm/transactions/type_2.asm"), include_str!("asm/util/assertions.asm"), include_str!("asm/util/basic_macros.asm"), + include_str!("asm/util/keccak.asm"), ]; let parsed_files = files.iter().map(|f| parse(f)).collect_vec(); diff --git a/evm/src/cpu/kernel/asm/memory/core.asm b/evm/src/cpu/kernel/asm/memory/core.asm index 2b4d2b68..f6bb99b6 100644 --- a/evm/src/cpu/kernel/asm/memory/core.asm +++ 
b/evm/src/cpu/kernel/asm/memory/core.asm @@ -393,3 +393,21 @@ %mstore_kernel_general_2 // stack: (empty) %endmacro + +%macro mload_main + // stack: offset + DUP1 + // stack: offset, offset + %update_msize + // stack: offset + %mload_current(@SEGMENT_MAIN_MEMORY) +%endmacro + +%macro mstore_main + // stack: offset, value + DUP1 + // stack: offset, offset, value + %update_msize + // stack: offset, value + %mstore_current(@SEGMENT_MAIN_MEMORY) +%endmacro diff --git a/evm/src/cpu/kernel/asm/memory/metadata.asm b/evm/src/cpu/kernel/asm/memory/metadata.asm index 644699e0..1a495682 100644 --- a/evm/src/cpu/kernel/asm/memory/metadata.asm +++ b/evm/src/cpu/kernel/asm/memory/metadata.asm @@ -45,3 +45,23 @@ %macro callvalue %mload_context_metadata(@CTX_METADATA_CALL_VALUE) %endmacro + +%macro msize + %mload_context_metadata(@CTX_METADATA_MSIZE) +%endmacro + +%macro update_msize + // stack: offset + %add_const(32) + // stack: 32 + offset + %div_const(32) + // stack: (offset+32)/32 = ceil_div_usize(offset+1, 32) + %mul_const(32) + // stack: ceil_div_usize(offset+1, 32) * 32 + %msize + // stack: current_msize, ceil_div_usize(offset+1, 32) * 32 + %max + // stack: new_msize + %mstore_context_metadata(@CTX_METADATA_MSIZE) +%endmacro + diff --git a/evm/src/cpu/kernel/asm/mpt/accounts.asm b/evm/src/cpu/kernel/asm/mpt/accounts.asm new file mode 100644 index 00000000..0e49da98 --- /dev/null +++ b/evm/src/cpu/kernel/asm/mpt/accounts.asm @@ -0,0 +1,53 @@ +// Return a pointer to the current account's data in the state trie. +%macro current_account_data + ADDRESS %mpt_read_state_trie + // stack: account_ptr + // account_ptr should be non-null as long as the prover provided the proper + // Merkle data. But a bad prover may not have, and we don't want return a + // null pointer for security reasons. + DUP1 ISZERO %jumpi(panic) + // stack: account_ptr +%endmacro + +// Returns a pointer to the root of the storage trie associated with the current account. 
+%macro current_storage_trie + // stack: (empty) + %current_account_data + // stack: account_ptr + %add_const(2) + // stack: storage_root_ptr_ptr + %mload_trie_data + // stack: storage_root_ptr +%endmacro + +global make_default_account: + PANIC // TODO + +// Create a copy of the given account. The copy can then safely be mutated as +// needed, while leaving the original account data untouched. +// +// This writes the new account's data to MPT data, but does not register the new +// account in the state trie. +// +// Pre stack: old_account_ptr, retdest +// Post stack: new_account_ptr +global make_account_copy: + // stack: old_account_ptr, retdest + %get_trie_data_size // pointer to new account we're about to create + // stack: new_account_ptr, old_account_ptr, retdest + + DUP2 %mload_trie_data %append_to_trie_data + DUP2 %add_const(1) %mload_trie_data %append_to_trie_data + DUP2 %add_const(3) %mload_trie_data %append_to_trie_data + SWAP1 %add_const(4) %mload_trie_data %append_to_trie_data + + // stack: new_account_ptr, retdest + SWAP1 + JUMP + +// Convenience macro to call make_account_copy and return where we left off. +%macro make_account_copy + %stack (old_account_ptr) -> (old_account_ptr, %%after) + %jump(make_account_copy) +%%after: +%endmacro diff --git a/evm/src/cpu/kernel/asm/mpt/hash/hash_trie_specific.asm b/evm/src/cpu/kernel/asm/mpt/hash/hash_trie_specific.asm index 4f9b58b4..39253b9f 100644 --- a/evm/src/cpu/kernel/asm/mpt/hash/hash_trie_specific.asm +++ b/evm/src/cpu/kernel/asm/mpt/hash/hash_trie_specific.asm @@ -95,4 +95,4 @@ encode_receipt: PANIC // TODO encode_storage_value: - PANIC // TODO + PANIC // TODO: RLP encode as variable-len scalar? 
diff --git a/evm/src/cpu/kernel/asm/mpt/insert/insert_trie_specific.asm b/evm/src/cpu/kernel/asm/mpt/insert/insert_trie_specific.asm index 4c03d96c..61630753 100644 --- a/evm/src/cpu/kernel/asm/mpt/insert/insert_trie_specific.asm +++ b/evm/src/cpu/kernel/asm/mpt/insert/insert_trie_specific.asm @@ -2,9 +2,10 @@ // Mutate the state trie, inserting the given key-value pair. global mpt_insert_state_trie: - // stack: num_nibbles, key, value_ptr, retdest - %stack (num_nibbles, key, value_ptr) - -> (num_nibbles, key, value_ptr, mpt_insert_state_trie_save) + // stack: key, value_ptr, retdest + %stack (key, value_ptr) + -> (key, value_ptr, mpt_insert_state_trie_save) + PUSH 64 // num_nibbles %mload_global_metadata(@GLOBAL_METADATA_STATE_TRIE_ROOT) // stack: state_root_ptr, num_nibbles, key, value_ptr, mpt_insert_state_trie_save, retdest %jump(mpt_insert) diff --git a/evm/src/cpu/kernel/asm/mpt/read.asm b/evm/src/cpu/kernel/asm/mpt/read.asm index d375bedc..08aa02c3 100644 --- a/evm/src/cpu/kernel/asm/mpt/read.asm +++ b/evm/src/cpu/kernel/asm/mpt/read.asm @@ -3,26 +3,26 @@ // state trie. Returns null if the address is not found. global mpt_read_state_trie: // stack: addr, retdest - // The key is the hash of the address. Since KECCAK_GENERAL takes input from - // memory, we will write addr bytes to SEGMENT_KERNEL_GENERAL[0..20] first. - %stack (addr) -> (0, @SEGMENT_KERNEL_GENERAL, 0, addr, 20, mpt_read_state_trie_after_mstore) - %jump(mstore_unpacking) -mpt_read_state_trie_after_mstore: - // stack: retdest - %stack () -> (0, @SEGMENT_KERNEL_GENERAL, 0, 20) // context, segment, offset, len - KECCAK_GENERAL + %addr_to_state_key // stack: key, retdest PUSH 64 // num_nibbles %mload_global_metadata(@GLOBAL_METADATA_STATE_TRIE_ROOT) // node_ptr // stack: node_ptr, num_nibbles, key, retdest %jump(mpt_read) +// Convenience macro to call mpt_read_state_trie and return where we left off. 
+%macro mpt_read_state_trie + %stack (addr) -> (addr, %%after) + %jump(mpt_read_state_trie) +%%after: +%endmacro + // Read a value from a MPT. // // Arguments: // - the virtual address of the trie to search in -// - the key, as a U256 // - the number of nibbles in the key (should start at 64) +// - the key, as a U256 // // This function returns a pointer to the value, or 0 if the key is not found. global mpt_read: diff --git a/evm/src/cpu/kernel/asm/mpt/storage/storage_read.asm b/evm/src/cpu/kernel/asm/mpt/storage/storage_read.asm new file mode 100644 index 00000000..d8801cda --- /dev/null +++ b/evm/src/cpu/kernel/asm/mpt/storage/storage_read.asm @@ -0,0 +1,30 @@ +// Read a word from the current account's storage trie. +// +// Pre stack: slot, retdest +// Post stack: value + +global storage_read: + // stack: slot, retdest + %stack (slot) -> (slot, after_storage_read) + %slot_to_storage_key + // stack: storage_key, after_storage_read, retdest + PUSH 64 // storage_key has 64 nibbles + %current_storage_trie + // stack: storage_root_ptr, 64, storage_key, after_storage_read, retdest + %jump(mpt_read) + +after_storage_read: + // stack: value_ptr, retdest + DUP1 %jumpi(storage_key_exists) + + // Storage key not found. Return default value_ptr = 0, + // which derefs to 0 since @SEGMENT_TRIE_DATA[0] = 0. + %stack (value_ptr, retdest) -> (retdest, 0) + JUMP + +storage_key_exists: + // stack: value_ptr, retdest + %mload_trie_data + // stack: value, retdest + SWAP1 + JUMP diff --git a/evm/src/cpu/kernel/asm/mpt/storage/storage_write.asm b/evm/src/cpu/kernel/asm/mpt/storage/storage_write.asm new file mode 100644 index 00000000..057e4bb5 --- /dev/null +++ b/evm/src/cpu/kernel/asm/mpt/storage/storage_write.asm @@ -0,0 +1,44 @@ +// Write a word to the current account's storage trie. +// +// Pre stack: slot, value, retdest +// Post stack: (empty) + +global storage_write: + // TODO: If value = 0, delete the key instead of inserting 0. 
+ // stack: slot, value, retdest + + // First we write the value to MPT data, and get a pointer to it. + %get_trie_data_size + // stack: value_ptr, slot, value, retdest + SWAP2 + // stack: value, slot, value_ptr, retdest + %append_to_trie_data + // stack: slot, value_ptr, retdest + + // Next, call mpt_insert on the current account's storage root. + %stack (slot, value_ptr) -> (slot, value_ptr, after_storage_insert) + %slot_to_storage_key + // stack: storage_key, value_ptr, after_storage_insert, retdest + PUSH 64 // storage_key has 64 nibbles + %current_storage_trie + // stack: storage_root_ptr, 64, storage_key, value_ptr, after_storage_insert, retdest + %jump(mpt_insert) + +after_storage_insert: + // stack: new_storage_root_ptr, retdest + %current_account_data + // stack: old_account_ptr, new_storage_root_ptr, retdest + %make_account_copy + // stack: new_account_ptr, new_storage_root_ptr, retdest + + // Update the copied account with our new storage root pointer. + %stack (new_account_ptr, new_storage_root_ptr) -> (new_account_ptr, new_storage_root_ptr, new_account_ptr) + %add_const(2) + // stack: new_account_storage_root_ptr_ptr, new_storage_root_ptr, new_account_ptr, retdest + %mstore_trie_data + // stack: new_account_ptr, retdest + + // Save this updated account to the state trie.
+ ADDRESS %addr_to_state_key + // stack: state_key, new_account_ptr, retdest + %jump(mpt_insert_state_trie) diff --git a/evm/src/cpu/kernel/asm/mpt/storage_read.asm b/evm/src/cpu/kernel/asm/mpt/storage_read.asm deleted file mode 100644 index 04fea17a..00000000 --- a/evm/src/cpu/kernel/asm/mpt/storage_read.asm +++ /dev/null @@ -1,2 +0,0 @@ -global storage_read: - // TODO diff --git a/evm/src/cpu/kernel/asm/mpt/storage_write.asm b/evm/src/cpu/kernel/asm/mpt/storage_write.asm deleted file mode 100644 index 940fb548..00000000 --- a/evm/src/cpu/kernel/asm/mpt/storage_write.asm +++ /dev/null @@ -1,2 +0,0 @@ -global storage_write: - // TODO diff --git a/evm/src/cpu/kernel/asm/mpt/util.asm b/evm/src/cpu/kernel/asm/mpt/util.asm index 0faa72f4..0f7689e1 100644 --- a/evm/src/cpu/kernel/asm/mpt/util.asm +++ b/evm/src/cpu/kernel/asm/mpt/util.asm @@ -165,3 +165,14 @@ SWAP4 %div_const(4) SWAP4 // bits_2 -> len_2 (in nibbles) // stack: len_common, key_common, len_1, key_1, len_2, key_2 %endmacro + +// Computes state_key = Keccak256(addr). Clobbers @SEGMENT_KERNEL_GENERAL. +%macro addr_to_state_key + %keccak256_word(20) +%endmacro + +// Given a storage slot (a 256-bit integer), computes storage_key = Keccak256(slot). +// Clobbers @SEGMENT_KERNEL_GENERAL. +%macro slot_to_storage_key + %keccak256_word(32) +%endmacro diff --git a/evm/src/cpu/kernel/asm/util/keccak.asm b/evm/src/cpu/kernel/asm/util/keccak.asm new file mode 100644 index 00000000..92ba8d38 --- /dev/null +++ b/evm/src/cpu/kernel/asm/util/keccak.asm @@ -0,0 +1,14 @@ +// Computes Keccak256(input_word). Clobbers @SEGMENT_KERNEL_GENERAL. +// +// Pre stack: input_word +// Post stack: hash +%macro keccak256_word(num_bytes) + // Since KECCAK_GENERAL takes its input from memory, we will first write + // input_word's bytes to @SEGMENT_KERNEL_GENERAL[0..$num_bytes]. 
+ %stack (word) -> (0, @SEGMENT_KERNEL_GENERAL, 0, word, $num_bytes, %%after_mstore) + %jump(mstore_unpacking) +%%after_mstore: + // stack: offset + %stack (offset) -> (0, @SEGMENT_KERNEL_GENERAL, 0, $num_bytes) // context, segment, offset, len + KECCAK_GENERAL +%endmacro diff --git a/evm/src/cpu/kernel/assembler.rs b/evm/src/cpu/kernel/assembler.rs index 09af71bf..aad2dd53 100644 --- a/evm/src/cpu/kernel/assembler.rs +++ b/evm/src/cpu/kernel/assembler.rs @@ -83,7 +83,7 @@ impl Macro { self.params .iter() .position(|p| p == param) - .unwrap_or_else(|| panic!("No such param: {} {:?}", param, &self.params)) + .unwrap_or_else(|| panic!("No such param: {param} {:?}", &self.params)) } } @@ -140,7 +140,7 @@ fn find_macros(files: &[File]) -> HashMap { items: items.clone(), }; let old = macros.insert(signature.clone(), macro_); - assert!(old.is_none(), "Duplicate macro signature: {:?}", signature); + assert!(old.is_none(), "Duplicate macro signature: {signature:?}"); } } } @@ -186,9 +186,9 @@ fn expand_macro_call( }; let macro_ = macros .get(&signature) - .unwrap_or_else(|| panic!("No such macro: {:?}", signature)); + .unwrap_or_else(|| panic!("No such macro: {signature:?}")); - let get_actual_label = |macro_label| format!("@{}.{}", macro_counter, macro_label); + let get_actual_label = |macro_label| format!("@{macro_counter}.{macro_label}"); let get_arg = |var| { let param_index = macro_.get_param_index(var); @@ -242,7 +242,7 @@ fn inline_constants(body: Vec, constants: &HashMap) -> Vec { - panic!("Item should have been expanded already: {:?}", item); + panic!("Item should have been expanded already: {item:?}"); } Item::GlobalLabelDeclaration(label) => { let old = global_labels.insert(label.clone(), *offset); - assert!(old.is_none(), "Duplicate global label: {}", label); + assert!(old.is_none(), "Duplicate global label: {label}"); } Item::LocalLabelDeclaration(label) => { let old = local_labels.insert(label.clone(), *offset); - assert!(old.is_none(), "Duplicate local 
label: {}", label); + assert!(old.is_none(), "Duplicate local label: {label}"); } Item::Push(target) => *offset += 1 + push_target_size(target) as usize, Item::ProverInput(prover_input_fn) => { @@ -319,7 +319,7 @@ fn assemble_file( | Item::Repeat(_, _) | Item::StackManipulation(_, _) | Item::MacroLabelDeclaration(_) => { - panic!("Item should have been expanded already: {:?}", item); + panic!("Item should have been expanded already: {item:?}"); } Item::GlobalLabelDeclaration(_) | Item::LocalLabelDeclaration(_) => { // Nothing to do; we processed labels in the prior phase. @@ -331,7 +331,7 @@ fn assemble_file( let offset = local_labels .get(&label) .or_else(|| global_labels.get(&label)) - .unwrap_or_else(|| panic!("No such label: {}", label)); + .unwrap_or_else(|| panic!("No such label: {label}")); // We want the BYTES_PER_OFFSET least significant bytes in BE order. // It's easiest to rev the first BYTES_PER_OFFSET bytes of the LE encoding. (0..BYTES_PER_OFFSET) @@ -339,9 +339,9 @@ fn assemble_file( .map(|i| offset.to_le_bytes()[i as usize]) .collect() } - PushTarget::MacroLabel(v) => panic!("Macro label not in a macro: {}", v), - PushTarget::MacroVar(v) => panic!("Variable not in a macro: {}", v), - PushTarget::Constant(c) => panic!("Constant wasn't inlined: {}", c), + PushTarget::MacroLabel(v) => panic!("Macro label not in a macro: {v}"), + PushTarget::MacroVar(v) => panic!("Variable not in a macro: {v}"), + PushTarget::Constant(c) => panic!("Constant wasn't inlined: {c}"), }; code.push(get_push_opcode(target_bytes.len() as u8)); code.extend(target_bytes); @@ -362,9 +362,9 @@ fn push_target_size(target: &PushTarget) -> u8 { match target { PushTarget::Literal(n) => u256_to_trimmed_be_bytes(n).len() as u8, PushTarget::Label(_) => BYTES_PER_OFFSET, - PushTarget::MacroLabel(v) => panic!("Macro label not in a macro: {}", v), - PushTarget::MacroVar(v) => panic!("Variable not in a macro: {}", v), - PushTarget::Constant(c) => panic!("Constant wasn't inlined: {}", c), + 
PushTarget::MacroLabel(v) => panic!("Macro label not in a macro: {v}"), + PushTarget::MacroVar(v) => panic!("Variable not in a macro: {v}"), + PushTarget::Constant(c) => panic!("Constant wasn't inlined: {c}"), } } diff --git a/evm/src/cpu/kernel/constants/context_metadata.rs b/evm/src/cpu/kernel/constants/context_metadata.rs index a2c460fc..fab74373 100644 --- a/evm/src/cpu/kernel/constants/context_metadata.rs +++ b/evm/src/cpu/kernel/constants/context_metadata.rs @@ -23,10 +23,12 @@ pub(crate) enum ContextMetadata { /// Pointer to the initial version of the state trie, at the creation of this context. Used when /// we need to revert a context. StateTrieCheckpointPointer = 9, + /// Size of the active main memory. + MSize = 10, } impl ContextMetadata { - pub(crate) const COUNT: usize = 10; + pub(crate) const COUNT: usize = 11; pub(crate) fn all() -> [Self; Self::COUNT] { [ @@ -40,6 +42,7 @@ impl ContextMetadata { Self::CallValue, Self::Static, Self::StateTrieCheckpointPointer, + Self::MSize, ] } @@ -56,6 +59,7 @@ impl ContextMetadata { ContextMetadata::CallValue => "CTX_METADATA_CALL_VALUE", ContextMetadata::Static => "CTX_METADATA_STATIC", ContextMetadata::StateTrieCheckpointPointer => "CTX_METADATA_STATE_TRIE_CHECKPOINT_PTR", + ContextMetadata::MSize => "CTX_METADATA_MSIZE", } } } diff --git a/evm/src/cpu/kernel/cost_estimator.rs b/evm/src/cpu/kernel/cost_estimator.rs index 3dfcf63a..ae837647 100644 --- a/evm/src/cpu/kernel/cost_estimator.rs +++ b/evm/src/cpu/kernel/cost_estimator.rs @@ -21,7 +21,7 @@ fn cost_estimate_item(item: &Item) -> u32 { Push(Label(_)) => cost_estimate_push(BYTES_PER_OFFSET as usize), ProverInput(_) => 1, StandardOp(op) => cost_estimate_standard_op(op.as_str()), - _ => panic!("Unexpected item: {:?}", item), + _ => panic!("Unexpected item: {item:?}"), } } diff --git a/evm/src/cpu/kernel/interpreter.rs b/evm/src/cpu/kernel/interpreter.rs index c12459ab..5c2329e5 100644 --- a/evm/src/cpu/kernel/interpreter.rs +++ 
b/evm/src/cpu/kernel/interpreter.rs @@ -9,6 +9,7 @@ use plonky2::field::goldilocks_field::GoldilocksField; use crate::cpu::kernel::aggregator::KERNEL; use crate::cpu::kernel::assembler::Kernel; +use crate::cpu::kernel::constants::context_metadata::ContextMetadata; use crate::cpu::kernel::constants::global_metadata::GlobalMetadata; use crate::cpu::kernel::constants::txn_fields::NormalizedTxnField; use crate::generation::memory::{MemoryContextState, MemorySegmentState}; @@ -45,9 +46,7 @@ impl InterpreterMemory { mem } -} -impl InterpreterMemory { fn mload_general(&self, context: usize, segment: Segment, offset: usize) -> U256 { let value = self.context_memory[context].segments[segment as usize].get(offset); assert!( @@ -328,6 +327,8 @@ impl<'a> Interpreter<'a> { if self.debug_offsets.contains(&self.offset) { println!("At {}, stack={:?}", self.offset_name(), self.stack()); + } else if let Some(label) = self.offset_label() { + println!("At {label}"); } Ok(()) @@ -335,12 +336,16 @@ impl<'a> Interpreter<'a> { /// Get a string representation of the current offset for debugging purposes. fn offset_name(&self) -> String { + self.offset_label() + .unwrap_or_else(|| self.offset.to_string()) + } + + fn offset_label(&self) -> Option { // TODO: Not sure we should use KERNEL? Interpreter is more general in other places. - let label = KERNEL + KERNEL .global_labels .iter() - .find_map(|(k, v)| (*v == self.offset).then(|| k.clone())); - label.unwrap_or_else(|| self.offset.to_string()) + .find_map(|(k, v)| (*v == self.offset).then(|| k.clone())) } fn run_stop(&mut self) { @@ -511,6 +516,8 @@ impl<'a> Interpreter<'a> { fn run_keccak_general(&mut self) { let context = self.pop().as_usize(); let segment = Segment::all()[self.pop().as_usize()]; + // Not strictly needed but here to avoid surprises with MSIZE. 
+ assert_ne!(segment, Segment::MainMemory, "Call KECCAK256 instead."); let offset = self.pop().as_usize(); let size = self.pop().as_usize(); let bytes = (offset..offset + size) @@ -590,11 +597,10 @@ impl<'a> Interpreter<'a> { } fn run_msize(&mut self) { - let num_bytes = self.memory.context_memory[self.context].segments - [Segment::MainMemory as usize] - .content - .len(); - self.push(U256::from(num_bytes)); + self.push( + self.memory.context_memory[self.context].segments[Segment::ContextMetadata as usize] + .get(ContextMetadata::MSize as usize), + ) } fn run_jumpdest(&mut self) { diff --git a/evm/src/cpu/kernel/opcodes.rs b/evm/src/cpu/kernel/opcodes.rs index 20601267..31074ff6 100644 --- a/evm/src/cpu/kernel/opcodes.rs +++ b/evm/src/cpu/kernel/opcodes.rs @@ -134,6 +134,6 @@ pub(crate) fn get_opcode(mnemonic: &str) -> u8 { "REVERT" => 0xfd, "INVALID" => 0xfe, "SELFDESTRUCT" => 0xff, - _ => panic!("Unrecognized mnemonic {}", mnemonic), + _ => panic!("Unrecognized mnemonic {mnemonic}"), } } diff --git a/evm/src/cpu/kernel/stack/stack_manipulation.rs b/evm/src/cpu/kernel/stack/stack_manipulation.rs index 36e4b83a..73a24029 100644 --- a/evm/src/cpu/kernel/stack/stack_manipulation.rs +++ b/evm/src/cpu/kernel/stack/stack_manipulation.rs @@ -35,7 +35,7 @@ fn expand(names: Vec, replacements: Vec) -> stack_blocks.insert(name.clone(), n); (0..n) .map(|i| { - let literal_name = format!("@{}.{}", name, i); + let literal_name = format!("@{name}.{i}"); StackItem::NamedItem(literal_name) }) .collect_vec() @@ -52,7 +52,7 @@ fn expand(names: Vec, replacements: Vec) -> let n = *stack_blocks.get(&name).unwrap(); (0..n) .map(|i| { - let literal_name = format!("@{}.{}", name, i); + let literal_name = format!("@{name}.{i}"); StackItem::NamedItem(literal_name) }) .collect_vec() @@ -64,7 +64,7 @@ fn expand(names: Vec, replacements: Vec) -> StackReplacement::MacroLabel(_) | StackReplacement::MacroVar(_) | StackReplacement::Constant(_) => { - panic!("Should have been expanded already: 
{:?}", item) + panic!("Should have been expanded already: {item:?}") } }) .collect_vec(); @@ -157,7 +157,7 @@ fn shortest_path( } } - panic!("No path found from {:?} to {:?}", src, dst) + panic!("No path found from {src:?} to {dst:?}") } /// A node in the priority queue used by Dijkstra's algorithm. @@ -279,7 +279,7 @@ impl StackOp { PushTarget::MacroLabel(_) | PushTarget::MacroVar(_) | PushTarget::Constant(_) => { - panic!("Target should have been expanded already: {:?}", target) + panic!("Target should have been expanded already: {target:?}") } }; // This is just a rough estimate; we can update it after implementing PUSH. @@ -326,8 +326,8 @@ impl StackOp { match self { StackOp::Push(target) => Item::Push(target), Pop => Item::StandardOp("POP".into()), - StackOp::Dup(n) => Item::StandardOp(format!("DUP{}", n)), - StackOp::Swap(n) => Item::StandardOp(format!("SWAP{}", n)), + StackOp::Dup(n) => Item::StandardOp(format!("DUP{n}")), + StackOp::Swap(n) => Item::StandardOp(format!("SWAP{n}")), } } } diff --git a/evm/src/cpu/kernel/tests/curve_ops.rs b/evm/src/cpu/kernel/tests/curve_ops.rs index 0aaa94ea..9ba24185 100644 --- a/evm/src/cpu/kernel/tests/curve_ops.rs +++ b/evm/src/cpu/kernel/tests/curve_ops.rs @@ -150,7 +150,7 @@ mod bn { assert_eq!(stack, vec![U256::MAX, U256::MAX]); // Multiple calls - let ec_mul_hex = format!("0x{:x}", ec_mul); + let ec_mul_hex = format!("0x{ec_mul:x}"); let initial_stack = u256ify([ "0xdeadbeef", s, @@ -288,7 +288,7 @@ mod secp { assert_eq!(stack, u256ify([identity.1, identity.0])?); // Multiple calls - let ec_mul_hex = format!("0x{:x}", ec_mul); + let ec_mul_hex = format!("0x{ec_mul:x}"); let initial_stack = u256ify([ "0xdeadbeef", s, diff --git a/evm/src/cpu/kernel/tests/mpt/insert.rs b/evm/src/cpu/kernel/tests/mpt/insert.rs index 3a52948d..35ab6e80 100644 --- a/evm/src/cpu/kernel/tests/mpt/insert.rs +++ b/evm/src/cpu/kernel/tests/mpt/insert.rs @@ -6,18 +6,20 @@ use super::nibbles; use crate::cpu::kernel::aggregator::KERNEL; use 
crate::cpu::kernel::constants::global_metadata::GlobalMetadata; use crate::cpu::kernel::interpreter::Interpreter; -use crate::cpu::kernel::tests::mpt::{test_account_1_rlp, test_account_2}; +use crate::cpu::kernel::tests::mpt::{ + nibbles_64, nibbles_count, test_account_1_rlp, test_account_2, +}; use crate::generation::mpt::{all_mpt_prover_inputs_reversed, AccountRlp}; use crate::generation::TrieInputs; #[test] fn mpt_insert_empty() -> Result<()> { - test_state_trie(Default::default(), nibbles(0xABC), test_account_2()) + test_state_trie(Default::default(), nibbles_64(0xABC), test_account_2()) } #[test] fn mpt_insert_leaf_identical_keys() -> Result<()> { - let key = nibbles(0xABC); + let key = nibbles_64(0xABC); let state_trie = PartialTrie::Leaf { nibbles: key, value: test_account_1_rlp(), @@ -28,37 +30,39 @@ fn mpt_insert_leaf_identical_keys() -> Result<()> { #[test] fn mpt_insert_leaf_nonoverlapping_keys() -> Result<()> { let state_trie = PartialTrie::Leaf { - nibbles: nibbles(0xABC), + nibbles: nibbles_64(0xABC), value: test_account_1_rlp(), }; - test_state_trie(state_trie, nibbles(0x123), test_account_2()) + test_state_trie(state_trie, nibbles_64(0x123), test_account_2()) } #[test] fn mpt_insert_leaf_overlapping_keys() -> Result<()> { let state_trie = PartialTrie::Leaf { - nibbles: nibbles(0xABC), + nibbles: nibbles_64(0xABC), value: test_account_1_rlp(), }; - test_state_trie(state_trie, nibbles(0xADE), test_account_2()) + test_state_trie(state_trie, nibbles_64(0xADE), test_account_2()) } #[test] +#[ignore] // TODO: Not valid for state trie, all keys have same len. fn mpt_insert_leaf_insert_key_extends_leaf_key() -> Result<()> { let state_trie = PartialTrie::Leaf { nibbles: nibbles(0xABC), value: test_account_1_rlp(), }; - test_state_trie(state_trie, nibbles(0xABCDE), test_account_2()) + test_state_trie(state_trie, nibbles_64(0xABCDE), test_account_2()) } #[test] +#[ignore] // TODO: Not valid for state trie, all keys have same len. 
fn mpt_insert_leaf_leaf_key_extends_insert_key() -> Result<()> { let state_trie = PartialTrie::Leaf { nibbles: nibbles(0xABCDE), value: test_account_1_rlp(), }; - test_state_trie(state_trie, nibbles(0xABC), test_account_2()) + test_state_trie(state_trie, nibbles_64(0xABC), test_account_2()) } #[test] @@ -69,7 +73,7 @@ fn mpt_insert_branch_replacing_empty_child() -> Result<()> { value: vec![], }; - test_state_trie(state_trie, nibbles(0xABC), test_account_2()) + test_state_trie(state_trie, nibbles_64(0xABC), test_account_2()) } #[test] @@ -92,7 +96,7 @@ fn mpt_insert_extension_nonoverlapping_keys() -> Result<()> { } .into(), }; - test_state_trie(state_trie, nibbles(0x12345), test_account_2()) + test_state_trie(state_trie, nibbles_64(0x12345), test_account_2()) } #[test] @@ -115,29 +119,33 @@ fn mpt_insert_extension_insert_key_extends_node_key() -> Result<()> { } .into(), }; - test_state_trie(state_trie, nibbles(0xABCDEF), test_account_2()) + test_state_trie(state_trie, nibbles_64(0xABCDEF), test_account_2()) } #[test] fn mpt_insert_branch_to_leaf_same_key() -> Result<()> { let leaf = PartialTrie::Leaf { - nibbles: nibbles(0xBCD), + nibbles: nibbles_count(0xBCD, 63), value: test_account_1_rlp(), } .into(); + let mut children = std::array::from_fn(|_| PartialTrie::Empty.into()); - children[0xA] = leaf; + children[0] = leaf; let state_trie = PartialTrie::Branch { children, value: vec![], }; - test_state_trie(state_trie, nibbles(0xABCD), test_account_2()) + test_state_trie(state_trie, nibbles_64(0xABCD), test_account_2()) } /// Note: The account's storage_root is ignored, as we can't insert a new storage_root without the /// accompanying trie data. An empty trie's storage_root is used instead. fn test_state_trie(state_trie: PartialTrie, k: Nibbles, mut account: AccountRlp) -> Result<()> { + assert_eq!(k.count, 64); + + // Ignore any storage_root; see documentation note. 
account.storage_root = PartialTrie::Empty.calc_hash(); let trie_inputs = TrieInputs { @@ -177,7 +185,6 @@ fn test_state_trie(state_trie: PartialTrie, k: Nibbles, mut account: AccountRlp) interpreter.push(0xDEADBEEFu32.into()); interpreter.push(value_ptr.into()); // value_ptr interpreter.push(k.packed); // key - interpreter.push(k.count.into()); // num_nibbles interpreter.run()?; assert_eq!( diff --git a/evm/src/cpu/kernel/tests/mpt/mod.rs b/evm/src/cpu/kernel/tests/mpt/mod.rs index 2c7999df..4ac6396e 100644 --- a/evm/src/cpu/kernel/tests/mpt/mod.rs +++ b/evm/src/cpu/kernel/tests/mpt/mod.rs @@ -13,13 +13,22 @@ mod read; /// Note that this preserves all nibbles (eg. `0x123` is not interpreted as `0x0123`). pub(crate) fn nibbles>(v: T) -> Nibbles { let packed = v.into(); - Nibbles { count: Nibbles::get_num_nibbles_in_key(&packed), packed, } } +pub(crate) fn nibbles_64>(v: T) -> Nibbles { + let packed = v.into(); + Nibbles { count: 64, packed } +} + +pub(crate) fn nibbles_count>(v: T, count: usize) -> Nibbles { + let packed = v.into(); + Nibbles { count, packed } +} + pub(crate) fn test_account_1() -> AccountRlp { AccountRlp { nonce: U256::from(1111), diff --git a/evm/src/generation/mpt.rs b/evm/src/generation/mpt.rs index 8ceb195a..4107b978 100644 --- a/evm/src/generation/mpt.rs +++ b/evm/src/generation/mpt.rs @@ -68,11 +68,10 @@ pub(crate) fn mpt_prover_inputs( PartialTrie::Hash(h) => prover_inputs.push(U256::from_big_endian(h.as_bytes())), PartialTrie::Branch { children, value } => { if value.is_empty() { - // There's no value, so value_len = 0. 
- prover_inputs.push(U256::zero()); + prover_inputs.push(U256::zero()); // value_present = 0 } else { let parsed_value = parse_value(value); - prover_inputs.push(parsed_value.len().into()); + prover_inputs.push(U256::one()); // value_present = 1 prover_inputs.extend(parsed_value); } for child in children { @@ -107,8 +106,7 @@ pub(crate) fn mpt_prover_inputs_state_trie( PartialTrie::Hash(h) => prover_inputs.push(U256::from_big_endian(h.as_bytes())), PartialTrie::Branch { children, value } => { assert!(value.is_empty(), "State trie should not have branch values"); - // There's no value, so value_len = 0. - prover_inputs.push(U256::zero()); + prover_inputs.push(U256::zero()); // value_present = 0 for (i, child) in children.iter().enumerate() { let extended_key = key.merge(&Nibbles { diff --git a/evm/src/recursive_verifier.rs b/evm/src/recursive_verifier.rs index bc64bb57..445497f8 100644 --- a/evm/src/recursive_verifier.rs +++ b/evm/src/recursive_verifier.rs @@ -235,7 +235,7 @@ impl, C: GenericConfig, const D: usize> .zip(verifier_data_target) .enumerate() { - builder.verify_proof( + builder.verify_proof::( recursive_proof, &verifier_data_target, &verifier_data[i].common, @@ -579,7 +579,7 @@ where { let recursive_proofs = std::array::from_fn(|i| { let verifier_data = &verifier_data[i]; - builder.add_virtual_proof_with_pis(&verifier_data.common) + builder.add_virtual_proof_with_pis::(&verifier_data.common) }); let verifier_data = std::array::from_fn(|i| { let verifier_data = &verifier_data[i]; diff --git a/field/src/extension/algebra.rs b/field/src/extension/algebra.rs index 54bea694..5840ae81 100644 --- a/field/src/extension/algebra.rs +++ b/field/src/extension/algebra.rs @@ -45,7 +45,7 @@ impl, const D: usize> Display for ExtensionAlgebra { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "({})", self.0[0])?; for i in 1..D { - write!(f, " + ({})*b^{}", self.0[i], i)?; + write!(f, " + ({})*b^{i}", self.0[i])?; } Ok(()) } diff --git 
a/insertion/src/insertion_gate.rs b/insertion/src/insertion_gate.rs index ea8f4194..2757dd23 100644 --- a/insertion/src/insertion_gate.rs +++ b/insertion/src/insertion_gate.rs @@ -73,7 +73,7 @@ impl, const D: usize> InsertionGate { impl, const D: usize> Gate for InsertionGate { fn id(&self) -> String { - format!("{:?}", self, D) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/benches/hashing.rs b/plonky2/benches/hashing.rs index 4632c4c4..673e0572 100644 --- a/plonky2/benches/hashing.rs +++ b/plonky2/benches/hashing.rs @@ -24,7 +24,7 @@ pub(crate) fn bench_keccak(c: &mut Criterion) { pub(crate) fn bench_poseidon(c: &mut Criterion) { c.bench_function( - &format!("poseidon<{}, {}>", type_name::(), SPONGE_WIDTH), + &format!("poseidon<{}, {SPONGE_WIDTH}>", type_name::()), |b| { b.iter_batched( || F::rand_arr::(), diff --git a/plonky2/examples/bench_recursion.rs b/plonky2/examples/bench_recursion.rs index f4379e7a..c9d24be7 100644 --- a/plonky2/examples/bench_recursion.rs +++ b/plonky2/examples/bench_recursion.rs @@ -32,7 +32,7 @@ use structopt::StructOpt; type ProofTuple = ( ProofWithPublicInputs, VerifierOnlyCircuitData, - CommonCircuitData, + CommonCircuitData, ); #[derive(Clone, StructOpt, Debug)] @@ -112,7 +112,7 @@ where let (inner_proof, inner_vd, inner_cd) = inner; let mut builder = CircuitBuilder::::new(config.clone()); let mut pw = PartialWitness::new(); - let pt = builder.add_virtual_proof_with_pis(inner_cd); + let pt = builder.add_virtual_proof_with_pis::(inner_cd); pw.set_proof_with_pis_target(&pt, inner_proof); let inner_data = VerifierCircuitTarget { @@ -121,7 +121,7 @@ where }; pw.set_verifier_data_target(&inner_data, inner_vd); - builder.verify_proof(pt, &inner_data, inner_cd); + builder.verify_proof::(pt, &inner_data, inner_cd); builder.print_gate_counts(0); if let Some(min_degree_bits) = min_degree_bits { @@ -150,7 +150,7 @@ where fn test_serialization, C: GenericConfig, const D: usize>( proof: 
&ProofWithPublicInputs, vd: &VerifierOnlyCircuitData, - cd: &CommonCircuitData, + cd: &CommonCircuitData, ) -> Result<()> where [(); C::Hasher::HASH_SIZE]:, diff --git a/plonky2/examples/square_root.rs b/plonky2/examples/square_root.rs index 0bc89f47..7d4d2fee 100644 --- a/plonky2/examples/square_root.rs +++ b/plonky2/examples/square_root.rs @@ -31,7 +31,7 @@ impl, const D: usize> SimpleGenerator let x_squared = witness.get_target(self.x_squared); let x = x_squared.sqrt().unwrap(); - println!("Square root: {}", x); + println!("Square root: {x}"); out_buffer.set_target(self.x, x); } @@ -75,7 +75,7 @@ fn main() -> Result<()> { let proof = data.prove(pw.clone())?; let x_squared_actual = proof.public_inputs[0]; - println!("Field element (square): {}", x_squared_actual); + println!("Field element (square): {x_squared_actual}"); data.verify(proof) } diff --git a/plonky2/src/bin/generate_constants.rs b/plonky2/src/bin/generate_constants.rs index 258c6b78..c5f8fae2 100644 --- a/plonky2/src/bin/generate_constants.rs +++ b/plonky2/src/bin/generate_constants.rs @@ -21,7 +21,7 @@ pub(crate) fn main() { // Print the constants in the format we prefer in our code. for chunk in constants.chunks(4) { for (i, c) in chunk.iter().enumerate() { - print!("{:#018x},", c); + print!("{c:#018x},"); if i != chunk.len() - 1 { print!(" "); } diff --git a/plonky2/src/fri/mod.rs b/plonky2/src/fri/mod.rs index 9c44b53b..90f1c940 100644 --- a/plonky2/src/fri/mod.rs +++ b/plonky2/src/fri/mod.rs @@ -54,7 +54,7 @@ impl FriConfig { /// FRI parameters, including generated parameters which are specific to an instance size, in /// contrast to `FriConfig` which is user-specified and independent of instance size. -#[derive(Debug, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct FriParams { /// User-specified FRI configuration. 
pub config: FriConfig, diff --git a/plonky2/src/fri/recursive_verifier.rs b/plonky2/src/fri/recursive_verifier.rs index ac7e3a87..d14420c1 100644 --- a/plonky2/src/fri/recursive_verifier.rs +++ b/plonky2/src/fri/recursive_verifier.rs @@ -176,7 +176,7 @@ impl, const D: usize> CircuitBuilder { with_context!( self, level, - &format!("verify one (of {}) query rounds", num_queries), + &format!("verify one (of {num_queries}) query rounds"), self.fri_verifier_query_round::( instance, challenges, @@ -207,7 +207,7 @@ impl, const D: usize> CircuitBuilder { { with_context!( self, - &format!("verify {}'th initial Merkle proof", i), + &format!("verify {i}'th initial Merkle proof"), self.verify_merkle_proof_to_cap_with_cap_index::( evals.clone(), x_index_bits, diff --git a/plonky2/src/gadgets/arithmetic.rs b/plonky2/src/gadgets/arithmetic.rs index f4722df4..33facd74 100644 --- a/plonky2/src/gadgets/arithmetic.rs +++ b/plonky2/src/gadgets/arithmetic.rs @@ -345,7 +345,7 @@ impl, const D: usize> CircuitBuilder { pub fn is_equal(&mut self, x: Target, y: Target) -> BoolTarget { let zero = self.zero(); - let equal = self.add_virtual_bool_target(); + let equal = self.add_virtual_bool_target_unsafe(); let not_equal = self.not(equal); let inv = self.add_virtual_target(); self.add_simple_generator(EqualityGenerator { x, y, equal, inv }); diff --git a/plonky2/src/gates/arithmetic_base.rs b/plonky2/src/gates/arithmetic_base.rs index 207af2d0..03560faf 100644 --- a/plonky2/src/gates/arithmetic_base.rs +++ b/plonky2/src/gates/arithmetic_base.rs @@ -53,7 +53,7 @@ impl ArithmeticGate { impl, const D: usize> Gate for ArithmeticGate { fn id(&self) -> String { - format!("{:?}", self) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/gates/arithmetic_extension.rs b/plonky2/src/gates/arithmetic_extension.rs index e4e07f83..26b7074b 100644 --- a/plonky2/src/gates/arithmetic_extension.rs +++ b/plonky2/src/gates/arithmetic_extension.rs @@ -51,7 
+51,7 @@ impl ArithmeticExtensionGate { impl, const D: usize> Gate for ArithmeticExtensionGate { fn id(&self) -> String { - format!("{:?}", self) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/gates/base_sum.rs b/plonky2/src/gates/base_sum.rs index 5be54eeb..1252c8e3 100644 --- a/plonky2/src/gates/base_sum.rs +++ b/plonky2/src/gates/base_sum.rs @@ -49,7 +49,7 @@ impl BaseSumGate { impl, const D: usize, const B: usize> Gate for BaseSumGate { fn id(&self) -> String { - format!("{:?} + Base: {}", self, B) + format!("{self:?} + Base: {B}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/gates/constant.rs b/plonky2/src/gates/constant.rs index 7d68b088..513caad9 100644 --- a/plonky2/src/gates/constant.rs +++ b/plonky2/src/gates/constant.rs @@ -33,7 +33,7 @@ impl ConstantGate { impl, const D: usize> Gate for ConstantGate { fn id(&self) -> String { - format!("{:?}", self) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/gates/exponentiation.rs b/plonky2/src/gates/exponentiation.rs index aa977308..ca1ba395 100644 --- a/plonky2/src/gates/exponentiation.rs +++ b/plonky2/src/gates/exponentiation.rs @@ -70,7 +70,7 @@ impl, const D: usize> ExponentiationGate { impl, const D: usize> Gate for ExponentiationGate { fn id(&self) -> String { - format!("{:?}", self, D) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/gates/high_degree_interpolation.rs b/plonky2/src/gates/high_degree_interpolation.rs index bcdf2276..0bc4ab65 100644 --- a/plonky2/src/gates/high_degree_interpolation.rs +++ b/plonky2/src/gates/high_degree_interpolation.rs @@ -87,7 +87,7 @@ impl, const D: usize> Gate for HighDegreeInterpolationGate { fn id(&self) -> String { - format!("{:?}", self, D) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git 
a/plonky2/src/gates/low_degree_interpolation.rs b/plonky2/src/gates/low_degree_interpolation.rs index 3edc4175..8fd2ed47 100644 --- a/plonky2/src/gates/low_degree_interpolation.rs +++ b/plonky2/src/gates/low_degree_interpolation.rs @@ -80,7 +80,7 @@ impl, const D: usize> LowDegreeInterpolationGate, const D: usize> Gate for LowDegreeInterpolationGate { fn id(&self) -> String { - format!("{:?}", self, D) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/gates/multiplication_extension.rs b/plonky2/src/gates/multiplication_extension.rs index ab29bba7..8e6b44d7 100644 --- a/plonky2/src/gates/multiplication_extension.rs +++ b/plonky2/src/gates/multiplication_extension.rs @@ -48,7 +48,7 @@ impl MulExtensionGate { impl, const D: usize> Gate for MulExtensionGate { fn id(&self) -> String { - format!("{:?}", self) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/gates/poseidon.rs b/plonky2/src/gates/poseidon.rs index 3ae8db83..26ec2594 100644 --- a/plonky2/src/gates/poseidon.rs +++ b/plonky2/src/gates/poseidon.rs @@ -98,7 +98,7 @@ impl, const D: usize> PoseidonGate { impl, const D: usize> Gate for PoseidonGate { fn id(&self) -> String { - format!("{:?}", self, SPONGE_WIDTH) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/gates/poseidon_mds.rs b/plonky2/src/gates/poseidon_mds.rs index 2ccfa640..289246e1 100644 --- a/plonky2/src/gates/poseidon_mds.rs +++ b/plonky2/src/gates/poseidon_mds.rs @@ -117,7 +117,7 @@ impl + Poseidon, const D: usize> PoseidonMdsGate + Poseidon, const D: usize> Gate for PoseidonMdsGate { fn id(&self) -> String { - format!("{:?}", self, SPONGE_WIDTH) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/gates/random_access.rs b/plonky2/src/gates/random_access.rs index fa365f16..3ea2f55e 100644 --- a/plonky2/src/gates/random_access.rs 
+++ b/plonky2/src/gates/random_access.rs @@ -115,7 +115,7 @@ impl, const D: usize> RandomAccessGate { impl, const D: usize> Gate for RandomAccessGate { fn id(&self) -> String { - format!("{:?}", self, D) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/gates/reducing.rs b/plonky2/src/gates/reducing.rs index 31960b64..02f8ac2d 100644 --- a/plonky2/src/gates/reducing.rs +++ b/plonky2/src/gates/reducing.rs @@ -55,7 +55,7 @@ impl ReducingGate { impl, const D: usize> Gate for ReducingGate { fn id(&self) -> String { - format!("{:?}", self) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/gates/reducing_extension.rs b/plonky2/src/gates/reducing_extension.rs index 2ae25059..8b04ec99 100644 --- a/plonky2/src/gates/reducing_extension.rs +++ b/plonky2/src/gates/reducing_extension.rs @@ -58,7 +58,7 @@ impl ReducingExtensionGate { impl, const D: usize> Gate for ReducingExtensionGate { fn id(&self) -> String { - format!("{:?}", self) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/plonky2/src/iop/generator.rs b/plonky2/src/iop/generator.rs index 3614b2e4..9948198e 100644 --- a/plonky2/src/iop/generator.rs +++ b/plonky2/src/iop/generator.rs @@ -22,7 +22,7 @@ pub(crate) fn generate_partial_witness< >( inputs: PartialWitness, prover_data: &'a ProverOnlyCircuitData, - common_data: &'a CommonCircuitData, + common_data: &'a CommonCircuitData, ) -> PartitionWitness<'a, F> { let config = &common_data.config; let generators = &prover_data.generators; diff --git a/plonky2/src/lib.rs b/plonky2/src/lib.rs index 64acfe12..8a517a11 100644 --- a/plonky2/src/lib.rs +++ b/plonky2/src/lib.rs @@ -18,4 +18,5 @@ pub mod gates; pub mod hash; pub mod iop; pub mod plonk; +pub mod recursion; pub mod util; diff --git a/plonky2/src/plonk/circuit_builder.rs b/plonky2/src/plonk/circuit_builder.rs index 83587f2e..dfd23426 100644 --- 
a/plonky2/src/plonk/circuit_builder.rs +++ b/plonky2/src/plonk/circuit_builder.rs @@ -34,7 +34,7 @@ use crate::iop::target::{BoolTarget, Target}; use crate::iop::wire::Wire; use crate::plonk::circuit_data::{ CircuitConfig, CircuitData, CommonCircuitData, ProverCircuitData, ProverOnlyCircuitData, - VerifierCircuitData, VerifierOnlyCircuitData, + VerifierCircuitData, VerifierCircuitTarget, VerifierOnlyCircuitData, }; use crate::plonk::config::{GenericConfig, Hasher}; use crate::plonk::copy_constraint::CopyConstraint; @@ -83,6 +83,15 @@ pub struct CircuitBuilder, const D: usize> { /// List of constant generators used to fill the constant wires. constant_generators: Vec>, + + /// Optional common data. When it is `Some(goal_data)`, the `build` function panics if the resulting + /// common data doesn't equal `goal_data`. + /// This is used in cyclic recursion. + pub(crate) goal_common_data: Option>, + + /// Optional verifier data that is registered as public inputs. + /// This is used in cyclic recursion to hold the circuit's own verifier key. + pub(crate) verifier_data_public_input: Option, } impl, const D: usize> CircuitBuilder { @@ -102,6 +111,8 @@ impl, const D: usize> CircuitBuilder { arithmetic_results: HashMap::new(), current_slots: HashMap::new(), constant_generators: Vec::new(), + goal_common_data: None, + verifier_data_public_input: None, }; builder.check_config(); builder @@ -144,6 +155,10 @@ impl, const D: usize> CircuitBuilder { targets.iter().for_each(|&t| self.register_public_input(t)); } + pub fn num_public_inputs(&self) -> usize { + self.public_inputs.len() + } + /// Adds a new "virtual" target. This is not an actual wire in the witness, but just a target /// that help facilitate witness generation. In particular, a generator can assign a values to a /// virtual target, which can then be copied to other (virtual or concrete) targets. 
When we @@ -198,8 +213,7 @@ impl, const D: usize> CircuitBuilder { PolynomialCoeffsExtTarget(coeffs) } - // TODO: Unsafe - pub fn add_virtual_bool_target(&mut self) -> BoolTarget { + pub fn add_virtual_bool_target_unsafe(&mut self) -> BoolTarget { BoolTarget::new_unsafe(self.add_virtual_target()) } @@ -215,6 +229,21 @@ impl, const D: usize> CircuitBuilder { self.register_public_input(t); t } + /// Add a virtual verifier data, register it as a public input and set it to `self.verifier_data_public_input`. + /// WARNING: Do not register any public input after calling this! TODO: relax this + pub(crate) fn add_verifier_data_public_input(&mut self) { + let verifier_data = VerifierCircuitTarget { + constants_sigmas_cap: self.add_virtual_cap(self.config.fri_config.cap_height), + circuit_digest: self.add_virtual_hash(), + }; + // The verifier data are public inputs. + self.register_public_inputs(&verifier_data.circuit_digest.elements); + for i in 0..self.config.fri_config.num_cap_elements() { + self.register_public_inputs(&verifier_data.constants_sigmas_cap.0[i].elements); + } + + self.verifier_data_public_input = Some(verifier_data); + } /// Adds a gate to the circuit, and returns its index. 
pub fn add_gate>(&mut self, gate_type: G, mut constants: Vec) -> usize { @@ -827,6 +856,9 @@ impl, const D: usize> CircuitBuilder { k_is, num_partial_products, }; + if let Some(goal_data) = self.goal_common_data { + assert_eq!(goal_data, common); + } let prover_only = ProverOnlyCircuitData { generators: self.generators, diff --git a/plonky2/src/plonk/circuit_data.rs b/plonky2/src/plonk/circuit_data.rs index 5143e730..b5e411f1 100644 --- a/plonky2/src/plonk/circuit_data.rs +++ b/plonky2/src/plonk/circuit_data.rs @@ -106,7 +106,7 @@ impl CircuitConfig { pub struct CircuitData, C: GenericConfig, const D: usize> { pub prover_only: ProverOnlyCircuitData, pub verifier_only: VerifierOnlyCircuitData, - pub common: CommonCircuitData, + pub common: CommonCircuitData, } impl, C: GenericConfig, const D: usize> @@ -196,7 +196,7 @@ pub struct ProverCircuitData< const D: usize, > { pub prover_only: ProverOnlyCircuitData, - pub common: CommonCircuitData, + pub common: CommonCircuitData, } impl, C: GenericConfig, const D: usize> @@ -223,7 +223,7 @@ pub struct VerifierCircuitData< const D: usize, > { pub verifier_only: VerifierOnlyCircuitData, - pub common: CommonCircuitData, + pub common: CommonCircuitData, } impl, C: GenericConfig, const D: usize> @@ -276,7 +276,7 @@ pub struct ProverOnlyCircuitData< } /// Circuit data required by the verifier, but not the prover. -#[derive(Debug)] +#[derive(Debug, Eq, PartialEq)] pub struct VerifierOnlyCircuitData, const D: usize> { /// A commitment to each constant polynomial and each permutation polynomial. pub constants_sigmas_cap: MerkleCap, @@ -286,18 +286,14 @@ pub struct VerifierOnlyCircuitData, const D: usize> { } /// Circuit data required by both the prover and the verifier. 
-#[derive(Debug, Eq, PartialEq)] -pub struct CommonCircuitData< - F: RichField + Extendable, - C: GenericConfig, - const D: usize, -> { +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct CommonCircuitData, const D: usize> { pub config: CircuitConfig, pub(crate) fri_params: FriParams, /// The types of gates used in this circuit, along with their prefixes. - pub(crate) gates: Vec>, + pub(crate) gates: Vec>, /// Information on the circuit's selector polynomials. pub(crate) selectors_info: SelectorsInfo, @@ -314,15 +310,13 @@ pub struct CommonCircuitData< pub(crate) num_public_inputs: usize, /// The `{k_i}` valued used in `S_ID_i` in Plonk's permutation argument. - pub(crate) k_is: Vec, + pub(crate) k_is: Vec, /// The number of partial products needed to compute the `Z` polynomials. pub(crate) num_partial_products: usize, } -impl, C: GenericConfig, const D: usize> - CommonCircuitData -{ +impl, const D: usize> CommonCircuitData { pub const fn degree_bits(&self) -> usize { self.fri_params.degree_bits } @@ -494,6 +488,7 @@ impl, C: GenericConfig, const D: usize> /// is intentionally missing certain fields, such as `CircuitConfig`, because we support only a /// limited form of dynamic inner circuits. We can't practically make things like the wire count /// dynamic, at least not without setting a maximum wire count and paying for the worst case. +#[derive(Clone)] pub struct VerifierCircuitTarget { /// A commitment to each constant polynomial and each permutation polynomial. 
pub constants_sigmas_cap: MerkleCapTarget, diff --git a/plonky2/src/plonk/get_challenges.rs b/plonky2/src/plonk/get_challenges.rs index f497380f..116529e7 100644 --- a/plonky2/src/plonk/get_challenges.rs +++ b/plonky2/src/plonk/get_challenges.rs @@ -30,7 +30,7 @@ fn get_challenges, C: GenericConfig, cons final_poly: &PolynomialCoeffs, pow_witness: F, circuit_digest: &<>::Hasher as Hasher>::Hash, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> anyhow::Result> { let config = &common_data.config; let num_challenges = config.num_challenges; @@ -74,7 +74,7 @@ impl, C: GenericConfig, const D: usize> pub(crate) fn fri_query_indices( &self, circuit_digest: &<>::Hasher as Hasher>::Hash, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> anyhow::Result> { Ok(self .get_challenges(self.get_public_inputs_hash(), circuit_digest, common_data)? @@ -87,7 +87,7 @@ impl, C: GenericConfig, const D: usize> &self, public_inputs_hash: <>::InnerHasher as Hasher>::Hash, circuit_digest: &<>::Hasher as Hasher>::Hash, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> anyhow::Result> { let Proof { wires_cap, @@ -103,7 +103,7 @@ impl, C: GenericConfig, const D: usize> }, } = &self.proof; - get_challenges( + get_challenges::( public_inputs_hash, wires_cap, plonk_zs_partial_products_cap, @@ -126,7 +126,7 @@ impl, C: GenericConfig, const D: usize> &self, public_inputs_hash: <>::InnerHasher as Hasher>::Hash, circuit_digest: &<>::Hasher as Hasher>::Hash, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> anyhow::Result> { let CompressedProof { wires_cap, @@ -142,7 +142,7 @@ impl, C: GenericConfig, const D: usize> }, } = &self.proof; - get_challenges( + get_challenges::( public_inputs_hash, wires_cap, plonk_zs_partial_products_cap, @@ -160,7 +160,7 @@ impl, C: GenericConfig, const D: usize> pub(crate) fn get_inferred_elements( &self, challenges: &ProofChallenges, - common_data: &CommonCircuitData, + 
common_data: &CommonCircuitData, ) -> FriInferredElements { let ProofChallenges { plonk_zeta, @@ -244,7 +244,7 @@ impl, const D: usize> CircuitBuilder { final_poly: &PolynomialCoeffsExtTarget, pow_witness: Target, inner_circuit_digest: HashOutTarget, - inner_common_data: &CommonCircuitData, + inner_common_data: &CommonCircuitData, ) -> ProofChallengesTarget where C::Hasher: AlgebraicHasher, @@ -292,7 +292,7 @@ impl ProofWithPublicInputsTarget { builder: &mut CircuitBuilder, public_inputs_hash: HashOutTarget, inner_circuit_digest: HashOutTarget, - inner_common_data: &CommonCircuitData, + inner_common_data: &CommonCircuitData, ) -> ProofChallengesTarget where C::Hasher: AlgebraicHasher, @@ -311,7 +311,7 @@ impl ProofWithPublicInputsTarget { }, } = &self.proof; - builder.get_challenges( + builder.get_challenges::( public_inputs_hash, wires_cap, plonk_zs_partial_products_cap, diff --git a/plonky2/src/plonk/mod.rs b/plonky2/src/plonk/mod.rs index 8cd7443f..604c1f79 100644 --- a/plonky2/src/plonk/mod.rs +++ b/plonky2/src/plonk/mod.rs @@ -1,6 +1,5 @@ pub mod circuit_builder; pub mod circuit_data; -pub mod conditional_recursive_verifier; pub mod config; pub(crate) mod copy_constraint; mod get_challenges; @@ -8,7 +7,6 @@ pub(crate) mod permutation_argument; pub mod plonk_common; pub mod proof; pub mod prover; -pub mod recursive_verifier; mod validate_shape; pub(crate) mod vanishing_poly; pub mod vars; diff --git a/plonky2/src/plonk/proof.rs b/plonky2/src/plonk/proof.rs index 2ec26c75..1a7a26db 100644 --- a/plonky2/src/plonk/proof.rs +++ b/plonky2/src/plonk/proof.rs @@ -82,7 +82,7 @@ impl, C: GenericConfig, const D: usize> pub fn compress( self, circuit_digest: &<>::Hasher as Hasher>::Hash, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> anyhow::Result> { let indices = self.fri_query_indices(circuit_digest, common_data)?; let compressed_proof = self.proof.compress(&indices, &common_data.fri_params); @@ -106,7 +106,7 @@ impl, C: GenericConfig, const 
D: usize> pub fn from_bytes( bytes: Vec, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> anyhow::Result { let mut buffer = Buffer::new(bytes); let proof = buffer.read_proof_with_public_inputs(common_data)?; @@ -178,7 +178,7 @@ impl, C: GenericConfig, const D: usize> pub fn decompress( self, circuit_digest: &<>::Hasher as Hasher>::Hash, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> anyhow::Result> where [(); C::Hasher::HASH_SIZE]:, @@ -198,7 +198,7 @@ impl, C: GenericConfig, const D: usize> pub(crate) fn verify( self, verifier_data: &VerifierOnlyCircuitData, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> anyhow::Result<()> where [(); C::Hasher::HASH_SIZE]:, @@ -240,7 +240,7 @@ impl, C: GenericConfig, const D: usize> pub fn from_bytes( bytes: Vec, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> anyhow::Result { let mut buffer = Buffer::new(bytes); let proof = buffer.read_compressed_proof_with_public_inputs(common_data)?; @@ -303,7 +303,7 @@ impl, const D: usize> OpeningSet { wires_commitment: &PolynomialBatch, zs_partial_products_commitment: &PolynomialBatch, quotient_polys_commitment: &PolynomialBatch, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Self { let eval_commitment = |z: F::Extension, c: &PolynomialBatch| { c.polynomials diff --git a/plonky2/src/plonk/prover.rs b/plonky2/src/plonk/prover.rs index 8476a2d9..621f20ef 100644 --- a/plonky2/src/plonk/prover.rs +++ b/plonky2/src/plonk/prover.rs @@ -28,7 +28,7 @@ use crate::util::transpose; pub fn prove, C: GenericConfig, const D: usize>( prover_data: &ProverOnlyCircuitData, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, inputs: PartialWitness, timing: &mut TimingTree, ) -> Result> @@ -233,7 +233,7 @@ fn all_wires_permutation_partial_products< betas: &[F], gammas: &[F], prover_data: &ProverOnlyCircuitData, - common_data: &CommonCircuitData, + 
common_data: &CommonCircuitData, ) -> Vec>> { (0..common_data.config.num_challenges) .map(|i| { @@ -260,7 +260,7 @@ fn wires_permutation_partial_products_and_zs< beta: F, gamma: F, prover_data: &ProverOnlyCircuitData, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Vec> { let degree = common_data.quotient_degree_factor; let subgroup = &prover_data.subgroup; @@ -318,7 +318,7 @@ fn compute_quotient_polys< C: GenericConfig, const D: usize, >( - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, prover_data: &'a ProverOnlyCircuitData, public_inputs_hash: &<>::InnerHasher as Hasher>::Hash, wires_commitment: &'a PolynomialBatch, @@ -424,7 +424,7 @@ fn compute_quotient_polys< public_inputs_hash, ); - let mut quotient_values_batch = eval_vanishing_poly_base_batch( + let mut quotient_values_batch = eval_vanishing_poly_base_batch::( common_data, &indices_batch, &shifted_xs_batch, diff --git a/plonky2/src/plonk/validate_shape.rs b/plonky2/src/plonk/validate_shape.rs index f7ec1b6e..1e6708cc 100644 --- a/plonky2/src/plonk/validate_shape.rs +++ b/plonky2/src/plonk/validate_shape.rs @@ -8,7 +8,7 @@ use crate::plonk::proof::{OpeningSet, Proof, ProofWithPublicInputs}; pub(crate) fn validate_proof_with_pis_shape( proof_with_pis: &ProofWithPublicInputs, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> anyhow::Result<()> where F: RichField + Extendable, @@ -32,7 +32,7 @@ where fn validate_proof_shape( proof: &Proof, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> anyhow::Result<()> where F: RichField + Extendable, diff --git a/plonky2/src/plonk/vanishing_poly.rs b/plonky2/src/plonk/vanishing_poly.rs index 303f698b..28d43f4f 100644 --- a/plonky2/src/plonk/vanishing_poly.rs +++ b/plonky2/src/plonk/vanishing_poly.rs @@ -25,7 +25,7 @@ pub(crate) fn eval_vanishing_poly< C: GenericConfig, const D: usize, >( - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, x: F::Extension, 
vars: EvaluationVars, local_zs: &[F::Extension], @@ -39,7 +39,7 @@ pub(crate) fn eval_vanishing_poly< let max_degree = common_data.quotient_degree_factor; let num_prods = common_data.num_partial_products; - let constraint_terms = evaluate_gate_constraints(common_data, vars); + let constraint_terms = evaluate_gate_constraints::(common_data, vars); // The L_0(x) (Z(x) - 1) vanishing terms. let mut vanishing_z_1_terms = Vec::new(); @@ -100,7 +100,7 @@ pub(crate) fn eval_vanishing_poly_base_batch< C: GenericConfig, const D: usize, >( - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, indices_batch: &[usize], xs_batch: &[F], vars_batch: EvaluationVarsBaseBatch, @@ -126,7 +126,8 @@ pub(crate) fn eval_vanishing_poly_base_batch< let num_gate_constraints = common_data.num_gate_constraints; - let constraint_terms_batch = evaluate_gate_constraints_base_batch(common_data, vars_batch); + let constraint_terms_batch = + evaluate_gate_constraints_base_batch::(common_data, vars_batch); debug_assert!(constraint_terms_batch.len() == n * num_gate_constraints); let num_challenges = common_data.config.num_challenges; @@ -210,7 +211,7 @@ pub fn evaluate_gate_constraints< C: GenericConfig, const D: usize, >( - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, vars: EvaluationVars, ) -> Vec { let mut constraints = vec![F::Extension::ZERO; common_data.num_gate_constraints]; @@ -244,7 +245,7 @@ pub fn evaluate_gate_constraints_base_batch< C: GenericConfig, const D: usize, >( - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, vars_batch: EvaluationVarsBaseBatch, ) -> Vec { let mut constraints_batch = vec![F::ZERO; common_data.num_gate_constraints * vars_batch.len()]; @@ -276,7 +277,7 @@ pub fn evaluate_gate_constraints_circuit< const D: usize, >( builder: &mut CircuitBuilder, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, vars: EvaluationTargets, ) -> Vec> { let mut all_gate_constraints = 
vec![builder.zero_extension(); common_data.num_gate_constraints]; @@ -311,7 +312,7 @@ pub(crate) fn eval_vanishing_poly_circuit< const D: usize, >( builder: &mut CircuitBuilder, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, x: ExtensionTarget, x_pow_deg: ExtensionTarget, vars: EvaluationTargets, @@ -329,7 +330,7 @@ pub(crate) fn eval_vanishing_poly_circuit< let constraint_terms = with_context!( builder, "evaluate gate constraints", - evaluate_gate_constraints_circuit(builder, common_data, vars,) + evaluate_gate_constraints_circuit::(builder, common_data, vars,) ); // The L_0(x) (Z(x) - 1) vanishing terms. diff --git a/plonky2/src/plonk/verifier.rs b/plonky2/src/plonk/verifier.rs index 37ddfffa..52681558 100644 --- a/plonky2/src/plonk/verifier.rs +++ b/plonky2/src/plonk/verifier.rs @@ -15,7 +15,7 @@ use crate::plonk::vars::EvaluationVars; pub(crate) fn verify, C: GenericConfig, const D: usize>( proof_with_pis: ProofWithPublicInputs, verifier_data: &VerifierOnlyCircuitData, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result<()> where [(); C::Hasher::HASH_SIZE]:, @@ -47,7 +47,7 @@ pub(crate) fn verify_with_challenges< public_inputs_hash: <>::InnerHasher as Hasher>::Hash, challenges: ProofChallenges, verifier_data: &VerifierOnlyCircuitData, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result<()> where [(); C::Hasher::HASH_SIZE]:, @@ -65,7 +65,7 @@ where let partial_products = &proof.openings.partial_products; // Evaluate the vanishing polynomial at our challenge point, zeta. 
- let vanishing_polys_zeta = eval_vanishing_poly( + let vanishing_polys_zeta = eval_vanishing_poly::( common_data, challenges.plonk_zeta, vars, diff --git a/plonky2/src/plonk/conditional_recursive_verifier.rs b/plonky2/src/recursion/conditional_recursive_verifier.rs similarity index 95% rename from plonky2/src/plonk/conditional_recursive_verifier.rs rename to plonky2/src/recursion/conditional_recursive_verifier.rs index 2c406904..6bafc623 100644 --- a/plonky2/src/plonk/conditional_recursive_verifier.rs +++ b/plonky2/src/recursion/conditional_recursive_verifier.rs @@ -24,13 +24,12 @@ use crate::plonk::proof::{ use crate::with_context; /// Generate a proof having a given `CommonCircuitData`. -#[allow(unused)] // TODO: should be used soon. pub(crate) fn dummy_proof< F: RichField + Extendable, C: GenericConfig, const D: usize, >( - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result<( ProofWithPublicInputs, VerifierOnlyCircuitData, @@ -80,7 +79,7 @@ impl, const D: usize> CircuitBuilder { inner_verifier_data0: &VerifierCircuitTarget, proof_with_pis1: &ProofWithPublicInputsTarget, inner_verifier_data1: &VerifierCircuitTarget, - inner_common_data: &CommonCircuitData, + inner_common_data: &CommonCircuitData, ) where C::Hasher: AlgebraicHasher, { @@ -144,7 +143,7 @@ impl, const D: usize> CircuitBuilder { ), }; - self.verify_proof(selected_proof, &selected_verifier_data, inner_common_data); + self.verify_proof::(selected_proof, &selected_verifier_data, inner_common_data); } /// Conditionally verify a proof with a new generated dummy proof. 
@@ -153,18 +152,18 @@ impl, const D: usize> CircuitBuilder { condition: BoolTarget, proof_with_pis: &ProofWithPublicInputsTarget, inner_verifier_data: &VerifierCircuitTarget, - inner_common_data: &CommonCircuitData, + inner_common_data: &CommonCircuitData, ) -> (ProofWithPublicInputsTarget, VerifierCircuitTarget) where C::Hasher: AlgebraicHasher, { - let dummy_proof = self.add_virtual_proof_with_pis(inner_common_data); + let dummy_proof = self.add_virtual_proof_with_pis::(inner_common_data); let dummy_verifier_data = VerifierCircuitTarget { constants_sigmas_cap: self .add_virtual_cap(inner_common_data.config.fri_config.cap_height), circuit_digest: self.add_virtual_hash(), }; - self.conditionally_verify_proof( + self.conditionally_verify_proof::( condition, proof_with_pis, inner_verifier_data, @@ -183,7 +182,7 @@ impl, const D: usize> CircuitBuilder { .collect() } - fn select_hash( + pub(crate) fn select_hash( &mut self, b: BoolTarget, h0: HashOutTarget, @@ -406,10 +405,10 @@ mod tests { // Conditionally verify the two proofs. 
let mut builder = CircuitBuilder::::new(config); let mut pw = PartialWitness::new(); - let pt = builder.add_virtual_proof_with_pis(&data.common); + let pt = builder.add_virtual_proof_with_pis::(&data.common); pw.set_proof_with_pis_target(&pt, &proof); - let dummy_pt = builder.add_virtual_proof_with_pis(&data.common); - pw.set_proof_with_pis_target(&dummy_pt, &dummy_proof); + let dummy_pt = builder.add_virtual_proof_with_pis::(&data.common); + pw.set_proof_with_pis_target::(&dummy_pt, &dummy_proof); let inner_data = VerifierCircuitTarget { constants_sigmas_cap: builder.add_virtual_cap(data.common.config.fri_config.cap_height), circuit_digest: builder.add_virtual_hash(), @@ -421,7 +420,7 @@ mod tests { }; pw.set_verifier_data_target(&dummy_inner_data, &dummy_data); let b = builder.constant_bool(F::rand().0 % 2 == 0); - builder.conditionally_verify_proof( + builder.conditionally_verify_proof::( b, &pt, &inner_data, diff --git a/plonky2/src/recursion/cyclic_recursion.rs b/plonky2/src/recursion/cyclic_recursion.rs new file mode 100644 index 00000000..f2ad7eb9 --- /dev/null +++ b/plonky2/src/recursion/cyclic_recursion.rs @@ -0,0 +1,437 @@ +#![allow(clippy::int_plus_one)] // Makes more sense for some inequalities below. 
+use anyhow::{ensure, Result}; +use itertools::Itertools; +use plonky2_field::extension::Extendable; + +use crate::gates::noop::NoopGate; +use crate::hash::hash_types::{HashOut, HashOutTarget, MerkleCapTarget, RichField}; +use crate::hash::merkle_tree::MerkleCap; +use crate::iop::target::{BoolTarget, Target}; +use crate::iop::witness::{PartialWitness, Witness}; +use crate::plonk::circuit_builder::CircuitBuilder; +use crate::plonk::circuit_data::{ + CommonCircuitData, VerifierCircuitTarget, VerifierOnlyCircuitData, +}; +use crate::plonk::config::Hasher; +use crate::plonk::config::{AlgebraicHasher, GenericConfig}; +use crate::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget}; +use crate::recursion::conditional_recursive_verifier::dummy_proof; + +pub struct CyclicRecursionData< + 'a, + F: RichField + Extendable, + C: GenericConfig, + const D: usize, +> { + proof: &'a Option>, + verifier_data: &'a VerifierOnlyCircuitData, + common_data: &'a CommonCircuitData, +} + +pub struct CyclicRecursionTarget { + pub proof: ProofWithPublicInputsTarget, + pub verifier_data: VerifierCircuitTarget, + pub dummy_proof: ProofWithPublicInputsTarget, + pub dummy_verifier_data: VerifierCircuitTarget, + pub base_case: BoolTarget, +} + +impl, const D: usize> VerifierOnlyCircuitData { + fn from_slice(slice: &[C::F], common_data: &CommonCircuitData) -> Result + where + C::Hasher: AlgebraicHasher, + { + // The structure of the public inputs is `[..., circuit_digest, constants_sigmas_cap]`. 
+ let cap_len = common_data.config.fri_config.num_cap_elements(); + let len = slice.len(); + ensure!(len >= 4 + 4 * cap_len, "Not enough public inputs"); + let constants_sigmas_cap = MerkleCap( + (0..cap_len) + .map(|i| HashOut { + elements: std::array::from_fn(|j| slice[len - 4 * (cap_len - i) + j]), + }) + .collect(), + ); + let circuit_digest = + HashOut::from_partial(&slice[len - 4 - 4 * cap_len..len - 4 * cap_len]); + + Ok(Self { + circuit_digest, + constants_sigmas_cap, + }) + } +} + +impl VerifierCircuitTarget { + fn from_slice, C: GenericConfig, const D: usize>( + slice: &[Target], + common_data: &CommonCircuitData, + ) -> Result { + let cap_len = common_data.config.fri_config.num_cap_elements(); + let len = slice.len(); + ensure!(len >= 4 + 4 * cap_len, "Not enough public inputs"); + let constants_sigmas_cap = MerkleCapTarget( + (0..cap_len) + .map(|i| HashOutTarget { + elements: std::array::from_fn(|j| slice[len - 4 * (cap_len - i) + j]), + }) + .collect(), + ); + let circuit_digest = HashOutTarget { + elements: std::array::from_fn(|i| slice[len - 4 - 4 * cap_len + i]), + }; + + Ok(Self { + circuit_digest, + constants_sigmas_cap, + }) + } +} + +impl, const D: usize> CircuitBuilder { + /// Cyclic recursion gadget. + /// WARNING: Do not register any public input after calling this! TODO: relax this + pub fn cyclic_recursion>( + &mut self, + // Flag set to true for the base case of the cycle where we verify a dummy proof to bootstrap the cycle. Set to false otherwise. 
+ base_case: BoolTarget, + previous_virtual_public_inputs: &[Target], + common_data: &mut CommonCircuitData, + ) -> Result> + where + C::Hasher: AlgebraicHasher, + [(); C::Hasher::HASH_SIZE]:, + { + if self.verifier_data_public_input.is_none() { + self.add_verifier_data_public_input(); + } + let verifier_data = self.verifier_data_public_input.clone().unwrap(); + common_data.num_public_inputs = self.num_public_inputs(); + self.goal_common_data = Some(common_data.clone()); + + let dummy_verifier_data = VerifierCircuitTarget { + constants_sigmas_cap: self.add_virtual_cap(self.config.fri_config.cap_height), + circuit_digest: self.add_virtual_hash(), + }; + + let proof = self.add_virtual_proof_with_pis::(common_data); + let dummy_proof = self.add_virtual_proof_with_pis::(common_data); + + let pis = VerifierCircuitTarget::from_slice::(&proof.public_inputs, common_data)?; + // Connect previous verifier data to current one. This guarantees that every proof in the cycle uses the same verifier data. + self.connect_hashes(pis.circuit_digest, verifier_data.circuit_digest); + for (h0, h1) in pis + .constants_sigmas_cap + .0 + .iter() + .zip_eq(&verifier_data.constants_sigmas_cap.0) + { + self.connect_hashes(*h0, *h1); + } + + for (x, y) in previous_virtual_public_inputs + .iter() + .zip(&proof.public_inputs) + { + self.connect(*x, *y); + } + + // Verify the dummy proof if `base_case` is set to true, otherwise verify the "real" proof. + self.conditionally_verify_proof::( + base_case, + &dummy_proof, + &dummy_verifier_data, + &proof, + &verifier_data, + common_data, + ); + + // Make sure we have enough gates to match `common_data`. + while self.num_gates() < (common_data.degree() / 2) { + self.add_gate(NoopGate, vec![]); + } + // Make sure we have every gate to match `common_data`. 
+ for g in &common_data.gates { + self.add_gate_to_gate_set(g.clone()); + } + + Ok(CyclicRecursionTarget { + proof, + verifier_data: verifier_data.clone(), + dummy_proof, + dummy_verifier_data, + base_case, + }) + } +} + +/// Set the targets in a `CyclicRecursionTarget` to their corresponding values in a `CyclicRecursionData`. +pub fn set_cyclic_recursion_data_target< + F: RichField + Extendable, + C: GenericConfig, + const D: usize, +>( + pw: &mut PartialWitness, + cyclic_recursion_data_target: &CyclicRecursionTarget, + cyclic_recursion_data: &CyclicRecursionData, + // Public inputs to set in the base case to seed some initial data. + public_inputs: &[F], +) -> Result<()> +where + C::Hasher: AlgebraicHasher, + [(); C::Hasher::HASH_SIZE]:, +{ + if let Some(proof) = cyclic_recursion_data.proof { + pw.set_bool_target(cyclic_recursion_data_target.base_case, false); + pw.set_proof_with_pis_target(&cyclic_recursion_data_target.proof, proof); + pw.set_verifier_data_target( + &cyclic_recursion_data_target.verifier_data, + cyclic_recursion_data.verifier_data, + ); + pw.set_proof_with_pis_target(&cyclic_recursion_data_target.dummy_proof, proof); + pw.set_verifier_data_target( + &cyclic_recursion_data_target.dummy_verifier_data, + cyclic_recursion_data.verifier_data, + ); + } else { + let (dummy_proof, dummy_data) = dummy_proof::(cyclic_recursion_data.common_data)?; + pw.set_bool_target(cyclic_recursion_data_target.base_case, true); + let mut proof = dummy_proof.clone(); + proof.public_inputs[0..public_inputs.len()].copy_from_slice(public_inputs); + let pis_len = proof.public_inputs.len(); + // The circuit checks that the verifier data is the same throughout the cycle, so + // we set the verifier data to the "real" verifier data even though it's unused in the base case. 
+ let num_cap = cyclic_recursion_data + .common_data + .config + .fri_config + .num_cap_elements(); + let s = pis_len - 4 - 4 * num_cap; + proof.public_inputs[s..s + 4] + .copy_from_slice(&cyclic_recursion_data.verifier_data.circuit_digest.elements); + for i in 0..num_cap { + proof.public_inputs[s + 4 * (1 + i)..s + 4 * (2 + i)].copy_from_slice( + &cyclic_recursion_data.verifier_data.constants_sigmas_cap.0[i].elements, + ); + } + + pw.set_proof_with_pis_target(&cyclic_recursion_data_target.proof, &proof); + pw.set_verifier_data_target( + &cyclic_recursion_data_target.verifier_data, + cyclic_recursion_data.verifier_data, + ); + pw.set_proof_with_pis_target(&cyclic_recursion_data_target.dummy_proof, &dummy_proof); + pw.set_verifier_data_target( + &cyclic_recursion_data_target.dummy_verifier_data, + &dummy_data, + ); + } + + Ok(()) +} + +/// Additional checks to be performed on a cyclic recursive proof in addition to verifying the proof. +/// Checks that the `base_case` flag is boolean and that the purported verifier data in the public inputs +/// match the real verifier data. 
+pub fn check_cyclic_proof_verifier_data< + F: RichField + Extendable, + C: GenericConfig, + const D: usize, +>( + proof: &ProofWithPublicInputs, + verifier_data: &VerifierOnlyCircuitData, + common_data: &CommonCircuitData, +) -> Result<()> +where + C::Hasher: AlgebraicHasher, +{ + let pis = VerifierOnlyCircuitData::::from_slice(&proof.public_inputs, common_data)?; + ensure!(verifier_data.constants_sigmas_cap == pis.constants_sigmas_cap); + ensure!(verifier_data.circuit_digest == pis.circuit_digest); + + Ok(()) +} + +#[cfg(test)] +mod tests { + + use anyhow::Result; + use plonky2_field::extension::Extendable; + use plonky2_field::types::PrimeField64; + + use crate::field::types::Field; + use crate::gates::noop::NoopGate; + use crate::hash::hash_types::RichField; + use crate::hash::hashing::hash_n_to_hash_no_pad; + use crate::hash::poseidon::{PoseidonHash, PoseidonPermutation}; + use crate::iop::witness::PartialWitness; + use crate::plonk::circuit_builder::CircuitBuilder; + use crate::plonk::circuit_data::{CircuitConfig, CommonCircuitData, VerifierCircuitTarget}; + use crate::plonk::config::{AlgebraicHasher, GenericConfig, Hasher, PoseidonGoldilocksConfig}; + use crate::recursion::cyclic_recursion::{ + check_cyclic_proof_verifier_data, set_cyclic_recursion_data_target, CyclicRecursionData, + }; + + // Generates `CommonCircuitData` usable for recursion. 
+ fn common_data_for_recursion< + F: RichField + Extendable, + C: GenericConfig, + const D: usize, + >() -> CommonCircuitData + where + C::Hasher: AlgebraicHasher, + [(); C::Hasher::HASH_SIZE]:, + { + let config = CircuitConfig::standard_recursion_config(); + let builder = CircuitBuilder::::new(config); + let data = builder.build::(); + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + let proof = builder.add_virtual_proof_with_pis::(&data.common); + let verifier_data = VerifierCircuitTarget { + constants_sigmas_cap: builder.add_virtual_cap(data.common.config.fri_config.cap_height), + circuit_digest: builder.add_virtual_hash(), + }; + builder.verify_proof::(proof, &verifier_data, &data.common); + let data = builder.build::(); + + let config = CircuitConfig::standard_recursion_config(); + let mut builder = CircuitBuilder::::new(config); + let proof = builder.add_virtual_proof_with_pis::(&data.common); + let verifier_data = VerifierCircuitTarget { + constants_sigmas_cap: builder.add_virtual_cap(data.common.config.fri_config.cap_height), + circuit_digest: builder.add_virtual_hash(), + }; + builder.verify_proof::(proof, &verifier_data, &data.common); + while builder.num_gates() < 1 << 12 { + builder.add_gate(NoopGate, vec![]); + } + builder.build::().common + } + + #[test] + fn test_cyclic_recursion() -> Result<()> { + const D: usize = 2; + type C = PoseidonGoldilocksConfig; + type F = >::F; + + let config = CircuitConfig::standard_recursion_config(); + let mut pw = PartialWitness::new(); + let mut builder = CircuitBuilder::::new(config); + + // Circuit that computes a repeated hash. + let initial_hash = builder.add_virtual_hash(); + builder.register_public_inputs(&initial_hash.elements); + // Hash from the previous proof. + let old_hash = builder.add_virtual_hash(); + // The input hash is either the previous hash or the initial hash depending on whether + // the last proof was a base case. 
+ let input_hash = builder.add_virtual_hash(); + let h = builder.hash_n_to_hash_no_pad::(input_hash.elements.to_vec()); + builder.register_public_inputs(&h.elements); + // Previous counter. + let old_counter = builder.add_virtual_target(); + let one = builder.one(); + let new_counter = builder.add_virtual_public_input(); + let old_pis = [ + initial_hash.elements.as_slice(), + old_hash.elements.as_slice(), + [old_counter].as_slice(), + ] + .concat(); + + let mut common_data = common_data_for_recursion::(); + + let base_case = builder.add_virtual_bool_target_safe(); + // Add cyclic recursion gadget. + let cyclic_data_target = + builder.cyclic_recursion::(base_case, &old_pis, &mut common_data)?; + let input_hash_bis = + builder.select_hash(cyclic_data_target.base_case, initial_hash, old_hash); + builder.connect_hashes(input_hash, input_hash_bis); + let not_base_case = builder.sub(one, cyclic_data_target.base_case.target); + // New counter is the previous counter +1 if the previous proof wasn't a base case. + let new_counter_bis = builder.add(old_counter, not_base_case); + builder.connect(new_counter, new_counter_bis); + + let cyclic_circuit_data = builder.build::(); + + let cyclic_recursion_data = CyclicRecursionData { + proof: &None, // Base case: We don't have a proof to put here yet. + verifier_data: &cyclic_circuit_data.verifier_only, + common_data: &cyclic_circuit_data.common, + }; + let initial_hash = [F::ZERO, F::ONE, F::TWO, F::from_canonical_usize(3)]; + set_cyclic_recursion_data_target( + &mut pw, + &cyclic_data_target, + &cyclic_recursion_data, + &initial_hash, + )?; + let proof = cyclic_circuit_data.prove(pw)?; + check_cyclic_proof_verifier_data( + &proof, + cyclic_recursion_data.verifier_data, + cyclic_recursion_data.common_data, + )?; + cyclic_circuit_data.verify(proof.clone())?; + + // 1st recursive layer. + let mut pw = PartialWitness::new(); + let cyclic_recursion_data = CyclicRecursionData { + proof: &Some(proof), // Input previous proof. 
+ verifier_data: &cyclic_circuit_data.verifier_only, + common_data: &cyclic_circuit_data.common, + }; + set_cyclic_recursion_data_target( + &mut pw, + &cyclic_data_target, + &cyclic_recursion_data, + &[], + )?; + let proof = cyclic_circuit_data.prove(pw)?; + check_cyclic_proof_verifier_data( + &proof, + cyclic_recursion_data.verifier_data, + cyclic_recursion_data.common_data, + )?; + cyclic_circuit_data.verify(proof.clone())?; + + // 2nd recursive layer. + let mut pw = PartialWitness::new(); + let cyclic_recursion_data = CyclicRecursionData { + proof: &Some(proof), // Input previous proof. + verifier_data: &cyclic_circuit_data.verifier_only, + common_data: &cyclic_circuit_data.common, + }; + set_cyclic_recursion_data_target( + &mut pw, + &cyclic_data_target, + &cyclic_recursion_data, + &[], + )?; + let proof = cyclic_circuit_data.prove(pw)?; + check_cyclic_proof_verifier_data( + &proof, + cyclic_recursion_data.verifier_data, + cyclic_recursion_data.common_data, + )?; + + // Verify that the proof correctly computes a repeated hash. 
+ let initial_hash = &proof.public_inputs[..4]; + let hash = &proof.public_inputs[4..8]; + let counter = proof.public_inputs[8]; + let mut h: [F; 4] = initial_hash.try_into().unwrap(); + assert_eq!( + hash, + std::iter::repeat_with(|| { + h = hash_n_to_hash_no_pad::(&h).elements; + h + }) + .nth(counter.to_canonical_u64() as usize) + .unwrap() + ); + + cyclic_circuit_data.verify(proof) + } +} diff --git a/plonky2/src/recursion/mod.rs b/plonky2/src/recursion/mod.rs new file mode 100644 index 00000000..33e8212e --- /dev/null +++ b/plonky2/src/recursion/mod.rs @@ -0,0 +1,3 @@ +pub mod conditional_recursive_verifier; +pub mod cyclic_recursion; +pub mod recursive_verifier; diff --git a/plonky2/src/plonk/recursive_verifier.rs b/plonky2/src/recursion/recursive_verifier.rs similarity index 95% rename from plonky2/src/plonk/recursive_verifier.rs rename to plonky2/src/recursion/recursive_verifier.rs index bb9076be..8dbab974 100644 --- a/plonky2/src/plonk/recursive_verifier.rs +++ b/plonky2/src/recursion/recursive_verifier.rs @@ -19,7 +19,7 @@ impl, const D: usize> CircuitBuilder { &mut self, proof_with_pis: ProofWithPublicInputsTarget, inner_verifier_data: &VerifierCircuitTarget, - inner_common_data: &CommonCircuitData, + inner_common_data: &CommonCircuitData, ) where C::Hasher: AlgebraicHasher, { @@ -29,14 +29,14 @@ impl, const D: usize> CircuitBuilder { ); let public_inputs_hash = self.hash_n_to_hash_no_pad::(proof_with_pis.public_inputs.clone()); - let challenges = proof_with_pis.get_challenges( + let challenges = proof_with_pis.get_challenges::( self, public_inputs_hash, inner_verifier_data.circuit_digest, inner_common_data, ); - self.verify_proof_with_challenges( + self.verify_proof_with_challenges::( proof_with_pis.proof, public_inputs_hash, challenges, @@ -52,7 +52,7 @@ impl, const D: usize> CircuitBuilder { public_inputs_hash: HashOutTarget, challenges: ProofChallengesTarget, inner_verifier_data: &VerifierCircuitTarget, - inner_common_data: &CommonCircuitData, + 
inner_common_data: &CommonCircuitData, ) where C::Hasher: AlgebraicHasher, { @@ -75,7 +75,7 @@ impl, const D: usize> CircuitBuilder { let vanishing_polys_zeta = with_context!( self, "evaluate the vanishing polynomial at our challenge point, zeta.", - eval_vanishing_poly_circuit( + eval_vanishing_poly_circuit::( self, inner_common_data, challenges.plonk_zeta, @@ -129,9 +129,9 @@ impl, const D: usize> CircuitBuilder { pub fn add_virtual_proof_with_pis>( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> ProofWithPublicInputsTarget { - let proof = self.add_virtual_proof(common_data); + let proof = self.add_virtual_proof::(common_data); let public_inputs = self.add_virtual_targets(common_data.num_public_inputs); ProofWithPublicInputsTarget { proof, @@ -141,7 +141,7 @@ impl, const D: usize> CircuitBuilder { fn add_virtual_proof>( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> ProofTarget { let config = &common_data.config; let fri_params = &common_data.fri_params; @@ -159,14 +159,14 @@ impl, const D: usize> CircuitBuilder { wires_cap: self.add_virtual_cap(cap_height), plonk_zs_partial_products_cap: self.add_virtual_cap(cap_height), quotient_polys_cap: self.add_virtual_cap(cap_height), - openings: self.add_opening_set(common_data), + openings: self.add_opening_set::(common_data), opening_proof: self.add_virtual_fri_proof(num_leaves_per_oracle, fri_params), } } fn add_opening_set>( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> OpeningSetTarget { let config = &common_data.config; let num_challenges = config.num_challenges; @@ -330,7 +330,7 @@ mod tests { ) -> Result<( ProofWithPublicInputs, VerifierOnlyCircuitData, - CommonCircuitData, + CommonCircuitData, )> where [(); C::Hasher::HASH_SIZE]:, @@ -356,7 +356,7 @@ mod tests { >( inner_proof: ProofWithPublicInputs, inner_vd: VerifierOnlyCircuitData, - inner_cd: CommonCircuitData, + inner_cd: CommonCircuitData, 
config: &CircuitConfig, min_degree_bits: Option, print_gate_counts: bool, @@ -364,7 +364,7 @@ mod tests { ) -> Result<( ProofWithPublicInputs, VerifierOnlyCircuitData, - CommonCircuitData, + CommonCircuitData, )> where InnerC::Hasher: AlgebraicHasher, @@ -372,7 +372,7 @@ mod tests { { let mut builder = CircuitBuilder::::new(config.clone()); let mut pw = PartialWitness::new(); - let pt = builder.add_virtual_proof_with_pis(&inner_cd); + let pt = builder.add_virtual_proof_with_pis::(&inner_cd); pw.set_proof_with_pis_target(&pt, &inner_proof); let inner_data = VerifierCircuitTarget { @@ -385,7 +385,7 @@ mod tests { ); pw.set_hash_target(inner_data.circuit_digest, inner_vd.circuit_digest); - builder.verify_proof(pt, &inner_data, &inner_cd); + builder.verify_proof::(pt, &inner_data, &inner_cd); if print_gate_counts { builder.print_gate_counts(0); @@ -422,7 +422,7 @@ mod tests { >( proof: &ProofWithPublicInputs, vd: &VerifierOnlyCircuitData, - cd: &CommonCircuitData, + cd: &CommonCircuitData, ) -> Result<()> where [(); C::Hasher::HASH_SIZE]:, diff --git a/plonky2/src/util/serialization.rs b/plonky2/src/util/serialization.rs index 978134b6..076e42f4 100644 --- a/plonky2/src/util/serialization.rs +++ b/plonky2/src/util/serialization.rs @@ -165,7 +165,7 @@ impl Buffer { const D: usize, >( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result> { let config = &common_data.config; let constants = self.read_field_ext_vec::(common_data.num_constants)?; @@ -233,7 +233,7 @@ impl Buffer { const D: usize, >( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result> { let config = &common_data.config; let salt = salt_size(common_data.fri_params.hiding); @@ -312,12 +312,12 @@ impl Buffer { const D: usize, >( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result>> { let config = &common_data.config; let mut fqrs = Vec::with_capacity(config.fri_config.num_query_rounds); for _ 
in 0..config.fri_config.num_query_rounds { - let initial_trees_proof = self.read_fri_initial_proof(common_data)?; + let initial_trees_proof = self.read_fri_initial_proof::(common_data)?; let steps = common_data .fri_params .reduction_arity_bits @@ -345,13 +345,13 @@ impl Buffer { } fn read_fri_proof, C: GenericConfig, const D: usize>( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result> { let config = &common_data.config; let commit_phase_merkle_caps = (0..common_data.fri_params.reduction_arity_bits.len()) .map(|_| self.read_merkle_cap(config.fri_config.cap_height)) .collect::>>()?; - let query_round_proofs = self.read_fri_query_rounds(common_data)?; + let query_round_proofs = self.read_fri_query_rounds::(common_data)?; let final_poly = PolynomialCoeffs::new( self.read_field_ext_vec::(common_data.fri_params.final_poly_len())?, ); @@ -376,14 +376,14 @@ impl Buffer { } pub fn read_proof, C: GenericConfig, const D: usize>( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result> { let config = &common_data.config; let wires_cap = self.read_merkle_cap(config.fri_config.cap_height)?; let plonk_zs_partial_products_cap = self.read_merkle_cap(config.fri_config.cap_height)?; let quotient_polys_cap = self.read_merkle_cap(config.fri_config.cap_height)?; - let openings = self.read_opening_set(common_data)?; - let opening_proof = self.read_fri_proof(common_data)?; + let openings = self.read_opening_set::(common_data)?; + let opening_proof = self.read_fri_proof::(common_data)?; Ok(Proof { wires_cap, @@ -415,7 +415,7 @@ impl Buffer { const D: usize, >( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result> { let proof = self.read_proof(common_data)?; let public_inputs = self.read_field_vec( @@ -460,7 +460,7 @@ impl Buffer { const D: usize, >( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result> { let config = 
&common_data.config; let original_indices = (0..config.fri_config.num_query_rounds) @@ -471,7 +471,7 @@ impl Buffer { indices.dedup(); let mut pairs = Vec::new(); for &i in &indices { - pairs.push((i, self.read_fri_initial_proof(common_data)?)); + pairs.push((i, self.read_fri_initial_proof::(common_data)?)); } let initial_trees_proofs = HashMap::from_iter(pairs); @@ -521,13 +521,13 @@ impl Buffer { const D: usize, >( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result> { let config = &common_data.config; let commit_phase_merkle_caps = (0..common_data.fri_params.reduction_arity_bits.len()) .map(|_| self.read_merkle_cap(config.fri_config.cap_height)) .collect::>>()?; - let query_round_proofs = self.read_compressed_fri_query_rounds(common_data)?; + let query_round_proofs = self.read_compressed_fri_query_rounds::(common_data)?; let final_poly = PolynomialCoeffs::new( self.read_field_ext_vec::(common_data.fri_params.final_poly_len())?, ); @@ -560,14 +560,14 @@ impl Buffer { const D: usize, >( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result> { let config = &common_data.config; let wires_cap = self.read_merkle_cap(config.fri_config.cap_height)?; let plonk_zs_partial_products_cap = self.read_merkle_cap(config.fri_config.cap_height)?; let quotient_polys_cap = self.read_merkle_cap(config.fri_config.cap_height)?; - let openings = self.read_opening_set(common_data)?; - let opening_proof = self.read_compressed_fri_proof(common_data)?; + let openings = self.read_opening_set::(common_data)?; + let opening_proof = self.read_compressed_fri_proof::(common_data)?; Ok(CompressedProof { wires_cap, @@ -599,7 +599,7 @@ impl Buffer { const D: usize, >( &mut self, - common_data: &CommonCircuitData, + common_data: &CommonCircuitData, ) -> Result> { let proof = self.read_compressed_proof(common_data)?; let public_inputs = self.read_field_vec( diff --git a/u32/src/gates/add_many_u32.rs 
b/u32/src/gates/add_many_u32.rs index 622c3df3..f37075cd 100644 --- a/u32/src/gates/add_many_u32.rs +++ b/u32/src/gates/add_many_u32.rs @@ -84,7 +84,7 @@ impl, const D: usize> U32AddManyGate { impl, const D: usize> Gate for U32AddManyGate { fn id(&self) -> String { - format!("{:?}", self) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/u32/src/gates/arithmetic_u32.rs b/u32/src/gates/arithmetic_u32.rs index c05ed86c..47889954 100644 --- a/u32/src/gates/arithmetic_u32.rs +++ b/u32/src/gates/arithmetic_u32.rs @@ -86,7 +86,7 @@ impl, const D: usize> U32ArithmeticGate { impl, const D: usize> Gate for U32ArithmeticGate { fn id(&self) -> String { - format!("{:?}", self) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/u32/src/gates/comparison.rs b/u32/src/gates/comparison.rs index 6af34208..a2a0cfcf 100644 --- a/u32/src/gates/comparison.rs +++ b/u32/src/gates/comparison.rs @@ -91,7 +91,7 @@ impl, const D: usize> ComparisonGate { impl, const D: usize> Gate for ComparisonGate { fn id(&self) -> String { - format!("{:?}", self, D) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/u32/src/gates/range_check_u32.rs b/u32/src/gates/range_check_u32.rs index 615abb98..6e8f2cd5 100644 --- a/u32/src/gates/range_check_u32.rs +++ b/u32/src/gates/range_check_u32.rs @@ -48,7 +48,7 @@ impl, const D: usize> U32RangeCheckGate { impl, const D: usize> Gate for U32RangeCheckGate { fn id(&self) -> String { - format!("{:?}", self) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/u32/src/gates/subtraction_u32.rs b/u32/src/gates/subtraction_u32.rs index 3737de55..b08d900b 100644 --- a/u32/src/gates/subtraction_u32.rs +++ b/u32/src/gates/subtraction_u32.rs @@ -80,7 +80,7 @@ impl, const D: usize> U32SubtractionGate { impl, const D: usize> Gate for U32SubtractionGate { fn id(&self) -> String { - format!("{:?}", self) + 
format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/util/src/lib.rs b/util/src/lib.rs index bbc2af98..b22a4236 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -31,7 +31,7 @@ pub fn log2_ceil(n: usize) -> usize { /// Computes `log_2(n)`, panicking if `n` is not a power of two. pub fn log2_strict(n: usize) -> usize { let res = n.trailing_zeros(); - assert!(n.wrapping_shr(res) == 1, "Not a power of two: {}", n); + assert!(n.wrapping_shr(res) == 1, "Not a power of two: {n}"); // Tell the optimizer about the semantics of `log2_strict`. i.e. it can replace `n` with // `1 << res` and vice versa. assume(n == 1 << res); diff --git a/waksman/src/gates/assert_le.rs b/waksman/src/gates/assert_le.rs index c67a7125..27242370 100644 --- a/waksman/src/gates/assert_le.rs +++ b/waksman/src/gates/assert_le.rs @@ -84,7 +84,7 @@ impl, const D: usize> AssertLessThanGate { impl, const D: usize> Gate for AssertLessThanGate { fn id(&self) -> String { - format!("{:?}", self, D) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec { diff --git a/waksman/src/gates/switch.rs b/waksman/src/gates/switch.rs index 2c2ca8c8..4509bf0a 100644 --- a/waksman/src/gates/switch.rs +++ b/waksman/src/gates/switch.rs @@ -74,7 +74,7 @@ impl, const D: usize> SwitchGate { impl, const D: usize> Gate for SwitchGate { fn id(&self) -> String { - format!("{:?}", self, D) + format!("{self:?}") } fn eval_unfiltered(&self, vars: EvaluationVars) -> Vec {