Dmitry Vagner 2023-02-13 11:31:08 -08:00
commit 2158c1d267
58 changed files with 2284 additions and 558 deletions

View File

@ -24,7 +24,7 @@ jobs:
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly-2022-11-23
toolchain: nightly
override: true
- name: rust-cache
@ -61,7 +61,7 @@ jobs:
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly-2022-11-23
toolchain: nightly
override: true
components: rustfmt, clippy

View File

@ -14,51 +14,19 @@
//! GT: X > Z, inputs X, Z, output CY, auxiliary output Y
//! LT: Z < X, inputs Z, X, output CY, auxiliary output Y
use itertools::{izip, Itertools};
use ethereum_types::U256;
use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::arithmetic::columns::*;
use crate::arithmetic::utils::read_value_u64_limbs;
use crate::arithmetic::utils::u256_to_array;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
fn u256_add_cc(input0: [u64; N_LIMBS], input1: [u64; N_LIMBS]) -> ([u64; N_LIMBS], u64) {
// Input and output have 16-bit limbs
let mut output = [0u64; N_LIMBS];
const MASK: u64 = (1u64 << LIMB_BITS) - 1u64;
let mut cy = 0u64;
for (i, a, b) in izip!(0.., input0, input1) {
let s = a + b + cy;
cy = s >> LIMB_BITS;
assert!(cy <= 1u64, "input limbs were larger than 16 bits");
output[i] = s & MASK;
}
(output, cy)
}
fn u256_sub_br(input0: [u64; N_LIMBS], input1: [u64; N_LIMBS]) -> ([u64; N_LIMBS], u64) {
const LIMB_BOUNDARY: u64 = 1 << LIMB_BITS;
const MASK: u64 = LIMB_BOUNDARY - 1u64;
let mut output = [0u64; N_LIMBS];
let mut br = 0u64;
for (i, a, b) in izip!(0.., input0, input1) {
let d = LIMB_BOUNDARY + a - b - br;
// if a < b, then d < 2^16 so br = 1
// if a >= b, then d >= 2^16 so br = 0
br = 1u64 - (d >> LIMB_BITS);
assert!(br <= 1u64, "input limbs were larger than 16 bits");
output[i] = d & MASK;
}
(output, br)
}
/// Generate row for ADD, SUB, GT and LT operations.
///
/// A row consists of four values, GENERAL_REGISTER_[012] and
@ -69,27 +37,35 @@ fn u256_sub_br(input0: [u64; N_LIMBS], input1: [u64; N_LIMBS]) -> ([u64; N_LIMBS
/// SUB: REGISTER_2 - REGISTER_0, output in REGISTER_1, ignore REGISTER_BIT
/// GT: REGISTER_0 > REGISTER_2, output in REGISTER_BIT, auxiliary output in REGISTER_1
/// LT: REGISTER_2 < REGISTER_0, output in REGISTER_BIT, auxiliary output in REGISTER_1
pub(crate) fn generate<F: RichField>(lv: &mut [F], filter: usize) {
pub(crate) fn generate<F: PrimeField64>(
lv: &mut [F],
filter: usize,
left_in: U256,
right_in: U256,
) {
// Swap left_in and right_in for LT
let (left_in, right_in) = if filter == IS_LT {
(right_in, left_in)
} else {
(left_in, right_in)
};
match filter {
IS_ADD => {
let x = read_value_u64_limbs(lv, GENERAL_REGISTER_0);
let y = read_value_u64_limbs(lv, GENERAL_REGISTER_1);
// x + y == z + cy*2^256
let (z, cy) = u256_add_cc(x, y);
lv[GENERAL_REGISTER_2].copy_from_slice(&z.map(F::from_canonical_u64));
lv[GENERAL_REGISTER_BIT] = F::from_canonical_u64(cy);
let (result, cy) = left_in.overflowing_add(right_in);
u256_to_array(&mut lv[GENERAL_REGISTER_0], left_in); // x
u256_to_array(&mut lv[GENERAL_REGISTER_1], right_in); // y
u256_to_array(&mut lv[GENERAL_REGISTER_2], result); // z
lv[GENERAL_REGISTER_BIT] = F::from_bool(cy);
}
IS_SUB | IS_GT | IS_LT => {
let x = read_value_u64_limbs(lv, GENERAL_REGISTER_0);
let z = read_value_u64_limbs(lv, GENERAL_REGISTER_2);
// y == z - x + cy*2^256
let (y, cy) = u256_sub_br(z, x);
lv[GENERAL_REGISTER_1].copy_from_slice(&y.map(F::from_canonical_u64));
lv[GENERAL_REGISTER_BIT] = F::from_canonical_u64(cy);
let (diff, cy) = right_in.overflowing_sub(left_in);
u256_to_array(&mut lv[GENERAL_REGISTER_0], left_in); // x
u256_to_array(&mut lv[GENERAL_REGISTER_2], right_in); // z
u256_to_array(&mut lv[GENERAL_REGISTER_1], diff); // y
lv[GENERAL_REGISTER_BIT] = F::from_bool(cy);
}
_ => panic!("unexpected operation filter"),
};
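One circuit serves SUB, GT and LT because the borrow bit of `z - x` is exactly the truth value of `x > z`; a minimal sketch of that fact, using only `U256` semantics:
use ethereum_types::U256;
fn gt_via_borrow(x: U256, z: U256) -> bool {
    // y = z - x mod 2^256; the borrow is 1 exactly when x > z.
    // GT reads the borrow directly; LT swaps its inputs first, as above.
    let (_y, borrow) = z.overflowing_sub(x);
    borrow
}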
@ -144,7 +120,7 @@ const GOLDILOCKS_INVERSE_65536: u64 = 18446462594437939201;
/// is true if `(x_n + y_n)*2^(16*n) == cy_{n-1}*2^(16*n) +
/// z_n*2^(16*n) + cy_n*2^(16*n)` (again, this is `t` on line 127ff)
/// with the last `cy_n` checked against the `given_cy` given as input.
pub(crate) fn eval_packed_generic_add_cc<P: PackedField>(
pub(crate) fn eval_packed_generic_addcy<P: PackedField>(
yield_constr: &mut ConstraintConsumer<P>,
filter: P,
x: &[P],
@ -202,11 +178,11 @@ pub fn eval_packed_generic<P: PackedField>(
eval_packed_generic_check_is_one_bit(yield_constr, op_filter, cy);
// x + y = z + cy*2^256
eval_packed_generic_add_cc(yield_constr, op_filter, x, y, z, cy, false);
eval_packed_generic_addcy(yield_constr, op_filter, x, y, z, cy, false);
}
#[allow(clippy::needless_collect)]
pub(crate) fn eval_ext_circuit_add_cc<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit_addcy<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
filter: ExtensionTarget<D>,
@ -272,7 +248,7 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
let op_filter = builder.add_many_extension([is_add, is_sub, is_lt, is_gt]);
eval_ext_circuit_check_is_one_bit(builder, yield_constr, op_filter, cy);
eval_ext_circuit_add_cc(builder, yield_constr, op_filter, x, y, z, cy, false);
eval_ext_circuit_addcy(builder, yield_constr, op_filter, x, y, z, cy, false);
}
#[cfg(test)]
@ -328,7 +304,7 @@ mod tests {
.map(|_| F::from_canonical_u16(rng.gen::<u16>()));
// set operation filter and ensure all constraints are
// satisfied. we have to explicitly set the other
// satisfied. We have to explicitly set the other
// operation filters to zero since all are treated by
// the call.
lv[IS_ADD] = F::ZERO;
@ -337,7 +313,10 @@ mod tests {
lv[IS_GT] = F::ZERO;
lv[op_filter] = F::ONE;
generate(&mut lv, op_filter);
let left_in = U256::from(rng.gen::<[u8; 32]>());
let right_in = U256::from(rng.gen::<[u8; 32]>());
generate(&mut lv, op_filter, left_in, right_in);
let mut constraint_consumer = ConstraintConsumer::new(
vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],

View File

@ -4,11 +4,12 @@ use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::util::transpose;
use crate::arithmetic::operations::Operation;
use crate::arithmetic::{addcc, columns, modular, mul};
use crate::arithmetic::{addcy, columns, modular, mul, Operation};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::lookup::{eval_lookups, eval_lookups_circuit, permuted_cols};
use crate::permutation::PermutationPair;
@ -33,6 +34,9 @@ impl<F: RichField, const D: usize> ArithmeticStark<F, D> {
for i in 0..RANGE_MAX {
cols[columns::RANGE_COUNTER][i] = F::from_canonical_usize(i);
}
for i in RANGE_MAX..n_rows {
cols[columns::RANGE_COUNTER][i] = F::from_canonical_usize(RANGE_MAX - 1);
}
// For each column c in cols, generate the range-check
// permutations and put them in the corresponding range-check
@ -44,7 +48,8 @@ impl<F: RichField, const D: usize> ArithmeticStark<F, D> {
}
}
pub fn generate(&self, operations: Vec<&dyn Operation<F>>) -> Vec<PolynomialValues<F>> {
#[allow(unused)]
pub(crate) fn generate(&self, operations: Vec<Operation>) -> Vec<PolynomialValues<F>> {
// The number of rows reserved is the smallest value that's
// guaranteed to avoid a reallocation: The only ops that use
// two rows are the modular operations and DIV, so the only
@ -96,14 +101,25 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticSta
let lv = vars.local_values;
let nv = vars.next_values;
// Check the range column: First value must be 0, last row
// must be 2^16-1, and intermediate rows must increment by 0
// or 1.
let rc1 = lv[columns::RANGE_COUNTER];
let rc2 = nv[columns::RANGE_COUNTER];
yield_constr.constraint_first_row(rc1);
let incr = rc2 - rc1;
yield_constr.constraint_transition(incr * incr - incr);
let range_max = P::Scalar::from_canonical_u64((RANGE_MAX - 1) as u64);
yield_constr.constraint_last_row(rc1 - range_max);
mul::eval_packed_generic(lv, yield_constr);
addcc::eval_packed_generic(lv, yield_constr);
addcy::eval_packed_generic(lv, yield_constr);
modular::eval_packed_generic(lv, nv, yield_constr);
}
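A plain-integer sketch of the column shape those three range-counter constraints pin down, assuming the non-decreasing counter generated above (illustration only):
fn check_range_counter(col: &[u64], range_max: u64) {
    assert_eq!(col[0], 0); // constraint_first_row: counter starts at 0
    for w in col.windows(2) {
        let incr = w[1] - w[0];
        assert_eq!(incr * incr, incr); // constraint_transition: incr is 0 or 1
    }
    assert_eq!(*col.last().unwrap(), range_max - 1); // constraint_last_row
}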
fn eval_ext_circuit(
&self,
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
builder: &mut CircuitBuilder<F, D>,
vars: StarkEvaluationTargets<D, { Self::COLUMNS }>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
@ -114,8 +130,20 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticSta
let lv = vars.local_values;
let nv = vars.next_values;
let rc1 = lv[columns::RANGE_COUNTER];
let rc2 = nv[columns::RANGE_COUNTER];
yield_constr.constraint_first_row(builder, rc1);
let incr = builder.sub_extension(rc2, rc1);
let t = builder.mul_sub_extension(incr, incr, incr);
yield_constr.constraint_transition(builder, t);
let range_max =
builder.constant_extension(F::Extension::from_canonical_usize(RANGE_MAX - 1));
let t = builder.sub_extension(rc1, range_max);
yield_constr.constraint_last_row(builder, t);
mul::eval_ext_circuit(builder, lv, yield_constr);
addcc::eval_ext_circuit(builder, lv, yield_constr);
addcy::eval_ext_circuit(builder, lv, yield_constr);
modular::eval_ext_circuit(builder, lv, nv, yield_constr);
}
@ -148,7 +176,7 @@ mod tests {
use rand_chacha::ChaCha8Rng;
use super::{columns, ArithmeticStark};
use crate::arithmetic::operations::*;
use crate::arithmetic::*;
use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};
#[test]
@ -189,34 +217,37 @@ mod tests {
};
// 123 + 456 == 579
let add = SimpleBinaryOp::new(columns::IS_ADD, U256::from(123), U256::from(456));
let add = Operation::binary(BinaryOperator::Add, U256::from(123), U256::from(456));
// (123 * 456) % 1007 == 703
let mulmod = ModularBinaryOp::new(
columns::IS_MULMOD,
let mulmod = Operation::ternary(
TernaryOperator::MulMod,
U256::from(123),
U256::from(456),
U256::from(1007),
);
// (123 - 456) % 1007 == 674
let submod = ModularBinaryOp::new(
columns::IS_SUBMOD,
U256::from(123),
U256::from(456),
// (1234 + 567) % 1007 == 794
let addmod = Operation::ternary(
TernaryOperator::AddMod,
U256::from(1234),
U256::from(567),
U256::from(1007),
);
// 123 * 456 == 56088
let mul = SimpleBinaryOp::new(columns::IS_MUL, U256::from(123), U256::from(456));
// 128 % 13 == 11
let modop = ModOp {
input: U256::from(128),
modulus: U256::from(13),
};
let mul = Operation::binary(BinaryOperator::Mul, U256::from(123), U256::from(456));
// 128 / 13 == 9
let div = DivOp {
numerator: U256::from(128),
denominator: U256::from(13),
};
let ops: Vec<&dyn Operation<F>> = vec![&add, &mulmod, &submod, &mul, &div, &modop];
let div = Operation::binary(BinaryOperator::Div, U256::from(128), U256::from(13));
// 128 < 13 == 0
let lt1 = Operation::binary(BinaryOperator::Lt, U256::from(128), U256::from(13));
// 13 < 128 == 1
let lt2 = Operation::binary(BinaryOperator::Lt, U256::from(13), U256::from(128));
// 128 < 128 == 0
let lt3 = Operation::binary(BinaryOperator::Lt, U256::from(128), U256::from(128));
// 128 % 13 == 11
let modop = Operation::binary(BinaryOperator::Mod, U256::from(128), U256::from(13));
let ops: Vec<Operation> = vec![add, mulmod, addmod, mul, modop, lt1, lt2, lt3, div];
let pols = stark.generate(ops);
@ -228,15 +259,21 @@ mod tests {
&& pols.iter().all(|v| v.len() == super::RANGE_MAX)
);
// Wrap the single value GENERAL_REGISTER_BIT in a Range.
let cmp_range = columns::GENERAL_REGISTER_BIT..columns::GENERAL_REGISTER_BIT + 1;
// Each operation has a single word answer that we can check
let expected_output = [
// Row (some ops take two rows), col, expected
(0, columns::GENERAL_REGISTER_2, 579), // ADD_OUTPUT
(1, columns::MODULAR_OUTPUT, 703),
(3, columns::MODULAR_OUTPUT, 674),
(5, columns::MUL_OUTPUT, 56088),
(6, columns::MODULAR_OUTPUT, 11),
(8, columns::DIV_OUTPUT, 9),
(0, &columns::GENERAL_REGISTER_2, 579), // ADD_OUTPUT
(1, &columns::MODULAR_OUTPUT, 703),
(3, &columns::MODULAR_OUTPUT, 794),
(5, &columns::MUL_OUTPUT, 56088),
(6, &columns::MODULAR_OUTPUT, 11),
(8, &cmp_range, 0),
(9, &cmp_range, 1),
(10, &cmp_range, 0),
(11, &columns::DIV_OUTPUT, 9),
];
for (row, col, expected) in expected_output {
@ -269,18 +306,14 @@ mod tests {
let ops = (0..super::RANGE_MAX)
.map(|_| {
SimpleBinaryOp::new(
columns::IS_MUL,
Operation::binary(
BinaryOperator::Mul,
U256::from(rng.gen::<[u8; 32]>()),
U256::from(rng.gen::<[u8; 32]>()),
)
})
.collect::<Vec<_>>();
// TODO: This is clearly not the right way to build this
// vector; I can't work out how to do it using the map above
// though, with or without Boxes.
let ops = ops.iter().map(|o| o as &dyn Operation<F>).collect();
let pols = stark.generate(ops);
// Trace should always have NUM_ARITH_COLUMNS columns and
@ -293,8 +326,8 @@ mod tests {
let ops = (0..super::RANGE_MAX)
.map(|_| {
ModularBinaryOp::new(
columns::IS_MULMOD,
Operation::ternary(
TernaryOperator::MulMod,
U256::from(rng.gen::<[u8; 32]>()),
U256::from(rng.gen::<[u8; 32]>()),
U256::from(rng.gen::<[u8; 32]>()),
@ -302,10 +335,6 @@ mod tests {
})
.collect::<Vec<_>>();
// TODO: This is clearly not the right way to build this
// vector; I can't work out how to do it using the map above
// though, with or without Boxes.
let ops = ops.iter().map(|o| o as &dyn Operation<F>).collect();
let pols = stark.generate(ops);
// Trace should always have NUM_ARITH_COLUMNS columns and

View File

@ -101,7 +101,9 @@ pub(crate) const MODULAR_AUX_INPUT_HI: Range<usize> = AUX_REGISTER_2;
// Must be set to MOD_IS_ZERO for DIV operation i.e. MOD_IS_ZERO * lv[IS_DIV]
pub(crate) const MODULAR_DIV_DENOM_IS_ZERO: usize = AUX_REGISTER_2.end;
#[allow(unused)] // TODO: Will be used when hooking into the CPU
pub(crate) const DIV_NUMERATOR: Range<usize> = MODULAR_INPUT_0;
#[allow(unused)] // TODO: Will be used when hooking into the CPU
pub(crate) const DIV_DENOMINATOR: Range<usize> = MODULAR_MODULUS;
#[allow(unused)] // TODO: Will be used when hooking into the CPU
pub(crate) const DIV_OUTPUT: Range<usize> =

View File

@ -1,9 +1,10 @@
use ethereum_types::U256;
use plonky2::field::types::PrimeField64;
use crate::bn254_arithmetic::BN_BASE;
use crate::util::{addmod, mulmod, submod};
mod addcc;
mod addcy;
mod modular;
mod mul;
mod utils;
@ -11,8 +12,6 @@ mod utils;
pub mod arithmetic_stark;
pub(crate) mod columns;
pub mod operations;
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum BinaryOperator {
Add,
@ -47,31 +46,36 @@ impl BinaryOperator {
input0 % input1
}
}
BinaryOperator::Lt => {
if input0 < input1 {
U256::one()
} else {
U256::zero()
}
}
BinaryOperator::Gt => {
if input0 > input1 {
U256::one()
} else {
U256::zero()
}
}
BinaryOperator::Lt => U256::from((input0 < input1) as u8),
BinaryOperator::Gt => U256::from((input0 > input1) as u8),
BinaryOperator::AddFp254 => addmod(input0, input1, BN_BASE),
BinaryOperator::MulFp254 => mulmod(input0, input1, BN_BASE),
BinaryOperator::SubFp254 => submod(input0, input1, BN_BASE),
}
}
pub(crate) fn row_filter(&self) -> usize {
match self {
BinaryOperator::Add => columns::IS_ADD,
BinaryOperator::Mul => columns::IS_MUL,
BinaryOperator::Sub => columns::IS_SUB,
BinaryOperator::Div => columns::IS_DIV,
BinaryOperator::Mod => columns::IS_MOD,
BinaryOperator::Lt => columns::IS_LT,
BinaryOperator::Gt => columns::IS_GT,
BinaryOperator::AddFp254 => columns::IS_ADDMOD,
BinaryOperator::MulFp254 => columns::IS_MULMOD,
BinaryOperator::SubFp254 => columns::IS_SUBMOD,
}
}
}
#[allow(clippy::enum_variant_names)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum TernaryOperator {
AddMod,
MulMod,
SubMod,
}
impl TernaryOperator {
@ -79,6 +83,15 @@ impl TernaryOperator {
match self {
TernaryOperator::AddMod => addmod(input0, input1, input2),
TernaryOperator::MulMod => mulmod(input0, input1, input2),
TernaryOperator::SubMod => submod(input0, input1, input2),
}
}
pub(crate) fn row_filter(&self) -> usize {
match self {
TernaryOperator::AddMod => columns::IS_ADDMOD,
TernaryOperator::MulMod => columns::IS_MULMOD,
TernaryOperator::SubMod => columns::IS_SUBMOD,
}
}
}
@ -134,4 +147,72 @@ impl Operation {
Operation::TernaryOperation { result, .. } => *result,
}
}
/// Convert operation into one or two rows of the trace.
///
/// Morally these types should be [F; NUM_ARITH_COLUMNS], but we
/// use vectors because that's what utils::transpose (which consumes
/// the result of this function as part of the range check code)
/// expects.
fn to_rows<F: PrimeField64>(&self) -> (Vec<F>, Option<Vec<F>>) {
match *self {
Operation::BinaryOperation {
operator,
input0,
input1,
result,
} => binary_op_to_rows(operator, input0, input1, result),
Operation::TernaryOperation {
operator,
input0,
input1,
input2,
result,
} => ternary_op_to_rows(operator.row_filter(), input0, input1, input2, result),
}
}
}
fn ternary_op_to_rows<F: PrimeField64>(
row_filter: usize,
input0: U256,
input1: U256,
input2: U256,
_result: U256,
) -> (Vec<F>, Option<Vec<F>>) {
let mut row1 = vec![F::ZERO; columns::NUM_ARITH_COLUMNS];
let mut row2 = vec![F::ZERO; columns::NUM_ARITH_COLUMNS];
row1[row_filter] = F::ONE;
modular::generate(&mut row1, &mut row2, row_filter, input0, input1, input2);
(row1, Some(row2))
}
fn binary_op_to_rows<F: PrimeField64>(
op: BinaryOperator,
input0: U256,
input1: U256,
result: U256,
) -> (Vec<F>, Option<Vec<F>>) {
let mut row = vec![F::ZERO; columns::NUM_ARITH_COLUMNS];
row[op.row_filter()] = F::ONE;
match op {
BinaryOperator::Add | BinaryOperator::Sub | BinaryOperator::Lt | BinaryOperator::Gt => {
addcy::generate(&mut row, op.row_filter(), input0, input1);
(row, None)
}
BinaryOperator::Mul => {
mul::generate(&mut row, input0, input1);
(row, None)
}
BinaryOperator::Div | BinaryOperator::Mod => {
ternary_op_to_rows::<F>(op.row_filter(), input0, U256::zero(), input1, result)
}
BinaryOperator::AddFp254 | BinaryOperator::MulFp254 | BinaryOperator::SubFp254 => {
ternary_op_to_rows::<F>(op.row_filter(), input0, input1, BN_BASE, result)
}
}
}
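A semantics-only sketch of the lowering above (illustration): DIV and MOD reuse the two-row modular machinery by placing the denominator in the modulus slot with a zero second input, since x % m == (x + 0) % m, while the *Fp254 ops fix the modulus to BN_BASE:
use ethereum_types::U256;
// Inputs as they land in (MODULAR_INPUT_0, MODULAR_INPUT_1, MODULAR_MODULUS).
fn lowered_inputs(is_fp254: bool, input0: U256, input1: U256, bn_base: U256) -> (U256, U256, U256) {
    if is_fp254 {
        (input0, input1, bn_base) // e.g. AddFp254: (input0 + input1) % BN_BASE
    } else {
        (input0, U256::zero(), input1) // Div/Mod: numerator, 0, denominator
    }
}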

View File

@ -108,17 +108,18 @@
//! only require 96 columns, or 80 if the output doesn't need to be
//! reduced.
use ethereum_types::U256;
use num::bigint::Sign;
use num::{BigInt, One, Zero};
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use super::columns;
use crate::arithmetic::addcc::{eval_ext_circuit_add_cc, eval_packed_generic_add_cc};
use crate::arithmetic::addcy::{eval_ext_circuit_addcy, eval_packed_generic_addcy};
use crate::arithmetic::columns::*;
use crate::arithmetic::utils::*;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
@ -189,7 +190,7 @@ fn bigint_to_columns<const N: usize>(num: &BigInt) -> [i64; N] {
///
/// NB: `operation` can set the higher order elements in its result to
/// zero if they are not used.
fn generate_modular_op<F: RichField>(
fn generate_modular_op<F: PrimeField64>(
lv: &mut [F],
nv: &mut [F],
filter: usize,
@ -213,6 +214,13 @@ fn generate_modular_op<F: RichField>(
let mut constr_poly = [0i64; 2 * N_LIMBS];
constr_poly[..2 * N_LIMBS - 1].copy_from_slice(&operation(input0_limbs, input1_limbs));
// two_exp_256 == 2^256
let two_exp_256 = {
let mut t = BigInt::zero();
t.set_bit(256, true);
t
};
let mut mod_is_zero = F::ZERO;
if modulus.is_zero() {
if filter == columns::IS_DIV {
@ -242,8 +250,8 @@ fn generate_modular_op<F: RichField>(
let quot = (&input - &output) / &modulus; // exact division; can be -ve
let quot_limbs = bigint_to_columns::<{ 2 * N_LIMBS }>(&quot);
// output < modulus here, so the proof requires (modulus - output).
let out_aux_red = bigint_to_columns::<N_LIMBS>(&(modulus - output));
// output < modulus here; the proof requires (output - modulus) % 2^256:
let out_aux_red = bigint_to_columns::<N_LIMBS>(&(two_exp_256 - modulus + output));
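// Note: since 0 <= output < modulus, 2^256 - modulus + output lies in
// [0, 2^256), and
//     modulus + (2^256 - modulus + output) == output + 1 * 2^256,
// which is exactly the relation eval_packed_generic_addcy verifies
// below with is_less_than == 1.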
// constr_poly is the array of coefficients of the polynomial
//
@ -283,8 +291,20 @@ fn generate_modular_op<F: RichField>(
/// Generate the output and auxiliary values for modular operations.
///
/// `filter` must be one of `columns::IS_{ADDMOD,SUBMOD,MULMOD,MOD,DIV}`.
pub(crate) fn generate<F: RichField>(lv: &mut [F], nv: &mut [F], filter: usize) {
pub(crate) fn generate<F: PrimeField64>(
lv: &mut [F],
nv: &mut [F],
filter: usize,
input0: U256,
input1: U256,
modulus: U256,
) {
debug_assert!(lv.len() == NUM_ARITH_COLUMNS && nv.len() == NUM_ARITH_COLUMNS);
u256_to_array(&mut lv[MODULAR_INPUT_0], input0);
u256_to_array(&mut lv[MODULAR_INPUT_1], input1);
u256_to_array(&mut lv[MODULAR_MODULUS], modulus);
match filter {
columns::IS_ADDMOD => generate_modular_op(lv, nv, filter, pol_add),
columns::IS_SUBMOD => generate_modular_op(lv, nv, filter, pol_sub),
@ -332,30 +352,30 @@ fn modular_constr_poly<P: PackedField>(
yield_constr.constraint_transition(filter * (mod_is_zero * lv[IS_DIV] - div_denom_is_zero));
// Needed to compensate for adding mod_is_zero to modulus above,
// since the call eval_packed_generic_add_cc() below subtracts modulus
// since the call eval_packed_generic_addcy() below subtracts modulus
// to verify in the case of a DIV.
output[0] += div_denom_is_zero;
// Verify that the output is reduced, i.e. output < modulus.
let out_aux_red = &nv[MODULAR_OUT_AUX_RED];
// This sets is_greater_than to 0 unless we get mod_is_zero when
// doing a DIV; in that case, we need is_greater_than=1, since
// eval_packed_generic_add_cc checks
// This sets is_less_than to 1 unless we get mod_is_zero when
// doing a DIV; in that case, we need is_less_than=0, since
// eval_packed_generic_addcy checks
//
// output + out_aux_red == modulus + is_greater_than*2^256
// modulus + out_aux_red == output + is_less_than*2^256
//
// and we were given output = out_aux_red
let is_greater_than = mod_is_zero * lv[IS_DIV];
// and we are given output = out_aux_red when modulus is zero.
let is_less_than = P::ONES - mod_is_zero * lv[IS_DIV];
// NB: output and modulus in lv while out_aux_red and
// is_greater_than (via mod_is_zero) depend on nv, hence the
// is_less_than (via mod_is_zero) depend on nv, hence the
// 'is_two_row_op' argument is set to 'true'.
eval_packed_generic_add_cc(
eval_packed_generic_addcy(
yield_constr,
filter,
&output,
out_aux_red,
&modulus,
is_greater_than,
out_aux_red,
&output,
is_less_than,
true,
);
// restore output[0]
@ -483,16 +503,18 @@ fn modular_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>
output[0] = builder.add_extension(output[0], div_denom_is_zero);
let out_aux_red = &nv[MODULAR_OUT_AUX_RED];
let is_greater_than = builder.mul_extension(mod_is_zero, lv[IS_DIV]);
let one = builder.one_extension();
let is_less_than =
builder.arithmetic_extension(F::NEG_ONE, F::ONE, mod_is_zero, lv[IS_DIV], one);
eval_ext_circuit_add_cc(
eval_ext_circuit_addcy(
builder,
yield_constr,
filter,
&output,
out_aux_red,
&modulus,
is_greater_than,
out_aux_red,
&output,
is_less_than,
true,
);
output[0] = builder.sub_extension(output[0], div_denom_is_zero);
@ -574,7 +596,6 @@ pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
#[cfg(test)]
mod tests {
use itertools::izip;
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::field::types::{Field, Sample};
use rand::{Rng, SeedableRng};
@ -620,38 +641,40 @@ mod tests {
type F = GoldilocksField;
let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
let mut nv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
for op_filter in [IS_ADDMOD, IS_DIV, IS_SUBMOD, IS_MOD, IS_MULMOD] {
// Reset operation columns, then select one
lv[IS_ADDMOD] = F::ZERO;
lv[IS_SUBMOD] = F::ZERO;
lv[IS_MULMOD] = F::ZERO;
lv[IS_MOD] = F::ZERO;
lv[IS_DIV] = F::ZERO;
lv[op_filter] = F::ONE;
for i in 0..N_RND_TESTS {
// set inputs to random values
for (ai, bi, mi) in izip!(MODULAR_INPUT_0, MODULAR_INPUT_1, MODULAR_MODULUS) {
lv[ai] = F::from_canonical_u16(rng.gen());
lv[bi] = F::from_canonical_u16(rng.gen());
lv[mi] = F::from_canonical_u16(rng.gen());
}
let mut lv = [F::default(); NUM_ARITH_COLUMNS]
.map(|_| F::from_canonical_u16(rng.gen::<u16>()));
let mut nv = [F::default(); NUM_ARITH_COLUMNS]
.map(|_| F::from_canonical_u16(rng.gen::<u16>()));
// Reset operation columns, then select one
lv[IS_ADDMOD] = F::ZERO;
lv[IS_SUBMOD] = F::ZERO;
lv[IS_MULMOD] = F::ZERO;
lv[IS_MOD] = F::ZERO;
lv[IS_DIV] = F::ZERO;
lv[op_filter] = F::ONE;
let input0 = U256::from(rng.gen::<[u8; 32]>());
let input1 = U256::from(rng.gen::<[u8; 32]>());
let mut modulus_limbs = [0u8; 32];
// For the second half of the tests, zero the top
// `32 - start` bytes of the modulus so it is much
// smaller than the inputs.
if i > N_RND_TESTS / 2 {
// 1 <= start < N_LIMBS
let start = (rng.gen::<usize>() % (N_LIMBS - 1)) + 1;
for mi in MODULAR_MODULUS.skip(start) {
lv[mi] = F::ZERO;
let start = (rng.gen::<usize>() % (modulus_limbs.len() - 1)) + 1;
for mi in modulus_limbs.iter_mut().skip(start) {
*mi = 0u8;
}
}
let modulus = U256::from(modulus_limbs);
generate(&mut lv, &mut nv, op_filter);
generate(&mut lv, &mut nv, op_filter, input0, input1, modulus);
let mut constraint_consumer = ConstraintConsumer::new(
vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
@ -672,29 +695,29 @@ mod tests {
type F = GoldilocksField;
let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
let mut nv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
for op_filter in [IS_ADDMOD, IS_SUBMOD, IS_DIV, IS_MOD, IS_MULMOD] {
// Reset operation columns, then select one
lv[IS_ADDMOD] = F::ZERO;
lv[IS_SUBMOD] = F::ZERO;
lv[IS_MULMOD] = F::ZERO;
lv[IS_MOD] = F::ZERO;
lv[IS_DIV] = F::ZERO;
lv[op_filter] = F::ONE;
for _i in 0..N_RND_TESTS {
// set inputs to random values and the modulus to zero;
// the output is defined to be zero when modulus is zero.
let mut lv = [F::default(); NUM_ARITH_COLUMNS]
.map(|_| F::from_canonical_u16(rng.gen::<u16>()));
let mut nv = [F::default(); NUM_ARITH_COLUMNS]
.map(|_| F::from_canonical_u16(rng.gen::<u16>()));
for (ai, bi, mi) in izip!(MODULAR_INPUT_0, MODULAR_INPUT_1, MODULAR_MODULUS) {
lv[ai] = F::from_canonical_u16(rng.gen());
lv[bi] = F::from_canonical_u16(rng.gen());
lv[mi] = F::ZERO;
}
// Reset operation columns, then select one
lv[IS_ADDMOD] = F::ZERO;
lv[IS_SUBMOD] = F::ZERO;
lv[IS_MULMOD] = F::ZERO;
lv[IS_MOD] = F::ZERO;
lv[IS_DIV] = F::ZERO;
lv[op_filter] = F::ONE;
generate(&mut lv, &mut nv, op_filter);
let input0 = U256::from(rng.gen::<[u8; 32]>());
let input1 = U256::from(rng.gen::<[u8; 32]>());
let modulus = U256::zero();
generate(&mut lv, &mut nv, op_filter, input0, input1, modulus);
// check that the correct output was generated
if op_filter == IS_DIV {

View File

@ -55,9 +55,10 @@
//! file `modular.rs`), we don't need to check that output is reduced,
//! since any value of output is less than β^16 and is hence reduced.
use ethereum_types::U256;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
@ -66,7 +67,12 @@ use crate::arithmetic::columns::*;
use crate::arithmetic::utils::*;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
pub fn generate<F: RichField>(lv: &mut [F]) {
pub fn generate<F: PrimeField64>(lv: &mut [F], left_in: U256, right_in: U256) {
// TODO: It would probably be clearer/cleaner to read the U256
// into an [i64;N] and then copy that to the lv table.
u256_to_array(&mut lv[MUL_INPUT_0], left_in);
u256_to_array(&mut lv[MUL_INPUT_1], right_in);
let input0 = read_value_i64_limbs(lv, MUL_INPUT_0);
let input1 = read_value_i64_limbs(lv, MUL_INPUT_1);
@ -252,7 +258,9 @@ mod tests {
lv[bi] = F::from_canonical_u16(rng.gen());
}
generate(&mut lv);
let left_in = U256::from(rng.gen::<[u8; 32]>());
let right_in = U256::from(rng.gen::<[u8; 32]>());
generate(&mut lv, left_in, right_in);
let mut constraint_consumer = ConstraintConsumer::new(
vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],

View File

@ -1,166 +0,0 @@
use ethereum_types::U256;
use plonky2::hash::hash_types::RichField;
use static_assertions::const_assert;
use crate::arithmetic::columns::*;
use crate::arithmetic::{addcc, modular, mul};
#[inline]
fn u64_to_array<F: RichField>(out: &mut [F], x: u64) {
const_assert!(LIMB_BITS == 16);
debug_assert!(out.len() == 4);
out[0] = F::from_canonical_u16(x as u16);
out[1] = F::from_canonical_u16((x >> 16) as u16);
out[2] = F::from_canonical_u16((x >> 32) as u16);
out[3] = F::from_canonical_u16((x >> 48) as u16);
}
fn u256_to_array<F: RichField>(out: &mut [F], x: U256) {
const_assert!(N_LIMBS == 16);
debug_assert!(out.len() == N_LIMBS);
u64_to_array(&mut out[0..4], x.0[0]);
u64_to_array(&mut out[4..8], x.0[1]);
u64_to_array(&mut out[8..12], x.0[2]);
u64_to_array(&mut out[12..16], x.0[3]);
}
pub trait Operation<F: RichField> {
/// Convert operation into one or two rows of the trace.
///
/// Morally these types should be [F; NUM_ARITH_COLUMNS], but we
/// use vectors because that's what utils::transpose expects.
fn to_rows(&self) -> (Vec<F>, Option<Vec<F>>);
}
pub struct SimpleBinaryOp {
/// The operation is identified using the associated filter from
/// `columns::IS_ADD` etc., stored in `op_filter`.
op_filter: usize,
input0: U256,
input1: U256,
}
impl SimpleBinaryOp {
pub fn new(op_filter: usize, input0: U256, input1: U256) -> Self {
assert!(
op_filter == IS_ADD
|| op_filter == IS_SUB
|| op_filter == IS_MUL
|| op_filter == IS_LT
|| op_filter == IS_GT
);
Self {
op_filter,
input0,
input1,
}
}
}
impl<F: RichField> Operation<F> for SimpleBinaryOp {
fn to_rows(&self) -> (Vec<F>, Option<Vec<F>>) {
let mut row = vec![F::ZERO; NUM_ARITH_COLUMNS];
row[self.op_filter] = F::ONE;
if self.op_filter == IS_SUB || self.op_filter == IS_GT {
u256_to_array(&mut row[GENERAL_REGISTER_2], self.input0);
u256_to_array(&mut row[GENERAL_REGISTER_0], self.input1);
} else if self.op_filter == IS_LT {
u256_to_array(&mut row[GENERAL_REGISTER_0], self.input0);
u256_to_array(&mut row[GENERAL_REGISTER_2], self.input1);
} else {
assert!(
self.op_filter == IS_ADD || self.op_filter == IS_MUL,
"unrecognised operation"
);
u256_to_array(&mut row[GENERAL_REGISTER_0], self.input0);
u256_to_array(&mut row[GENERAL_REGISTER_1], self.input1);
}
if self.op_filter == IS_MUL {
mul::generate(&mut row);
} else {
addcc::generate(&mut row, self.op_filter);
}
(row, None)
}
}
pub struct ModularBinaryOp {
op_filter: usize,
input0: U256,
input1: U256,
modulus: U256,
}
impl ModularBinaryOp {
pub fn new(op_filter: usize, input0: U256, input1: U256, modulus: U256) -> Self {
assert!(op_filter == IS_ADDMOD || op_filter == IS_SUBMOD || op_filter == IS_MULMOD);
Self {
op_filter,
input0,
input1,
modulus,
}
}
}
fn modular_to_rows_helper<F: RichField>(
op_filter: usize,
input0: U256,
input1: U256,
modulus: U256,
) -> (Vec<F>, Option<Vec<F>>) {
let mut row1 = vec![F::ZERO; NUM_ARITH_COLUMNS];
let mut row2 = vec![F::ZERO; NUM_ARITH_COLUMNS];
row1[op_filter] = F::ONE;
u256_to_array(&mut row1[MODULAR_INPUT_0], input0);
u256_to_array(&mut row1[MODULAR_INPUT_1], input1);
u256_to_array(&mut row1[MODULAR_MODULUS], modulus);
modular::generate(&mut row1, &mut row2, op_filter);
(row1, Some(row2))
}
impl<F: RichField> Operation<F> for ModularBinaryOp {
fn to_rows(&self) -> (Vec<F>, Option<Vec<F>>) {
modular_to_rows_helper(self.op_filter, self.input0, self.input1, self.modulus)
}
}
pub struct ModOp {
pub input: U256,
pub modulus: U256,
}
impl<F: RichField> Operation<F> for ModOp {
fn to_rows(&self) -> (Vec<F>, Option<Vec<F>>) {
modular_to_rows_helper(IS_MOD, self.input, U256::zero(), self.modulus)
}
}
pub struct DivOp {
pub numerator: U256,
pub denominator: U256,
}
impl<F: RichField> Operation<F> for DivOp {
fn to_rows(&self) -> (Vec<F>, Option<Vec<F>>) {
let mut row1 = vec![F::ZERO; NUM_ARITH_COLUMNS];
let mut row2 = vec![F::ZERO; NUM_ARITH_COLUMNS];
row1[IS_DIV] = F::ONE;
u256_to_array(&mut row1[DIV_NUMERATOR], self.numerator);
u256_to_array(&mut row1[DIV_DENOMINATOR], self.denominator);
modular::generate(&mut row1, &mut row2, IS_DIV);
(row1, Some(row2))
}
}

View File

@ -1,11 +1,14 @@
use std::ops::{Add, AddAssign, Mul, Neg, Range, Shr, Sub, SubAssign};
use ethereum_types::U256;
use plonky2::field::extension::Extendable;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use static_assertions::const_assert;
use crate::arithmetic::columns::N_LIMBS;
use crate::arithmetic::columns::{LIMB_BITS, N_LIMBS};
/// Return an array of `N` zeros of type T.
pub(crate) fn pol_zero<T, const N: usize>() -> [T; N]
@ -315,24 +318,35 @@ pub(crate) fn read_value<const N: usize, T: Copy>(lv: &[T], value_idxs: Range<us
lv[value_idxs].try_into().unwrap()
}
/// Read the range `value_idxs` of values from `lv` into an array of
/// length `N`, interpreting the values as `u64`s. Panics if the
/// length of the range is not `N`.
pub(crate) fn read_value_u64_limbs<const N: usize, F: RichField>(
lv: &[F],
value_idxs: Range<usize>,
) -> [u64; N] {
let limbs: [_; N] = lv[value_idxs].try_into().unwrap();
limbs.map(|c| F::to_canonical_u64(&c))
}
/// Read the range `value_idxs` of values from `lv` into an array of
/// length `N`, interpreting the values as `i64`s. Panics if the
/// length of the range is not `N`.
pub(crate) fn read_value_i64_limbs<const N: usize, F: RichField>(
pub(crate) fn read_value_i64_limbs<const N: usize, F: PrimeField64>(
lv: &[F],
value_idxs: Range<usize>,
) -> [i64; N] {
let limbs: [_; N] = lv[value_idxs].try_into().unwrap();
limbs.map(|c| F::to_canonical_u64(&c) as i64)
limbs.map(|c| c.to_canonical_u64() as i64)
}
#[inline]
fn u64_to_array<F: Field>(out: &mut [F], x: u64) {
const_assert!(LIMB_BITS == 16);
debug_assert!(out.len() == 4);
out[0] = F::from_canonical_u16(x as u16);
out[1] = F::from_canonical_u16((x >> 16) as u16);
out[2] = F::from_canonical_u16((x >> 32) as u16);
out[3] = F::from_canonical_u16((x >> 48) as u16);
}
// TODO: Refactor/replace u256_limbs in evm/src/util.rs
pub(crate) fn u256_to_array<F: Field>(out: &mut [F], x: U256) {
const_assert!(N_LIMBS == 16);
debug_assert!(out.len() == N_LIMBS);
u64_to_array(&mut out[0..4], x.0[0]);
u64_to_array(&mut out[4..8], x.0[1]);
u64_to_array(&mut out[8..12], x.0[2]);
u64_to_array(&mut out[12..16], x.0[3]);
}
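The limbs written here are just `x` in little-endian base 2^16, so summing `limb_i << (16*i)` recovers `x`; a round-trip sketch at u64 scale, mirroring `u64_to_array` (illustration only):
fn u64_limbs_roundtrip(x: u64) {
    let limbs = [x as u16, (x >> 16) as u16, (x >> 32) as u16, (x >> 48) as u16];
    let back = limbs
        .iter()
        .enumerate()
        .fold(0u64, |acc, (i, &l)| acc | ((l as u64) << (16 * i)));
    assert_eq!(back, x);
}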

View File

@ -13,12 +13,13 @@ pub struct OpsColumnsView<T: Copy> {
pub sub: T,
pub div: T,
pub mod_: T,
// TODO: combine ADDMOD, MULMOD into one flag
// TODO: combine ADDMOD, MULMOD and SUBMOD into one flag
pub addmod: T,
pub mulmod: T,
pub addfp254: T,
pub mulfp254: T,
pub subfp254: T,
pub submod: T,
pub lt: T,
pub gt: T,
pub eq: T, // Note: This column must be 0 when is_cpu_cycle = 0.

View File

@ -34,6 +34,11 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/curve/bn254/field_arithmetic/degree_12_mul.asm"),
include_str!("asm/curve/bn254/field_arithmetic/frobenius.asm"),
include_str!("asm/curve/bn254/field_arithmetic/util.asm"),
include_str!("asm/curve/bn254/curve_add.asm"),
include_str!("asm/curve/bn254/curve_mul.asm"),
include_str!("asm/curve/bn254/glv.asm"),
include_str!("asm/curve/bn254/msm.asm"),
include_str!("asm/curve/bn254/precomputation.asm"),
include_str!("asm/curve/common.asm"),
include_str!("asm/curve/secp256k1/curve_add.asm"),
include_str!("asm/curve/secp256k1/ecrecover.asm"),
@ -42,6 +47,7 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/curve/secp256k1/moddiv.asm"),
include_str!("asm/curve/secp256k1/glv.asm"),
include_str!("asm/curve/secp256k1/precomputation.asm"),
include_str!("asm/curve/wnaf.asm"),
include_str!("asm/exp.asm"),
include_str!("asm/halt.asm"),
include_str!("asm/hash/blake2b/addresses.asm"),

View File

@ -0,0 +1,305 @@
// #define N 0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47 // BN254 base field order
// BN254 elliptic curve addition.
// Uses the standard affine addition formula.
global bn_add:
// Uncomment for test inputs.
// PUSH 0xdeadbeef
// PUSH 2
// PUSH 1
// PUSH 0x1bf9384aa3f0b3ad763aee81940cacdde1af71617c06f46e11510f14f3d5d121
// PUSH 0xe7313274bb29566ff0c8220eb9841de1d96c2923c6a4028f7dd3c6a14cee770
// stack: x0, y0, x1, y1, retdest
// Check if points are valid BN254 points.
DUP2
// stack: y0, x0, y0, x1, y1, retdest
DUP2
// stack: x0, y0, x0, y0, x1, y1, retdest
%bn_check
// stack: isValid(x0, y0), x0, y0, x1, y1, retdest
DUP5
// stack: x1, isValid(x0, y0), x0, y0, x1, y1, retdest
DUP5
// stack: x1, y1, isValid(x0, y0), x0, y0, x1, y1, retdest
%bn_check
// stack: isValid(x1, y1), isValid(x0, y0), x0, y0, x1, y1, retdest
AND
// stack: isValid(x1, y1) & isValid(x0, y0), x0, y0, x1, y1, retdest
%jumpi(bn_add_valid_points)
// stack: x0, y0, x1, y1, retdest
// Otherwise return
%pop4
// stack: retdest
%bn_invalid_input
// BN254 elliptic curve addition.
// Assumption: (x0,y0) and (x1,y1) are valid points.
global bn_add_valid_points:
// stack: x0, y0, x1, y1, retdest
// Check if the first point is the identity.
DUP2
// stack: y0, x0, y0, x1, y1, retdest
DUP2
// stack: x0, y0, x0, y0, x1, y1, retdest
%ec_isidentity
// stack: (x0,y0)==(0,0), x0, y0, x1, y1, retdest
%jumpi(bn_add_first_zero)
// stack: x0, y0, x1, y1, retdest
// Check if the second point is the identity.
DUP4
// stack: y1, x0, y0, x1, y1, retdest
DUP4
// stack: x1, y1, x0, y0, x1, y1, retdest
%ec_isidentity
// stack: (x1,y1)==(0,0), x0, y0, x1, y1, retdest
%jumpi(bn_add_snd_zero)
// stack: x0, y0, x1, y1, retdest
// Check if both points have the same x-coordinate.
DUP3
// stack: x1, x0, y0, x1, y1, retdest
DUP2
// stack: x0, x1, x0, y0, x1, y1, retdest
EQ
// stack: x0 == x1, x0, y0, x1, y1, retdest
%jumpi(bn_add_equal_first_coord)
// stack: x0, y0, x1, y1, retdest
// Otherwise, we can use the standard formula.
// Compute lambda = (y0 - y1)/(x0 - x1)
DUP4
// stack: y1, x0, y0, x1, y1, retdest
DUP3
// stack: y0, y1, x0, y0, x1, y1, retdest
%submod
// stack: y0 - y1, x0, y0, x1, y1, retdest
DUP4
// stack: x1, y0 - y1, x0, y0, x1, y1, retdest
DUP3
// stack: x0, x1, y0 - y1, x0, y0, x1, y1, retdest
%submod
// stack: x0 - x1, y0 - y1, x0, y0, x1, y1, retdest
%moddiv
// stack: lambda, x0, y0, x1, y1, retdest
%jump(bn_add_valid_points_with_lambda)
// BN254 elliptic curve addition.
// Assumption: (x0,y0) == (0,0)
bn_add_first_zero:
// stack: x0, y0, x1, y1, retdest
// Just return (x1,y1)
%stack (x0, y0, x1, y1, retdest) -> (retdest, x1, y1)
JUMP
// BN254 elliptic curve addition.
// Assumption: (x1,y1) == (0,0)
bn_add_snd_zero:
// stack: x0, y0, x1, y1, retdest
// Just return (x0,y0)
%stack (x0, y0, x1, y1, retdest) -> (retdest, x0, y0)
JUMP
// BN254 elliptic curve addition.
// Assumption: lambda = (y0 - y1)/(x0 - x1)
bn_add_valid_points_with_lambda:
// stack: lambda, x0, y0, x1, y1, retdest
// Compute x2 = lambda^2 - x1 - x0
DUP2
// stack: x0, lambda, x0, y0, x1, y1, retdest
DUP5
// stack: x1, x0, lambda, x0, y0, x1, y1, retdest
%bn_base
// stack: N, x1, x0, lambda, x0, y0, x1, y1, retdest
DUP4
// stack: lambda, N, x1, x0, lambda, x0, y0, x1, y1, retdest
DUP1
// stack: lambda, lambda, N, x1, x0, lambda, x0, y0, x1, y1, retdest
MULMOD
// stack: lambda^2, x1, x0, lambda, x0, y0, x1, y1, retdest
%submod
// stack: lambda^2 - x1, x0, lambda, x0, y0, x1, y1, retdest
%submod
// stack: x2, lambda, x0, y0, x1, y1, retdest
// Compute y2 = lambda*(x1 - x2) - y1
%bn_base
// stack: N, x2, lambda, x0, y0, x1, y1, retdest
DUP2
// stack: x2, N, x2, lambda, x0, y0, x1, y1, retdest
DUP7
// stack: x1, x2, N, x2, lambda, x0, y0, x1, y1, retdest
%submod
// stack: x1 - x2, N, x2, lambda, x0, y0, x1, y1, retdest
DUP4
// stack: lambda, x1 - x2, N, x2, lambda, x0, y0, x1, y1, retdest
MULMOD
// stack: lambda * (x1 - x2), x2, lambda, x0, y0, x1, y1, retdest
DUP7
// stack: y1, lambda * (x1 - x2), x2, lambda, x0, y0, x1, y1, retdest
SWAP1
// stack: lambda * (x1 - x2), y1, x2, lambda, x0, y0, x1, y1, retdest
%submod
// stack: y2, x2, lambda, x0, y0, x1, y1, retdest
// Return x2,y2
%stack (y2, x2, lambda, x0, y0, x1, y1, retdest) -> (retdest, x2, y2)
JUMP
// BN254 elliptic curve addition.
// Assumption: (x0,y0) and (x1,y1) are valid points and x0 == x1
bn_add_equal_first_coord:
// stack: x0, y0, x1, y1, retdest with x0 == x1
// Check if the points are equal
DUP2
// stack: y0, x0, y0, x1, y1, retdest
DUP5
// stack: y1, y0, x0, y0, x1, y1, retdest
EQ
// stack: y1 == y0, x0, y0, x1, y1, retdest
%jumpi(bn_add_equal_points)
// stack: x0, y0, x1, y1, retdest
// Otherwise, one is the negation of the other so we can return (0,0).
%pop4
// stack: retdest
PUSH 0
// stack: 0, retdest
PUSH 0
// stack: 0, 0, retdest
SWAP2
// stack: retdest, 0, 0
JUMP
// BN254 elliptic curve addition.
// Assumption: x0 == x1 and y0 == y1
// Standard doubling formula.
bn_add_equal_points:
// stack: x0, y0, x1, y1, retdest
// Compute lambda = 3/2 * x0^2 / y0
%bn_base
// stack: N, x0, y0, x1, y1, retdest
%bn_base
// stack: N, N, x0, y0, x1, y1, retdest
DUP3
// stack: x0, N, N, x0, y0, x1, y1, retdest
DUP1
// stack: x0, x0, N, N, x0, y0, x1, y1, retdest
MULMOD
// stack: x0^2, N, x0, y0, x1, y1, retdest
PUSH 0x183227397098d014dc2822db40c0ac2ecbc0b548b438e5469e10460b6c3e7ea5 // 3/2 in the base field
// stack: 3/2, x0^2, N, x0, y0, x1, y1, retdest
MULMOD
// stack: 3/2 * x0^2, x0, y0, x1, y1, retdest
DUP3
// stack: y0, 3/2 * x0^2, x0, y0, x1, y1, retdest
%moddiv
// stack: lambda, x0, y0, x1, y1, retdest
%jump(bn_add_valid_points_with_lambda)
// BN254 elliptic curve doubling.
// Assumption: (x0,y0) is a valid point.
// Standard doubling formula.
global bn_double:
// stack: x, y, retdest
DUP2 DUP2 %ec_isidentity
// stack: (x,y)==(0,0), x, y, retdest
%jumpi(ec_double_retself)
DUP2 DUP2
// stack: x, y, x, y, retdest
%jump(bn_add_equal_points)
// Push the order of the BN254 base field.
%macro bn_base
PUSH 0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47
%endmacro
// Assumption: x, y < N and 2N < 2^256.
// Note: Doesn't hold for Secp256k1 base field.
%macro submod
// stack: x, y
%bn_base
// stack: N, x, y
ADD
// stack: N + x, y // Doesn't overflow since 2N < 2^256
SUB
// stack: N + x - y // Doesn't underflow since y < N
%bn_base
// stack: N, N + x - y
SWAP1
// stack: N + x - y, N
MOD
// stack: (N + x - y) % N = (x-y) % N
%endmacro
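A u128 model of why the macro is sound under its stated assumption (illustration, not kernel code):
// With x, y < n and 2n < 2^128: n + x cannot overflow and
// n + x - y cannot underflow, so (n + x - y) % n equals (x - y) mod n.
fn submod_model(x: u128, y: u128, n: u128) -> u128 {
    debug_assert!(x < n && y < n && n.checked_mul(2).is_some());
    (n + x - y) % n
}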
// Check if (x,y) is a valid curve point.
// Puts y^2 % N == (x^3 + 3) % N & (x < N) & (y < N) || (x,y)==(0,0) on top of the stack.
%macro bn_check
// stack: x, y
%bn_base
// stack: N, x, y
DUP2
// stack: x, N, x, y
LT
// stack: x < N, x, y
%bn_base
// stack: N, x < N, x, y
DUP4
// stack: y, N, x < N, x, y
LT
// stack: y < N, x < N, x, y
AND
// stack: (y < N) & (x < N), x, y
%stack (b, x, y) -> (x, x, @BN_BASE, x, @BN_BASE, @BN_BASE, x, y, b)
// stack: x, x, N, x, N, N, x, y, b
MULMOD
// stack: x^2 % N, x, N, N, x, y, b
MULMOD
// stack: x^3 % N, N, x, y, b
PUSH 3
// stack: 3, x^3 % N, N, x, y, b
ADDMOD
// stack: (x^3 + 3) % N, x, y, b
DUP3
// stack: y, (x^3 + 3) % N, x, y, b
%bn_base
// stack: N, y, (x^3 + 3) % N, x, y, b
SWAP1
// stack: y, N, (x^3 + 3) % N, x, y, b
DUP1
// stack: y, y, N, (x^3 + 3) % N, x, y, b
MULMOD
// stack: y^2 % N, (x^3 + 3) % N, x, y, b
EQ
// stack: y^2 % N == (x^3 + 3) % N, x, y, b
SWAP2
// stack: y, x, y^2 % N == (x^3 + 3) % N, b
%ec_isidentity
// stack: (x,y)==(0,0), y^2 % N == (x^3 + 3) % N, b
SWAP2
// stack: b, y^2 % N == (x^3 + 3) % N, (x,y)==(0,0)
AND
// stack: y^2 % N == (x^3 + 3) % N & (x < N) & (y < N), (x,y)==(0,0)
OR
// stack: y^2 % N == (x^3 + 3) % N & (x < N) & (y < N) || (x,y)==(0,0)
%endmacro
// Return (u256::MAX, u256::MAX) which is used to indicate the input was invalid.
%macro bn_invalid_input
// stack: retdest
PUSH 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
// stack: u256::MAX, retdest
PUSH 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
// stack: u256::MAX, u256::MAX, retdest
SWAP2
// stack: retdest, u256::MAX, u256::MAX
JUMP
%endmacro
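A compact Rust model of the affine formulas the routines above implement, over a toy 61-bit prime instead of the BN254 base field, with (0,0) standing in for the identity as in the kernel (illustration under those assumptions):
const P: u128 = (1 << 61) - 1; // toy Mersenne prime standing in for BN_BASE
fn subp(a: u128, b: u128) -> u128 { (P + a - b) % P } // same trick as %submod
fn mulp(a: u128, b: u128) -> u128 { a * b % P } // fits in u128 since P^2 < 2^122
fn invp(a: u128) -> u128 {
    // Fermat inversion: a^(P-2) mod P, P prime.
    let (mut base, mut e, mut acc) = (a, P - 2, 1u128);
    while e > 0 {
        if e & 1 == 1 { acc = mulp(acc, base); }
        base = mulp(base, base);
        e >>= 1;
    }
    acc
}
fn ec_add(p0: (u128, u128), p1: (u128, u128)) -> (u128, u128) {
    if p0 == (0, 0) { return p1; } // bn_add_first_zero
    if p1 == (0, 0) { return p0; } // bn_add_snd_zero
    let lambda = if p0.0 != p1.0 {
        // Generic case: lambda = (y0 - y1)/(x0 - x1)
        mulp(subp(p0.1, p1.1), invp(subp(p0.0, p1.0)))
    } else if p0.1 == p1.1 {
        // Doubling: lambda = 3/2 * x0^2 / y0
        mulp(mulp(3, mulp(p0.0, p0.0)), invp(mulp(2, p0.1)))
    } else {
        return (0, 0); // opposite points sum to the identity
    };
    // x2 = lambda^2 - x1 - x0, then y2 = lambda*(x1 - x2) - y1
    let x2 = subp(subp(mulp(lambda, lambda), p1.0), p0.0);
    let y2 = subp(mulp(lambda, subp(p1.0, x2)), p1.1);
    (x2, y2)
}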

View File

@ -0,0 +1,41 @@
// BN254 elliptic curve scalar multiplication.
// Uses GLV, wNAF with w=5, and a MSM algorithm.
global bn_mul:
// stack: x, y, s, retdest
DUP2
// stack: y, x, y, s, retdest
DUP2
// stack: x, y, x, y, s, retdest
%ec_isidentity
// stack: (x,y)==(0,0), x, y, s, retdest
%jumpi(ret_zero_ec_mul)
// stack: x, y, s, retdest
DUP2
// stack: y, x, y, s, retdest
DUP2
// stack: x, y, x, y, s, retdest
%bn_check
// stack: isValid(x, y), x, y, s, retdest
%jumpi(bn_mul_valid_point)
// stack: x, y, s, retdest
%pop3
%bn_invalid_input
bn_mul_valid_point:
%stack (x, y, s, retdest) -> (s, bn_mul_after_glv, x, y, bn_msm, bn_mul_end, retdest)
%jump(bn_glv_decompose)
bn_mul_after_glv:
// stack: bneg, a, b, x, y, bn_msm, bn_mul_end, retdest
// Store bneg at this (otherwise unused) location. Will be used later in the MSM.
%mstore_kernel(@SEGMENT_KERNEL_BN_TABLE_Q, @BN_BNEG_LOC)
// stack: a, b, x, y, bn_msm, bn_mul_end, retdest
PUSH bn_mul_after_a SWAP1 PUSH @SEGMENT_KERNEL_BN_WNAF_A PUSH @BN_SCALAR %jump(wnaf)
bn_mul_after_a:
// stack: b, x, y, bn_msm, bn_mul_end, retdest
PUSH bn_mul_after_b SWAP1 PUSH @SEGMENT_KERNEL_BN_WNAF_B PUSH @BN_SCALAR %jump(wnaf)
bn_mul_after_b:
// stack: x, y, bn_msm, bn_mul_end, retdest
%jump(bn_precompute_table)
bn_mul_end:
%stack (Ax, Ay, retdest) -> (retdest, Ax, Ay)
JUMP

View File

@ -0,0 +1,97 @@
// Inspired by https://github.com/AztecProtocol/weierstrudel/blob/master/huff_modules/endomorphism.huff
// See also Sage code in evm/src/cpu/kernel/tests/ecc/bn_glv_test_data
// Given scalar `k ∈ Bn254::ScalarField`, return `u, k1, k2` with `k1,k2 < 2^127` and such that
// `k = k1 - s*k2` if `u==0` otherwise `k = k1 + s*k2`, where `s` is the scalar value representing the endomorphism.
// In the comments below, N means @BN_SCALAR
//
// Z3 proof that the resulting `k1, k2` satisfy `k1>0`, `k1 < 2^127` and `|k2| < 2^127`.
// ```python
// from z3 import Solver, Int, Or, unsat
// q = 0x30644E72E131A029B85045B68181585D2833E84879B9709143E1F593F0000001
// glv_s = 0xB3C4D79D41A917585BFC41088D8DAAA78B17EA66B99C90DD
//
// b2 = 0x89D3256894D213E3
// b1 = -0x6F4D8248EEB859FC8211BBEB7D4F1128
//
// g1 = 0x24CCEF014A773D2CF7A7BD9D4391EB18D
// g2 = 0x2D91D232EC7E0B3D7
// k = Int("k")
// c1 = Int("c1")
// c2 = Int("c2")
// s = Solver()
//
// c2p = -c2
// s.add(k < q)
// s.add(0 < k)
// s.add(c1 * (2**256) <= g2 * k)
// s.add((c1 + 1) * (2**256) > g2 * k)
// s.add(c2p * (2**256) <= g1 * k)
// s.add((c2p + 1) * (2**256) > g1 * k)
//
// q1 = c1 * b1
// q2 = c2 * b2
//
// k2 = q2 - q1
// k2L = (glv_s * k2) % q
// k1 = k - k2L
// k2 = -k2
//
// s.add(Or((k2 >= 2**127), (-k2 >= 2**127), (k1 >= 2**127), (k1 < 0)))
//
// assert s.check() == unsat
// ```
global bn_glv_decompose:
// stack: k, retdest
PUSH @BN_SCALAR DUP1 DUP1
// Compute c2 which is the top 256 bits of k*g1. Use asm from https://medium.com/wicketh/mathemagic-full-multiply-27650fec525d.
PUSH @U256_MAX
// stack: -1, N, N, N, k, retdest
PUSH @BN_GLV_MINUS_G1 DUP6
// stack: k, g1, -1, N, N, N, k, retdest
MULMOD
// stack: (k * g1 % -1), N, N, N, k, retdest
PUSH @BN_GLV_MINUS_G1 DUP6
// stack: k, g1, (k * g1 % -1), N, N, N, k, retdest
MUL
// stack: bottom = (k * g1), (k * g1 % -1), N, N, N, k, retdest
DUP1 DUP3
// stack: (k * g1 % -1), bottom, bottom, (k * g1 % -1), N, N, N, k, retdest
LT SWAP2 SUB SUB
// stack: c2, N, N, N, k, retdest
PUSH @BN_GLV_B2 MULMOD
// stack: q2=c2*b2, N, N, k, retdest
// Use the same trick to compute c1 = top 256 bits of g2*k.
PUSH @BN_SCALAR PUSH @U256_MAX
PUSH @BN_GLV_G2 DUP7 MULMOD
PUSH @BN_GLV_G2 DUP7 MUL
DUP1 DUP3 LT
SWAP2 SUB SUB
// stack: c1, N, q2, N, N, k, retdest
PUSH @BN_GLV_B1 MULMOD
// stack: q1, q2, N, N, k, retdest
// We compute k2 = q1 + q2 - N, but we check for underflow and return N-q1-q2 instead if there is one,
// along with a flag `underflow` set to 1 if there is an underflow, 0 otherwise.
ADD %sub_check_underflow
// stack: k2, underflow, N, k, retdest
SWAP3 PUSH @BN_SCALAR DUP5 PUSH @BN_GLV_S
// stack: s, k2, N, k, underflow, N, k2, retdest
MULMOD
// stack: s*k2, k, underflow, N, k2, retdest
// Need to return `k + s*k2` if no underflow occurs; otherwise return `k - s*k2`, which is handled in `underflowed` below.
SWAP2 DUP1 %jumpi(underflowed)
%stack (underflow, k, x, N, k2) -> (k, x, N, k2, underflow)
ADDMOD
%stack (k1, k2, underflow, retdest) -> (retdest, underflow, k1, k2)
JUMP
underflowed:
// stack: underflow, k, s*k2, N, k2
// Compute (k-s*k2)%N. TODO: Use SUBMOD here when ready
%stack (u, k, x, N, k2) -> (N, x, k, N, k2, u)
SUB ADDMOD
%stack (k1, k2, underflow, retdest) -> (retdest, underflow, k1, k2)
JUMP
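A 64-bit model of the full-multiply trick used twice above (illustration, not kernel code): the high word of a product is recovered from the product mod 2^64 and mod 2^64 - 1, since 2^64 ≡ 1 (mod 2^64 - 1) gives mm ≡ hi + lo:
fn mul_high(a: u64, b: u64) -> u64 {
    let lo = a.wrapping_mul(b); // MUL
    let mm = ((a as u128 * b as u128) % (u64::MAX as u128)) as u64; // MULMOD by -1
    // Mirrors DUP1 DUP3 LT SWAP2 SUB SUB: hi = mm - lo - (mm < lo)
    mm.wrapping_sub(lo).wrapping_sub((mm < lo) as u64)
}
fn main() {
    let (a, b) = (0xdead_beef_cafe_f00d_u64, 0x1234_5678_9abc_def0_u64);
    assert_eq!(mul_high(a, b), ((a as u128 * b as u128) >> 64) as u64);
}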

View File

@ -0,0 +1,73 @@
// Computes the multiplication `a*G` using a standard MSM with the GLV decomposition of `a`.
// See the GLV decomposition in `glv.asm` for a detailed description.
global bn_msm:
// stack: retdest
PUSH 0 PUSH 0 PUSH 0
global bn_msm_loop:
// stack: accx, accy, i, retdest
DUP3 %bn_mload_wnaf_a
// stack: w, accx, accy, i, retdest
DUP1 %jumpi(bn_msm_loop_add_a_nonzero)
POP
msm_loop_add_b:
//stack: accx, accy, i, retdest
DUP3 %bn_mload_wnaf_b
// stack: w, accx, accy, i, retdest
DUP1 %jumpi(bn_msm_loop_add_b_nonzero)
POP
msm_loop_contd:
%stack (accx, accy, i, retdest) -> (i, i, accx, accy, retdest)
// TODO: the GLV scalars for the BN curve are 127-bit, so could use 127 here. But this would require modifying `wnaf.asm`. Not sure it's worth it...
%eq_const(129) %jumpi(msm_end)
%increment
//stack: i+1, accx, accy, retdest
%stack (i, accx, accy, retdest) -> (accx, accy, bn_msm_loop, i, retdest)
%jump(bn_double)
msm_end:
%stack (i, accx, accy, retdest) -> (retdest, accx, accy)
JUMP
bn_msm_loop_add_a_nonzero:
%stack (w, accx, accy, i, retdest) -> (w, accx, accy, msm_loop_add_b, i, retdest)
%bn_mload_point_a
// stack: px, py, accx, accy, msm_loop_add_b, i, retdest
%jump(bn_add_valid_points)
bn_msm_loop_add_b_nonzero:
%stack (w, accx, accy, i, retdest) -> (w, accx, accy, msm_loop_contd, i, retdest)
%bn_mload_point_b
// stack: px, py, accx, accy, msm_loop_contd, i, retdest
%jump(bn_add_valid_points)
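A scalar-only sketch of how the loop above recombines the stored digits, assuming (as the loop order suggests) that the wNAF digits are stored most-significant first (illustration only):
fn recombine(digits: &[i64]) -> i64 {
    // Add the digit, then double (except after the last digit),
    // matching the add-then-bn_double order of bn_msm_loop.
    let mut acc = 0i64;
    for (i, &d) in digits.iter().enumerate() {
        acc += d;
        if i + 1 < digits.len() {
            acc *= 2;
        }
    }
    acc // == sum of d_i * 2^(len-1-i)
}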
%macro bn_mload_wnaf_a
// stack: i
%mload_kernel(@SEGMENT_KERNEL_BN_WNAF_A)
%endmacro
%macro bn_mload_wnaf_b
// stack: i
%mload_kernel(@SEGMENT_KERNEL_BN_WNAF_B)
%endmacro
%macro bn_mload_point_a
// stack: w
DUP1
%mload_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
//stack: Gy, w
SWAP1 %decrement %mload_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
//stack: Gx, Gy
%endmacro
%macro bn_mload_point_b
// stack: w
DUP1
%mload_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
PUSH @BN_BNEG_LOC %mload_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
%stack (bneg, Gy, w) -> (@BN_BASE, Gy, bneg, bneg, Gy, w)
SUB SWAP1 ISZERO MUL SWAP2 MUL ADD
SWAP1 %decrement %mload_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
//stack: Gx, Gy
PUSH @BN_GLV_BETA
MULFP254
%endmacro

View File

@ -0,0 +1,35 @@
// Precompute a table of multiples of the BN254 point `Q = (Qx, Qy)`.
// Let `(Qxi, Qyi) = i * Q`, then store in the `SEGMENT_KERNEL_BN_TABLE_Q` segment of memory the values
// `i-1 => Qxi`, `i => Qyi if i < 16 else -Qy(32-i)` for `i in range(1, 32, 2)`.
global bn_precompute_table:
// stack: Qx, Qy, retdest
PUSH precompute_table_contd DUP3 DUP3
%jump(bn_double)
precompute_table_contd:
// stack: Qx2, Qy2, Qx, Qy, retdest
PUSH 1
bn_precompute_table_loop:
// stack: i, Qx2, Qy2, Qx, Qy, retdest
PUSH 1 DUP2 SUB
%stack (im, i, Qx2, Qy2, Qx, Qy, retdest) -> (i, Qy, im, Qx, i, Qx2, Qy2, Qx, Qy, retdest)
%mstore_kernel(@SEGMENT_KERNEL_BN_TABLE_Q) %mstore_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
// stack: i, Qx2, Qy2, Qx, Qy, retdest
DUP1 PUSH 32 SUB PUSH 1 DUP2 SUB
// stack: 31-i, 32-i, i, Qx2, Qy2, Qx, Qy, retdest
DUP7 PUSH @BN_BASE SUB
// TODO: Could maybe avoid storing Qx a second time here, not sure if it would be more efficient.
%stack (Qyy, iii, ii, i, Qx2, Qy2, Qx, Qy, retdest) -> (iii, Qx, ii, Qyy, i, Qx2, Qy2, Qx, Qy, retdest)
%mstore_kernel(@SEGMENT_KERNEL_BN_TABLE_Q) %mstore_kernel(@SEGMENT_KERNEL_BN_TABLE_Q)
// stack: i, Qx2, Qy2, Qx, Qy, retdest
PUSH 2 ADD
// stack: i+2, Qx2, Qy2, Qx, Qy, retdest
DUP1 PUSH 16 LT %jumpi(precompute_table_end)
%stack (i, Qx2, Qy2, Qx, Qy, retdest) -> (Qx, Qy, Qx2, Qy2, precompute_table_loop_contd, i, Qx2, Qy2, retdest)
%jump(bn_add_valid_points)
precompute_table_loop_contd:
%stack (Qx, Qy, i, Qx2, Qy2, retdest) -> (i, Qx2, Qy2, Qx, Qy, retdest)
%jump(bn_precompute_table_loop)
precompute_table_end:
// stack: i, Qx2, Qy2, Qx, Qy, retdest
%pop5 JUMP

View File

@ -9,3 +9,17 @@ global ret_zero_ec_mul:
SWAP2
// stack: retdest, 0, 0
JUMP
global ec_double_retself:
%stack (x, y, retdest) -> (retdest, x, y)
JUMP
// Check if (x,y)==(0,0)
%macro ec_isidentity
// stack: x, y
OR
// stack: x | y
ISZERO
// stack: (x,y) == (0,0)
%endmacro

View File

@ -2,7 +2,7 @@
// Secp256k1 elliptic curve addition.
// Assumption: (x0,y0) and (x1,y1) are valid points.
global ec_add_valid_points_secp:
global secp_add_valid_points:
// stack: x0, y0, x1, y1, retdest
// Check if the first point is the identity.
@ -12,7 +12,7 @@ global ec_add_valid_points_secp:
// stack: x0, y0, x0, y0, x1, y1, retdest
%ec_isidentity
// stack: (x0,y0)==(0,0), x0, y0, x1, y1, retdest
%jumpi(ec_add_first_zero)
%jumpi(secp_add_first_zero)
// stack: x0, y0, x1, y1, retdest
// Check if the second point is the identity.
@ -22,7 +22,7 @@ global ec_add_valid_points_secp:
// stack: x1, y1, x0, y0, x1, y1, retdest
%ec_isidentity
// stack: (x1,y1)==(0,0), x0, y0, x1, y1, retdest
%jumpi(ec_add_snd_zero)
%jumpi(secp_add_snd_zero)
// stack: x0, y0, x1, y1, retdest
// Check if both points have the same x-coordinate.
@ -32,9 +32,9 @@ global ec_add_valid_points_secp:
// stack: x0, x1, x0, y0, x1, y1, retdest
EQ
// stack: x0 == x1, x0, y0, x1, y1, retdest
%jumpi(ec_add_equal_first_coord)
%jumpi(secp_add_equal_first_coord)
// Standard affine addition formula.
global ec_add_valid_points_no_edge_case_secp:
global secp_add_valid_points_no_edge_case:
// stack: x0, y0, x1, y1, retdest
// Compute lambda = (y0 - y1)/(x0 - x1)
DUP4
@ -51,11 +51,11 @@ global ec_add_valid_points_no_edge_case_secp:
// stack: x0 - x1, y0 - y1, x0, y0, x1, y1, retdest
%moddiv_secp_base
// stack: lambda, x0, y0, x1, y1, retdest
%jump(ec_add_valid_points_with_lambda)
%jump(secp_add_valid_points_with_lambda)
// Secp256k1 elliptic curve addition.
// Assumption: (x0,y0) == (0,0)
ec_add_first_zero:
secp_add_first_zero:
// stack: x0, y0, x1, y1, retdest
// Just return (x1,y1)
@ -69,7 +69,7 @@ ec_add_first_zero:
// Secp256k1 elliptic curve addition.
// Assumption: (x1,y1) == (0,0)
ec_add_snd_zero:
secp_add_snd_zero:
// stack: x0, y0, x1, y1, retdest
// Just return (x0,y0)
@ -89,7 +89,7 @@ ec_add_snd_zero:
// Secp256k1 elliptic curve addition.
// Assumption: lambda = (y0 - y1)/(x0 - x1)
ec_add_valid_points_with_lambda:
secp_add_valid_points_with_lambda:
// stack: lambda, x0, y0, x1, y1, retdest
// Compute x2 = lambda^2 - x1 - x0
@ -145,7 +145,7 @@ ec_add_valid_points_with_lambda:
// Secp256k1 elliptic curve addition.
// Assumption: (x0,y0) and (x1,y1) are valid points and x0 == x1
ec_add_equal_first_coord:
secp_add_equal_first_coord:
// stack: x0, y0, x1, y1, retdest with x0 == x1
// Check if the points are equal
@ -155,7 +155,7 @@ ec_add_equal_first_coord:
// stack: y1, y0, x0, y0, x1, y1, retdest
EQ
// stack: y1 == y0, x0, y0, x1, y1, retdest
%jumpi(ec_add_equal_points)
%jumpi(secp_add_equal_points)
// stack: x0, y0, x1, y1, retdest
// Otherwise, one is the negation of the other so we can return (0,0).
@ -173,7 +173,7 @@ ec_add_equal_first_coord:
// Secp256k1 elliptic curve addition.
// Assumption: x0 == x1 and y0 == y1
// Standard doubling formula.
ec_add_equal_points:
secp_add_equal_points:
// Compute lambda = 3/2 * x0^2 / y0
%stack (x0, y0, x1, y1, retdest) -> (x0, x0, @SECP_BASE, @SECP_BASE, x0, y0, x1, y1, retdest)
MULMOD
@ -181,16 +181,16 @@ ec_add_equal_points:
MULMOD
DUP3
%moddiv_secp_base
%jump(ec_add_valid_points_with_lambda)
%jump(secp_add_valid_points_with_lambda)
// Secp256k1 elliptic curve doubling.
// Assumption: (x,y) is a valid point.
// Standard doubling formula.
global ec_double_secp:
global secp_double:
// stack: x, y, retdest
DUP2 DUP2 %ec_isidentity
// stack: (x,y)==(0,0), x, y, retdest
%jumpi(retself)
%jumpi(ec_double_retself)
// Compute lambda = 3/2 * x0^2 / y0
%stack (x, y, retdest) -> (x, x, @SECP_BASE, @SECP_BASE, x, y, retdest)
@ -200,11 +200,7 @@ global ec_double_secp:
DUP3
%moddiv_secp_base
%stack (lambda, x, y, retdest) -> (lambda, x, y, x, y, retdest)
%jump(ec_add_valid_points_with_lambda)
retself:
%stack (x, y, retdest) -> (retdest, x, y)
JUMP
%jump(secp_add_valid_points_with_lambda)
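
For readers following the branch structure above, here is a hedged Rust sketch of the same logic over a toy prime field. The modulus P, the inv_mod helper, and the (0,0) identity encoding are assumptions of this sketch, not kernel APIs; the real code works modulo @SECP_BASE via %moddiv_secp_base.

const P: u64 = 65521; // toy prime standing in for @SECP_BASE

// Stand-in for %moddiv_secp_base's inversion: a^(P-2) mod P by square-and-multiply.
fn inv_mod(a: u64) -> u64 {
    let (mut base, mut exp, mut acc) = (a % P, P - 2, 1u64);
    while exp > 0 {
        if exp & 1 == 1 { acc = acc * base % P; }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

// Mirrors secp_add_valid_points and its branches; (0,0) encodes the identity and both
// inputs are assumed to be valid points, as in the kernel.
fn add_valid_points(p0: (u64, u64), p1: (u64, u64)) -> (u64, u64) {
    let ((x0, y0), (x1, y1)) = (p0, p1);
    if p0 == (0, 0) { return p1; }      // secp_add_first_zero
    if p1 == (0, 0) { return p0; }      // secp_add_snd_zero
    if x0 == x1 {
        if y0 != y1 { return (0, 0); }  // points are negations of each other
        // secp_add_equal_points: lambda = 3/2 * x0^2 / y0
        let lambda = 3 * (x0 * x0 % P) % P * inv_mod(2 * y0 % P) % P;
        return add_with_lambda(lambda, x0, y0, x1);
    }
    // secp_add_valid_points_no_edge_case: lambda = (y0 - y1)/(x0 - x1)
    let lambda = (y0 + P - y1) % P * inv_mod((x0 + P - x1) % P) % P;
    add_with_lambda(lambda, x0, y0, x1)
}

// secp_add_valid_points_with_lambda: x2 = lambda^2 - x1 - x0, completed with
// y2 = lambda*(x0 - x2) - y0.
fn add_with_lambda(lambda: u64, x0: u64, y0: u64, x1: u64) -> (u64, u64) {
    let x2 = (lambda * lambda % P + 2 * P - x1 - x0) % P;
    let y2 = (lambda * ((x0 + P - x2) % P) % P + P - y0) % P;
    (x2, y2)
}

// secp_double: identity check, then the equal-points lambda feeds the same completion.
fn double(p: (u64, u64)) -> (u64, u64) {
    if p == (0, 0) { return p; }        // ec_double_retself
    add_valid_points(p, p)
}

As in the kernel, doubling reuses the shared lambda completion; only the lambda computation differs between the add and double paths.
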
// Push the order of the Secp256k1 base field.
%macro secp_base
@ -221,7 +217,7 @@ retself:
// Check if (x,y) is a valid curve point.
// Puts y^2 % N == (x^3 + 7) % N & (x < N) & (y < N) || (x,y)==(0,0) on top of the stack.
%macro ec_check_secp
%macro secp_check
// stack: x, y
%secp_base
// stack: N, x, y
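
A hedged Rust sketch of the same predicate (secp256k1's curve equation is y^2 = x^3 + 7; the u128 widening and the n < 2^64 bound are assumptions of this sketch):

fn secp_check(x: u128, y: u128, n: u128) -> bool {
    // Assumes n < 2^64 so that products of two reduced values fit in u128.
    let (xr, yr) = (x % n, y % n);
    let on_curve = yr * yr % n == (xr * xr % n * xr % n + 7) % n;
    (on_curve && x < n && y < n) || (x, y) == (0, 0)
}
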


@ -64,13 +64,13 @@ ecrecover_valid_input:
// return msm_with_precomputation([a0, a1, b0, b1], [G, phi(G), Q, phi(Q)]) -- phi is the Secp endomorphism.
ecdsa_msm_with_glv:
%stack (a, b, Qx, Qy, retdest) -> (a, ecdsa_after_glv_a, b, Qx, Qy, retdest)
%jump(glv_decompose)
%jump(secp_glv_decompose)
ecdsa_after_glv_a:
%stack (a1neg, a0, a1, b, Qx, Qy, retdest) -> (b, ecdsa_after_glv_b, a1neg, a0, a1, Qx, Qy, retdest)
%jump(glv_decompose)
%jump(secp_glv_decompose)
ecdsa_after_glv_b:
%stack (b1neg, b0, b1, a1neg, a0, a1, Qx, Qy, retdest) -> (a1neg, b1neg, Qx, Qy, ecdsa_after_precompute, a0, a1, b0, b1, retdest)
%jump(precompute_table)
%jump(secp_precompute_table)
ecdsa_after_precompute:
// stack: a0, a1, b0, b1, retdest
PUSH 0 PUSH 0 PUSH 129 // 129 is the bit length of the GLV exponents
@ -91,11 +91,11 @@ ecdsa_after_precompute_loop:
SWAP1 %mul_const(2)
%mload_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)
%stack (Px, Py, i, accx, accy, a0, a1, b0, b1, retdest) -> (Px, Py, accx, accy, ecdsa_after_precompute_loop_contd, i, a0, a1, b0, b1, retdest)
%jump(ec_add_valid_points_secp)
%jump(secp_add_valid_points)
ecdsa_after_precompute_loop_contd:
%stack (accx, accy, i, a0, a1, b0, b1, retdest) -> (i, accx, accy, ecdsa_after_precompute_loop_contd2, i, a0, a1, b0, b1, retdest)
ISZERO %jumpi(ecdsa_after_precompute_loop_end)
%jump(ec_double_secp)
%jump(secp_double)
ecdsa_after_precompute_loop_contd2:
%stack (accx, accy, i, a0, a1, b0, b1, retdest) -> (i, accx, accy, a0, a1, b0, b1, retdest)
%decrement %jump(ecdsa_after_precompute_loop)
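
The loop above is easier to see in a hedged Rust sketch where the additive group of i128 stands in for the curve group, so secp_double becomes *2 and secp_add_valid_points becomes +. With table[j] holding the combination of G, phi(G), Q, phi(Q) selected by the bits of j (the 8a+4b+2c+d layout of secp_precompute_table), the loop returns a0*G + a1*phi(G) + b0*Q + b1*phi(Q):

fn msm_loop(table: &[i128; 16], a0: u128, a1: u128, b0: u128, b1: u128) -> i128 {
    let mut acc = 0i128;
    for i in (0..129).rev() { // 129 is the bit length of the GLV exponents
        let bit = |k: u128| ((k >> i) & 1) as usize;
        acc += table[8 * bit(a0) + 4 * bit(a1) + 2 * bit(b0) + bit(b1)];
        if i > 0 {
            acc *= 2; // secp_double; skipped once i reaches 0, as in the loop above
        }
    }
    acc
}

Keep the stand-in scalars small here: 129 doublings of a genuine 129-bit combination would overflow i128, which the actual curve group of course does not.
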


@ -1,5 +1,5 @@
// Inspired by https://github.com/AztecProtocol/weierstrudel/blob/master/huff_modules/endomorphism.huff
// See also Sage code in evm/src/cpu/kernel/tests/ecc/glv_test_data
// See also Sage code in evm/src/cpu/kernel/tests/ecc/secp_glv_test_data
// Given scalar `k ∈ Secp256k1::ScalarField`, return `u, k1, k2` with `k1,k2 < 2^129` and such that
// `k = k1 - s*k2` if `u==0` otherwise `k = k1 + s*k2`, where `s` is the scalar value representing the endomorphism.
// In the comments below, N means @SECP_SCALAR
@ -36,7 +36,7 @@
// s.add(Or((k2 >= 2**129), (-k2 >= 2**129), (k1 >= 2**129), (k1 < 0)))
// assert s.check() == unsat
// ```
global glv_decompose:
global secp_glv_decompose:
// stack: k, retdest
PUSH @SECP_SCALAR DUP1 DUP1
// Compute c2 which is the top 256 bits of k*g1. Use asm from https://medium.com/wicketh/mathemagic-full-multiply-27650fec525d.
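
A hedged Rust sketch of checking one (u, k1, k2) triple against k, in the style of the tests; s is the endomorphism scalar and n the group order N described above, passed in as arguments since the kernel's corresponding constants are not shown in this hunk:

use ethereum_types::{U256, U512};

// Checks k == k1 - s*k2 (u == false) or k == k1 + s*k2 (u == true), all mod n.
fn check_glv_decomposition(k: U256, u: bool, k1: U256, k2: U256, s: U256, n: U256) -> bool {
    let n512 = U512::from(n);
    let sk2 = k2.full_mul(s) % n512;
    let lhs = if u {
        (U512::from(k1) + sk2) % n512
    } else {
        (U512::from(k1) + n512 - sk2) % n512
    };
    lhs == U512::from(k % n)
}
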


@ -1,6 +1,6 @@
// Initial stack: Gneg, Qneg, Qx, Qy, retdest
// Compute a*G ± b*phi(G) + c*Q ± d*phi(Q) for (a,b,c,d) in {0,1}^4, storing each combination's x-coordinate at location `2*(8a+4b+2c+d)` and its y-coordinate at location `2*(8a+4b+2c+d)+1` in the SEGMENT_KERNEL_ECDSA_TABLE segment.
global precompute_table:
global secp_precompute_table:
// First store G, ± phi(G), G ± phi(G)
// Use Gneg for the ±, e.g., ±phi(G) is computed as `Gneg * (-phi(G)) + (1-Gneg)*phi(G)` (note only the y-coordinate needs to be filtered).
// stack: Gneg, Qneg, Qx, Qy, retdest
@ -30,7 +30,7 @@ global precompute_table:
DUP5 PUSH @SECP_BASE SUB MUL ADD
%stack (selectQy, betaQx, Qx, Qy, retdest) -> (2, betaQx, 3, selectQy, betaQx, selectQy, Qx, Qy, precompute_table_contd, retdest)
%mstore_kernel(@SEGMENT_KERNEL_ECDSA_TABLE) %mstore_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)
%jump(ec_add_valid_points_no_edge_case_secp)
%jump(secp_add_valid_points_no_edge_case)
precompute_table_contd:
%stack (x, y, retdest) -> (6, x, 7, y, retdest)
%mstore_kernel(@SEGMENT_KERNEL_ECDSA_TABLE) %mstore_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)
@ -46,7 +46,7 @@ precompute_table_loop:
PUSH 9 %mload_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)
PUSH 8 %mload_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)
// stack: Gx, Gy, x, y, precompute_table_loop_contd, x, y, i, retdest
%jump(ec_add_valid_points_secp)
%jump(secp_add_valid_points)
precompute_table_loop_contd:
%stack (Rx, Ry, x, y, i, retdest) -> (i, 8, Rx, i, 9, Ry, x, y, i, retdest)
ADD %mstore_kernel(@SEGMENT_KERNEL_ECDSA_TABLE) ADD %mstore_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)
@ -54,14 +54,14 @@ precompute_table_loop_contd:
PUSH 17 %mload_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)
PUSH 16 %mload_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)
%stack (Gx, Gy, x, y, x, y, i, retdest) -> (Gx, Gy, x, y, precompute_table_loop_contd2, x, y, i, retdest)
%jump(ec_add_valid_points_secp)
%jump(secp_add_valid_points)
precompute_table_loop_contd2:
%stack (Rx, Ry, x, y, i, retdest) -> (i, 16, Rx, i, 17, Ry, x, y, i, retdest)
ADD %mstore_kernel(@SEGMENT_KERNEL_ECDSA_TABLE) ADD %mstore_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)
PUSH 25 %mload_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)
PUSH 24 %mload_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)
%stack (Gx, Gy, x, y, i, retdest) -> (Gx, Gy, x, y, precompute_table_loop_contd3, i, retdest)
%jump(ec_add_valid_points_secp)
%jump(secp_add_valid_points)
precompute_table_loop_contd3:
%stack (Rx, Ry, i, retdest) -> (i, 24, Rx, i, 25, Ry, i, retdest)
ADD %mstore_kernel(@SEGMENT_KERNEL_ECDSA_TABLE) ADD %mstore_kernel(@SEGMENT_KERNEL_ECDSA_TABLE)


@ -0,0 +1,69 @@
// wNAF expansion with w=5.
// Stores the reversed expansion of the given scalar in memory at the given segment and offsets 0..130.
// Should be called with scalars of bit length <= 129, which is the case when using GLV.
// Pseudo-code:
// def wnaf(n):
//     ans = [0 for _ in range(130)]
//     o = 0
//     while n != 0:
//         i = n.trailing_zero_bits()
//         o += i
//         n >>= i
//         m = n & 31
//         ans[o] = m
//         if m > 16:
//             n += 32
//         n -= m
//     return ans
global wnaf:
// stack: N, segment, n, retdest (N is the size of the group in which the mul is taking place)
DUP3 MOD ISZERO %jumpi(wnaf_zero_scalar)
PUSH 0
wnaf_loop:
%stack (o, segment, n, retdest) -> (n, wnaf_loop_contd, o, segment, retdest)
%jump(trailing_zeros)
wnaf_loop_contd:
%stack (n, i, o, segment, retdest) -> (o, i, n, segment, retdest)
ADD
%stack (o, n, segment, retdest) -> (n, segment, o, retdest)
DUP1 %and_const(31) SWAP1
PUSH 16 DUP3 GT
// stack: m>16, n, m, segment, o, retdest
%mul_const(32) ADD
// stack: n, m, segment, o, retdest
DUP2 SWAP1 SUB
%stack (n, m, segment, o, retdest) -> (129, o, m, o, segment, n, retdest)
SUB
%stack (i, m, o, segment, n, retdest) -> (0, segment, i, m, o, segment, n, retdest)
MSTORE_GENERAL
// stack: o, segment, n, retdest
DUP3 ISZERO %jumpi(wnaf_end)
// stack: o, segment, n, retdest
%jump(wnaf_loop)
wnaf_end:
// stack: o, segment, n, retdest
%pop3 JUMP
wnaf_zero_scalar:
// stack: segment, n, retdest
%pop2 JUMP
// Computes the number of trailing zeros of the scalar with a simple loop, and also returns the scalar with those low-order zero bits shifted out.
trailing_zeros:
// stack: x, retdest
PUSH 0
trailing_zeros_loop:
// stack: count, x, retdest
PUSH 1 DUP3 AND
// stack: x&1, count, x, retdest
%jumpi(trailing_zeros_end)
// stack: count, x, retdest
%increment SWAP1 PUSH 1 SHR SWAP1
// stack: count, x>>1, retdest
%jump(trailing_zeros_loop)
trailing_zeros_end:
%stack (count, x, retdest) -> (retdest, x, count)
JUMP
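
A direct, hedged Rust port of the pseudo-code above; u128 stands in for the up-to-129-bit GLV scalars, so this sketch assumes the scalar fits in 128 bits:

fn wnaf(mut n: u128) -> [u8; 130] {
    let mut ans = [0u8; 130];
    let mut o = 0usize;
    while n != 0 {
        let i = n.trailing_zeros() as usize; // the trailing_zeros loop above
        o += i;
        n >>= i;
        let m = (n & 31) as u8; // odd window digit in 1..=31
        ans[o] = m;
        if m > 16 {
            n += 32; // a digit m > 16 encodes m - 32, so compensate here
        }
        n -= m as u128;
    }
    ans
}

The kernel instead stores the digit found at position o at offset 129 - o, which is what makes the in-memory expansion reversed.
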


@ -63,7 +63,7 @@ const HASH_CONSTANTS: [(&str, [u8; 32]); 2] = [
),
];
const EC_CONSTANTS: [(&str, [u8; 32]); 10] = [
const EC_CONSTANTS: [(&str, [u8; 32]); 18] = [
(
"U256_MAX",
hex!("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
@ -72,6 +72,39 @@ const EC_CONSTANTS: [(&str, [u8; 32]); 10] = [
"BN_BASE",
hex!("30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47"),
),
(
"BN_SCALAR",
hex!("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001"),
),
(
"BN_GLV_BETA",
hex!("000000000000000059e26bcea0d48bacd4f263f1acdb5c4f5763473177fffffe"),
),
(
"BN_GLV_S",
hex!("0000000000000000b3c4d79d41a917585bfc41088d8daaa78b17ea66b99c90dd"),
),
(
"BN_GLV_MINUS_G1",
hex!("000000000000000000000000000000024ccef014a773d2cf7a7bd9d4391eb18d"),
),
(
"BN_GLV_G2",
hex!("000000000000000000000000000000000000000000000002d91d232ec7e0b3d7"),
),
(
"BN_GLV_B1",
hex!("30644e72e131a029b85045b68181585cb8e665ff8b011694c1d039a872b0eed9"),
),
(
"BN_GLV_B2",
hex!("00000000000000000000000000000000000000000000000089d3256894d213e3"),
),
(
"BN_BNEG_LOC",
// This just needs to be large enough to not interfere with anything else in SEGMENT_KERNEL_BN_TABLE_Q.
hex!("0000000000000000000000000000000000000000000000000000000000001337"),
),
(
"SECP_BASE",
hex!("fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"),

File diff suppressed because it is too large


@ -4,15 +4,16 @@ mod bn {
use ethereum_types::U256;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::interpreter::run_interpreter;
use crate::cpu::kernel::interpreter::{run_interpreter, Interpreter};
use crate::cpu::kernel::tests::u256ify;
use crate::memory::segments::Segment;
#[test]
fn test_ec_ops() -> Result<()> {
// Make sure we can parse and assemble the entire kernel.
let ec_add = KERNEL.global_labels["ec_add"];
let ec_double = KERNEL.global_labels["ec_double"];
let ec_mul = KERNEL.global_labels["ec_mul"];
let ec_add = KERNEL.global_labels["bn_add"];
let ec_double = KERNEL.global_labels["bn_double"];
let ec_mul = KERNEL.global_labels["bn_mul"];
let identity = ("0x0", "0x0");
let invalid = ("0x0", "0x3"); // Not on curve
let point0 = (
@ -130,11 +131,100 @@ mod bn {
Ok(())
}
#[test]
fn test_glv_verify_data() -> Result<()> {
let glv = KERNEL.global_labels["bn_glv_decompose"];
let f = include_str!("bn_glv_test_data");
for line in f.lines().filter(|s| !s.starts_with("//")) {
let mut line = line
.split_whitespace()
.map(|s| U256::from_str_radix(s, 10).unwrap())
.collect::<Vec<_>>();
let k = line.remove(0);
line.reverse();
let mut initial_stack = u256ify(["0xdeadbeef"])?;
initial_stack.push(k);
let mut int = Interpreter::new(&KERNEL.code, glv, initial_stack, &KERNEL.prover_inputs);
int.run()?;
assert_eq!(line, int.stack());
}
Ok(())
}
#[test]
fn test_precomputation() -> Result<()> {
let precompute = KERNEL.global_labels["bn_precompute_table"];
let initial_stack = u256ify([
"0xdeadbeef",
"0x10d7cf0621b6e42c1dbb421f5ef5e1936ca6a87b38198d1935be31e28821d171",
"0x11b7d55f16aaac07de9a0ed8ac2e8023570dbaa78571fc95e553c4b3ba627689",
])?;
let mut int = Interpreter::new(
&KERNEL.code,
precompute,
initial_stack,
&KERNEL.prover_inputs,
);
int.run()?;
let mut computed_table = Vec::new();
for i in 0..32 {
computed_table.push(
int.generation_state
.memory
.mload_general(0, Segment::BnTableQ, i),
);
}
let table = u256ify([
"0x11b7d55f16aaac07de9a0ed8ac2e8023570dbaa78571fc95e553c4b3ba627689",
"0x10d7cf0621b6e42c1dbb421f5ef5e1936ca6a87b38198d1935be31e28821d171",
"0x1565e5587d8566239c23219bc0e1d1d267d19100c3869d0c55b1e3ea4532304e",
"0x19fd9b572558479df062632562113e4d9a3eb655698ee3be9a5350ed23e690ee",
"0x19469e55e27021c0af1310ad266cdf1d9eef6942c80afe9c7b517acf16a2a3e1",
"0x226ec29db9339d7ffb1bc3260f1ca008b804f78553d316c37203118466bb5f5a",
"0x10a16b4786bd1717a031a1948010593173d36ab35535641c9fe41802d639b435",
"0x294fe34d7ec9024c96cfde58311b9ee394ff9f8735d882005fcf0d28709b459d",
"0x300f58e61d4ab1872f6b5fad517c6df1b23468fcfa81154786ec230cb0df6d20",
"0x12ff1d200127d2ba7a0171cadbe0f729fc5acbe95565cc57f07c9fa42c001390",
"0x1045a28c9a35a17b63da593c0137ac08a1fda78430b71755941d3dc501b35272",
"0x2a3f4d91b58179451ec177f599d7eaf79e2555f169fd3e5d2af314600fad299",
"0x21de5680f03b262f53d3252d5ca71bbc5f2c9ff5483fb63abaea1ee7e9cede1d",
"0x144249d3fc4c82327845a38ea51181acb374ab30a1e7ea0f13bc8a8b04d96411",
"0x2ba4ce4289de377397878c1195e21a1d573b02d9463f5c454ec50bdf11aee512",
"0x259a447b42bab48e07388baece550607bc0a8a88e1ea224eba94c6bed08e470e",
"0x2ba4ce4289de377397878c1195e21a1d573b02d9463f5c454ec50bdf11aee512",
"0xaca09f79e76eb9bb117ba07b32c5255db76e0088687a83e818bc55807eeb639",
"0x21de5680f03b262f53d3252d5ca71bbc5f2c9ff5483fb63abaea1ee7e9cede1d",
"0x1c22049ee4e51df7400aa227dc6fd6b0e40cbf60c689e07e2864018bd3a39936",
"0x1045a28c9a35a17b63da593c0137ac08a1fda78430b71755941d3dc501b35272",
"0x2dc05999c5d9889566642e3727e3d9ae1d9f153251d1f6a769715ad0d7822aae",
"0x300f58e61d4ab1872f6b5fad517c6df1b23468fcfa81154786ec230cb0df6d20",
"0x1d653152e009cd6f3e4ed3eba5a061339b269ea8130bfe354ba3ec72ac7ce9b7",
"0x10a16b4786bd1717a031a1948010593173d36ab35535641c9fe41802d639b435",
"0x7146b2562689ddd2180675e5065b97a0281cb0a3299488cdc517eee67e1b7aa",
"0x19469e55e27021c0af1310ad266cdf1d9eef6942c80afe9c7b517acf16a2a3e1",
"0xdf58bd527fe02a9bd3482907264b854df7c730c149eb3c9ca1d7a9271c19ded",
"0x1565e5587d8566239c23219bc0e1d1d267d19100c3869d0c55b1e3ea4532304e",
"0x1666b31bbbd9588bc7ede2911f701a0ffd42b43bfee2e6cea1cd3b29b4966c59",
"0x11b7d55f16aaac07de9a0ed8ac2e8023570dbaa78571fc95e553c4b3ba627689",
"0x1f8c7f6cbf7abbfd9a950397228b76ca2adac21630583d7406625a34505b2bd6",
])?;
assert_eq!(computed_table, table);
Ok(())
}
}
#[cfg(test)]
mod secp {
use anyhow::Result;
use ethereum_types::U256;
@ -146,8 +236,8 @@ mod secp {
fn test_ec_ops() -> Result<()> {
// Make sure we can parse and assemble the entire kernel.
let kernel = combined_kernel();
let ec_add = kernel.global_labels["ec_add_valid_points_secp"];
let ec_double = kernel.global_labels["ec_double_secp"];
let ec_add = kernel.global_labels["secp_add_valid_points"];
let ec_double = kernel.global_labels["secp_double"];
let identity = ("0x0", "0x0");
let point0 = (
"0xc82ccceebd739e646631b7270ed8c33e96c4940b19db91eaf67da6ec92d109b",
@ -207,9 +297,9 @@ mod secp {
#[test]
fn test_glv_verify_data() -> Result<()> {
let glv = KERNEL.global_labels["glv_decompose"];
let glv = KERNEL.global_labels["secp_glv_decompose"];
let f = include_str!("glv_test_data");
let f = include_str!("secp_glv_test_data");
for line in f.lines().filter(|s| !s.starts_with("//")) {
let mut line = line
.split_whitespace()


@ -50,6 +50,7 @@ const STACK_BEHAVIORS: OpsColumnsView<Option<StackBehavior>> = OpsColumnsView {
addfp254: BASIC_BINARY_OP,
mulfp254: BASIC_BINARY_OP,
subfp254: BASIC_BINARY_OP,
submod: BASIC_TERNARY_OP,
lt: BASIC_BINARY_OP,
gt: BASIC_BINARY_OP,
eq: BASIC_BINARY_OP,


@ -217,7 +217,7 @@ impl<F: Field> CtlData<F> {
}
}
pub(crate) fn cross_table_lookup_data<F: RichField, C: GenericConfig<D, F = F>, const D: usize>(
pub(crate) fn cross_table_lookup_data<F: RichField, const D: usize>(
trace_poly_values: &[Vec<PolynomialValues<F>>; NUM_TABLES],
cross_table_lookups: &[CrossTableLookup<F>],
ctl_challenges: &GrandProductChallengeSet<F>,
@ -371,7 +371,7 @@ impl<'a, F: RichField + Extendable<D>, const D: usize>
}
}
pub(crate) fn eval_cross_table_lookup_checks<F, FE, P, C, S, const D: usize, const D2: usize>(
pub(crate) fn eval_cross_table_lookup_checks<F, FE, P, S, const D: usize, const D2: usize>(
vars: StarkEvaluationVars<FE, P, { S::COLUMNS }>,
ctl_vars: &[CtlCheckVars<F, FE, P, D2>],
consumer: &mut ConstraintConsumer<P>,
@ -379,7 +379,6 @@ pub(crate) fn eval_cross_table_lookup_checks<F, FE, P, C, S, const D: usize, con
F: RichField + Extendable<D>,
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
{
for lookup_vars in ctl_vars {
@ -540,11 +539,7 @@ pub(crate) fn eval_cross_table_lookup_checks_circuit<
}
}
pub(crate) fn verify_cross_table_lookups<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
pub(crate) fn verify_cross_table_lookups<F: RichField + Extendable<D>, const D: usize>(
cross_table_lookups: &[CrossTableLookup<F>],
ctl_zs_lasts: [Vec<F>; NUM_TABLES],
config: &StarkConfig,
@ -573,11 +568,7 @@ pub(crate) fn verify_cross_table_lookups<
Ok(())
}
pub(crate) fn verify_cross_table_lookups_circuit<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
pub(crate) fn verify_cross_table_lookups_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
cross_table_lookups: Vec<CrossTableLookup<F>>,
ctl_zs_lasts: [Vec<Target>; NUM_TABLES],


@ -228,7 +228,7 @@ where
}
// Verify the CTL checks.
verify_cross_table_lookups_circuit::<F, C, D>(
verify_cross_table_lookups_circuit::<F, D>(
&mut builder,
all_cross_table_lookups(),
pis.map(|p| p.ctl_zs_last),


@ -40,11 +40,13 @@ pub(crate) enum Segment {
ShiftTable = 16,
JumpdestBits = 17,
EcdsaTable = 18,
BN254Pairings = 19,
BnWnafA = 19,
BnWnafB = 20,
BnTableQ = 21,
}
impl Segment {
pub(crate) const COUNT: usize = 20;
pub(crate) const COUNT: usize = 22;
pub(crate) fn all() -> [Self; Self::COUNT] {
[
@ -67,7 +69,9 @@ impl Segment {
Self::ShiftTable,
Self::JumpdestBits,
Self::EcdsaTable,
Self::BN254Pairings,
Self::BnWnafA,
Self::BnWnafB,
Self::BnTableQ,
]
}
@ -93,7 +97,9 @@ impl Segment {
Segment::ShiftTable => "SEGMENT_SHIFT_TABLE",
Segment::JumpdestBits => "SEGMENT_JUMPDEST_BITS",
Segment::EcdsaTable => "SEGMENT_KERNEL_ECDSA_TABLE",
Segment::BN254Pairings => "SEGMENT_BN254_PAIRINGS",
Segment::BnWnafA => "SEGMENT_KERNEL_BN_WNAF_A",
Segment::BnWnafB => "SEGMENT_KERNEL_BN_WNAF_B",
Segment::BnTableQ => "SEGMENT_KERNEL_BN_TABLE_Q",
}
}
@ -119,7 +125,9 @@ impl Segment {
Segment::ShiftTable => 256,
Segment::JumpdestBits => 1,
Segment::EcdsaTable => 256,
Segment::BN254Pairings => 256,
Segment::BnWnafA => 8,
Segment::BnWnafB => 8,
Segment::BnTableQ => 256,
}
}
}


@ -13,7 +13,7 @@ use plonky2::iop::challenger::{Challenger, RecursiveChallenger};
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig, Hasher};
use plonky2::plonk::config::{AlgebraicHasher, Hasher};
use plonky2::plonk::plonk_common::{reduce_with_powers, reduce_with_powers_ext_circuit};
use plonky2::util::reducing::{ReducingFactor, ReducingFactorTarget};
use plonky2_maybe_rayon::*;
@ -89,7 +89,7 @@ pub(crate) struct GrandProductChallengeSet<T: Copy + Eq + PartialEq + Debug> {
}
/// Compute all Z polynomials (for permutation arguments).
pub(crate) fn compute_permutation_z_polys<F, C, S, const D: usize>(
pub(crate) fn compute_permutation_z_polys<F, S, const D: usize>(
stark: &S,
config: &StarkConfig,
trace_poly_values: &[PolynomialValues<F>],
@ -97,7 +97,6 @@ pub(crate) fn compute_permutation_z_polys<F, C, S, const D: usize>(
) -> Vec<PolynomialValues<F>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
{
let permutation_pairs = stark.permutation_pairs();
@ -286,7 +285,7 @@ where
pub(crate) permutation_challenge_sets: Vec<GrandProductChallengeSet<F>>,
}
pub(crate) fn eval_permutation_checks<F, FE, P, C, S, const D: usize, const D2: usize>(
pub(crate) fn eval_permutation_checks<F, FE, P, S, const D: usize, const D2: usize>(
stark: &S,
config: &StarkConfig,
vars: StarkEvaluationVars<FE, P, { S::COLUMNS }>,
@ -296,7 +295,6 @@ pub(crate) fn eval_permutation_checks<F, FE, P, C, S, const D: usize, const D2:
F: RichField + Extendable<D>,
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
{
let PermutationCheckVars {


@ -124,7 +124,7 @@ where
let ctl_data_per_table = timed!(
timing,
"compute CTL data",
cross_table_lookup_data::<F, C, D>(
cross_table_lookup_data::<F, D>(
&trace_poly_values,
&all_stark.cross_table_lookups,
&ctl_challenges,
@ -286,7 +286,7 @@ where
timed!(
timing,
"compute permutation Z(x) polys",
compute_permutation_z_polys::<F, C, S, D>(stark, config, trace_poly_values, challenges)
compute_permutation_z_polys::<F, S, D>(stark, config, trace_poly_values, challenges)
)
});
let num_permutation_zs = permutation_zs.as_ref().map(|v| v.len()).unwrap_or(0);
@ -533,7 +533,7 @@ where
filter_column: &zs_columns.filter_column,
})
.collect::<Vec<_>>();
eval_vanishing_poly::<F, F, P, C, S, D, 1>(
eval_vanishing_poly::<F, F, P, S, D, 1>(
stark,
config,
vars,
@ -550,7 +550,7 @@ where
let num_challenges = alphas.len();
(0..P::WIDTH).into_iter().map(move |i| {
(0..P::WIDTH).map(move |i| {
(0..num_challenges)
.map(|j| constraints_evals[j].as_slice()[i])
.collect()
@ -651,7 +651,7 @@ fn check_constraints<'a, F, C, S, const D: usize>(
filter_column: &zs_columns.filter_column,
})
.collect::<Vec<_>>();
eval_vanishing_poly::<F, F, F, C, S, D, 1>(
eval_vanishing_poly::<F, F, F, S, D, 1>(
stark,
config,
vars,


@ -132,7 +132,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
}
// Verify the CTL checks.
verify_cross_table_lookups::<F, C, D>(
verify_cross_table_lookups::<F, D>(
&cross_table_lookups,
pis.map(|p| p.ctl_zs_last),
inner_config,
@ -393,7 +393,7 @@ fn verify_stark_proof_with_challenges_circuit<
with_context!(
builder,
"evaluate vanishing polynomial",
eval_vanishing_poly_circuit::<F, C, S, D>(
eval_vanishing_poly_circuit::<F, S, D>(
builder,
stark,
inner_config,


@ -2,7 +2,6 @@ use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::config::GenericConfig;
use crate::config::StarkConfig;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
@ -17,7 +16,7 @@ use crate::permutation::{
use crate::stark::Stark;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
pub(crate) fn eval_vanishing_poly<F, FE, P, C, S, const D: usize, const D2: usize>(
pub(crate) fn eval_vanishing_poly<F, FE, P, S, const D: usize, const D2: usize>(
stark: &S,
config: &StarkConfig,
vars: StarkEvaluationVars<FE, P, { S::COLUMNS }>,
@ -28,12 +27,11 @@ pub(crate) fn eval_vanishing_poly<F, FE, P, C, S, const D: usize, const D2: usiz
F: RichField + Extendable<D>,
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
{
stark.eval_packed_generic(vars, consumer);
if let Some(permutation_vars) = permutation_vars {
eval_permutation_checks::<F, FE, P, C, S, D, D2>(
eval_permutation_checks::<F, FE, P, S, D, D2>(
stark,
config,
vars,
@ -41,10 +39,10 @@ pub(crate) fn eval_vanishing_poly<F, FE, P, C, S, const D: usize, const D2: usiz
consumer,
);
}
eval_cross_table_lookup_checks::<F, FE, P, C, S, D, D2>(vars, ctl_vars, consumer);
eval_cross_table_lookup_checks::<F, FE, P, S, D, D2>(vars, ctl_vars, consumer);
}
pub(crate) fn eval_vanishing_poly_circuit<F, C, S, const D: usize>(
pub(crate) fn eval_vanishing_poly_circuit<F, S, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
stark: &S,
config: &StarkConfig,
@ -54,7 +52,6 @@ pub(crate) fn eval_vanishing_poly_circuit<F, C, S, const D: usize>(
consumer: &mut RecursiveConstraintConsumer<F, D>,
) where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
[(); S::COLUMNS]:,
{


@ -97,7 +97,7 @@ where
config,
)?;
verify_cross_table_lookups::<F, C, D>(
verify_cross_table_lookups::<F, D>(
cross_table_lookups,
all_proof.stark_proofs.map(|p| p.proof.openings.ctl_zs_last),
config,
@ -155,7 +155,7 @@ where
next_zs: permutation_ctl_zs_next[..num_permutation_zs].to_vec(),
permutation_challenge_sets: challenges.permutation_challenge_sets.clone().unwrap(),
});
eval_vanishing_poly::<F, F::Extension, F::Extension, C, S, D, D>(
eval_vanishing_poly::<F, F::Extension, F::Extension, S, D, D>(
stark,
config,
vars,


@ -51,6 +51,9 @@ fn decode(registers: RegistersState, opcode: u8) -> Result<Operation, ProgramErr
(0x0e, true) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::SubFp254,
)),
(0x0f, true) => Ok(Operation::TernaryArithmetic(
arithmetic::TernaryOperator::SubMod,
)),
(0x10, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Lt)),
(0x11, _) => Ok(Operation::BinaryArithmetic(arithmetic::BinaryOperator::Gt)),
(0x12, _) => Ok(Operation::Syscall(opcode)),
@ -167,6 +170,7 @@ fn fill_op_flag<F: Field>(op: Operation, row: &mut CpuColumnsView<F>) {
Operation::BinaryArithmetic(arithmetic::BinaryOperator::SubFp254) => &mut flags.subfp254,
Operation::TernaryArithmetic(arithmetic::TernaryOperator::AddMod) => &mut flags.addmod,
Operation::TernaryArithmetic(arithmetic::TernaryOperator::MulMod) => &mut flags.mulmod,
Operation::TernaryArithmetic(arithmetic::TernaryOperator::SubMod) => &mut flags.submod,
Operation::KeccakGeneral => &mut flags.keccak_general,
Operation::ProverInput => &mut flags.prover_input,
Operation::Pop => &mut flags.pop,


@ -119,7 +119,7 @@ pub(crate) fn bench_field<F: Field>(c: &mut Criterion) {
&format!("batch_multiplicative_inverse-tiny<{}>", type_name::<F>()),
|b| {
b.iter_batched(
|| (0..2).into_iter().map(|_| F::rand()).collect::<Vec<_>>(),
|| (0..2).map(|_| F::rand()).collect::<Vec<_>>(),
|x| F::batch_multiplicative_inverse(&x),
BatchSize::SmallInput,
)
@ -130,7 +130,7 @@ pub(crate) fn bench_field<F: Field>(c: &mut Criterion) {
&format!("batch_multiplicative_inverse-small<{}>", type_name::<F>()),
|b| {
b.iter_batched(
|| (0..4).into_iter().map(|_| F::rand()).collect::<Vec<_>>(),
|| (0..4).map(|_| F::rand()).collect::<Vec<_>>(),
|x| F::batch_multiplicative_inverse(&x),
BatchSize::SmallInput,
)
@ -141,7 +141,7 @@ pub(crate) fn bench_field<F: Field>(c: &mut Criterion) {
&format!("batch_multiplicative_inverse-medium<{}>", type_name::<F>()),
|b| {
b.iter_batched(
|| (0..16).into_iter().map(|_| F::rand()).collect::<Vec<_>>(),
|| (0..16).map(|_| F::rand()).collect::<Vec<_>>(),
|x| F::batch_multiplicative_inverse(&x),
BatchSize::SmallInput,
)
@ -152,7 +152,7 @@ pub(crate) fn bench_field<F: Field>(c: &mut Criterion) {
&format!("batch_multiplicative_inverse-large<{}>", type_name::<F>()),
|b| {
b.iter_batched(
|| (0..256).into_iter().map(|_| F::rand()).collect::<Vec<_>>(),
|| (0..256).map(|_| F::rand()).collect::<Vec<_>>(),
|x| F::batch_multiplicative_inverse(&x),
BatchSize::LargeInput,
)
@ -163,12 +163,7 @@ pub(crate) fn bench_field<F: Field>(c: &mut Criterion) {
&format!("batch_multiplicative_inverse-huge<{}>", type_name::<F>()),
|b| {
b.iter_batched(
|| {
(0..65536)
.into_iter()
.map(|_| F::rand())
.collect::<Vec<_>>()
},
|| (0..65536).map(|_| F::rand()).collect::<Vec<_>>(),
|x| F::batch_multiplicative_inverse(&x),
BatchSize::LargeInput,
)


@ -103,7 +103,7 @@ where
{
let (inner_proof, inner_vd, inner_cd) = inner;
let mut builder = CircuitBuilder::<F, D>::new(config.clone());
let pt = builder.add_virtual_proof_with_pis::<InnerC>(inner_cd);
let pt = builder.add_virtual_proof_with_pis(inner_cd);
let inner_data = builder.add_virtual_verifier_data(inner_cd.config.fri_config.cap_height);


@ -73,7 +73,7 @@ impl<F: RichField + Extendable<D>, H: AlgebraicHasher<F>, const D: usize>
}
}
pub fn fri_challenges<C: GenericConfig<D, F = F>>(
pub fn fri_challenges(
&mut self,
builder: &mut CircuitBuilder<F, D>,
commit_phase_merkle_caps: &[MerkleCapTarget],


@ -15,7 +15,7 @@ use crate::hash::merkle_tree::MerkleCap;
use crate::hash::path_compression::{compress_merkle_proofs, decompress_merkle_proofs};
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::target::Target;
use crate::plonk::config::{GenericConfig, Hasher};
use crate::plonk::config::Hasher;
use crate::plonk::plonk_common::salt_size;
use crate::plonk::proof::{FriInferredElements, ProofChallenges};
@ -135,11 +135,7 @@ pub struct CompressedFriProof<F: RichField + Extendable<D>, H: Hasher<F>, const
impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> FriProof<F, H, D> {
/// Compress all the Merkle paths in the FRI proof and remove duplicate indices.
pub fn compress<C: GenericConfig<D, F = F, Hasher = H>>(
self,
indices: &[usize],
params: &FriParams,
) -> CompressedFriProof<F, H, D> {
pub fn compress(self, indices: &[usize], params: &FriParams) -> CompressedFriProof<F, H, D> {
let FriProof {
commit_phase_merkle_caps,
query_round_proofs,
@ -241,7 +237,7 @@ impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> FriProof<F, H,
impl<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> CompressedFriProof<F, H, D> {
/// Decompress all the Merkle paths in the FRI proof and reinsert duplicate indices.
pub(crate) fn decompress<C: GenericConfig<D, F = F, Hasher = H>>(
pub(crate) fn decompress(
self,
challenges: &ProofChallenges<F, D>,
fri_inferred_elements: FriInferredElements<F, D>,


@ -25,7 +25,7 @@ use crate::with_context;
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
/// Computes P'(x^arity) from {P(x*g^i)}_(i=0..arity), where g is a `arity`-th root of unity
/// and P' is the FRI reduced polynomial.
fn compute_evaluation<C: GenericConfig<D, F = F>>(
fn compute_evaluation(
&mut self,
x: Target,
x_index_within_coset_bits: &[BoolTarget],
@ -58,7 +58,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
/// Make sure we have enough wires and routed wires to do the FRI checks efficiently. This check
/// isn't required -- without it we'd get errors elsewhere in the stack -- but just gives more
/// helpful errors.
fn check_recursion_config<C: GenericConfig<D, F = F>>(&self, max_fri_arity_bits: usize) {
fn check_recursion_config(&self, max_fri_arity_bits: usize) {
let random_access = RandomAccessGate::<F, D>::new_from_config(
&self.config,
max_fri_arity_bits.max(self.config.fri_config.cap_height),
@ -91,11 +91,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
);
}
fn fri_verify_proof_of_work<H: AlgebraicHasher<F>>(
&mut self,
fri_pow_response: Target,
config: &FriConfig,
) {
fn fri_verify_proof_of_work(&mut self, fri_pow_response: Target, config: &FriConfig) {
self.assert_leading_zeros(
fri_pow_response,
config.proof_of_work_bits + (64 - F::order().bits()) as u32,
@ -114,7 +110,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
C::Hasher: AlgebraicHasher<F>,
{
if let Some(max_arity_bits) = params.max_arity_bits() {
self.check_recursion_config::<C>(max_arity_bits);
self.check_recursion_config(max_arity_bits);
}
debug_assert_eq!(
@ -129,7 +125,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
with_context!(
self,
"check PoW",
self.fri_verify_proof_of_work::<C::Hasher>(challenges.fri_pow_response, &params.config)
self.fri_verify_proof_of_work(challenges.fri_pow_response, &params.config)
);
// Check that parameters are coherent.
@ -206,7 +202,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
}
fn fri_combine_initial<C: GenericConfig<D, F = F>>(
fn fri_combine_initial(
&mut self,
instance: &FriInstanceInfoTarget<D>,
proof: &FriInitialTreeProofTarget,
@ -298,7 +294,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
let mut old_eval = with_context!(
self,
"combine initial oracles",
self.fri_combine_initial::<C>(
self.fri_combine_initial(
instance,
&round_proof.initial_trees_proof,
challenges.fri_alpha,
@ -324,7 +320,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
old_eval = with_context!(
self,
"infer evaluation using interpolation",
self.compute_evaluation::<C>(
self.compute_evaluation(
subgroup_x,
x_index_within_coset_bits,
arity_bits,


@ -33,7 +33,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
/// Takes an iterator of bits `(b_i)` and returns `sum b_i * 2^i`, i.e.,
/// the number with little-endian bit representation given by `bits`.
pub(crate) fn le_sum(&mut self, bits: impl Iterator<Item = impl Borrow<BoolTarget>>) -> Target {
pub fn le_sum(&mut self, bits: impl Iterator<Item = impl Borrow<BoolTarget>>) -> Target {
let bits = bits.map(|b| *b.borrow()).collect_vec();
let num_bits = bits.len();
assert!(


@ -184,7 +184,6 @@ impl<F: RichField, H: Hasher<F>> MerkleTree<F, H> {
// Mask out high bits to get the index within the sub-tree.
let mut pair_index = leaf_index & ((1 << num_layers) - 1);
let siblings = (0..num_layers)
.into_iter()
.map(|i| {
let parity = pair_index & 1;
pair_index >>= 1;


@ -139,9 +139,7 @@ pub fn flatten_target<const D: usize>(l: &[ExtensionTarget<D>]) -> Vec<Target> {
}
/// Batch every D-sized chunks into extension targets.
pub fn unflatten_target<F: RichField + Extendable<D>, const D: usize>(
l: &[Target],
) -> Vec<ExtensionTarget<D>> {
pub fn unflatten_target<const D: usize>(l: &[Target]) -> Vec<ExtensionTarget<D>> {
debug_assert_eq!(l.len() % D, 0);
l.chunks_exact(D)
.map(|c| c.to_vec().try_into().unwrap())


@ -277,7 +277,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
plonk_gammas,
plonk_alphas,
plonk_zeta,
fri_challenges: challenger.fri_challenges::<C>(
fri_challenges: challenger.fri_challenges(
self,
commit_phase_merkle_caps,
final_poly,


@ -65,7 +65,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> P
plonk_zs_partial_products_cap,
quotient_polys_cap,
openings,
opening_proof: opening_proof.compress::<C>(indices, params),
opening_proof: opening_proof.compress(indices, params),
}
}
}
@ -163,7 +163,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
plonk_zs_partial_products_cap,
quotient_polys_cap,
openings,
opening_proof: opening_proof.decompress::<C>(challenges, fri_inferred_elements, params),
opening_proof: opening_proof.decompress(challenges, fri_inferred_elements, params),
}
}
}


@ -420,7 +420,7 @@ fn compute_quotient_polys<
public_inputs_hash,
);
let mut quotient_values_batch = eval_vanishing_poly_base_batch::<F, C, D>(
let mut quotient_values_batch = eval_vanishing_poly_base_batch::<F, D>(
common_data,
&indices_batch,
&shifted_xs_batch,


@ -10,7 +10,6 @@ use crate::iop::ext_target::ExtensionTarget;
use crate::iop::target::Target;
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CommonCircuitData;
use crate::plonk::config::GenericConfig;
use crate::plonk::plonk_common;
use crate::plonk::plonk_common::eval_l_0_circuit;
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBaseBatch};
@ -22,11 +21,7 @@ use crate::with_context;
/// Evaluate the vanishing polynomial at `x`. In this context, the vanishing polynomial is a random
/// linear combination of gate constraints, plus some other terms relating to the permutation
/// argument. All such terms should vanish on `H`.
pub(crate) fn eval_vanishing_poly<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
pub(crate) fn eval_vanishing_poly<F: RichField + Extendable<D>, const D: usize>(
common_data: &CommonCircuitData<F, D>,
x: F::Extension,
vars: EvaluationVars<F, D>,
@ -41,7 +36,7 @@ pub(crate) fn eval_vanishing_poly<
let max_degree = common_data.quotient_degree_factor;
let num_prods = common_data.num_partial_products;
let constraint_terms = evaluate_gate_constraints::<F, C, D>(common_data, vars);
let constraint_terms = evaluate_gate_constraints::<F, D>(common_data, vars);
// The L_0(x) (Z(x) - 1) vanishing terms.
let mut vanishing_z_1_terms = Vec::new();
@ -97,11 +92,7 @@ pub(crate) fn eval_vanishing_poly<
}
/// Like `eval_vanishing_poly`, but specialized for base field points. Batched.
pub(crate) fn eval_vanishing_poly_base_batch<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
pub(crate) fn eval_vanishing_poly_base_batch<F: RichField + Extendable<D>, const D: usize>(
common_data: &CommonCircuitData<F, D>,
indices_batch: &[usize],
xs_batch: &[F],
@ -129,7 +120,7 @@ pub(crate) fn eval_vanishing_poly_base_batch<
let num_gate_constraints = common_data.num_gate_constraints;
let constraint_terms_batch =
evaluate_gate_constraints_base_batch::<F, C, D>(common_data, vars_batch);
evaluate_gate_constraints_base_batch::<F, D>(common_data, vars_batch);
debug_assert!(constraint_terms_batch.len() == n * num_gate_constraints);
let num_challenges = common_data.config.num_challenges;
@ -208,11 +199,7 @@ pub(crate) fn eval_vanishing_poly_base_batch<
/// `num_gate_constraints` is the largest number of constraints imposed by any gate. It is not
/// strictly necessary, but it helps performance by ensuring that we allocate a vector with exactly
/// the capacity that we need.
pub fn evaluate_gate_constraints<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
pub fn evaluate_gate_constraints<F: RichField + Extendable<D>, const D: usize>(
common_data: &CommonCircuitData<F, D>,
vars: EvaluationVars<F, D>,
) -> Vec<F::Extension> {
@ -242,11 +229,7 @@ pub fn evaluate_gate_constraints<
/// Returns a vector of `num_gate_constraints * vars_batch.len()` field elements. The constraints
/// corresponding to `vars_batch[i]` are found in `result[i], result[vars_batch.len() + i],
/// result[2 * vars_batch.len() + i], ...`.
pub fn evaluate_gate_constraints_base_batch<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
pub fn evaluate_gate_constraints_base_batch<F: RichField + Extendable<D>, const D: usize>(
common_data: &CommonCircuitData<F, D>,
vars_batch: EvaluationVarsBaseBatch<F>,
) -> Vec<F> {
@ -273,11 +256,7 @@ pub fn evaluate_gate_constraints_base_batch<
constraints_batch
}
pub fn evaluate_gate_constraints_circuit<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
pub fn evaluate_gate_constraints_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
common_data: &CommonCircuitData<F, D>,
vars: EvaluationTargets<D>,
@ -308,11 +287,7 @@ pub fn evaluate_gate_constraints_circuit<
///
/// Assumes `x != 1`; if `x` could be 1 then this is unsound. This is fine if `x` is a random
/// variable drawn from a sufficiently large domain.
pub(crate) fn eval_vanishing_poly_circuit<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
pub(crate) fn eval_vanishing_poly_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
common_data: &CommonCircuitData<F, D>,
x: ExtensionTarget<D>,
@ -332,7 +307,7 @@ pub(crate) fn eval_vanishing_poly_circuit<
let constraint_terms = with_context!(
builder,
"evaluate gate constraints",
evaluate_gate_constraints_circuit::<F, C, D>(builder, common_data, vars,)
evaluate_gate_constraints_circuit::<F, D>(builder, common_data, vars,)
);
// The L_0(x) (Z(x) - 1) vanishing terms.


@ -59,7 +59,7 @@ pub(crate) fn verify_with_challenges<
let partial_products = &proof.openings.partial_products;
// Evaluate the vanishing polynomial at our challenge point, zeta.
let vanishing_polys_zeta = eval_vanishing_poly::<F, C, D>(
let vanishing_polys_zeta = eval_vanishing_poly::<F, D>(
common_data,
challenges.plonk_zeta,
vars,


@ -374,9 +374,9 @@ mod tests {
// Conditionally verify the two proofs.
let mut builder = CircuitBuilder::<F, D>::new(config);
let mut pw = PartialWitness::new();
let pt = builder.add_virtual_proof_with_pis::<C>(&data.common);
let pt = builder.add_virtual_proof_with_pis(&data.common);
pw.set_proof_with_pis_target(&pt, &proof);
let dummy_pt = builder.add_virtual_proof_with_pis::<C>(&data.common);
let dummy_pt = builder.add_virtual_proof_with_pis(&data.common);
pw.set_proof_with_pis_target::<C, D>(&dummy_pt, &dummy_proof);
let inner_data =
builder.add_virtual_verifier_data(data.common.config.fri_config.cap_height);


@ -40,7 +40,7 @@ impl<C: GenericConfig<D>, const D: usize> VerifierOnlyCircuitData<C, D> {
}
impl VerifierCircuitTarget {
fn from_slice<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
fn from_slice<F: RichField + Extendable<D>, const D: usize>(
slice: &[Target],
common_data: &CommonCircuitData<F, D>,
) -> Result<Self> {
@ -101,7 +101,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
self.goal_common_data = Some(common_data.clone());
}
let inner_cyclic_pis = VerifierCircuitTarget::from_slice::<F, C, D>(
let inner_cyclic_pis = VerifierCircuitTarget::from_slice::<F, D>(
&cyclic_proof_with_pis.public_inputs,
common_data,
)?;
@ -207,7 +207,7 @@ mod tests {
let data = builder.build::<C>();
let config = CircuitConfig::standard_recursion_config();
let mut builder = CircuitBuilder::<F, D>::new(config);
let proof = builder.add_virtual_proof_with_pis::<C>(&data.common);
let proof = builder.add_virtual_proof_with_pis(&data.common);
let verifier_data =
builder.add_virtual_verifier_data(data.common.config.fri_config.cap_height);
builder.verify_proof::<C>(&proof, &verifier_data, &data.common);
@ -215,7 +215,7 @@ mod tests {
let config = CircuitConfig::standard_recursion_config();
let mut builder = CircuitBuilder::<F, D>::new(config);
let proof = builder.add_virtual_proof_with_pis::<C>(&data.common);
let proof = builder.add_virtual_proof_with_pis(&data.common);
let verifier_data =
builder.add_virtual_verifier_data(data.common.config.fri_config.cap_height);
builder.verify_proof::<C>(&proof, &verifier_data, &data.common);
@ -257,7 +257,7 @@ mod tests {
let condition = builder.add_virtual_bool_target_safe();
// Unpack inner proof's public inputs.
let inner_cyclic_proof_with_pis = builder.add_virtual_proof_with_pis::<C>(&common_data);
let inner_cyclic_proof_with_pis = builder.add_virtual_proof_with_pis(&common_data);
let inner_cyclic_pis = &inner_cyclic_proof_with_pis.public_inputs;
let inner_cyclic_initial_hash = HashOutTarget::try_from(&inner_cyclic_pis[0..4]).unwrap();
let inner_cyclic_latest_hash = HashOutTarget::try_from(&inner_cyclic_pis[4..8]).unwrap();


@ -113,7 +113,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
{
let dummy_circuit = dummy_circuit::<F, C, D>(common_data);
let dummy_proof_with_pis = dummy_proof(&dummy_circuit, HashMap::new())?;
let dummy_proof_with_pis_target = self.add_virtual_proof_with_pis::<C>(common_data);
let dummy_proof_with_pis_target = self.add_virtual_proof_with_pis(common_data);
let dummy_verifier_data_target =
self.add_virtual_verifier_data(self.config.fri_config.cap_height);


@ -74,7 +74,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
let vanishing_polys_zeta = with_context!(
self,
"evaluate the vanishing polynomial at our challenge point, zeta.",
eval_vanishing_poly_circuit::<F, C, D>(
eval_vanishing_poly_circuit::<F, D>(
self,
inner_common_data,
challenges.plonk_zeta,
@ -126,11 +126,11 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
);
}
pub fn add_virtual_proof_with_pis<InnerC: GenericConfig<D, F = F>>(
pub fn add_virtual_proof_with_pis(
&mut self,
common_data: &CommonCircuitData<F, D>,
) -> ProofWithPublicInputsTarget<D> {
let proof = self.add_virtual_proof::<InnerC>(common_data);
let proof = self.add_virtual_proof(common_data);
let public_inputs = self.add_virtual_targets(common_data.num_public_inputs);
ProofWithPublicInputsTarget {
proof,
@ -138,10 +138,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
}
fn add_virtual_proof<InnerC: GenericConfig<D, F = F>>(
&mut self,
common_data: &CommonCircuitData<F, D>,
) -> ProofTarget<D> {
fn add_virtual_proof(&mut self, common_data: &CommonCircuitData<F, D>) -> ProofTarget<D> {
let config = &common_data.config;
let fri_params = &common_data.fri_params;
let cap_height = fri_params.config.cap_height;
@ -158,15 +155,12 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
wires_cap: self.add_virtual_cap(cap_height),
plonk_zs_partial_products_cap: self.add_virtual_cap(cap_height),
quotient_polys_cap: self.add_virtual_cap(cap_height),
openings: self.add_opening_set::<InnerC>(common_data),
openings: self.add_opening_set(common_data),
opening_proof: self.add_virtual_fri_proof(num_leaves_per_oracle, fri_params),
}
}
fn add_opening_set<InnerC: GenericConfig<D, F = F>>(
&mut self,
common_data: &CommonCircuitData<F, D>,
) -> OpeningSetTarget<D> {
fn add_opening_set(&mut self, common_data: &CommonCircuitData<F, D>) -> OpeningSetTarget<D> {
let config = &common_data.config;
let num_challenges = config.num_challenges;
let total_partial_products = num_challenges * common_data.num_partial_products;
@ -363,7 +357,7 @@ mod tests {
{
let mut builder = CircuitBuilder::<F, D>::new(config.clone());
let mut pw = PartialWitness::new();
let pt = builder.add_virtual_proof_with_pis::<InnerC>(&inner_cd);
let pt = builder.add_virtual_proof_with_pis(&inner_cd);
pw.set_proof_with_pis_target(&pt, &inner_proof);
let inner_data = builder.add_virtual_verifier_data(inner_cd.config.fri_config.cap_height);


@ -14,7 +14,7 @@ use plonky2::iop::challenger::{Challenger, RecursiveChallenger};
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig, Hasher};
use plonky2::plonk::config::{AlgebraicHasher, Hasher};
use plonky2::util::reducing::{ReducingFactor, ReducingFactorTarget};
use plonky2_maybe_rayon::*;
@ -63,7 +63,7 @@ pub(crate) struct PermutationChallengeSet<T: Copy> {
}
/// Compute all Z polynomials (for permutation arguments).
pub(crate) fn compute_permutation_z_polys<F, C, S, const D: usize>(
pub(crate) fn compute_permutation_z_polys<F, S, const D: usize>(
stark: &S,
config: &StarkConfig,
trace_poly_values: &[PolynomialValues<F>],
@ -71,7 +71,6 @@ pub(crate) fn compute_permutation_z_polys<F, C, S, const D: usize>(
) -> Vec<PolynomialValues<F>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
{
let permutation_pairs = stark.permutation_pairs();
@ -260,7 +259,7 @@ where
pub(crate) permutation_challenge_sets: Vec<PermutationChallengeSet<F>>,
}
pub(crate) fn eval_permutation_checks<F, FE, P, C, S, const D: usize, const D2: usize>(
pub(crate) fn eval_permutation_checks<F, FE, P, S, const D: usize, const D2: usize>(
stark: &S,
config: &StarkConfig,
vars: StarkEvaluationVars<FE, P, { S::COLUMNS }, { S::PUBLIC_INPUTS }>,
@ -270,7 +269,6 @@ pub(crate) fn eval_permutation_checks<F, FE, P, C, S, const D: usize, const D2:
F: RichField + Extendable<D>,
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
[(); S::COLUMNS]:,
[(); S::PUBLIC_INPUTS]:,


@ -80,7 +80,7 @@ where
config.num_challenges,
stark.permutation_batch_size(),
);
let permutation_z_polys = compute_permutation_z_polys::<F, C, S, D>(
let permutation_z_polys = compute_permutation_z_polys::<F, S, D>(
&stark,
config,
&trace_poly_values,
@ -285,7 +285,7 @@ where
permutation_challenge_sets: permutation_challenge_sets.to_vec(),
},
);
eval_vanishing_poly::<F, F, P, C, S, D, 1>(
eval_vanishing_poly::<F, F, P, S, D, 1>(
stark,
config,
vars,
@ -303,7 +303,7 @@ where
let num_challenges = alphas.len();
(0..P::WIDTH).into_iter().map(move |i| {
(0..P::WIDTH).map(move |i| {
(0..num_challenges)
.map(|j| constraints_evals[j].as_slice()[i])
.collect()


@ -128,7 +128,7 @@ fn verify_stark_proof_with_challenges_circuit<
with_context!(
builder,
"evaluate vanishing polynomial",
eval_vanishing_poly_circuit::<F, C, S, D>(
eval_vanishing_poly_circuit::<F, S, D>(
builder,
&stark,
inner_config,


@ -2,7 +2,6 @@ use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::config::GenericConfig;
use crate::config::StarkConfig;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
@ -13,7 +12,7 @@ use crate::permutation::{
use crate::stark::Stark;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
pub(crate) fn eval_vanishing_poly<F, FE, P, C, S, const D: usize, const D2: usize>(
pub(crate) fn eval_vanishing_poly<F, FE, P, S, const D: usize, const D2: usize>(
stark: &S,
config: &StarkConfig,
vars: StarkEvaluationVars<FE, P, { S::COLUMNS }, { S::PUBLIC_INPUTS }>,
@ -23,14 +22,13 @@ pub(crate) fn eval_vanishing_poly<F, FE, P, C, S, const D: usize, const D2: usiz
F: RichField + Extendable<D>,
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
[(); S::COLUMNS]:,
[(); S::PUBLIC_INPUTS]:,
{
stark.eval_packed_generic(vars, consumer);
if let Some(permutation_data) = permutation_data {
eval_permutation_checks::<F, FE, P, C, S, D, D2>(
eval_permutation_checks::<F, FE, P, S, D, D2>(
stark,
config,
vars,
@ -40,7 +38,7 @@ pub(crate) fn eval_vanishing_poly<F, FE, P, C, S, const D: usize, const D2: usiz
}
}
pub(crate) fn eval_vanishing_poly_circuit<F, C, S, const D: usize>(
pub(crate) fn eval_vanishing_poly_circuit<F, S, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
stark: &S,
config: &StarkConfig,
@ -49,7 +47,6 @@ pub(crate) fn eval_vanishing_poly_circuit<F, C, S, const D: usize>(
consumer: &mut RecursiveConstraintConsumer<F, D>,
) where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
[(); S::COLUMNS]:,
[(); S::PUBLIC_INPUTS]:,


@ -98,7 +98,7 @@ where
next_zs: permutation_zs_next.as_ref().unwrap().clone(),
permutation_challenge_sets: challenges.permutation_challenge_sets.unwrap(),
});
eval_vanishing_poly::<F, F::Extension, F::Extension, C, S, D, D>(
eval_vanishing_poly::<F, F::Extension, F::Extension, S, D, D>(
&stark,
config,
vars,