Add range checks to the arithmetic Stark (#866)

* Simplify loop and remove clippy.

* Offset auxiliary coefficients so they're always positive.

* Split mul aux input into lo/hi parts.

* Rename register.

* Combine `QUO_INPUT_{LO,HI}`; rearrange some columns.

* Split `MODULAR_AUX_INPUT` into high and low pieces.

* Remove range_check_error debug output.

* First draft of generating the range checks.

* Remove opcodes for operations that were defined elsewhere.

* Clean up interface to build arithmetic trace.

* Fix "degree too high" bug in DIV by zero.

* Fix constraint_transition usage in recursive compare.

* Fix variable name; use named constant.

* Fix comment values.

* Fix bug in recursive MUL circuit.

* Superficial improvements; remove unnecessary genericity.

* Fix bug in recursive MULMOD circuit.

* Remove debugging noise; expand test.

* Minor comment.

* Enforce assumption in assert.

* Make DIV its own operation.

* Make MOD its own operation; rename structs; refactor.

* Expand basic test.

* Remove comment.

* Put Stark operations in their own file.

* Test long traces.

* Minor comment.

* Address William's comments.

* Use `const_assert!` instead of `debug_assert!` because Clippy.
Hamish Ivey-Law 2023-01-31 02:23:24 +11:00 committed by GitHub
parent 136cdd053f
commit 6c4ef29fec
11 changed files with 596 additions and 234 deletions

View File

@ -8,7 +8,6 @@ use plonky2::iop::ext_target::ExtensionTarget;
use crate::arithmetic::columns::*;
use crate::arithmetic::utils::read_value_u64_limbs;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::range_check_error;
pub(crate) fn u256_add_cc(input0: [u64; N_LIMBS], input1: [u64; N_LIMBS]) -> ([u64; N_LIMBS], u64) {
// Input and output have 16-bit limbs
@ -104,7 +103,7 @@ where
cy
}
pub fn generate<F: RichField>(lv: &mut [F; NUM_ARITH_COLUMNS]) {
pub fn generate<F: RichField>(lv: &mut [F]) {
let input0 = read_value_u64_limbs(lv, ADD_INPUT_0);
let input1 = read_value_u64_limbs(lv, ADD_INPUT_1);
@ -117,10 +116,6 @@ pub fn eval_packed_generic<P: PackedField>(
lv: &[P; NUM_ARITH_COLUMNS],
yield_constr: &mut ConstraintConsumer<P>,
) {
range_check_error!(ADD_INPUT_0, 16);
range_check_error!(ADD_INPUT_1, 16);
range_check_error!(ADD_OUTPUT, 16);
let is_add = lv[IS_ADD];
let input0_limbs = &lv[ADD_INPUT_0];
let input1_limbs = &lv[ADD_INPUT_1];
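
The addition code above works on 16-bit limbs with an explicit carry, as the comment on `u256_add_cc` notes. For illustration, here is a minimal standalone model of that limb arithmetic (assumptions: little-endian limb order, LIMB_BITS = 16, N_LIMBS = 16; the function name is invented and this is not the crate's implementation):

const LIMB_BITS: usize = 16;
const N_LIMBS: usize = 16;

/// Add two 256-bit values held as sixteen 16-bit limbs (stored in u64s),
/// returning the limbs of the sum and the final carry-out.
fn u256_add_cc_model(a: [u64; N_LIMBS], b: [u64; N_LIMBS]) -> ([u64; N_LIMBS], u64) {
    let mask = (1u64 << LIMB_BITS) - 1;
    let mut out = [0u64; N_LIMBS];
    let mut carry = 0u64;
    for i in 0..N_LIMBS {
        let t = a[i] + b[i] + carry; // each term < 2^16, so no u64 overflow
        out[i] = t & mask;           // low 16 bits
        carry = t >> LIMB_BITS;      // 0 or 1
    }
    (out, carry)
}

fn main() {
    // (2^16 - 1) + 1 == 2^16: limb 0 wraps to 0 and carries into limb 1.
    let mut a = [0u64; N_LIMBS];
    a[0] = 0xffff;
    let mut b = [0u64; N_LIMBS];
    b[0] = 1;
    let (sum, cy) = u256_add_cc_model(a, b);
    assert_eq!(sum[0], 0);
    assert_eq!(sum[1], 1);
    assert_eq!(cy, 0);
}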

View File

@ -1,13 +1,17 @@
use std::marker::PhantomData;
use std::ops::Add;
use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::hash::hash_types::RichField;
use plonky2::util::transpose;
use crate::arithmetic::operations::Operation;
use crate::arithmetic::{add, columns, compare, modular, mul, sub};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::lookup::{eval_lookups, eval_lookups_circuit, permuted_cols};
use crate::permutation::PermutationPair;
use crate::stark::Stark;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
@ -16,53 +20,60 @@ pub struct ArithmeticStark<F, const D: usize> {
pub f: PhantomData<F>,
}
impl<F: RichField, const D: usize> ArithmeticStark<F, D> {
pub fn generate(
&self,
local_values: &mut [F; columns::NUM_ARITH_COLUMNS],
next_values: &mut [F; columns::NUM_ARITH_COLUMNS],
) {
// Check that at most one operation column is "one" and that the
// rest are "zero".
assert_eq!(
columns::ALL_OPERATIONS
.iter()
.map(|&c| {
if local_values[c] == F::ONE {
Ok(1u64)
} else if local_values[c] == F::ZERO {
Ok(0u64)
} else {
Err("column was not 0 nor 1")
}
})
.fold_ok(0u64, Add::add),
Ok(1)
);
const RANGE_MAX: usize = 1usize << 16; // Range check strict upper bound
if local_values[columns::IS_ADD].is_one() {
add::generate(local_values);
} else if local_values[columns::IS_SUB].is_one() {
sub::generate(local_values);
} else if local_values[columns::IS_MUL].is_one() {
mul::generate(local_values);
} else if local_values[columns::IS_LT].is_one() {
compare::generate(local_values, columns::IS_LT);
} else if local_values[columns::IS_GT].is_one() {
compare::generate(local_values, columns::IS_GT);
} else if local_values[columns::IS_ADDMOD].is_one() {
modular::generate(local_values, next_values, columns::IS_ADDMOD);
} else if local_values[columns::IS_SUBMOD].is_one() {
modular::generate(local_values, next_values, columns::IS_SUBMOD);
} else if local_values[columns::IS_MULMOD].is_one() {
modular::generate(local_values, next_values, columns::IS_MULMOD);
} else if local_values[columns::IS_MOD].is_one() {
modular::generate(local_values, next_values, columns::IS_MOD);
} else if local_values[columns::IS_DIV].is_one() {
modular::generate(local_values, next_values, columns::IS_DIV);
} else {
panic!("the requested operation should not be handled by the arithmetic table");
impl<F: RichField, const D: usize> ArithmeticStark<F, D> {
/// Expects input in *column*-major layout
fn generate_range_checks(&self, cols: &mut Vec<Vec<F>>) {
debug_assert!(cols.len() == columns::NUM_ARITH_COLUMNS);
let n_rows = cols[0].len();
debug_assert!(cols.iter().all(|col| col.len() == n_rows));
for i in 0..RANGE_MAX {
cols[columns::RANGE_COUNTER][i] = F::from_canonical_usize(i);
}
// For each column c in cols, generate the range-check
// permutations and put them in the corresponding range-check
// columns rc_c and rc_c+1.
for (c, rc_c) in columns::SHARED_COLS.zip(columns::RC_COLS.step_by(2)) {
let (col_perm, table_perm) = permuted_cols(&cols[c], &cols[columns::RANGE_COUNTER]);
cols[rc_c].copy_from_slice(&col_perm);
cols[rc_c + 1].copy_from_slice(&table_perm);
}
}
pub fn generate(&self, operations: Vec<&dyn Operation<F>>) -> Vec<PolynomialValues<F>> {
// The number of rows reserved is the smallest value that's
// guaranteed to avoid a reallocation: The only ops that use
// two rows are the modular operations and DIV, so the only
// way to reach capacity is when every op is modular or DIV
// (which is obviously unlikely in normal
// circumstances). (Also need at least RANGE_MAX rows to
// accommodate range checks.)
let max_rows = std::cmp::max(2 * operations.len(), RANGE_MAX);
let mut trace_rows = Vec::with_capacity(max_rows);
for op in operations {
let (row1, maybe_row2) = op.to_rows();
trace_rows.push(row1);
if let Some(row2) = maybe_row2 {
trace_rows.push(row2);
}
}
// Pad the trace with zero rows if it doesn't have enough rows
// to accommodate the range check columns.
for _ in trace_rows.len()..RANGE_MAX {
trace_rows.push(vec![F::ZERO; columns::NUM_ARITH_COLUMNS]);
}
let mut trace_cols = transpose(&trace_rows);
self.generate_range_checks(&mut trace_cols);
trace_cols.into_iter().map(PolynomialValues::new).collect()
}
}
@ -77,8 +88,14 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticSta
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>,
{
// Range check all the columns
for col in columns::RC_COLS.step_by(2) {
eval_lookups(vars, yield_constr, col, col + 1);
}
let lv = vars.local_values;
let nv = vars.next_values;
add::eval_packed_generic(lv, yield_constr);
sub::eval_packed_generic(lv, yield_constr);
mul::eval_packed_generic(lv, yield_constr);
@ -92,6 +109,11 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticSta
vars: StarkEvaluationTargets<D, { Self::COLUMNS }>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
// Range check all the columns
for col in columns::RC_COLS.step_by(2) {
eval_lookups_circuit(builder, vars, yield_constr, col, col + 1);
}
let lv = vars.local_values;
let nv = vars.next_values;
add::eval_ext_circuit(builder, lv, yield_constr);
@ -104,4 +126,198 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticSta
fn constraint_degree(&self) -> usize {
3
}
fn permutation_pairs(&self) -> Vec<PermutationPair> {
const START: usize = columns::START_SHARED_COLS;
const END: usize = START + columns::NUM_SHARED_COLS;
let mut pairs = Vec::with_capacity(2 * columns::NUM_SHARED_COLS);
for (c, c_perm) in (START..END).zip_eq(columns::RC_COLS.step_by(2)) {
pairs.push(PermutationPair::singletons(c, c_perm));
pairs.push(PermutationPair::singletons(
c_perm + 1,
columns::RANGE_COUNTER,
));
}
pairs
}
}
#[cfg(test)]
mod tests {
use anyhow::Result;
use ethereum_types::U256;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use super::{columns, ArithmeticStark};
use crate::arithmetic::operations::*;
use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};
#[test]
fn degree() -> Result<()> {
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
type S = ArithmeticStark<F, D>;
let stark = S {
f: Default::default(),
};
test_stark_low_degree(stark)
}
#[test]
fn circuit() -> Result<()> {
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
type S = ArithmeticStark<F, D>;
let stark = S {
f: Default::default(),
};
test_stark_circuit_constraints::<F, C, S, D>(stark)
}
#[test]
fn basic_trace() {
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
type S = ArithmeticStark<F, D>;
let stark = S {
f: Default::default(),
};
// 123 + 456 == 579
let add = SimpleBinaryOp::new(columns::IS_ADD, U256::from(123), U256::from(456));
// (123 * 456) % 1007 == 703
let mulmod = ModularBinaryOp::new(
columns::IS_MULMOD,
U256::from(123),
U256::from(456),
U256::from(1007),
);
// (123 - 456) % 1007 == 674
let submod = ModularBinaryOp::new(
columns::IS_SUBMOD,
U256::from(123),
U256::from(456),
U256::from(1007),
);
// 123 * 456 == 56088
let mul = SimpleBinaryOp::new(columns::IS_MUL, U256::from(123), U256::from(456));
// 128 % 13 == 11
let modop = ModOp {
input: U256::from(128),
modulus: U256::from(13),
};
// 128 / 13 == 9
let div = DivOp {
numerator: U256::from(128),
denominator: U256::from(13),
};
let ops: Vec<&dyn Operation<F>> = vec![&add, &mulmod, &submod, &mul, &div, &modop];
let pols = stark.generate(ops);
// Trace should always have NUM_ARITH_COLUMNS columns and
// max(RANGE_MAX, number of rows generated by the operations) rows.
// In this case the operations generate far fewer than RANGE_MAX
// rows, so the trace is padded to RANGE_MAX rows.
assert!(
pols.len() == columns::NUM_ARITH_COLUMNS
&& pols.iter().all(|v| v.len() == super::RANGE_MAX)
);
// Each operation has a single word answer that we can check
let expected_output = [
// Row (some ops take two rows), col, expected
(0, columns::ADD_OUTPUT, 579),
(1, columns::MODULAR_OUTPUT, 703),
(3, columns::MODULAR_OUTPUT, 674),
(5, columns::MUL_OUTPUT, 56088),
(6, columns::MODULAR_OUTPUT, 11),
(8, columns::DIV_OUTPUT, 9),
];
for (row, col, expected) in expected_output {
// First register should match expected value...
let first = col.start;
let out = pols[first].values[row].to_canonical_u64();
assert_eq!(
out, expected,
"expected column {} on row {} to be {} but it was {}",
first, row, expected, out,
);
// ...other registers should be zero
let rest = col.start + 1..col.end;
assert!(pols[rest].iter().all(|v| v.values[row] == F::ZERO));
}
}
#[test]
fn big_traces() {
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
type S = ArithmeticStark<F, D>;
let stark = S {
f: Default::default(),
};
let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
let ops = (0..super::RANGE_MAX)
.map(|_| {
SimpleBinaryOp::new(
columns::IS_MUL,
U256::from(rng.gen::<[u8; 32]>()),
U256::from(rng.gen::<[u8; 32]>()),
)
})
.collect::<Vec<_>>();
// TODO: This is clearly not the right way to build this
// vector; I can't work out how to do it using the map above
// though, with or without Boxes.
let ops = ops.iter().map(|o| o as &dyn Operation<F>).collect();
let pols = stark.generate(ops);
// Trace should always have NUM_ARITH_COLUMNS columns and
// max(RANGE_MAX, number of rows generated by the operations) rows.
// In this case there are RANGE_MAX operations with one row each,
// so RANGE_MAX rows.
assert!(
pols.len() == columns::NUM_ARITH_COLUMNS
&& pols.iter().all(|v| v.len() == super::RANGE_MAX)
);
let ops = (0..super::RANGE_MAX)
.map(|_| {
ModularBinaryOp::new(
columns::IS_MULMOD,
U256::from(rng.gen::<[u8; 32]>()),
U256::from(rng.gen::<[u8; 32]>()),
U256::from(rng.gen::<[u8; 32]>()),
)
})
.collect::<Vec<_>>();
// TODO: This is clearly not the right way to build this
// vector; I can't work out how to do it using the map above
// though, with or without Boxes.
let ops = ops.iter().map(|o| o as &dyn Operation<F>).collect();
let pols = stark.generate(ops);
// Trace should always have NUM_ARITH_COLUMNS columns and
// max(RANGE_MAX, number of rows generated by the operations) rows.
// In this case there are RANGE_MAX operations with two rows each,
// so 2 * RANGE_MAX rows.
assert!(
pols.len() == columns::NUM_ARITH_COLUMNS
&& pols.iter().all(|v| v.len() == 2 * super::RANGE_MAX)
);
}
}
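
To read the test expectations above, it helps to model the row accounting in `generate`: single-row ops contribute one row, modular ops and DIV contribute two, and the trace is padded with zero rows up to RANGE_MAX. A hedged sketch (the helper name is invented; the real code builds actual trace rows rather than just counting them):

const RANGE_MAX: usize = 1 << 16;

// A `true` entry stands for a two-row operation (the modular ops and DIV);
// `false` stands for a single-row operation.
fn padded_rows(ops: &[bool]) -> usize {
    let rows: usize = ops.iter().map(|&two_row| if two_row { 2 } else { 1 }).sum();
    // The trace is padded with zero rows up to RANGE_MAX so that the
    // range-check counter column 0, 1, ..., RANGE_MAX - 1 fits.
    rows.max(RANGE_MAX)
}

fn main() {
    // RANGE_MAX single-row ops (as in `big_traces`): exactly RANGE_MAX rows.
    assert_eq!(padded_rows(&vec![false; RANGE_MAX]), RANGE_MAX);
    // RANGE_MAX two-row ops: 2 * RANGE_MAX rows, no padding needed.
    assert_eq!(padded_rows(&vec![true; RANGE_MAX]), 2 * RANGE_MAX);
    // A handful of ops (as in `basic_trace`): padded up to RANGE_MAX rows.
    assert_eq!(padded_rows(&[false, true, true, false, true, true]), RANGE_MAX);
}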

View File

@ -32,41 +32,42 @@ pub const IS_SUBMOD: usize = IS_ADDMOD + 1;
pub const IS_MULMOD: usize = IS_SUBMOD + 1;
pub const IS_LT: usize = IS_MULMOD + 1;
pub const IS_GT: usize = IS_LT + 1;
pub const IS_SHL: usize = IS_GT + 1;
pub const IS_SHR: usize = IS_SHL + 1;
const START_SHARED_COLS: usize = IS_SHR + 1;
pub(crate) const ALL_OPERATIONS: [usize; 12] = [
IS_ADD, IS_MUL, IS_SUB, IS_DIV, IS_MOD, IS_ADDMOD, IS_SUBMOD, IS_MULMOD, IS_LT, IS_GT, IS_SHL,
IS_SHR,
];
pub(crate) const START_SHARED_COLS: usize = IS_GT + 1;
/// Within the Arithmetic Unit, there are shared columns which can be
/// used by any arithmetic circuit, depending on which one is active
/// this cycle.
///
/// Modular arithmetic takes 9 * N_LIMBS columns which is split across
/// two rows, the first with 5 * N_LIMBS columns and the second with
/// 4 * N_LIMBS columns. (There are hence N_LIMBS "wasted columns" in
/// Modular arithmetic takes 11 * N_LIMBS columns which is split across
/// two rows, the first with 6 * N_LIMBS columns and the second with
/// 5 * N_LIMBS columns. (There are hence N_LIMBS "wasted columns" in
/// the second row.)
const NUM_SHARED_COLS: usize = 5 * N_LIMBS;
pub(crate) const NUM_SHARED_COLS: usize = 6 * N_LIMBS;
pub(crate) const SHARED_COLS: Range<usize> = START_SHARED_COLS..START_SHARED_COLS + NUM_SHARED_COLS;
const GENERAL_INPUT_0: Range<usize> = START_SHARED_COLS..START_SHARED_COLS + N_LIMBS;
const GENERAL_INPUT_1: Range<usize> = GENERAL_INPUT_0.end..GENERAL_INPUT_0.end + N_LIMBS;
pub(crate) const GENERAL_INPUT_0: Range<usize> = START_SHARED_COLS..START_SHARED_COLS + N_LIMBS;
pub(crate) const GENERAL_INPUT_1: Range<usize> = GENERAL_INPUT_0.end..GENERAL_INPUT_0.end + N_LIMBS;
const GENERAL_INPUT_2: Range<usize> = GENERAL_INPUT_1.end..GENERAL_INPUT_1.end + N_LIMBS;
const GENERAL_INPUT_3: Range<usize> = GENERAL_INPUT_2.end..GENERAL_INPUT_2.end + N_LIMBS;
const AUX_INPUT_0_LO: Range<usize> = GENERAL_INPUT_3.end..GENERAL_INPUT_3.end + N_LIMBS;
// NB: Only one of these two sets of columns will be used for a given operation
const GENERAL_INPUT_4: Range<usize> = GENERAL_INPUT_3.end..GENERAL_INPUT_3.end + N_LIMBS;
const GENERAL_INPUT_4_DBL: Range<usize> = GENERAL_INPUT_3.end..GENERAL_INPUT_3.end + 2 * N_LIMBS;
// The auxiliary input columns overlap the general input columns
// because they correspond to the values in the second row for modular
// operations.
const AUX_INPUT_0_HI: Range<usize> = START_SHARED_COLS..START_SHARED_COLS + N_LIMBS;
const AUX_INPUT_1: Range<usize> = AUX_INPUT_0_HI.end..AUX_INPUT_0_HI.end + 2 * N_LIMBS;
const AUX_INPUT_0: Range<usize> = START_SHARED_COLS..START_SHARED_COLS + N_LIMBS;
const AUX_INPUT_1: Range<usize> = AUX_INPUT_0.end..AUX_INPUT_0.end + 2 * N_LIMBS;
// These auxiliary input columns are awkwardly split across two rows,
// with the first half after the general input columns and the second
// half after the auxiliary input columns.
const AUX_INPUT_2: Range<usize> = AUX_INPUT_1.end..AUX_INPUT_1.end + N_LIMBS;
const AUX_INPUT_2: Range<usize> = AUX_INPUT_1.end..AUX_INPUT_1.end + 2 * N_LIMBS - 1;
// Each element c of {MUL,MODULAR}_AUX_INPUT satisfies -2^20 <= c <= 2^20;
// this bound is added as an offset so that every stored value is
// non-negative for the 16-bit range checks.
pub(crate) const AUX_COEFF_ABS_MAX: i64 = 1 << 20;
// ADD takes 3 * N_LIMBS = 48 columns
pub(crate) const ADD_INPUT_0: Range<usize> = GENERAL_INPUT_0;
@ -78,11 +79,12 @@ pub(crate) const SUB_INPUT_0: Range<usize> = GENERAL_INPUT_0;
pub(crate) const SUB_INPUT_1: Range<usize> = GENERAL_INPUT_1;
pub(crate) const SUB_OUTPUT: Range<usize> = GENERAL_INPUT_2;
// MUL takes 4 * N_LIMBS = 64 columns
// MUL takes 5 * N_LIMBS = 80 columns
pub(crate) const MUL_INPUT_0: Range<usize> = GENERAL_INPUT_0;
pub(crate) const MUL_INPUT_1: Range<usize> = GENERAL_INPUT_1;
pub(crate) const MUL_OUTPUT: Range<usize> = GENERAL_INPUT_2;
pub(crate) const MUL_AUX_INPUT: Range<usize> = GENERAL_INPUT_3;
pub(crate) const MUL_AUX_INPUT_LO: Range<usize> = GENERAL_INPUT_3;
pub(crate) const MUL_AUX_INPUT_HI: Range<usize> = GENERAL_INPUT_4;
// LT and GT take 4 * N_LIMBS = 64 columns
pub(crate) const CMP_INPUT_0: Range<usize> = GENERAL_INPUT_0;
@ -90,8 +92,8 @@ pub(crate) const CMP_INPUT_1: Range<usize> = GENERAL_INPUT_1;
pub(crate) const CMP_OUTPUT: usize = GENERAL_INPUT_2.start;
pub(crate) const CMP_AUX_INPUT: Range<usize> = GENERAL_INPUT_3;
// MULMOD takes 4 * N_LIMBS + 2 * 2*N_LIMBS + N_LIMBS = 144 columns
// but split over two rows of 80 columns and 64 columns.
// MULMOD takes 4 * N_LIMBS + 3 * 2*N_LIMBS + N_LIMBS = 176 columns
// but split over two rows of 96 columns and 80 columns.
//
// ADDMOD, SUBMOD, MOD and DIV are currently implemented in terms of
// the general modular code, so they also take 144 columns (also split
@ -100,18 +102,28 @@ pub(crate) const MODULAR_INPUT_0: Range<usize> = GENERAL_INPUT_0;
pub(crate) const MODULAR_INPUT_1: Range<usize> = GENERAL_INPUT_1;
pub(crate) const MODULAR_MODULUS: Range<usize> = GENERAL_INPUT_2;
pub(crate) const MODULAR_OUTPUT: Range<usize> = GENERAL_INPUT_3;
pub(crate) const MODULAR_QUO_INPUT_LO: Range<usize> = AUX_INPUT_0_LO;
pub(crate) const MODULAR_QUO_INPUT: Range<usize> = GENERAL_INPUT_4_DBL;
pub(crate) const MODULAR_OUT_AUX_RED: Range<usize> = AUX_INPUT_0;
// NB: Last value is not used in AUX, it is used in MOD_IS_ZERO
pub(crate) const MODULAR_QUO_INPUT_HI: Range<usize> = AUX_INPUT_0_HI;
pub(crate) const MODULAR_AUX_INPUT: Range<usize> = AUX_INPUT_1.start..AUX_INPUT_1.end - 1;
pub(crate) const MODULAR_MOD_IS_ZERO: usize = AUX_INPUT_1.end - 1;
pub(crate) const MODULAR_OUT_AUX_RED: Range<usize> = AUX_INPUT_2;
pub(crate) const MODULAR_MOD_IS_ZERO: usize = AUX_INPUT_1.start;
pub(crate) const MODULAR_AUX_INPUT_LO: Range<usize> = AUX_INPUT_1.start + 1..AUX_INPUT_1.end;
pub(crate) const MODULAR_AUX_INPUT_HI: Range<usize> = AUX_INPUT_2;
// Must be set to MOD_IS_ZERO for DIV operation i.e. MOD_IS_ZERO * lv[IS_DIV]
pub(crate) const MODULAR_DIV_DENOM_IS_ZERO: usize = AUX_INPUT_2.end;
#[allow(unused)] // TODO: Will be used when hooking into the CPU
pub(crate) const DIV_NUMERATOR: Range<usize> = MODULAR_INPUT_0;
#[allow(unused)] // TODO: Will be used when hooking into the CPU
pub(crate) const DIV_DENOMINATOR: Range<usize> = MODULAR_MODULUS;
#[allow(unused)] // TODO: Will be used when hooking into the CPU
pub(crate) const DIV_OUTPUT: Range<usize> = MODULAR_QUO_INPUT_LO;
pub(crate) const DIV_OUTPUT: Range<usize> =
MODULAR_QUO_INPUT.start..MODULAR_QUO_INPUT.start + N_LIMBS;
pub const NUM_ARITH_COLUMNS: usize = START_SHARED_COLS + NUM_SHARED_COLS;
// Need one column for the table, then two columns for every value
// that needs to be range checked in the trace, namely the permutation
// of the column and the permutation of the range. The two
// permutations associated to column i will be in columns RC_COLS[2i]
// and RC_COLS[2i+1].
pub(crate) const NUM_RANGE_CHECK_COLS: usize = 1 + 2 * NUM_SHARED_COLS;
pub(crate) const RANGE_COUNTER: usize = START_SHARED_COLS + NUM_SHARED_COLS;
pub(crate) const RC_COLS: Range<usize> = RANGE_COUNTER + 1..RANGE_COUNTER + 1 + 2 * NUM_SHARED_COLS;
pub const NUM_ARITH_COLUMNS: usize = START_SHARED_COLS + NUM_SHARED_COLS + NUM_RANGE_CHECK_COLS;
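
To make the new column counts concrete, the following sketch spells out the index arithmetic implied by the constants above (assuming N_LIMBS = 16, as the `const_assert!` in operations.rs enforces; this only restates the comments and is not part of the crate):

const N_LIMBS: usize = 16;

fn main() {
    // Shared data columns available to each operation: 6 * N_LIMBS.
    let num_shared_cols = 6 * N_LIMBS;
    assert_eq!(num_shared_cols, 96);

    // MULMOD uses 4 * N_LIMBS + 3 * (2 * N_LIMBS) + N_LIMBS = 176 columns
    // (per the comment above), split across a 96-column first row and an
    // 80-column second row.
    let mulmod_cols = 4 * N_LIMBS + 3 * (2 * N_LIMBS) + N_LIMBS;
    assert_eq!(mulmod_cols, 176);
    assert_eq!(mulmod_cols, 6 * N_LIMBS + 5 * N_LIMBS);

    // Range checking adds one counter column plus two permutation columns
    // (RC_COLS[2i] and RC_COLS[2i + 1]) for every shared column.
    let num_range_check_cols = 1 + 2 * num_shared_cols;
    assert_eq!(num_range_check_cols, 193);
}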

View File

@ -24,13 +24,12 @@ use crate::arithmetic::columns::*;
use crate::arithmetic::sub::u256_sub_br;
use crate::arithmetic::utils::read_value_u64_limbs;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::range_check_error;
pub(crate) fn generate<F: RichField>(lv: &mut [F; NUM_ARITH_COLUMNS], op: usize) {
pub(crate) fn generate<F: RichField>(lv: &mut [F], filter: usize) {
let input0 = read_value_u64_limbs(lv, CMP_INPUT_0);
let input1 = read_value_u64_limbs(lv, CMP_INPUT_1);
let (diff, br) = match op {
let (diff, br) = match filter {
// input0 - input1 == diff + br*2^256
IS_LT => u256_sub_br(input0, input1),
// input1 - input0 == diff + br*2^256
@ -84,10 +83,6 @@ pub fn eval_packed_generic<P: PackedField>(
lv: &[P; NUM_ARITH_COLUMNS],
yield_constr: &mut ConstraintConsumer<P>,
) {
range_check_error!(CMP_INPUT_0, 16);
range_check_error!(CMP_INPUT_1, 16);
range_check_error!(CMP_AUX_INPUT, 16);
let is_lt = lv[IS_LT];
let is_gt = lv[IS_GT];
@ -147,7 +142,11 @@ pub(crate) fn eval_ext_circuit_lt<F: RichField + Extendable<D>, const D: usize>(
);
let good_output = builder.sub_extension(cy, output);
let filter = builder.mul_extension(is_op, good_output);
yield_constr.constraint_transition(builder, filter);
if is_two_row_op {
yield_constr.constraint_transition(builder, filter);
} else {
yield_constr.constraint(builder, filter);
}
}
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(

View File

@ -14,6 +14,8 @@ mod utils;
pub mod arithmetic_stark;
pub(crate) mod columns;
pub mod operations;
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum BinaryOperator {
Add,

View File

@ -122,7 +122,6 @@ use crate::arithmetic::columns::*;
use crate::arithmetic::compare::{eval_ext_circuit_lt, eval_packed_generic_lt};
use crate::arithmetic::utils::*;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::range_check_error;
/// Convert the base-2^16 representation of a number into a BigInt.
///
@ -191,8 +190,8 @@ fn bigint_to_columns<const N: usize>(num: &BigInt) -> [i64; N] {
/// NB: `operation` can set the higher order elements in its result to
/// zero if they are not used.
fn generate_modular_op<F: RichField>(
lv: &mut [F; NUM_ARITH_COLUMNS],
nv: &mut [F; NUM_ARITH_COLUMNS],
lv: &mut [F],
nv: &mut [F],
filter: usize,
operation: fn([i64; N_LIMBS], [i64; N_LIMBS]) -> [i64; 2 * N_LIMBS - 1],
) {
@ -263,37 +262,35 @@ fn generate_modular_op<F: RichField>(
// Higher order terms of the product must be zero for valid quot and modulus:
debug_assert!(&prod[2 * N_LIMBS..].iter().all(|&x| x == 0i64));
lv[MODULAR_OUTPUT].copy_from_slice(&output_limbs.map(F::from_canonical_i64));
lv[MODULAR_QUO_INPUT].copy_from_slice(&quot_limbs.map(F::from_noncanonical_i64));
// constr_poly must be zero when evaluated at x = β :=
// 2^LIMB_BITS, hence it's divisible by (x - β). `aux_limbs` is
// the result of removing that root.
let aux_limbs = pol_remove_root_2exp::<LIMB_BITS, _, { 2 * N_LIMBS }>(constr_poly);
let mut aux_limbs = pol_remove_root_2exp::<LIMB_BITS, _, { 2 * N_LIMBS }>(constr_poly);
lv[MODULAR_OUTPUT].copy_from_slice(&output_limbs.map(|c| F::from_canonical_i64(c)));
// Copy lo and hi halves of quot_limbs into their respective registers
for (i, &lo) in MODULAR_QUO_INPUT_LO.zip(&quot_limbs[..N_LIMBS]) {
lv[i] = F::from_noncanonical_i64(lo);
}
for (i, &hi) in MODULAR_QUO_INPUT_HI.zip(&quot_limbs[N_LIMBS..]) {
nv[i] = F::from_noncanonical_i64(hi);
for c in aux_limbs.iter_mut() {
// we store the unsigned offset value c + 2^20.
*c += AUX_COEFF_ABS_MAX;
}
debug_assert!(aux_limbs.iter().all(|&c| c.abs() <= 2 * AUX_COEFF_ABS_MAX));
for (i, &c) in MODULAR_AUX_INPUT.zip(&aux_limbs[..2 * N_LIMBS - 1]) {
nv[i] = F::from_noncanonical_i64(c);
for (i, &c) in MODULAR_AUX_INPUT_LO.zip(&aux_limbs[..2 * N_LIMBS - 1]) {
nv[i] = F::from_canonical_u16(c as u16);
}
for (i, &c) in MODULAR_AUX_INPUT_HI.zip(&aux_limbs[..2 * N_LIMBS - 1]) {
nv[i] = F::from_canonical_u16((c >> 16) as u16);
}
nv[MODULAR_MOD_IS_ZERO] = mod_is_zero;
nv[MODULAR_OUT_AUX_RED].copy_from_slice(&out_aux_red.map(|c| F::from_canonical_i64(c)));
nv[MODULAR_OUT_AUX_RED].copy_from_slice(&out_aux_red.map(F::from_canonical_i64));
nv[MODULAR_DIV_DENOM_IS_ZERO] = mod_is_zero * lv[IS_DIV];
}
/// Generate the output and auxiliary values for modular operations.
///
/// `filter` must be one of `columns::IS_{ADDMOD,MULMOD,MOD}`.
pub(crate) fn generate<F: RichField>(
lv: &mut [F; NUM_ARITH_COLUMNS],
nv: &mut [F; NUM_ARITH_COLUMNS],
filter: usize,
) {
pub(crate) fn generate<F: RichField>(lv: &mut [F], nv: &mut [F], filter: usize) {
debug_assert!(lv.len() == NUM_ARITH_COLUMNS && nv.len() == NUM_ARITH_COLUMNS);
match filter {
columns::IS_ADDMOD => generate_modular_op(lv, nv, filter, pol_add),
columns::IS_SUBMOD => generate_modular_op(lv, nv, filter, pol_sub),
@ -319,14 +316,6 @@ fn modular_constr_poly<P: PackedField>(
yield_constr: &mut ConstraintConsumer<P>,
filter: P,
) -> [P; 2 * N_LIMBS] {
range_check_error!(MODULAR_INPUT_0, 16);
range_check_error!(MODULAR_INPUT_1, 16);
range_check_error!(MODULAR_MODULUS, 16);
range_check_error!(MODULAR_QUO_INPUT_LO, 16);
range_check_error!(MODULAR_QUO_INPUT_HI, 16);
range_check_error!(MODULAR_AUX_INPUT, 20, signed);
range_check_error!(MODULAR_OUTPUT, 16);
let mut modulus = read_value::<N_LIMBS, _>(lv, MODULAR_MODULUS);
let mod_is_zero = nv[MODULAR_MOD_IS_ZERO];
@ -344,10 +333,14 @@ fn modular_constr_poly<P: PackedField>(
let mut output = read_value::<N_LIMBS, _>(lv, MODULAR_OUTPUT);
// Is 1 iff the operation is DIV and the denominator is zero.
let div_denom_is_zero = nv[MODULAR_DIV_DENOM_IS_ZERO];
yield_constr.constraint_transition(filter * (mod_is_zero * lv[IS_DIV] - div_denom_is_zero));
// Needed to compensate for adding mod_is_zero to modulus above,
// since the call eval_packed_generic_lt() below subtracts modulus
// verify in the case of a DIV.
output[0] += mod_is_zero * lv[IS_DIV];
// to verify in the case of a DIV.
output[0] += div_denom_is_zero;
// Verify that the output is reduced, i.e. output < modulus.
let out_aux_red = &nv[MODULAR_OUT_AUX_RED];
@ -371,13 +364,12 @@ fn modular_constr_poly<P: PackedField>(
true,
);
// restore output[0]
output[0] -= mod_is_zero * lv[IS_DIV];
output[0] -= div_denom_is_zero;
// prod = q(x) * m(x)
let quot = {
let mut quot = [P::default(); 2 * N_LIMBS];
quot[..N_LIMBS].copy_from_slice(&lv[MODULAR_QUO_INPUT_LO]);
quot[N_LIMBS..].copy_from_slice(&nv[MODULAR_QUO_INPUT_HI]);
quot.copy_from_slice(&lv[MODULAR_QUO_INPUT]);
quot
};
@ -391,13 +383,21 @@ fn modular_constr_poly<P: PackedField>(
let mut constr_poly: [_; 2 * N_LIMBS] = prod[0..2 * N_LIMBS].try_into().unwrap();
pol_add_assign(&mut constr_poly, &output);
let base = P::Scalar::from_canonical_u64(1 << LIMB_BITS);
let offset = P::Scalar::from_canonical_u64(AUX_COEFF_ABS_MAX as u64);
// constr_poly = c(x) + q(x) * m(x) + (x - β) * s(x)
let mut aux = [P::ZEROS; 2 * N_LIMBS];
for (i, j) in MODULAR_AUX_INPUT.enumerate() {
aux[i] = nv[j];
for (c, i) in aux.iter_mut().zip(MODULAR_AUX_INPUT_LO) {
// MODULAR_AUX_INPUT elements were offset by 2^20 in
// generation, so we undo that here.
*c = nv[i] - offset;
}
// add high 16-bits of aux input
for (c, j) in aux.iter_mut().zip(MODULAR_AUX_INPUT_HI) {
*c += base * nv[j];
}
let base = P::Scalar::from_canonical_u64(1 << LIMB_BITS);
pol_add_assign(&mut constr_poly, &pol_adjoin_root(aux, base));
constr_poly
@ -412,9 +412,9 @@ pub(crate) fn eval_packed_generic<P: PackedField>(
// NB: The CTL code guarantees that filter is 0 or 1, i.e. that
// only one of the operations below is "live".
let filter = lv[columns::IS_ADDMOD]
+ lv[columns::IS_SUBMOD]
+ lv[columns::IS_MULMOD]
+ lv[columns::IS_MOD]
+ lv[columns::IS_SUBMOD]
+ lv[columns::IS_DIV];
// Ensure that this operation is not the last row of the table;
@ -480,7 +480,12 @@ fn modular_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>
modulus[0] = builder.add_extension(modulus[0], mod_is_zero);
let mut output = read_value::<N_LIMBS, _>(lv, MODULAR_OUTPUT);
output[0] = builder.mul_add_extension(mod_is_zero, lv[IS_DIV], output[0]);
let div_denom_is_zero = nv[MODULAR_DIV_DENOM_IS_ZERO];
let t = builder.mul_sub_extension(mod_is_zero, lv[IS_DIV], div_denom_is_zero);
let t = builder.mul_extension(filter, t);
yield_constr.constraint_transition(builder, t);
output[0] = builder.add_extension(output[0], div_denom_is_zero);
let out_aux_red = &nv[MODULAR_OUT_AUX_RED];
let one = builder.one_extension();
@ -497,13 +502,11 @@ fn modular_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>
is_less_than,
true,
);
output[0] =
builder.arithmetic_extension(F::NEG_ONE, F::ONE, mod_is_zero, lv[IS_DIV], output[0]);
output[0] = builder.sub_extension(output[0], div_denom_is_zero);
let quot = {
let zero = builder.zero_extension();
let mut quot = [zero; 2 * N_LIMBS];
quot[..N_LIMBS].copy_from_slice(&lv[MODULAR_QUO_INPUT_LO]);
quot[N_LIMBS..].copy_from_slice(&nv[MODULAR_QUO_INPUT_HI]);
quot.copy_from_slice(&lv[MODULAR_QUO_INPUT]);
quot
};
@ -516,13 +519,19 @@ fn modular_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>
let mut constr_poly: [_; 2 * N_LIMBS] = prod[0..2 * N_LIMBS].try_into().unwrap();
pol_add_assign_ext_circuit(builder, &mut constr_poly, &output);
let offset =
builder.constant_extension(F::Extension::from_canonical_u64(AUX_COEFF_ABS_MAX as u64));
let zero = builder.zero_extension();
let mut aux = [zero; 2 * N_LIMBS];
for (i, j) in MODULAR_AUX_INPUT.enumerate() {
aux[i] = nv[j];
for (c, i) in aux.iter_mut().zip(MODULAR_AUX_INPUT_LO) {
*c = builder.sub_extension(nv[i], offset);
}
let base = F::from_canonical_u64(1u64 << LIMB_BITS);
for (c, j) in aux.iter_mut().zip(MODULAR_AUX_INPUT_HI) {
*c = builder.mul_const_add_extension(base, nv[j], *c);
}
let base = builder.constant_extension(F::Extension::from_canonical_u64(1u64 << LIMB_BITS));
let base = builder.constant_extension(base.into());
let t = pol_adjoin_root_ext_circuit(builder, aux, base);
pol_add_assign_ext_circuit(builder, &mut constr_poly, &t);
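
The offset-and-split encoding used for the auxiliary coefficients here (and in mul.rs below) can be checked in isolation. A hedged, self-contained sketch of the idea, not the crate's code: a coefficient c with |c| <= 2^20 is stored as the non-negative value c + 2^20, split into low and high 16-bit halves so both halves pass the 16-bit range checks, and the constraint side recovers c as lo + 2^16 * hi - 2^20.

const LIMB_BITS: u32 = 16;
const AUX_COEFF_ABS_MAX: i64 = 1 << 20;

/// Encode a signed auxiliary coefficient as two 16-bit range-checkable halves.
fn encode(c: i64) -> (u16, u16) {
    assert!(c.abs() <= AUX_COEFF_ABS_MAX);
    // Shift into [0, 2^21]; both halves then fit comfortably in 16 bits.
    let d = c + AUX_COEFF_ABS_MAX;
    (d as u16, (d >> LIMB_BITS) as u16)
}

/// What the constraint code computes from the two stored halves.
fn decode(lo: u16, hi: u16) -> i64 {
    (lo as i64) + (1i64 << LIMB_BITS) * (hi as i64) - AUX_COEFF_ABS_MAX
}

fn main() {
    for &c in &[-(1i64 << 20), -1, 0, 1, 12345, 1 << 20] {
        let (lo, hi) = encode(c);
        assert_eq!(decode(lo, hi), c);
    }
}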

View File

@ -64,9 +64,8 @@ use plonky2::iop::ext_target::ExtensionTarget;
use crate::arithmetic::columns::*;
use crate::arithmetic::utils::*;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::range_check_error;
pub fn generate<F: RichField>(lv: &mut [F; NUM_ARITH_COLUMNS]) {
pub fn generate<F: RichField>(lv: &mut [F]) {
let input0 = read_value_i64_limbs(lv, MUL_INPUT_0);
let input1 = read_value_i64_limbs(lv, MUL_INPUT_1);
@ -96,23 +95,40 @@ pub fn generate<F: RichField>(lv: &mut [F; NUM_ARITH_COLUMNS]) {
let mut aux_limbs = pol_remove_root_2exp::<LIMB_BITS, _, N_LIMBS>(unreduced_prod);
aux_limbs[N_LIMBS - 1] = -cy;
lv[MUL_AUX_INPUT].copy_from_slice(&aux_limbs.map(|c| F::from_noncanonical_i64(c)));
for c in aux_limbs.iter_mut() {
// we store the unsigned offset value c + 2^20
*c += AUX_COEFF_ABS_MAX;
}
debug_assert!(aux_limbs.iter().all(|&c| c.abs() <= 2 * AUX_COEFF_ABS_MAX));
lv[MUL_AUX_INPUT_LO].copy_from_slice(&aux_limbs.map(|c| F::from_canonical_u16(c as u16)));
lv[MUL_AUX_INPUT_HI]
.copy_from_slice(&aux_limbs.map(|c| F::from_canonical_u16((c >> 16) as u16)));
}
pub fn eval_packed_generic<P: PackedField>(
lv: &[P; NUM_ARITH_COLUMNS],
yield_constr: &mut ConstraintConsumer<P>,
) {
range_check_error!(MUL_INPUT_0, 16);
range_check_error!(MUL_INPUT_1, 16);
range_check_error!(MUL_OUTPUT, 16);
range_check_error!(MUL_AUX_INPUT, 20);
let base = P::Scalar::from_canonical_u64(1 << LIMB_BITS);
let is_mul = lv[IS_MUL];
let input0_limbs = read_value::<N_LIMBS, _>(lv, MUL_INPUT_0);
let input1_limbs = read_value::<N_LIMBS, _>(lv, MUL_INPUT_1);
let output_limbs = read_value::<N_LIMBS, _>(lv, MUL_OUTPUT);
let aux_limbs = read_value::<N_LIMBS, _>(lv, MUL_AUX_INPUT);
let aux_limbs = {
// MUL_AUX_INPUT was offset by 2^20 in generation, so we undo
// that here
let offset = P::Scalar::from_canonical_u64(AUX_COEFF_ABS_MAX as u64);
let mut aux_limbs = read_value::<N_LIMBS, _>(lv, MUL_AUX_INPUT_LO);
let aux_limbs_hi = &lv[MUL_AUX_INPUT_HI];
for (lo, &hi) in aux_limbs.iter_mut().zip(aux_limbs_hi) {
*lo += hi * base - offset;
}
aux_limbs
};
// Constraint poly holds the coefficients of the polynomial that
// must be identically zero for this multiplication to be
@ -133,7 +149,6 @@ pub fn eval_packed_generic<P: PackedField>(
pol_sub_assign(&mut constr_poly, &output_limbs);
// This subtracts (x - β) * s(x) from constr_poly.
let base = P::Scalar::from_canonical_u64(1 << LIMB_BITS);
pol_sub_assign(&mut constr_poly, &pol_adjoin_root(aux_limbs, base));
// At this point constr_poly holds the coefficients of the
@ -154,7 +169,20 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
let input0_limbs = read_value::<N_LIMBS, _>(lv, MUL_INPUT_0);
let input1_limbs = read_value::<N_LIMBS, _>(lv, MUL_INPUT_1);
let output_limbs = read_value::<N_LIMBS, _>(lv, MUL_OUTPUT);
let aux_limbs = read_value::<N_LIMBS, _>(lv, MUL_AUX_INPUT);
let aux_limbs = {
let base = builder.constant_extension(F::Extension::from_canonical_u64(1 << LIMB_BITS));
let offset =
builder.constant_extension(F::Extension::from_canonical_u64(AUX_COEFF_ABS_MAX as u64));
let mut aux_limbs = read_value::<N_LIMBS, _>(lv, MUL_AUX_INPUT_LO);
let aux_limbs_hi = &lv[MUL_AUX_INPUT_HI];
for (lo, &hi) in aux_limbs.iter_mut().zip(aux_limbs_hi) {
//*lo = lo + hi * base - offset;
let t = builder.mul_sub_extension(hi, base, offset);
*lo = builder.add_extension(*lo, t);
}
aux_limbs
};
let mut constr_poly = pol_mul_lo_ext_circuit(builder, input0_limbs, input1_limbs);
pol_sub_assign_ext_circuit(builder, &mut constr_poly, &output_limbs);

View File

@ -0,0 +1,170 @@
use ethereum_types::U256;
use plonky2::hash::hash_types::RichField;
use static_assertions::const_assert;
use crate::arithmetic::columns::*;
use crate::arithmetic::{add, compare, modular, mul, sub};
#[inline]
fn u64_to_array<F: RichField>(out: &mut [F], x: u64) {
const_assert!(LIMB_BITS == 16);
debug_assert!(out.len() == 4);
out[0] = F::from_canonical_u16(x as u16);
out[1] = F::from_canonical_u16((x >> 16) as u16);
out[2] = F::from_canonical_u16((x >> 32) as u16);
out[3] = F::from_canonical_u16((x >> 48) as u16);
}
fn u256_to_array<F: RichField>(out: &mut [F], x: U256) {
const_assert!(N_LIMBS == 16);
debug_assert!(out.len() == N_LIMBS);
u64_to_array(&mut out[0..4], x.0[0]);
u64_to_array(&mut out[4..8], x.0[1]);
u64_to_array(&mut out[8..12], x.0[2]);
u64_to_array(&mut out[12..16], x.0[3]);
}
pub trait Operation<F: RichField> {
/// Convert operation into one or two rows of the trace.
///
/// Morally these types should be [F; NUM_ARITH_COLUMNS], but we
/// use vectors because that's what utils::transpose expects.
fn to_rows(&self) -> (Vec<F>, Option<Vec<F>>);
}
pub struct SimpleBinaryOp {
/// The operation is identified using the associated filter from
/// `columns::IS_ADD` etc., stored in `op_filter`.
op_filter: usize,
input0: U256,
input1: U256,
}
impl SimpleBinaryOp {
pub fn new(op_filter: usize, input0: U256, input1: U256) -> Self {
assert!(
op_filter == IS_ADD
|| op_filter == IS_SUB
|| op_filter == IS_MUL
|| op_filter == IS_LT
|| op_filter == IS_GT
);
Self {
op_filter,
input0,
input1,
}
}
}
impl<F: RichField> Operation<F> for SimpleBinaryOp {
fn to_rows(&self) -> (Vec<F>, Option<Vec<F>>) {
let mut row = vec![F::ZERO; NUM_ARITH_COLUMNS];
row[self.op_filter] = F::ONE;
// Each of these operations uses the same columns for input; the
// asserts ensure no-one changes this.
// TODO: This is ugly; should just remove the other
// *_INPUT_[01] variables and remove this.
debug_assert!([ADD_INPUT_0, SUB_INPUT_0, MUL_INPUT_0, CMP_INPUT_0,]
.iter()
.all(|x| *x == GENERAL_INPUT_0));
debug_assert!([ADD_INPUT_1, SUB_INPUT_1, MUL_INPUT_1, CMP_INPUT_1,]
.iter()
.all(|x| *x == GENERAL_INPUT_1));
u256_to_array(&mut row[GENERAL_INPUT_0], self.input0);
u256_to_array(&mut row[GENERAL_INPUT_1], self.input1);
// This is ugly, but it avoids the huge amount of boilerplate
// required to dispatch directly to each add/sub/etc. operation.
match self.op_filter {
IS_ADD => add::generate(&mut row),
IS_SUB => sub::generate(&mut row),
IS_MUL => mul::generate(&mut row),
IS_LT | IS_GT => compare::generate(&mut row, self.op_filter),
_ => panic!("unrecognised operation"),
}
(row, None)
}
}
pub struct ModularBinaryOp {
op_filter: usize,
input0: U256,
input1: U256,
modulus: U256,
}
impl ModularBinaryOp {
pub fn new(op_filter: usize, input0: U256, input1: U256, modulus: U256) -> Self {
assert!(op_filter == IS_ADDMOD || op_filter == IS_SUBMOD || op_filter == IS_MULMOD);
Self {
op_filter,
input0,
input1,
modulus,
}
}
}
fn modular_to_rows_helper<F: RichField>(
op_filter: usize,
input0: U256,
input1: U256,
modulus: U256,
) -> (Vec<F>, Option<Vec<F>>) {
let mut row1 = vec![F::ZERO; NUM_ARITH_COLUMNS];
let mut row2 = vec![F::ZERO; NUM_ARITH_COLUMNS];
row1[op_filter] = F::ONE;
u256_to_array(&mut row1[MODULAR_INPUT_0], input0);
u256_to_array(&mut row1[MODULAR_INPUT_1], input1);
u256_to_array(&mut row1[MODULAR_MODULUS], modulus);
modular::generate(&mut row1, &mut row2, op_filter);
(row1, Some(row2))
}
impl<F: RichField> Operation<F> for ModularBinaryOp {
fn to_rows(&self) -> (Vec<F>, Option<Vec<F>>) {
modular_to_rows_helper(self.op_filter, self.input0, self.input1, self.modulus)
}
}
pub struct ModOp {
pub input: U256,
pub modulus: U256,
}
impl<F: RichField> Operation<F> for ModOp {
fn to_rows(&self) -> (Vec<F>, Option<Vec<F>>) {
modular_to_rows_helper(IS_MOD, self.input, U256::zero(), self.modulus)
}
}
pub struct DivOp {
pub numerator: U256,
pub denominator: U256,
}
impl<F: RichField> Operation<F> for DivOp {
fn to_rows(&self) -> (Vec<F>, Option<Vec<F>>) {
let mut row1 = vec![F::ZERO; NUM_ARITH_COLUMNS];
let mut row2 = vec![F::ZERO; NUM_ARITH_COLUMNS];
row1[IS_DIV] = F::ONE;
u256_to_array(&mut row1[DIV_NUMERATOR], self.numerator);
u256_to_array(&mut row1[DIV_DENOMINATOR], self.denominator);
modular::generate(&mut row1, &mut row2, IS_DIV);
(row1, Some(row2))
}
}
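
As a usage note, `u256_to_array` lays out a U256 as sixteen 16-bit limbs, least-significant limb first. A hedged round-trip sketch (assuming `ethereum_types::U256` is a tuple struct whose `x.0[i]` is the i-th least-significant 64-bit word, as the code above relies on; the helper names are invented):

use ethereum_types::U256;

/// Decompose a U256 into sixteen 16-bit limbs, least-significant first.
fn to_limbs(x: U256) -> [u16; 16] {
    let mut out = [0u16; 16];
    for (i, limb) in out.iter_mut().enumerate() {
        let word = x.0[i / 4]; // i-th group of four limbs lives in word i/4
        *limb = (word >> (16 * (i % 4))) as u16;
    }
    out
}

/// Reassemble the original value from its limbs.
fn from_limbs(limbs: [u16; 16]) -> U256 {
    let mut words = [0u64; 4];
    for (i, &l) in limbs.iter().enumerate() {
        words[i / 4] |= (l as u64) << (16 * (i % 4));
    }
    U256(words)
}

fn main() {
    let x = U256::from(123456789u64) * U256::from(987654321u64);
    assert_eq!(from_limbs(to_limbs(x)), x);
    // Limb 0 holds the lowest 16 bits.
    assert_eq!(to_limbs(U256::from(0xABCDu64))[0], 0xABCD);
}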

View File

@ -8,7 +8,6 @@ use crate::arithmetic::add::{eval_ext_circuit_are_equal, eval_packed_generic_are
use crate::arithmetic::columns::*;
use crate::arithmetic::utils::read_value_u64_limbs;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::range_check_error;
pub(crate) fn u256_sub_br(input0: [u64; N_LIMBS], input1: [u64; N_LIMBS]) -> ([u64; N_LIMBS], u64) {
const LIMB_BOUNDARY: u64 = 1 << LIMB_BITS;
@ -28,7 +27,7 @@ pub(crate) fn u256_sub_br(input0: [u64; N_LIMBS], input1: [u64; N_LIMBS]) -> ([u
(output, br)
}
pub fn generate<F: RichField>(lv: &mut [F; NUM_ARITH_COLUMNS]) {
pub fn generate<F: RichField>(lv: &mut [F]) {
let input0 = read_value_u64_limbs(lv, SUB_INPUT_0);
let input1 = read_value_u64_limbs(lv, SUB_INPUT_1);
@ -41,10 +40,6 @@ pub fn eval_packed_generic<P: PackedField>(
lv: &[P; NUM_ARITH_COLUMNS],
yield_constr: &mut ConstraintConsumer<P>,
) {
range_check_error!(SUB_INPUT_0, 16);
range_check_error!(SUB_INPUT_1, 16);
range_check_error!(SUB_OUTPUT, 16);
let is_sub = lv[IS_SUB];
let input0_limbs = &lv[SUB_INPUT_0];
let input1_limbs = &lv[SUB_INPUT_1];

View File

@ -5,58 +5,7 @@ use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::arithmetic::columns::{NUM_ARITH_COLUMNS, N_LIMBS};
/// Emit an error message regarding unchecked range assumptions.
/// Assumes the values in `cols` are `[cols[0], cols[0] + 1, ...,
/// cols[0] + cols.len() - 1]`.
///
/// TODO: Hamish to delete this when he has implemented and integrated
/// range checks.
pub(crate) fn _range_check_error<const RC_BITS: u32>(
_file: &str,
_line: u32,
_cols: Range<usize>,
_signedness: &str,
) {
// error!(
// "{}:{}: arithmetic unit skipped {}-bit {} range-checks on columns {}--{}: not yet implemented",
// line,
// file,
// RC_BITS,
// signedness,
// cols.start,
// cols.end - 1,
// );
}
#[macro_export]
macro_rules! range_check_error {
($cols:ident, $rc_bits:expr) => {
$crate::arithmetic::utils::_range_check_error::<$rc_bits>(
file!(),
line!(),
$cols,
"unsigned",
);
};
($cols:ident, $rc_bits:expr, signed) => {
$crate::arithmetic::utils::_range_check_error::<$rc_bits>(
file!(),
line!(),
$cols,
"signed",
);
};
([$cols:ident], $rc_bits:expr) => {
$crate::arithmetic::utils::_range_check_error::<$rc_bits>(
file!(),
line!(),
&[$cols],
"unsigned",
);
};
}
use crate::arithmetic::columns::N_LIMBS;
/// Return an array of `N` zeros of type T.
pub(crate) fn pol_zero<T, const N: usize>() -> [T; N]
@ -139,11 +88,11 @@ pub(crate) fn pol_sub_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
b: [ExtensionTarget<D>; N_LIMBS],
) -> [ExtensionTarget<D>; 2 * N_LIMBS - 1] {
let zero = builder.zero_extension();
let mut sum = [zero; 2 * N_LIMBS - 1];
let mut diff = [zero; 2 * N_LIMBS - 1];
for i in 0..N_LIMBS {
sum[i] = builder.sub_extension(a[i], b[i]);
diff[i] = builder.sub_extension(a[i], b[i]);
}
sum
diff
}
/// a(x) -= b(x), but must have deg(a) >= deg(b).
@ -186,19 +135,13 @@ where
res
}
pub(crate) fn pol_mul_wide_ext_circuit<
F: RichField + Extendable<D>,
const D: usize,
const M: usize,
const N: usize,
const P: usize,
>(
pub(crate) fn pol_mul_wide_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
a: [ExtensionTarget<D>; M],
b: [ExtensionTarget<D>; N],
) -> [ExtensionTarget<D>; P] {
a: [ExtensionTarget<D>; N_LIMBS],
b: [ExtensionTarget<D>; N_LIMBS],
) -> [ExtensionTarget<D>; 2 * N_LIMBS - 1] {
let zero = builder.zero_extension();
let mut res = [zero; P];
let mut res = [zero; 2 * N_LIMBS - 1];
for (i, &ai) in a.iter().enumerate() {
for (j, &bj) in b.iter().enumerate() {
res[i + j] = builder.mul_add_extension(ai, bj, res[i + j]);
@ -322,8 +265,8 @@ pub(crate) fn pol_adjoin_root_ext_circuit<
) -> [ExtensionTarget<D>; N] {
let zero = builder.zero_extension();
let mut res = [zero; N];
// res[deg] = NEG_ONE * root * a[0] + ZERO * zero
res[0] = builder.arithmetic_extension(F::NEG_ONE, F::ZERO, root, a[0], zero);
// res[0] = NEG_ONE * root * a[0] + ZERO * zero
res[0] = builder.mul_extension_with_const(F::NEG_ONE, root, a[0]);
for deg in 1..N {
// res[deg] = NEG_ONE * root * a[deg] + ONE * a[deg - 1]
res[deg] = builder.arithmetic_extension(F::NEG_ONE, F::ONE, root, a[deg], a[deg - 1]);
@ -368,10 +311,7 @@ where
/// Read the range `value_idxs` of values from `lv` into an array of
/// length `N`. Panics if the length of the range is not `N`.
pub(crate) fn read_value<const N: usize, T: Copy>(
lv: &[T; NUM_ARITH_COLUMNS],
value_idxs: Range<usize>,
) -> [T; N] {
pub(crate) fn read_value<const N: usize, T: Copy>(lv: &[T], value_idxs: Range<usize>) -> [T; N] {
lv[value_idxs].try_into().unwrap()
}
@ -379,7 +319,7 @@ pub(crate) fn read_value<const N: usize, T: Copy>(
/// length `N`, interpreting the values as `u64`s. Panics if the
/// length of the range is not `N`.
pub(crate) fn read_value_u64_limbs<const N: usize, F: RichField>(
lv: &[F; NUM_ARITH_COLUMNS],
lv: &[F],
value_idxs: Range<usize>,
) -> [u64; N] {
let limbs: [_; N] = lv[value_idxs].try_into().unwrap();
@ -390,7 +330,7 @@ pub(crate) fn read_value_u64_limbs<const N: usize, F: RichField>(
/// length `N`, interpreting the values as `i64`s. Panics if the
/// length of the range is not `N`.
pub(crate) fn read_value_i64_limbs<const N: usize, F: RichField>(
lv: &[F; NUM_ARITH_COLUMNS],
lv: &[F],
value_idxs: Range<usize>,
) -> [i64; N] {
let limbs: [_; N] = lv[value_idxs].try_into().unwrap();
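
The two polynomial helpers that matter most for the auxiliary columns are `pol_adjoin_root` (multiply by (x - root)) and `pol_remove_root_2exp` (exact division by (x - 2^EXP)); when the dividend vanishes at the root, they are inverses. A hedged standalone sketch over plain i64 coefficients with β = 2^16, mirroring the recurrences in the comments above rather than the crate's generic implementations:

const BETA: i64 = 1 << 16;

/// Multiply a(x) by (x - BETA); the result has one more coefficient.
fn adjoin_root(a: &[i64]) -> Vec<i64> {
    let n = a.len();
    let mut res = vec![0i64; n + 1];
    res[0] = -BETA * a[0];
    for deg in 1..n {
        // res[deg] = a[deg - 1] - BETA * a[deg], as in the circuit comments.
        res[deg] = a[deg - 1] - BETA * a[deg];
    }
    res[n] = a[n - 1];
    res
}

/// Divide b(x) by (x - BETA), assuming b(x) = (x - BETA) * q(x) for an
/// integer polynomial q (guaranteed when b vanishes at BETA). The final
/// assert is a sanity check on the leading coefficient.
fn remove_root(b: &[i64]) -> Vec<i64> {
    let n = b.len();
    let mut q = vec![0i64; n - 1];
    // From b[0] = -BETA * q[0] and b[i] = q[i-1] - BETA * q[i].
    q[0] = -b[0] / BETA;
    for i in 1..n - 1 {
        q[i] = (q[i - 1] - b[i]) / BETA;
    }
    assert_eq!(b[n - 1], q[n - 2]);
    q
}

fn main() {
    let a = vec![3, -7, 42, 0, 5];
    let b = adjoin_root(&a);
    assert_eq!(remove_root(&b), a);
}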

View File

@ -116,13 +116,9 @@ pub fn permuted_cols<F: PrimeField64>(inputs: &[F], table: &[F]) -> (Vec<F>, Vec
}
}
#[allow(clippy::needless_range_loop)] // indexing is just more natural here
for jj in j..n {
unused_table_vals.push(sorted_table[jj]);
}
for ii in i..n {
unused_table_inds.push(ii);
}
unused_table_vals.extend_from_slice(&sorted_table[j..n]);
unused_table_inds.extend(i..n);
for (ind, val) in unused_table_inds.into_iter().zip_eq(unused_table_vals) {
permuted_table[ind] = val;
}