Merge branch 'main' into stark_permutation_checks

wborgeaud 2022-02-21 10:19:12 +01:00
commit f4a29a0249
17 changed files with 334 additions and 152 deletions

View File

@ -53,12 +53,12 @@ impl<P: PackedField> ConstraintConsumer<P> {
}
/// Add one constraint valid on all rows except the last.
pub fn constraint(&mut self, constraint: P) {
self.constraint_wrapping(constraint * self.z_last);
pub fn constraint_transition(&mut self, constraint: P) {
self.constraint(constraint * self.z_last);
}
/// Add one constraint on all rows.
pub fn constraint_wrapping(&mut self, constraint: P) {
pub fn constraint(&mut self, constraint: P) {
for (&alpha, acc) in self.alphas.iter().zip(&mut self.constraint_accs) {
*acc *= alpha;
*acc += constraint;
@ -68,13 +68,13 @@ impl<P: PackedField> ConstraintConsumer<P> {
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// first row of the trace.
pub fn constraint_first_row(&mut self, constraint: P) {
self.constraint_wrapping(constraint * self.lagrange_basis_first);
self.constraint(constraint * self.lagrange_basis_first);
}
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// last row of the trace.
pub fn constraint_last_row(&mut self, constraint: P) {
self.constraint_wrapping(constraint * self.lagrange_basis_last);
self.constraint(constraint * self.lagrange_basis_last);
}
}
@ -122,17 +122,17 @@ impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F
}
/// Add one constraint valid on all rows except the last.
pub fn constraint(
pub fn constraint_transition(
&mut self,
builder: &mut CircuitBuilder<F, D>,
constraint: ExtensionTarget<D>,
) {
let filtered_constraint = builder.mul_extension(constraint, self.z_last);
self.constraint_wrapping(builder, filtered_constraint);
self.constraint(builder, filtered_constraint);
}
/// Add one constraint valid on all rows.
pub fn constraint_wrapping(
pub fn constraint(
&mut self,
builder: &mut CircuitBuilder<F, D>,
constraint: ExtensionTarget<D>,
@ -150,7 +150,7 @@ impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F
constraint: ExtensionTarget<D>,
) {
let filtered_constraint = builder.mul_extension(constraint, self.lagrange_basis_first);
self.constraint_wrapping(builder, filtered_constraint);
self.constraint(builder, filtered_constraint);
}
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
@ -161,6 +161,6 @@ impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F
constraint: ExtensionTarget<D>,
) {
let filtered_constraint = builder.mul_extension(constraint, self.lagrange_basis_last);
self.constraint_wrapping(builder, filtered_constraint);
self.constraint(builder, filtered_constraint);
}
}
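In short, after this rename `constraint` accumulates a check that must hold on every row, while `constraint_transition` (the old `constraint`) first multiplies by `z_last`, which vanishes only on the last row, so transition checks are skipped there. The accumulation loop folds every constraint into one value per challenge alpha via Horner's rule; below is a minimal standalone sketch of that folding in plain Rust, using the Goldilocks prime only as a modulus rather than the crate's PackedField types.

// Sketch: after feeding k constraints c_0..c_{k-1}, the accumulator holds
// sum_i alpha^(k-1-i) * c_i, i.e. a random linear combination of all constraints.
const P: u64 = 0xffff_ffff_0000_0001; // Goldilocks prime, used here only as a modulus

fn mul_mod(a: u64, b: u64) -> u64 {
    ((a as u128 * b as u128) % P as u128) as u64
}

fn add_mod(a: u64, b: u64) -> u64 {
    ((a as u128 + b as u128) % P as u128) as u64
}

fn main() {
    let alpha = 7u64;
    let constraints = [3u64, 0, 5, 11];
    // Accumulation exactly as in the loop above: acc = acc * alpha + constraint.
    let mut acc = 0u64;
    for &c in &constraints {
        acc = add_mod(mul_mod(acc, alpha), c);
    }
    // Explicit expansion of the same sum.
    let mut expected = 0u64;
    for (i, &c) in constraints.iter().enumerate() {
        let mut pow = 1u64;
        for _ in 0..constraints.len() - 1 - i {
            pow = mul_mod(pow, alpha);
        }
        expected = add_mod(expected, mul_mod(pow, c));
    }
    assert_eq!(acc, expected);
}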

View File

@ -68,9 +68,11 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for FibonacciStar
.constraint_last_row(vars.local_values[1] - vars.public_inputs[Self::PI_INDEX_RES]);
// x0' <- x1
yield_constr.constraint(vars.next_values[0] - vars.local_values[1]);
yield_constr.constraint_transition(vars.next_values[0] - vars.local_values[1]);
// x1' <- x0 + x1
yield_constr.constraint(vars.next_values[1] - vars.local_values[0] - vars.local_values[1]);
yield_constr.constraint_transition(
vars.next_values[1] - vars.local_values[0] - vars.local_values[1],
);
}
fn eval_ext_recursively(
@ -91,13 +93,13 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for FibonacciStar
// x0' <- x1
let first_col_constraint = builder.sub_extension(vars.next_values[0], vars.local_values[1]);
yield_constr.constraint(builder, first_col_constraint);
yield_constr.constraint_transition(builder, first_col_constraint);
// x1' <- x0 + x1
let second_col_constraint = {
let tmp = builder.sub_extension(vars.next_values[1], vars.local_values[0]);
builder.sub_extension(tmp, vars.local_values[1])
};
yield_constr.constraint(builder, second_col_constraint);
yield_constr.constraint_transition(builder, second_col_constraint);
}
fn constraint_degree(&self) -> usize {
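The renamed calls make explicit that the Fibonacci step relates a row to its successor; the last row has no successor, so these must be transition constraints rather than all-row constraints. A minimal sketch of the two-column trace and both checks, in plain Rust with no field reduction (the values stay tiny for a short trace):

fn main() {
    let num_rows = 8;
    let mut trace: Vec<[u64; 2]> = vec![[0, 1]]; // public inputs: x0 = 0, x1 = 1
    for i in 1..num_rows {
        let [x0, x1] = trace[i - 1];
        trace.push([x1, x0 + x1]); // x0' <- x1, x1' <- x0 + x1
    }
    // A transition constraint relates a row to its successor, so it can only be
    // enforced on rows that have one -- every row except the last. That is exactly
    // what `constraint_transition` expresses.
    for w in trace.windows(2) {
        let (local, next) = (w[0], w[1]);
        assert_eq!(next[0], local[1]);            // x0' - x1 == 0
        assert_eq!(next[1], local[0] + local[1]); // x1' - x0 - x1 == 0
    }
    assert_eq!(trace[num_rows - 1][1], 21); // F_8 = 21 when starting from (0, 1)
}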

View File

@ -6,6 +6,7 @@ edition = "2021"
[dependencies]
plonky2 = { path = "../plonky2" }
plonky2_util = { path = "../util" }
starky = { path = "../starky" }
anyhow = "1.0.40"
env_logger = "0.9.0"

View File

@ -7,18 +7,18 @@ use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::plonk_common::reduce_with_powers_ext_recursive;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::registers::arithmetic::*;
use crate::registers::alu::*;
use crate::registers::NUM_COLUMNS;
pub(crate) fn generate_addition<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
let in_1 = values[COL_ADD_INPUT_1].to_canonical_u64();
let in_2 = values[COL_ADD_INPUT_2].to_canonical_u64();
let in_3 = values[COL_ADD_INPUT_3].to_canonical_u64();
let in_1 = values[COL_ADD_INPUT_0].to_canonical_u64();
let in_2 = values[COL_ADD_INPUT_1].to_canonical_u64();
let in_3 = values[COL_ADD_INPUT_2].to_canonical_u64();
let output = in_1 + in_2 + in_3;
values[COL_ADD_OUTPUT_1] = F::from_canonical_u16(output as u16);
values[COL_ADD_OUTPUT_2] = F::from_canonical_u16((output >> 16) as u16);
values[COL_ADD_OUTPUT_3] = F::from_canonical_u16((output >> 32) as u16);
values[COL_ADD_OUTPUT_0] = F::from_canonical_u16(output as u16);
values[COL_ADD_OUTPUT_1] = F::from_canonical_u16((output >> 16) as u16);
values[COL_ADD_OUTPUT_2] = F::from_canonical_u16((output >> 32) as u16);
}
pub(crate) fn eval_addition<F: Field, P: PackedField<Scalar = F>>(
@ -26,12 +26,12 @@ pub(crate) fn eval_addition<F: Field, P: PackedField<Scalar = F>>(
yield_constr: &mut ConstraintConsumer<P>,
) {
let is_add = local_values[IS_ADD];
let in_1 = local_values[COL_ADD_INPUT_1];
let in_2 = local_values[COL_ADD_INPUT_2];
let in_3 = local_values[COL_ADD_INPUT_3];
let out_1 = local_values[COL_ADD_OUTPUT_1];
let out_2 = local_values[COL_ADD_OUTPUT_2];
let out_3 = local_values[COL_ADD_OUTPUT_3];
let in_1 = local_values[COL_ADD_INPUT_0];
let in_2 = local_values[COL_ADD_INPUT_1];
let in_3 = local_values[COL_ADD_INPUT_2];
let out_1 = local_values[COL_ADD_OUTPUT_0];
let out_2 = local_values[COL_ADD_OUTPUT_1];
let out_3 = local_values[COL_ADD_OUTPUT_2];
let weight_2 = F::from_canonical_u64(1 << 16);
let weight_3 = F::from_canonical_u64(1 << 32);
@ -41,7 +41,7 @@ pub(crate) fn eval_addition<F: Field, P: PackedField<Scalar = F>>(
let computed_out = in_1 + in_2 + in_3;
yield_constr.constraint_wrapping(is_add * (out - computed_out));
yield_constr.constraint(is_add * (out - computed_out));
}
pub(crate) fn eval_addition_recursively<F: RichField + Extendable<D>, const D: usize>(
@ -50,12 +50,12 @@ pub(crate) fn eval_addition_recursively<F: RichField + Extendable<D>, const D: u
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let is_add = local_values[IS_ADD];
let in_1 = local_values[COL_ADD_INPUT_1];
let in_2 = local_values[COL_ADD_INPUT_2];
let in_3 = local_values[COL_ADD_INPUT_3];
let out_1 = local_values[COL_ADD_OUTPUT_1];
let out_2 = local_values[COL_ADD_OUTPUT_2];
let out_3 = local_values[COL_ADD_OUTPUT_3];
let in_1 = local_values[COL_ADD_INPUT_0];
let in_2 = local_values[COL_ADD_INPUT_1];
let in_3 = local_values[COL_ADD_INPUT_2];
let out_1 = local_values[COL_ADD_OUTPUT_0];
let out_2 = local_values[COL_ADD_OUTPUT_1];
let out_3 = local_values[COL_ADD_OUTPUT_2];
let limb_base = builder.constant(F::from_canonical_u64(1 << 16));
// Note that this can't overflow. Since each output limb has been range checked as 16-bits,
@ -66,5 +66,5 @@ pub(crate) fn eval_addition_recursively<F: RichField + Extendable<D>, const D: u
let diff = builder.sub_extension(out, computed_out);
let filtered_diff = builder.mul_extension(is_add, diff);
yield_constr.constraint_wrapping(builder, filtered_diff);
yield_constr.constraint(builder, filtered_diff);
}
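For context, this generator/constraint pair relies on the sum of three u32 inputs being below 3 * 2^32 < 2^48, so three 16-bit limbs with weights 1, 2^16 and 2^32 round-trip it exactly. A standalone sketch of that arithmetic, assuming nothing beyond plain u64s:

fn main() {
    let (in_1, in_2, in_3) = (0xdead_beef_u64, 0xffff_ffff_u64, 0x0123_4567_u64);
    let output = in_1 + in_2 + in_3;
    assert!(output < 1 << 48); // three u32s sum to less than 2^48

    // Decomposition, as in the trace generator.
    let out_0 = output as u16 as u64;
    let out_1 = (output >> 16) as u16 as u64;
    let out_2 = (output >> 32) as u16 as u64;

    // Recombination, as in the constraint `out - (in_1 + in_2 + in_3) == 0`.
    let recombined = out_0 + (out_1 << 16) + (out_2 << 32);
    assert_eq!(recombined, output);
}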

View File

@ -0,0 +1,109 @@
//! Helper methods for checking that a value is canonical, i.e. is less than `|F|`.
//!
//! See https://hackmd.io/NC-yRmmtRQSvToTHb96e8Q#Checking-element-validity
use plonky2::field::extension_field::Extendable;
use plonky2::field::field_types::Field;
use plonky2::field::packed_field::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
/// Computes the helper value used in the is-canonical check.
pub(crate) fn compute_canonical_inv<F: Field>(value_to_check: u64) -> F {
let value_hi_32 = (value_to_check >> 32) as u32;
if value_hi_32 == u32::MAX {
debug_assert_eq!(value_to_check as u32, 0, "Value was not canonical.");
// In this case it doesn't matter what we put for the purported inverse value. The
// constraint containing this value will get multiplied by the low u32 limb, which will be
// zero, satisfying the constraint regardless of what we put here.
F::ZERO
} else {
F::from_canonical_u32(u32::MAX - value_hi_32).inverse()
}
}
/// Adds constraints to require that four `u16` limbs, in little-endian order, represent a
/// canonical field element, i.e. that their combined value is less than `|F|`. Returns their
/// combined value.
pub(crate) fn combine_u16s_check_canonical<F: Field, P: PackedField<Scalar = F>>(
limb_0_u16: P,
limb_1_u16: P,
limb_2_u16: P,
limb_3_u16: P,
inverse: P,
yield_constr: &mut ConstraintConsumer<P>,
) -> P {
let base = F::from_canonical_u32(1 << 16);
let limb_0_u32 = limb_0_u16 + limb_1_u16 * base;
let limb_1_u32 = limb_2_u16 + limb_3_u16 * base;
combine_u32s_check_canonical(limb_0_u32, limb_1_u32, inverse, yield_constr)
}
/// Adds constraints to require that four `u16` limbs, in little-endian order, represent a
/// canonical field element, i.e. that their combined value is less than `|F|`. Returns their
/// combined value.
pub(crate) fn combine_u16s_check_canonical_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
limb_0_u16: ExtensionTarget<D>,
limb_1_u16: ExtensionTarget<D>,
limb_2_u16: ExtensionTarget<D>,
limb_3_u16: ExtensionTarget<D>,
inverse: ExtensionTarget<D>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) -> ExtensionTarget<D> {
let base = F::from_canonical_u32(1 << 16);
let limb_0_u32 = builder.mul_const_add_extension(base, limb_1_u16, limb_0_u16);
let limb_1_u32 = builder.mul_const_add_extension(base, limb_3_u16, limb_2_u16);
combine_u32s_check_canonical_circuit(builder, limb_0_u32, limb_1_u32, inverse, yield_constr)
}
/// Adds constraints to require that two `u32` limbs, in little-endian order, represent a canonical
/// field element, i.e. that their combined value is less than `|F|`. Returns their combined value.
pub(crate) fn combine_u32s_check_canonical<F: Field, P: PackedField<Scalar = F>>(
limb_0_u32: P,
limb_1_u32: P,
inverse: P,
yield_constr: &mut ConstraintConsumer<P>,
) -> P {
let u32_max = P::from(F::from_canonical_u32(u32::MAX));
// This is zero if and only if the high limb is `u32::MAX`.
let diff = u32_max - limb_1_u32;
// If this is zero, the diff is invertible, so the high limb is not `u32::MAX`.
let hi_not_max = inverse * diff - F::ONE;
// If this is zero, either the high limb is not `u32::MAX`, or the low limb is zero.
let hi_not_max_or_lo_zero = hi_not_max * limb_0_u32;
yield_constr.constraint(hi_not_max_or_lo_zero);
// Return the combined value.
limb_0_u32 + limb_1_u32 * F::from_canonical_u64(1 << 32)
}
/// Adds constraints to require that two `u32` limbs, in little-endian order, represent a canonical
/// field element, i.e. that their combined value is less than `|F|`. Returns their combined value.
pub(crate) fn combine_u32s_check_canonical_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
limb_0_u32: ExtensionTarget<D>,
limb_1_u32: ExtensionTarget<D>,
inverse: ExtensionTarget<D>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) -> ExtensionTarget<D> {
let one = builder.one_extension();
let u32_max = builder.constant_extension(F::Extension::from_canonical_u32(u32::MAX));
// This is zero if and only if the high limb is `u32::MAX`.
let diff = builder.sub_extension(u32_max, limb_1_u32);
// If this is zero, the diff is invertible, so the high limb is not `u32::MAX`.
let hi_not_max = builder.mul_sub_extension(inverse, diff, one);
// If this is zero, either the high limb is not `u32::MAX`, or the low limb is zero.
let hi_not_max_or_lo_zero = builder.mul_extension(hi_not_max, limb_0_u32);
yield_constr.constraint(builder, hi_not_max_or_lo_zero);
// Return the combined value.
builder.mul_const_add_extension(F::from_canonical_u64(1 << 32), limb_1_u32, limb_0_u32)
}
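A worked example of the check in plain integers: the Goldilocks prime is p = 2^64 - 2^32 + 1, so a value hi * 2^32 + lo (32-bit hi and lo) is canonical exactly when hi != u32::MAX, or hi == u32::MAX and lo == 0. The sketch below mirrors `combine_u32s_check_canonical` with the advice value computed as in `compute_canonical_inv`; the modular inverse via Fermat's little theorem is for illustration only.

const P: u128 = (1 << 64) - (1 << 32) + 1; // Goldilocks prime

fn pow_mod(mut base: u128, mut exp: u128) -> u128 {
    let mut acc = 1u128;
    base %= P;
    while exp > 0 {
        if exp & 1 == 1 { acc = acc * base % P; }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

fn inv_mod(x: u128) -> u128 { pow_mod(x, P - 2) } // Fermat: x^(p-2) = x^-1 mod p

fn constraint_holds(value: u64) -> bool {
    let hi = (value >> 32) as u128;
    let lo = (value & 0xffff_ffff) as u128;
    let diff = (u32::MAX as u128 + P - hi) % P;
    // Prover-supplied advice: the inverse of diff when it exists, 0 otherwise.
    let inverse = if diff == 0 { 0 } else { inv_mod(diff) };
    let hi_not_max = (inverse * diff % P + P - 1) % P;
    // The constraint: (inverse * (u32::MAX - hi) - 1) * lo == 0 (mod p).
    hi_not_max * lo % P == 0
}

fn main() {
    assert!(constraint_holds(12345));                  // ordinary canonical value
    assert!(constraint_holds(0xffff_ffff_0000_0000));  // hi = u32::MAX, lo = 0: still canonical
    assert!(!constraint_holds(0xffff_ffff_0000_0001)); // this is p itself: not canonical
    assert!(!constraint_holds(u64::MAX));              // above p: not canonical
}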

View File

@ -6,7 +6,7 @@ use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::registers::arithmetic::*;
use crate::registers::alu::*;
use crate::registers::NUM_COLUMNS;
pub(crate) fn generate_division<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {

View File

@ -7,36 +7,35 @@ use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsume
use starky::vars::StarkEvaluationTargets;
use starky::vars::StarkEvaluationVars;
use crate::arithmetic::addition::{eval_addition, eval_addition_recursively, generate_addition};
use crate::arithmetic::division::{eval_division, eval_division_recursively, generate_division};
use crate::arithmetic::multiplication::{
eval_multiplication, eval_multiplication_recursively, generate_multiplication,
};
use crate::arithmetic::subtraction::{
use crate::alu::addition::{eval_addition, eval_addition_recursively, generate_addition};
use crate::alu::division::{eval_division, eval_division_recursively, generate_division};
use crate::alu::mul_add::{eval_mul_add, eval_mul_add_recursively, generate_mul_add};
use crate::alu::subtraction::{
eval_subtraction, eval_subtraction_recursively, generate_subtraction,
};
use crate::public_input_layout::NUM_PUBLIC_INPUTS;
use crate::registers::arithmetic::*;
use crate::registers::alu::*;
use crate::registers::NUM_COLUMNS;
mod addition;
mod canonical;
mod division;
mod multiplication;
mod mul_add;
mod subtraction;
pub(crate) fn generate_arithmetic_unit<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
pub(crate) fn generate_alu<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
if values[IS_ADD].is_one() {
generate_addition(values);
} else if values[IS_SUB].is_one() {
generate_subtraction(values);
} else if values[IS_MUL].is_one() {
generate_multiplication(values);
generate_mul_add(values);
} else if values[IS_DIV].is_one() {
generate_division(values);
}
}
pub(crate) fn eval_arithmetic_unit<F: Field, P: PackedField<Scalar = F>>(
pub(crate) fn eval_alu<F: Field, P: PackedField<Scalar = F>>(
vars: StarkEvaluationVars<F, P, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
yield_constr: &mut ConstraintConsumer<P>,
) {
@ -45,16 +44,16 @@ pub(crate) fn eval_arithmetic_unit<F: Field, P: PackedField<Scalar = F>>(
// Check that the operation flag values are binary.
for col in [IS_ADD, IS_SUB, IS_MUL, IS_DIV] {
let val = local_values[col];
yield_constr.constraint_wrapping(val * val - val);
yield_constr.constraint(val * val - val);
}
eval_addition(local_values, yield_constr);
eval_subtraction(local_values, yield_constr);
eval_multiplication(local_values, yield_constr);
eval_mul_add(local_values, yield_constr);
eval_division(local_values, yield_constr);
}
pub(crate) fn eval_arithmetic_unit_recursively<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_alu_recursively<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
vars: StarkEvaluationTargets<D, NUM_COLUMNS, NUM_PUBLIC_INPUTS>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
@ -65,11 +64,11 @@ pub(crate) fn eval_arithmetic_unit_recursively<F: RichField + Extendable<D>, con
for col in [IS_ADD, IS_SUB, IS_MUL, IS_DIV] {
let val = local_values[col];
let constraint = builder.mul_sub_extension(val, val, val);
yield_constr.constraint_wrapping(builder, constraint);
yield_constr.constraint(builder, constraint);
}
eval_addition_recursively(builder, local_values, yield_constr);
eval_subtraction_recursively(builder, local_values, yield_constr);
eval_multiplication_recursively(builder, local_values, yield_constr);
eval_mul_add_recursively(builder, local_values, yield_constr);
eval_division_recursively(builder, local_values, yield_constr);
}
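A small standalone sketch of why the booleanity constraint suffices, and of how a unit's constraints are switched off on rows where its flag is zero (plain integers, illustration only):

fn main() {
    // v^2 - v == 0 has exactly the roots 0 and 1, so each IS_* flag must be boolean.
    let booleanity = |v: i64| v * v - v;
    assert_eq!(booleanity(0), 0);
    assert_eq!(booleanity(1), 0);
    assert_ne!(booleanity(2), 0); // any non-boolean value violates the constraint

    // Every unit's constraints are evaluated on every row, but each is multiplied by its
    // flag (e.g. `is_add * (out - computed_out)`), so it only binds when the unit is active.
    let is_add = 0_i64;
    let (out, computed_out) = (7_i64, 999_i64); // garbage when the adder is inactive
    assert_eq!(is_add * (out - computed_out), 0);
}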

View File

@ -0,0 +1,88 @@
use plonky2::field::extension_field::Extendable;
use plonky2::field::field_types::{Field, PrimeField64};
use plonky2::field::packed_field::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2_util::assume;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::alu::canonical::*;
use crate::registers::alu::*;
use crate::registers::NUM_COLUMNS;
pub(crate) fn generate_mul_add<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
let factor_0 = values[COL_MUL_ADD_FACTOR_0].to_canonical_u64();
let factor_1 = values[COL_MUL_ADD_FACTOR_1].to_canonical_u64();
let addend = values[COL_MUL_ADD_ADDEND].to_canonical_u64();
// Let the compiler know that each input must fit in 32 bits.
assume(factor_0 <= u32::MAX as u64);
assume(factor_1 <= u32::MAX as u64);
assume(addend <= u32::MAX as u64);
let output = factor_0 * factor_1 + addend;
// An advice value used to help verify that the limbs represent a canonical field element.
values[COL_MUL_ADD_RESULT_CANONICAL_INV] = compute_canonical_inv(output);
values[COL_MUL_ADD_OUTPUT_0] = F::from_canonical_u16(output as u16);
values[COL_MUL_ADD_OUTPUT_1] = F::from_canonical_u16((output >> 16) as u16);
values[COL_MUL_ADD_OUTPUT_2] = F::from_canonical_u16((output >> 32) as u16);
values[COL_MUL_ADD_OUTPUT_3] = F::from_canonical_u16((output >> 48) as u16);
}
pub(crate) fn eval_mul_add<F: Field, P: PackedField<Scalar = F>>(
local_values: &[P; NUM_COLUMNS],
yield_constr: &mut ConstraintConsumer<P>,
) {
let is_mul = local_values[IS_MUL];
let factor_0 = local_values[COL_MUL_ADD_FACTOR_0];
let factor_1 = local_values[COL_MUL_ADD_FACTOR_1];
let addend = local_values[COL_MUL_ADD_ADDEND];
let output_1 = local_values[COL_MUL_ADD_OUTPUT_0];
let output_2 = local_values[COL_MUL_ADD_OUTPUT_1];
let output_3 = local_values[COL_MUL_ADD_OUTPUT_2];
let output_4 = local_values[COL_MUL_ADD_OUTPUT_3];
let result_canonical_inv = local_values[COL_MUL_ADD_RESULT_CANONICAL_INV];
let computed_output = factor_0 * factor_1 + addend;
let output = combine_u16s_check_canonical(
output_1,
output_2,
output_3,
output_4,
result_canonical_inv,
yield_constr,
);
yield_constr.constraint(computed_output - output);
}
pub(crate) fn eval_mul_add_recursively<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
local_values: &[ExtensionTarget<D>; NUM_COLUMNS],
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let is_mul = local_values[IS_MUL];
let factor_0 = local_values[COL_MUL_ADD_FACTOR_0];
let factor_1 = local_values[COL_MUL_ADD_FACTOR_1];
let addend = local_values[COL_MUL_ADD_ADDEND];
let output_1 = local_values[COL_MUL_ADD_OUTPUT_0];
let output_2 = local_values[COL_MUL_ADD_OUTPUT_1];
let output_3 = local_values[COL_MUL_ADD_OUTPUT_2];
let output_4 = local_values[COL_MUL_ADD_OUTPUT_3];
let result_canonical_inv = local_values[COL_MUL_ADD_RESULT_CANONICAL_INV];
let computed_output = builder.mul_add_extension(factor_0, factor_1, addend);
let output = combine_u16s_check_canonical_circuit(
builder,
output_1,
output_2,
output_3,
output_4,
result_canonical_inv,
yield_constr,
);
let diff = builder.sub_extension(computed_output, output);
yield_constr.constraint(builder, diff);
}
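The generator relies on the product-plus-addend of 32-bit inputs fitting in a u64: the worst case is (2^32 - 1)^2 + (2^32 - 1) = 2^64 - 2^32. A standalone sketch of that bound and of the four-limb decomposition, whose value can exceed the field order and therefore also needs the canonical check above:

fn main() {
    // Worst case: all inputs at u32::MAX.
    let (factor_0, factor_1, addend) = (u32::MAX as u64, u32::MAX as u64, u32::MAX as u64);
    let output = factor_0
        .checked_mul(factor_1)
        .and_then(|p| p.checked_add(addend))
        .expect("cannot overflow for 32-bit inputs");
    assert_eq!(output, u64::MAX - u32::MAX as u64); // 2^64 - 2^32

    // Four-limb decomposition, as written into COL_MUL_ADD_OUTPUT_0..3.
    let limbs = [
        output as u16 as u64,
        (output >> 16) as u16 as u64,
        (output >> 32) as u16 as u64,
        (output >> 48) as u16 as u64,
    ];
    let recombined = limbs[0] + (limbs[1] << 16) + (limbs[2] << 32) + (limbs[3] << 48);
    assert_eq!(recombined, output);
    // Unlike the three-limb addition output, a four-limb value can exceed the field
    // order, hence the extra `combine_u16s_check_canonical` constraint on the result.
}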

View File

@ -6,7 +6,7 @@ use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::registers::arithmetic::*;
use crate::registers::alu::*;
use crate::registers::NUM_COLUMNS;
pub(crate) fn generate_subtraction<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {

View File

@ -1,31 +0,0 @@
use plonky2::field::extension_field::Extendable;
use plonky2::field::field_types::{Field, PrimeField64};
use plonky2::field::packed_field::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::registers::arithmetic::*;
use crate::registers::NUM_COLUMNS;
pub(crate) fn generate_multiplication<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
// TODO
}
pub(crate) fn eval_multiplication<F: Field, P: PackedField<Scalar = F>>(
local_values: &[P; NUM_COLUMNS],
yield_constr: &mut ConstraintConsumer<P>,
) {
let is_mul = local_values[IS_MUL];
// TODO
}
pub(crate) fn eval_multiplication_recursively<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
local_values: &[ExtensionTarget<D>; NUM_COLUMNS],
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let is_mul = local_values[IS_MUL];
// TODO
}

View File

@ -49,7 +49,7 @@ pub(crate) fn eval_core_registers<F: Field, P: PackedField<Scalar = F>>(
let next_clock = vars.next_values[COL_CLOCK];
let delta_clock = next_clock - local_clock;
yield_constr.constraint_first_row(local_clock);
yield_constr.constraint(delta_clock - F::ONE);
yield_constr.constraint_transition(delta_clock - F::ONE);
// The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1.
let local_range_16 = vars.local_values[COL_RANGE_16];
@ -57,7 +57,7 @@ pub(crate) fn eval_core_registers<F: Field, P: PackedField<Scalar = F>>(
let delta_range_16 = next_range_16 - local_range_16;
yield_constr.constraint_first_row(local_range_16);
yield_constr.constraint_last_row(local_range_16 - F::from_canonical_u64((1 << 16) - 1));
yield_constr.constraint(delta_range_16 * delta_range_16 - delta_range_16);
yield_constr.constraint_transition(delta_range_16 * delta_range_16 - delta_range_16);
// TODO constraints for stack etc.
}
@ -77,7 +77,7 @@ pub(crate) fn eval_core_registers_recursively<F: RichField + Extendable<D>, cons
let delta_clock = builder.sub_extension(next_clock, local_clock);
yield_constr.constraint_first_row(builder, local_clock);
let constraint = builder.sub_extension(delta_clock, one_ext);
yield_constr.constraint(builder, constraint);
yield_constr.constraint_transition(builder, constraint);
// The 16-bit table must start with 0, end with 2^16 - 1, and increment by 0 or 1.
let local_range_16 = vars.local_values[COL_RANGE_16];
@ -87,7 +87,7 @@ pub(crate) fn eval_core_registers_recursively<F: RichField + Extendable<D>, cons
let constraint = builder.sub_extension(local_range_16, max_u16_ext);
yield_constr.constraint_last_row(builder, constraint);
let constraint = builder.mul_add_extension(delta_range_16, delta_range_16, delta_range_16);
yield_constr.constraint(builder, constraint);
yield_constr.constraint_transition(builder, constraint);
// TODO constraints for stack etc.
}
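A minimal sketch of the running 16-bit table that the first-row, last-row and transition constraints pin down: starting at 0, ending at 2^16 - 1, and stepping by 0 or 1, the column necessarily passes through every 16-bit value (plain u64s, illustration only):

fn main() {
    let num_rows = 1 << 17; // must be at least 2^16 rows to reach the final value
    let mut col = Vec::with_capacity(num_rows);
    for i in 0..num_rows {
        // Increment by 1 until the maximum is reached, then repeat it.
        col.push((i as u64).min((1 << 16) - 1));
    }
    assert_eq!(col[0], 0);                        // constraint_first_row
    assert_eq!(col[num_rows - 1], (1 << 16) - 1); // constraint_last_row
    for w in col.windows(2) {
        let delta = w[1] - w[0];
        assert_eq!(delta * delta - delta, 0);     // constraint_transition: delta in {0, 1}
    }
}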

View File

@ -2,7 +2,7 @@
#![allow(dead_code)]
#![allow(unused_variables)]
mod arithmetic;
mod alu;
mod core_registers;
mod memory;
mod permutation_unit;

View File

@ -127,8 +127,7 @@ pub(crate) fn eval_permutation_unit<F, FE, P, const D: usize>(
for i in 0..SPONGE_WIDTH {
let state_cubed = state[i] * state[i].square();
yield_constr
.constraint_wrapping(state_cubed - local_values[col_full_first_mid_sbox(r, i)]);
yield_constr.constraint(state_cubed - local_values[col_full_first_mid_sbox(r, i)]);
let state_cubed = local_values[col_full_first_mid_sbox(r, i)];
state[i] *= state_cubed.square(); // Form state ** 7.
}
@ -136,8 +135,7 @@ pub(crate) fn eval_permutation_unit<F, FE, P, const D: usize>(
state = mds_layer(state);
for i in 0..SPONGE_WIDTH {
yield_constr
.constraint_wrapping(state[i] - local_values[col_full_first_after_mds(r, i)]);
yield_constr.constraint(state[i] - local_values[col_full_first_after_mds(r, i)]);
state[i] = local_values[col_full_first_after_mds(r, i)];
}
}
@ -146,10 +144,10 @@ pub(crate) fn eval_permutation_unit<F, FE, P, const D: usize>(
state = constant_layer(state, HALF_N_FULL_ROUNDS + r);
let state0_cubed = state[0] * state[0].square();
yield_constr.constraint_wrapping(state0_cubed - local_values[col_partial_mid_sbox(r)]);
yield_constr.constraint(state0_cubed - local_values[col_partial_mid_sbox(r)]);
let state0_cubed = local_values[col_partial_mid_sbox(r)];
state[0] *= state0_cubed.square(); // Form state ** 7.
yield_constr.constraint_wrapping(state[0] - local_values[col_partial_after_sbox(r)]);
yield_constr.constraint(state[0] - local_values[col_partial_after_sbox(r)]);
state[0] = local_values[col_partial_after_sbox(r)];
state = mds_layer(state);
@ -160,8 +158,7 @@ pub(crate) fn eval_permutation_unit<F, FE, P, const D: usize>(
for i in 0..SPONGE_WIDTH {
let state_cubed = state[i] * state[i].square();
yield_constr
.constraint_wrapping(state_cubed - local_values[col_full_second_mid_sbox(r, i)]);
yield_constr.constraint(state_cubed - local_values[col_full_second_mid_sbox(r, i)]);
let state_cubed = local_values[col_full_second_mid_sbox(r, i)];
state[i] *= state_cubed.square(); // Form state ** 7.
}
@ -169,8 +166,7 @@ pub(crate) fn eval_permutation_unit<F, FE, P, const D: usize>(
state = mds_layer(state);
for i in 0..SPONGE_WIDTH {
yield_constr
.constraint_wrapping(state[i] - local_values[col_full_second_after_mds(r, i)]);
yield_constr.constraint(state[i] - local_values[col_full_second_after_mds(r, i)]);
state[i] = local_values[col_full_second_after_mds(r, i)];
}
}
@ -197,7 +193,7 @@ pub(crate) fn eval_permutation_unit_recursively<F: RichField + Extendable<D>, co
let state_cubed = builder.cube_extension(state[i]);
let diff =
builder.sub_extension(state_cubed, local_values[col_full_first_mid_sbox(r, i)]);
yield_constr.constraint_wrapping(builder, diff);
yield_constr.constraint(builder, diff);
let state_cubed = local_values[col_full_first_mid_sbox(r, i)];
state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]);
// Form state ** 7.
@ -208,7 +204,7 @@ pub(crate) fn eval_permutation_unit_recursively<F: RichField + Extendable<D>, co
for i in 0..SPONGE_WIDTH {
let diff =
builder.sub_extension(state[i], local_values[col_full_first_after_mds(r, i)]);
yield_constr.constraint_wrapping(builder, diff);
yield_constr.constraint(builder, diff);
state[i] = local_values[col_full_first_after_mds(r, i)];
}
}
@ -218,11 +214,11 @@ pub(crate) fn eval_permutation_unit_recursively<F: RichField + Extendable<D>, co
let state0_cubed = builder.cube_extension(state[0]);
let diff = builder.sub_extension(state0_cubed, local_values[col_partial_mid_sbox(r)]);
yield_constr.constraint_wrapping(builder, diff);
yield_constr.constraint(builder, diff);
let state0_cubed = local_values[col_partial_mid_sbox(r)];
state[0] = builder.mul_many_extension(&[state[0], state0_cubed, state0_cubed]); // Form state ** 7.
let diff = builder.sub_extension(state[0], local_values[col_partial_after_sbox(r)]);
yield_constr.constraint_wrapping(builder, diff);
yield_constr.constraint(builder, diff);
state[0] = local_values[col_partial_after_sbox(r)];
state = F::mds_layer_recursive(builder, &state);
@ -239,7 +235,7 @@ pub(crate) fn eval_permutation_unit_recursively<F: RichField + Extendable<D>, co
let state_cubed = builder.cube_extension(state[i]);
let diff =
builder.sub_extension(state_cubed, local_values[col_full_second_mid_sbox(r, i)]);
yield_constr.constraint_wrapping(builder, diff);
yield_constr.constraint(builder, diff);
let state_cubed = local_values[col_full_second_mid_sbox(r, i)];
state[i] = builder.mul_many_extension(&[state[i], state_cubed, state_cubed]);
// Form state ** 7.
@ -250,7 +246,7 @@ pub(crate) fn eval_permutation_unit_recursively<F: RichField + Extendable<D>, co
for i in 0..SPONGE_WIDTH {
let diff =
builder.sub_extension(state[i], local_values[col_full_second_after_mds(r, i)]);
yield_constr.constraint_wrapping(builder, diff);
yield_constr.constraint(builder, diff);
state[i] = local_values[col_full_second_after_mds(r, i)];
}
}
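The mid-sbox columns exist to keep the constraint degree low: instead of constraining x^7 directly (degree 7), the unit constrains the recorded x^3 and then forms x * (x^3)^2, so no individual constraint exceeds degree 3. A standalone sketch of that identity over the Goldilocks modulus:

const P: u128 = (1 << 64) - (1 << 32) + 1; // Goldilocks prime

fn main() {
    let x: u128 = 1234567891011 % P;
    // Witness column value, checked by `state_cubed - local_values[mid_sbox] == 0`.
    let mid_sbox = x * x % P * x % P;
    // Degree-3 recombination: x * (x^3)^2.
    let x7_via_witness = x * (mid_sbox * mid_sbox % P) % P;
    // Reference: x^7 computed directly.
    let mut x7 = 1u128;
    for _ in 0..7 {
        x7 = x7 * x % P;
    }
    assert_eq!(x7_via_witness, x7);
}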

View File

@ -0,0 +1,57 @@
//! Arithmetic and logic unit.
pub(crate) const IS_ADD: usize = super::START_ALU;
pub(crate) const IS_SUB: usize = IS_ADD + 1;
pub(crate) const IS_MUL: usize = IS_SUB + 1;
pub(crate) const IS_DIV: usize = IS_MUL + 1;
const START_SHARED_COLS: usize = IS_DIV + 1;
/// Within the ALU, there are shared columns which can be used by any arithmetic/logic
/// circuit, depending on which one is active this cycle.
// Can be increased as needed as other operations are implemented.
const NUM_SHARED_COLS: usize = 4;
const fn shared_col(i: usize) -> usize {
debug_assert!(i < NUM_SHARED_COLS);
START_SHARED_COLS + i
}
/// The first value to be added; treated as an unsigned u32.
pub(crate) const COL_ADD_INPUT_0: usize = shared_col(0);
/// The second value to be added; treated as an unsigned u32.
pub(crate) const COL_ADD_INPUT_1: usize = shared_col(1);
/// The third value to be added; treated as an unsigned u32.
pub(crate) const COL_ADD_INPUT_2: usize = shared_col(2);
// Note: Addition outputs three 16-bit chunks, and since these values need to be range-checked
// anyway, we might as well use the range check unit's columns as our addition outputs. So the
// following three columns are just aliases, not columns owned by the ALU.
/// The first 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_ADD_OUTPUT_0: usize = super::range_check_16::col_rc_16_input(0);
/// The second 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_ADD_OUTPUT_1: usize = super::range_check_16::col_rc_16_input(1);
/// The third 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_ADD_OUTPUT_2: usize = super::range_check_16::col_rc_16_input(2);
/// The first value to be multiplied; treated as an unsigned u32.
pub(crate) const COL_MUL_ADD_FACTOR_0: usize = shared_col(0);
/// The second value to be multiplied; treated as an unsigned u32.
pub(crate) const COL_MUL_ADD_FACTOR_1: usize = shared_col(1);
/// The value to be added to the product; treated as an unsigned u32.
pub(crate) const COL_MUL_ADD_ADDEND: usize = shared_col(2);
/// The inverse of `u32::MAX - result_hi`, where `result_hi` is the high 32 bits of the result.
/// See https://hackmd.io/NC-yRmmtRQSvToTHb96e8Q#Checking-element-validity
pub(crate) const COL_MUL_ADD_RESULT_CANONICAL_INV: usize = shared_col(3);
/// The first 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_MUL_ADD_OUTPUT_0: usize = super::range_check_16::col_rc_16_input(0);
/// The second 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_MUL_ADD_OUTPUT_1: usize = super::range_check_16::col_rc_16_input(1);
/// The third 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_MUL_ADD_OUTPUT_2: usize = super::range_check_16::col_rc_16_input(2);
/// The fourth 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_MUL_ADD_OUTPUT_3: usize = super::range_check_16::col_rc_16_input(3);
pub(super) const END: usize = super::START_ALU + NUM_SHARED_COLS;
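Since at most one IS_* flag is set on a given row, different operations can read their inputs from the same shared slots; COL_ADD_INPUT_0 and COL_MUL_ADD_FACTOR_0, for instance, are both shared_col(0). A sketch of that aliasing, assuming a hypothetical START_ALU of 0 (the real offset is fixed in registers/mod.rs):

// Hypothetical layout for illustration only; the real START_ALU comes from registers/mod.rs.
const START_ALU: usize = 0;
const IS_ADD: usize = START_ALU;
const IS_SUB: usize = IS_ADD + 1;
const IS_MUL: usize = IS_SUB + 1;
const IS_DIV: usize = IS_MUL + 1;
const START_SHARED_COLS: usize = IS_DIV + 1;

const fn shared_col(i: usize) -> usize {
    START_SHARED_COLS + i
}

fn main() {
    let col_add_input_0 = shared_col(0);
    let col_mul_add_factor_0 = shared_col(0);
    // Same physical column; which interpretation applies on a row is decided by the flags.
    assert_eq!(col_add_input_0, col_mul_add_factor_0);
    // Four flag columns precede the four shared columns in this range.
    assert_eq!(START_SHARED_COLS - START_ALU, 4);
}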

View File

@ -1,37 +0,0 @@
//! Arithmetic unit.
pub(crate) const IS_ADD: usize = super::START_ARITHMETIC;
pub(crate) const IS_SUB: usize = IS_ADD + 1;
pub(crate) const IS_MUL: usize = IS_SUB + 1;
pub(crate) const IS_DIV: usize = IS_MUL + 1;
const START_SHARED_COLS: usize = IS_DIV + 1;
/// Within the arithmetic unit, there are shared columns which can be used by any arithmetic
/// circuit, depending on which one is active this cycle.
// Can be increased as needed as other operations are implemented.
const NUM_SHARED_COLS: usize = 3;
const fn shared_col(i: usize) -> usize {
debug_assert!(i < NUM_SHARED_COLS);
START_SHARED_COLS + i
}
/// The first value to be added; treated as an unsigned u32.
pub(crate) const COL_ADD_INPUT_1: usize = shared_col(0);
/// The second value to be added; treated as an unsigned u32.
pub(crate) const COL_ADD_INPUT_2: usize = shared_col(1);
/// The third value to be added; treated as an unsigned u32.
pub(crate) const COL_ADD_INPUT_3: usize = shared_col(2);
// Note: Addition outputs three 16-bit chunks, and since these values need to be range-checked
// anyway, we might as well use the range check unit's columns as our addition outputs. So the
// three proceeding columns are basically aliases, not columns owned by the arithmetic unit.
/// The first 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_ADD_OUTPUT_1: usize = super::range_check_16::col_rc_16_input(0);
/// The second 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_ADD_OUTPUT_2: usize = super::range_check_16::col_rc_16_input(1);
/// The third 16-bit chunk of the output, based on little-endian ordering.
pub(crate) const COL_ADD_OUTPUT_3: usize = super::range_check_16::col_rc_16_input(2);
pub(super) const END: usize = super::START_ARITHMETIC + NUM_SHARED_COLS;

View File

@ -1,4 +1,4 @@
pub(crate) mod arithmetic;
pub(crate) mod alu;
pub(crate) mod boolean;
pub(crate) mod core;
pub(crate) mod logic;
@ -8,8 +8,8 @@ pub(crate) mod permutation;
pub(crate) mod range_check_16;
pub(crate) mod range_check_degree;
const START_ARITHMETIC: usize = 0;
const START_BOOLEAN: usize = arithmetic::END;
const START_ALU: usize = 0;
const START_BOOLEAN: usize = alu::END;
const START_CORE: usize = boolean::END;
const START_LOGIC: usize = core::END;
const START_LOOKUP: usize = logic::END;

View File

@ -10,9 +10,7 @@ use starky::stark::Stark;
use starky::vars::StarkEvaluationTargets;
use starky::vars::StarkEvaluationVars;
use crate::arithmetic::{
eval_arithmetic_unit, eval_arithmetic_unit_recursively, generate_arithmetic_unit,
};
use crate::alu::{eval_alu, eval_alu_recursively, generate_alu};
use crate::core_registers::{
eval_core_registers, eval_core_registers_recursively, generate_first_row_core_registers,
generate_next_row_core_registers,
@ -38,7 +36,7 @@ impl<F: RichField + Extendable<D>, const D: usize> SystemZero<F, D> {
let mut row = [F::ZERO; NUM_COLUMNS];
generate_first_row_core_registers(&mut row);
generate_arithmetic_unit(&mut row);
generate_alu(&mut row);
generate_permutation_unit(&mut row);
let mut trace = Vec::with_capacity(MIN_TRACE_ROWS);
@ -46,7 +44,7 @@ impl<F: RichField + Extendable<D>, const D: usize> SystemZero<F, D> {
loop {
let mut next_row = [F::ZERO; NUM_COLUMNS];
generate_next_row_core_registers(&row, &mut next_row);
generate_arithmetic_unit(&mut next_row);
generate_alu(&mut next_row);
generate_permutation_unit(&mut next_row);
trace.push(row);
@ -84,7 +82,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for SystemZero<F,
P: PackedField<Scalar = FE>,
{
eval_core_registers(vars, yield_constr);
eval_arithmetic_unit(vars, yield_constr);
eval_alu(vars, yield_constr);
eval_permutation_unit::<F, FE, P, D2>(vars, yield_constr);
// TODO: Other units
}
@ -96,7 +94,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for SystemZero<F,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
eval_core_registers_recursively(builder, vars, yield_constr);
eval_arithmetic_unit_recursively(builder, vars, yield_constr);
eval_alu_recursively(builder, vars, yield_constr);
eval_permutation_unit_recursively(builder, vars, yield_constr);
// TODO: Other units
}