Simplify stack bounds constraints

This commit is contained in:
Jacqueline Nabaglo 2023-06-07 18:27:23 -07:00
parent 0117541909
commit 6920992e01
8 changed files with 52 additions and 209 deletions

View File

@ -8,7 +8,6 @@ use std::mem::{size_of, transmute};
pub(crate) union CpuGeneralColumnsView<T: Copy> {
arithmetic: CpuArithmeticView<T>,
exception: CpuExceptionView<T>,
exit_kernel: CpuExitKernelView<T>,
logic: CpuLogicView<T>,
jumps: CpuJumpsView<T>,
shift: CpuShiftView<T>,
@ -35,16 +34,6 @@ impl<T: Copy> CpuGeneralColumnsView<T> {
unsafe { &mut self.exception }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn exit_kernel(&self) -> &CpuExitKernelView<T> {
unsafe { &self.exit_kernel }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn exit_kernel_mut(&mut self) -> &mut CpuExitKernelView<T> {
unsafe { &mut self.exit_kernel }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn logic(&self) -> &CpuLogicView<T> {
unsafe { &self.logic }
@ -117,11 +106,6 @@ pub(crate) struct CpuExceptionView<T: Copy> {
pub(crate) exc_code_bits: [T; 3],
}
#[derive(Copy, Clone)]
pub(crate) struct CpuExitKernelView<T: Copy> {
pub(crate) stack_len_check_aux: T,
}
#[derive(Copy, Clone)]
pub(crate) struct CpuLogicView<T: Copy> {
// Pseudoinverse of `(input0 - input1)`. Used to prove that they are unequal. Assumes 32-bit limbs.

View File

@ -169,7 +169,6 @@ impl<F: RichField, const D: usize> CpuStark<F, D> {
let local_values: &mut CpuColumnsView<_> = local_values.borrow_mut();
decode::generate(local_values);
membus::generate(local_values);
stack_bounds::generate(local_values); // Must come after `decode`.
}
}

View File

@ -25,19 +25,6 @@ pub fn eval_packed_exit_kernel<P: PackedField>(
yield_constr.constraint_transition(filter * (input[6] - nv.gas));
// High limb of gas must be 0 for convenient detection of overflow.
yield_constr.constraint(filter * input[7]);
// Check that the syscall did not cause a stack overflow.
// A standard EVM instruction increases the length of the user's stack by at most one. We assume
// that the stack length was valid when we entered the syscall (i.e. it was <= 1024). If the
// syscall caused the stack to overflow, then the stack length is 1025 upon exit into userspace;
// that corresponds to a stack length of 1026 before `EXIT_KERNEL` is executed.
// This constraint ensures that the current stack length is not 1026 if we're returning to user
// mode. If we're remaining in kernel mode upon return from this syscall, the RHS will be 0, so
// the constraint is trivially satisfied by setting `stack_len_check_aux = 0`.
let offset_stack_len = lv.stack_len - P::Scalar::from_canonical_u32(1026);
let lhs = offset_stack_len * lv.general.exit_kernel().stack_len_check_aux;
let rhs = P::ONES - input[1];
yield_constr.constraint(filter * (lhs - rhs));
}
pub fn eval_ext_circuit_exit_kernel<F: RichField + Extendable<D>, const D: usize>(
@ -71,22 +58,6 @@ pub fn eval_ext_circuit_exit_kernel<F: RichField + Extendable<D>, const D: usize
let constr = builder.mul_extension(filter, input[7]);
yield_constr.constraint(builder, constr);
}
// Check that the syscall did not cause a stack overflow.
// See detailed comments in the packed implementation.
{
let stack_len_check_aux = lv.general.exit_kernel().stack_len_check_aux;
let lhs = builder.arithmetic_extension(
F::ONE,
-F::from_canonical_u32(1026),
lv.stack_len,
stack_len_check_aux,
stack_len_check_aux,
);
let constr = builder.add_extension(lhs, input[1]);
let constr = builder.mul_sub_extension(filter, constr, filter);
yield_constr.constraint(builder, constr);
}
}
pub fn eval_packed_jump_jumpi<P: PackedField>(

View File

@ -72,7 +72,11 @@ const STACK_BEHAVIORS: OpsColumnsView<Option<StackBehavior>> = OpsColumnsView {
}),
keccak_general: None, // TODO
prover_input: None, // TODO
pop: None, // TODO
pop: Some(StackBehavior {
num_pops: 1,
pushes: false,
disable_other_channels: true,
}),
jump: Some(StackBehavior {
num_pops: 1,
pushes: false,

View File

@ -1,14 +1,13 @@
//! Checks for stack underflow and overflow.
//! Checks for stack overflow.
//!
//! The constraints defined herein validate that stack exceptions (underflow and overflow) do not
//! occur. For example, if `is_add` is set but an addition would underflow, these constraints would
//! make the proof unverifiable.
//! The constraints defined herein validate that stack overflow did not occur. For example, if `dup`
//! is set but the copy would overflow, these constraints would make the proof unverifiable.
//!
//! Faults are handled under a separate operation flag, `is_exception` (this is still TODO), which
//! traps to the kernel. The kernel then handles the exception. However, before it may do so, the
//! kernel must verify in software that an exception did in fact occur (i.e. the trap was
//! warranted) and `PANIC` otherwise; this prevents the prover from faking an exception on a valid
//! operation.
//! Faults are handled under a separate operation flag, `exception`, which traps to the kernel. The
//! kernel then handles the exception. However, before it may do so, it must verify in software that
//! an exception did in fact occur (i.e. the trap was warranted) and `PANIC` otherwise; this
//! prevents the prover from faking an exception on a valid operation.
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
@ -17,88 +16,24 @@ use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::{CpuColumnsView, COL_MAP};
use crate::cpu::columns::CpuColumnsView;
pub const MAX_USER_STACK_SIZE: usize = 1024;
// Below only includes the operations that pop the top of the stack **without reading the value from
// memory**, i.e. `POP`.
// Other operations that have a minimum stack size (e.g. `MULMOD`, which has three inputs) read
// all their inputs from memory. On underflow, the cross-table lookup fails, as -1, ..., -17 are
// invalid memory addresses.
pub(crate) const DECREMENTING_FLAGS: [usize; 1] = [COL_MAP.op.pop];
// Operations that increase the stack length by 1, but excluding:
// - privileged (kernel-only) operations (superfluous; doesn't affect correctness),
// - operations that trap from userspace to the kernel (required for correctness).
// TODO: This list is incomplete.
pub(crate) const INCREMENTING_FLAGS: [usize; 2] = [COL_MAP.op.pc, COL_MAP.op.dup];
/// Calculates `lv.stack_len_bounds_aux`. Note that this must be run after decode.
pub fn generate<F: Field>(lv: &mut CpuColumnsView<F>) {
let cycle_filter = lv.is_cpu_cycle;
if cycle_filter == F::ZERO {
return;
}
let check_underflow: F = DECREMENTING_FLAGS.map(|i| lv[i]).into_iter().sum();
let check_overflow: F = INCREMENTING_FLAGS.map(|i| lv[i]).into_iter().sum();
let no_check = F::ONE - (check_underflow + check_overflow);
let disallowed_len = check_overflow * F::from_canonical_usize(MAX_USER_STACK_SIZE) - no_check;
let diff = lv.stack_len - disallowed_len;
let user_mode = F::ONE - lv.is_kernel_mode;
let rhs = user_mode + check_underflow;
lv.stack_len_bounds_aux = match diff.try_inverse() {
Some(diff_inv) => diff_inv * rhs, // `rhs` may be a value other than 1 or 0
None => {
assert_eq!(rhs, F::ZERO);
F::ZERO
}
}
}
pub fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
// `check_underflow`, `check_overflow`, and `no_check` are mutually exclusive.
let check_underflow: P = DECREMENTING_FLAGS.map(|i| lv[i]).into_iter().sum();
let check_overflow: P = INCREMENTING_FLAGS.map(|i| lv[i]).into_iter().sum();
let no_check = P::ONES - (check_underflow + check_overflow);
// If we're in user mode, ensure that the stack length is not 1025. Note that a stack length of
// 1024 is valid. 1025 means we've gone one over, which is necessary for overflow, as an EVM
// opcode increases the stack length by at most one.
// If `check_underflow`, then the instruction we are executing pops a value from the stack
// without reading it from memory, and the usual underflow checks do not work. We must show that
// `lv.stack_len` is not 0. We choose to perform this check whether or not we're in kernel mode.
// (The check in kernel mode is not necessary if the kernel is correct, but this is an easy
// sanity check.)
// If `check_overflow`, then the instruction we are executing increases the stack length by 1.
// If we are in user mode, then we must show that the stack length is not currently
// `MAX_USER_STACK_SIZE`, as this is the maximum for the user stack. Note that this check must
// not run in kernel mode as the kernel's stack length is unrestricted.
// If `no_check`, then we don't need to check anything. The constraint is written to always
// test that `lv.stack_len` does not equal _something_ so we just show that it's not -1, which
// is always true.
let filter = lv.is_cpu_cycle;
let diff = lv.stack_len - P::Scalar::from_canonical_usize(MAX_USER_STACK_SIZE + 1);
let lhs = diff * lv.stack_len_bounds_aux;
let rhs = P::ONES - lv.is_kernel_mode;
// 0 if `check_underflow`, `MAX_USER_STACK_SIZE` if `check_overflow`, and -1 if `no_check`.
let disallowed_len =
check_overflow * P::Scalar::from_canonical_usize(MAX_USER_STACK_SIZE) - no_check;
// This `lhs` must equal some `rhs`. If `rhs` is nonzero, then this shows that `lv.stack_len` is
// not `disallowed_len`.
let lhs = (lv.stack_len - disallowed_len) * lv.stack_len_bounds_aux;
// We want this constraint to be active if we're in user mode OR the instruction might overflow.
// (In other words, we want to _skip_ overflow checks in kernel mode).
let user_mode = P::ONES - lv.is_kernel_mode;
// `rhs` may be 0, 1, or 2. It's 0 if we're in kernel mode and we would be checking for
// overflow.
// Note: if `user_mode` and `check_underflow`, then `rhs` is 2. This is fine: we're still
// showing that `lv.stack_len - disallowed_len` is nonzero.
let rhs = user_mode + check_underflow;
yield_constr.constraint(lv.is_cpu_cycle * (lhs - rhs));
yield_constr.constraint(filter * (lhs - rhs));
}
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
@ -106,52 +41,20 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
lv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let one = builder.one_extension();
let max_stack_size =
builder.constant_extension(F::from_canonical_usize(MAX_USER_STACK_SIZE).into());
// If we're in user mode, ensure that the stack length is not 1025. Note that a stack length of
// 1024 is valid. 1025 means we've gone one over, which is necessary for overflow, as an EVM
// opcode increases the stack length by at most one.
// `check_underflow`, `check_overflow`, and `no_check` are mutually exclusive.
let check_underflow = builder.add_many_extension(DECREMENTING_FLAGS.map(|i| lv[i]));
let check_overflow = builder.add_many_extension(INCREMENTING_FLAGS.map(|i| lv[i]));
let no_check = {
let any_check = builder.add_extension(check_underflow, check_overflow);
builder.sub_extension(one, any_check)
};
let filter = lv.is_cpu_cycle;
// If `check_underflow`, then the instruction we are executing pops a value from the stack
// without reading it from memory, and the usual underflow checks do not work. We must show that
// `lv.stack_len` is not 0. We choose to perform this check whether or not we're in kernel mode.
// (The check in kernel mode is not necessary if the kernel is correct, but this is an easy
// sanity check.)
// If `check_overflow`, then the instruction we are executing increases the stack length by 1.
// If we are in user mode, then we must show that the stack length is not currently
// `MAX_USER_STACK_SIZE`, as this is the maximum for the user stack. Note that this check must
// not run in kernel mode as the kernel's stack length is unrestricted.
// If `no_check`, then we don't need to check anything. The constraint is written to always
// test that `lv.stack_len` does not equal _something_ so we just show that it's not -1, which
// is always true.
// 0 if `check_underflow`, `MAX_USER_STACK_SIZE` if `check_overflow`, and -1 if `no_check`.
let disallowed_len = builder.mul_sub_extension(check_overflow, max_stack_size, no_check);
// This `lhs` must equal some `rhs`. If `rhs` is nonzero, then this shows that `lv.stack_len` is
// not `disallowed_len`.
let lhs = {
let diff = builder.sub_extension(lv.stack_len, disallowed_len);
builder.mul_extension(diff, lv.stack_len_bounds_aux)
};
// We want this constraint to be active if we're in user mode OR the instruction might overflow.
// (In other words, we want to _skip_ overflow checks in kernel mode).
let user_mode = builder.sub_extension(one, lv.is_kernel_mode);
// `rhs` may be 0, 1, or 2. It's 0 if we're in kernel mode and we would be checking for
// overflow.
// Note: if `user_mode` and `check_underflow`, then `rhs` is 2. This is fine: we're still
// showing that `lv.stack_len - disallowed_len` is nonzero.
let rhs = builder.add_extension(user_mode, check_underflow);
let constr = {
let diff = builder.sub_extension(lhs, rhs);
builder.mul_extension(lv.is_cpu_cycle, diff)
};
let lhs = builder.arithmetic_extension(
F::ONE,
-F::from_canonical_usize(MAX_USER_STACK_SIZE + 1),
lv.stack_len,
lv.stack_len_bounds_aux,
lv.stack_len_bounds_aux,
);
let constr = builder.add_extension(lhs, lv.is_kernel_mode);
let constr = builder.mul_sub_extension(filter, constr, filter);
yield_constr.constraint(builder, constr);
}

View File

@ -12,6 +12,7 @@ pub enum ProgramError {
KernelPanic,
MemoryError(MemoryError),
GasLimitError,
InterpreterError,
}
#[allow(clippy::enum_variant_names)]

View File

@ -161,13 +161,11 @@ pub(crate) fn generate_prover_input<F: Field>(
pub(crate) fn generate_pop<F: Field>(
state: &mut GenerationState<F>,
row: CpuColumnsView<F>,
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
if state.registers.stack_len == 0 {
return Err(ProgramError::StackUnderflow);
}
let [(_, log_in)] = stack_pop_with_log_and_fill::<1, _>(state, &mut row)?;
state.registers.stack_len -= 1;
state.traces.push_memory(log_in);
state.traces.push_cpu(row);
Ok(())
}
@ -605,19 +603,7 @@ pub(crate) fn generate_exit_kernel<F: Field>(
let is_kernel_mode = is_kernel_mode_val != 0;
let gas_used_val = kexit_info.0[3];
if TryInto::<u32>::try_into(gas_used_val).is_err() {
panic!();
}
if is_kernel_mode {
row.general.exit_kernel_mut().stack_len_check_aux = F::ZERO;
} else {
let diff =
F::from_canonical_usize(state.registers.stack_len + 1) - F::from_canonical_u32(1026);
if let Some(inv) = diff.try_inverse() {
row.general.exit_kernel_mut().stack_len_check_aux = inv;
} else {
panic!("stack overflow; should have been caught before entering the syscall")
}
return Err(ProgramError::GasLimitError);
}
state.registers.program_counter = program_counter;

View File

@ -4,7 +4,7 @@ use plonky2::field::types::Field;
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::stack_bounds::{DECREMENTING_FLAGS, INCREMENTING_FLAGS, MAX_USER_STACK_SIZE};
use crate::cpu::stack_bounds::MAX_USER_STACK_SIZE;
use crate::generation::state::GenerationState;
use crate::memory::segments::Segment;
use crate::witness::errors::ProgramError;
@ -270,23 +270,18 @@ fn try_perform_instruction<F: Field>(state: &mut GenerationState<F>) -> Result<(
fill_op_flag(op, &mut row);
let check_underflow: F = DECREMENTING_FLAGS.map(|i| row[i]).into_iter().sum();
let check_overflow: F = INCREMENTING_FLAGS.map(|i| row[i]).into_iter().sum();
let no_check = F::ONE - (check_underflow + check_overflow);
let disallowed_len = check_overflow * F::from_canonical_usize(MAX_USER_STACK_SIZE) - no_check;
let diff = row.stack_len - disallowed_len;
let user_mode = F::ONE - row.is_kernel_mode;
let rhs = user_mode + check_underflow;
row.stack_len_bounds_aux = match diff.try_inverse() {
Some(diff_inv) => diff_inv * rhs, // `rhs` may be a value other than 1 or 0
None => {
assert_eq!(rhs, F::ZERO);
F::ZERO
if state.registers.is_kernel {
row.stack_len_bounds_aux = F::ZERO;
} else {
let disallowed_len = F::from_canonical_usize(MAX_USER_STACK_SIZE + 1);
let diff = row.stack_len - disallowed_len;
if let Some(inv) = diff.try_inverse() {
row.stack_len_bounds_aux = inv;
} else {
// This is a stack overflow that should have been caught earlier.
return Err(ProgramError::InterpreterError);
}
};
}
perform_op(state, op, row)
}