Stack pointer + underflow/overflow checks (#710)

* Stack pointer + underflow/overflow checks

* Daniel comments

* Extra docs
Jacqueline Nabaglo 2022-09-10 13:20:30 -07:00 committed by GitHub
parent 09f062481a
commit cae5f4870c
5 changed files with 199 additions and 12 deletions

@@ -38,6 +38,13 @@ pub struct CpuColumnsView<T: Copy> {
     /// If CPU cycle: The program counter for the current instruction.
     pub program_counter: T,
+    /// If CPU cycle: The stack length.
+    pub stack_len: T,
+    /// If CPU cycle: A prover-provided value needed to show that the instruction does not cause the
+    /// stack to underflow or overflow.
+    pub stack_len_bounds_aux: T,
     /// If CPU cycle: We're in kernel (privileged) mode.
     pub is_kernel_mode: T,
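
The new `stack_len_bounds_aux` column is a witness for the standard field trick of proving a value nonzero by exhibiting an inverse. Sketching the algebra that the new `stack_bounds` module (below) enforces on each CPU-cycle row:

\[
\big(\mathtt{stack\_len} - \mathtt{disallowed\_len}\big)\cdot \mathtt{stack\_len\_bounds\_aux} \;=\; \mathtt{rhs}
\]

When `rhs` is nonzero, a satisfying value of `stack_len_bounds_aux` exists exactly when `stack_len - disallowed_len` is invertible, i.e. when `stack_len` differs from the one disallowed length; the prover computes it as `rhs / (stack_len - disallowed_len)`.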

@@ -68,14 +68,16 @@ pub fn eval_packed_generic<P: PackedField>(
         lv.is_cpu_cycle * is_native_instruction * (lv.is_kernel_mode - nv.is_kernel_mode),
     );
-    // If a non-CPU cycle row is followed by a CPU cycle row, then the `program_counter` of the CPU
-    // cycle row is route_txn (the entry point of our kernel) and it is in kernel mode.
+    // If a non-CPU cycle row is followed by a CPU cycle row, then:
+    // - the `program_counter` of the CPU cycle row is `route_txn` (the entry point of our kernel),
+    // - execution is in kernel mode, and
+    // - the stack is empty.
+    let is_last_noncpu_cycle = (lv.is_cpu_cycle - P::ONES) * nv.is_cpu_cycle;
     let pc_diff =
         nv.program_counter - P::Scalar::from_canonical_usize(KERNEL.global_labels["route_txn"]);
-    yield_constr.constraint_transition((lv.is_cpu_cycle - P::ONES) * nv.is_cpu_cycle * pc_diff);
-    yield_constr.constraint_transition(
-        (lv.is_cpu_cycle - P::ONES) * nv.is_cpu_cycle * (nv.is_kernel_mode - P::ONES),
-    );
+    yield_constr.constraint_transition(is_last_noncpu_cycle * pc_diff);
+    yield_constr.constraint_transition(is_last_noncpu_cycle * (nv.is_kernel_mode - P::ONES));
+    yield_constr.constraint_transition(is_last_noncpu_cycle * nv.stack_len);
     // The last row must be a CPU cycle row.
     yield_constr.constraint_last_row(lv.is_cpu_cycle - P::ONES);
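
In equation form (a paraphrase, writing \(c\) for `lv.is_cpu_cycle`, \(c'\) for `nv.is_cpu_cycle`, \(k'\) for `nv.is_kernel_mode`, and \(\ell'\) for `nv.stack_len`), the three transition constraints above are

\[
(c - 1)\,c'\,(\mathit{pc}' - \mathtt{route\_txn}) = 0, \qquad
(c - 1)\,c'\,(k' - 1) = 0, \qquad
(c - 1)\,c'\,\ell' = 0.
\]

Since \(c\) and \(c'\) are boolean, the factor \((c - 1)\,c'\) equals \(-1\) exactly when a non-CPU-cycle row is followed by a CPU-cycle row and vanishes otherwise, so that first CPU-cycle row must have `program_counter = route_txn`, be in kernel mode, and start with an empty stack.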
@@ -115,17 +117,32 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
         yield_constr.constraint_transition(builder, kernel_constr);
     }
-    // If a non-CPU cycle row is followed by a CPU cycle row, then the `program_counter` of the CPU
-    // cycle row is route_txn (the entry point of our kernel) and it is in kernel mode.
+    // If a non-CPU cycle row is followed by a CPU cycle row, then:
+    // - the `program_counter` of the CPU cycle row is `route_txn` (the entry point of our kernel),
+    // - execution is in kernel mode, and
+    // - the stack is empty.
     {
-        let filter = builder.mul_sub_extension(lv.is_cpu_cycle, nv.is_cpu_cycle, nv.is_cpu_cycle);
+        let is_last_noncpu_cycle =
+            builder.mul_sub_extension(lv.is_cpu_cycle, nv.is_cpu_cycle, nv.is_cpu_cycle);
+        // Start at `route_txn`.
         let route_txn = builder.constant_extension(F::Extension::from_canonical_usize(
             KERNEL.global_labels["route_txn"],
         ));
         let pc_diff = builder.sub_extension(nv.program_counter, route_txn);
-        let pc_constr = builder.mul_extension(filter, pc_diff);
+        let pc_constr = builder.mul_extension(is_last_noncpu_cycle, pc_diff);
         yield_constr.constraint_transition(builder, pc_constr);
-        let kernel_constr = builder.mul_sub_extension(filter, nv.is_kernel_mode, filter);
+        // Start in kernel mode.
+        let kernel_constr = builder.mul_sub_extension(
+            is_last_noncpu_cycle,
+            nv.is_kernel_mode,
+            is_last_noncpu_cycle,
+        );
         yield_constr.constraint_transition(builder, kernel_constr);
+        // Start with an empty stack.
+        let kernel_constr = builder.mul_extension(is_last_noncpu_cycle, nv.stack_len);
+        yield_constr.constraint_transition(builder, kernel_constr);
     }
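
The recursive version is a gate-for-gate translation of the packed one: `mul_sub_extension(a, b, c)` evaluates \(ab - c\), so, writing \(f\) for `is_last_noncpu_cycle`,

\[
\mathtt{mul\_sub}(c, c', c') = c\,c' - c' = (c - 1)\,c', \qquad
\mathtt{mul\_sub}(f, k', f) = f\,k' - f = f\,(k' - 1),
\]

which are exactly the products formed directly on packed values in `eval_packed_generic`.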

@@ -9,7 +9,9 @@ use plonky2::hash::hash_types::RichField;
 use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
 use crate::cpu::columns::{CpuColumnsView, COL_MAP, NUM_CPU_COLUMNS};
-use crate::cpu::{bootstrap_kernel, control_flow, decode, jumps, simple_logic, syscalls};
+use crate::cpu::{
+    bootstrap_kernel, control_flow, decode, jumps, simple_logic, stack_bounds, syscalls,
+};
 use crate::cross_table_lookup::Column;
 use crate::memory::NUM_CHANNELS;
 use crate::stark::Stark;
@@ -94,6 +96,7 @@ impl<F: RichField, const D: usize> CpuStark<F, D> {
         let local_values: &mut CpuColumnsView<_> = local_values.borrow_mut();
         decode::generate(local_values);
         simple_logic::generate(local_values);
+        stack_bounds::generate(local_values); // Must come after `decode`.
     }
 }
@@ -115,6 +118,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for CpuStark<F, D
         decode::eval_packed_generic(local_values, yield_constr);
         jumps::eval_packed(local_values, next_values, yield_constr);
         simple_logic::eval_packed(local_values, yield_constr);
+        stack_bounds::eval_packed(local_values, yield_constr);
         syscalls::eval_packed(local_values, next_values, yield_constr);
     }
@@ -131,6 +135,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for CpuStark<F, D
         decode::eval_ext_circuit(builder, local_values, yield_constr);
         jumps::eval_ext_circuit(builder, local_values, next_values, yield_constr);
         simple_logic::eval_ext_circuit(builder, local_values, yield_constr);
+        stack_bounds::eval_ext_circuit(builder, local_values, yield_constr);
         syscalls::eval_ext_circuit(builder, local_values, next_values, yield_constr);
     }

@@ -6,4 +6,5 @@ pub(crate) mod decode;
 mod jumps;
 pub mod kernel;
 mod simple_logic;
+mod stack_bounds;
 mod syscalls;

evm/src/cpu/stack_bounds.rs (new file, 157 lines)

//! Checks for stack underflow and overflow.
//!
//! The constraints defined herein validate that stack exceptions (underflow and overflow) do not
//! occur. For example, if `is_add` is set but an addition would underflow, these constraints would
//! make the proof unverifiable.
//!
//! Faults are handled under a separate operation flag, `is_exception` (this is still TODO), which
//! traps to the kernel. The kernel then handles the exception. However, before it may do so, the
//! kernel must verify in software that an exception did in fact occur (i.e. the trap was
//! warranted) and `PANIC` otherwise; this prevents the prover from faking an exception on a valid
//! operation.

use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;

use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::{CpuColumnsView, COL_MAP};

const MAX_USER_STACK_SIZE: u64 = 1024;

// Below only includes the operations that pop the top of the stack **without reading the value from
// memory**, i.e. `POP`.
// Other operations that have a minimum stack size (e.g. `MULMOD`, which has three inputs) read
// all their inputs from memory. On underflow, the cross-table lookup fails, as -1, ..., -17 are
// invalid memory addresses.
const DECREMENTING_FLAGS: [usize; 1] = [COL_MAP.is_pop];

// Operations that increase the stack length by 1, but excluding:
// - privileged (kernel-only) operations (superfluous; doesn't affect correctness),
// - operations that transfer control from userspace to the kernel (required for correctness).
// TODO: This list is incomplete.
const INCREMENTING_FLAGS: [usize; 2] = [COL_MAP.is_pc, COL_MAP.is_dup];

/// Calculates `lv.stack_len_bounds_aux`. Note that this must be run after decode.
pub fn generate<F: Field>(lv: &mut CpuColumnsView<F>) {
    let cycle_filter = lv.is_cpu_cycle;
    if cycle_filter == F::ZERO {
        return;
    }

    let check_underflow: F = DECREMENTING_FLAGS.map(|i| lv[i]).into_iter().sum();
    let check_overflow: F = INCREMENTING_FLAGS.map(|i| lv[i]).into_iter().sum();
    let no_check = F::ONE - (check_underflow + check_overflow);
    let disallowed_len = check_overflow * F::from_canonical_u64(MAX_USER_STACK_SIZE) - no_check;
    let diff = lv.stack_len - disallowed_len;

    let user_mode = F::ONE - lv.is_kernel_mode;
    let rhs = user_mode + check_underflow;

    lv.stack_len_bounds_aux = match diff.try_inverse() {
        Some(diff_inv) => diff_inv * rhs, // `rhs` may be a value other than 1 or 0
        None => {
            assert_eq!(rhs, F::ZERO);
            F::ZERO
        }
    }
}

pub fn eval_packed<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // `check_underflow`, `check_overflow`, and `no_check` are mutually exclusive.
    let check_underflow: P = DECREMENTING_FLAGS.map(|i| lv[i]).into_iter().sum();
    let check_overflow: P = INCREMENTING_FLAGS.map(|i| lv[i]).into_iter().sum();
    let no_check = P::ONES - (check_underflow + check_overflow);

    // If `check_underflow`, then the instruction we are executing pops a value from the stack
    // without reading it from memory, and the usual underflow checks do not work. We must show that
    // `lv.stack_len` is not 0. We choose to perform this check whether or not we're in kernel mode.
    // (The check in kernel mode is not necessary if the kernel is correct, but this is an easy
    // sanity check.)
    // If `check_overflow`, then the instruction we are executing increases the stack length by 1.
    // If we are in user mode, then we must show that the stack length is not currently
    // `MAX_USER_STACK_SIZE`, as this is the maximum for the user stack. Note that this check must
    // not run in kernel mode, as the kernel's stack length is unrestricted.
    // If `no_check`, then we don't need to check anything. The constraint is written to always
    // test that `lv.stack_len` does not equal _something_, so we just show that it's not -1, which
    // is always true.
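    // Case summary in user mode (`is_kernel_mode = 0`), with boolean flag values:
    //   check_underflow = 1: disallowed_len = 0,                   rhs = 2, proves stack_len != 0
    //   check_overflow  = 1: disallowed_len = MAX_USER_STACK_SIZE, rhs = 1, proves stack_len != 1024
    //   no_check        = 1: disallowed_len = -1,                  rhs = 1, trivially satisfiable
    // In kernel mode `rhs` drops by 1, so the overflow and no-check cases become vacuous.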

    // 0 if `check_underflow`, `MAX_USER_STACK_SIZE` if `check_overflow`, and -1 if `no_check`.
    let disallowed_len =
        check_overflow * P::Scalar::from_canonical_u64(MAX_USER_STACK_SIZE) - no_check;

    // This `lhs` must equal some `rhs`. If `rhs` is nonzero, then this shows that `lv.stack_len` is
    // not `disallowed_len`.
    let lhs = (lv.stack_len - disallowed_len) * lv.stack_len_bounds_aux;

    // We want this constraint to be active if we're in user mode OR the instruction might
    // underflow. (In other words, we want to _skip_ overflow checks in kernel mode.)
    let user_mode = P::ONES - lv.is_kernel_mode;
    // `rhs` may be 0, 1, or 2. It's 0 if we're in kernel mode and we would be checking for
    // overflow.
    // Note: if `user_mode` and `check_underflow`, then `rhs` is 2. This is fine: we're still
    // showing that `lv.stack_len - disallowed_len` is nonzero.
    let rhs = user_mode + check_underflow;

    yield_constr.constraint(lv.is_cpu_cycle * (lhs - rhs));
}

pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let one = builder.one_extension();
    let max_stack_size =
        builder.constant_extension(F::from_canonical_u64(MAX_USER_STACK_SIZE).into());

    // `check_underflow`, `check_overflow`, and `no_check` are mutually exclusive.
    let check_underflow = builder.add_many_extension(DECREMENTING_FLAGS.map(|i| lv[i]));
    let check_overflow = builder.add_many_extension(INCREMENTING_FLAGS.map(|i| lv[i]));
    let no_check = {
        let any_check = builder.add_extension(check_underflow, check_overflow);
        builder.sub_extension(one, any_check)
    };

    // If `check_underflow`, then the instruction we are executing pops a value from the stack
    // without reading it from memory, and the usual underflow checks do not work. We must show that
    // `lv.stack_len` is not 0. We choose to perform this check whether or not we're in kernel mode.
    // (The check in kernel mode is not necessary if the kernel is correct, but this is an easy
    // sanity check.)
    // If `check_overflow`, then the instruction we are executing increases the stack length by 1.
    // If we are in user mode, then we must show that the stack length is not currently
    // `MAX_USER_STACK_SIZE`, as this is the maximum for the user stack. Note that this check must
    // not run in kernel mode, as the kernel's stack length is unrestricted.
    // If `no_check`, then we don't need to check anything. The constraint is written to always
    // test that `lv.stack_len` does not equal _something_, so we just show that it's not -1, which
    // is always true.
    // 0 if `check_underflow`, `MAX_USER_STACK_SIZE` if `check_overflow`, and -1 if `no_check`.
    let disallowed_len = builder.mul_sub_extension(check_overflow, max_stack_size, no_check);

    // This `lhs` must equal some `rhs`. If `rhs` is nonzero, then this shows that `lv.stack_len` is
    // not `disallowed_len`.
    let lhs = {
        let diff = builder.sub_extension(lv.stack_len, disallowed_len);
        builder.mul_extension(diff, lv.stack_len_bounds_aux)
    };

    // We want this constraint to be active if we're in user mode OR the instruction might
    // underflow. (In other words, we want to _skip_ overflow checks in kernel mode.)
    let user_mode = builder.sub_extension(one, lv.is_kernel_mode);
    // `rhs` may be 0, 1, or 2. It's 0 if we're in kernel mode and we would be checking for
    // overflow.
    // Note: if `user_mode` and `check_underflow`, then `rhs` is 2. This is fine: we're still
    // showing that `lv.stack_len - disallowed_len` is nonzero.
    let rhs = builder.add_extension(user_mode, check_underflow);

    let constr = {
        let diff = builder.sub_extension(lhs, rhs);
        builder.mul_extension(lv.is_cpu_cycle, diff)
    };
    yield_constr.constraint(builder, constr);
}
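
To see the prover/verifier relationship end to end, here is a minimal standalone sketch (not part of the commit) of `generate` and the packed constraint, using bare Goldilocks-field arithmetic in place of plonky2's field types; the helper names (`make_aux`, `constraint_holds`) and the sample values are illustrative only.

// Goldilocks prime p = 2^64 - 2^32 + 1, the field plonky2 works over.
const P: u128 = 0xFFFF_FFFF_0000_0001;

fn sub(a: u128, b: u128) -> u128 {
    (a % P + P - b % P) % P
}

fn mul(a: u128, b: u128) -> u128 {
    // (p - 1)^2 < 2^128, so the product fits in a u128.
    (a % P) * (b % P) % P
}

// Modular inverse via Fermat's little theorem; `None` for zero, which has none.
fn inv(a: u128) -> Option<u128> {
    if a % P == 0 {
        return None;
    }
    let (mut base, mut exp, mut acc) = (a % P, P - 2, 1u128);
    while exp > 0 {
        if exp & 1 == 1 {
            acc = mul(acc, base);
        }
        base = mul(base, base);
        exp >>= 1;
    }
    Some(acc)
}

/// Prover side, mirroring `generate`: the auxiliary witness is
/// `rhs / (stack_len - disallowed_len)` whenever the difference is invertible.
fn make_aux(stack_len: u128, disallowed_len: u128, rhs: u128) -> u128 {
    match inv(sub(stack_len, disallowed_len)) {
        Some(diff_inv) => mul(diff_inv, rhs),
        None => {
            assert_eq!(rhs, 0, "a zero difference is only allowed when rhs = 0");
            0
        }
    }
}

/// Verifier side, mirroring the packed constraint:
/// `(stack_len - disallowed_len) * stack_len_bounds_aux - rhs == 0`.
fn constraint_holds(stack_len: u128, disallowed_len: u128, rhs: u128, aux: u128) -> bool {
    sub(mul(sub(stack_len, disallowed_len), aux), rhs) == 0
}

fn main() {
    const MAX_USER_STACK_SIZE: u128 = 1024;
    let minus_one = P - 1; // -1 in the field

    // Underflow check in user mode (`POP`): disallowed_len = 0, rhs = 2.
    let aux = make_aux(5, 0, 2);
    assert!(constraint_holds(5, 0, 2, aux));

    // Overflow check in user mode: disallowed_len = MAX_USER_STACK_SIZE, rhs = 1.
    let aux = make_aux(1023, MAX_USER_STACK_SIZE, 1);
    assert!(constraint_holds(1023, MAX_USER_STACK_SIZE, 1, aux));

    // No check, user mode: disallowed_len = -1, rhs = 1; always satisfiable.
    let aux = make_aux(0, minus_one, 1);
    assert!(constraint_holds(0, minus_one, 1, aux));

    // A full user stack cannot pass the overflow check: the difference is zero,
    // so no witness satisfies the constraint when rhs != 0.
    assert_eq!(inv(sub(MAX_USER_STACK_SIZE, MAX_USER_STACK_SIZE)), None);

    println!("all cases check out");
}

The four assertions cover the cases in the comments above: an underflow check on a non-empty stack, an overflow check below the limit, the trivially satisfiable no-check case, and the impossibility of satisfying the overflow check at `stack_len = MAX_USER_STACK_SIZE` in user mode.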