Merge pull request #1183 from topos-protocol/remove_is_cpu_cycle_flag

Remove is_cpu_cycle
Hamy Ratoanina 2023-08-15 22:59:50 +02:00 committed by GitHub
commit 683501cc67
13 changed files with 56 additions and 72 deletions
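
The change replaces the dedicated `is_cpu_cycle` filter column with the sum of the one-hot opcode flags in `COL_MAP.op`: at most one flag is set per row, so the sum is itself 0 or 1 and identifies CPU-cycle rows. A minimal standalone sketch of that invariant (not part of the diff; toy types and a hypothetical four-flag array stand in for the crate's `CpuColumnsView` and field elements):

```rust
/// Toy stand-in for the one-hot opcode flag columns in `COL_MAP.op`.
#[derive(Default)]
struct Row {
    op: [u64; 4],
}

/// The derived filter: with boolean, mutually exclusive flags, this sum is
/// always 0 or 1, so it can replace a dedicated `is_cpu_cycle` column.
fn is_cpu_cycle(row: &Row) -> u64 {
    row.op.iter().sum()
}

fn main() {
    let mut cycle = Row::default();
    cycle.op[2] = 1; // a cycle row sets exactly one opcode flag
    let padding = Row::default(); // padding rows set none

    assert_eq!(is_cpu_cycle(&cycle), 1);
    assert_eq!(is_cpu_cycle(&padding), 0);
}
```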

View File

@@ -38,10 +38,6 @@ pub struct CpuColumnsView<T: Copy> {
     /// Filter. 1 if the row is part of bootstrapping the kernel code, 0 otherwise.
     pub is_bootstrap_kernel: T,
-    /// Filter. 1 if the row corresponds to a cycle of execution and 0 otherwise.
-    /// Lets us re-use columns in non-cycle rows.
-    pub is_cpu_cycle: T,
     /// If CPU cycle: Current context.
     // TODO: this is currently unconstrained
     pub context: T,

View File

@@ -43,13 +43,13 @@ pub struct OpsColumnsView<T: Copy> {
     // TODO: combine GET_CONTEXT and SET_CONTEXT into one flag
     pub get_context: T,
     pub set_context: T,
-    pub exit_kernel: T, // Note: This column must be 0 when is_cpu_cycle = 0.
+    pub exit_kernel: T,
     // TODO: combine MLOAD_GENERAL and MSTORE_GENERAL into one flag
     pub mload_general: T,
     pub mstore_general: T,
-    pub syscall: T, // Note: This column must be 0 when is_cpu_cycle = 0.
-    pub exception: T, // Note: This column must be 0 when is_cpu_cycle = 0.
+    pub syscall: T,
+    pub exception: T,
 }
 // `u8` is guaranteed to have a `size_of` of 1.
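
The `size_of` remark above is what makes the flag-sum trick cheap to express: the crate's column views are (to my understanding) `#[repr(C)]` structs whose fields all share one type, so flags can be counted and indexed as a flat array. A hedged sketch of that layout argument, with a simplified three-field view rather than the crate's actual derive machinery:

```rust
/// Simplified view struct: all fields share one type, so with #[repr(C)]
/// its layout matches `[T; NUM_OPS]` and it can be read as an array.
#[repr(C)]
struct OpsView<T: Copy> {
    get_context: T,
    set_context: T,
    exit_kernel: T,
}

/// Counting fields via `size_of` works because `u8` has a `size_of` of 1.
const NUM_OPS: usize = std::mem::size_of::<OpsView<u8>>();

fn as_flags<T: Copy>(view: &OpsView<T>) -> &[T; NUM_OPS] {
    // SAFETY: #[repr(C)] plus identically-typed fields gives the same
    // layout as [T; NUM_OPS], with no padding between fields.
    unsafe { &*(view as *const OpsView<T>).cast::<[T; NUM_OPS]>() }
}

fn main() {
    let ops = OpsView { get_context: 0u64, set_context: 1, exit_kernel: 0 };
    assert_eq!(as_flags(&ops).iter().sum::<u64>(), 1);
}
```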

View File

@@ -67,34 +67,33 @@ pub fn eval_packed_generic<P: PackedField>(
     nv: &CpuColumnsView<P>,
     yield_constr: &mut ConstraintConsumer<P>,
 ) {
+    let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
+    let is_cpu_cycle_next: P = COL_MAP.op.iter().map(|&col_i| nv[col_i]).sum();
     // Once we start executing instructions, then we continue until the end of the table.
-    yield_constr.constraint_transition(lv.is_cpu_cycle * (nv.is_cpu_cycle - P::ONES));
+    yield_constr.constraint_transition(is_cpu_cycle * (is_cpu_cycle_next - P::ONES));
     // If a row is a CPU cycle and executing a native instruction (implemented as a table row; not
     // microcoded) then the program counter is incremented by 1 to obtain the next row's program
     // counter. Also, the next row has the same kernel flag.
     let is_native_instruction: P = NATIVE_INSTRUCTIONS.iter().map(|&col_i| lv[col_i]).sum();
     yield_constr.constraint_transition(
-        lv.is_cpu_cycle
-            * is_native_instruction
-            * (lv.program_counter - nv.program_counter + P::ONES),
-    );
-    yield_constr.constraint_transition(
-        lv.is_cpu_cycle * is_native_instruction * (lv.is_kernel_mode - nv.is_kernel_mode),
+        is_native_instruction * (lv.program_counter - nv.program_counter + P::ONES),
     );
+    yield_constr
+        .constraint_transition(is_native_instruction * (lv.is_kernel_mode - nv.is_kernel_mode));
     // If a non-CPU cycle row is followed by a CPU cycle row, then:
     // - the `program_counter` of the CPU cycle row is `main` (the entry point of our kernel),
     // - execution is in kernel mode, and
     // - the stack is empty.
-    let is_last_noncpu_cycle = (lv.is_cpu_cycle - P::ONES) * nv.is_cpu_cycle;
+    let is_last_noncpu_cycle = (is_cpu_cycle - P::ONES) * is_cpu_cycle_next;
     let pc_diff = nv.program_counter - get_start_pc::<P::Scalar>();
     yield_constr.constraint_transition(is_last_noncpu_cycle * pc_diff);
     yield_constr.constraint_transition(is_last_noncpu_cycle * (nv.is_kernel_mode - P::ONES));
     yield_constr.constraint_transition(is_last_noncpu_cycle * nv.stack_len);
     // The last row must be a CPU cycle row.
-    yield_constr.constraint_last_row(lv.is_cpu_cycle - P::ONES);
+    yield_constr.constraint_last_row(is_cpu_cycle - P::ONES);
     // Also, the last row's `program_counter` must be inside the `halt` infinite loop. Note that
     // that loop consists of two instructions, so we must check for `halt` and `halt_inner` labels.
     let (halt_pc0, halt_pc1) = get_halt_pcs::<P::Scalar>();
@@ -110,9 +109,11 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     nv: &CpuColumnsView<ExtensionTarget<D>>,
     yield_constr: &mut RecursiveConstraintConsumer<F, D>,
 ) {
+    let is_cpu_cycle = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i]));
+    let is_cpu_cycle_next = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| nv[col_i]));
     // Once we start executing instructions, then we continue until the end of the table.
     {
-        let constr = builder.mul_sub_extension(lv.is_cpu_cycle, nv.is_cpu_cycle, lv.is_cpu_cycle);
+        let constr = builder.mul_sub_extension(is_cpu_cycle, is_cpu_cycle_next, is_cpu_cycle);
         yield_constr.constraint_transition(builder, constr);
     }
@@ -120,9 +121,7 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     // microcoded) then the program counter is incremented by 1 to obtain the next row's program
     // counter. Also, the next row has the same kernel flag.
     {
-        let is_native_instruction =
-            builder.add_many_extension(NATIVE_INSTRUCTIONS.iter().map(|&col_i| lv[col_i]));
-        let filter = builder.mul_extension(lv.is_cpu_cycle, is_native_instruction);
+        let filter = builder.add_many_extension(NATIVE_INSTRUCTIONS.iter().map(|&col_i| lv[col_i]));
         let pc_diff = builder.sub_extension(lv.program_counter, nv.program_counter);
         let pc_constr = builder.mul_add_extension(filter, pc_diff, filter);
         yield_constr.constraint_transition(builder, pc_constr);
@@ -137,7 +136,7 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     // - the stack is empty.
     {
         let is_last_noncpu_cycle =
-            builder.mul_sub_extension(lv.is_cpu_cycle, nv.is_cpu_cycle, nv.is_cpu_cycle);
+            builder.mul_sub_extension(is_cpu_cycle, is_cpu_cycle_next, is_cpu_cycle_next);
         // Start at `main`.
         let main = builder.constant_extension(get_start_pc::<F>().into());
@@ -161,7 +160,7 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     // The last row must be a CPU cycle row.
     {
         let one = builder.one_extension();
-        let constr = builder.sub_extension(lv.is_cpu_cycle, one);
+        let constr = builder.sub_extension(is_cpu_cycle, one);
         yield_constr.constraint_last_row(builder, constr);
     }
     // Also, the last row's `program_counter` must be inside the `halt` infinite loop. Note that
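
On the first constraint in this file: `is_cpu_cycle * (is_cpu_cycle_next - 1)` vanishes on every transition except a cycle row followed by a non-cycle row, which is exactly how "once we start executing instructions, we continue until the end of the table" is enforced. A toy truth table over integer stand-ins for field values:

```rust
/// Integer stand-in for the packed-field transition constraint
/// `is_cpu_cycle * (is_cpu_cycle_next - P::ONES)`.
fn transition(cur: i64, next: i64) -> i64 {
    cur * (next - 1)
}

fn main() {
    assert_eq!(transition(0, 0), 0); // padding -> padding: allowed
    assert_eq!(transition(0, 1), 0); // padding -> cycle: allowed (start of execution)
    assert_eq!(transition(1, 1), 0); // cycle -> cycle: allowed
    assert_ne!(transition(1, 0), 0); // cycle -> padding: rejected
}
```

The companion expression `(is_cpu_cycle - 1) * is_cpu_cycle_next` is nonzero only on the padding-to-cycle boundary row, which is why it can gate the `main`-entry, kernel-mode, and empty-stack initial conditions.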

View File

@@ -200,7 +200,7 @@ pub fn ctl_data_gp_memory<F: Field>(channel: usize) -> Vec<Column<F>> {
 }
 pub fn ctl_filter_code_memory<F: Field>() -> Column<F> {
-    Column::single(COL_MAP.is_cpu_cycle)
+    Column::sum(COL_MAP.op.iter())
 }
 pub fn ctl_filter_gp_memory<F: Field>(channel: usize) -> Column<F> {
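
`Column::sum` here builds the coefficient-1 linear combination over the opcode-flag columns, so the cross-table-lookup filter evaluates to the same 0/1 value per row that `Column::single(COL_MAP.is_cpu_cycle)` did. A toy evaluation of that row-by-row equivalence under the one-hot invariant (the column layout is hypothetical):

```rust
/// Evaluate a coefficient-1 linear combination over the given column indices.
fn eval_sum(row: &[u64], cols: &[usize]) -> u64 {
    cols.iter().map(|&c| row[c]).sum()
}

fn main() {
    // Hypothetical layout: [op0, op1, op2, old_is_cpu_cycle].
    let cycle_row = [0u64, 1, 0, 1];
    let padding_row = [0u64, 0, 0, 0];
    for row in [cycle_row, padding_row] {
        // The summed filter reproduces the retired column on every row.
        assert_eq!(eval_sum(&row, &[0, 1, 2]), row[3]);
    }
}
```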

View File

@@ -60,12 +60,8 @@ const OPCODES: [(u8, usize, bool, usize); 32] = [
 ];
 pub fn generate<F: RichField>(lv: &mut CpuColumnsView<F>) {
-    let cycle_filter = lv.is_cpu_cycle;
-    if cycle_filter == F::ZERO {
-        // This column cannot be shared.
-        lv.op.eq_iszero = F::ZERO;
-        return;
-    }
+    let cycle_filter: F = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
     // This assert is not _strictly_ necessary, but I include it as a sanity check.
     assert_eq!(cycle_filter, F::ONE, "cycle_filter should be 0 or 1");
@@ -122,7 +118,7 @@ pub fn eval_packed_generic<P: PackedField>(
     lv: &CpuColumnsView<P>,
     yield_constr: &mut ConstraintConsumer<P>,
 ) {
-    let cycle_filter = lv.is_cpu_cycle;
+    let cycle_filter: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
     // Ensure that the kernel flag is valid (either 0 or 1).
     let kernel_mode = lv.is_kernel_mode;
@@ -192,7 +188,7 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
 ) {
     let one = builder.one_extension();
-    let cycle_filter = lv.is_cpu_cycle;
+    let cycle_filter = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i]));
     // Ensure that the kernel flag is valid (either 0 or 1).
     let kernel_mode = lv.is_kernel_mode;

View File

@@ -114,7 +114,7 @@ fn eval_packed_dup<P: PackedField>(
     lv: &CpuColumnsView<P>,
     yield_constr: &mut ConstraintConsumer<P>,
 ) {
-    let filter = lv.is_cpu_cycle * lv.op.dup;
+    let filter = lv.op.dup;
     let in_channel = &lv.mem_channels[0];
     let out_channel = &lv.mem_channels[NUM_GP_CHANNELS - 1];
@@ -140,7 +140,7 @@ fn eval_ext_circuit_dup<F: RichField + Extendable<D>, const D: usize>(
 ) {
     let neg_one = builder.constant_extension(F::NEG_ONE.into());
-    let filter = builder.mul_extension(lv.is_cpu_cycle, lv.op.dup);
+    let filter = lv.op.dup;
     let in_channel = &lv.mem_channels[0];
     let out_channel = &lv.mem_channels[NUM_GP_CHANNELS - 1];
@@ -166,7 +166,7 @@ fn eval_packed_swap<P: PackedField>(
 ) {
     let n_plus_one = n + P::ONES;
-    let filter = lv.is_cpu_cycle * lv.op.swap;
+    let filter = lv.op.swap;
     let in1_channel = &lv.mem_channels[0];
     let in2_channel = &lv.mem_channels[1];
@@ -192,7 +192,7 @@ fn eval_ext_circuit_swap<F: RichField + Extendable<D>, const D: usize>(
     let one = builder.one_extension();
     let n_plus_one = builder.add_extension(n, one);
-    let filter = builder.mul_extension(lv.is_cpu_cycle, lv.op.swap);
+    let filter = lv.op.swap;
     let in1_channel = &lv.mem_channels[0];
     let in2_channel = &lv.mem_channels[1];

View File

@@ -5,6 +5,7 @@ use plonky2::field::types::Field;
 use plonky2::hash::hash_types::RichField;
 use plonky2::iop::ext_target::ExtensionTarget;
+use super::columns::COL_MAP;
 use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
 use crate::cpu::columns::ops::OpsColumnsView;
 use crate::cpu::columns::CpuColumnsView;
@@ -63,7 +64,7 @@ fn eval_packed_accumulate<P: PackedField>(
 ) {
     // Is it an instruction that we constrain here?
     // I.e., does it always cost a constant amount of gas?
-    let is_simple_instr: P = SIMPLE_OPCODES
+    let filter: P = SIMPLE_OPCODES
         .into_iter()
         .enumerate()
         .filter_map(|(i, maybe_cost)| {
@@ -71,7 +72,6 @@ fn eval_packed_accumulate<P: PackedField>(
             maybe_cost.map(|_| lv.op[i])
         })
         .sum();
-    let filter = lv.is_cpu_cycle * is_simple_instr;
     // How much gas did we use?
     let gas_used: P = SIMPLE_OPCODES
@@ -88,16 +88,14 @@ fn eval_packed_accumulate<P: PackedField>(
     for (maybe_cost, op_flag) in izip!(SIMPLE_OPCODES.into_iter(), lv.op.into_iter()) {
         if let Some(cost) = maybe_cost {
             let cost = P::Scalar::from_canonical_u32(cost);
-            yield_constr
-                .constraint_transition(lv.is_cpu_cycle * op_flag * (nv.gas - lv.gas - cost));
+            yield_constr.constraint_transition(op_flag * (nv.gas - lv.gas - cost));
         }
     }
     // For jumps.
     let jump_gas_cost = P::Scalar::from_canonical_u32(G_MID.unwrap())
         + lv.opcode_bits[0] * P::Scalar::from_canonical_u32(G_HIGH.unwrap() - G_MID.unwrap());
-    yield_constr
-        .constraint_transition(lv.is_cpu_cycle * lv.op.jumps * (nv.gas - lv.gas - jump_gas_cost));
+    yield_constr.constraint_transition(lv.op.jumps * (nv.gas - lv.gas - jump_gas_cost));
 }
 fn eval_packed_init<P: PackedField>(
@@ -105,8 +103,10 @@ fn eval_packed_init<P: PackedField>(
     nv: &CpuColumnsView<P>,
     yield_constr: &mut ConstraintConsumer<P>,
 ) {
+    let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
+    let is_cpu_cycle_next: P = COL_MAP.op.iter().map(|&col_i| nv[col_i]).sum();
     // `nv` is the first row that executes an instruction.
-    let filter = (lv.is_cpu_cycle - P::ONES) * nv.is_cpu_cycle;
+    let filter = (is_cpu_cycle - P::ONES) * is_cpu_cycle_next;
     // Set initial gas to zero.
     yield_constr.constraint_transition(filter * nv.gas);
 }
@@ -128,7 +128,7 @@ fn eval_ext_circuit_accumulate<F: RichField + Extendable<D>, const D: usize>(
 ) {
     // Is it an instruction that we constrain here?
     // I.e., does it always cost a constant amount of gas?
-    let is_simple_instr = SIMPLE_OPCODES.into_iter().enumerate().fold(
+    let filter = SIMPLE_OPCODES.into_iter().enumerate().fold(
         builder.zero_extension(),
         |cumul, (i, maybe_cost)| {
             // Add flag `lv.op[i]` to the sum if `SIMPLE_OPCODES[i]` is `Some`.
@@ -138,7 +138,6 @@ fn eval_ext_circuit_accumulate<F: RichField + Extendable<D>, const D: usize>(
             }
         },
     );
-    let filter = builder.mul_extension(lv.is_cpu_cycle, is_simple_instr);
     // How much gas did we use?
     let gas_used = SIMPLE_OPCODES.into_iter().enumerate().fold(
@@ -161,21 +160,20 @@ fn eval_ext_circuit_accumulate<F: RichField + Extendable<D>, const D: usize>(
     for (maybe_cost, op_flag) in izip!(SIMPLE_OPCODES.into_iter(), lv.op.into_iter()) {
         if let Some(cost) = maybe_cost {
-            let filter = builder.mul_extension(lv.is_cpu_cycle, op_flag);
             let nv_lv_diff = builder.sub_extension(nv.gas, lv.gas);
             let constr = builder.arithmetic_extension(
                 F::ONE,
                 -F::from_canonical_u32(cost),
-                filter,
+                op_flag,
                 nv_lv_diff,
-                filter,
+                op_flag,
             );
             yield_constr.constraint_transition(builder, constr);
         }
     }
     // For jumps.
-    let filter = builder.mul_extension(lv.is_cpu_cycle, lv.op.jumps);
+    let filter = lv.op.jumps;
     let jump_gas_cost = builder.mul_const_extension(
         F::from_canonical_u32(G_HIGH.unwrap() - G_MID.unwrap()),
         lv.opcode_bits[0],
@@ -196,7 +194,9 @@ fn eval_ext_circuit_init<F: RichField + Extendable<D>, const D: usize>(
     yield_constr: &mut RecursiveConstraintConsumer<F, D>,
 ) {
     // `nv` is the first row that executes an instruction.
-    let filter = builder.mul_sub_extension(lv.is_cpu_cycle, nv.is_cpu_cycle, nv.is_cpu_cycle);
+    let is_cpu_cycle = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i]));
+    let is_cpu_cycle_next = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| nv[col_i]));
+    let filter = builder.mul_sub_extension(is_cpu_cycle, is_cpu_cycle_next, is_cpu_cycle_next);
     // Set initial gas to zero.
     let constr = builder.mul_extension(filter, nv.gas);
     yield_constr.constraint_transition(builder, constr);
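
The gas constraints above all share one shape: an opcode flag gates `nv.gas - lv.gas - cost`, so gas accumulation is enforced exactly on rows where that opcode executes and left unconstrained elsewhere. A toy version over integers (the `JUMP`/`JUMPI` case additionally selects `G_MID` vs `G_HIGH` via `opcode_bits[0]`):

```rust
/// Integer stand-in for `op_flag * (nv.gas - lv.gas - cost)`.
fn gas_transition(op_flag: i64, cur_gas: i64, next_gas: i64, cost: i64) -> i64 {
    op_flag * (next_gas - cur_gas - cost)
}

fn main() {
    assert_eq!(gas_transition(1, 100, 103, 3), 0); // flag set, gas advanced by cost
    assert_ne!(gas_transition(1, 100, 100, 3), 0); // flag set, gas unchanged: rejected
    assert_eq!(gas_transition(0, 100, 999, 3), 0); // flag clear: row unconstrained
}
```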

View File

@@ -4,6 +4,7 @@ use plonky2::field::types::PrimeField64;
 use plonky2::hash::hash_types::RichField;
 use plonky2::iop::ext_target::ExtensionTarget;
+use super::columns::COL_MAP;
 use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
 use crate::cpu::columns::CpuColumnsView;
@@ -34,7 +35,7 @@ pub const NUM_CHANNELS: usize = channel_indices::GP.end;
 /// Calculates `lv.stack_len_bounds_aux`. Note that this must be run after decode.
 pub fn generate<F: PrimeField64>(lv: &mut CpuColumnsView<F>) {
-    let cycle_filter = lv.is_cpu_cycle;
+    let cycle_filter: F = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
     if cycle_filter != F::ZERO {
         assert!(lv.is_kernel_mode.to_canonical_u64() <= 1);
     }
@@ -48,11 +49,11 @@ pub fn eval_packed<P: PackedField>(
     lv: &CpuColumnsView<P>,
     yield_constr: &mut ConstraintConsumer<P>,
 ) {
+    let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
     // Validate `lv.code_context`. It should be 0 if in kernel mode and `lv.context` if in user
     // mode.
-    yield_constr.constraint(
-        lv.is_cpu_cycle * (lv.code_context - (P::ONES - lv.is_kernel_mode) * lv.context),
-    );
+    yield_constr
+        .constraint(is_cpu_cycle * (lv.code_context - (P::ONES - lv.is_kernel_mode) * lv.context));
     // Validate `channel.used`. It should be binary.
     for channel in lv.mem_channels {
@@ -69,7 +70,8 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     // mode.
     let diff = builder.sub_extension(lv.context, lv.code_context);
     let constr = builder.mul_sub_extension(lv.is_kernel_mode, lv.context, diff);
-    let filtered_constr = builder.mul_extension(lv.is_cpu_cycle, constr);
+    let is_cpu_cycle = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i]));
+    let filtered_constr = builder.mul_extension(is_cpu_cycle, constr);
     yield_constr.constraint(builder, filtered_constr);
     // Validate `channel.used`. It should be binary.

View File

@@ -19,7 +19,7 @@ pub fn eval_packed<P: PackedField>(
     lv: &CpuColumnsView<P>,
     yield_constr: &mut ConstraintConsumer<P>,
 ) {
-    let filter = lv.is_cpu_cycle * (lv.op.addfp254 + lv.op.mulfp254 + lv.op.subfp254);
+    let filter = lv.op.addfp254 + lv.op.mulfp254 + lv.op.subfp254;
     // We want to use all the same logic as the usual mod operations, but without needing to read
     // the modulus from the stack. We simply constrain `mem_channels[2]` to be our prime (that's
@@ -36,10 +36,7 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     lv: &CpuColumnsView<ExtensionTarget<D>>,
     yield_constr: &mut RecursiveConstraintConsumer<F, D>,
 ) {
-    let filter = {
-        let flag_sum = builder.add_many_extension([lv.op.addfp254, lv.op.mulfp254, lv.op.subfp254]);
-        builder.mul_extension(lv.is_cpu_cycle, flag_sum)
-    };
+    let filter = builder.add_many_extension([lv.op.addfp254, lv.op.mulfp254, lv.op.subfp254]);
     // We want to use all the same logic as the usual mod operations, but without needing to read
     // the modulus from the stack. We simply constrain `mem_channels[2]` to be our prime (that's

View File

@@ -18,9 +18,7 @@ pub fn eval_packed<P: PackedField>(
     // This is simple: just do output = 0xffffffff - input.
     let input = lv.mem_channels[0].value;
     let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value;
-    let cycle_filter = lv.is_cpu_cycle;
-    let is_not_filter = lv.op.not;
-    let filter = cycle_filter * is_not_filter;
+    let filter = lv.op.not;
     for (input_limb, output_limb) in input.into_iter().zip(output) {
         yield_constr.constraint(
             filter * (output_limb + input_limb - P::Scalar::from_canonical_u64(ALL_1_LIMB)),
@@ -35,9 +33,7 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
 ) {
     let input = lv.mem_channels[0].value;
     let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value;
-    let cycle_filter = lv.is_cpu_cycle;
-    let is_not_filter = lv.op.not;
-    let filter = builder.mul_extension(cycle_filter, is_not_filter);
+    let filter = lv.op.not;
     for (input_limb, output_limb) in input.into_iter().zip(output) {
         let constr = builder.add_extension(output_limb, input_limb);
         let constr = builder.arithmetic_extension(
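
For reference, the NOT check works per 32-bit limb: since `x + !x = 0xffffffff` for any 32-bit `x`, constraining `output_limb + input_limb - ALL_1_LIMB` to zero needs no bit decomposition. A toy check:

```rust
const ALL_1_LIMB: i64 = 0xffff_ffff;

/// Stand-in for `filter * (output_limb + input_limb - ALL_1_LIMB)` with filter = 1.
fn not_constraint(input_limb: i64, output_limb: i64) -> i64 {
    output_limb + input_limb - ALL_1_LIMB
}

fn main() {
    let input: i64 = 0x1234_5678;
    let output = input ^ ALL_1_LIMB; // bitwise NOT within 32 bits
    assert_eq!(not_constraint(input, output), 0);
    assert_ne!(not_constraint(input, output + 1), 0); // wrong output: rejected
}
```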

View File

@@ -198,8 +198,7 @@ pub fn eval_packed<P: PackedField>(
 ) {
     for (op, stack_behavior) in izip!(lv.op.into_iter(), STACK_BEHAVIORS.into_iter()) {
         if let Some(stack_behavior) = stack_behavior {
-            let filter = lv.is_cpu_cycle * op;
-            eval_packed_one(lv, filter, stack_behavior, yield_constr);
+            eval_packed_one(lv, op, stack_behavior, yield_constr);
         }
     }
 }
@@ -313,8 +312,7 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
 ) {
     for (op, stack_behavior) in izip!(lv.op.into_iter(), STACK_BEHAVIORS.into_iter()) {
         if let Some(stack_behavior) = stack_behavior {
-            let filter = builder.mul_extension(lv.is_cpu_cycle, op);
-            eval_ext_circuit_one(builder, lv, filter, stack_behavior, yield_constr);
+            eval_ext_circuit_one(builder, lv, op, stack_behavior, yield_constr);
         }
     }
 }

View File

@@ -14,6 +14,7 @@ use plonky2::field::types::Field;
 use plonky2::hash::hash_types::RichField;
 use plonky2::iop::ext_target::ExtensionTarget;
+use super::columns::COL_MAP;
 use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
 use crate::cpu::columns::CpuColumnsView;
@@ -27,7 +28,7 @@ pub fn eval_packed<P: PackedField>(
     // 1024 is valid. 1025 means we've gone one over, which is necessary for overflow, as an EVM
     // opcode increases the stack length by at most one.
-    let filter = lv.is_cpu_cycle;
+    let filter: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
     let diff = lv.stack_len - P::Scalar::from_canonical_usize(MAX_USER_STACK_SIZE + 1);
     let lhs = diff * lv.stack_len_bounds_aux;
     let rhs = P::ONES - lv.is_kernel_mode;
@@ -44,7 +45,7 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     // 1024 is valid. 1025 means we've gone one over, which is necessary for overflow, as an EVM
     // opcode increases the stack length by at most one.
-    let filter = lv.is_cpu_cycle;
+    let filter = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i]));
     let lhs = builder.arithmetic_extension(
         F::ONE,
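
The `stack_len_bounds_aux` column used here implements the usual nonzero-witness trick: to show `x != 0`, the prover supplies `aux = x⁻¹` and the constraint checks `x * aux = 1`. Here `x = stack_len - (MAX_USER_STACK_SIZE + 1)` in user mode, and together with "an opcode increases the stack length by at most one" this bounds user-mode stack depth. A self-contained sketch over a toy prime field (the real code works over plonky2's Goldilocks field):

```rust
// Toy prime, small enough to avoid u64 overflow in the products below.
const P: u64 = 1_000_003;

fn pow_mod(mut base: u64, mut exp: u64) -> u64 {
    let mut acc = 1;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % P;
        }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

/// Fermat inverse; only valid for x != 0 mod P.
fn inv(x: u64) -> u64 {
    pow_mod(x % P, P - 2)
}

fn main() {
    let max_plus_one = 1025u64; // MAX_USER_STACK_SIZE + 1
    let stack_len = 7u64; // a legal user-mode stack length
    let is_kernel_mode = 0u64;

    // Witness: aux is the inverse of (stack_len - 1025), proving the diff is nonzero.
    let diff = (stack_len + P - max_plus_one) % P;
    let aux = inv(diff);

    // Constraint: diff * aux - (1 - is_kernel_mode) == 0 (mod P).
    assert_eq!((diff * aux + P - (1 - is_kernel_mode)) % P, 0);
}
```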

View File

@@ -243,7 +243,6 @@ fn perform_op<F: Field>(
 /// operation. It also returns the opcode.
 fn base_row<F: Field>(state: &mut GenerationState<F>) -> (CpuColumnsView<F>, u8) {
     let mut row: CpuColumnsView<F> = CpuColumnsView::default();
-    row.is_cpu_cycle = F::ONE;
     row.clock = F::from_canonical_usize(state.traces.clock());
     row.context = F::from_canonical_usize(state.registers.context);
     row.program_counter = F::from_canonical_usize(state.registers.program_counter);