diff --git a/evm/src/cpu/bootstrap_kernel.rs b/evm/src/cpu/bootstrap_kernel.rs
index 66f88d3a..4aee617c 100644
--- a/evm/src/cpu/bootstrap_kernel.rs
+++ b/evm/src/cpu/bootstrap_kernel.rs
@@ -25,6 +25,7 @@ pub(crate) fn generate_bootstrap_kernel<F: Field>(state: &mut GenerationState<F>)
     for chunk in &KERNEL.code.iter().enumerate().chunks(NUM_GP_CHANNELS) {
         let mut cpu_row = CpuColumnsView::default();
         cpu_row.clock = F::from_canonical_usize(state.traces.clock());
+        cpu_row.is_bootstrap_kernel = F::ONE;
 
         // Write this chunk to memory, while simultaneously packing its bytes into a u32 word.
         for (channel, (addr, &byte)) in chunk.enumerate() {
@@ -39,6 +40,7 @@ pub(crate) fn generate_bootstrap_kernel<F: Field>(state: &mut GenerationState<F>)
 
     let mut final_cpu_row = CpuColumnsView::default();
     final_cpu_row.clock = F::from_canonical_usize(state.traces.clock());
+    final_cpu_row.is_bootstrap_kernel = F::ONE;
     final_cpu_row.is_keccak_sponge = F::ONE;
     // The Keccak sponge CTL uses memory value columns for its inputs and outputs.
     final_cpu_row.mem_channels[0].value[0] = F::ZERO; // context
@@ -64,8 +66,8 @@ pub(crate) fn eval_bootstrap_kernel<F: Field, P: PackedField<Scalar = F>>(
     let next_values: &CpuColumnsView<_> = vars.next_values.borrow();
 
     // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
-    let local_is_bootstrap = P::ONES - local_values.op.into_iter().sum::<P>();
-    let next_is_bootstrap = P::ONES - next_values.op.into_iter().sum::<P>();
+    let local_is_bootstrap = local_values.is_bootstrap_kernel;
+    let next_is_bootstrap = next_values.is_bootstrap_kernel;
     yield_constr.constraint_first_row(local_is_bootstrap - P::ONES);
     yield_constr.constraint_last_row(local_is_bootstrap);
     let delta_is_bootstrap = next_is_bootstrap - local_is_bootstrap;
@@ -111,10 +113,8 @@ pub(crate) fn eval_bootstrap_kernel_circuit<F: RichField + Extendable<D>, const D: usize>(
     let one = builder.one_extension();
 
     // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
-    let local_is_bootstrap = builder.add_many_extension(local_values.op.iter());
-    let local_is_bootstrap = builder.sub_extension(one, local_is_bootstrap);
-    let next_is_bootstrap = builder.add_many_extension(next_values.op.iter());
-    let next_is_bootstrap = builder.sub_extension(one, next_is_bootstrap);
+    let local_is_bootstrap = local_values.is_bootstrap_kernel;
+    let next_is_bootstrap = next_values.is_bootstrap_kernel;
     let constraint = builder.sub_extension(local_is_bootstrap, one);
     yield_constr.constraint_first_row(builder, constraint);
     yield_constr.constraint_last_row(builder, local_is_bootstrap);
diff --git a/evm/src/cpu/columns/mod.rs b/evm/src/cpu/columns/mod.rs
index 134ab02b..fecc8df9 100644
--- a/evm/src/cpu/columns/mod.rs
+++ b/evm/src/cpu/columns/mod.rs
@@ -35,6 +35,9 @@ pub struct MemoryChannelView<T: Copy> {
 #[repr(C)]
 #[derive(Clone, Copy, Eq, PartialEq, Debug)]
 pub struct CpuColumnsView<T: Copy> {
+    /// Filter. 1 if the row is part of bootstrapping the kernel code, 0 otherwise.
+    pub is_bootstrap_kernel: T,
+
     /// If CPU cycle: Current context.
     // TODO: this is currently unconstrained
     pub context: T,
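Reviewer note, not part of the patch: the three constraints kept in `eval_bootstrap_kernel` (first row 1, last row 0, delta in {0, -1}) force the new `is_bootstrap_kernel` column to be a contiguous prefix of ones followed by zeros. A minimal standalone sketch of that argument, with a plain byte column standing in for field elements:

    // Toy model of the IS_BOOTSTRAP_KERNEL checks; 0/1 bytes stand in for field values.
    fn is_valid_bootstrap_filter(column: &[u8]) -> bool {
        // Init value 1, final value 0.
        if column.first() != Some(&1) || column.last() != Some(&0) {
            return false;
        }
        // Delta in {0, -1}: once the filter drops to 0 it can never return to 1.
        column
            .windows(2)
            .all(|w| matches!(w[1] as i8 - w[0] as i8, 0 | -1))
    }

    fn main() {
        assert!(is_valid_bootstrap_filter(&[1, 1, 1, 0, 0]));
        assert!(!is_valid_bootstrap_filter(&[1, 0, 1, 0])); // re-enters bootstrapping
        assert!(!is_valid_bootstrap_filter(&[0, 1, 1, 0])); // does not start at 1
    }

The column's shape is pinned down by the boundary and delta constraints alone, without reference to the op flags, which is what lets it replace the old `1 - sum(op flags)` expression.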
diff --git a/evm/src/cpu/control_flow.rs b/evm/src/cpu/control_flow.rs
index eeabf6f0..9c17367a 100644
--- a/evm/src/cpu/control_flow.rs
+++ b/evm/src/cpu/control_flow.rs
@@ -34,14 +34,9 @@ const NATIVE_INSTRUCTIONS: [usize; 17] = [
     // not exceptions (also jump)
 ];
 
-pub(crate) fn get_halt_pcs<F: Field>() -> (F, F) {
-    let halt_pc0 = KERNEL.global_labels["halt_pc0"];
-    let halt_pc1 = KERNEL.global_labels["halt_pc1"];
-
-    (
-        F::from_canonical_usize(halt_pc0),
-        F::from_canonical_usize(halt_pc1),
-    )
+pub(crate) fn get_halt_pc<F: Field>() -> F {
+    let halt_pc = KERNEL.global_labels["halt"];
+    F::from_canonical_usize(halt_pc)
 }
 
 pub(crate) fn get_start_pc<F: Field>() -> F {
@@ -57,8 +52,15 @@ pub fn eval_packed_generic<P: PackedField>(
 ) {
     let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
     let is_cpu_cycle_next: P = COL_MAP.op.iter().map(|&col_i| nv[col_i]).sum();
-    // Once we start executing instructions, then we continue until the end of the table.
-    yield_constr.constraint_transition(is_cpu_cycle * (is_cpu_cycle_next - P::ONES));
+
+    let next_halt_state = P::ONES - nv.is_bootstrap_kernel - is_cpu_cycle_next;
+
+    // Once we start executing instructions, we continue until the end of the table
+    // or until we reach the dummy padding rows. This, along with the constraints on
+    // the first row, enforces that the operation flags and the halt flag are mutually
+    // exclusive over the entire CPU trace.
+    yield_constr
+        .constraint_transition(is_cpu_cycle * (is_cpu_cycle_next + next_halt_state - P::ONES));
 
     // If a row is a CPU cycle and executing a native instruction (implemented as a table row; not
     // microcoded) then the program counter is incremented by 1 to obtain the next row's program
@@ -79,16 +81,6 @@ pub fn eval_packed_generic<P: PackedField>(
     yield_constr.constraint_transition(is_last_noncpu_cycle * pc_diff);
     yield_constr.constraint_transition(is_last_noncpu_cycle * (nv.is_kernel_mode - P::ONES));
     yield_constr.constraint_transition(is_last_noncpu_cycle * nv.stack_len);
-
-    // The last row must be a CPU cycle row.
-    yield_constr.constraint_last_row(is_cpu_cycle - P::ONES);
-    // Also, the last row's `program_counter` must be inside the `halt` infinite loop. Note that
-    // that loop consists of two instructions, so we must check for `halt` and `halt_inner` labels.
-    let (halt_pc0, halt_pc1) = get_halt_pcs::<P::Scalar>();
-    yield_constr
-        .constraint_last_row((lv.program_counter - halt_pc0) * (lv.program_counter - halt_pc1));
-    // Finally, the last row must be in kernel mode.
-    yield_constr.constraint_last_row(lv.is_kernel_mode - P::ONES);
 }
 
 pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
@@ -97,11 +89,21 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     nv: &CpuColumnsView<ExtensionTarget<D>>,
     yield_constr: &mut RecursiveConstraintConsumer<F, D>,
 ) {
+    let one = builder.one_extension();
+
     let is_cpu_cycle = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i]));
     let is_cpu_cycle_next = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| nv[col_i]));
-    // Once we start executing instructions, then we continue until the end of the table.
+
+    let next_halt_state = builder.add_extension(nv.is_bootstrap_kernel, is_cpu_cycle_next);
+    let next_halt_state = builder.sub_extension(one, next_halt_state);
+
+    // Once we start executing instructions, we continue until the end of the table
+    // or until we reach the dummy padding rows. This, along with the constraints on
+    // the first row, enforces that the operation flags and the halt flag are mutually
+    // exclusive over the entire CPU trace.
     {
-        let constr = builder.mul_sub_extension(is_cpu_cycle, is_cpu_cycle_next, is_cpu_cycle);
+        let constr = builder.add_extension(is_cpu_cycle_next, next_halt_state);
+        let constr = builder.mul_sub_extension(is_cpu_cycle, constr, is_cpu_cycle);
         yield_constr.constraint_transition(builder, constr);
     }
 
@@ -144,30 +146,4 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
         let kernel_constr = builder.mul_extension(is_last_noncpu_cycle, nv.stack_len);
         yield_constr.constraint_transition(builder, kernel_constr);
     }
-
-    // The last row must be a CPU cycle row.
-    {
-        let one = builder.one_extension();
-        let constr = builder.sub_extension(is_cpu_cycle, one);
-        yield_constr.constraint_last_row(builder, constr);
-    }
-    // Also, the last row's `program_counter` must be inside the `halt` infinite loop. Note that
-    // that loop consists of two instructions, so we must check for `halt` and `halt_inner` labels.
-    {
-        let (halt_pc0, halt_pc1) = get_halt_pcs();
-        let halt_pc0_target = builder.constant_extension(halt_pc0);
-        let halt_pc1_target = builder.constant_extension(halt_pc1);
-
-        let halt_pc0_offset = builder.sub_extension(lv.program_counter, halt_pc0_target);
-        let halt_pc1_offset = builder.sub_extension(lv.program_counter, halt_pc1_target);
-        let constr = builder.mul_extension(halt_pc0_offset, halt_pc1_offset);
-
-        yield_constr.constraint_last_row(builder, constr);
-    }
-    // Finally, the last row must be in kernel mode.
-    {
-        let one = builder.one_extension();
-        let constr = builder.sub_extension(lv.is_kernel_mode, one);
-        yield_constr.constraint_last_row(builder, constr);
-    }
 }
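Illustration, not from the patch: assuming each row sets at most one of `is_bootstrap_kernel` and the op flags, the quantity `1 - is_bootstrap_kernel - sum(op flags)` acts as a derived indicator for a third row state, and the new transition constraint reads "a cycle row must be followed by a cycle row or a halt row". A toy three-state model over plain integers:

    // Each row is in exactly one state; the three indicators sum to 1.
    #[derive(Clone, Copy, PartialEq)]
    enum RowState {
        Bootstrap, // is_bootstrap_kernel = 1
        Cycle,     // exactly one op flag = 1
        Halt,      // all flags 0, so 1 - 0 - 0 = 1
    }

    // `is_cpu_cycle * (is_cpu_cycle_next + next_halt_state - 1) = 0`, over integers.
    fn transition_ok(cur: RowState, next: RowState) -> bool {
        let is_cycle = (cur == RowState::Cycle) as i64;
        let is_cycle_next = (next == RowState::Cycle) as i64;
        let next_halt = (next == RowState::Halt) as i64;
        is_cycle * (is_cycle_next + next_halt - 1) == 0
    }

    fn main() {
        use RowState::*;
        assert!(transition_ok(Cycle, Cycle));
        assert!(transition_ok(Cycle, Halt));
        assert!(!transition_ok(Cycle, Bootstrap)); // the product is -1, not 0
        assert!(transition_ok(Bootstrap, Cycle)); // vacuous: is_cycle = 0
    }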
diff --git a/evm/src/cpu/cpu_stark.rs b/evm/src/cpu/cpu_stark.rs
index 2d2e9072..bd2fcf19 100644
--- a/evm/src/cpu/cpu_stark.rs
+++ b/evm/src/cpu/cpu_stark.rs
@@ -8,6 +8,7 @@ use plonky2::field::packed::PackedField;
 use plonky2::field::types::Field;
 use plonky2::hash::hash_types::RichField;
 
+use super::halt;
 use crate::all_stark::Table;
 use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
 use crate::cpu::columns::{CpuColumnsView, COL_MAP, NUM_CPU_COLUMNS};
@@ -244,6 +245,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for CpuStark<F, D>
         decode::eval_packed_generic(local_values, yield_constr);
         dup_swap::eval_packed(local_values, next_values, yield_constr);
         gas::eval_packed(local_values, next_values, yield_constr);
+        halt::eval_packed(local_values, next_values, yield_constr);
         jumps::eval_packed(local_values, next_values, yield_constr);
         membus::eval_packed(local_values, yield_constr);
         memio::eval_packed(local_values, next_values, yield_constr);
@@ -277,6 +279,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for CpuStark<F, D>
         decode::eval_ext_circuit(builder, local_values, yield_constr);
         dup_swap::eval_ext_circuit(builder, local_values, next_values, yield_constr);
         gas::eval_ext_circuit(builder, local_values, next_values, yield_constr);
+        halt::eval_ext_circuit(builder, local_values, next_values, yield_constr);
         jumps::eval_ext_circuit(builder, local_values, next_values, yield_constr);
         membus::eval_ext_circuit(builder, local_values, yield_constr);
         memio::eval_ext_circuit(builder, local_values, next_values, yield_constr);
diff --git a/evm/src/cpu/halt.rs b/evm/src/cpu/halt.rs
new file mode 100644
--- /dev/null
+++ b/evm/src/cpu/halt.rs
@@ -0,0 +1,98 @@
+//! Once the CPU reaches the kernel's `halt` label, the rest of the CPU trace
+//! is filled with dummy padding rows until the trace length is a power of two.
+
+use plonky2::field::extension::Extendable;
+use plonky2::field::packed::PackedField;
+use plonky2::field::types::Field;
+use plonky2::hash::hash_types::RichField;
+use plonky2::iop::ext_target::ExtensionTarget;
+
+use super::control_flow::get_halt_pc;
+use super::membus::NUM_GP_CHANNELS;
+use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
+use crate::cpu::columns::{CpuColumnsView, COL_MAP};
+
+pub fn eval_packed<P: PackedField>(
+    lv: &CpuColumnsView<P>,
+    nv: &CpuColumnsView<P>,
+    yield_constr: &mut ConstraintConsumer<P>,
+) {
+    let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
+    let is_cpu_cycle_next: P = COL_MAP.op.iter().map(|&col_i| nv[col_i]).sum();
+
+    let halt_state = P::ONES - lv.is_bootstrap_kernel - is_cpu_cycle;
+    let next_halt_state = P::ONES - nv.is_bootstrap_kernel - is_cpu_cycle_next;
+
+    // The halt flag must be boolean.
+    yield_constr.constraint(halt_state * (halt_state - P::ONES));
+    // Once we reach a padding row, there must be only padding rows.
+    yield_constr.constraint_transition(halt_state * (next_halt_state - P::ONES));
+
+    // Padding rows should have their memory channels disabled.
+    for i in 0..NUM_GP_CHANNELS {
+        let channel = lv.mem_channels[i];
+        yield_constr.constraint(halt_state * channel.used);
+    }
+
+    // The last row must be a dummy padding row.
+    yield_constr.constraint_last_row(halt_state - P::ONES);
+
+    // Also, a padding row's `program_counter` must be at the `halt` label.
+    // In particular, this ensures that the first padding row may only be added
+    // after we jumped to the `halt` function. Subsequent padding rows may set
+    // the `program_counter` to arbitrary values (there are no transition
+    // constraints on it), so we can place this requirement on them too.
+    let halt_pc = get_halt_pc::<P::Scalar>();
+    yield_constr.constraint(halt_state * (lv.program_counter - halt_pc));
+}
+
+pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
+    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
+    lv: &CpuColumnsView<ExtensionTarget<D>>,
+    nv: &CpuColumnsView<ExtensionTarget<D>>,
+    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
+) {
+    let one = builder.one_extension();
+
+    let is_cpu_cycle = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i]));
+    let is_cpu_cycle_next = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| nv[col_i]));
+
+    let halt_state = builder.add_extension(lv.is_bootstrap_kernel, is_cpu_cycle);
+    let halt_state = builder.sub_extension(one, halt_state);
+    let next_halt_state = builder.add_extension(nv.is_bootstrap_kernel, is_cpu_cycle_next);
+    let next_halt_state = builder.sub_extension(one, next_halt_state);
+
+    // The halt flag must be boolean.
+    let constr = builder.mul_sub_extension(halt_state, halt_state, halt_state);
+    yield_constr.constraint(builder, constr);
+    // Once we reach a padding row, there must be only padding rows.
+    let constr = builder.mul_sub_extension(halt_state, next_halt_state, halt_state);
+    yield_constr.constraint_transition(builder, constr);
+
+    // Padding rows should have their memory channels disabled.
+    for i in 0..NUM_GP_CHANNELS {
+        let channel = lv.mem_channels[i];
+        let constr = builder.mul_extension(halt_state, channel.used);
+        yield_constr.constraint(builder, constr);
+    }
+
+    // The last row must be a dummy padding row.
+    {
+        let one = builder.one_extension();
+        let constr = builder.sub_extension(halt_state, one);
+        yield_constr.constraint_last_row(builder, constr);
+    }
+
+    // Also, a padding row's `program_counter` must be at the `halt` label.
+    // In particular, this ensures that the first padding row may only be added
+    // after we jumped to the `halt` function. Subsequent padding rows may set
+    // the `program_counter` to arbitrary values (there are no transition
+    // constraints on it), so we can place this requirement on them too.
+    {
+        let halt_pc = get_halt_pc();
+        let halt_pc_target = builder.constant_extension(halt_pc);
+        let constr = builder.sub_extension(lv.program_counter, halt_pc_target);
+        let constr = builder.mul_extension(halt_state, constr);
+
+        yield_constr.constraint(builder, constr);
+    }
+}
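Reviewer note, a toy restatement rather than anything in the patch: `halt_state * (next_halt_state - 1) = 0` forbids ever leaving the padding state, and the last-row constraint pins the final row to be padding; together they force the padding rows to form a non-empty suffix of the trace. Replayed over a boolean vector where `true` marks a padding row:

    fn padding_is_suffix(halt: &[bool]) -> bool {
        // "Once we reach a padding row, there must be only padding rows."
        let never_leaves = halt.windows(2).all(|w| !w[0] || w[1]);
        // "The last row must be a dummy padding row."
        let last_is_padding = halt.last().copied().unwrap_or(false);
        never_leaves && last_is_padding
    }

    fn main() {
        assert!(padding_is_suffix(&[false, false, true, true]));
        assert!(!padding_is_suffix(&[false, true, false, true])); // leaves the padding state
        assert!(!padding_is_suffix(&[false, false, true, false])); // last row is not padding
    }

The program-counter constraint is what makes the start of the suffix honest: every padding row must carry `pc = halt`, so the prover cannot begin padding before actually jumping to the halt routine.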
diff --git a/evm/src/cpu/kernel/asm/halt.asm b/evm/src/cpu/kernel/asm/halt.asm
index 906ce51a..49561fd6 100644
--- a/evm/src/cpu/kernel/asm/halt.asm
+++ b/evm/src/cpu/kernel/asm/halt.asm
@@ -1,6 +1,2 @@
 global halt:
-    PUSH halt_pc0
-global halt_pc0:
-    DUP1
-global halt_pc1:
-    JUMP
+    PANIC
diff --git a/evm/src/cpu/mod.rs b/evm/src/cpu/mod.rs
index 91b04cf4..b7312147 100644
--- a/evm/src/cpu/mod.rs
+++ b/evm/src/cpu/mod.rs
@@ -6,6 +6,7 @@ pub mod cpu_stark;
 pub(crate) mod decode;
 mod dup_swap;
 mod gas;
+mod halt;
 mod jumps;
 pub mod kernel;
 pub(crate) mod membus;
diff --git a/evm/src/generation/mod.rs b/evm/src/generation/mod.rs
index 13c6670b..35078e07 100644
--- a/evm/src/generation/mod.rs
+++ b/evm/src/generation/mod.rs
@@ -16,6 +16,7 @@ use GlobalMetadata::{
 use crate::all_stark::{AllStark, NUM_TABLES};
 use crate::config::StarkConfig;
 use crate::cpu::bootstrap_kernel::generate_bootstrap_kernel;
+use crate::cpu::columns::CpuColumnsView;
 use crate::cpu::kernel::aggregator::KERNEL;
 use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
 use crate::generation::outputs::{get_outputs, GenerationOutputs};
@@ -278,26 +279,36 @@ pub fn generate_traces<F: RichField + Extendable<D>, const D: usize>(
 fn simulate_cpu<F: RichField + Extendable<D>, const D: usize>(
     state: &mut GenerationState<F>,
 ) -> anyhow::Result<()> {
-    let halt_pc0 = KERNEL.global_labels["halt_pc0"];
-    let halt_pc1 = KERNEL.global_labels["halt_pc1"];
+    let halt_pc = KERNEL.global_labels["halt"];
 
-    let mut already_in_halt_loop = false;
     loop {
         // If we've reached the kernel's halt routine, and our trace length is a power of 2, stop.
         let pc = state.registers.program_counter;
-        let in_halt_loop = state.registers.is_kernel && (pc == halt_pc0 || pc == halt_pc1);
-        if in_halt_loop && !already_in_halt_loop {
+        let halt = state.registers.is_kernel && pc == halt_pc;
+        if halt {
             log::info!("CPU halted after {} cycles", state.traces.clock());
+
+            // Padding
+            let mut row = CpuColumnsView::<F>::default();
+            row.clock = F::from_canonical_usize(state.traces.clock());
+            row.context = F::from_canonical_usize(state.registers.context);
+            row.program_counter = F::from_canonical_usize(pc);
+            row.is_kernel_mode = F::ONE;
+            row.gas = F::from_canonical_u64(state.registers.gas_used);
+            row.stack_len = F::from_canonical_usize(state.registers.stack_len);
+
+            loop {
+                state.traces.push_cpu(row);
+                row.clock += F::ONE;
+                if state.traces.clock().is_power_of_two() {
+                    break;
+                }
+            }
+            log::info!("CPU trace padded to {} cycles", state.traces.clock());
+
+            return Ok(());
         }
-        already_in_halt_loop |= in_halt_loop;
 
         transition(state)?;
-
-        if already_in_halt_loop && state.traces.clock().is_power_of_two() {
-            log::info!("CPU trace padded to {} cycles", state.traces.clock());
-            break;
-        }
     }
-
-    Ok(())
 }
diff --git a/evm/src/witness/transition.rs b/evm/src/witness/transition.rs
index e3e7b584..1418beba 100644
--- a/evm/src/witness/transition.rs
+++ b/evm/src/witness/transition.rs
@@ -286,7 +286,7 @@ fn log_kernel_instruction<F: Field>(state: &GenerationState<F>, op: Operation) {
     let pc = state.registers.program_counter;
     let is_interesting_offset = KERNEL
         .offset_label(pc)
-        .filter(|label| !label.starts_with("halt_pc"))
+        .filter(|label| !label.starts_with("halt"))
         .is_some();
     let level = if is_interesting_offset {
         log::Level::Debug
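One more illustration of the witness-side padding loop in `simulate_cpu` (the helper name `padded_len` below is hypothetical, distilled from the loop in the hunk above): the loop always pushes a padding row before checking the clock, so a trace that halts exactly at a power of two is padded up to the next one.

    fn padded_len(clock: usize) -> usize {
        let mut len = clock;
        loop {
            len += 1; // the loop body pushes a padding row first...
            if len.is_power_of_two() {
                return len; // ...and only then tests the trace length
            }
        }
    }

    fn main() {
        assert_eq!(padded_len(5), 8);
        assert_eq!(padded_len(15), 16);
        assert_eq!(padded_len(8), 16); // already a power of two: still grows
    }

This matches the constraint side: the last row must be a padding row, so at least one padding row always exists.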