Mirror of https://github.com/logos-storage/plonky2.git, synced 2026-01-07 16:23:12 +00:00
Change padding rule for CPU (#1234)
* Change padding rule for CPU
* Disable memory channels for padding rows
* Apply some of Jacqueline's comments
* Update halt routine
* Add clarifying comment
* Redundant constraints and padding bug
* Revert "Remove is_bootstrap_kernel column"

  This reverts commit 49d92cb8f1b0ae9de76872f76af4429699ff692f.

* Make halt_state implicit
* Move halting logic constraints to dedicated module
* Include new module
* Update some comments
Parent commit: 1afcafaded · This commit: 8903aec129
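The core of the change is that the halt ("padding") flag is no longer stored in its own column: a row is a padding row exactly when it is neither a bootstrap row nor a CPU cycle row. A rough plain-integer sketch of that bookkeeping (illustration only, not code from this commit; the real constraints operate on field/packed values in `CpuColumnsView`):

// Sketch over plain integers: `is_bootstrap_kernel` and `op_flags` stand in
// for the corresponding CpuColumnsView columns.
fn halt_state(is_bootstrap_kernel: i64, op_flags: &[i64]) -> i64 {
    // A row is a padding row iff it is neither a bootstrap row nor a CPU
    // cycle row, so the flag need not be stored in its own column.
    1 - is_bootstrap_kernel - op_flags.iter().sum::<i64>()
}

fn main() {
    let bootstrap_row = halt_state(1, &[0, 0, 0]); // 0: still bootstrapping the kernel
    let cycle_row = halt_state(0, &[0, 1, 0]);     // 0: executing an instruction
    let padding_row = halt_state(0, &[0, 0, 0]);   // 1: dummy padding row
    assert_eq!((bootstrap_row, cycle_row, padding_row), (0, 0, 1));
}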
@@ -25,6 +25,7 @@ pub(crate) fn generate_bootstrap_kernel<F: Field>(state: &mut GenerationState<F>
     for chunk in &KERNEL.code.iter().enumerate().chunks(NUM_GP_CHANNELS) {
         let mut cpu_row = CpuColumnsView::default();
         cpu_row.clock = F::from_canonical_usize(state.traces.clock());
+        cpu_row.is_bootstrap_kernel = F::ONE;

         // Write this chunk to memory, while simultaneously packing its bytes into a u32 word.
         for (channel, (addr, &byte)) in chunk.enumerate() {
@@ -39,6 +40,7 @@ pub(crate) fn generate_bootstrap_kernel<F: Field>(state: &mut GenerationState<F>

     let mut final_cpu_row = CpuColumnsView::default();
     final_cpu_row.clock = F::from_canonical_usize(state.traces.clock());
+    final_cpu_row.is_bootstrap_kernel = F::ONE;
     final_cpu_row.is_keccak_sponge = F::ONE;
     // The Keccak sponge CTL uses memory value columns for its inputs and outputs.
     final_cpu_row.mem_channels[0].value[0] = F::ZERO; // context
@@ -64,8 +66,8 @@ pub(crate) fn eval_bootstrap_kernel<F: Field, P: PackedField<Scalar = F>>(
     let next_values: &CpuColumnsView<_> = vars.next_values.borrow();

     // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
-    let local_is_bootstrap = P::ONES - local_values.op.into_iter().sum::<P>();
-    let next_is_bootstrap = P::ONES - next_values.op.into_iter().sum::<P>();
+    let local_is_bootstrap = local_values.is_bootstrap_kernel;
+    let next_is_bootstrap = next_values.is_bootstrap_kernel;
     yield_constr.constraint_first_row(local_is_bootstrap - P::ONES);
     yield_constr.constraint_last_row(local_is_bootstrap);
     let delta_is_bootstrap = next_is_bootstrap - local_is_bootstrap;
@@ -111,10 +113,8 @@ pub(crate) fn eval_bootstrap_kernel_circuit<F: RichField + Extendable<D>, const
     let one = builder.one_extension();

     // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
-    let local_is_bootstrap = builder.add_many_extension(local_values.op.iter());
-    let local_is_bootstrap = builder.sub_extension(one, local_is_bootstrap);
-    let next_is_bootstrap = builder.add_many_extension(next_values.op.iter());
-    let next_is_bootstrap = builder.sub_extension(one, next_is_bootstrap);
+    let local_is_bootstrap = local_values.is_bootstrap_kernel;
+    let next_is_bootstrap = next_values.is_bootstrap_kernel;
     let constraint = builder.sub_extension(local_is_bootstrap, one);
     yield_constr.constraint_first_row(builder, constraint);
     yield_constr.constraint_last_row(builder, local_is_bootstrap);
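For intuition (not part of the diff): the three IS_BOOTSTRAP_KERNEL conditions above, first row 1, last row 0, and per-step delta in {0, -1}, force the column to be a block of ones followed by a block of zeros. A plain-integer sketch of that check, with the delta condition written the same way the constraint is:

// Sketch: check a concrete column against the three bootstrap constraints.
fn satisfies_bootstrap_constraints(col: &[i64]) -> bool {
    let first_ok = col.first() == Some(&1);
    let last_ok = col.last() == Some(&0);
    let deltas_ok = col.windows(2).all(|w| {
        let delta = w[1] - w[0];
        // delta * (delta + 1) == 0  <=>  delta is 0 or -1
        delta * (delta + 1) == 0
    });
    first_ok && last_ok && deltas_ok
}

fn main() {
    // A block of 1s (bootstrap rows) followed by a block of 0s passes.
    assert!(satisfies_bootstrap_constraints(&[1, 1, 1, 0, 0]));
    // A column that turns back on after dropping to 0 violates the delta rule.
    assert!(!satisfies_bootstrap_constraints(&[1, 0, 1, 0]));
}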
@@ -35,6 +35,9 @@ pub struct MemoryChannelView<T: Copy> {
 #[repr(C)]
 #[derive(Clone, Copy, Eq, PartialEq, Debug)]
 pub struct CpuColumnsView<T: Copy> {
+    /// Filter. 1 if the row is part of bootstrapping the kernel code, 0 otherwise.
+    pub is_bootstrap_kernel: T,
+
     /// If CPU cycle: Current context.
     // TODO: this is currently unconstrained
     pub context: T,
@@ -34,14 +34,9 @@ const NATIVE_INSTRUCTIONS: [usize; 17] = [
     // not exceptions (also jump)
 ];

-pub(crate) fn get_halt_pcs<F: Field>() -> (F, F) {
-    let halt_pc0 = KERNEL.global_labels["halt_pc0"];
-    let halt_pc1 = KERNEL.global_labels["halt_pc1"];
-
-    (
-        F::from_canonical_usize(halt_pc0),
-        F::from_canonical_usize(halt_pc1),
-    )
+pub(crate) fn get_halt_pc<F: Field>() -> F {
+    let halt_pc = KERNEL.global_labels["halt"];
+    F::from_canonical_usize(halt_pc)
 }

 pub(crate) fn get_start_pc<F: Field>() -> F {
@@ -57,8 +52,15 @@ pub fn eval_packed_generic<P: PackedField>(
 ) {
     let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
     let is_cpu_cycle_next: P = COL_MAP.op.iter().map(|&col_i| nv[col_i]).sum();
-    // Once we start executing instructions, then we continue until the end of the table.
-    yield_constr.constraint_transition(is_cpu_cycle * (is_cpu_cycle_next - P::ONES));
+
+    let next_halt_state = P::ONES - nv.is_bootstrap_kernel - is_cpu_cycle_next;
+
+    // Once we start executing instructions, then we continue until the end of the table
+    // or we reach dummy padding rows. This, along with the constraints on the first row,
+    // enforces that operation flags and the halt flag are mutually exclusive over the entire
+    // CPU trace.
+    yield_constr
+        .constraint_transition(is_cpu_cycle * (is_cpu_cycle_next + next_halt_state - P::ONES));

     // If a row is a CPU cycle and executing a native instruction (implemented as a table row; not
     // microcoded) then the program counter is incremented by 1 to obtain the next row's program
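For intuition (again a plain-integer sketch, not code from the commit): the reworked transition constraint above allows a CPU cycle row to be followed either by another cycle row or by a padding row, but never by a bootstrap row; the old rule only allowed cycle to be followed by cycle.

// Sketch of the new transition constraint over plain integers. Row kinds are
// described by the bootstrap flag and the summed op flags; the halt flag is
// the implicit remainder.
fn transition_constraint(is_cycle: i64, next_is_bootstrap: i64, next_is_cycle: i64) -> i64 {
    let next_halt_state = 1 - next_is_bootstrap - next_is_cycle;
    // Zero iff a cycle row is followed by another cycle row or a padding row.
    is_cycle * (next_is_cycle + next_halt_state - 1)
}

fn main() {
    assert_eq!(transition_constraint(1, 0, 1), 0); // cycle -> cycle: allowed
    assert_eq!(transition_constraint(1, 0, 0), 0); // cycle -> padding: allowed
    assert_ne!(transition_constraint(1, 1, 0), 0); // cycle -> bootstrap: rejected
}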
@@ -79,16 +81,6 @@ pub fn eval_packed_generic<P: PackedField>(
     yield_constr.constraint_transition(is_last_noncpu_cycle * pc_diff);
     yield_constr.constraint_transition(is_last_noncpu_cycle * (nv.is_kernel_mode - P::ONES));
     yield_constr.constraint_transition(is_last_noncpu_cycle * nv.stack_len);
-
-    // The last row must be a CPU cycle row.
-    yield_constr.constraint_last_row(is_cpu_cycle - P::ONES);
-    // Also, the last row's `program_counter` must be inside the `halt` infinite loop. Note that
-    // that loop consists of two instructions, so we must check for `halt` and `halt_inner` labels.
-    let (halt_pc0, halt_pc1) = get_halt_pcs::<P::Scalar>();
-    yield_constr
-        .constraint_last_row((lv.program_counter - halt_pc0) * (lv.program_counter - halt_pc1));
-    // Finally, the last row must be in kernel mode.
-    yield_constr.constraint_last_row(lv.is_kernel_mode - P::ONES);
 }

 pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
@@ -97,11 +89,21 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     nv: &CpuColumnsView<ExtensionTarget<D>>,
     yield_constr: &mut RecursiveConstraintConsumer<F, D>,
 ) {
+    let one = builder.one_extension();
+
     let is_cpu_cycle = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i]));
     let is_cpu_cycle_next = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| nv[col_i]));
-    // Once we start executing instructions, then we continue until the end of the table.
+
+    let next_halt_state = builder.add_extension(nv.is_bootstrap_kernel, is_cpu_cycle_next);
+    let next_halt_state = builder.sub_extension(one, next_halt_state);
+
+    // Once we start executing instructions, then we continue until the end of the table
+    // or we reach dummy padding rows. This, along with the constraints on the first row,
+    // enforces that operation flags and the halt flag are mutually exclusive over the entire
+    // CPU trace.
     {
-        let constr = builder.mul_sub_extension(is_cpu_cycle, is_cpu_cycle_next, is_cpu_cycle);
+        let constr = builder.add_extension(is_cpu_cycle_next, next_halt_state);
+        let constr = builder.mul_sub_extension(is_cpu_cycle, constr, is_cpu_cycle);
         yield_constr.constraint_transition(builder, constr);
     }

@@ -144,30 +146,4 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
         let kernel_constr = builder.mul_extension(is_last_noncpu_cycle, nv.stack_len);
         yield_constr.constraint_transition(builder, kernel_constr);
     }
-
-    // The last row must be a CPU cycle row.
-    {
-        let one = builder.one_extension();
-        let constr = builder.sub_extension(is_cpu_cycle, one);
-        yield_constr.constraint_last_row(builder, constr);
-    }
-    // Also, the last row's `program_counter` must be inside the `halt` infinite loop. Note that
-    // that loop consists of two instructions, so we must check for `halt` and `halt_inner` labels.
-    {
-        let (halt_pc0, halt_pc1) = get_halt_pcs();
-        let halt_pc0_target = builder.constant_extension(halt_pc0);
-        let halt_pc1_target = builder.constant_extension(halt_pc1);
-
-        let halt_pc0_offset = builder.sub_extension(lv.program_counter, halt_pc0_target);
-        let halt_pc1_offset = builder.sub_extension(lv.program_counter, halt_pc1_target);
-        let constr = builder.mul_extension(halt_pc0_offset, halt_pc1_offset);
-
-        yield_constr.constraint_last_row(builder, constr);
-    }
-    // Finally, the last row must be in kernel mode.
-    {
-        let one = builder.one_extension();
-        let constr = builder.sub_extension(lv.is_kernel_mode, one);
-        yield_constr.constraint_last_row(builder, constr);
-    }
 }
@@ -8,6 +8,7 @@ use plonky2::field::packed::PackedField;
 use plonky2::field::types::Field;
 use plonky2::hash::hash_types::RichField;

+use super::halt;
 use crate::all_stark::Table;
 use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
 use crate::cpu::columns::{CpuColumnsView, COL_MAP, NUM_CPU_COLUMNS};
@@ -244,6 +245,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for CpuStark<F, D
         decode::eval_packed_generic(local_values, yield_constr);
         dup_swap::eval_packed(local_values, yield_constr);
         gas::eval_packed(local_values, next_values, yield_constr);
+        halt::eval_packed(local_values, next_values, yield_constr);
         jumps::eval_packed(local_values, next_values, yield_constr);
         membus::eval_packed(local_values, yield_constr);
         memio::eval_packed(local_values, next_values, yield_constr);
@@ -271,6 +273,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for CpuStark<F, D
         decode::eval_ext_circuit(builder, local_values, yield_constr);
         dup_swap::eval_ext_circuit(builder, local_values, yield_constr);
         gas::eval_ext_circuit(builder, local_values, next_values, yield_constr);
+        halt::eval_ext_circuit(builder, local_values, next_values, yield_constr);
         jumps::eval_ext_circuit(builder, local_values, next_values, yield_constr);
         membus::eval_ext_circuit(builder, local_values, yield_constr);
         memio::eval_ext_circuit(builder, local_values, next_values, yield_constr);
evm/src/cpu/halt.rs (new file, 98 lines)
@@ -0,0 +1,98 @@
+//! Once the CPU execution is over (i.e. reached the `halt` label in the kernel),
+//! the CPU trace will be padded with special dummy rows, incurring no memory overhead.
+
+use plonky2::field::extension::Extendable;
+use plonky2::field::packed::PackedField;
+use plonky2::hash::hash_types::RichField;
+use plonky2::iop::ext_target::ExtensionTarget;
+
+use super::control_flow::get_halt_pc;
+use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
+use crate::cpu::columns::{CpuColumnsView, COL_MAP};
+use crate::cpu::membus::NUM_GP_CHANNELS;
+
+pub fn eval_packed<P: PackedField>(
+    lv: &CpuColumnsView<P>,
+    nv: &CpuColumnsView<P>,
+    yield_constr: &mut ConstraintConsumer<P>,
+) {
+    let is_cpu_cycle: P = COL_MAP.op.iter().map(|&col_i| lv[col_i]).sum();
+    let is_cpu_cycle_next: P = COL_MAP.op.iter().map(|&col_i| nv[col_i]).sum();
+
+    let halt_state = P::ONES - lv.is_bootstrap_kernel - is_cpu_cycle;
+    let next_halt_state = P::ONES - nv.is_bootstrap_kernel - is_cpu_cycle_next;
+
+    // The halt flag must be boolean.
+    yield_constr.constraint(halt_state * (halt_state - P::ONES));
+    // Once we reach a padding row, there must be only padding rows.
+    yield_constr.constraint_transition(halt_state * (next_halt_state - P::ONES));
+
+    // Padding rows should have their memory channels disabled.
+    for i in 0..NUM_GP_CHANNELS {
+        let channel = lv.mem_channels[i];
+        yield_constr.constraint(halt_state * channel.used);
+    }
+
+    // The last row must be a dummy padding row.
+    yield_constr.constraint_last_row(halt_state - P::ONES);
+
+    // Also, a padding row's `program_counter` must be at the `halt` label.
+    // In particular, it ensures that the first padding row may only be added
+    // after we jumped to the `halt` function. Subsequent padding rows may set
+    // the `program_counter` to arbitrary values (there's no transition
+    // constraints) so we can place this requirement on them too.
+    let halt_pc = get_halt_pc::<P::Scalar>();
+    yield_constr.constraint(halt_state * (lv.program_counter - halt_pc));
+}
+
+pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
+    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
+    lv: &CpuColumnsView<ExtensionTarget<D>>,
+    nv: &CpuColumnsView<ExtensionTarget<D>>,
+    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
+) {
+    let one = builder.one_extension();
+
+    let is_cpu_cycle = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| lv[col_i]));
+    let is_cpu_cycle_next = builder.add_many_extension(COL_MAP.op.iter().map(|&col_i| nv[col_i]));
+
+    let halt_state = builder.add_extension(lv.is_bootstrap_kernel, is_cpu_cycle);
+    let halt_state = builder.sub_extension(one, halt_state);
+    let next_halt_state = builder.add_extension(nv.is_bootstrap_kernel, is_cpu_cycle_next);
+    let next_halt_state = builder.sub_extension(one, next_halt_state);
+
+    // The halt flag must be boolean.
+    let constr = builder.mul_sub_extension(halt_state, halt_state, halt_state);
+    yield_constr.constraint(builder, constr);
+    // Once we reach a padding row, there must be only padding rows.
+    let constr = builder.mul_sub_extension(halt_state, next_halt_state, halt_state);
+    yield_constr.constraint_transition(builder, constr);
+
+    // Padding rows should have their memory channels disabled.
+    for i in 0..NUM_GP_CHANNELS {
+        let channel = lv.mem_channels[i];
+        let constr = builder.mul_extension(halt_state, channel.used);
+        yield_constr.constraint(builder, constr);
+    }
+
+    // The last row must be a dummy padding row.
+    {
+        let one = builder.one_extension();
+        let constr = builder.sub_extension(halt_state, one);
+        yield_constr.constraint_last_row(builder, constr);
+    }
+
+    // Also, a padding row's `program_counter` must be at the `halt` label.
+    // In particular, it ensures that the first padding row may only be added
+    // after we jumped to the `halt` function. Subsequent padding rows may set
+    // the `program_counter` to arbitrary values (there's no transition
+    // constraints) so we can place this requirement on them too.
+    {
+        let halt_pc = get_halt_pc();
+        let halt_pc_target = builder.constant_extension(halt_pc);
+        let constr = builder.sub_extension(lv.program_counter, halt_pc_target);
+        let constr = builder.mul_extension(halt_state, constr);
+
+        yield_constr.constraint(builder, constr);
+    }
+}
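Taken together with the bootstrap and control-flow constraints, the new module pins the overall trace shape: bootstrap rows, then CPU cycle rows, then padding rows running to the end of the table, with padding rows keeping `program_counter` at the `halt` label and their memory channels unused. A rough shape-checker over plain values (illustration only; `RowKind`, `Row` and the three-element channel array are stand-ins, not types from the crate):

#[derive(Clone, Copy, PartialEq)]
enum RowKind {
    Bootstrap,
    Cycle,
    Padding,
}

struct Row {
    kind: RowKind,
    program_counter: usize,
    mem_channels_used: [bool; 3], // stand-in for the NUM_GP_CHANNELS channels
}

fn is_valid_shape(trace: &[Row], halt_pc: usize) -> bool {
    let kinds: Vec<RowKind> = trace.iter().map(|r| r.kind).collect();
    // Row kinds must appear as bootstrap rows, then cycle rows, then padding
    // rows, and padding must run to the end of the table.
    let monotone = kinds.windows(2).all(|w| match (w[0], w[1]) {
        (RowKind::Bootstrap, _) => true,
        (RowKind::Cycle, RowKind::Bootstrap) => false,
        (RowKind::Cycle, _) => true,
        (RowKind::Padding, RowKind::Padding) => true,
        (RowKind::Padding, _) => false,
    });
    let ends_padded = kinds.last() == Some(&RowKind::Padding);
    // Padding rows are pinned to the `halt` label and use no memory channels.
    let padding_ok = trace
        .iter()
        .filter(|r| r.kind == RowKind::Padding)
        .all(|r| r.program_counter == halt_pc && r.mem_channels_used.iter().all(|&u| !u));
    monotone && ends_padded && padding_ok
}

fn main() {
    let halt_pc = 0xdead; // hypothetical label offset, for illustration only
    let trace = vec![
        Row { kind: RowKind::Bootstrap, program_counter: 0, mem_channels_used: [true, false, false] },
        Row { kind: RowKind::Cycle, program_counter: 7, mem_channels_used: [true, true, false] },
        Row { kind: RowKind::Padding, program_counter: halt_pc, mem_channels_used: [false; 3] },
        Row { kind: RowKind::Padding, program_counter: halt_pc, mem_channels_used: [false; 3] },
    ];
    assert!(is_valid_shape(&trace, halt_pc));
}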
@@ -1,6 +1,2 @@
 global halt:
-    PUSH halt_pc0
-global halt_pc0:
-    DUP1
-global halt_pc1:
-    JUMP
+    PANIC
@@ -6,6 +6,7 @@ pub mod cpu_stark;
 pub(crate) mod decode;
 mod dup_swap;
 mod gas;
+mod halt;
 mod jumps;
 pub mod kernel;
 pub(crate) mod membus;
@@ -16,6 +16,7 @@ use GlobalMetadata::{
 use crate::all_stark::{AllStark, NUM_TABLES};
 use crate::config::StarkConfig;
 use crate::cpu::bootstrap_kernel::generate_bootstrap_kernel;
+use crate::cpu::columns::CpuColumnsView;
 use crate::cpu::kernel::aggregator::KERNEL;
 use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
 use crate::generation::outputs::{get_outputs, GenerationOutputs};
@@ -278,26 +279,36 @@ pub fn generate_traces<F: RichField + Extendable<D>, const D: usize>(
 fn simulate_cpu<F: RichField + Extendable<D>, const D: usize>(
     state: &mut GenerationState<F>,
 ) -> anyhow::Result<()> {
-    let halt_pc0 = KERNEL.global_labels["halt_pc0"];
-    let halt_pc1 = KERNEL.global_labels["halt_pc1"];
+    let halt_pc = KERNEL.global_labels["halt"];

-    let mut already_in_halt_loop = false;
     loop {
         // If we've reached the kernel's halt routine, and our trace length is a power of 2, stop.
         let pc = state.registers.program_counter;
-        let in_halt_loop = state.registers.is_kernel && (pc == halt_pc0 || pc == halt_pc1);
-        if in_halt_loop && !already_in_halt_loop {
+        let halt = state.registers.is_kernel && pc == halt_pc;
+        if halt {
             log::info!("CPU halted after {} cycles", state.traces.clock());
+
+            // Padding
+            let mut row = CpuColumnsView::<F>::default();
+            row.clock = F::from_canonical_usize(state.traces.clock());
+            row.context = F::from_canonical_usize(state.registers.context);
+            row.program_counter = F::from_canonical_usize(pc);
+            row.is_kernel_mode = F::ONE;
+            row.gas = F::from_canonical_u64(state.registers.gas_used);
+            row.stack_len = F::from_canonical_usize(state.registers.stack_len);
+
+            loop {
+                state.traces.push_cpu(row);
+                row.clock += F::ONE;
+                if state.traces.clock().is_power_of_two() {
+                    break;
+                }
+            }
+            log::info!("CPU trace padded to {} cycles", state.traces.clock());
+
+            return Ok(());
         }
-        already_in_halt_loop |= in_halt_loop;

         transition(state)?;
-
-        if already_in_halt_loop && state.traces.clock().is_power_of_two() {
-            log::info!("CPU trace padded to {} cycles", state.traces.clock());
-            break;
-        }
     }
-
-    Ok(())
 }
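The padding loop above pushes at least one dummy row (the power-of-two check runs after the push), so the trace always grows to the next power of two strictly above the clock value at halt. A small sketch of the resulting length, assuming `state.traces.clock()` counts the CPU rows pushed so far:

// Sketch: final trace length produced by the padding loop, given the clock
// value when the `halt` label is reached.
fn padded_len(clock_at_halt: usize) -> usize {
    let mut len = clock_at_halt + 1; // the first padding row is pushed unconditionally
    while !len.is_power_of_two() {
        len += 1;
    }
    len
}

fn main() {
    assert_eq!(padded_len(5), 8);
    assert_eq!(padded_len(7), 8);
    // Even if the length is already a power of two, one row is still pushed,
    // so the trace grows to the next power of two.
    assert_eq!(padded_len(8), 16);
}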
@@ -286,7 +286,7 @@ fn log_kernel_instruction<F: Field>(state: &GenerationState<F>, op: Operation) {
     let pc = state.registers.program_counter;
     let is_interesting_offset = KERNEL
         .offset_label(pc)
-        .filter(|label| !label.starts_with("halt_pc"))
+        .filter(|label| !label.starts_with("halt"))
         .is_some();
     let level = if is_interesting_offset {
         log::Level::Debug