Remove full memory channel (#1450)

* Remove a full memory channel

* Remove unnecessary uses

* Revert PDF change

* Apply comments

* Apply more comments

* Move disabling functions to cpu_stark.rs

* Apply comments
Hamy Ratoanina 2024-01-11 11:36:47 -05:00 committed by GitHub
parent a78a29a698
commit f80ebe77f3
11 changed files with 316 additions and 323 deletions



@@ -135,6 +135,11 @@ fn ctl_byte_packing<F: Field>() -> CrossTableLookup<F> {
cpu_stark::ctl_data_byte_packing_push(),
Some(cpu_stark::ctl_filter_byte_packing_push()),
);
let cpu_jumptable_read_looking = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_jumptable_read(),
Some(cpu_stark::ctl_filter_syscall_exceptions()),
);
let byte_packing_looked = TableWithColumns::new(
Table::BytePacking,
byte_packing_stark::ctl_looked_data(),
@@ -145,6 +150,7 @@ fn ctl_byte_packing<F: Field>() -> CrossTableLookup<F> {
cpu_packing_looking,
cpu_unpacking_looking,
cpu_push_packing_looking,
cpu_jumptable_read_looking,
],
byte_packing_looked,
)
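For intuition, the relationship `CrossTableLookup::new` enforces between these looking tables and `byte_packing_looked` can be modeled as multiset equality over filtered rows. A standalone toy sketch, with plain integers standing in for field elements and `TableWithColumns` (illustrative, not the crate's API):

use std::collections::HashMap;

// Toy model: a cross-table lookup holds when the filtered rows gathered from
// every looking table form the same multiset as the looked table's rows.
fn multiset(rows: &[Vec<u64>]) -> HashMap<Vec<u64>, usize> {
    let mut counts = HashMap::new();
    for row in rows {
        *counts.entry(row.clone()).or_insert(0) += 1;
    }
    counts
}

fn ctl_holds(looking: &[Vec<Vec<u64>>], looked: &[Vec<u64>]) -> bool {
    let all: Vec<Vec<u64>> = looking.iter().flatten().cloned().collect();
    multiset(&all) == multiset(looked)
}

fn main() {
    // A CPU jumptable read (toy tuple: is_read, len, timestamp) must be
    // matched by exactly one byte-packing row carrying the same data.
    let cpu_rows = vec![vec![1, 3, 42]];
    let byte_packing_rows = vec![vec![1, 3, 42]];
    assert!(ctl_holds(&[cpu_rows], &byte_packing_rows));
}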
@@ -238,6 +244,16 @@ fn ctl_memory<F: Field>() -> CrossTableLookup<F> {
cpu_stark::ctl_data_partial_memory::<F>(),
Some(cpu_stark::ctl_filter_partial_memory()),
);
let cpu_set_context_write = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_memory_old_sp_write_set_context::<F>(),
Some(cpu_stark::ctl_filter_set_context()),
);
let cpu_set_context_read = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_memory_new_sp_read_set_context::<F>(),
Some(cpu_stark::ctl_filter_set_context()),
);
let keccak_sponge_reads = (0..KECCAK_RATE_BYTES).map(|i| {
TableWithColumns::new(
Table::KeccakSponge,
@@ -252,12 +268,17 @@ fn ctl_memory<F: Field>() -> CrossTableLookup<F> {
Some(byte_packing_stark::ctl_looking_memory_filter(i)),
)
});
let all_lookers = iter::once(cpu_memory_code_read)
.chain(cpu_memory_gp_ops)
.chain(iter::once(cpu_push_write_ops))
.chain(keccak_sponge_reads)
.chain(byte_packing_ops)
.collect();
let all_lookers = vec![
cpu_memory_code_read,
cpu_push_write_ops,
cpu_set_context_write,
cpu_set_context_read,
]
.into_iter()
.chain(cpu_memory_gp_ops)
.chain(keccak_sponge_reads)
.chain(byte_packing_ops)
.collect();
let memory_looked = TableWithColumns::new(
Table::Memory,
memory_stark::ctl_data(),


@@ -7,11 +7,12 @@ use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use super::columns::ops::OpsColumnsView;
use super::cpu_stark::{disable_unused_channels, disable_unused_channels_circuit};
use super::membus::NUM_GP_CHANNELS;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
use crate::memory::segments::{Segment, SEGMENT_SCALING_FACTOR};
use crate::memory::segments::Segment;
use crate::memory::VALUE_LIMBS;
// If true, the instruction will keep the current context for the next row.
@@ -95,12 +96,7 @@ fn eval_packed_get<P: PackedField>(
yield_constr.constraint(filter * (nv.stack_len - (lv.stack_len + P::ONES)));
// Unused channels.
for i in 1..NUM_GP_CHANNELS {
if i != 3 {
let channel = lv.mem_channels[i];
yield_constr.constraint(filter * channel.used);
}
}
disable_unused_channels(lv, filter, vec![1], yield_constr);
yield_constr.constraint(filter * nv.mem_channels[0].used);
}
@@ -137,13 +133,7 @@ fn eval_ext_circuit_get<F: RichField + Extendable<D>, const D: usize>(
}
// Unused channels.
for i in 1..NUM_GP_CHANNELS {
if i != 3 {
let channel = lv.mem_channels[i];
let constr = builder.mul_extension(filter, channel.used);
yield_constr.constraint(builder, constr);
}
}
disable_unused_channels_circuit(builder, lv, filter, vec![1], yield_constr);
{
let constr = builder.mul_extension(filter, nv.mem_channels[0].used);
yield_constr.constraint(builder, constr);
@@ -158,12 +148,6 @@ fn eval_packed_set<P: PackedField>(
) {
let filter = lv.op.context_op * lv.opcode_bits[0];
let stack_top = lv.mem_channels[0].value;
let write_old_sp_channel = lv.mem_channels[1];
let read_new_sp_channel = lv.mem_channels[2];
// We need to unscale the context metadata segment and related field.
let ctx_metadata_segment = P::Scalar::from_canonical_usize(Segment::ContextMetadata.unscale());
let stack_size_field = P::Scalar::from_canonical_usize(ContextMetadata::StackSize.unscale());
let local_sp_dec = lv.stack_len - P::ONES;
// The next row's context is read from stack_top.
yield_constr.constraint(filter * (stack_top[2] - nv.context));
@@ -171,27 +155,9 @@
yield_constr.constraint(filter * limb);
}
// The old SP is decremented (since the new context was popped) and written to memory.
yield_constr.constraint(filter * (write_old_sp_channel.value[0] - local_sp_dec));
for &limb in &write_old_sp_channel.value[1..] {
yield_constr.constraint(filter * limb);
}
yield_constr.constraint(filter * (write_old_sp_channel.used - P::ONES));
yield_constr.constraint(filter * write_old_sp_channel.is_read);
yield_constr.constraint(filter * (write_old_sp_channel.addr_context - lv.context));
yield_constr.constraint(filter * (write_old_sp_channel.addr_segment - ctx_metadata_segment));
yield_constr.constraint(filter * (write_old_sp_channel.addr_virtual - stack_size_field));
// The old SP is decremented (since the new context was popped) and stored in memory.
// The new SP is loaded from memory.
yield_constr.constraint(filter * (read_new_sp_channel.value[0] - nv.stack_len));
for &limb in &read_new_sp_channel.value[1..] {
yield_constr.constraint(filter * limb);
}
yield_constr.constraint(filter * (read_new_sp_channel.used - P::ONES));
yield_constr.constraint(filter * (read_new_sp_channel.is_read - P::ONES));
yield_constr.constraint(filter * (read_new_sp_channel.addr_context - nv.context));
yield_constr.constraint(filter * (read_new_sp_channel.addr_segment - ctx_metadata_segment));
yield_constr.constraint(filter * (read_new_sp_channel.addr_virtual - stack_size_field));
// This is all done with CTLs: nothing is constrained here.
// Constrain stack_inv_aux_2.
let new_top_channel = nv.mem_channels[0];
@@ -200,17 +166,19 @@ fn eval_packed_set<P: PackedField>(
* (lv.general.stack().stack_inv_aux * lv.opcode_bits[0]
- lv.general.stack().stack_inv_aux_2),
);
// The new top is loaded in memory channel 3, if the stack isn't empty (see eval_packed).
// The new top is loaded in memory channel 2, if the stack isn't empty (see eval_packed).
for (&limb_new_top, &limb_read_top) in new_top_channel
.value
.iter()
.zip(lv.mem_channels[3].value.iter())
.zip(lv.mem_channels[2].value.iter())
{
yield_constr.constraint(
lv.op.context_op * lv.general.stack().stack_inv_aux_2 * (limb_new_top - limb_read_top),
);
}
// Unused channels.
disable_unused_channels(lv, filter, vec![1], yield_constr);
yield_constr.constraint(filter * new_top_channel.used);
}
@@ -224,17 +192,6 @@ fn eval_ext_circuit_set<F: RichField + Extendable<D>, const D: usize>(
) {
let filter = builder.mul_extension(lv.op.context_op, lv.opcode_bits[0]);
let stack_top = lv.mem_channels[0].value;
let write_old_sp_channel = lv.mem_channels[1];
let read_new_sp_channel = lv.mem_channels[2];
// We need to unscale the context metadata segment and related field.
let ctx_metadata_segment = builder.constant_extension(F::Extension::from_canonical_usize(
Segment::ContextMetadata.unscale(),
));
let stack_size_field = builder.constant_extension(F::Extension::from_canonical_usize(
ContextMetadata::StackSize.unscale(),
));
let one = builder.one_extension();
let local_sp_dec = builder.sub_extension(lv.stack_len, one);
// The next row's context is read from stack_top.
{
@@ -247,73 +204,9 @@ fn eval_ext_circuit_set<F: RichField + Extendable<D>, const D: usize>(
yield_constr.constraint(builder, constr);
}
// The old SP is decremented (since the new context was popped) and written to memory.
{
let diff = builder.sub_extension(write_old_sp_channel.value[0], local_sp_dec);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
for &limb in &write_old_sp_channel.value[1..] {
let constr = builder.mul_extension(filter, limb);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, write_old_sp_channel.used, filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_extension(filter, write_old_sp_channel.is_read);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(write_old_sp_channel.addr_context, lv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(write_old_sp_channel.addr_segment, ctx_metadata_segment);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(write_old_sp_channel.addr_virtual, stack_size_field);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
// The old SP is decremented (since the new context was popped) and stored in memory.
// The new SP is loaded from memory.
{
let diff = builder.sub_extension(read_new_sp_channel.value[0], nv.stack_len);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
for &limb in &read_new_sp_channel.value[1..] {
let constr = builder.mul_extension(filter, limb);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, read_new_sp_channel.used, filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, read_new_sp_channel.is_read, filter);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(read_new_sp_channel.addr_context, nv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(read_new_sp_channel.addr_segment, ctx_metadata_segment);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(read_new_sp_channel.addr_virtual, stack_size_field);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
// This is all done with CTLs: nothing is constrained here.
// Constrain stack_inv_aux_2.
let new_top_channel = nv.mem_channels[0];
@@ -326,11 +219,11 @@ fn eval_ext_circuit_set<F: RichField + Extendable<D>, const D: usize>(
let constr = builder.mul_extension(lv.op.context_op, diff);
yield_constr.constraint(builder, constr);
}
// The new top is loaded in memory channel 3, if the stack isn't empty (see eval_packed).
// The new top is loaded in memory channel 2, if the stack isn't empty (see eval_packed).
for (&limb_new_top, &limb_read_top) in new_top_channel
.value
.iter()
.zip(lv.mem_channels[3].value.iter())
.zip(lv.mem_channels[2].value.iter())
{
let diff = builder.sub_extension(limb_new_top, limb_read_top);
let prod = builder.mul_extension(lv.general.stack().stack_inv_aux_2, diff);
@@ -338,6 +231,8 @@ fn eval_ext_circuit_set<F: RichField + Extendable<D>, const D: usize>(
yield_constr.constraint(builder, constr);
}
// Unused channels.
disable_unused_channels_circuit(builder, lv, filter, vec![1], yield_constr);
{
let constr = builder.mul_extension(filter, new_top_channel.used);
yield_constr.constraint(builder, constr);
@@ -355,10 +250,10 @@ pub(crate) fn eval_packed<P: PackedField>(
eval_packed_set(lv, nv, yield_constr);
// Stack constraints.
// Both operations use memory channel 3. The operations are similar enough that
// Both operations use memory channel 2. The operations are similar enough that
// we can constrain both at the same time.
let filter = lv.op.context_op;
let channel = lv.mem_channels[3];
let channel = lv.mem_channels[2];
// For get_context, we check if lv.stack_len is 0. For set_context, we check if nv.stack_len is 0.
// However, for get_context, we can deduce lv.stack_len from nv.stack_len since the operation only pushes.
let stack_len = nv.stack_len - (P::ONES - lv.opcode_bits[0]);
@@ -396,10 +291,10 @@ pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
eval_ext_circuit_set(builder, lv, nv, yield_constr);
// Stack constraints.
// Both operations use memory channel 3. The operations are similar enough that
// Both operations use memory channel 2. The operations are similar enough that
// we can constrain both at the same time.
let filter = lv.op.context_op;
let channel = lv.mem_channels[3];
let channel = lv.mem_channels[2];
// For get_context, we check if lv.stack_len is 0. For set_context, we check if nv.stack_len is 0.
// However, for get_context, we can deduce lv.stack_len from nv.stack_len since the operation only pushes.
let diff = builder.add_const_extension(lv.opcode_bits[0], -F::ONE);


@@ -11,6 +11,7 @@ use plonky2::iop::ext_target::ExtensionTarget;
use super::columns::CpuColumnsView;
use super::halt;
use super::kernel::constants::context_metadata::ContextMetadata;
use super::membus::NUM_GP_CHANNELS;
use crate::all_stark::Table;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
@@ -21,7 +22,7 @@ use crate::cpu::{
};
use crate::cross_table_lookup::{Column, Filter, TableWithColumns};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::memory::segments::{Segment, SEGMENT_SCALING_FACTOR};
use crate::memory::segments::Segment;
use crate::memory::{NUM_CHANNELS, VALUE_LIMBS};
use crate::stark::Stark;
@@ -189,6 +190,40 @@ pub(crate) fn ctl_filter_byte_unpacking<F: Field>() -> Filter<F> {
)
}
/// Creates the vector of `Columns` corresponding to three consecutive (byte) reads in memory.
/// It's used by syscalls and exceptions to read an address in a jumptable.
pub(crate) fn ctl_data_jumptable_read<F: Field>() -> Vec<Column<F>> {
let is_read = Column::constant(F::ONE);
let mut res = vec![is_read];
// When reading the jumptable, the address to start reading from is in
// GP channel 1; the result is in GP channel 1's values.
let channel_map = COL_MAP.mem_channels[1];
res.extend(Column::singles([
channel_map.addr_context,
channel_map.addr_segment,
channel_map.addr_virtual,
]));
let val = Column::singles(channel_map.value);
// len is always 3.
let len = Column::constant(F::from_canonical_usize(3));
res.push(len);
let num_channels = F::from_canonical_usize(NUM_CHANNELS);
let timestamp = Column::linear_combination([(COL_MAP.clock, num_channels)]);
res.push(timestamp);
res.extend(val);
res
}
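A plain-data sketch of the row this function builds may help; the column order mirrors the pushes above, and the constants (NUM_CHANNELS = 5, BYTES_PER_OFFSET = 3) are assumptions based on this commit:

// Plain-data sketch of the row this function sends to the byte-packing table.
// NUM_CHANNELS = 5 is an assumption (code channel + 3 GP channels + partial).
fn jumptable_read_row(addr: (u64, u64, u64), value: [u64; 8], clock: u64) -> Vec<u64> {
    let num_channels = 5;
    let mut row = vec![1]; // is_read
    row.extend([addr.0, addr.1, addr.2]); // (context, segment, virtual) of GP channel 1
    row.push(3); // len: always BYTES_PER_OFFSET = 3
    row.push(clock * num_channels); // timestamp
    row.extend(value); // the channel's value limbs, packed address in limb 0
    row
}

fn main() {
    let row = jumptable_read_row((0, 0, 100), [0x012345, 0, 0, 0, 0, 0, 0, 0], 7);
    assert_eq!(row.len(), 1 + 3 + 1 + 1 + 8);
}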
/// CTL filter for syscalls and exceptions.
pub(crate) fn ctl_filter_syscall_exceptions<F: Field>() -> Filter<F> {
Filter::new_simple(Column::sum([COL_MAP.op.syscall, COL_MAP.op.exception]))
}
/// Creates the vector of `Columns` corresponding to the contents of the CPU registers when performing a `PUSH`.
/// `PUSH` internal reads are done by calling `BytePackingStark`.
pub(crate) fn ctl_data_byte_packing_push<F: Field>() -> Vec<Column<F>> {
@@ -305,6 +340,53 @@ pub(crate) fn ctl_data_partial_memory<F: Field>() -> Vec<Column<F>> {
cols
}
/// Old stack pointer write for SET_CONTEXT.
pub(crate) fn ctl_data_memory_old_sp_write_set_context<F: Field>() -> Vec<Column<F>> {
let mut cols = vec![
Column::constant(F::ZERO), // is_read
Column::single(COL_MAP.context), // addr_context
Column::constant(F::from_canonical_usize(Segment::ContextMetadata.unscale())), // addr_segment
Column::constant(F::from_canonical_usize(
ContextMetadata::StackSize.unscale(),
)), // addr_virtual
];
// Low limb is current stack length minus one.
cols.push(Column::linear_combination_with_constant(
[(COL_MAP.stack_len, F::ONE)],
-F::ONE,
));
// High limbs of the value are all zero.
cols.extend(repeat(Column::constant(F::ZERO)).take(VALUE_LIMBS - 1));
cols.push(mem_time_and_channel(MEM_GP_CHANNELS_IDX_START + 1));
cols
}
/// New stack pointer read for SET_CONTEXT.
pub(crate) fn ctl_data_memory_new_sp_read_set_context<F: Field>() -> Vec<Column<F>> {
let mut cols = vec![
Column::constant(F::ONE), // is_read
Column::single(COL_MAP.mem_channels[0].value[2]), // addr_context (in the top of the stack)
Column::constant(F::from_canonical_usize(Segment::ContextMetadata.unscale())), // addr_segment
Column::constant(F::from_canonical_u64(
ContextMetadata::StackSize as u64 - Segment::ContextMetadata as u64,
)), // addr_virtual
];
// Low limb is new stack length.
cols.push(Column::single_next_row(COL_MAP.stack_len));
// High limbs of the value are all zero.
cols.extend(repeat(Column::constant(F::ZERO)).take(VALUE_LIMBS - 1));
cols.push(mem_time_and_channel(MEM_GP_CHANNELS_IDX_START + 2));
cols
}
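Together with `ctl_filter_set_context` below, these two vectors replace the per-channel constraints deleted from contextops.rs: the memory table now sees one write and one read per SET_CONTEXT without the CPU dedicating channels to them. A standalone sketch of the two claims (plain data; the zeroed segment/virtual constants and the timestamp formula are assumptions):

// Illustrative mock of the two memory claims emitted for one SET_CONTEXT, as
// tuples (is_read, addr_context, addr_segment, addr_virtual, value_limb0, timestamp).
fn set_context_claims(
    old_ctx: u64,
    new_ctx: u64,
    stack_len: u64,
    new_stack_len: u64,
    clock: u64,
) -> [(u64, u64, u64, u64, u64, u64); 2] {
    const CTX_METADATA: u64 = 0; // stand-in for Segment::ContextMetadata.unscale()
    const STACK_SIZE: u64 = 0; // stand-in for ContextMetadata::StackSize.unscale()
    const NUM_CHANNELS: u64 = 5; // assumed: code + 3 GP + partial
    [
        // Old SP, decremented, written in the old context (GP channel 1).
        (0, old_ctx, CTX_METADATA, STACK_SIZE, stack_len - 1, clock * NUM_CHANNELS + 2),
        // New SP read from the new context (GP channel 2).
        (1, new_ctx, CTX_METADATA, STACK_SIZE, new_stack_len, clock * NUM_CHANNELS + 3),
    ]
}

fn main() {
    let [write, read] = set_context_claims(7, 9, 4, 2, 11);
    assert!(write.5 < read.5); // the write must precede the read
}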
/// CTL filter for code read and write operations.
pub(crate) fn ctl_filter_code_memory<F: Field>() -> Filter<F> {
Filter::new_simple(Column::sum(COL_MAP.op.iter()))
@@ -319,6 +401,49 @@ pub(crate) fn ctl_filter_partial_memory<F: Field>() -> Filter<F> {
Filter::new_simple(Column::single(COL_MAP.partial_channel.used))
}
/// CTL filter for the `SET_CONTEXT` operation.
/// SET_CONTEXT is differentiated from GET_CONTEXT by its zeroth opcode bit, which is set to 1.
pub(crate) fn ctl_filter_set_context<F: Field>() -> Filter<F> {
Filter::new(
vec![(
Column::single(COL_MAP.op.context_op),
Column::single(COL_MAP.opcode_bits[0]),
)],
vec![],
)
}
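The filter is a single product rather than a simple column: it evaluates to 1 exactly on rows where `context_op` is set and the opcode's low bit is 1. A toy evaluation under that assumed semantics:

// Toy evaluation of the product filter above: f = context_op * opcode_bits[0].
fn set_context_filter(context_op: u64, opcode_bit0: u64) -> u64 {
    context_op * opcode_bit0
}

fn main() {
    assert_eq!(set_context_filter(1, 1), 1); // SET_CONTEXT row
    assert_eq!(set_context_filter(1, 0), 0); // GET_CONTEXT row
    assert_eq!(set_context_filter(0, 1), 0); // any other operation
}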
/// Disable the specified memory channels.
/// Since channel 0 contains the top of the stack and is handled specially,
/// channels to disable are 1, 2 or both. All cases can be expressed as a vec.
pub(crate) fn disable_unused_channels<P: PackedField>(
lv: &CpuColumnsView<P>,
filter: P,
channels: Vec<usize>,
yield_constr: &mut ConstraintConsumer<P>,
) {
for i in channels {
yield_constr.constraint(filter * lv.mem_channels[i].used);
}
}
/// Circuit version of `disable_unused_channels`.
/// Disable the specified memory channels.
/// Since channel 0 contains the top of the stack and is handled specially,
/// channels to disable are 1, 2 or both. All cases can be expressed as a vec.
pub(crate) fn disable_unused_channels_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
filter: ExtensionTarget<D>,
channels: Vec<usize>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
for i in channels {
let constr = builder.mul_extension(filter, lv.mem_channels[i].used);
yield_constr.constraint(builder, constr);
}
}
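A standalone model of what these helpers constrain (illustrative types, not the crate's): each emitted constraint is `filter * used`, which must equal zero, so a channel's `used` flag is forced to 0 on every row where the filter is active.

fn unused_channel_constraints(filter: u64, used: &[u64], channels: &[usize]) -> Vec<u64> {
    channels.iter().map(|&i| filter * used[i]).collect()
}

fn main() {
    // A context-op row: channels 0 and 2 in use, channel 1 idle.
    let used = [1, 0, 1];
    assert!(unused_channel_constraints(1, &used, &[1]).iter().all(|&c| c == 0));
    // Marking channel 1 as used would violate the constraint.
    assert!(unused_channel_constraints(1, &[1, 1, 1], &[1]).iter().any(|&c| c != 0));
}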
/// Structure representing the CPU Stark.
#[derive(Copy, Clone, Default)]
pub(crate) struct CpuStark<F, const D: usize> {


@@ -138,11 +138,6 @@ fn eval_packed_dup<P: PackedField>(
// Disable next top.
yield_constr.constraint(filter * nv.mem_channels[0].used);
// Constrain unused channels.
for i in 3..NUM_GP_CHANNELS {
yield_constr.constraint(filter * lv.mem_channels[i].used);
}
}
/// Circuit version of `eval_packed_dup`.
@@ -205,12 +200,6 @@ fn eval_ext_circuit_dup<F: RichField + Extendable<D>, const D: usize>(
let constr = builder.mul_extension(filter, nv.mem_channels[0].used);
yield_constr.constraint(builder, constr);
}
// Constrain unused channels.
for i in 3..NUM_GP_CHANNELS {
let constr = builder.mul_extension(filter, lv.mem_channels[i].used);
yield_constr.constraint(builder, constr);
}
}
/// Evaluates constraints for SWAP.
@@ -245,11 +234,6 @@ fn eval_packed_swap<P: PackedField>(
// Disable next top.
yield_constr.constraint(filter * nv.mem_channels[0].used);
// Constrain unused channels.
for i in 3..NUM_GP_CHANNELS {
yield_constr.constraint(filter * lv.mem_channels[i].used);
}
}
/// Circuit version of `eval_packed_swap`.
@@ -314,12 +298,6 @@ fn eval_ext_circuit_swap<F: RichField + Extendable<D>, const D: usize>(
let constr = builder.mul_extension(filter, nv.mem_channels[0].used);
yield_constr.constraint(builder, constr);
}
// Constrain unused channels.
for i in 3..NUM_GP_CHANNELS {
let constr = builder.mul_extension(filter, lv.mem_channels[i].used);
yield_constr.constraint(builder, constr);
}
}
/// Evaluates the constraints for the DUP and SWAP opcodes.


@@ -16,7 +16,7 @@ use crate::cpu::kernel::tests::account_code::initialize_mpts;
use crate::generation::mpt::{AccountRlp, LegacyReceiptRlp};
use crate::generation::rlp::all_rlp_prover_inputs_reversed;
use crate::generation::TrieInputs;
use crate::memory::segments::{Segment, SEGMENT_SCALING_FACTOR};
use crate::memory::segments::Segment;
use crate::proof::{BlockHashes, BlockMetadata, TrieRoots};
use crate::util::h2u;
use crate::GenerationInputs;


@@ -7,7 +7,7 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer
use crate::cpu::columns::CpuColumnsView;
/// General-purpose memory channels; they can read and write to all contexts/segments/addresses.
pub(crate) const NUM_GP_CHANNELS: usize = 4;
pub(crate) const NUM_GP_CHANNELS: usize = 3;
/// Indices for code and general purpose memory channels.
pub mod channel_indices {
@@ -29,8 +29,11 @@ pub mod channel_indices {
/// - the address is `program_counter`,
/// - the value must fit in one byte (in the least-significant position) and its eight bits are
/// found in `opcode_bits`.
///
/// There is also a partial channel, which shares its values with another general purpose channel.
///
/// These limitations save us numerous columns in the CPU table.
pub(crate) const NUM_CHANNELS: usize = channel_indices::GP.end;
pub(crate) const NUM_CHANNELS: usize = channel_indices::GP.end + 1;
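With one general-purpose channel removed, each CPU row now owns five memory-bus slots: code, three general-purpose, and the partial channel. The standalone sketch below illustrates how per-row timestamps would then order operations; the `clock * NUM_CHANNELS + channel` formula is an assumption based on `mem_time_and_channel` in cpu_stark.rs:

const NUM_GP_CHANNELS: usize = 3; // was 4 before this commit
const NUM_CHANNELS: usize = 1 + NUM_GP_CHANNELS + 1; // code + GP + partial

fn mem_timestamp(clock: usize, channel: usize) -> usize {
    assert!(channel < NUM_CHANNELS);
    clock * NUM_CHANNELS + channel
}

fn main() {
    let clock = 7;
    let gp = |i: usize| 1 + i; // GP channels sit right after the code channel
    // Within one row, ordering is fixed by channel index: a write on GP
    // channel 1 precedes a read on GP channel 2, as generate_set_context needs.
    assert!(mem_timestamp(clock, gp(1)) < mem_timestamp(clock, gp(2)));
}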
/// Evaluates constraints regarding the membus.
pub(crate) fn eval_packed<P: PackedField>(


@@ -9,7 +9,7 @@ use super::cpu_stark::get_addr;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::stack;
use crate::memory::segments::{Segment, SEGMENT_SCALING_FACTOR};
use crate::memory::segments::Segment;
const fn get_addr_load<T: Copy>(lv: &CpuColumnsView<T>) -> (T, T, T) {
get_addr(lv, 0)


@@ -48,7 +48,7 @@ pub(crate) fn eval_packed<P: PackedField>(
yield_constr.constraint(is_shift * (two_exp.addr_virtual - displacement.value[0]));
// Other channels must be unused
for chan in &lv.mem_channels[3..NUM_GP_CHANNELS - 1] {
for chan in &lv.mem_channels[3..NUM_GP_CHANNELS] {
yield_constr.constraint(is_shift * chan.used); // channel is not used
}
@@ -116,7 +116,7 @@ pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
yield_constr.constraint(builder, t);
// Other channels must be unused
for chan in &lv.mem_channels[3..NUM_GP_CHANNELS - 1] {
for chan in &lv.mem_channels[3..NUM_GP_CHANNELS] {
let t = builder.mul_extension(is_shift, chan.used);
yield_constr.constraint(builder, t);
}


@@ -17,7 +17,6 @@ use crate::memory::segments::Segment;
// Copy the constant but make it `usize`.
const BYTES_PER_OFFSET: usize = crate::cpu::kernel::assembler::BYTES_PER_OFFSET as usize;
const_assert!(BYTES_PER_OFFSET < NUM_GP_CHANNELS); // Reserve one channel for stack push
/// Evaluates constraints for syscalls and exceptions.
pub(crate) fn eval_packed<P: PackedField>(
@@ -71,41 +70,38 @@ pub(crate) fn eval_packed<P: PackedField>(
let exc_handler_addr_start =
exc_jumptable_start + exc_code * P::Scalar::from_canonical_usize(BYTES_PER_OFFSET);
for (i, channel) in lv.mem_channels[1..BYTES_PER_OFFSET + 1].iter().enumerate() {
// Set `used` and `is_read`.
yield_constr.constraint(total_filter * (channel.used - P::ONES));
yield_constr.constraint(total_filter * (channel.is_read - P::ONES));
let jumpdest_channel = lv.mem_channels[1];
// Set kernel context and code segment
yield_constr.constraint(total_filter * channel.addr_context);
yield_constr.constraint(total_filter * (channel.addr_segment - code_segment));
// Set `used` and `is_read`.
// The channel is not used: the reads will be done with the byte packing CTL.
yield_constr.constraint(total_filter * jumpdest_channel.used);
yield_constr.constraint(total_filter * (jumpdest_channel.is_read - P::ONES));
// Set address, using a separate channel for each of the `BYTES_PER_OFFSET` limbs.
let limb_address_syscall = opcode_handler_addr_start + P::Scalar::from_canonical_usize(i);
let limb_address_exception = exc_handler_addr_start + P::Scalar::from_canonical_usize(i);
// Set kernel context and code segment
yield_constr.constraint(total_filter * jumpdest_channel.addr_context);
yield_constr.constraint(total_filter * (jumpdest_channel.addr_segment - code_segment));
yield_constr.constraint(filter_syscall * (channel.addr_virtual - limb_address_syscall));
yield_constr.constraint(filter_exception * (channel.addr_virtual - limb_address_exception));
// Set address.
yield_constr
.constraint(filter_syscall * (jumpdest_channel.addr_virtual - opcode_handler_addr_start));
yield_constr
.constraint(filter_exception * (jumpdest_channel.addr_virtual - exc_handler_addr_start));
// Set higher limbs to zero.
for &limb in &jumpdest_channel.value[1..] {
yield_constr.constraint(total_filter * limb);
}
// Disable unused channels
for channel in &lv.mem_channels[BYTES_PER_OFFSET + 1..NUM_GP_CHANNELS] {
for channel in &lv.mem_channels[2..NUM_GP_CHANNELS] {
yield_constr.constraint(total_filter * channel.used);
}
// Set program counter to the handler address
// The addresses are big-endian in memory
let target = lv.mem_channels[1..BYTES_PER_OFFSET + 1]
.iter()
.map(|channel| channel.value[0])
.fold(P::ZEROS, |cumul, limb| {
cumul * P::Scalar::from_canonical_u64(256) + limb
});
yield_constr.constraint_transition(total_filter * (nv.program_counter - target));
yield_constr
.constraint_transition(total_filter * (nv.program_counter - jumpdest_channel.value[0]));
// Set kernel mode
yield_constr.constraint_transition(total_filter * (nv.is_kernel_mode - P::ONES));
// Maintain current context
yield_constr.constraint_transition(total_filter * (nv.context - lv.context));
// Reset gas counter to zero.
yield_constr.constraint_transition(total_filter * nv.gas);
@@ -197,61 +193,58 @@ pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
exc_jumptable_start,
);
for (i, channel) in lv.mem_channels[1..BYTES_PER_OFFSET + 1].iter().enumerate() {
// Set `used` and `is_read`.
{
let constr = builder.mul_sub_extension(total_filter, channel.used, total_filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(total_filter, channel.is_read, total_filter);
yield_constr.constraint(builder, constr);
}
let jumpdest_channel = lv.mem_channels[1];
// Set kernel context and code segment
{
let constr = builder.mul_extension(total_filter, channel.addr_context);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.arithmetic_extension(
F::ONE,
-code_segment,
total_filter,
channel.addr_segment,
total_filter,
);
yield_constr.constraint(builder, constr);
}
// Set address, using a separate channel for each of the `BYTES_PER_OFFSET` limbs.
{
let diff_syscall =
builder.sub_extension(channel.addr_virtual, opcode_handler_addr_start);
let constr = builder.arithmetic_extension(
F::ONE,
-F::from_canonical_usize(i),
filter_syscall,
diff_syscall,
filter_syscall,
);
yield_constr.constraint(builder, constr);
let diff_exception =
builder.sub_extension(channel.addr_virtual, exc_handler_addr_start);
let constr = builder.arithmetic_extension(
F::ONE,
-F::from_canonical_usize(i),
filter_exception,
diff_exception,
filter_exception,
);
yield_constr.constraint(builder, constr);
}
// Set `used` and `is_read`.
// The channel is not used: the reads will be done with the byte packing CTL.
{
let constr = builder.mul_extension(total_filter, jumpdest_channel.used);
yield_constr.constraint(builder, constr);
}
{
let constr =
builder.mul_sub_extension(total_filter, jumpdest_channel.is_read, total_filter);
yield_constr.constraint(builder, constr);
}
// Disable unused channels (the last channel is used to push to the stack)
for channel in &lv.mem_channels[BYTES_PER_OFFSET + 1..NUM_GP_CHANNELS] {
// Set kernel context and code segment
{
let constr = builder.mul_extension(total_filter, jumpdest_channel.addr_context);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.arithmetic_extension(
F::ONE,
-code_segment,
total_filter,
jumpdest_channel.addr_segment,
total_filter,
);
yield_constr.constraint(builder, constr);
}
// Set address.
{
let diff_syscall =
builder.sub_extension(jumpdest_channel.addr_virtual, opcode_handler_addr_start);
let constr = builder.mul_extension(filter_syscall, diff_syscall);
yield_constr.constraint(builder, constr);
}
{
let diff_exception =
builder.sub_extension(jumpdest_channel.addr_virtual, exc_handler_addr_start);
let constr = builder.mul_extension(filter_exception, diff_exception);
yield_constr.constraint(builder, constr);
}
// Set higher limbs to zero.
for &limb in &jumpdest_channel.value[1..] {
let constr = builder.mul_extension(total_filter, limb);
yield_constr.constraint(builder, constr);
}
// Disable unused channels
for channel in &lv.mem_channels[2..NUM_GP_CHANNELS] {
let constr = builder.mul_extension(total_filter, channel.used);
yield_constr.constraint(builder, constr);
}
@@ -259,13 +252,7 @@ pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
// Set program counter to the handler address
// The addresses are big-endian in memory
{
let target = lv.mem_channels[1..BYTES_PER_OFFSET + 1]
.iter()
.map(|channel| channel.value[0])
.fold(builder.zero_extension(), |cumul, limb| {
builder.mul_const_add_extension(F::from_canonical_u64(256), cumul, limb)
});
let diff = builder.sub_extension(nv.program_counter, target);
let diff = builder.sub_extension(nv.program_counter, jumpdest_channel.value[0]);
let constr = builder.mul_extension(total_filter, diff);
yield_constr.constraint_transition(builder, constr);
}
@@ -274,12 +261,6 @@ pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
let constr = builder.mul_sub_extension(total_filter, nv.is_kernel_mode, total_filter);
yield_constr.constraint_transition(builder, constr);
}
// Maintain current context
{
let diff = builder.sub_extension(nv.context, lv.context);
let constr = builder.mul_extension(total_filter, diff);
yield_constr.constraint_transition(builder, constr);
}
// Reset gas counter to zero.
{
let constr = builder.mul_extension(total_filter, nv.gas);


@@ -4,8 +4,8 @@ use keccak_hash::keccak;
use plonky2::field::types::Field;
use super::util::{
byte_packing_log, byte_unpacking_log, mem_write_partial_log_and_fill, push_no_write,
push_with_write,
byte_packing_log, byte_unpacking_log, mem_read_with_log, mem_write_log,
mem_write_partial_log_and_fill, push_no_write, push_with_write,
};
use crate::arithmetic::BinaryOperator;
use crate::cpu::columns::CpuColumnsView;
@@ -349,7 +349,7 @@ pub(crate) fn generate_get_context<F: Field>(
Segment::Stack,
state.registers.stack_len - 1,
);
let res = mem_write_gp_log_and_fill(3, address, state, &mut row, state.registers.stack_top);
let res = mem_write_gp_log_and_fill(2, address, state, &mut row, state.registers.stack_top);
Some(res)
};
push_no_write(
@@ -380,7 +380,10 @@ pub(crate) fn generate_set_context<F: Field>(
let old_sp_addr = MemoryAddress::new(old_ctx, Segment::ContextMetadata, sp_field);
let new_sp_addr = MemoryAddress::new(new_ctx, Segment::ContextMetadata, sp_field);
let log_write_old_sp = mem_write_gp_log_and_fill(1, old_sp_addr, state, &mut row, sp_to_save);
// This channel will hold, in limbs 0 and 1, the one-limb values of two separate memory
// operations: the old stack pointer write and the new stack pointer read.
// Channels only matter for timestamps: the write must happen before the read.
let log_write_old_sp = mem_write_log(GeneralPurpose(1), old_sp_addr, state, sp_to_save);
let (new_sp, log_read_new_sp) = if old_ctx == new_ctx {
let op = MemoryOp::new(
MemoryChannel::GeneralPurpose(2),
@@ -389,23 +392,9 @@
MemoryOpKind::Read,
sp_to_save,
);
let channel = &mut row.mem_channels[2];
assert_eq!(channel.used, F::ZERO);
channel.used = F::ONE;
channel.is_read = F::ONE;
channel.addr_context = F::from_canonical_usize(new_ctx);
channel.addr_segment = F::from_canonical_usize(Segment::ContextMetadata.unscale());
channel.addr_virtual = F::from_canonical_usize(new_sp_addr.virt);
let val_limbs: [u64; 4] = sp_to_save.0;
for (i, limb) in val_limbs.into_iter().enumerate() {
channel.value[2 * i] = F::from_canonical_u32(limb as u32);
channel.value[2 * i + 1] = F::from_canonical_u32((limb >> 32) as u32);
}
(sp_to_save, op)
} else {
mem_read_gp_with_log_and_fill(2, new_sp_addr, state, &mut row)
mem_read_with_log(GeneralPurpose(2), new_sp_addr, state)
};
// If the new stack isn't empty, read stack_top from memory.
@@ -425,7 +414,7 @@
let new_top_addr = MemoryAddress::new(new_ctx, Segment::Stack, new_sp - 1);
let (new_top, log_read_new_top) =
mem_read_gp_with_log_and_fill(3, new_top_addr, state, &mut row);
mem_read_gp_with_log_and_fill(2, new_top_addr, state, &mut row);
state.registers.stack_top = new_top;
state.traces.push_memory(log_read_new_top);
} else {
@@ -705,27 +694,30 @@ pub(crate) fn generate_syscall<F: Field>(
let handler_addr_addr =
handler_jumptable_addr + (opcode as usize) * (BYTES_PER_OFFSET as usize);
assert_eq!(BYTES_PER_OFFSET, 3, "Code below assumes 3 bytes per offset");
let (handler_addr0, log_in0) = mem_read_gp_with_log_and_fill(
1,
MemoryAddress::new(0, Segment::Code, handler_addr_addr),
state,
&mut row,
);
let (handler_addr1, log_in1) = mem_read_gp_with_log_and_fill(
2,
MemoryAddress::new(0, Segment::Code, handler_addr_addr + 1),
state,
&mut row,
);
let (handler_addr2, log_in2) = mem_read_gp_with_log_and_fill(
3,
MemoryAddress::new(0, Segment::Code, handler_addr_addr + 2),
state,
&mut row,
);
let base_address = MemoryAddress::new(0, Segment::Code, handler_addr_addr);
let bytes = (0..BYTES_PER_OFFSET as usize)
.map(|i| {
let address = MemoryAddress {
virt: base_address.virt + i,
..base_address
};
let val = state.memory.get(address);
val.low_u32() as u8
})
.collect_vec();
let handler_addr = (handler_addr0 << 16) + (handler_addr1 << 8) + handler_addr2;
let new_program_counter = u256_to_usize(handler_addr)?;
let packed_int = U256::from_big_endian(&bytes);
let jumptable_channel = &mut row.mem_channels[1];
jumptable_channel.is_read = F::ONE;
jumptable_channel.addr_context = F::ZERO;
jumptable_channel.addr_segment = F::from_canonical_usize(Segment::Code as usize);
jumptable_channel.addr_virtual = F::from_canonical_usize(handler_addr_addr);
jumptable_channel.value[0] = F::from_canonical_usize(u256_to_usize(packed_int)?);
byte_packing_log(state, base_address, bytes);
let new_program_counter = u256_to_usize(packed_int)?;
let gas = U256::from(state.registers.gas_used);
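The three per-byte reads (`handler_addr0..2`) are replaced here by a single 3-byte big-endian read proven through the byte-packing CTL. A standalone sketch of the packing this relies on:

// Standalone equivalent of the removed (addr0 << 16) + (addr1 << 8) + addr2
// computation; U256::from_big_endian packs the 3 jumptable bytes the same way.
fn pack_be(bytes: &[u8]) -> u64 {
    bytes.iter().fold(0u64, |acc, &b| (acc << 8) | u64::from(b))
}

fn main() {
    let bytes = [0x01, 0x23, 0x45]; // BYTES_PER_OFFSET = 3
    assert_eq!(pack_be(&bytes), (0x01u64 << 16) + (0x23 << 8) + 0x45);
}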
@@ -734,14 +726,15 @@
+ (gas << 192);
// `ArithmeticStark` range checks `mem_channels[0]`, which contains
// the top of the stack, `mem_channels[1]`, `mem_channels[2]` and
// next_row's `mem_channels[0]` which contains the next top of the stack.
// the top of the stack, `mem_channels[1]`, which contains the new PC,
// `mem_channels[2]`, which is empty, and next_row's `mem_channels[0]`,
// which contains the next top of the stack.
// Our goal here is to range-check the gas, contained in syscall_info,
// stored in the next stack top.
let range_check_op = arithmetic::Operation::range_check(
state.registers.stack_top,
handler_addr0,
handler_addr1,
packed_int,
U256::from(0),
U256::from(opcode),
syscall_info,
);
@@ -757,9 +750,6 @@
log::debug!("Syscall to {}", KERNEL.offset_name(new_program_counter));
state.traces.push_arithmetic(range_check_op);
state.traces.push_memory(log_in0);
state.traces.push_memory(log_in1);
state.traces.push_memory(log_in2);
state.traces.push_cpu(row);
Ok(())
@@ -950,27 +940,29 @@ pub(crate) fn generate_exception<F: Field>(
let handler_addr_addr =
handler_jumptable_addr + (exc_code as usize) * (BYTES_PER_OFFSET as usize);
assert_eq!(BYTES_PER_OFFSET, 3, "Code below assumes 3 bytes per offset");
let (handler_addr0, log_in0) = mem_read_gp_with_log_and_fill(
1,
MemoryAddress::new(0, Segment::Code, handler_addr_addr),
state,
&mut row,
);
let (handler_addr1, log_in1) = mem_read_gp_with_log_and_fill(
2,
MemoryAddress::new(0, Segment::Code, handler_addr_addr + 1),
state,
&mut row,
);
let (handler_addr2, log_in2) = mem_read_gp_with_log_and_fill(
3,
MemoryAddress::new(0, Segment::Code, handler_addr_addr + 2),
state,
&mut row,
);
let base_address = MemoryAddress::new(0, Segment::Code, handler_addr_addr);
let bytes = (0..BYTES_PER_OFFSET as usize)
.map(|i| {
let address = MemoryAddress {
virt: base_address.virt + i,
..base_address
};
let val = state.memory.get(address);
val.low_u32() as u8
})
.collect_vec();
let handler_addr = (handler_addr0 << 16) + (handler_addr1 << 8) + handler_addr2;
let new_program_counter = u256_to_usize(handler_addr)?;
let packed_int = U256::from_big_endian(&bytes);
let jumptable_channel = &mut row.mem_channels[1];
jumptable_channel.is_read = F::ONE;
jumptable_channel.addr_context = F::ZERO;
jumptable_channel.addr_segment = F::from_canonical_usize(Segment::Code as usize);
jumptable_channel.addr_virtual = F::from_canonical_usize(handler_addr_addr);
jumptable_channel.value[0] = F::from_canonical_usize(u256_to_usize(packed_int)?);
byte_packing_log(state, base_address, bytes);
let new_program_counter = u256_to_usize(packed_int)?;
let gas = U256::from(state.registers.gas_used);
@@ -982,14 +974,15 @@
let opcode = state.memory.get(address);
// `ArithmeticStark` range checks `mem_channels[0]`, which contains
// the top of the stack, `mem_channels[1]`, `mem_channels[2]` and
// next_row's `mem_channels[0]` which contains the next top of the stack.
// the top of the stack, `mem_channels[1]`, which contains the new PC,
// `mem_channels[2]`, which is empty, and next_row's `mem_channels[0]`,
// which contains the next top of the stack.
// Our goal here is to range-check the gas, contained in syscall_info,
// stored in the next stack top.
let range_check_op = arithmetic::Operation::range_check(
state.registers.stack_top,
handler_addr0,
handler_addr1,
packed_int,
U256::from(0),
opcode,
exc_info,
);
@@ -1004,9 +997,6 @@
log::debug!("Exception to {}", KERNEL.offset_name(new_program_counter));
state.traces.push_arithmetic(range_check_op);
state.traces.push_memory(log_in0);
state.traces.push_memory(log_in1);
state.traces.push_memory(log_in2);
state.traces.push_cpu(row);
Ok(())