Mirror of https://github.com/logos-storage/plonky2.git
Commit a268677936: Merge branch 'main' into ecrecover_kernel

# Conflicts:
#   evm/src/cpu/kernel/aggregator.rs
@@ -13,6 +13,7 @@ ethereum-types = "0.13.1"
hex = { version = "0.4.3", optional = true }
itertools = "0.10.3"
log = "0.4.14"
once_cell = "1.13.0"
pest = "2.1.3"
pest_derive = "2.1.0"
rayon = "1.5.1"
@@ -146,7 +146,8 @@ mod tests {
    use crate::cross_table_lookup::testutils::check_ctls;
    use crate::keccak::keccak_stark::{KeccakStark, NUM_INPUTS, NUM_ROUNDS};
    use crate::logic::{self, LogicStark, Operation};
    use crate::memory::memory_stark::{generate_random_memory_ops, MemoryStark};
    use crate::memory::memory_stark::tests::generate_random_memory_ops;
    use crate::memory::memory_stark::MemoryStark;
    use crate::memory::NUM_CHANNELS;
    use crate::proof::AllProof;
    use crate::prover::prove;
@@ -1,33 +1,71 @@
//! The initial phase of execution, where the kernel code is hashed while being written to memory.
//! The hash is then checked against a precomputed kernel hash.

use std::borrow::Borrow;

use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2_util::ceil_div_usize;

use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::{COL_MAP, NUM_CPU_COLUMNS};
use crate::cpu::columns::{CpuColumnsView, NUM_CPU_COLUMNS};
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::keccak_util::keccakf_u32s;
use crate::cpu::public_inputs::NUM_PUBLIC_INPUTS;
use crate::generation::state::GenerationState;
use crate::memory;
use crate::memory::segments;
use crate::memory::segments::Segment;
use crate::memory::NUM_CHANNELS;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};

/// The Keccak rate (1088 bits), measured in bytes.
const KECCAK_RATE_BYTES: usize = 1088 / 8;

/// The Keccak rate (1088 bits), measured in u32 limbs.
const KECCAK_RATE_LIMBS: usize = 1088 / 32;

/// We can't process more than `NUM_CHANNELS` bytes per row, since that's all the memory bandwidth
/// we have. We also can't process more than 4 bytes (the number of bytes in a `u32`), since we
/// want them to fit in a single limb of Keccak input.
const BYTES_PER_ROW: usize = 4;

pub(crate) fn generate_bootstrap_kernel<F: Field>(state: &mut GenerationState<F>) {
    for chunk in &state.kernel.code.clone().into_iter().enumerate().chunks(4) {
    let mut code = KERNEL.code.clone();

    // Zero-pad the code such that its size is a multiple of the Keccak rate.
    let padded_size = ceil_div_usize(code.len(), KECCAK_RATE_BYTES) * KECCAK_RATE_BYTES;
    code.resize(padded_size, 0);

    let mut sponge_state = [0u32; 50];
    let mut sponge_input_pos: usize = 0;

    // Iterate through chunks of the code, such that we can write one chunk to memory per row.
    for chunk in &code.into_iter().enumerate().chunks(BYTES_PER_ROW) {
        state.current_cpu_row.is_bootstrap_kernel = F::ONE;

        // Write this chunk to memory, while simultaneously packing its bytes into a u32 word.
        let mut packed_bytes: u32 = 0;
        for (addr, byte) in chunk {
            let mut value = [F::ZERO; memory::VALUE_LIMBS];
            value[0] = F::from_canonical_u8(byte);
            let channel = addr % NUM_CHANNELS;
            state.set_mem_current(channel, Segment::Code, addr, byte.into());

            let channel = addr % memory::NUM_CHANNELS;
            state.set_mem_current(channel, segments::CODE, addr, value);
            packed_bytes = (packed_bytes << 8) | byte as u32;
        }

        // TODO: Set other registers.
        sponge_state[sponge_input_pos] = packed_bytes;
        state.current_cpu_row.keccak_input_limbs = sponge_state.map(F::from_canonical_u32);
        state.commit_cpu_row();

        state.commit_cpu_row();
        sponge_input_pos = (sponge_input_pos + 1) % KECCAK_RATE_LIMBS;
        // If we just crossed a multiple of KECCAK_RATE_LIMBS, then we've filled the Keccak input
        // buffer, so it's time to absorb.
        if sponge_input_pos == 0 {
            state.current_cpu_row.is_keccak = F::ONE;
            keccakf_u32s(&mut sponge_state);
            state.current_cpu_row.keccak_output_limbs = sponge_state.map(F::from_canonical_u32);
        }
    }
}
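As a quick sanity check on the constants above (illustrative only, not part of the commit): the 1088-bit rate is 136 bytes, or 34 u32 limbs, so with 4 bytes packed per row one Keccak permutation is absorbed every 34 rows.

    // Illustrative arithmetic for the constants above (not part of the commit).
    const KECCAK_RATE_BYTES: usize = 1088 / 8; // 136
    const KECCAK_RATE_LIMBS: usize = 1088 / 32; // 34
    const BYTES_PER_ROW: usize = 4;

    fn main() {
        // One u32 limb is filled per row, and an absorb happens every 34 rows.
        assert_eq!(KECCAK_RATE_BYTES / BYTES_PER_ROW, KECCAK_RATE_LIMBS);
        // A hypothetical 5000-byte kernel is zero-padded to 5032 bytes,
        // i.e. 37 rate blocks, 1258 bootstrap rows and 37 permutations.
        let code_len = 5000usize;
        let padded = ((code_len + KECCAK_RATE_BYTES - 1) / KECCAK_RATE_BYTES) * KECCAK_RATE_BYTES;
        assert_eq!(padded, 5032);
        assert_eq!(padded / BYTES_PER_ROW, 1258);
        assert_eq!(padded / KECCAK_RATE_BYTES, 37);
    }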
@@ -36,18 +74,35 @@ pub(crate) fn eval_bootstrap_kernel<F: Field, P: PackedField<Scalar = F>>(
    vars: StarkEvaluationVars<F, P, NUM_CPU_COLUMNS, NUM_PUBLIC_INPUTS>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let local_values: &CpuColumnsView<_> = vars.local_values.borrow();
    let next_values: &CpuColumnsView<_> = vars.next_values.borrow();

    // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
    let local_is_bootstrap = vars.local_values[COL_MAP.is_bootstrap_kernel];
    let next_is_bootstrap = vars.next_values[COL_MAP.is_bootstrap_kernel];
    let local_is_bootstrap = local_values.is_bootstrap_kernel;
    let next_is_bootstrap = next_values.is_bootstrap_kernel;
    yield_constr.constraint_first_row(local_is_bootstrap - P::ONES);
    yield_constr.constraint_last_row(local_is_bootstrap);
    let delta_is_bootstrap = next_is_bootstrap - local_is_bootstrap;
    yield_constr.constraint_transition(delta_is_bootstrap * (delta_is_bootstrap + P::ONES));

    // If IS_BOOTSTRAP_KERNEL changed (from 1 to 0), check that the current kernel hash matches a
    // precomputed one.
    let hash_diff = F::ZERO; // TODO
    yield_constr.constraint_transition(delta_is_bootstrap * hash_diff)
    // TODO: Constraints to enforce that, if IS_BOOTSTRAP_KERNEL,
    // - If CLOCK is a multiple of KECCAK_RATE_LIMBS, activate the Keccak CTL, and ensure the output
    //   is copied to the next row (besides the first limb which will immediately be overwritten).
    // - Otherwise, ensure that the Keccak input is copied to the next row (besides the next limb).
    // - The next limb we add to the buffer is also written to memory.

    // If IS_BOOTSTRAP_KERNEL changed (from 1 to 0), check that
    // - the clock is a multiple of KECCAK_RATE_LIMBS (TODO)
    // - the current kernel hash matches a precomputed one
    for (&expected, actual) in KERNEL
        .code_hash
        .iter()
        .zip(local_values.keccak_output_limbs)
    {
        let expected = P::from(F::from_canonical_u32(expected));
        let diff = expected - actual;
        yield_constr.constraint_transition(delta_is_bootstrap * diff);
    }
}

pub(crate) fn eval_bootstrap_kernel_circuit<F: RichField + Extendable<D>, const D: usize>(
@@ -55,11 +110,13 @@ pub(crate) fn eval_bootstrap_kernel_circuit<F: RichField + Extendable<D>, const
    vars: StarkEvaluationTargets<D, NUM_CPU_COLUMNS, NUM_PUBLIC_INPUTS>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let local_values: &CpuColumnsView<_> = vars.local_values.borrow();
    let next_values: &CpuColumnsView<_> = vars.next_values.borrow();
    let one = builder.one_extension();

    // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
    let local_is_bootstrap = vars.local_values[COL_MAP.is_bootstrap_kernel];
    let next_is_bootstrap = vars.next_values[COL_MAP.is_bootstrap_kernel];
    let local_is_bootstrap = local_values.is_bootstrap_kernel;
    let next_is_bootstrap = next_values.is_bootstrap_kernel;
    let constraint = builder.sub_extension(local_is_bootstrap, one);
    yield_constr.constraint_first_row(builder, constraint);
    yield_constr.constraint_last_row(builder, local_is_bootstrap);
@@ -68,9 +125,23 @@ pub(crate) fn eval_bootstrap_kernel_circuit<F: RichField + Extendable<D>, const
    builder.mul_add_extension(delta_is_bootstrap, delta_is_bootstrap, delta_is_bootstrap);
    yield_constr.constraint_transition(builder, constraint);

    // If IS_BOOTSTRAP_KERNEL changed (from 1 to 0), check that the current kernel hash matches a
    // precomputed one.
    let hash_diff = builder.zero_extension(); // TODO
    let constraint = builder.mul_extension(delta_is_bootstrap, hash_diff);
    yield_constr.constraint_transition(builder, constraint)
    // TODO: Constraints to enforce that, if IS_BOOTSTRAP_KERNEL,
    // - If CLOCK is a multiple of KECCAK_RATE_LIMBS, activate the Keccak CTL, and ensure the output
    //   is copied to the next row (besides the first limb which will immediately be overwritten).
    // - Otherwise, ensure that the Keccak input is copied to the next row (besides the next limb).
    // - The next limb we add to the buffer is also written to memory.

    // If IS_BOOTSTRAP_KERNEL changed (from 1 to 0), check that
    // - the clock is a multiple of KECCAK_RATE_LIMBS (TODO)
    // - the current kernel hash matches a precomputed one
    for (&expected, actual) in KERNEL
        .code_hash
        .iter()
        .zip(local_values.keccak_output_limbs)
    {
        let expected = builder.constant_extension(F::Extension::from_canonical_u32(expected));
        let diff = builder.sub_extension(expected, actual);
        let constraint = builder.mul_extension(delta_is_bootstrap, diff);
        yield_constr.constraint_transition(builder, constraint);
    }
}
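A minimal standalone illustration (not from the commit) of why the transition constraint delta * (delta + 1) pins the bootstrap flag's delta to {0, -1}:

    // Illustrative only: over integers, delta * (delta + 1) is zero exactly when
    // delta is 0 or -1, so the flag may stay put or drop from 1 to 0, never rise.
    fn main() {
        for (local, next) in [(1i64, 1i64), (1, 0), (0, 0), (0, 1)] {
            let delta = next - local;
            let constraint = delta * (delta + 1);
            // Only the forbidden 0 -> 1 transition yields a nonzero value (2).
            println!("{local} -> {next}: delta = {delta}, constraint = {constraint}");
        }
    }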
@@ -8,6 +8,7 @@ use std::ops::{Index, IndexMut};
use crate::memory;

#[repr(C)]
#[derive(Eq, PartialEq, Debug)]
pub struct CpuColumnsView<T> {
    /// Filter. 1 if the row is part of bootstrapping the kernel code, 0 otherwise.
    pub is_bootstrap_kernel: T,
@@ -4,13 +4,19 @@ use std::collections::HashMap;

use ethereum_types::U256;
use itertools::Itertools;
use once_cell::sync::Lazy;

use super::assembler::{assemble, Kernel};
use crate::cpu::kernel::parser::parse;
use crate::memory::segments::Segment;

pub static KERNEL: Lazy<Kernel> = Lazy::new(combined_kernel);

pub fn evm_constants() -> HashMap<String, U256> {
    let mut c = HashMap::new();
    c.insert("SEGMENT_ID_TXN_DATA".into(), 0.into()); // TODO: Replace with actual segment ID.
    for segment in Segment::all() {
        c.insert(segment.var_name().into(), (segment as u32).into());
    }
    c
}
@@ -9,18 +9,21 @@
%endmacro

%macro pop2
    pop
    pop
    %rep 2
        pop
    %endrep
%endmacro

%macro pop3
    pop
    %pop2
    %rep 3
        pop
    %endrep
%endmacro

%macro pop4
    %pop2
    %pop2
    %rep 4
        pop
    %endrep
%endmacro

// If pred is zero, yields z; otherwise, yields nz
@@ -6,6 +6,7 @@ use log::debug;

use super::ast::PushTarget;
use crate::cpu::kernel::ast::Literal;
use crate::cpu::kernel::keccak_util::hash_kernel;
use crate::cpu::kernel::{
    ast::{File, Item},
    opcodes::{get_opcode, get_push_opcode},
@@ -19,9 +20,25 @@ const BYTES_PER_OFFSET: u8 = 3;
#[derive(PartialEq, Eq, Debug)]
pub struct Kernel {
    pub(crate) code: Vec<u8>,

    /// Computed using `hash_kernel`. It is encoded as `u32` limbs for convenience, since we deal
    /// with `u32` limbs in our Keccak table.
    pub(crate) code_hash: [u32; 8],

    pub(crate) global_labels: HashMap<String, usize>,
}

impl Kernel {
    fn new(code: Vec<u8>, global_labels: HashMap<String, usize>) -> Self {
        let code_hash = hash_kernel(&code);
        Self {
            code,
            code_hash,
            global_labels,
        }
    }
}

struct Macro {
    params: Vec<String>,
    items: Vec<Item>,
@@ -44,6 +61,7 @@ pub(crate) fn assemble(files: Vec<File>, constants: HashMap<String, U256>) -> Ke
    let mut local_labels = Vec::with_capacity(files.len());
    for file in files {
        let expanded_file = expand_macros(file.body, &macros);
        let expanded_file = expand_repeats(expanded_file);
        let expanded_file = inline_constants(expanded_file, &constants);
        local_labels.push(find_labels(&expanded_file, &mut offset, &mut global_labels));
        expanded_files.push(expanded_file);
@@ -56,10 +74,7 @@ pub(crate) fn assemble(files: Vec<File>, constants: HashMap<String, U256>) -> Ke
        debug!("Assembled file size: {} bytes", file_len);
    }
    assert_eq!(code.len(), offset, "Code length doesn't match offset.");
    Kernel {
        code,
        global_labels,
    }
    Kernel::new(code, global_labels)
}

fn find_macros(files: &[File]) -> HashMap<String, Macro> {
@@ -132,6 +147,21 @@ fn expand_macro_call(
    expand_macros(expanded_item, macros)
}

fn expand_repeats(body: Vec<Item>) -> Vec<Item> {
    let mut expanded = vec![];
    for item in body {
        if let Item::Repeat(count, block) = item {
            let reps = count.to_u256().as_usize();
            for _ in 0..reps {
                expanded.extend(block.clone());
            }
        } else {
            expanded.push(item);
        }
    }
    expanded
}

fn inline_constants(body: Vec<Item>, constants: &HashMap<String, U256>) -> Vec<Item> {
    body.into_iter()
        .map(|item| {
@@ -157,8 +187,8 @@ fn find_labels(
    let mut local_labels = HashMap::<String, usize>::new();
    for item in body {
        match item {
            Item::MacroDef(_, _, _) | Item::MacroCall(_, _) => {
                panic!("Macros should have been expanded already")
            Item::MacroDef(_, _, _) | Item::MacroCall(_, _) | Item::Repeat(_, _) => {
                panic!("Macros and repeats should have been expanded already")
            }
            Item::GlobalLabelDeclaration(label) => {
                let old = global_labels.insert(label.clone(), *offset);
@@ -185,8 +215,8 @@ fn assemble_file(
    // Assemble the file.
    for item in body {
        match item {
            Item::MacroDef(_, _, _) | Item::MacroCall(_, _) => {
                panic!("Macros should have been expanded already")
            Item::MacroDef(_, _, _) | Item::MacroCall(_, _) | Item::Repeat(_, _) => {
                panic!("Macros and repeats should have been expanded already")
            }
            Item::GlobalLabelDeclaration(_) | Item::LocalLabelDeclaration(_) => {
                // Nothing to do; we processed labels in the prior phase.
@@ -286,10 +316,7 @@ mod tests {
        expected_global_labels.insert("function_1".to_string(), 0);
        expected_global_labels.insert("function_2".to_string(), 3);

        let expected_kernel = Kernel {
            code: expected_code,
            global_labels: expected_global_labels,
        };
        let expected_kernel = Kernel::new(expected_code, expected_global_labels);

        let program = vec![file_1, file_2];
        assert_eq!(assemble(program, HashMap::new()), expected_kernel);
@@ -393,6 +420,13 @@ mod tests {
        assert_eq!(kernel.code, vec![push4, 0xDE, 0xAD, 0xBE, 0xEF]);
    }

    #[test]
    fn repeat() {
        let kernel = parse_and_assemble(&["%rep 3 ADD %endrep"]);
        let add = get_opcode("ADD");
        assert_eq!(kernel.code, vec![add, add, add]);
    }

    fn parse_and_assemble(files: &[&str]) -> Kernel {
        parse_and_assemble_with_constants(files, HashMap::new())
    }
@@ -12,6 +12,8 @@ pub(crate) enum Item {
    MacroDef(String, Vec<String>, Vec<Item>),
    /// Calls a macro: name, args.
    MacroCall(String, Vec<PushTarget>),
    /// Repetition, like `%rep` in NASM.
    Repeat(Literal, Vec<Item>),
    /// Declares a global label.
    GlobalLabelDeclaration(String),
    /// Declares a label that is local to the current file.
@@ -15,9 +15,10 @@ literal = { literal_hex | literal_decimal }
variable = ${ "$" ~ identifier }
constant = ${ "@" ~ identifier }

item = { macro_def | macro_call | global_label | local_label | bytes_item | push_instruction | nullary_instruction }
item = { macro_def | macro_call | repeat | global_label | local_label | bytes_item | push_instruction | nullary_instruction }
macro_def = { ^"%macro" ~ identifier ~ macro_paramlist? ~ item* ~ ^"%endmacro" }
macro_call = ${ "%" ~ !(^"macro" | ^"endmacro") ~ identifier ~ macro_arglist? }
macro_call = ${ "%" ~ !(^"macro" | ^"endmacro" | ^"rep" | ^"endrep") ~ identifier ~ macro_arglist? }
repeat = { ^"%rep" ~ literal ~ item* ~ ^"%endrep" }
macro_paramlist = { "(" ~ identifier ~ ("," ~ identifier)* ~ ")" }
macro_arglist = !{ "(" ~ push_target ~ ("," ~ push_target)* ~ ")" }
global_label = { ^"GLOBAL " ~ identifier ~ ":" }
@@ -3,20 +3,61 @@ use ethereum_types::{U256, U512};
/// Halt interpreter execution whenever a jump to this offset is done.
const HALT_OFFSET: usize = 0xdeadbeef;

struct Interpreter<'a> {
#[derive(Debug, Default)]
pub(crate) struct EvmMemory {
    memory: Vec<u8>,
}

impl EvmMemory {
    fn len(&self) -> usize {
        self.memory.len()
    }

    /// Expand memory until `self.len() >= offset`.
    fn expand(&mut self, offset: usize) {
        while self.len() < offset {
            self.memory.extend([0; 32]);
        }
    }

    fn mload(&mut self, offset: usize) -> U256 {
        self.expand(offset + 32);
        U256::from_big_endian(&self.memory[offset..offset + 32])
    }

    fn mstore(&mut self, offset: usize, value: U256) {
        self.expand(offset + 32);
        let value_be = {
            let mut tmp = [0; 32];
            value.to_big_endian(&mut tmp);
            tmp
        };
        self.memory[offset..offset + 32].copy_from_slice(&value_be);
    }

    fn mstore8(&mut self, offset: usize, value: U256) {
        self.expand(offset + 1);
        let value_byte = value.0[0] as u8;
        self.memory[offset] = value_byte;
    }
}

pub(crate) struct Interpreter<'a> {
    code: &'a [u8],
    jumpdests: Vec<usize>,
    offset: usize,
    stack: Vec<U256>,
    pub(crate) stack: Vec<U256>,
    pub(crate) memory: EvmMemory,
    running: bool,
}

pub fn run(code: &[u8], initial_offset: usize, initial_stack: Vec<U256>) -> Vec<U256> {
pub(crate) fn run(code: &[u8], initial_offset: usize, initial_stack: Vec<U256>) -> Interpreter {
    let mut interpreter = Interpreter {
        code,
        jumpdests: find_jumpdests(code),
        offset: initial_offset,
        stack: initial_stack,
        memory: EvmMemory::default(),
        running: true,
    };
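For orientation, a hypothetical usage sketch of the EvmMemory type introduced above (not part of the commit), written as if it lived in the same module so the private methods are reachable; it only assumes the mload/mstore semantics shown in the diff:

    #[cfg(test)]
    mod evm_memory_demo {
        use ethereum_types::U256;
        use super::EvmMemory;

        #[test]
        fn mstore_then_mload() {
            let mut mem = EvmMemory::default();
            // Words are 32-byte big-endian, so the 0xff byte lands at offset 31.
            mem.mstore(0, U256::from(0xff));
            assert_eq!(mem.mload(0), U256::from(0xff));
            // Reading at offset 1 shifts that byte one position: 0xff00.
            assert_eq!(mem.mload(1), U256::from(0xff00));
        }
    }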
@@ -24,7 +65,7 @@ pub fn run(code: &[u8], initial_offset: usize, initial_stack: Vec<U256>) -> Vec<
        interpreter.run_opcode();
    }

    interpreter.stack
    interpreter
}

impl<'a> Interpreter<'a> {
@@ -104,9 +145,9 @@ impl<'a> Interpreter<'a> {
            0x46 => todo!(), // "CHAINID",
            0x48 => todo!(), // "BASEFEE",
            0x50 => self.run_pop(), // "POP",
            0x51 => todo!(), // "MLOAD",
            0x52 => todo!(), // "MSTORE",
            0x53 => todo!(), // "MSTORE8",
            0x51 => self.run_mload(), // "MLOAD",
            0x52 => self.run_mstore(), // "MSTORE",
            0x53 => self.run_mstore8(), // "MSTORE8",
            0x54 => todo!(), // "SLOAD",
            0x55 => todo!(), // "SSTORE",
            0x56 => self.run_jump(), // "JUMP",
@@ -249,6 +290,24 @@ impl<'a> Interpreter<'a> {
        self.pop();
    }

    fn run_mload(&mut self) {
        let offset = self.pop();
        let value = self.memory.mload(offset.as_usize());
        self.push(value);
    }

    fn run_mstore(&mut self) {
        let offset = self.pop();
        let value = self.pop();
        self.memory.mstore(offset.as_usize(), value);
    }

    fn run_mstore8(&mut self) {
        let offset = self.pop();
        let value = self.pop();
        self.memory.mstore8(offset.as_usize(), value);
    }

    fn run_jump(&mut self) {
        let x = self.pop().as_usize();
        self.offset = x;
@@ -306,13 +365,40 @@ fn find_jumpdests(code: &[u8]) -> Vec<usize> {

#[cfg(test)]
mod tests {
    use crate::cpu::kernel::interpreter::run;
    use hex_literal::hex;

    use crate::cpu::kernel::interpreter::{run, Interpreter};

    #[test]
    fn test_run() {
        let code = vec![
            0x60, 0x1, 0x60, 0x2, 0x1, 0x63, 0xde, 0xad, 0xbe, 0xef, 0x56,
        ]; // PUSH1, 1, PUSH1, 2, ADD, PUSH4 deadbeef, JUMP
        assert_eq!(run(&code, 0, vec![]), vec![0x3.into()]);
        assert_eq!(run(&code, 0, vec![]).stack, vec![0x3.into()]);
    }

    #[test]
    fn test_run_with_memory() {
        // PUSH1 0xff
        // PUSH1 0
        // MSTORE

        // PUSH1 0
        // MLOAD

        // PUSH1 1
        // MLOAD

        // PUSH1 0x42
        // PUSH1 0x27
        // MSTORE8
        let code = vec![
            0x60, 0xff, 0x60, 0x0, 0x52, 0x60, 0, 0x51, 0x60, 0x1, 0x51, 0x60, 0x42, 0x60, 0x27,
            0x53,
        ];
        let run = run(&code, 0, vec![]);
        let Interpreter { stack, memory, .. } = run;
        assert_eq!(stack, vec![0xff.into(), 0xff00.into()]);
        assert_eq!(&memory.memory, &hex!("00000000000000000000000000000000000000000000000000000000000000ff0000000000000042000000000000000000000000000000000000000000000000"));
    }
}
evm/src/cpu/kernel/keccak_util.rs (new file, 14 lines)
@@ -0,0 +1,14 @@
/// A Keccak-f based hash.
///
/// This hash does not use standard Keccak padding, since we don't care about extra zeros at the
/// end of the code.
pub(crate) fn hash_kernel(_code: &[u8]) -> [u32; 8] {
    let state = [0u32; 50];
    // TODO: absorb code
    state[0..8].try_into().unwrap()
}

/// Like tiny-keccak's `keccakf`, but deals with `u32` limbs instead of `u64` limbs.
pub(crate) fn keccakf_u32s(_state: &mut [u32; 50]) {
    // TODO: Implement
}
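Both functions above are still stubs. One plausible shape for keccakf_u32s, as a sketch only (it assumes the 50 u32 limbs are the 25 Keccak lanes split little-endian, and that tiny-keccak's keccakf permutation is used; it is not the committed implementation):

    // Sketch only: pack u32 limb pairs into u64 lanes, run Keccak-f[1600], unpack.
    use tiny_keccak::keccakf;

    pub(crate) fn keccakf_u32s_sketch(state: &mut [u32; 50]) {
        let mut lanes = [0u64; 25];
        for (i, lane) in lanes.iter_mut().enumerate() {
            // Assumed limb order: low u32 first within each u64 lane.
            *lane = (state[2 * i] as u64) | ((state[2 * i + 1] as u64) << 32);
        }
        keccakf(&mut lanes);
        for (i, lane) in lanes.iter().enumerate() {
            state[2 * i] = *lane as u32;
            state[2 * i + 1] = (*lane >> 32) as u32;
        }
    }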
@@ -1,6 +1,7 @@
pub mod aggregator;
pub mod assembler;
mod ast;
pub(crate) mod keccak_util;
mod opcodes;
mod parser;
@@ -23,6 +23,7 @@ fn parse_item(item: Pair<Rule>) -> Item {
    match item.as_rule() {
        Rule::macro_def => parse_macro_def(item),
        Rule::macro_call => parse_macro_call(item),
        Rule::repeat => parse_repeat(item),
        Rule::global_label => {
            Item::GlobalLabelDeclaration(item.into_inner().next().unwrap().as_str().into())
        }
@@ -70,6 +71,13 @@ fn parse_macro_call(item: Pair<Rule>) -> Item {
    Item::MacroCall(name, args)
}

fn parse_repeat(item: Pair<Rule>) -> Item {
    assert_eq!(item.as_rule(), Rule::repeat);
    let mut inner = item.into_inner().peekable();
    let count = parse_literal(inner.next().unwrap());
    Item::Repeat(count, inner.map(parse_item).collect())
}

fn parse_push_target(target: Pair<Rule>) -> PushTarget {
    assert_eq!(target.as_rule(), Rule::push_target);
    let inner = target.into_inner().next().unwrap();
@@ -1,19 +1,18 @@
use plonky2::field::types::Field;
use ethereum_types::U256;

use crate::memory::memory_stark::MemoryOp;
use crate::memory::segments::NUM_SEGMENTS;
use crate::memory::VALUE_LIMBS;
use crate::memory::segments::Segment;

#[allow(unused)] // TODO: Should be used soon.
#[derive(Debug)]
pub(crate) struct MemoryState<F: Field> {
pub(crate) struct MemoryState {
    /// A log of each memory operation, in the order that it occurred.
    pub log: Vec<MemoryOp<F>>,
    pub log: Vec<MemoryOp>,

    pub contexts: Vec<MemoryContextState<F>>,
    pub contexts: Vec<MemoryContextState>,
}

impl<F: Field> Default for MemoryState<F> {
impl Default for MemoryState {
    fn default() -> Self {
        Self {
            log: vec![],
@@ -24,28 +23,27 @@ impl<F: Field> Default for MemoryState<F> {
}

#[derive(Default, Debug)]
pub(crate) struct MemoryContextState<F: Field> {
pub(crate) struct MemoryContextState {
    /// The content of each memory segment.
    pub segments: [MemorySegmentState<F>; NUM_SEGMENTS],
    pub segments: [MemorySegmentState; Segment::COUNT],
}

#[derive(Default, Debug)]
pub(crate) struct MemorySegmentState<F: Field> {
    pub content: Vec<[F; VALUE_LIMBS]>,
pub(crate) struct MemorySegmentState {
    pub content: Vec<U256>,
}

impl<F: Field> MemorySegmentState<F> {
    pub(super) fn get(&self, virtual_addr: usize) -> [F; VALUE_LIMBS] {
impl MemorySegmentState {
    pub(super) fn get(&self, virtual_addr: usize) -> U256 {
        self.content
            .get(virtual_addr)
            .copied()
            .unwrap_or([F::ZERO; VALUE_LIMBS])
            .unwrap_or(U256::zero())
    }

    pub(super) fn set(&mut self, virtual_addr: usize, value: [F; VALUE_LIMBS]) {
        if virtual_addr + 1 > self.content.len() {
            self.content
                .resize(virtual_addr + 1, [F::ZERO; VALUE_LIMBS]);
    pub(super) fn set(&mut self, virtual_addr: usize, value: U256) {
        if virtual_addr >= self.content.len() {
            self.content.resize(virtual_addr + 1, U256::zero());
        }
        self.content[virtual_addr] = value;
    }
@@ -12,6 +12,8 @@ use crate::util::trace_rows_to_poly_values;
mod memory;
pub(crate) mod state;

/// A piece of data which has been encoded using Recursive Length Prefix (RLP) serialization.
/// See https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp/
pub type RlpBlob = Vec<u8>;

/// Merkle proofs are encoded using an RLP blob for each node in the path.
@@ -46,7 +48,7 @@ pub fn generate_traces<F: RichField + Extendable<D>, const D: usize>(
        logic_ops: logic_inputs,
        ..
    } = state;
    assert_eq!(current_cpu_row, [F::ZERO; NUM_CPU_COLUMNS]);
    assert_eq!(current_cpu_row, [F::ZERO; NUM_CPU_COLUMNS].into());

    let cpu_trace = trace_rows_to_poly_values(cpu_rows);
    let keccak_trace = all_stark.keccak_stark.generate_trace(keccak_inputs);
@@ -1,23 +1,21 @@
use std::mem;

use ethereum_types::U256;
use plonky2::field::types::Field;

use crate::cpu::columns::NUM_CPU_COLUMNS;
use crate::cpu::kernel::aggregator::combined_kernel;
use crate::cpu::kernel::assembler::Kernel;
use crate::cpu::columns::{CpuColumnsView, NUM_CPU_COLUMNS};
use crate::generation::memory::MemoryState;
use crate::logic::{Op, Operation};
use crate::memory::memory_stark::MemoryOp;
use crate::memory::segments::Segment;
use crate::{keccak, logic};

#[derive(Debug)]
pub(crate) struct GenerationState<F: Field> {
    pub(crate) kernel: Kernel,

    pub(crate) cpu_rows: Vec<[F; NUM_CPU_COLUMNS]>,
    pub(crate) current_cpu_row: [F; NUM_CPU_COLUMNS],
    pub(crate) current_cpu_row: CpuColumnsView<F>,

    pub(crate) current_context: usize,
    pub(crate) memory: MemoryState<F>,
    pub(crate) memory: MemoryState,

    pub(crate) keccak_inputs: Vec<[u64; keccak::keccak_stark::NUM_INPUTS]>,
    pub(crate) logic_ops: Vec<logic::Operation>,
@@ -27,24 +25,24 @@ impl<F: Field> GenerationState<F> {
    /// Compute logical AND, and record the operation to be added in the logic table later.
    #[allow(unused)] // TODO: Should be used soon.
    pub(crate) fn and(&mut self, input0: U256, input1: U256) -> U256 {
        self.logic_op(Op::And, input0, input1)
        self.logic_op(logic::Op::And, input0, input1)
    }

    /// Compute logical OR, and record the operation to be added in the logic table later.
    #[allow(unused)] // TODO: Should be used soon.
    pub(crate) fn or(&mut self, input0: U256, input1: U256) -> U256 {
        self.logic_op(Op::Or, input0, input1)
        self.logic_op(logic::Op::Or, input0, input1)
    }

    /// Compute logical XOR, and record the operation to be added in the logic table later.
    #[allow(unused)] // TODO: Should be used soon.
    pub(crate) fn xor(&mut self, input0: U256, input1: U256) -> U256 {
        self.logic_op(Op::Xor, input0, input1)
        self.logic_op(logic::Op::Xor, input0, input1)
    }

    /// Compute logical AND, and record the operation to be added in the logic table later.
    pub(crate) fn logic_op(&mut self, op: Op, input0: U256, input1: U256) -> U256 {
        let operation = Operation::new(op, input0, input1);
    pub(crate) fn logic_op(&mut self, op: logic::Op, input0: U256, input1: U256) -> U256 {
        let operation = logic::Operation::new(op, input0, input1);
        let result = operation.result;
        self.logic_ops.push(operation);
        result
@@ -55,12 +53,12 @@ impl<F: Field> GenerationState<F> {
    pub(crate) fn get_mem_current(
        &mut self,
        channel_index: usize,
        segment: usize,
        segment: Segment,
        virt: usize,
    ) -> [F; crate::memory::VALUE_LIMBS] {
    ) -> U256 {
        let timestamp = self.cpu_rows.len();
        let context = self.current_context;
        let value = self.memory.contexts[context].segments[segment].get(virt);
        let value = self.memory.contexts[context].segments[segment as usize].get(virt);
        self.memory.log.push(MemoryOp {
            channel_index: Some(channel_index),
            timestamp,
@@ -77,9 +75,9 @@ impl<F: Field> GenerationState<F> {
    pub(crate) fn set_mem_current(
        &mut self,
        channel_index: usize,
        segment: usize,
        segment: Segment,
        virt: usize,
        value: [F; crate::memory::VALUE_LIMBS],
        value: U256,
    ) {
        let timestamp = self.cpu_rows.len();
        let context = self.current_context;
@@ -92,12 +90,13 @@ impl<F: Field> GenerationState<F> {
            virt,
            value,
        });
        self.memory.contexts[context].segments[segment].set(virt, value)
        self.memory.contexts[context].segments[segment as usize].set(virt, value)
    }

    pub(crate) fn commit_cpu_row(&mut self) {
        self.cpu_rows.push(self.current_cpu_row);
        self.current_cpu_row = [F::ZERO; NUM_CPU_COLUMNS];
        let mut swapped_row = [F::ZERO; NUM_CPU_COLUMNS].into();
        mem::swap(&mut self.current_cpu_row, &mut swapped_row);
        self.cpu_rows.push(swapped_row.into());
    }
}

@@ -106,9 +105,8 @@ impl<F: Field> GenerationState<F> {
impl<F: Field> Default for GenerationState<F> {
    fn default() -> Self {
        Self {
            kernel: combined_kernel(),
            cpu_rows: vec![],
            current_cpu_row: [F::ZERO; NUM_CPU_COLUMNS],
            current_cpu_row: [F::ZERO; NUM_CPU_COLUMNS].into(),
            current_context: 0,
            memory: MemoryState::default(),
            keccak_inputs: vec![],
@@ -9,7 +9,8 @@ pub(crate) const ADDR_CONTEXT: usize = IS_READ + 1;
pub(crate) const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1;
pub(crate) const ADDR_VIRTUAL: usize = ADDR_SEGMENT + 1;

// Eight limbs to hold up to a 256-bit value.
// Eight 32-bit limbs hold a total of 256 bits.
// If a value represents an integer, it is little-endian encoded.
const VALUE_START: usize = ADDR_VIRTUAL + 1;
pub(crate) const fn value_limb(i: usize) -> usize {
    debug_assert!(i < VALUE_LIMBS);
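For illustration only (not part of the commit), the little-endian u32 limb encoding documented above matches the `(value >> (j * 32)).low_u32()` pattern used when memory values are turned into trace rows; `to_limbs` below is a hypothetical helper:

    // Hypothetical helper mirroring the limb ordering documented above.
    use ethereum_types::U256;

    fn to_limbs(value: U256) -> [u32; 8] {
        core::array::from_fn(|j| (value >> (j * 32)).low_u32())
    }

    fn main() {
        let v = U256::from(0x1122334455667788u64);
        let limbs = to_limbs(v);
        assert_eq!(limbs[0], 0x55667788); // least significant limb first
        assert_eq!(limbs[1], 0x11223344);
        assert_eq!(limbs[2..], [0u32; 6]);
    }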
@@ -1,6 +1,6 @@
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;

use ethereum_types::U256;
use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
@@ -10,7 +10,6 @@ use plonky2::hash::hash_types::RichField;
use plonky2::timed;
use plonky2::util::timing::TimingTree;
use plonky2::util::transpose;
use rand::Rng;
use rayon::prelude::*;

use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
@@ -21,6 +20,7 @@ use crate::memory::columns::{
    COUNTER, COUNTER_PERMUTED, IS_READ, NUM_COLUMNS, RANGE_CHECK, RANGE_CHECK_PERMUTED,
    SEGMENT_FIRST_CHANGE, TIMESTAMP, VIRTUAL_FIRST_CHANGE,
};
use crate::memory::segments::Segment;
use crate::memory::{NUM_CHANNELS, VALUE_LIMBS};
use crate::permutation::PermutationPair;
use crate::stark::Stark;
@@ -46,23 +46,23 @@ pub struct MemoryStark<F, const D: usize> {
}

#[derive(Clone, Debug)]
pub struct MemoryOp<F> {
pub(crate) struct MemoryOp {
    /// The channel this operation came from, or `None` if it's a dummy operation for padding.
    pub channel_index: Option<usize>,
    pub timestamp: usize,
    pub is_read: bool,
    pub context: usize,
    pub segment: usize,
    pub segment: Segment,
    pub virt: usize,
    pub value: [F; 8],
    pub value: U256,
}

impl<F: Field> MemoryOp<F> {
impl MemoryOp {
    /// Generate a row for a given memory operation. Note that this does not generate columns which
    /// depend on the next operation, such as `CONTEXT_FIRST_CHANGE`; those are generated later.
    /// It also does not generate columns such as `COUNTER`, which are generated later, after the
    /// trace has been transposed into column-major form.
    fn to_row(&self) -> [F; NUM_COLUMNS] {
    fn to_row<F: Field>(&self) -> [F; NUM_COLUMNS] {
        let mut row = [F::ZERO; NUM_COLUMNS];
        if let Some(channel) = self.channel_index {
            row[is_channel(channel)] = F::ONE;
@@ -70,89 +70,16 @@ impl<F: Field> MemoryOp<F> {
        row[TIMESTAMP] = F::from_canonical_usize(self.timestamp);
        row[IS_READ] = F::from_bool(self.is_read);
        row[ADDR_CONTEXT] = F::from_canonical_usize(self.context);
        row[ADDR_SEGMENT] = F::from_canonical_usize(self.segment);
        row[ADDR_SEGMENT] = F::from_canonical_usize(self.segment as usize);
        row[ADDR_VIRTUAL] = F::from_canonical_usize(self.virt);
        for j in 0..VALUE_LIMBS {
            row[value_limb(j)] = self.value[j];
            row[value_limb(j)] = F::from_canonical_u32((self.value >> (j * 32)).low_u32());
        }
        row
    }
}

pub fn generate_random_memory_ops<F: RichField, R: Rng>(
    num_ops: usize,
    rng: &mut R,
) -> Vec<MemoryOp<F>> {
    let mut memory_ops = Vec::new();

    let mut current_memory_values: HashMap<(usize, usize, usize), [F; 8]> = HashMap::new();
    let num_cycles = num_ops / 2;
    for clock in 0..num_cycles {
        let mut used_indices = HashSet::new();
        let mut new_writes_this_cycle = HashMap::new();
        let mut has_read = false;
        for _ in 0..2 {
            let mut channel_index = rng.gen_range(0..NUM_CHANNELS);
            while used_indices.contains(&channel_index) {
                channel_index = rng.gen_range(0..NUM_CHANNELS);
            }
            used_indices.insert(channel_index);

            let is_read = if clock == 0 {
                false
            } else {
                !has_read && rng.gen()
            };
            has_read = has_read || is_read;

            let (context, segment, virt, vals) = if is_read {
                let written: Vec<_> = current_memory_values.keys().collect();
                let &(context, segment, virt) = written[rng.gen_range(0..written.len())];
                let &vals = current_memory_values
                    .get(&(context, segment, virt))
                    .unwrap();

                (context, segment, virt, vals)
            } else {
                // TODO: with taller memory table or more padding (to enable range-checking bigger diffs),
                // test larger address values.
                let mut context = rng.gen_range(0..40);
                let mut segment = rng.gen_range(0..8);
                let mut virt = rng.gen_range(0..20);
                while new_writes_this_cycle.contains_key(&(context, segment, virt)) {
                    context = rng.gen_range(0..40);
                    segment = rng.gen_range(0..8);
                    virt = rng.gen_range(0..20);
                }

                let val: [u32; 8] = rng.gen();
                let vals: [F; 8] = val.map(F::from_canonical_u32);

                new_writes_this_cycle.insert((context, segment, virt), vals);

                (context, segment, virt, vals)
            };

            let timestamp = clock * NUM_CHANNELS + channel_index;
            memory_ops.push(MemoryOp {
                channel_index: Some(channel_index),
                timestamp,
                is_read,
                context,
                segment,
                virt,
                value: vals,
            });
        }
        for (k, v) in new_writes_this_cycle {
            current_memory_values.insert(k, v);
        }
    }

    memory_ops
}

fn get_max_range_check<F: Field>(memory_ops: &[MemoryOp<F>]) -> usize {
fn get_max_range_check(memory_ops: &[MemoryOp]) -> usize {
    memory_ops
        .iter()
        .tuple_windows()
@@ -160,7 +87,7 @@ fn get_max_range_check<F: Field>(memory_ops: &[MemoryOp<F>]) -> usize {
            if curr.context != next.context {
                next.context - curr.context - 1
            } else if curr.segment != next.segment {
                next.segment - curr.segment - 1
                next.segment as usize - curr.segment as usize - 1
            } else if curr.virt != next.virt {
                next.virt - curr.virt - 1
            } else {
@@ -216,7 +143,7 @@ pub fn generate_first_change_flags_and_rc<F: RichField>(trace_rows: &mut [[F; NU
impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
    /// Generate most of the trace rows. Excludes a few columns like `COUNTER`, which are generated
    /// later, after transposing to column-major form.
    fn generate_trace_row_major(&self, mut memory_ops: Vec<MemoryOp<F>>) -> Vec<[F; NUM_COLUMNS]> {
    fn generate_trace_row_major(&self, mut memory_ops: Vec<MemoryOp>) -> Vec<[F; NUM_COLUMNS]> {
        memory_ops.sort_by_key(|op| (op.context, op.segment, op.virt, op.timestamp));

        Self::pad_memory_ops(&mut memory_ops);
@@ -241,7 +168,7 @@ impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
        trace_col_vecs[COUNTER_PERMUTED] = permuted_table;
    }

    fn pad_memory_ops(memory_ops: &mut Vec<MemoryOp<F>>) {
    fn pad_memory_ops(memory_ops: &mut Vec<MemoryOp>) {
        let num_ops = memory_ops.len();
        let max_range_check = get_max_range_check(memory_ops);
        let num_ops_padded = num_ops.max(max_range_check + 1).next_power_of_two();
@@ -264,7 +191,7 @@ impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
        }
    }

    pub fn generate_trace(&self, memory_ops: Vec<MemoryOp<F>>) -> Vec<PolynomialValues<F>> {
    pub(crate) fn generate_trace(&self, memory_ops: Vec<MemoryOp>) -> Vec<PolynomialValues<F>> {
        let mut timing = TimingTree::new("generate trace", log::Level::Debug);

        // Generate most of the trace in row-major form.
@@ -533,13 +460,90 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for MemoryStark<F
}

#[cfg(test)]
mod tests {
    use anyhow::Result;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
pub(crate) mod tests {
    use std::collections::{HashMap, HashSet};

    use crate::memory::memory_stark::MemoryStark;
    use anyhow::Result;
    use ethereum_types::U256;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
    use rand::prelude::SliceRandom;
    use rand::Rng;

    use crate::memory::memory_stark::{MemoryOp, MemoryStark};
    use crate::memory::segments::Segment;
    use crate::memory::NUM_CHANNELS;
    use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};

    pub(crate) fn generate_random_memory_ops<R: Rng>(num_ops: usize, rng: &mut R) -> Vec<MemoryOp> {
        let mut memory_ops = Vec::new();

        let mut current_memory_values: HashMap<(usize, Segment, usize), U256> = HashMap::new();
        let num_cycles = num_ops / 2;
        for clock in 0..num_cycles {
            let mut used_indices = HashSet::new();
            let mut new_writes_this_cycle = HashMap::new();
            let mut has_read = false;
            for _ in 0..2 {
                let mut channel_index = rng.gen_range(0..NUM_CHANNELS);
                while used_indices.contains(&channel_index) {
                    channel_index = rng.gen_range(0..NUM_CHANNELS);
                }
                used_indices.insert(channel_index);

                let is_read = if clock == 0 {
                    false
                } else {
                    !has_read && rng.gen()
                };
                has_read = has_read || is_read;

                let (context, segment, virt, vals) = if is_read {
                    let written: Vec<_> = current_memory_values.keys().collect();
                    let &(context, segment, virt) = written[rng.gen_range(0..written.len())];
                    let &vals = current_memory_values
                        .get(&(context, segment, virt))
                        .unwrap();

                    (context, segment, virt, vals)
                } else {
                    // TODO: with taller memory table or more padding (to enable range-checking bigger diffs),
                    // test larger address values.
                    let mut context = rng.gen_range(0..40);
                    let segments = [Segment::Code, Segment::Stack, Segment::MainMemory];
                    let mut segment = *segments.choose(rng).unwrap();
                    let mut virt = rng.gen_range(0..20);
                    while new_writes_this_cycle.contains_key(&(context, segment, virt)) {
                        context = rng.gen_range(0..40);
                        segment = *segments.choose(rng).unwrap();
                        virt = rng.gen_range(0..20);
                    }

                    let val = U256(rng.gen());

                    new_writes_this_cycle.insert((context, segment, virt), val);

                    (context, segment, virt, val)
                };

                let timestamp = clock * NUM_CHANNELS + channel_index;
                memory_ops.push(MemoryOp {
                    channel_index: Some(channel_index),
                    timestamp,
                    is_read,
                    context,
                    segment,
                    virt,
                    value: vals,
                });
            }
            for (k, v) in new_writes_this_cycle {
                current_memory_values.insert(k, v);
            }
        }

        memory_ops
    }

    #[test]
    fn test_stark_degree() -> Result<()> {
        const D: usize = 2;
@@ -1,22 +1,61 @@
/// Contains EVM bytecode.
pub const CODE: usize = 0;
#[allow(dead_code)] // TODO: Not all segments are used yet.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)]
pub(crate) enum Segment {
    /// Contains EVM bytecode.
    Code = 0,
    /// The program stack.
    Stack = 1,
    /// Main memory, owned by the contract code.
    MainMemory = 2,
    /// Data passed to the current context by its caller.
    Calldata = 3,
    /// Data returned to the current context by its latest callee.
    Returndata = 4,
    /// A segment which contains a few fixed-size metadata fields, such as the caller's context, or the
    /// size of `CALLDATA` and `RETURNDATA`.
    Metadata = 5,
    /// General purpose kernel memory, used by various kernel functions.
    /// In general, calling a helper function can result in this memory being clobbered.
    KernelGeneral = 6,
    /// Contains transaction data (after it's parsed and converted to a standard format).
    TxnData = 7,
    /// Raw RLP data.
    RlpRaw = 8,
    /// RLP data that has been parsed and converted to a more "friendly" format.
    RlpParsed = 9,
}

pub const STACK: usize = 1;
impl Segment {
    pub(crate) const COUNT: usize = 10;

    /// Main memory, owned by the contract code.
    pub const MAIN_MEM: usize = 2;
    pub(crate) fn all() -> [Self; Self::COUNT] {
        [
            Self::Code,
            Self::Stack,
            Self::MainMemory,
            Self::Calldata,
            Self::Returndata,
            Self::Metadata,
            Self::KernelGeneral,
            Self::TxnData,
            Self::RlpRaw,
            Self::RlpParsed,
        ]
    }

    /// Memory owned by the kernel.
    pub const KERNEL_MEM: usize = 3;

    /// Data passed to the current context by its caller.
    pub const CALLDATA: usize = 4;

    /// Data returned to the current context by its latest callee.
    pub const RETURNDATA: usize = 5;

    /// A segment which contains a few fixed-size metadata fields, such as the caller's context, or the
    /// size of `CALLDATA` and `RETURNDATA`.
    pub const METADATA: usize = 6;

    pub const NUM_SEGMENTS: usize = 7;
    /// The variable name that gets passed into kernel assembly code.
    pub(crate) fn var_name(&self) -> &'static str {
        match self {
            Segment::Code => "SEGMENT_CODE",
            Segment::Stack => "SEGMENT_STACK",
            Segment::MainMemory => "SEGMENT_MAIN_MEMORY",
            Segment::Calldata => "SEGMENT_CALLDATA",
            Segment::Returndata => "SEGMENT_RETURNDATA",
            Segment::Metadata => "SEGMENT_METADATA",
            Segment::KernelGeneral => "SEGMENT_KERNEL_GENERAL",
            Segment::TxnData => "SEGMENT_TXN_DATA",
            Segment::RlpRaw => "SEGMENT_RLP_RAW",
            Segment::RlpParsed => "SEGMENT_RLP_PARSED",
        }
    }
}