Basic smart contract test

This commit is contained in:
Daniel Lubarov 2023-01-14 21:18:58 -08:00
parent be7a489c6e
commit df2ba7a384
18 changed files with 467 additions and 54 deletions

View File

@ -1,12 +1,15 @@
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::memory::segments::Segment;
fn eval_packed_get<P: PackedField>(
lv: &CpuColumnsView<P>,
@ -45,7 +48,44 @@ fn eval_packed_set<P: PackedField>(
) {
let filter = lv.op.set_context;
let pop_channel = lv.mem_channels[0];
yield_constr.constraint_transition(filter * (pop_channel.value[0] - nv.context));
let write_old_sp_channel = lv.mem_channels[1];
let read_new_sp_channel = lv.mem_channels[2];
let stack_segment = P::Scalar::from_canonical_u64(Segment::Stack as u64);
let ctx_metadata_segment = P::Scalar::from_canonical_u64(Segment::ContextMetadata as u64);
let stack_size_field = P::Scalar::from_canonical_u64(ContextMetadata::StackSize as u64);
let local_sp_dec = lv.stack_len - P::ONES;
// The next row's context is read from memory channel 0.
yield_constr.constraint(filter * (pop_channel.value[0] - nv.context));
yield_constr.constraint(filter * (pop_channel.used - P::ONES));
yield_constr.constraint(filter * (pop_channel.is_read - P::ONES));
yield_constr.constraint(filter * (pop_channel.addr_context - lv.context));
yield_constr.constraint(filter * (pop_channel.addr_segment - stack_segment));
yield_constr.constraint(filter * (pop_channel.addr_virtual - local_sp_dec));
// The old SP is decremented (since the new context was popped) and written to memory.
yield_constr.constraint(filter * (write_old_sp_channel.value[0] - local_sp_dec));
for limb in &write_old_sp_channel.value[1..] {
yield_constr.constraint(filter * *limb);
}
yield_constr.constraint(filter * (write_old_sp_channel.used - P::ONES));
yield_constr.constraint(filter * write_old_sp_channel.is_read);
yield_constr.constraint(filter * (write_old_sp_channel.addr_context - lv.context));
yield_constr.constraint(filter * (write_old_sp_channel.addr_segment - ctx_metadata_segment));
yield_constr.constraint(filter * (write_old_sp_channel.addr_virtual - stack_size_field));
// The new SP is loaded from memory.
yield_constr.constraint(filter * (read_new_sp_channel.value[0] - nv.stack_len));
yield_constr.constraint(filter * (read_new_sp_channel.used - P::ONES));
yield_constr.constraint(filter * (read_new_sp_channel.is_read - P::ONES));
yield_constr.constraint(filter * (read_new_sp_channel.addr_context - nv.context));
yield_constr.constraint(filter * (read_new_sp_channel.addr_segment - ctx_metadata_segment));
yield_constr.constraint(filter * (read_new_sp_channel.addr_virtual - stack_size_field));
// Disable unused memory channels
for &channel in &lv.mem_channels[3..] {
yield_constr.constraint(filter * channel.used);
}
}
fn eval_ext_circuit_set<F: RichField + Extendable<D>, const D: usize>(
@ -56,10 +96,118 @@ fn eval_ext_circuit_set<F: RichField + Extendable<D>, const D: usize>(
) {
let filter = lv.op.set_context;
let pop_channel = lv.mem_channels[0];
let write_old_sp_channel = lv.mem_channels[1];
let read_new_sp_channel = lv.mem_channels[2];
let stack_segment =
builder.constant_extension(F::Extension::from_canonical_u32(Segment::Stack as u32));
let ctx_metadata_segment = builder.constant_extension(F::Extension::from_canonical_u32(
Segment::ContextMetadata as u32,
));
let stack_size_field = builder.constant_extension(F::Extension::from_canonical_u32(
ContextMetadata::StackSize as u32,
));
let one = builder.one_extension();
let local_sp_dec = builder.sub_extension(lv.stack_len, one);
let diff = builder.sub_extension(pop_channel.value[0], nv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint_transition(builder, constr);
// The next row's context is read from memory channel 0.
{
let diff = builder.sub_extension(pop_channel.value[0], nv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, pop_channel.used, filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, pop_channel.is_read, filter);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(pop_channel.addr_context, lv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(pop_channel.addr_segment, stack_segment);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(pop_channel.addr_virtual, local_sp_dec);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
// The old SP is decremented (since the new context was popped) and written to memory.
{
let diff = builder.sub_extension(write_old_sp_channel.value[0], local_sp_dec);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
for limb in &write_old_sp_channel.value[1..] {
let constr = builder.mul_extension(filter, *limb);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, write_old_sp_channel.used, filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_extension(filter, write_old_sp_channel.is_read);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(write_old_sp_channel.addr_context, lv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(write_old_sp_channel.addr_segment, ctx_metadata_segment);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(write_old_sp_channel.addr_virtual, stack_size_field);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
// The new SP is loaded from memory.
{
let diff = builder.sub_extension(read_new_sp_channel.value[0], nv.stack_len);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, read_new_sp_channel.used, filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, read_new_sp_channel.is_read, filter);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(read_new_sp_channel.addr_context, nv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(read_new_sp_channel.addr_segment, ctx_metadata_segment);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
{
let diff = builder.sub_extension(read_new_sp_channel.addr_virtual, stack_size_field);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
// Disable unused memory channels
for &channel in &lv.mem_channels[3..] {
let constr = builder.mul_extension(filter, channel.used);
yield_constr.constraint(builder, constr);
}
}
pub fn eval_packed<P: PackedField>(

View File

@ -13,6 +13,16 @@ global extcodehash:
// stack: codehash, retdest
SWAP1 JUMP
// Convenience macro: calls the `extcodehash` routine above and returns here.
%macro extcodehash
    // stack: address
    %stack (address) -> (address, %%after)
    %jump(extcodehash)
%%after:
    // stack: codehash (pushed by the extcodehash routine before it JUMPs back)
%endmacro
// Pushes 1 if the code of the account at `address` is empty, i.e. its code
// hash equals the hash of the empty string; pushes 0 otherwise.
%macro ext_code_empty
    // stack: address
    %extcodehash
    // stack: codehash
    %eq_const(@EMPTY_STRING_HASH)
    // stack: code_empty
%endmacro
%macro codesize
// stack: (empty)

View File

@ -52,9 +52,90 @@ global process_message_txn:
// stack: transfer_eth_status, retdest
%jumpi(process_message_txn_insufficient_balance)
// stack: retdest
// TODO: If code is non-empty, execute it in a new context.
JUMP
// If to's code is empty, return.
%mload_txn_field(@TXN_FIELD_TO) %ext_code_empty
// stack: code_empty, retdest
%jumpi(process_message_txn_return)
// Otherwise, load to's code and execute it in a new context.
// stack: retdest
%create_context
// stack: new_ctx, retdest
PUSH process_message_txn_code_loaded
PUSH @SEGMENT_CODE
DUP3 // new_ctx
%mload_txn_field(@TXN_FIELD_TO)
// stack: address, new_ctx, segment, process_message_txn_code_loaded, new_ctx, retdest
%jump(load_code)
global process_message_txn_insufficient_balance:
// stack: retdest
PANIC // TODO
global process_message_txn_return:
// TODO: Return leftover gas?
JUMP
global process_message_txn_code_loaded:
// stack: code_len, new_ctx, retdest
POP
// stack: new_ctx, retdest
// Store the address in metadata.
%mload_txn_field(@TXN_FIELD_TO)
PUSH @CTX_METADATA_ADDRESS
PUSH @SEGMENT_CONTEXT_METADATA
DUP4 // new_ctx
MSTORE_GENERAL
// stack: new_ctx, retdest
// Store the caller in metadata.
%mload_txn_field(@TXN_FIELD_ORIGIN)
PUSH @CTX_METADATA_CALLER
PUSH @SEGMENT_CONTEXT_METADATA
DUP4 // new_ctx
MSTORE_GENERAL
// stack: new_ctx, retdest
// Store the call value field in metadata.
%mload_txn_field(@TXN_FIELD_VALUE)
PUSH @CTX_METADATA_CALL_VALUE
PUSH @SEGMENT_CONTEXT_METADATA
DUP4 // new_ctx
MSTORE_GENERAL
// stack: new_ctx, retdest
// No need to write @CTX_METADATA_STATIC, because it's 0 which is the default.
// Store parent context in metadata.
GET_CONTEXT
PUSH @CTX_METADATA_PARENT_CONTEXT
PUSH @SEGMENT_CONTEXT_METADATA
DUP4 // new_ctx
MSTORE_GENERAL
// stack: new_ctx, retdest
// Store parent PC = process_message_txn_after_call.
PUSH process_message_txn_after_call
PUSH @CTX_METADATA_PARENT_PC
PUSH @SEGMENT_CONTEXT_METADATA
DUP4 // new_ctx
MSTORE_GENERAL
// stack: new_ctx, retdest
// TODO: Populate CALLDATA
// TODO: Save parent gas and set child gas
// Now, switch to the new context and go to usermode with PC=0.
SET_CONTEXT
// stack: retdest
PUSH 0 // jump dest
EXIT_KERNEL
global process_message_txn_after_call:
// stack: success, retdest
// TODO: Return leftover gas? Or handled by termination instructions?
POP // Pop success for now. Will go into the reciept when we support that.
JUMP

View File

@ -4,6 +4,7 @@
global sys_stop:
// TODO: Set parent context's CTX_METADATA_RETURNDATA_SIZE to 0.
// TODO: Refund unused gas to parent.
PUSH 1 // success
%jump(terminate_common)
global sys_return:
@ -11,17 +12,20 @@ global sys_return:
// TODO: Copy returned memory to parent context's RETURNDATA (but not if we're returning from a constructor?)
// TODO: Copy returned memory to parent context's memory (as specified in their call instruction)
// TODO: Refund unused gas to parent.
PUSH 1 // success
%jump(terminate_common)
global sys_selfdestruct:
%consume_gas_const(@GAS_SELFDESTRUCT)
// TODO: Destroy account.
// TODO: Refund unused gas to parent.
PUSH 1 // success
%jump(terminate_common)
global sys_revert:
// TODO: Refund unused gas to parent.
// TODO: Revert state changes.
PUSH 0 // success
%jump(terminate_common)
// The execution is in an exceptional halt-ing state if
@ -33,6 +37,7 @@ global sys_revert:
// - state modification is attempted during a static call
global fault_exception:
// TODO: Revert state changes.
PUSH 0 // success
%jump(terminate_common)
terminate_common:
@ -42,6 +47,13 @@ terminate_common:
// SEGMENT_KERNEL_GENERAL[0], then load it after the context switch.
PUSH 0
// stack: 0, success
%mstore_kernel_general
// stack: (empty)
// Similarly, we write the parent PC to SEGMENT_KERNEL_GENERAL[1] so that
// we can later read it after switching to the parent context.
%mload_context_metadata(@CTX_METADATA_PARENT_PC)
PUSH 1
%mstore_kernel(@SEGMENT_KERNEL_GENERAL)
// stack: (empty)
@ -50,11 +62,9 @@ terminate_common:
SET_CONTEXT
// stack: (empty)
// Load the success flag that we stored in SEGMENT_KERNEL_GENERAL[0].
PUSH 0
%mload_kernel(@SEGMENT_KERNEL_GENERAL)
// stack: success
// Load the success flag and parent PC that we stored in SEGMENT_KERNEL_GENERAL.
PUSH 0 %mload_kernel_general
PUSH 1 %mload_kernel_general
// JUMP to the parent IP.
%mload_context_metadata(@CTX_METADATA_PARENT_PC)
// stack: parent_pc, success
JUMP

View File

@ -80,12 +80,14 @@ global add_eth:
// stack: balance_ptr, addr, amount, retdest
DUP1 %mload_trie_data
// stack: balance, balance_ptr, addr, amount, retdest
%stack (balance, balance_ptr, addr, amount) -> (amount, balance, addr, balance_ptr)
%stack (balance, balance_ptr, addr, amount) -> (amount, balance, balance_ptr)
ADD
// stack: new_balance, addr, balance_ptr, retdest
SWAP1 %mstore_trie_data
// stack: addr, retdest
POP JUMP
// stack: new_balance, balance_ptr, retdest
SWAP1
// stack: balance_ptr, new_balance, retdest
%mstore_trie_data
// stack: retdest
JUMP
global add_eth_new_account:
// TODO: Skip creation if amount == 0?
// stack: null_account_ptr, addr, amount, retdest

View File

@ -48,6 +48,7 @@ impl Kernel {
.keys()
.cloned()
.sorted_by_key(|label| global_labels[label])
.inspect(|key| debug!("Global label: {} => {:?}", key, global_labels[key]))
.collect();
Self {
code,

View File

@ -25,10 +25,11 @@ pub(crate) enum ContextMetadata {
StateTrieCheckpointPointer = 9,
/// Size of the active main memory.
MSize = 10,
StackSize = 11,
}
impl ContextMetadata {
pub(crate) const COUNT: usize = 11;
pub(crate) const COUNT: usize = 12;
pub(crate) fn all() -> [Self; Self::COUNT] {
[
@ -43,6 +44,7 @@ impl ContextMetadata {
Self::Static,
Self::StateTrieCheckpointPointer,
Self::MSize,
Self::StackSize,
]
}
@ -60,6 +62,7 @@ impl ContextMetadata {
ContextMetadata::Static => "CTX_METADATA_STATIC",
ContextMetadata::StateTrieCheckpointPointer => "CTX_METADATA_STATE_TRIE_CHECKPOINT_PTR",
ContextMetadata::MSize => "CTX_METADATA_MSIZE",
ContextMetadata::StackSize => "CTX_METADATA_STACK_SIZE",
}
}
}

View File

@ -4,7 +4,7 @@ mod ast;
pub(crate) mod constants;
mod cost_estimator;
pub(crate) mod keccak_util;
mod opcodes;
pub mod opcodes;
mod optimizer;
mod parser;
pub mod stack;

View File

@ -1,12 +1,12 @@
/// The opcode of the `PUSH[n]` instruction, given a byte count `n`.
pub(crate) fn get_push_opcode(n: u8) -> u8 {
pub fn get_push_opcode(n: u8) -> u8 {
assert!(n > 0);
assert!(n <= 32);
0x60 + n - 1
}
/// The opcode of a standard instruction (not a `PUSH`).
pub(crate) fn get_opcode(mnemonic: &str) -> u8 {
pub fn get_opcode(mnemonic: &str) -> u8 {
match mnemonic.to_uppercase().as_str() {
"STOP" => 0x00,
"ADD" => 0x01,

View File

@ -37,9 +37,6 @@ pub fn generate<F: PrimeField64>(lv: &mut CpuColumnsView<F>) {
let cycle_filter = lv.is_cpu_cycle;
if cycle_filter != F::ZERO {
assert!(lv.is_kernel_mode.to_canonical_u64() <= 1);
// Set `lv.code_context` to 0 if in kernel mode and to `lv.context` if in user mode.
lv.code_context = (F::ONE - lv.is_kernel_mode) * lv.context;
}
for channel in lv.mem_channels {

View File

@ -93,11 +93,7 @@ const STACK_BEHAVIORS: OpsColumnsView<Option<StackBehavior>> = OpsColumnsView {
pushes: true,
disable_other_channels: true,
}),
set_context: Some(StackBehavior {
num_pops: 1,
pushes: false,
disable_other_channels: true,
}),
set_context: None, // SET_CONTEXT is special since it involves the old and the new stack.
consume_gas: None, // TODO
exit_kernel: Some(StackBehavior {
num_pops: 1,

View File

@ -3,6 +3,7 @@ use std::any::type_name;
use anyhow::{ensure, Result};
use itertools::Itertools;
use maybe_rayon::*;
use once_cell::sync::Lazy;
use plonky2::field::extension::Extendable;
use plonky2::field::packable::Packable;
use plonky2::field::packed::PackedField;
@ -22,6 +23,7 @@ use crate::all_stark::{AllStark, Table, NUM_TABLES};
use crate::config::StarkConfig;
use crate::constraint_consumer::ConstraintConsumer;
use crate::cpu::cpu_stark::CpuStark;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cross_table_lookup::{cross_table_lookup_data, CtlCheckVars, CtlData};
use crate::generation::{generate_traces, GenerationInputs};
use crate::keccak::keccak_stark::KeccakStark;
@ -54,6 +56,7 @@ where
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
{
timed!(timing, "build kernel", Lazy::force(&KERNEL));
let (traces, public_values) = timed!(
timing,
"generate all traces",

View File

@ -142,6 +142,10 @@ impl MemoryState {
}
pub fn get(&self, address: MemoryAddress) -> U256 {
if address.context >= self.contexts.len() {
return U256::zero();
}
let segment = Segment::all()[address.segment];
let val = self.contexts[address.context].segments[address.segment].get(address.virt);
assert!(
@ -155,6 +159,10 @@ impl MemoryState {
}
pub fn set(&mut self, address: MemoryAddress, val: U256) {
while address.context >= self.contexts.len() {
self.contexts.push(MemoryContextState::default());
}
let segment = Segment::all()[address.segment];
assert!(
val.bits() <= segment.bit_range(),

View File

@ -6,6 +6,7 @@ use plonky2::field::types::Field;
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::assembler::BYTES_PER_OFFSET;
use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::cpu::simple_logic::eq_iszero::generate_pinv_diff;
use crate::generation::state::GenerationState;
@ -309,8 +310,22 @@ pub(crate) fn generate_set_context<F: Field>(
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
let [(ctx, log_in)] = stack_pop_with_log_and_fill::<1, _>(state, &mut row)?;
state.registers.context = ctx.as_usize();
let sp_to_save = state.registers.stack_len.into();
let old_ctx = state.registers.context;
let new_ctx = ctx.as_usize();
let sp_field = ContextMetadata::StackSize as usize;
let old_sp_addr = MemoryAddress::new(old_ctx, Segment::ContextMetadata, sp_field);
let new_sp_addr = MemoryAddress::new(new_ctx, Segment::ContextMetadata, sp_field);
let log_write_old_sp = mem_write_gp_log_and_fill(1, old_sp_addr, state, &mut row, sp_to_save);
let (new_sp, log_read_new_sp) = mem_read_gp_with_log_and_fill(2, new_sp_addr, state, &mut row);
state.registers.context = new_ctx;
state.registers.stack_len = new_sp.as_usize();
state.traces.push_memory(log_in);
state.traces.push_memory(log_write_old_sp);
state.traces.push_memory(log_read_new_sp);
state.traces.push_cpu(row);
Ok(())
}
@ -320,11 +335,11 @@ pub(crate) fn generate_push<F: Field>(
state: &mut GenerationState<F>,
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
let context = state.registers.effective_context();
let code_context = state.registers.code_context();
let num_bytes = n as usize + 1;
let initial_offset = state.registers.program_counter + 1;
let offsets = initial_offset..initial_offset + num_bytes;
let mut addrs = offsets.map(|offset| MemoryAddress::new(context, Segment::Code, offset));
let mut addrs = offsets.map(|offset| MemoryAddress::new(code_context, Segment::Code, offset));
// First read val without going through `mem_read_with_log` type methods, so we can pass it
// to stack_push_log_and_fill.
@ -333,7 +348,7 @@ pub(crate) fn generate_push<F: Field>(
state
.memory
.get(MemoryAddress::new(
context,
code_context,
Segment::Code,
initial_offset + i,
))
@ -384,11 +399,7 @@ pub(crate) fn generate_dup<F: Field>(
.stack_len
.checked_sub(1 + (n as usize))
.ok_or(ProgramError::StackUnderflow)?;
let other_addr = MemoryAddress::new(
state.registers.effective_context(),
Segment::Stack,
other_addr_lo,
);
let other_addr = MemoryAddress::new(state.registers.context, Segment::Stack, other_addr_lo);
let (val, log_in) = mem_read_gp_with_log_and_fill(0, other_addr, state, &mut row);
let log_out = stack_push_log_and_fill(state, &mut row, val)?;
@ -409,11 +420,7 @@ pub(crate) fn generate_swap<F: Field>(
.stack_len
.checked_sub(2 + (n as usize))
.ok_or(ProgramError::StackUnderflow)?;
let other_addr = MemoryAddress::new(
state.registers.effective_context(),
Segment::Stack,
other_addr_lo,
);
let other_addr = MemoryAddress::new(state.registers.context, Segment::Stack, other_addr_lo);
let [(in0, log_in0)] = stack_pop_with_log_and_fill::<1, _>(state, &mut row)?;
let (in1, log_in1) = mem_read_gp_with_log_and_fill(1, other_addr, state, &mut row);

View File

@ -11,7 +11,7 @@ pub struct RegistersState {
}
impl RegistersState {
pub(crate) fn effective_context(&self) -> usize {
pub(crate) fn code_context(&self) -> usize {
if self.is_kernel {
KERNEL_CONTEXT
} else {

View File

@ -13,7 +13,7 @@ use crate::witness::util::{mem_read_code_with_log_and_fill, stack_peek};
use crate::{arithmetic, logic};
fn read_code_memory<F: Field>(state: &mut GenerationState<F>, row: &mut CpuColumnsView<F>) -> u8 {
let code_context = state.registers.effective_context();
let code_context = state.registers.code_context();
row.code_context = F::from_canonical_usize(code_context);
let address = MemoryAddress::new(code_context, Segment::Code, state.registers.program_counter);
@ -241,14 +241,18 @@ fn try_perform_instruction<F: Field>(state: &mut GenerationState<F>) -> Result<(
let opcode = read_code_memory(state, &mut row);
let op = decode(state.registers, opcode)?;
log_instruction(state, op);
if state.registers.is_kernel {
log_kernel_instruction(state, op);
} else {
log::info!("User instruction: {:?}", op);
}
fill_op_flag(op, &mut row);
perform_op(state, op, row)
}
fn log_instruction<F: Field>(state: &mut GenerationState<F>, op: Operation) {
fn log_kernel_instruction<F: Field>(state: &mut GenerationState<F>, op: Operation) {
let pc = state.registers.program_counter;
let is_interesting_offset = KERNEL
.offset_label(pc)
@ -261,8 +265,9 @@ fn log_instruction<F: Field>(state: &mut GenerationState<F>, op: Operation) {
};
log::log!(
level,
"Cycle {}, pc={}, instruction={:?}, stack={:?}",
"Cycle {}, ctx={}, pc={}, instruction={:?}, stack={:?}",
state.traces.clock(),
state.registers.context,
KERNEL.offset_name(pc),
op,
(0..state.registers.stack_len)
@ -270,9 +275,7 @@ fn log_instruction<F: Field>(state: &mut GenerationState<F>, op: Operation) {
.collect_vec()
);
if state.registers.is_kernel && pc >= KERNEL.code.len() {
panic!("Kernel PC is out of range: {}", pc);
}
assert!(pc < KERNEL.code.len(), "Kernel PC is out of range: {}", pc);
}
fn handle_error<F: Field>(_state: &mut GenerationState<F>) {

View File

@ -33,7 +33,7 @@ pub(crate) fn stack_peek<F: Field>(state: &GenerationState<F>, i: usize) -> Opti
return None;
}
Some(state.memory.get(MemoryAddress::new(
state.registers.effective_context(),
state.registers.context,
Segment::Stack,
state.registers.stack_len - 1 - i,
)))
@ -142,7 +142,7 @@ pub(crate) fn stack_pop_with_log_and_fill<const N: usize, F: Field>(
let result = std::array::from_fn(|i| {
let address = MemoryAddress::new(
state.registers.effective_context(),
state.registers.context,
Segment::Stack,
state.registers.stack_len - 1 - i,
);
@ -164,7 +164,7 @@ pub(crate) fn stack_push_log_and_fill<F: Field>(
}
let address = MemoryAddress::new(
state.registers.effective_context(),
state.registers.context,
Segment::Stack,
state.registers.stack_len,
);

View File

@ -0,0 +1,144 @@
use std::collections::HashMap;
use std::time::Duration;
use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV};
use eth_trie_utils::partial_trie::{Nibbles, PartialTrie};
use ethereum_types::U256;
use hex_literal::hex;
use keccak_hash::keccak;
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::plonk::config::PoseidonGoldilocksConfig;
use plonky2::util::timing::TimingTree;
use plonky2_evm::all_stark::AllStark;
use plonky2_evm::config::StarkConfig;
use plonky2_evm::cpu::kernel::opcodes::{get_opcode, get_push_opcode};
use plonky2_evm::generation::mpt::AccountRlp;
use plonky2_evm::generation::{GenerationInputs, TrieInputs};
use plonky2_evm::proof::BlockMetadata;
use plonky2_evm::prover::prove;
use plonky2_evm::verifier::verify_proof;
// Base field the STARK traces are generated over.
type F = GoldilocksField;
// Degree of the field extension used by the proof system.
const D: usize = 2;
// Prover configuration: Poseidon hashing over the Goldilocks field.
type C = PoseidonGoldilocksConfig;
/// Test a simple message transaction to an account with nonempty code:
/// the callee's bytecode (`PUSH1 3, PUSH1 4, ADD, STOP`) is executed, and the
/// resulting state root is checked against a hand-built expected state trie.
#[test]
fn test_basic_smart_contract() -> anyhow::Result<()> {
    init_logger();

    let all_stark = AllStark::<F, D>::default();
    let config = StarkConfig::standard_fast_config();

    let sender = hex!("2c7536e3605d9c16a7a3d7b1898e529396a65c23");
    let to = hex!("a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0");

    // Accounts live in the state trie under the keccak hash of their address.
    let sender_state_key = keccak(sender);
    let to_state_key = keccak(to);
    let sender_nibbles = Nibbles::from_bytes_be(sender_state_key.as_bytes()).unwrap();
    let to_nibbles = Nibbles::from_bytes_be(to_state_key.as_bytes()).unwrap();
    let value = U256::from(100u32);

    // The callee's bytecode: compute 3 + 4, then halt.
    let push1 = get_push_opcode(1);
    let add = get_opcode("ADD");
    let stop = get_opcode("STOP");
    let code = [push1, 3, push1, 4, add, stop];
    let code_hash = keccak(code);

    let sender_account_before = AccountRlp {
        nonce: 5.into(),
        balance: eth_to_wei(100_000.into()),
        ..AccountRlp::default()
    };
    // The recipient already exists and carries the contract code above.
    let to_account_before = AccountRlp {
        code_hash,
        ..AccountRlp::default()
    };

    // Build the pre-state trie: a branch root whose children are the two
    // account leaves, indexed by the first nibble of each state key.
    let state_trie_before = {
        let mut children = std::array::from_fn(|_| PartialTrie::Empty.into());
        children[sender_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
            nibbles: sender_nibbles.truncate_n_nibbles_front(1),
            value: rlp::encode(&sender_account_before).to_vec(),
        }
        .into();
        children[to_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
            nibbles: to_nibbles.truncate_n_nibbles_front(1),
            value: rlp::encode(&to_account_before).to_vec(),
        }
        .into();
        PartialTrie::Branch {
            children,
            value: vec![],
        }
    };

    let tries_before = TrieInputs {
        state_trie: state_trie_before,
        transactions_trie: PartialTrie::Empty,
        receipts_trie: PartialTrie::Empty,
        storage_tries: vec![],
    };

    // Generated using a little py-evm script.
    // (A signed transaction from `sender` to `to` carrying `value` wei.)
    let txn = hex!("f861050a8255f094a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0648242421ba02c89eb757d9deeb1f5b3859a9d4d679951ef610ac47ad4608dc142beb1b7e313a05af7e9fbab825455d36c36c7f4cfcafbeafa9a77bdff936b52afb36d4fe4bcdd");

    let block_metadata = BlockMetadata::default();

    // Preimage map so the kernel can resolve `code_hash` to the bytecode.
    let mut contract_code = HashMap::new();
    contract_code.insert(code_hash, code.to_vec());

    let inputs = GenerationInputs {
        signed_txns: vec![txn.to_vec()],
        tries: tries_before,
        contract_code,
        block_metadata,
    };

    let mut timing = TimingTree::new("prove", log::Level::Debug);
    let proof = prove::<F, C, D>(&all_stark, &config, inputs, &mut timing)?;
    timing.filter(Duration::from_millis(100)).print();

    // Expected post-state: `value` moved from sender to recipient.
    // Gas accounting and the sender nonce bump are not modeled yet (see TODOs).
    let expected_state_trie_after = {
        let sender_account_after = AccountRlp {
            balance: sender_account_before.balance - value, // TODO: Also subtract gas_used * price.
            // nonce: sender_account_before.nonce + 1, // TODO
            ..sender_account_before
        };
        let to_account_after = AccountRlp {
            balance: to_account_before.balance + value,
            ..to_account_before
        };
        let mut children = std::array::from_fn(|_| PartialTrie::Empty.into());
        children[sender_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
            nibbles: sender_nibbles.truncate_n_nibbles_front(1),
            value: rlp::encode(&sender_account_after).to_vec(),
        }
        .into();
        children[to_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
            nibbles: to_nibbles.truncate_n_nibbles_front(1),
            value: rlp::encode(&to_account_after).to_vec(),
        }
        .into();
        PartialTrie::Branch {
            children,
            value: vec![],
        }
    };

    assert_eq!(
        proof.public_values.trie_roots_after.state_root,
        expected_state_trie_after.calc_hash()
    );

    verify_proof(&all_stark, proof, &config)
}
/// Converts an amount denominated in ether to its wei equivalent.
fn eth_to_wei(eth: U256) -> U256 {
    // 1 ether = 10^18 wei.
    let wei_per_eth = U256::from(10).pow(18.into());
    eth * wei_per_eth
}
/// Initializes logging for the test, defaulting to the `info` level when the
/// filter env var is unset. Errors (e.g. double initialization) are ignored.
fn init_logger() {
    let env = Env::default().filter_or(DEFAULT_FILTER_ENV, "info");
    let _ = try_init_from_env(env);
}