Remove aborts for invalid jumps and Rebase

This commit is contained in:
4l0n50 2023-12-13 17:33:53 +01:00
parent 7eff4e2751
commit 3e78865d64
11 changed files with 541 additions and 46 deletions

View File

@ -367,12 +367,9 @@ call_too_deep:
%checkpoint // Checkpoint
%increment_call_depth
// Perform jumpdest analysis
PUSH %%after
%mload_context_metadata(@CTX_METADATA_CODE_SIZE)
GET_CONTEXT
// stack: ctx, code_size, retdest
%jump(jumpdest_analysis)
%%after:
%jumpdest_analisys
PUSH 0 // jump dest
EXIT_KERNEL
// (Old context) stack: new_ctx

View File

@ -1,45 +1,47 @@
// Populates @SEGMENT_JUMPDEST_BITS for the given context's code.
// Pre stack: ctx, code_len, retdest
// Set @SEGMENT_JUMPDEST_BITS to one between positions [init_pos, final_pos],
// for the given context's code.
// Pre stack: init_pos, ctx, final_pos, retdest
// Post stack: (empty)
global jumpdest_analysis:
// stack: ctx, code_len, retdest
PUSH 0 // i = 0
global verify_path_and_write_table:
loop:
// stack: i, ctx, code_len, retdest
// Ideally we would break if i >= code_len, but checking i > code_len is
// cheaper. It doesn't hurt to over-read by 1, since we'll read 0 which is
// a no-op.
DUP3 DUP2 GT // i > code_len
%jumpi(return)
// stack: i, ctx, final_pos, retdest
DUP3 DUP2 EQ // i == final_pos
%jumpi(proof_ok)
DUP3 DUP2 GT // i > final_pos
%jumpi(proof_not_ok)
// stack: i, ctx, code_len, retdest
// stack: i, ctx, final_pos, retdest
%stack (i, ctx) -> (ctx, @SEGMENT_CODE, i, i, ctx)
MLOAD_GENERAL
// stack: opcode, i, ctx, code_len, retdest
// stack: opcode, i, ctx, final_pos, retdest
DUP1
// Slightly more efficient than `%eq_const(0x5b) ISZERO`
PUSH 0x5b
SUB
// stack: opcode != JUMPDEST, opcode, i, ctx, code_len, retdest
// stack: opcode != JUMPDEST, opcode, i, ctx, final_pos, retdest
%jumpi(continue)
// stack: JUMPDEST, i, ctx, code_len, retdest
// stack: JUMPDEST, i, ctx, final_pos, retdest
%stack (JUMPDEST, i, ctx) -> (1, ctx, @SEGMENT_JUMPDEST_BITS, i, JUMPDEST, i, ctx)
MSTORE_GENERAL
continue:
// stack: opcode, i, ctx, code_len, retdest
// stack: opcode, i, ctx, final_pos, retdest
%add_const(code_bytes_to_skip)
%mload_kernel_code
// stack: bytes_to_skip, i, ctx, code_len, retdest
// stack: bytes_to_skip, i, ctx, final_pos, retdest
ADD
// stack: i, ctx, code_len, retdest
// stack: i, ctx, final_pos, retdest
%jump(loop)
return:
// stack: i, ctx, code_len, retdest
proof_ok:
// stack: i, ctx, final_pos, retdest
// We already know final pos is a jumpdest
%stack (i, ctx, final_pos) -> (1, ctx, @SEGMENT_JUMPDEST_BITS, i)
MSTORE_GENERAL
JUMP
proof_not_ok:
%pop3
JUMP
@ -89,3 +91,116 @@ code_bytes_to_skip:
%rep 128
BYTES 1 // 0x80-0xff
%endrep
// A proof attesting that jumpdest is a valid jump destination is
// either 0 or an index 0 < i <= jumpdest - 32.
// A proof is valid if:
// - i == 0 and we can go from the first opcode to jumpdest and code[jumpdest] = 0x5b
// - i > 0 and:
// - for j in {i+0,..., i+31} code[j] != PUSHk for all k >= 32 - j - i,
// - we can go from opcode i+32 to jumpdest,
// - code[jumpdest] = 0x5b.
// stack: proof_prefix_addr, jumpdest, ctx, retdest
// stack: (empty); returns without writing the bit if jumpdest is not a valid destination
global write_table_if_jumpdest:
// stack: proof_prefix_addr, jumpdest, ctx, retdest
%stack
(proof_prefix_addr, jumpdest, ctx) ->
(ctx, @SEGMENT_CODE, jumpdest, jumpdest, ctx, proof_prefix_addr)
MLOAD_GENERAL
// stack: opcode, jumpdest, ctx, proof_prefix_addr, retdest
// NOTE(review): %jump_eq_const jumps when opcode != 0x5b (see the macro body),
// so this bails out to `return` unless code[jumpdest] is JUMPDEST.
%jump_eq_const(0x5b, return)
//stack: jumpdest, ctx, proof_prefix_addr, retdest
SWAP2 DUP1
// stack: proof_prefix_addr, proof_prefix_addr, ctx, jumpdest, retdest
ISZERO
// A zero proof means the path is verified from the start of the code.
%jumpi(verify_path_and_write_table)
// stack: proof_prefix_addr, ctx, jumpdest, retdest
// If we are here we need to check that the next 32 bytes are less
// than JUMPXX for XX < 32 - i <=> opcode < 0x7f - i = 127 - i, 0 <= i < 32,
// or larger than 127
%check_and_step(127) %check_and_step(126) %check_and_step(125) %check_and_step(124)
%check_and_step(123) %check_and_step(122) %check_and_step(121) %check_and_step(120)
%check_and_step(119) %check_and_step(118) %check_and_step(117) %check_and_step(116)
%check_and_step(115) %check_and_step(114) %check_and_step(113) %check_and_step(112)
%check_and_step(111) %check_and_step(110) %check_and_step(109) %check_and_step(108)
%check_and_step(107) %check_and_step(106) %check_and_step(105) %check_and_step(104)
%check_and_step(103) %check_and_step(102) %check_and_step(101) %check_and_step(100)
%check_and_step(99) %check_and_step(98) %check_and_step(97) %check_and_step(96)
// check the remaining path
%jump(verify_path_and_write_table)
return:
// stack: 3 items, retdest (their order depends on which call site jumped here)
%pop3
JUMP
// Check the opcode pointed to by proof_prefix_addr and increment
// proof_prefix_addr. Bails out to `return` when $max <= opcode <= 127,
// i.e. when the byte is a PUSHk whose pushed data could reach the jumpdest.
%macro check_and_step(max)
%stack
(proof_prefix_addr, ctx, jumpdest) ->
(ctx, @SEGMENT_CODE, proof_prefix_addr, proof_prefix_addr, ctx, jumpdest)
MLOAD_GENERAL
// stack: opcode, proof_prefix_addr, ctx, jumpdest
DUP1
%gt_const(127)
// Opcodes above 127 are not PUSHes, so they are always fine.
%jumpi(%%ok)
// NOTE(review): %jumpi_lt_const actually jumps when opcode >= $max (see the
// macro body) — confirm this inverted naming is intended.
%jumpi_lt_const($max, return)
// stack: proof_prefix_addr, ctx, jumpdest
PUSH 0 // We need something to pop
%%ok:
POP
%increment
%endmacro
// Convenience wrapper around write_table_if_jumpdest: supplies the internal
// return label. Consumes (proof, addr, ctx) from the stack.
%macro write_table_if_jumpdest
%stack (proof, addr, ctx) -> (proof, addr, ctx, %%after)
%jump(write_table_if_jumpdest)
%%after:
%endmacro
// Write the jumpdest table. This is done by
// non-deterministically guessing the sequence of jumpdest
// addresses used during program execution within the current context.
// For each jumpdest address we also non-deterministically guess
// a proof, which is another address in the code such that
// write_table_if_jumpdest doesn't bail out when the proof is at the top
// of the stack and the jumpdest address is below it. If that's the case
// we set the corresponding bit in @SEGMENT_JUMPDEST_BITS to 1.
//
// stack: ctx, retdest
// stack: (empty)
global jumpdest_analisys:
// If address > 0 then address is interpreted as address' + 1
// and the next prover input should contain a proof for address'.
PROVER_INPUT(jumpdest_table::next_address)
DUP1 %jumpi(check_proof)
// If the prover returned 0 there are no more jump destinations to check.
POP
// This is just a hook used for avoiding verification of the jumpdest
// table in other contexts. It is useful during proof generation,
// allowing the avoidance of table verification when simulating user code.
global jumpdest_analisys_end:
POP
JUMP
check_proof:
// stack: address + 1, ctx, retdest
%decrement
DUP2 SWAP1
// stack: address, ctx, ctx, retdest
// We read the proof
PROVER_INPUT(jumpdest_table::next_proof)
// stack: proof, address, ctx, ctx, retdest
%write_table_if_jumpdest
// Loop until the prover signals the end of the table with a 0 address.
%jump(jumpdest_analisys)
// Convenience wrapper around jumpdest_analisys: supplies the return label.
%macro jumpdest_analisys
%stack (ctx) -> (ctx, %%after)
%jump(jumpdest_analisys)
%%after:
%endmacro

View File

@ -8,6 +8,19 @@
jumpi
%endmacro
// NOTE(review): despite its name, this macro jumps to $jumpdest when the top
// of the stack is NOT equal to $c: `PUSH $c; SUB` leaves $c - top, which is
// nonzero (so %jumpi is taken) exactly when top != $c. Confirm all call sites
// expect this, or rename to something like jump_neq_const.
%macro jump_eq_const(c, jumpdest)
PUSH $c
SUB
%jumpi($jumpdest)
%endmacro
// NOTE(review): despite its name, this macro jumps to $jumpdest when the top
// of the stack is >= $c (%ge_const pushes `top >= $c` and %jumpi consumes it).
// The original comment about %assert_zero does not match the body; confirm the
// intended semantics and rename (e.g. jumpi_ge_const) if appropriate.
%macro jumpi_lt_const(c, jumpdest)
%ge_const($c)
%jumpi($jumpdest)
%endmacro
%macro pop2
%rep 2
POP

View File

@ -1,7 +1,7 @@
//! An EVM interpreter for testing and debugging purposes.
use core::cmp::Ordering;
use std::collections::HashMap;
use std::collections::{BTreeSet, HashMap, HashSet};
use std::ops::Range;
use anyhow::bail;
@ -10,6 +10,7 @@ use keccak_hash::keccak;
use plonky2::field::goldilocks_field::GoldilocksField;
use super::assembler::BYTES_PER_OFFSET;
use super::utils::u256_from_bool;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
@ -413,7 +414,23 @@ impl<'a> Interpreter<'a> {
.collect()
}
fn incr(&mut self, n: usize) {
/// Marks the given positions as valid jump destinations for `context`, both in
/// the interpreter's `JumpdestBits` memory segment and in the generation
/// state's already-proven jumpdest table.
pub(crate) fn set_jumpdest_bits(&mut self, context: usize, jumpdest_bits: Vec<bool>) {
    // Mirror every bit (set or not) into the JumpdestBits segment.
    self.generation_state.memory.contexts[context].segments
        [Segment::JumpdestBits as usize]
        .content = jumpdest_bits.iter().map(|&bit| u256_from_bool(bit)).collect();
    // Record the positions of the set bits as proven jumpdests for `context`.
    let positions: BTreeSet<usize> = jumpdest_bits
        .into_iter()
        .enumerate()
        .filter_map(|(pos, bit)| bit.then_some(pos))
        .collect();
    self.generation_state
        .set_proofs_and_jumpdests(HashMap::from([(context, positions)]));
}
/// Advances the program counter by `n` bytes (an opcode plus any inline
/// operand bytes).
pub(crate) fn incr(&mut self, n: usize) {
    self.generation_state.registers.program_counter += n;
}

View File

@ -5,8 +5,8 @@ use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::opcodes::{get_opcode, get_push_opcode};
#[test]
fn test_jumpdest_analysis() -> Result<()> {
let jumpdest_analysis = KERNEL.global_labels["jumpdest_analysis"];
fn test_jumpdest_analisys() -> Result<()> {
let jumpdest_analisys = KERNEL.global_labels["jumpdest_analisys"];
const CONTEXT: usize = 3; // arbitrary
let add = get_opcode("ADD");
@ -25,18 +25,16 @@ fn test_jumpdest_analysis() -> Result<()> {
jumpdest,
];
let expected_jumpdest_bits = vec![false, true, false, false, false, true, false, true];
let jumpdest_bits = vec![false, true, false, false, false, true, false, true];
// Contract creation transaction.
let initial_stack = vec![0xDEADBEEFu32.into(), code.len().into(), CONTEXT.into()];
let mut interpreter = Interpreter::new_with_kernel(jumpdest_analysis, initial_stack);
let initial_stack = vec![0xDEADBEEFu32.into(), CONTEXT.into()];
let mut interpreter = Interpreter::new_with_kernel(jumpdest_analisys, initial_stack);
interpreter.set_code(CONTEXT, code);
interpreter.set_jumpdest_bits(CONTEXT, jumpdest_bits);
interpreter.run()?;
assert_eq!(interpreter.stack(), vec![]);
assert_eq!(
interpreter.get_jumpdest_bits(CONTEXT),
expected_jumpdest_bits
);
Ok(())
}

View File

@ -1,4 +1,4 @@
use std::collections::HashMap;
use std::collections::{BTreeSet, HashMap, HashSet};
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
@ -8,6 +8,7 @@ use ethereum_types::{Address, BigEndianHash, H256, U256};
use itertools::enumerate;
use plonky2::field::extension::Extendable;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::timed;
use plonky2::util::timing::TimingTree;
@ -21,13 +22,16 @@ use crate::all_stark::{AllStark, NUM_TABLES};
use crate::config::StarkConfig;
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::assembler::Kernel;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::opcodes::get_opcode;
use crate::generation::state::GenerationState;
use crate::generation::trie_extractor::{get_receipt_trie, get_state_trie, get_txn_trie};
use crate::memory::segments::Segment;
use crate::proof::{BlockHashes, BlockMetadata, ExtraBlockData, PublicValues, TrieRoots};
use crate::prover::check_abort_signal;
use crate::util::{h2u, u256_to_usize};
use crate::util::{h2u, u256_to_u8, u256_to_usize};
use crate::witness::errors::{ProgramError, ProverInputError};
use crate::witness::memory::{MemoryAddress, MemoryChannel};
use crate::witness::transition::transition;
@ -38,7 +42,7 @@ pub(crate) mod state;
mod trie_extractor;
use self::mpt::{load_all_mpts, TrieRootPtrs};
use crate::witness::util::mem_write_log;
use crate::witness::util::{mem_write_log, stack_peek};
/// Inputs needed for trace generation.
#[derive(Clone, Debug, Deserialize, Serialize, Default)]
@ -296,9 +300,7 @@ pub fn generate_traces<F: RichField + Extendable<D>, const D: usize>(
Ok((tables, public_values))
}
fn simulate_cpu<F: RichField + Extendable<D>, const D: usize>(
state: &mut GenerationState<F>,
) -> anyhow::Result<()> {
fn simulate_cpu<F: Field>(state: &mut GenerationState<F>) -> anyhow::Result<()> {
let halt_pc = KERNEL.global_labels["halt"];
loop {
@ -333,3 +335,81 @@ fn simulate_cpu<F: RichField + Extendable<D>, const D: usize>(
transition(state)?;
}
}
/// Simulates the CPU from `initial_label` until `final_label` is reached in
/// kernel mode within the starting context, recording every jump destination
/// used by (non-kernel) user code along the way.
///
/// Returns `Ok(None)` when the jumpdest proofs were already generated
/// (nothing to do); otherwise `Ok(Some(map))` mapping each context to the set
/// of jump destinations observed in it. A failing simulation step is reported
/// as `InvalidJumpdestSimulation`.
fn simulate_cpu_between_labels_and_get_user_jumps<F: Field>(
    initial_label: &str,
    final_label: &str,
    state: &mut GenerationState<F>,
) -> Result<Option<HashMap<usize, BTreeSet<usize>>>, ProgramError> {
    if state.jumpdest_proofs.is_some() {
        return Ok(None);
    }

    const JUMP_OPCODE: u8 = 0x56;
    const JUMPI_OPCODE: u8 = 0x57;

    let halt_pc = KERNEL.global_labels[final_label];
    let mut jumpdest_addresses: HashMap<_, BTreeSet<usize>> = HashMap::new();

    state.registers.program_counter = KERNEL.global_labels[initial_label];
    let initial_clock = state.traces.clock();
    let initial_context = state.registers.context;
    log::debug!("Simulating CPU for jumpdest analysis.");

    loop {
        // Skip jumpdest table validation in simulations: this simulation is
        // precisely what produces the table that would be validated.
        if state.registers.program_counter == KERNEL.global_labels["jumpdest_analisys"] {
            state.registers.program_counter = KERNEL.global_labels["jumpdest_analisys_end"]
        }
        let pc = state.registers.program_counter;
        let context = state.registers.context;
        // We halt once the final label is reached in kernel mode in the
        // context the simulation started from.
        let halt = state.registers.is_kernel
            && pc == halt_pc
            && state.registers.context == initial_context;
        let opcode = u256_to_u8(state.memory.get(MemoryAddress {
            context,
            segment: Segment::Code as usize,
            virt: pc,
        }))?;
        // For JUMPI the jump is only taken when the condition (the second
        // stack element) is nonzero; an unreadable stack means "not taken".
        let cond = stack_peek(state, 1).map_or(false, |c| c != U256::zero());
        if !state.registers.is_kernel
            && (opcode == JUMP_OPCODE || (opcode == JUMPI_OPCODE && cond))
        {
            let jumpdest = u256_to_usize(state.registers.stack_top)?;
            // Mark the destination as valid upfront so deeper simulated calls
            // don't abort on an unverified jump.
            state.memory.set(
                MemoryAddress {
                    context,
                    segment: Segment::JumpdestBits as usize,
                    virt: jumpdest,
                },
                U256::one(),
            );
            jumpdest_addresses
                .entry(context)
                .or_default()
                .insert(jumpdest);
        }
        if halt {
            log::debug!(
                "Simulated CPU halted after {} cycles",
                state.traces.clock() - initial_clock
            );
            return Ok(Some(jumpdest_addresses));
        }
        transition(state).map_err(|_| {
            ProgramError::ProverInputError(ProverInputError::InvalidJumpdestSimulation)
        })?;
    }
}

View File

@ -1,3 +1,5 @@
use std::cmp::min;
use std::collections::HashMap;
use std::mem::transmute;
use std::str::FromStr;
@ -5,20 +7,26 @@ use anyhow::{bail, Error};
use ethereum_types::{BigEndianHash, H256, U256, U512};
use itertools::{enumerate, Itertools};
use num_bigint::BigUint;
use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use serde::{Deserialize, Serialize};
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::extension_tower::{FieldExt, Fp12, BLS381, BN254};
use crate::generation::prover_input::EvmField::{
Bls381Base, Bls381Scalar, Bn254Base, Bn254Scalar, Secp256k1Base, Secp256k1Scalar,
};
use crate::generation::prover_input::FieldOp::{Inverse, Sqrt};
use crate::generation::simulate_cpu_between_labels_and_get_user_jumps;
use crate::generation::state::GenerationState;
use crate::memory::segments::Segment;
use crate::memory::segments::Segment::BnPairing;
use crate::util::{biguint_to_mem_vec, mem_vec_to_biguint, u256_to_usize};
use crate::witness::errors::ProgramError;
use crate::util::{biguint_to_mem_vec, mem_vec_to_biguint, u256_to_u8, u256_to_usize};
use crate::witness::errors::ProverInputError::*;
use crate::witness::errors::{ProgramError, ProverInputError};
use crate::witness::memory::MemoryAddress;
use crate::witness::util::{current_context_peek, stack_peek};
@ -47,6 +55,7 @@ impl<F: Field> GenerationState<F> {
"bignum_modmul" => self.run_bignum_modmul(),
"withdrawal" => self.run_withdrawal(),
"num_bits" => self.run_num_bits(),
"jumpdest_table" => self.run_jumpdest_table(input_fn),
_ => Err(ProgramError::ProverInputError(InvalidFunction)),
}
}
@ -229,6 +238,237 @@ impl<F: Field> GenerationState<F> {
Ok(num_bits.into())
}
}
/// Dispatches `jumpdest_table::*` prover inputs to their handlers.
fn run_jumpdest_table(&mut self, input_fn: &ProverInputFn) -> Result<U256, ProgramError> {
    // Use `get` rather than indexing so a malformed input label is reported
    // as an error instead of panicking.
    match input_fn.0.get(1).map(|s| s.as_str()) {
        Some("next_address") => self.run_next_jumpdest_table_address(),
        Some("next_proof") => self.run_next_jumpdest_table_proof(),
        _ => Err(ProgramError::ProverInputError(InvalidInput)),
    }
}
/// Returns the next used jump destination address for the current context,
/// encoded as `address + 1` so that 0 can signal "no more addresses".
///
/// Generates the jumpdest proofs on first use. Once the current context's
/// list is exhausted, clears `jumpdest_proofs` and returns 0.
fn run_next_jumpdest_table_address(&mut self) -> Result<U256, ProgramError> {
    // (The original version also read CODE_SIZE here into an unused local;
    // that dead computation has been removed.)
    if self.jumpdest_proofs.is_none() {
        self.generate_jumpdest_proofs()?;
    }
    let Some(jumpdest_proofs) = &mut self.jumpdest_proofs else {
        return Err(ProgramError::ProverInputError(
            ProverInputError::InvalidJumpdestSimulation,
        ));
    };
    let next = jumpdest_proofs
        .get_mut(&self.registers.context)
        .and_then(|ctx_jumpdest_proofs| ctx_jumpdest_proofs.pop());
    match next {
        Some(next_jumpdest_address) => Ok((next_jumpdest_address + 1).into()),
        None => {
            // Exhausted: drop the table so it can be regenerated later.
            self.jumpdest_proofs = None;
            Ok(U256::zero())
        }
    }
}
/// Returns the proof for the last jump address.
fn run_next_jumpdest_table_proof(&mut self) -> Result<U256, ProgramError> {
let Some(jumpdest_proofs) = &mut self.jumpdest_proofs else {
return Err(ProgramError::ProverInputError(
ProverInputError::InvalidJumpdestSimulation,
));
};
if let Some(ctx_jumpdest_proofs) = jumpdest_proofs.get_mut(&self.registers.context)
&& let Some(next_jumpdest_proof) = ctx_jumpdest_proofs.pop()
{
Ok(next_jumpdest_proof.into())
} else {
Err(ProgramError::ProverInputError(
ProverInputError::InvalidJumpdestSimulation,
))
}
}
}
impl<F: Field> GenerationState<F> {
    /// Simulates the user code to collect the jump destinations used in each
    /// context, then computes a proof for each of them and stores the result
    /// in `self.jumpdest_proofs`.
    fn generate_jumpdest_proofs(&mut self) -> Result<(), ProgramError> {
        let checkpoint = self.checkpoint();
        let memory = self.memory.clone();
        let code = self.get_current_code()?;
        // We need to set the simulated jumpdest bits to one as otherwise
        // the simulation will fail.
        self.set_jumpdest_bits(&code);

        // Simulate the user's code and (unnecessarily) part of the kernel
        // code, skipping the jumpdest-table validation call.
        let Some(jumpdest_table) = simulate_cpu_between_labels_and_get_user_jumps(
            "jumpdest_analisys_end",
            "terminate_common",
            self,
        )?
        else {
            return Ok(());
        };

        // Return to the state before starting the simulation.
        self.rollback(checkpoint);
        self.memory = memory;

        // Find proofs for all contexts.
        self.set_proofs_and_jumpdests(jumpdest_table);
        Ok(())
    }

    /// For every context, pairs each recorded jump destination with a proof
    /// (see `get_proofs_and_jumpdests`) and stores the resulting flat
    /// `[proof, address, ...]` list in `self.jumpdest_proofs`.
    pub(crate) fn set_proofs_and_jumpdests(
        &mut self,
        jumpdest_table: HashMap<usize, std::collections::BTreeSet<usize>>,
    ) {
        self.jumpdest_proofs = Some(HashMap::from_iter(jumpdest_table.into_iter().map(
            |(ctx, jumpdest_table)| {
                let code = self.get_code(ctx).unwrap();
                if let Some(&largest_address) = jumpdest_table.last() {
                    let proofs = get_proofs_and_jumpdests(&code, largest_address, jumpdest_table);
                    (ctx, proofs)
                } else {
                    // No jumpdests used in this context.
                    (ctx, vec![])
                }
            },
        )));
    }

    /// Returns the code of the current context.
    fn get_current_code(&self) -> Result<Vec<u8>, ProgramError> {
        self.get_code(self.registers.context)
    }

    /// Returns the bytes stored in `context`'s code segment.
    fn get_code(&self, context: usize) -> Result<Vec<u8>, ProgramError> {
        // Bug fix: read the code length from the *given* context. The previous
        // version always read the current context's length and bytes, so code
        // for any other context in the table came out wrong.
        let code_len = u256_to_usize(self.memory.get(MemoryAddress {
            context,
            segment: Segment::ContextMetadata as usize,
            virt: ContextMetadata::CodeSize as usize,
        }))?;
        (0..code_len)
            .map(|i| {
                u256_to_u8(self.memory.get(MemoryAddress {
                    context,
                    segment: Segment::Code as usize,
                    virt: i,
                }))
            })
            .collect()
    }

    /// Returns the code length of the current context.
    fn get_code_len(&self) -> Result<usize, ProgramError> {
        let code_len = u256_to_usize(self.memory.get(MemoryAddress {
            context: self.registers.context,
            segment: Segment::ContextMetadata as usize,
            virt: ContextMetadata::CodeSize as usize,
        }))?;
        Ok(code_len)
    }

    /// Sets the jumpdest bit for every JUMPDEST opcode in `code`. Push data is
    /// skipped by `CodeIterator` and therefore never marked.
    fn set_jumpdest_bits(&mut self, code: &[u8]) {
        const JUMPDEST_OPCODE: u8 = 0x5b;
        for (pos, opcode) in CodeIterator::new(code) {
            if opcode == JUMPDEST_OPCODE {
                self.memory.set(
                    MemoryAddress {
                        context: self.registers.context,
                        segment: Segment::JumpdestBits as usize,
                        virt: pos,
                    },
                    U256::one(),
                );
            }
        }
    }
}
/// For each address in `jumpdest_table` (each bounded by `largest_address`),
/// searches for a proof. A proof is the closest preceding position such that
/// none of the 32 bytes before it (opcodes or push data) is a PUSHk whose
/// pushed-data range reaches that position. Returns a flat vector of even
/// length containing each proof immediately followed by its address.
fn get_proofs_and_jumpdests(
    code: &[u8],
    largest_address: usize,
    jumpdest_table: std::collections::BTreeSet<usize>,
) -> Vec<usize> {
    const PUSH1_OPCODE: u8 = 0x60;
    const PUSH32_OPCODE: u8 = 0x7f;
    let (proofs, _) = CodeIterator::until(code, largest_address + 1).fold(
        (vec![], 0),
        |(mut proofs, acc), (pos, opcode)| {
            // `pos - 32` can serve as a proof anchor when none of the 32 bytes
            // before `pos` is a PUSH whose data could cover `pos`.
            let has_prefix = if let Some(prefix_start) = pos.checked_sub(32) {
                code[prefix_start..pos]
                    .iter()
                    .enumerate()
                    .all(|(prefix_pos, &byte)| {
                        byte > PUSH32_OPCODE
                            || (prefix_start + prefix_pos) as i32
                                + (byte as i32 - PUSH1_OPCODE as i32)
                                + 1
                                < pos as i32
                    })
            } else {
                false
            };
            // Keep the latest valid anchor seen so far.
            let acc = if has_prefix { pos - 32 } else { acc };
            if jumpdest_table.contains(&pos) {
                // Push the proof, then the address it attests to.
                proofs.push(acc);
                proofs.push(pos);
            }
            (proofs, acc)
        },
    );
    proofs
}
/// Iterator over the `(position, opcode)` pairs of EVM code, skipping the
/// immediate data bytes of PUSH1..=PUSH32 instructions.
struct CodeIterator<'a> {
    code: &'a [u8],
    pos: usize,
    end: usize,
}

impl<'a> CodeIterator<'a> {
    /// Iterates over the whole code.
    fn new(code: &'a [u8]) -> Self {
        CodeIterator {
            end: code.len(),
            code,
            pos: 0,
        }
    }

    /// Iterates over the code up to (but excluding) position `end`.
    fn until(code: &'a [u8], end: usize) -> Self {
        CodeIterator {
            end: std::cmp::min(code.len(), end),
            code,
            pos: 0,
        }
    }
}

impl<'a> Iterator for CodeIterator<'a> {
    type Item = (usize, u8);

    fn next(&mut self) -> Option<Self::Item> {
        const PUSH1_OPCODE: u8 = 0x60;
        // Bug fix: this was 0x70 (PUSH17), so the data bytes of
        // PUSH17..=PUSH32 were not skipped and could be yielded as opcodes.
        const PUSH32_OPCODE: u8 = 0x7f;
        let CodeIterator { code, pos, end } = self;
        if *pos >= *end {
            return None;
        }
        let opcode = code[*pos];
        let old_pos = *pos;
        // Skip the opcode itself plus, for PUSHk, its k immediate bytes.
        *pos += if (PUSH1_OPCODE..=PUSH32_OPCODE).contains(&opcode) {
            usize::from(opcode - PUSH1_OPCODE + 2)
        } else {
            1
        };
        Some((old_pos, opcode))
    }
}
enum EvmField {

View File

@ -1,4 +1,4 @@
use std::collections::HashMap;
use std::collections::{BTreeSet, HashMap};
use ethereum_types::{Address, BigEndianHash, H160, H256, U256};
use keccak_hash::keccak;
@ -50,6 +50,8 @@ pub(crate) struct GenerationState<F: Field> {
/// Pointers, within the `TrieData` segment, of the three MPTs.
pub(crate) trie_root_ptrs: TrieRootPtrs,
pub(crate) jumpdest_proofs: Option<HashMap<usize, Vec<usize>>>,
}
impl<F: Field> GenerationState<F> {
@ -91,6 +93,7 @@ impl<F: Field> GenerationState<F> {
txn_root_ptr: 0,
receipt_root_ptr: 0,
},
jumpdest_proofs: None,
};
let trie_root_ptrs = state.preinitialize_mpts(&inputs.tries);
@ -167,6 +170,26 @@ impl<F: Field> GenerationState<F> {
.map(|i| stack_peek(self, i).unwrap())
.collect()
}
/// Clone everything but the traces
pub(crate) fn soft_clone(&self) -> GenerationState<F> {
Self {
inputs: self.inputs.clone(),
registers: self.registers,
memory: self.memory.clone(),
traces: Traces::default(),
rlp_prover_inputs: self.rlp_prover_inputs.clone(),
state_key_to_address: self.state_key_to_address.clone(),
bignum_modmul_result_limbs: self.bignum_modmul_result_limbs.clone(),
withdrawal_prover_inputs: self.withdrawal_prover_inputs.clone(),
trie_root_ptrs: TrieRootPtrs {
state_root_ptr: 0,
txn_root_ptr: 0,
receipt_root_ptr: 0,
},
jumpdest_proofs: None,
}
}
}
/// Withdrawals prover input array is of the form `[addr0, amount0, ..., addrN, amountN, U256::MAX, U256::MAX]`.

View File

@ -70,6 +70,11 @@ pub(crate) fn u256_to_u64<F: Field>(u256: U256) -> Result<(F, F), ProgramError>
))
}
/// Safe conversion from U256 to u8, which errors in case of overflow instead of panicking.
pub(crate) fn u256_to_u8(u256: U256) -> Result<u8, ProgramError> {
    match u8::try_from(u256) {
        Ok(byte) => Ok(byte),
        Err(_) => Err(ProgramError::IntegerTooLarge),
    }
}
/// Safe alternative to `U256::as_usize()`, which errors in case of overflow instead of panicking.
pub(crate) fn u256_to_usize(u256: U256) -> Result<usize, ProgramError> {
u256.try_into().map_err(|_| ProgramError::IntegerTooLarge)

View File

@ -36,4 +36,6 @@ pub enum ProverInputError {
InvalidInput,
InvalidFunction,
NumBitsError,
InvalidJumpDestination,
InvalidJumpdestSimulation,
}

View File

@ -395,7 +395,12 @@ fn try_perform_instruction<F: Field>(
if state.registers.is_kernel {
log_kernel_instruction(state, op);
} else {
log::debug!("User instruction: {:?}", op);
log::debug!(
"User instruction: {:?}, ctx = {:?}, stack = {:?}",
op,
state.registers.context,
state.stack()
);
}
fill_op_flag(op, &mut row);