Merge branch 'main' into jacqui/bad-opcode-witness-generation

This commit is contained in:
Jacqueline Nabaglo 2023-06-02 21:34:52 -07:00 committed by GitHub
commit 7ab0bba559
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
35 changed files with 1118 additions and 113 deletions

View File

@ -44,7 +44,7 @@ jobs:
command: test
args: --all
env:
RUSTFLAGS: -Copt-level=3 -Cdebug-assertions -Coverflow-checks=y -Cdebuginfo=0 -Cprefer-dynamic=y
RUSTFLAGS: -Copt-level=3 -Cdebug-assertions -Coverflow-checks=y -Cdebuginfo=0
RUST_LOG: 1
CARGO_INCREMENTAL: 1
RUST_BACKTRACE: 1

View File

@ -12,7 +12,7 @@ use plonky2::util::transpose;
use static_assertions::const_assert;
use crate::all_stark::Table;
use crate::arithmetic::{addcy, columns, divmod, modular, mul, Operation};
use crate::arithmetic::{addcy, byte, columns, divmod, modular, mul, Operation};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, TableWithColumns};
use crate::lookup::{eval_lookups, eval_lookups_circuit, permuted_cols};
@ -49,7 +49,7 @@ fn cpu_arith_data_link<F: Field>(ops: &[usize], regs: &[Range<usize>]) -> Vec<Co
}
pub fn ctl_arithmetic_rows<F: Field>() -> TableWithColumns<F> {
const ARITH_OPS: [usize; 13] = [
const ARITH_OPS: [usize; 14] = [
columns::IS_ADD,
columns::IS_SUB,
columns::IS_MUL,
@ -63,6 +63,7 @@ pub fn ctl_arithmetic_rows<F: Field>() -> TableWithColumns<F> {
columns::IS_SUBMOD,
columns::IS_DIV,
columns::IS_MOD,
columns::IS_BYTE,
];
const REGISTER_MAP: [Range<usize>; 4] = [
@ -137,8 +138,10 @@ impl<F: RichField, const D: usize> ArithmeticStark<F, D> {
}
// Pad the trace with zero rows if it doesn't have enough rows
// to accommodate the range check columns.
for _ in trace_rows.len()..RANGE_MAX {
// to accommodate the range check columns. Also make sure the
// trace length is a power of two.
let padded_len = trace_rows.len().next_power_of_two();
for _ in trace_rows.len()..std::cmp::max(padded_len, RANGE_MAX) {
trace_rows.push(vec![F::ZERO; columns::NUM_ARITH_COLUMNS]);
}
@ -183,6 +186,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticSta
addcy::eval_packed_generic(lv, yield_constr);
divmod::eval_packed(lv, nv, yield_constr);
modular::eval_packed(lv, nv, yield_constr);
byte::eval_packed(lv, yield_constr);
}
fn eval_ext_circuit(
@ -214,6 +218,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ArithmeticSta
addcy::eval_ext_circuit(builder, lv, yield_constr);
divmod::eval_ext_circuit(builder, lv, nv, yield_constr);
modular::eval_ext_circuit(builder, lv, nv, yield_constr);
byte::eval_ext_circuit(builder, lv, yield_constr);
}
fn constraint_degree(&self) -> usize {
@ -317,7 +322,10 @@ mod tests {
// 128 % 13 == 11
let modop = Operation::binary(BinaryOperator::Mod, U256::from(128), U256::from(13));
let ops: Vec<Operation> = vec![add, mulmod, addmod, mul, modop, lt1, lt2, lt3, div];
// byte(30, 0xABCD) = 0xAB
let byte = Operation::binary(BinaryOperator::Byte, U256::from(30), U256::from(0xABCD));
let ops: Vec<Operation> = vec![add, mulmod, addmod, mul, modop, lt1, lt2, lt3, div, byte];
let pols = stark.generate_trace(ops);
@ -341,6 +349,7 @@ mod tests {
(9, 1),
(10, 0),
(11, 9),
(13, 0xAB),
];
for (row, expected) in expected_output {

483
evm/src/arithmetic/byte.rs Normal file
View File

@ -0,0 +1,483 @@
//! Support for the EVM BYTE instruction
//!
//! This module verifies the EVM BYTE instruction, defined as follows:
//!
//! INPUTS: 256-bit values I and X = \sum_{i=0}^31 X_i B^i,
//! where B = 2^8 and 0 <= X_i < B for all i.
//!
//! OUTPUT: X_{31-I} if 0 <= I < 32, otherwise 0.
//!
//! NB: index I=0 corresponds to byte X_31, i.e. the most significant
//! byte. This is exactly the opposite of what anyone would expect; who
//! knows what the EVM designers were thinking. Anyway, if anything
//! below seems confusing, first check to ensure you're counting from
//! the wrong end of X, as the spec requires.
//!
//! Wlog consider 0 <= I < 32, so I has five bits b0,...,b4. We are
//! given X as an array of 16-bit limbs; write X := \sum_{i=0}^15 Y_i
//! 2^{16i} where 0 <= Y_i < 2^16.
//!
//! The technique (hat tip to Jacqui for the idea) is to store a tree
//! of limbs of X that are selected according to the bits in I. The
//! main observation is that each bit `bi` halves the number of
//! candidate bytes that we might return: If b4 is 0, then I < 16 and
//! the possible bytes are in the top half of X: Y_8,..,Y_15
//! (corresponding to bytes X_16,..,X_31), and if b4 is 1 then I >= 16
//! and the possible bytes are the bottom half of X: Y_0,..,Y_7
//! (corresponding to bytes X_0,..,X_15).
//!
//! Let Z_0,..,Z_7 be the bytes selected in the first step. Then, in
//! the next step, if b3 is 0, we select Z_4,..,Z_7 and if it's 1 we
//! select Z_0,..,Z_3. Together, b4 and b3 divide the bytes of X into
//! 4 equal-sized chunks of 4 limbs, and the byte we're after will be
//! among the 4 selected limbs.
//!
//! Repeating for b2 and b1, we reduce to a single 16-bit limb
//! L=x+y*256; the desired byte will be x if b0 is 1 and y if b0
//! is 0.
//!
//! -*-
//!
//! To prove that the bytes x and y are in the range [0, 2^8) (rather
//! than [0, 2^16), which is all the range-checker guarantees) we do
//! the following (hat tip to Jacqui for this trick too): Instead of
//! storing x and y, we store w = 256 * x and y. Then, to verify that
//! x, y < 256 and the last limb L = x + y * 256, we check that
//! L = w / 256 + y * 256.
//!
//! The proof of why verifying that L = w / 256 + y * 256
//! suffices is as follows:
//!
//! 1. The given L, w and y are range-checked to be less than 2^16.
//! 2. y * 256 ∈ {0, 256, 512, ..., 2^24 - 512, 2^24 - 256}
//! 3. w / 256 = L - y * 256 ∈ {-2^24 + 256, -2^24 + 257, ..., 2^16 - 2, 2^16 - 1}
//! 4. By inspection, for w < 2^16, if w / 256 < 2^16 or
//! w / 256 >= P - 2^24 + 256 (i.e. if w / 256 falls in the range
//! of point 3 above), then w = 256 * m for some 0 <= m < 256.
//! 5. Hence w / 256 ∈ {0, 1, ..., 255}
//! 6. Hence y * 256 = L - w / 256 ∈ {-255, -254, ..., 2^16 - 1}
//! 7. Taking the intersection of ranges in 2. and 6. we see that
//! y * 256 ∈ {0, 256, 512, ..., 2^16 - 256}
//! 8. Hence y ∈ {0, 1, ..., 255}
use std::ops::Range;
use ethereum_types::U256;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use static_assertions::const_assert;
use crate::arithmetic::columns::*;
use crate::arithmetic::utils::u256_to_array;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
// Give meaningful names to the columns of AUX_INPUT_REGISTER_0 that
// we're using.

// Columns 0..6: decomposition of the first 16-bit limb of the index —
// five low bits followed by the remaining high part; see `set_idx_decomp`.
const BYTE_IDX_DECOMP: Range<usize> = AUX_INPUT_REGISTER_0.start..AUX_INPUT_REGISTER_0.start + 6;
// High (non-bit) part of the first index limb; last slot of BYTE_IDX_DECOMP.
const BYTE_IDX_DECOMP_HI: usize = AUX_INPUT_REGISTER_0.start + 5;
// Low byte of the finally selected limb, stored pre-scaled by 256
// (see the module docs for why the scaling proves it fits in a byte).
const BYTE_LAST_LIMB_LO: usize = AUX_INPUT_REGISTER_0.start + 6;
// High byte of the finally selected limb.
const BYTE_LAST_LIMB_HI: usize = AUX_INPUT_REGISTER_0.start + 7;
// Boolean flag: 1 when the index is >= 32, in which case the output is 0.
const BYTE_IDX_IS_LARGE: usize = AUX_INPUT_REGISTER_0.start + 8;
// The four 16-bit limbs of the inverse of the "high limb sum",
// witnessing that the sum is non-zero whenever BYTE_IDX_IS_LARGE is set.
const BYTE_IDX_HI_LIMB_SUM_INV_0: usize = AUX_INPUT_REGISTER_0.start + 9;
const BYTE_IDX_HI_LIMB_SUM_INV_1: usize = AUX_INPUT_REGISTER_0.start + 10;
const BYTE_IDX_HI_LIMB_SUM_INV_2: usize = AUX_INPUT_REGISTER_0.start + 11;
const BYTE_IDX_HI_LIMB_SUM_INV_3: usize = AUX_INPUT_REGISTER_0.start + 12;
/// Decompose `idx` for the BYTE constraints and write it to `idx_decomp`.
///
/// The decomposition is
///
///     idx = idx0_lo5 + idx0_hi * 2^5 + \sum_i idx[i] * 2^(16i),
///
/// with `0 <= idx0_lo5 < 32` and `0 <= idx0_hi < 2^11`. The five bits
/// of `idx0_lo5` land in `idx_decomp[0..5]` (least-significant bit
/// first). We never need the high 11 bits of the first limb
/// individually, so they are packed as a single value into
/// `idx_decomp[5]`.
fn set_idx_decomp<F: PrimeField64>(idx_decomp: &mut [F], idx: &U256) {
    debug_assert!(idx_decomp.len() == 6);
    // One slot per bit for the five low bits of idx.
    for (pos, slot) in idx_decomp.iter_mut().enumerate().take(5) {
        *slot = F::from_bool(idx.bit(pos));
    }
    // Everything above bit 4 within the first 16-bit limb, as one value.
    idx_decomp[5] = F::from_canonical_u16((idx.low_u64() as u16) >> 5);
}
/// Generate the witness row for `BYTE(idx, val)`.
///
/// Writes the limb decompositions of `idx` and `val` into the input
/// registers, fills the auxiliary columns with the index decomposition,
/// the limb-selection "tree" and the inverse witness described in the
/// module documentation, and puts the selected byte (or zero when
/// `idx >= 32`) into the output register.
pub(crate) fn generate<F: PrimeField64>(lv: &mut [F], idx: U256, val: U256) {
    u256_to_array(&mut lv[INPUT_REGISTER_0], idx);
    u256_to_array(&mut lv[INPUT_REGISTER_1], val);
    set_idx_decomp(&mut lv[BYTE_IDX_DECOMP], &idx);

    // Sum of the "high" parts of idx: the top 11 bits of the first
    // limb — scaled by 32, to match the idx0_hi term that eval_packed
    // and eval_ext_circuit fold into their hi_limb_sum — plus all the
    // remaining limbs. The sum is non-zero precisely when idx >= 32.
    // (Previously the unscaled decomposition value was used here,
    // which disagreed with the constraint side for indices such as 32
    // whose first limb has bits above position 4 set.)
    let idx0_hi = lv[BYTE_IDX_DECOMP_HI] * F::from_canonical_u64(32);
    let hi_limb_sum = lv[INPUT_REGISTER_0][1..]
        .iter()
        .fold(idx0_hi, |acc, &x| acc + x);
    // Witness for hi_limb_sum != 0; when the sum IS zero the stored
    // value is unused by the constraints, so any placeholder (ONE) works.
    let hi_limb_sum_inv = hi_limb_sum
        .try_inverse()
        .unwrap_or(F::ONE)
        .to_canonical_u64();
    // It's a bit silly that we have to split this value, which
    // doesn't need to be range-checked, into 16-bit limbs so that it
    // can be range-checked; but the rigidity of the range-checking
    // mechanism means we can't optionally switch it off for some
    // instructions.
    lv[BYTE_IDX_HI_LIMB_SUM_INV_0] = F::from_canonical_u16(hi_limb_sum_inv as u16);
    lv[BYTE_IDX_HI_LIMB_SUM_INV_1] = F::from_canonical_u16((hi_limb_sum_inv >> 16) as u16);
    lv[BYTE_IDX_HI_LIMB_SUM_INV_2] = F::from_canonical_u16((hi_limb_sum_inv >> 32) as u16);
    lv[BYTE_IDX_HI_LIMB_SUM_INV_3] = F::from_canonical_u16((hi_limb_sum_inv >> 48) as u16);
    lv[BYTE_IDX_IS_LARGE] = F::from_bool(!hi_limb_sum.is_zero());

    // Set the tree values according to the low 5 bits of idx, even
    // when idx >= 32.
    //
    // Use the bits of idx0 to build a multiplexor that selects
    // the correct byte of val. Each level of the tree uses one
    // bit to halve the set of possible bytes from the previous
    // level. The tree stores limbs rather than bytes though, so
    // the last value must be handled specially.
    //
    // Morally, offset at i is 2^i * bit[i], but because of the
    // reversed indexing and handling of the last element
    // separately, the offset is 2^i * ( ! bit[i + 1]). (The !bit
    // corresponds to calculating 31 - bits which is just bitwise NOT.)
    //
    // `lvl_len` is the number of elements of the current level of the
    // "tree". Can think of `val_limbs` as level 0, with length =
    // N_LIMBS = 16.
    const_assert!(N_LIMBS == 16); // Enforce assumption

    // Build the tree of limbs from the low 5 bits of idx:
    let mut i = 3; // tree level, from 3 downto 0.
    let mut src = INPUT_REGISTER_1.start; // val_limbs start
    let mut dest = AUX_INPUT_REGISTER_1.start; // tree start
    loop {
        let lvl_len = 1 << i;
        // Pick which half of the previous level survives to this one.
        let offset = (!idx.bit(i + 1) as usize) * lvl_len;
        src += offset;
        // Copy the new tree level into place.
        lv.copy_within(src..src + lvl_len, dest);
        if i == 0 {
            break;
        }
        // Next source is the level we just wrote; next destination
        // follows immediately after it.
        src = dest;
        dest += lvl_len;
        i -= 1;
    }

    // Handle the last bit; i.e. pick a byte of the final limb, which
    // `dest` now points at.
    let t = lv[dest].to_canonical_u64();
    let lo = t as u8 as u64;
    let hi = t >> 8;
    // Store 256 * lo rather than lo; see the module documentation for
    // why this lets the constraints verify lo, hi < 256 without a
    // dedicated range check.
    lv[BYTE_LAST_LIMB_LO] = F::from_canonical_u64(lo << 8);
    lv[BYTE_LAST_LIMB_HI] = F::from_canonical_u64(hi);

    // The final tree entry is the byte selected by bit 0.
    let tree = &mut lv[AUX_INPUT_REGISTER_1];
    let output = if idx.bit(0) {
        tree[15] = F::from_canonical_u64(lo);
        lo.into()
    } else {
        tree[15] = F::from_canonical_u64(hi);
        hi.into()
    };
    // BYTE yields 0 for any index outside 0..32.
    u256_to_array(
        &mut lv[OUTPUT_REGISTER],
        if idx < 32.into() {
            output
        } else {
            U256::zero()
        },
    );
}
/// Verify the BYTE witness columns (packed/native field version).
///
/// Every constraint is multiplied by the `IS_BYTE` row filter, so the
/// checks are vacuous on rows performing other operations. See the
/// module documentation for the overall proof strategy.
pub fn eval_packed<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let is_byte = lv[IS_BYTE];

    let idx = &lv[INPUT_REGISTER_0];
    let val = &lv[INPUT_REGISTER_1];
    let out = &lv[OUTPUT_REGISTER];
    let idx_decomp = &lv[AUX_INPUT_REGISTER_0];
    let tree = &lv[AUX_INPUT_REGISTER_1];

    // low 5 bits of the first limb of idx:
    let mut idx0_lo5 = P::ZEROS;
    for i in 0..5 {
        let bit = idx_decomp[i];
        // bit * bit - bit == 0 forces each claimed bit into {0, 1}.
        yield_constr.constraint(is_byte * (bit * bit - bit));
        idx0_lo5 += bit * P::Scalar::from_canonical_u64(1 << i);
    }
    // Verify that idx0_hi is the high (11) bits of the first limb of
    // idx (in particular idx0_hi is at most 11 bits, since idx[0] is
    // at most 16 bits).
    let idx0_hi = idx_decomp[5] * P::Scalar::from_canonical_u64(32u64);
    yield_constr.constraint(is_byte * (idx[0] - (idx0_lo5 + idx0_hi)));

    // Verify the layers of the tree
    // NB: Each of the bit values is negated in place to account for
    // the reversed indexing.

    // Level 1: bit 4 picks 8 of the 16 limbs of val into tree[0..8].
    let bit = idx_decomp[4];
    for i in 0..8 {
        let limb = bit * val[i] + (P::ONES - bit) * val[i + 8];
        yield_constr.constraint(is_byte * (tree[i] - limb));
    }
    // Level 2: bit 3 picks 4 of those 8 into tree[8..12].
    let bit = idx_decomp[3];
    for i in 0..4 {
        let limb = bit * tree[i] + (P::ONES - bit) * tree[i + 4];
        yield_constr.constraint(is_byte * (tree[i + 8] - limb));
    }
    // Level 3: bit 2 picks 2 of those 4 into tree[12..14].
    let bit = idx_decomp[2];
    for i in 0..2 {
        let limb = bit * tree[i + 8] + (P::ONES - bit) * tree[i + 10];
        yield_constr.constraint(is_byte * (tree[i + 12] - limb));
    }
    // Level 4: bit 1 picks the single final limb into tree[14].
    let bit = idx_decomp[1];
    let limb = bit * tree[12] + (P::ONES - bit) * tree[13];
    yield_constr.constraint(is_byte * (tree[14] - limb));

    // Check byte decomposition of last limb:
    // lo_byte + 256 * (256 * hi_byte - limb) == 0, i.e.
    // lo_byte == 256 * (limb - 256 * hi_byte); with lo_byte stored
    // pre-scaled by 256 this also bounds both bytes (see module docs).
    let base8 = P::Scalar::from_canonical_u64(1 << 8);
    let lo_byte = lv[BYTE_LAST_LIMB_LO];
    let hi_byte = lv[BYTE_LAST_LIMB_HI];
    yield_constr.constraint(is_byte * (lo_byte + base8 * (base8 * hi_byte - limb)));
    // 256 * tree[15] must equal the byte selected by bit 0:
    // bit0 * lo_byte + (1 - bit0) * 256 * hi_byte.
    let bit = idx_decomp[0];
    let t = bit * lo_byte + (P::ONES - bit) * base8 * hi_byte;
    yield_constr.constraint(is_byte * (base8 * tree[15] - t));
    let expected_out_byte = tree[15];

    // Sum all higher limbs; sum will be non-zero iff idx >= 32.
    // NB: the idx0_hi term folded in here is the scaled value
    // (idx_decomp[5] * 32) computed above.
    let hi_limb_sum = idx0_hi + idx[1..].iter().copied().sum::<P>();
    let idx_is_large = lv[BYTE_IDX_IS_LARGE];
    // idx_is_large is 0 or 1
    yield_constr.constraint(is_byte * (idx_is_large * idx_is_large - idx_is_large));
    // If hi_limb_sum is nonzero, then idx_is_large must be one.
    yield_constr.constraint(is_byte * hi_limb_sum * (idx_is_large - P::ONES));
    // Reassemble the inverse witness from its four 16-bit limbs.
    let hi_limb_sum_inv = lv[BYTE_IDX_HI_LIMB_SUM_INV_0]
        + lv[BYTE_IDX_HI_LIMB_SUM_INV_1] * P::Scalar::from_canonical_u64(1 << 16)
        + lv[BYTE_IDX_HI_LIMB_SUM_INV_2] * P::Scalar::from_canonical_u64(1 << 32)
        + lv[BYTE_IDX_HI_LIMB_SUM_INV_3] * P::Scalar::from_canonical_u64(1 << 48);
    // If idx_is_large is 1, then hi_limb_sum_inv must be the inverse
    // of hi_limb_sum, hence hi_limb_sum is non-zero, hence idx is
    // indeed "large".
    //
    // Otherwise, if idx_is_large is 0, then hi_limb_sum * hi_limb_sum_inv
    // is zero, which is only possible if hi_limb_sum is zero, since
    // hi_limb_sum_inv is non-zero.
    yield_constr.constraint(is_byte * (hi_limb_sum * hi_limb_sum_inv - idx_is_large));

    // The output byte is the selected byte, forced to zero when the
    // index is out of range.
    let out_byte = out[0];
    let check = out_byte - (P::ONES - idx_is_large) * expected_out_byte;
    yield_constr.constraint(is_byte * check);
    // Check that the rest of the output limbs are zero
    for i in 1..N_LIMBS {
        yield_constr.constraint(is_byte * out[i]);
    }
}
/// Verify the BYTE witness columns (recursive-circuit version).
///
/// Mirrors `eval_packed` constraint-for-constraint using circuit
/// builder gates; see that function and the module documentation for
/// the reasoning behind each check.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let is_byte = lv[IS_BYTE];

    let idx = &lv[INPUT_REGISTER_0];
    let val = &lv[INPUT_REGISTER_1];
    let out = &lv[OUTPUT_REGISTER];
    let idx_decomp = &lv[AUX_INPUT_REGISTER_0];
    let tree = &lv[AUX_INPUT_REGISTER_1];

    // Accumulate the claimed low 5 bits of idx, checking each is boolean.
    let mut idx0_lo5 = builder.zero_extension();
    for i in 0..5 {
        let bit = idx_decomp[i];
        // bit * bit - bit == 0 forces bit into {0, 1}.
        let t = builder.mul_sub_extension(bit, bit, bit);
        let t = builder.mul_extension(t, is_byte);
        yield_constr.constraint(builder, t);
        let scale = F::Extension::from(F::from_canonical_u64(1 << i));
        let scale = builder.constant_extension(scale);
        idx0_lo5 = builder.mul_add_extension(bit, scale, idx0_lo5);
    }
    // Verify idx[0] == idx0_lo5 + 32 * idx_decomp[5], i.e. idx_decomp[5]
    // holds the high 11 bits of the first limb of idx.
    let t = F::Extension::from(F::from_canonical_u64(32));
    let t = builder.constant_extension(t);
    let idx0_hi = builder.mul_extension(idx_decomp[5], t);
    let t = builder.add_extension(idx0_lo5, idx0_hi);
    let t = builder.sub_extension(idx[0], t);
    let t = builder.mul_extension(is_byte, t);
    yield_constr.constraint(builder, t);

    let one = builder.one_extension();

    // Tree level 1: bit 4 picks 8 of the 16 limbs of val into tree[0..8];
    // each constraint is tree[i] - (bit * val[i] + (1 - bit) * val[i + 8]).
    let bit = idx_decomp[4];
    for i in 0..8 {
        let t = builder.mul_extension(bit, val[i]);
        let u = builder.sub_extension(one, bit);
        let v = builder.mul_add_extension(u, val[i + 8], t);
        let t = builder.sub_extension(tree[i], v);
        let t = builder.mul_extension(is_byte, t);
        yield_constr.constraint(builder, t);
    }
    // Tree level 2: bit 3 picks 4 of those 8 into tree[8..12].
    let bit = idx_decomp[3];
    for i in 0..4 {
        let t = builder.mul_extension(bit, tree[i]);
        let u = builder.sub_extension(one, bit);
        let v = builder.mul_add_extension(u, tree[i + 4], t);
        let t = builder.sub_extension(tree[i + 8], v);
        let t = builder.mul_extension(is_byte, t);
        yield_constr.constraint(builder, t);
    }
    // Tree level 3: bit 2 picks 2 of those 4 into tree[12..14].
    let bit = idx_decomp[2];
    for i in 0..2 {
        let t = builder.mul_extension(bit, tree[i + 8]);
        let u = builder.sub_extension(one, bit);
        let v = builder.mul_add_extension(u, tree[i + 10], t);
        let t = builder.sub_extension(tree[i + 12], v);
        let t = builder.mul_extension(is_byte, t);
        yield_constr.constraint(builder, t);
    }
    // Tree level 4: bit 1 picks the single final limb into tree[14].
    let bit = idx_decomp[1];
    let t = builder.mul_extension(bit, tree[12]);
    let u = builder.sub_extension(one, bit);
    let limb = builder.mul_add_extension(u, tree[13], t);
    let t = builder.sub_extension(tree[14], limb);
    let t = builder.mul_extension(is_byte, t);
    yield_constr.constraint(builder, t);

    // Check byte decomposition of the last limb:
    // lo_byte + 256 * (256 * hi_byte - limb) == 0.
    let base8 = F::Extension::from(F::from_canonical_u64(1 << 8));
    let base8 = builder.constant_extension(base8);
    let lo_byte = lv[BYTE_LAST_LIMB_LO];
    let hi_byte = lv[BYTE_LAST_LIMB_HI];
    let t = builder.mul_sub_extension(base8, hi_byte, limb);
    let t = builder.mul_add_extension(base8, t, lo_byte);
    let t = builder.mul_extension(is_byte, t);
    yield_constr.constraint(builder, t);

    // 256 * tree[15] == bit0 * lo_byte + (1 - bit0) * 256 * hi_byte.
    let bit = idx_decomp[0];
    let nbit = builder.sub_extension(one, bit);
    let t = builder.mul_many_extension([nbit, base8, hi_byte]);
    let t = builder.mul_add_extension(bit, lo_byte, t);
    let t = builder.mul_sub_extension(base8, tree[15], t);
    let t = builder.mul_extension(is_byte, t);
    yield_constr.constraint(builder, t);
    let expected_out_byte = tree[15];

    // Sum the high pieces of idx (the scaled idx0_hi term plus all
    // limbs above the first); non-zero iff idx >= 32.
    let mut hi_limb_sum = idx0_hi;
    for i in 1..N_LIMBS {
        hi_limb_sum = builder.add_extension(hi_limb_sum, idx[i]);
    }
    // idx_is_large must be boolean.
    let idx_is_large = lv[BYTE_IDX_IS_LARGE];
    let t = builder.mul_sub_extension(idx_is_large, idx_is_large, idx_is_large);
    let t = builder.mul_extension(is_byte, t);
    yield_constr.constraint(builder, t);
    // If hi_limb_sum is non-zero then idx_is_large must be 1.
    let t = builder.sub_extension(idx_is_large, one);
    let t = builder.mul_many_extension([is_byte, hi_limb_sum, t]);
    yield_constr.constraint(builder, t);
    // Reassemble the inverse witness from its four 16-bit limbs.
    let base16 = F::from_canonical_u64(1 << 16);
    let hi_limb_sum_inv = builder.mul_const_add_extension(
        base16,
        lv[BYTE_IDX_HI_LIMB_SUM_INV_3],
        lv[BYTE_IDX_HI_LIMB_SUM_INV_2],
    );
    let hi_limb_sum_inv =
        builder.mul_const_add_extension(base16, hi_limb_sum_inv, lv[BYTE_IDX_HI_LIMB_SUM_INV_1]);
    let hi_limb_sum_inv =
        builder.mul_const_add_extension(base16, hi_limb_sum_inv, lv[BYTE_IDX_HI_LIMB_SUM_INV_0]);
    // hi_limb_sum * hi_limb_sum_inv == idx_is_large ties idx_is_large
    // to the (non-)vanishing of hi_limb_sum; see the comments on the
    // corresponding constraint in `eval_packed`.
    let t = builder.mul_sub_extension(hi_limb_sum, hi_limb_sum_inv, idx_is_large);
    let t = builder.mul_extension(is_byte, t);
    yield_constr.constraint(builder, t);

    // out[0] == (1 - idx_is_large) * expected_out_byte.
    let out_byte = out[0];
    let t = builder.sub_extension(one, idx_is_large);
    let t = builder.mul_extension(t, expected_out_byte);
    let check = builder.sub_extension(out_byte, t);
    let t = builder.mul_extension(is_byte, check);
    yield_constr.constraint(builder, t);
    // Remaining output limbs must be zero.
    for i in 1..N_LIMBS {
        let t = builder.mul_extension(is_byte, out[i]);
        yield_constr.constraint(builder, t);
    }
}
#[cfg(test)]
mod tests {
    use plonky2::field::goldilocks_field::GoldilocksField;
    use rand::{Rng, SeedableRng};
    use rand_chacha::ChaCha8Rng;

    use super::*;
    use crate::arithmetic::columns::NUM_ARITH_COLUMNS;

    type F = GoldilocksField;

    /// Assert that the output register holds `expected_byte` in its
    /// first limb and zero everywhere else.
    fn assert_output_byte(lv: &[F], expected_byte: u64) {
        let output = &lv[OUTPUT_REGISTER];
        assert_eq!(output[0].to_canonical_u64(), expected_byte);
        for limb_idx in 1..N_LIMBS {
            assert_eq!(output[limb_idx], F::ZERO);
        }
    }

    #[test]
    fn generate_eval_consistency() {
        const N_ITERS: usize = 1000;
        let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);

        for _ in 0..N_ITERS {
            // Fill the entire row with random 16-bit field elements.
            let mut lv =
                [F::default(); NUM_ARITH_COLUMNS].map(|_| F::from_canonical_u16(rng.gen::<u16>()));
            lv[IS_BYTE] = F::ONE;

            let val = U256::from(rng.gen::<[u8; 32]>());
            for i in 0..32 {
                generate(&mut lv, i.into(), val);

                // The witness must contain byte i, counting from the
                // big end as the EVM does.
                assert_output_byte(&lv, val.byte(31 - i) as u64);

                // Every packed constraint must vanish on the row.
                let mut consumer = ConstraintConsumer::new(
                    vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
                    F::ONE,
                    F::ONE,
                    F::ONE,
                );
                eval_packed(&lv, &mut consumer);
                for &acc in &consumer.constraint_accs {
                    assert_eq!(acc, F::ZERO);
                }
            }

            // Out-of-range indices must yield a zero output.
            // TODO(review): the constraints are not exercised for these
            // large indices; consider running eval_packed here as well.
            for idx in [32.into(), 33.into(), val, U256::max_value()] {
                generate(&mut lv, idx, val);
                assert_output_byte(&lv, 0);
            }
        }
    }
}

View File

@ -35,8 +35,9 @@ pub(crate) const IS_SUBFP254: usize = IS_MULFP254 + 1;
pub(crate) const IS_SUBMOD: usize = IS_SUBFP254 + 1;
pub(crate) const IS_LT: usize = IS_SUBMOD + 1;
pub(crate) const IS_GT: usize = IS_LT + 1;
pub(crate) const IS_BYTE: usize = IS_GT + 1;
pub(crate) const START_SHARED_COLS: usize = IS_GT + 1;
pub(crate) const START_SHARED_COLS: usize = IS_BYTE + 1;
/// Within the Arithmetic Unit, there are shared columns which can be
/// used by any arithmetic circuit, depending on which one is active

View File

@ -5,6 +5,7 @@ use crate::extension_tower::BN_BASE;
use crate::util::{addmod, mulmod, submod};
mod addcy;
mod byte;
mod divmod;
mod modular;
mod mul;
@ -25,6 +26,7 @@ pub(crate) enum BinaryOperator {
AddFp254,
MulFp254,
SubFp254,
Byte,
}
impl BinaryOperator {
@ -52,6 +54,13 @@ impl BinaryOperator {
BinaryOperator::AddFp254 => addmod(input0, input1, BN_BASE),
BinaryOperator::MulFp254 => mulmod(input0, input1, BN_BASE),
BinaryOperator::SubFp254 => submod(input0, input1, BN_BASE),
BinaryOperator::Byte => {
if input0 >= 32.into() {
U256::zero()
} else {
input1.byte(31 - input0.as_usize()).into()
}
}
}
}
@ -67,6 +76,7 @@ impl BinaryOperator {
BinaryOperator::AddFp254 => columns::IS_ADDFP254,
BinaryOperator::MulFp254 => columns::IS_MULFP254,
BinaryOperator::SubFp254 => columns::IS_SUBFP254,
BinaryOperator::Byte => columns::IS_BYTE,
}
}
}
@ -98,7 +108,6 @@ impl TernaryOperator {
}
#[derive(Debug)]
#[allow(unused)] // TODO: Should be used soon.
pub(crate) enum Operation {
BinaryOperation {
operator: BinaryOperator,
@ -217,5 +226,9 @@ fn binary_op_to_rows<F: PrimeField64>(
BinaryOperator::AddFp254 | BinaryOperator::MulFp254 | BinaryOperator::SubFp254 => {
ternary_op_to_rows::<F>(op.row_filter(), input0, input1, BN_BASE, result)
}
BinaryOperator::Byte => {
byte::generate(&mut row, input0, input1);
(row, None)
}
}
}

View File

@ -80,7 +80,7 @@ pub fn ctl_filter_logic<F: Field>() -> Column<F> {
}
pub fn ctl_arithmetic_rows<F: Field>() -> TableWithColumns<F> {
const OPS: [usize; 13] = [
const OPS: [usize; 14] = [
COL_MAP.op.add,
COL_MAP.op.sub,
COL_MAP.op.mul,
@ -94,6 +94,7 @@ pub fn ctl_arithmetic_rows<F: Field>() -> TableWithColumns<F> {
COL_MAP.op.submod,
COL_MAP.op.div,
COL_MAP.op.mod_,
COL_MAP.op.byte,
];
// Create the CPU Table whose columns are those with the three
// inputs and one output of the ternary operations listed in `ops`

View File

@ -41,6 +41,7 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/core/transfer.asm"),
include_str!("asm/core/util.asm"),
include_str!("asm/core/access_lists.asm"),
include_str!("asm/core/log.asm"),
include_str!("asm/core/selfdestruct_list.asm"),
include_str!("asm/core/touched_addresses.asm"),
include_str!("asm/core/precompiles/main.asm"),

View File

@ -13,10 +13,14 @@ global sys_extcodehash:
// stack: kexit_info, address
SWAP1
DUP1 %is_dead %jumpi(extcodehash_dead)
%extcodehash
// stack: hash, kexit_info
SWAP1
EXIT_KERNEL
extcodehash_dead:
%stack (address, kexit_info) -> (kexit_info, 0)
EXIT_KERNEL
global extcodehash:
// stack: address, retdest

View File

@ -87,7 +87,7 @@ remove_accessed_addresses_found:
%stack (addr, key, value) -> (addr, key, value, %%after)
%jump(insert_accessed_storage_keys)
%%after:
// stack: cold_access
// stack: cold_access, original_value
%endmacro
/// Inserts the storage key and value into the access list if it is not already present.

View File

@ -23,7 +23,7 @@ global sys_call:
%u256_to_addr // Truncate to 160 bits
DUP1 %insert_accessed_addresses
%checkpoint // Checkpoint
DUP1 %insert_touched_addresses
DUP2 %insert_touched_addresses
%call_charge_gas(1, 1)
@ -45,6 +45,7 @@ global sys_call:
// stack: new_ctx, kexit_info, callgas, address, value, args_offset, args_size, ret_offset, ret_size
// Each line in the block below does not change the stack.
%set_static
DUP4 %set_new_ctx_addr
%address %set_new_ctx_caller
DUP5 %set_new_ctx_value
@ -93,6 +94,7 @@ global sys_callcode:
// stack: new_ctx, kexit_info, callgas, address, value, args_offset, args_size, ret_offset, ret_size
// Each line in the block below does not change the stack.
%set_static
%address %set_new_ctx_addr
%address %set_new_ctx_caller
DUP5 %set_new_ctx_value
@ -123,7 +125,7 @@ global sys_staticcall:
%u256_to_addr // Truncate to 160 bits
DUP1 %insert_accessed_addresses
%checkpoint // Checkpoint
DUP1 %insert_touched_addresses
DUP2 %insert_touched_addresses
// Add a value of 0 to the stack. Slightly inefficient but that way we can reuse %call_charge_gas.
%stack (cold_access, address, gas, kexit_info) -> (cold_access, address, gas, kexit_info, 0)
@ -195,13 +197,14 @@ global sys_delegatecall:
// stack: new_ctx, kexit_info, callgas, address, value, args_offset, args_size, ret_offset, ret_size
// Each line in the block below does not change the stack.
%set_static
%address %set_new_ctx_addr
%caller %set_new_ctx_caller
%callvalue %set_new_ctx_value
%set_new_ctx_parent_pc(after_call_instruction)
DUP4 %set_new_ctx_code
%stack (new_ctx, kexit_info, callgas, address, args_offset, args_size, ret_offset, ret_size)
%stack (new_ctx, kexit_info, callgas, address, value, args_offset, args_size, ret_offset, ret_size)
-> (new_ctx, kexit_info, ret_offset, ret_size)
%enter_new_ctx
@ -243,6 +246,15 @@ call_insufficient_balance:
// stack: new_ctx
%endmacro
// Set @CTX_METADATA_STATIC of the next context to the current value.
%macro set_static
// stack: new_ctx
%mload_context_metadata(@CTX_METADATA_STATIC)
%stack (is_static, new_ctx) -> (new_ctx, @SEGMENT_CONTEXT_METADATA, @CTX_METADATA_STATIC, is_static, new_ctx)
MSTORE_GENERAL
// stack: new_ctx
%endmacro
%macro set_new_ctx_addr
// stack: called_addr, new_ctx
%stack (called_addr, new_ctx)

View File

@ -16,7 +16,7 @@ global call_charge_gas:
%mul_const(@GAS_COLDACCOUNTACCESS_MINUS_WARMACCESS)
%add_const(@GAS_WARMACCESS)
// stack: cost, is_call_or_staticcall, is_call_or_callcode, address, gas, kexit_info, value, retdest
DUP4
DUP3
// stack: is_call_or_callcode, cost, is_call_or_staticcall, is_call_or_callcode, address, gas, kexit_info, value, retdest
%jumpi(xfer_cost)
after_xfer_cost:
@ -60,7 +60,7 @@ after_new_cost:
(retdest, kexit_info, C_callgas, address, value)
JUMP
xfer_cost:
global xfer_cost:
// stack: cost, is_call_or_staticcall, is_call_or_callcode, address, gas, kexit_info, value, retdest
DUP7
// stack: value, cost, is_call_or_staticcall, is_call_or_callcode, address, gas, kexit_info, value, retdest

View File

@ -150,7 +150,7 @@ after_constructor:
// stack: code_size, leftover_gas, success, address, kexit_info
%mul_const(@GAS_CODEDEPOSIT)
// stack: code_size_cost, leftover_gas, success, address, kexit_info
DUP2 DUP2 GT %jumpi(fault_exception)
DUP2 DUP2 GT %jumpi(create_oog)
SWAP1 SUB
// stack: leftover_gas, success, address, kexit_info
%pop_checkpoint
@ -213,6 +213,12 @@ create_code_too_large:
%stack (code_size, leftover_gas, success, address, kexit_info) -> (kexit_info, 0)
EXIT_KERNEL
create_oog:
%revert_checkpoint
%mstore_context_metadata(@CTX_METADATA_RETURNDATA_SIZE, 0)
%stack (code_size_cost, leftover_gas, success, address, kexit_info) -> (kexit_info, 0)
EXIT_KERNEL
%macro set_codehash
%stack (addr, codehash) -> (addr, codehash, %%after)
%jump(set_codehash)

View File

@ -65,12 +65,11 @@ count_zeros_finish:
PUSH @GAS_TRANSACTION
// stack: gas_txn, gas_creation, gas_txndata, retdest
// TODO: Add num_access_list_addresses * GAS_ACCESSLISTADDRESS
// TODO: Add num_access_list_slots * GAS_ACCESSLISTSTORAGE
ADD
ADD
// stack: total_gas, retdest
%mload_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_DATA_COST)
ADD
SWAP1
JUMP

View File

@ -0,0 +1,75 @@
// TODO: Implement receipts
global sys_log0:
%check_static
// stack: kexit_info, offset, size
DUP3 DUP3
%add_or_fault
// stack: offset+size, kexit_info, offset, size
DUP1 %ensure_reasonable_offset
%update_mem_bytes
// stack: kexit_info, offset, size
DUP3 %mul_const(@GAS_LOGDATA) %add_const(@GAS_LOG)
// stack: gas, kexit_info, offset, size
%charge_gas
%stack (kexit_info, offset, size) -> (kexit_info)
EXIT_KERNEL
global sys_log1:
%check_static
// stack: kexit_info, offset, size, topic
DUP3 DUP3
%add_or_fault
// stack: offset+size, kexit_info, offset, size, topic
DUP1 %ensure_reasonable_offset
%update_mem_bytes
// stack: kexit_info, offset, size, topic
DUP3 %mul_const(@GAS_LOGDATA) %add_const(@GAS_LOG) %add_const(@GAS_LOGTOPIC)
// stack: gas, kexit_info, offset, size, topic
%charge_gas
%stack (kexit_info, offset, size, topic) -> (kexit_info)
EXIT_KERNEL
global sys_log2:
%check_static
// stack: kexit_info, offset, size, topic1, topic2
DUP3 DUP3
%add_or_fault
// stack: offset+size, kexit_info, offset, size, topic1, topic2
DUP1 %ensure_reasonable_offset
%update_mem_bytes
// stack: kexit_info, offset, size, topic1, topic2
DUP3 %mul_const(@GAS_LOGDATA) %add_const(@GAS_LOG) %add_const(@GAS_LOGTOPIC) %add_const(@GAS_LOGTOPIC)
// stack: gas, kexit_info, offset, size, topic1, topic2
%charge_gas
%stack (kexit_info, offset, size, topic1, topic2) -> (kexit_info)
EXIT_KERNEL
global sys_log3:
%check_static
// stack: kexit_info, offset, size, topic1, topic2, topic3
DUP3 DUP3
%add_or_fault
// stack: offset+size, kexit_info, offset, size, topic1, topic2, topic3
DUP1 %ensure_reasonable_offset
%update_mem_bytes
// stack: kexit_info, offset, size, topic1, topic2, topic3
DUP3 %mul_const(@GAS_LOGDATA) %add_const(@GAS_LOG) %add_const(@GAS_LOGTOPIC) %add_const(@GAS_LOGTOPIC) %add_const(@GAS_LOGTOPIC)
// stack: gas, kexit_info, offset, size, topic1, topic2, topic3
%charge_gas
%stack (kexit_info, offset, size, topic1, topic2, topic3) -> (kexit_info)
EXIT_KERNEL
global sys_log4:
%check_static
// stack: kexit_info, offset, size, topic1, topic2, topic3, topic4
DUP3 DUP3
%add_or_fault
// stack: offset+size, kexit_info, offset, size, topic1, topic2, topic3, topic4
DUP1 %ensure_reasonable_offset
%update_mem_bytes
// stack: kexit_info, offset, size, topic1, topic2, topic3, topic4
DUP3 %mul_const(@GAS_LOGDATA) %add_const(@GAS_LOG) %add_const(@GAS_LOGTOPIC) %add_const(@GAS_LOGTOPIC) %add_const(@GAS_LOGTOPIC) %add_const(@GAS_LOGTOPIC)
// stack: gas, kexit_info, offset, size, topic1, topic2, topic3, topic4
%charge_gas
%stack (kexit_info, offset, size, topic1, topic2, topic3, topic4) -> (kexit_info)
EXIT_KERNEL

View File

@ -18,22 +18,27 @@ global process_normalized_txn:
// Assert gas_limit >= intrinsic_gas.
%mload_txn_field(@TXN_FIELD_INTRINSIC_GAS)
%mload_txn_field(@TXN_FIELD_GAS_LIMIT)
%assert_ge
%assert_ge(invalid_txn)
// Assert block gas limit >= txn gas limit.
%mload_txn_field(@TXN_FIELD_GAS_LIMIT)
%mload_global_metadata(@GLOBAL_METADATA_BLOCK_GAS_LIMIT)
%assert_ge(invalid_txn)
%mload_txn_field(@TXN_FIELD_ORIGIN)
// stack: sender, retdest
// Check that txn nonce matches account nonce.
DUP1 %nonce
DUP1 %eq_const(@MAX_NONCE) %assert_zero // EIP-2681
DUP1 %eq_const(@MAX_NONCE) %assert_zero(invalid_txn) // EIP-2681
// stack: sender_nonce, sender, retdest
%mload_txn_field(@TXN_FIELD_NONCE)
// stack: tx_nonce, sender_nonce, sender, retdest
%assert_eq
%assert_eq(invalid_txn)
// stack: sender, retdest
// Assert sender has no code.
DUP1 %ext_code_empty %assert_nonzero
DUP1 %ext_code_empty %assert_nonzero(invalid_txn)
// stack: sender, retdest
// Assert sender balance >= gas_limit * gas_price + value.
@ -44,7 +49,7 @@ global process_normalized_txn:
MUL
%mload_txn_field(@TXN_FIELD_VALUE)
ADD
%assert_le
%assert_le(invalid_txn)
// stack: retdest
// Assert chain ID matches block metadata
@ -58,7 +63,7 @@ global process_normalized_txn:
%mload_global_metadata(@GLOBAL_METADATA_BLOCK_CHAIN_ID)
MUL
// stack: filtered_block_chain_id, filtered_tx_chain_id, retdest
%assert_eq
%assert_eq(invalid_txn)
// stack: retdest
global buy_gas:
@ -77,6 +82,23 @@ global increment_sender_nonce:
%mload_txn_field(@TXN_FIELD_ORIGIN)
%increment_nonce
global warm_precompiles:
// Add precompiles to accessed addresses.
PUSH @ECREC %insert_accessed_addresses_no_return
PUSH @SHA256 %insert_accessed_addresses_no_return
PUSH @RIP160 %insert_accessed_addresses_no_return
PUSH @ID %insert_accessed_addresses_no_return
PUSH @EXPMOD %insert_accessed_addresses_no_return
PUSH @BN_ADD %insert_accessed_addresses_no_return
PUSH @BN_MUL %insert_accessed_addresses_no_return
PUSH @SNARKV %insert_accessed_addresses_no_return
PUSH @BLAKE2_F %insert_accessed_addresses_no_return
// EIP-3651
global warm_coinbase:
%mload_global_metadata(@GLOBAL_METADATA_BLOCK_BENEFICIARY)
%insert_accessed_addresses_no_return
global process_based_on_type:
%is_contract_creation
%jumpi(process_contract_creation_txn)
@ -214,17 +236,6 @@ global process_message_txn:
// stack: code_empty, retdest
%jumpi(process_message_txn_return)
// Add precompiles to accessed addresses.
PUSH @ECREC %insert_accessed_addresses_no_return
PUSH @SHA256 %insert_accessed_addresses_no_return
PUSH @RIP160 %insert_accessed_addresses_no_return
PUSH @ID %insert_accessed_addresses_no_return
PUSH @EXPMOD %insert_accessed_addresses_no_return
PUSH @BN_ADD %insert_accessed_addresses_no_return
PUSH @BN_MUL %insert_accessed_addresses_no_return
PUSH @SNARKV %insert_accessed_addresses_no_return
PUSH @BLAKE2_F %insert_accessed_addresses_no_return
// Otherwise, load to's code and execute it in a new context.
// stack: retdest
%create_context
@ -343,8 +354,9 @@ process_message_txn_fail:
%mload_txn_field(@TXN_FIELD_MAX_PRIORITY_FEE_PER_GAS)
%mload_txn_field(@TXN_FIELD_MAX_FEE_PER_GAS)
// stack: max_fee, max_priority_fee, base_fee
DUP3 DUP2 %assert_ge // Assert max_fee >= base_fee
DUP3 DUP2 %assert_ge(invalid_txn) // Assert max_fee >= base_fee
// stack: max_fee, max_priority_fee, base_fee
DUP2 DUP2 %assert_ge(invalid_txn) // Assert max_fee >= max_priority_fee
%stack (max_fee, max_priority_fee, base_fee) -> (max_fee, base_fee, max_priority_fee, base_fee)
SUB
// stack: max_fee - base_fee, max_priority_fee, base_fee
@ -394,3 +406,7 @@ contract_creation_fault_4:
%delete_all_touched_addresses
%delete_all_selfdestructed_addresses
JUMP
global invalid_txn:
%jump(txn_loop)

View File

@ -6,18 +6,3 @@ global sys_blockhash:
global sys_prevrandao:
// TODO: What semantics will this have for Edge?
PANIC
global sys_log0:
%check_static
PANIC
global sys_log1:
%check_static
PANIC
global sys_log2:
%check_static
PANIC
global sys_log3:
%check_static
PANIC
global sys_log4:
%check_static
PANIC

View File

@ -15,9 +15,15 @@ global sys_return:
// stack: kexit_info, offset, size
%stack (kexit_info, offset, size) -> (offset, size, kexit_info, offset, size)
%add_or_fault
// stack: offset+size, kexit_info, offset, size
DUP4 ISZERO %jumpi(return_zero_size)
// stack: offset+size, kexit_info, offset, size
DUP1 %ensure_reasonable_offset
%update_mem_bytes
%jump(return_after_gas)
return_zero_size:
POP
return_after_gas:
// Load the parent's context.
%mload_context_metadata(@CTX_METADATA_PARENT_CONTEXT)
@ -115,8 +121,13 @@ global sys_revert:
%stack (kexit_info, offset, size) -> (offset, size, kexit_info, offset, size)
%add_or_fault
DUP1 %ensure_reasonable_offset
// stack: offset+size, kexit_info, offset, size
DUP4 ISZERO %jumpi(revert_zero_size)
%update_mem_bytes
%jump(revert_after_gas)
revert_zero_size:
POP
revert_after_gas:
// Load the parent's context.
%mload_context_metadata(@CTX_METADATA_PARENT_CONTEXT)

View File

@ -21,11 +21,7 @@
// Returns whether the current transaction is a contract creation transaction.
%macro is_contract_creation
// stack: (empty)
%mload_txn_field(@TXN_FIELD_TO)
// stack: to
ISZERO
// If there is no "to" field, then this is a contract creation.
// stack: to == 0
%mload_global_metadata(@GLOBAL_METADATA_CONTRACT_CREATION)
%endmacro
%macro is_precompile

View File

@ -42,6 +42,7 @@
// ```
global bn_glv_decompose:
// stack: k, retdest
%mod_const(@BN_SCALAR)
PUSH @BN_SCALAR DUP1 DUP1
// Compute c2 which is the top 256 bits of k*g1. Use asm from https://medium.com/wicketh/mathemagic-full-multiply-27650fec525d.
PUSH @U256_MAX
@ -73,7 +74,15 @@ global bn_glv_decompose:
// We compute k2 = q1 + q2 - N, but we check for underflow and return N-q1-q2 instead if there is one,
// along with a flag `underflow` set to 1 if there is an underflow, 0 otherwise.
ADD %sub_check_underflow
ADD %bn_sub_check_underflow
// stack: k2, underflow, N, k, retdest
DUP1 %ge_const(0x80000000000000000000000000000000) %jumpi(negate)
%jump(contd)
negate:
// stack: k2, underflow, N, k, retdest
SWAP1 PUSH 1 SUB SWAP1
PUSH @BN_SCALAR SUB
contd:
// stack: k2, underflow, N, k, retdest
SWAP3 PUSH @BN_SCALAR DUP5 PUSH @BN_GLV_S
// stack: s, k2, N, k, underflow, N, k2, retdest
@ -94,4 +103,14 @@ underflowed:
%stack (k1, k2, underflow, retdest) -> (retdest, underflow, k1, k2)
JUMP
%macro bn_sub_check_underflow
// stack: x, y
DUP2 DUP2 LT
// stack: x<y, x, y
DUP1 ISZERO DUP2 DUP4 DUP6 SUB MUL
// stack: (y-x)*(x<y), x>=y, x<y, x, y
%stack (a, b, c, x, y) -> (x, y, b, a, c)
SUB MUL ADD
%stack (res, bool) -> (res, @BN_SCALAR, bool)
MOD
%endmacro

View File

@ -78,7 +78,7 @@ bn254_input_check:
bn_pairing_invalid_input:
// stack: inp_j, j, k, inp, out, retdest
%stack (inp_j, j, k, inp, out, retdest) -> (retdest, inp_j)
%stack (inp_j, j, k, inp, out, retdest) -> (retdest, @U256_MAX)
JUMP
bn254_pairing_start:

View File

@ -100,9 +100,15 @@ ecdsa_after_precompute_loop_contd2:
%stack (accx, accy, i, a0, a1, b0, b1, retdest) -> (i, accx, accy, a0, a1, b0, b1, retdest)
%decrement %jump(ecdsa_after_precompute_loop)
ecdsa_after_precompute_loop_end:
// Check that the public key is not the point at infinity. See https://github.com/ethereum/eth-keys/pull/76 for discussion.
DUP2 DUP2 ISZERO SWAP1 ISZERO MUL %jumpi(pk_is_infinity)
%stack (accx, accy, ecdsa_after_precompute_loop_contd2, i, a0, a1, b0, b1, retdest) -> (retdest, accx, accy)
JUMP
pk_is_infinity:
%stack (accx, accy, ecdsa_after_precompute_loop_contd2, i, a0, a1, b0, b1, pubkey_to_addr, retdest) -> (retdest, @U256_MAX)
JUMP
// Take a public key (PKx, PKy) and return the associated address KECCAK256(PKx || PKy)[-20:].
pubkey_to_addr:
// stack: PKx, PKy, retdest

View File

@ -176,9 +176,6 @@ global sys_codecopy:
global sys_returndatacopy:
// stack: kexit_info, dest_offset, offset, size
PUSH @GAS_VERYLOW
DUP5
// stack: size, Gverylow, kexit_info, dest_offset, offset, size
ISZERO %jumpi(wcopy_empty)
// stack: Gverylow, kexit_info, dest_offset, offset, size
DUP5 %num_bytes_to_num_words %mul_const(@GAS_COPY) ADD %charge_gas
@ -191,6 +188,11 @@ global sys_returndatacopy:
DUP4 DUP4 %add_or_fault // Overflow check
%mload_context_metadata(@CTX_METADATA_RETURNDATA_SIZE) LT %jumpi(fault_exception) // Data len check
// stack: kexit_info, dest_offset, offset, size
DUP4
// stack: size, kexit_info, dest_offset, offset, size
ISZERO %jumpi(returndatacopy_empty)
%mload_context_metadata(@CTX_METADATA_RETURNDATA_SIZE)
// stack: total_size, kexit_info, dest_offset, offset, size
DUP4
@ -201,3 +203,7 @@ global sys_returndatacopy:
%stack (context, kexit_info, dest_offset, offset, size) ->
(context, @SEGMENT_MAIN_MEMORY, dest_offset, context, @SEGMENT_RETURNDATA, offset, size, wcopy_after, kexit_info)
%jump(memcpy)
returndatacopy_empty:
%stack (kexit_info, dest_offset, offset, size) -> (kexit_info)
EXIT_KERNEL

View File

@ -130,7 +130,7 @@ decode_rlp_list_len_big:
// fit in a single (256-bit) word on the stack.
// Pre stack: pos, len, retdest
// Post stack: pos', int
decode_int_given_len:
global decode_int_given_len:
%stack (pos, len, retdest) -> (pos, len, pos, retdest)
ADD
// stack: end_pos, pos, retdest

View File

@ -61,12 +61,30 @@
%endmacro
// Decode the "to" field and store it.
// This field is either 160-bit or empty in the case of a contract creation txn.
%macro decode_and_store_to
// stack: pos
%decode_rlp_scalar
%stack (pos, to) -> (to, pos)
%decode_rlp_string_len
// stack: pos, len
SWAP1
// stack: len, pos
DUP1 ISZERO %jumpi(%%contract_creation)
// stack: len, pos
DUP1 %eq_const(20) ISZERO %jumpi(invalid_txn) // Address is 160-bit
%stack (len, pos) -> (pos, len, %%with_scalar)
%jump(decode_int_given_len)
%%with_scalar:
// stack: pos, int
SWAP1
%mstore_txn_field(@TXN_FIELD_TO)
// stack: pos
%jump(%%end)
%%contract_creation:
// stack: len, pos
POP
PUSH 1 %mstore_global_metadata(@GLOBAL_METADATA_CONTRACT_CREATION)
// stack: pos
%%end:
%endmacro
// Decode the "value" field and store it.
@ -105,10 +123,15 @@
%macro decode_and_store_access_list
// stack: pos
DUP1 %mstore_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_RLP_START)
%decode_rlp_list_len
%stack (pos, len) -> (len, pos)
%jumpi(todo_access_lists_not_supported_yet)
%stack (pos, len) -> (len, len, pos, %%after)
%jumpi(decode_and_store_access_list)
// stack: len, pos, %%after
POP SWAP1 POP
// stack: pos
%mload_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_RLP_START) DUP2 SUB %mstore_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_RLP_LEN)
%%after:
%endmacro
%macro decode_and_store_y_parity
@ -135,5 +158,96 @@
// stack: pos
%endmacro
global todo_access_lists_not_supported_yet:
PANIC
// The access list is of the form `[[{20 bytes}, [{32 bytes}...]]...]`.
global decode_and_store_access_list:
// stack: len, pos
DUP2 ADD
// stack: end_pos, pos
// Store the RLP length.
%mload_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_RLP_START) DUP2 SUB %mstore_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_RLP_LEN)
SWAP1
decode_and_store_access_list_loop:
// stack: pos, end_pos
DUP2 DUP2 EQ %jumpi(decode_and_store_access_list_finish)
// stack: pos, end_pos
%decode_rlp_list_len // Should be a list `[{20 bytes}, [{32 bytes}...]]`
// stack: pos, internal_len, end_pos
SWAP1 POP // We don't need the length of this list.
// stack: pos, end_pos
%decode_rlp_scalar // Address // TODO: Should panic when address is not 20 bytes?
// stack: pos, addr, end_pos
SWAP1
// stack: addr, pos, end_pos
DUP1 %insert_accessed_addresses_no_return
// stack: addr, pos, end_pos
%add_address_cost
// stack: addr, pos, end_pos
SWAP1
// stack: pos, addr, end_pos
%decode_rlp_list_len // Should be a list of storage keys `[{32 bytes}...]`
// stack: pos, sk_len, addr, end_pos
SWAP1 DUP2 ADD
// stack: sk_end_pos, pos, addr, end_pos
SWAP1
// stack: pos, sk_end_pos, addr, end_pos
sk_loop:
DUP2 DUP2 EQ %jumpi(end_sk)
// stack: pos, sk_end_pos, addr, end_pos
%decode_rlp_scalar // Storage key // TODO: Should panic when key is not 32 bytes?
%stack (pos, key, sk_end_pos, addr, end_pos) ->
(addr, key, sk_loop_contd, pos, sk_end_pos, addr, end_pos)
%jump(insert_accessed_storage_keys_with_original_value)
sk_loop_contd:
// stack: pos, sk_end_pos, addr, end_pos
%add_storage_key_cost
%jump(sk_loop)
end_sk:
%stack (pos, sk_end_pos, addr, end_pos) -> (pos, end_pos)
%jump(decode_and_store_access_list_loop)
decode_and_store_access_list_finish:
%stack (pos, end_pos, retdest) -> (retdest, pos)
JUMP
%macro add_address_cost
%mload_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_DATA_COST)
%add_const(@GAS_ACCESSLISTADDRESS)
%mstore_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_DATA_COST)
%endmacro
%macro add_storage_key_cost
%mload_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_DATA_COST)
%add_const(@GAS_ACCESSLISTSTORAGE)
%mstore_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_DATA_COST)
%endmacro
insert_accessed_storage_keys_with_original_value:
%stack (addr, key, retdest) -> (key, addr, after_read, addr, key, retdest)
%jump(sload_with_addr)
after_read:
%stack (value, addr, key, retdest) -> ( addr, key, value, retdest)
%insert_accessed_storage_keys
%pop2
JUMP
sload_with_addr:
%stack (slot, addr) -> (slot, addr, after_storage_read)
%slot_to_storage_key
// stack: storage_key, addr, after_storage_read
PUSH 64 // storage_key has 64 nibbles
%stack (n64, storage_key, addr, after_storage_read) -> (addr, n64, storage_key, after_storage_read)
%mpt_read_state_trie
// stack: account_ptr, 64, storage_key, after_storage_read
DUP1 ISZERO %jumpi(ret_zero) // TODO: Fix this. This should never happen.
// stack: account_ptr, 64, storage_key, after_storage_read
%add_const(2)
// stack: storage_root_ptr_ptr
%mload_trie_data
// stack: storage_root_ptr, 64, storage_key, after_storage_read
%jump(mpt_read)
ret_zero:
// stack: account_ptr, 64, storage_key, after_storage_read, retdest
%pop4
PUSH 0 SWAP1 JUMP

View File

@ -102,9 +102,16 @@ type_0_compute_signed_data:
// stack: rlp_pos, rlp_start, retdest
%mload_txn_field(@TXN_FIELD_TO)
%mload_global_metadata(@GLOBAL_METADATA_CONTRACT_CREATION) %jumpi(zero_to)
// stack: to, rlp_pos, rlp_start, retdest
SWAP1 %encode_rlp_160
%jump(after_to)
zero_to:
// stack: to, rlp_pos, rlp_start, retdest
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
after_to:
%mload_txn_field(@TXN_FIELD_VALUE)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest

View File

@ -31,6 +31,109 @@ global process_type_1_txn:
POP
// stack: retdest
// TODO: Check signature.
// From EIP-2930:
// The signatureYParity, signatureR, signatureS elements of this transaction represent a secp256k1 signature
// over keccak256(0x01 || rlp([chainId, nonce, gasPrice, gasLimit, to, value, data, accessList])).
type_1_compute_signed_data:
%alloc_rlp_block
// stack: rlp_start, retdest
%mload_txn_field(@TXN_FIELD_CHAIN_ID)
// stack: chain_id, rlp_start, retdest
DUP2
// stack: rlp_pos, chain_id, rlp_start, retdest
%encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
%mload_txn_field(@TXN_FIELD_NONCE)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
%mload_txn_field(@TXN_FIELD_MAX_FEE_PER_GAS)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
%mload_txn_field(@TXN_FIELD_GAS_LIMIT)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
%mload_txn_field(@TXN_FIELD_TO)
%mload_global_metadata(@GLOBAL_METADATA_CONTRACT_CREATION) %jumpi(zero_to)
// stack: to, rlp_pos, rlp_start, retdest
SWAP1 %encode_rlp_160
%jump(after_to)
zero_to:
// stack: to, rlp_pos, rlp_start, retdest
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
after_to:
%mload_txn_field(@TXN_FIELD_VALUE)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
// Encode txn data.
%mload_txn_field(@TXN_FIELD_DATA_LEN)
PUSH 0 // ADDR.virt
PUSH @SEGMENT_TXN_DATA
PUSH 0 // ADDR.context
// stack: ADDR: 3, len, rlp_pos, rlp_start, retdest
PUSH after_serializing_txn_data
// stack: after_serializing_txn_data, ADDR: 3, len, rlp_pos, rlp_start, retdest
SWAP5
// stack: rlp_pos, ADDR: 3, len, after_serializing_txn_data, rlp_start, retdest
%jump(encode_rlp_string)
after_serializing_txn_data:
// Instead of manually encoding the access list, we just copy the raw RLP from the transaction.
%mload_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_RLP_START)
%mload_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_RLP_LEN)
%stack (al_len, al_start, rlp_pos, rlp_start, retdest) ->
(
0, @SEGMENT_RLP_RAW, rlp_pos,
0, @SEGMENT_RLP_RAW, al_start,
al_len,
after_serializing_access_list,
rlp_pos, rlp_start, retdest)
%jump(memcpy)
after_serializing_access_list:
// stack: rlp_pos, rlp_start, retdest
%mload_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_RLP_LEN) ADD
// stack: rlp_pos, rlp_start, retdest
%prepend_rlp_list_prefix
// stack: prefix_start_pos, rlp_len, retdest
// Store a `1` in front of the RLP
%decrement
%stack (pos) -> (0, @SEGMENT_RLP_RAW, pos, 1, pos)
MSTORE_GENERAL
// stack: pos, rlp_len, retdest
// Hash the RLP + the leading `1`
SWAP1 %increment SWAP1
PUSH @SEGMENT_RLP_RAW
PUSH 0 // context
// stack: ADDR: 3, len, retdest
KECCAK_GENERAL
// stack: hash, retdest
%mload_txn_field(@TXN_FIELD_S)
%mload_txn_field(@TXN_FIELD_R)
%mload_txn_field(@TXN_FIELD_Y_PARITY) %add_const(27) // ecrecover interprets v as y_parity + 27
PUSH store_origin
// stack: store_origin, v, r, s, hash, retdest
SWAP4
// stack: hash, v, r, s, store_origin, retdest
%jump(ecrecover)
store_origin:
// stack: address, retdest
// If ecrecover returned u256::MAX, that indicates failure.
DUP1
%eq_const(0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff)
%jumpi(panic)
// stack: address, retdest
%mstore_txn_field(@TXN_FIELD_ORIGIN)
// stack: retdest
%jump(process_normalized_txn)

View File

@ -34,6 +34,113 @@ global process_type_2_txn:
POP
// stack: retdest
// TODO: Check signature.
// From EIP-1559:
// The signature_y_parity, signature_r, signature_s elements of this transaction represent a secp256k1 signature over
// keccak256(0x02 || rlp([chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, destination, amount, data, access_list]))
type_2_compute_signed_data:
%alloc_rlp_block
// stack: rlp_start, retdest
%mload_txn_field(@TXN_FIELD_CHAIN_ID)
// stack: chain_id, rlp_start, retdest
DUP2
// stack: rlp_pos, chain_id, rlp_start, retdest
%encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
%mload_txn_field(@TXN_FIELD_NONCE)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
%mload_txn_field(@TXN_FIELD_MAX_PRIORITY_FEE_PER_GAS)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
%mload_txn_field(@TXN_FIELD_MAX_FEE_PER_GAS)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
%mload_txn_field(@TXN_FIELD_GAS_LIMIT)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
%mload_txn_field(@TXN_FIELD_TO)
%mload_global_metadata(@GLOBAL_METADATA_CONTRACT_CREATION) %jumpi(zero_to)
// stack: to, rlp_pos, rlp_start, retdest
SWAP1 %encode_rlp_160
%jump(after_to)
zero_to:
// stack: to, rlp_pos, rlp_start, retdest
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
after_to:
%mload_txn_field(@TXN_FIELD_VALUE)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, rlp_start, retdest
// Encode txn data.
%mload_txn_field(@TXN_FIELD_DATA_LEN)
PUSH 0 // ADDR.virt
PUSH @SEGMENT_TXN_DATA
PUSH 0 // ADDR.context
// stack: ADDR: 3, len, rlp_pos, rlp_start, retdest
PUSH after_serializing_txn_data
// stack: after_serializing_txn_data, ADDR: 3, len, rlp_pos, rlp_start, retdest
SWAP5
// stack: rlp_pos, ADDR: 3, len, after_serializing_txn_data, rlp_start, retdest
%jump(encode_rlp_string)
after_serializing_txn_data:
// Instead of manually encoding the access list, we just copy the raw RLP from the transaction.
%mload_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_RLP_START)
%mload_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_RLP_LEN)
%stack (al_len, al_start, rlp_pos, rlp_start, retdest) ->
(
0, @SEGMENT_RLP_RAW, rlp_pos,
0, @SEGMENT_RLP_RAW, al_start,
al_len,
after_serializing_access_list,
rlp_pos, rlp_start, retdest)
%jump(memcpy)
after_serializing_access_list:
// stack: rlp_pos, rlp_start, retdest
%mload_global_metadata(@GLOBAL_METADATA_ACCESS_LIST_RLP_LEN) ADD
// stack: rlp_pos, rlp_start, retdest
%prepend_rlp_list_prefix
// stack: prefix_start_pos, rlp_len, retdest
// Store a `2` in front of the RLP
%decrement
%stack (pos) -> (0, @SEGMENT_RLP_RAW, pos, 2, pos)
MSTORE_GENERAL
// stack: pos, rlp_len, retdest
// Hash the RLP + the leading `2`
SWAP1 %increment SWAP1
PUSH @SEGMENT_RLP_RAW
PUSH 0 // context
// stack: ADDR: 3, len, retdest
KECCAK_GENERAL
// stack: hash, retdest
%mload_txn_field(@TXN_FIELD_S)
%mload_txn_field(@TXN_FIELD_R)
%mload_txn_field(@TXN_FIELD_Y_PARITY) %add_const(27) // ecrecover interprets v as y_parity + 27
PUSH store_origin
// stack: store_origin, v, r, s, hash, retdest
SWAP4
// stack: hash, v, r, s, store_origin, retdest
%jump(ecrecover)
store_origin:
// stack: address, retdest
// If ecrecover returned u256::MAX, that indicates failure.
DUP1
%eq_const(0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff)
%jumpi(panic)
// stack: address, retdest
%mstore_txn_field(@TXN_FIELD_ORIGIN)
// stack: retdest
%jump(process_normalized_txn)

View File

@ -8,17 +8,31 @@ global panic:
%jumpi(panic)
%endmacro
%macro assert_zero(ret)
%jumpi($ret)
%endmacro
// Consumes the top element and asserts that it is nonzero.
%macro assert_nonzero
ISZERO
%jumpi(panic)
%endmacro
%macro assert_nonzero(ret)
ISZERO
%jumpi($ret)
%endmacro
%macro assert_eq
EQ
%assert_nonzero
%endmacro
%macro assert_eq(ret)
EQ
%assert_nonzero($ret)
%endmacro
%macro assert_lt
// %assert_zero is cheaper than %assert_nonzero, so we will leverage the
// fact that (x < y) == !(x >= y).
@ -26,6 +40,11 @@ global panic:
%assert_zero
%endmacro
%macro assert_lt(ret)
GE
%assert_zero($ret)
%endmacro
%macro assert_le
// %assert_zero is cheaper than %assert_nonzero, so we will leverage the
// fact that (x <= y) == !(x > y).
@ -33,6 +52,11 @@ global panic:
%assert_zero
%endmacro
%macro assert_le(ret)
GT
%assert_zero($ret)
%endmacro
%macro assert_gt
// %assert_zero is cheaper than %assert_nonzero, so we will leverage the
// fact that (x > y) == !(x <= y).
@ -40,6 +64,11 @@ global panic:
%assert_zero
%endmacro
%macro assert_gt(ret)
LE
%assert_zero($ret)
%endmacro
%macro assert_ge
// %assert_zero is cheaper than %assert_nonzero, so we will leverage the
// fact that (x >= y) == !(x < y).
@ -47,6 +76,11 @@ global panic:
%assert_zero
%endmacro
%macro assert_ge(ret)
LT
%assert_zero($ret)
%endmacro
%macro assert_eq_const(c)
%eq_const($c)
%assert_nonzero

View File

@ -59,10 +59,18 @@ pub(crate) enum GlobalMetadata {
/// Current checkpoint.
CurrentCheckpoint = 28,
TouchedAddressesLen = 29,
// Gas cost for the access list in type-1 txns. See EIP-2930.
AccessListDataCost = 30,
// Start of the access list in the RLP for type-1 txns.
AccessListRlpStart = 31,
// Length of the access list in the RLP for type-1 txns.
AccessListRlpLen = 32,
// Boolean flag indicating if the txn is a contract creation txn.
ContractCreation = 33,
}
impl GlobalMetadata {
pub(crate) const COUNT: usize = 29;
pub(crate) const COUNT: usize = 33;
pub(crate) fn all() -> [Self; Self::COUNT] {
[
@ -95,6 +103,10 @@ impl GlobalMetadata {
Self::JournalDataLen,
Self::CurrentCheckpoint,
Self::TouchedAddressesLen,
Self::AccessListDataCost,
Self::AccessListRlpStart,
Self::AccessListRlpLen,
Self::ContractCreation,
]
}
@ -130,6 +142,10 @@ impl GlobalMetadata {
Self::JournalDataLen => "GLOBAL_METADATA_JOURNAL_DATA_LEN",
Self::CurrentCheckpoint => "GLOBAL_METADATA_CURRENT_CHECKPOINT",
Self::TouchedAddressesLen => "GLOBAL_METADATA_TOUCHED_ADDRESSES_LEN",
Self::AccessListDataCost => "GLOBAL_METADATA_ACCESS_LIST_DATA_COST",
Self::AccessListRlpStart => "GLOBAL_METADATA_ACCESS_LIST_RLP_START",
Self::AccessListRlpLen => "GLOBAL_METADATA_ACCESS_LIST_RLP_LEN",
Self::ContractCreation => "GLOBAL_METADATA_CONTRACT_CREATION",
}
}
}

View File

@ -1,6 +1,8 @@
use anyhow::Result;
use ethereum_types::U256;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::constants::txn_fields::NormalizedTxnField;
use crate::cpu::kernel::interpreter::Interpreter;
@ -14,6 +16,7 @@ fn test_intrinsic_gas() -> Result<()> {
// Contract creation transaction.
let initial_stack = vec![0xdeadbeefu32.into()];
let mut interpreter = Interpreter::new_with_kernel(intrinsic_gas, initial_stack.clone());
interpreter.set_global_metadata_field(GlobalMetadata::ContractCreation, U256::one());
interpreter.run()?;
assert_eq!(interpreter.stack(), vec![(GAS_TX + GAS_TXCREATE).into()]);

View File

@ -25,6 +25,7 @@ pub(crate) fn gas_to_charge(op: Operation) -> u64 {
BinaryArithmetic(Mod) => G_LOW,
BinaryArithmetic(Lt) => G_VERYLOW,
BinaryArithmetic(Gt) => G_VERYLOW,
BinaryArithmetic(Byte) => G_VERYLOW,
Shl => G_VERYLOW,
Shr => G_VERYLOW,
BinaryArithmetic(AddFp254) => KERNEL_ONLY_INSTR,

View File

@ -26,7 +26,6 @@ use crate::{arithmetic, logic};
pub(crate) enum Operation {
Iszero,
Not,
Byte,
Shl,
Shr,
Syscall(u8, usize, bool),
@ -425,27 +424,6 @@ pub(crate) fn generate_not<F: Field>(
Ok(())
}
pub(crate) fn generate_byte<F: Field>(
state: &mut GenerationState<F>,
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
let [(i, log_in0), (x, log_in1)] = stack_pop_with_log_and_fill::<2, _>(state, &mut row)?;
let byte = if i < 32.into() {
// byte(i) is the i'th little-endian byte; we want the i'th big-endian byte.
x.byte(31 - i.as_usize())
} else {
0
};
let log_out = stack_push_log_and_fill(state, &mut row, byte.into())?;
state.traces.push_memory(log_in0);
state.traces.push_memory(log_in1);
state.traces.push_memory(log_out);
state.traces.push_cpu(row);
Ok(())
}
pub(crate) fn generate_iszero<F: Field>(
state: &mut GenerationState<F>,
mut row: CpuColumnsView<F>,

View File

@ -67,7 +67,9 @@ fn decode(registers: RegistersState, opcode: u8) -> Result<Operation, ProgramErr
(0x17, _) => Ok(Operation::BinaryLogic(logic::Op::Or)),
(0x18, _) => Ok(Operation::BinaryLogic(logic::Op::Xor)),
(0x19, _) => Ok(Operation::Not),
(0x1a, _) => Ok(Operation::Byte),
(0x1a, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::Byte,
)),
(0x1b, _) => Ok(Operation::Shl),
(0x1c, _) => Ok(Operation::Shr),
(0x1d, _) => Ok(Operation::Syscall(opcode, 2, false)), // SAR
@ -168,6 +170,7 @@ fn fill_op_flag<F: Field>(op: Operation, row: &mut CpuColumnsView<F>) {
Operation::BinaryArithmetic(arithmetic::BinaryOperator::Mod) => &mut flags.mod_,
Operation::BinaryArithmetic(arithmetic::BinaryOperator::Lt) => &mut flags.lt,
Operation::BinaryArithmetic(arithmetic::BinaryOperator::Gt) => &mut flags.gt,
Operation::BinaryArithmetic(arithmetic::BinaryOperator::Byte) => &mut flags.byte,
Operation::Shl => &mut flags.shl,
Operation::Shr => &mut flags.shr,
Operation::BinaryArithmetic(arithmetic::BinaryOperator::AddFp254) => &mut flags.addfp254,
@ -202,7 +205,6 @@ fn perform_op<F: Field>(
Operation::Swap(n) => generate_swap(n, state, row)?,
Operation::Iszero => generate_iszero(state, row)?,
Operation::Not => generate_not(state, row)?,
Operation::Byte => generate_byte(state, row)?,
Operation::Shl => generate_shl(state, row)?,
Operation::Shr => generate_shr(state, row)?,
Operation::Syscall(opcode, stack_values_read, stack_len_increased) => generate_syscall(opcode, stack_values_read, stack_len_increased, state, row)?,

View File

@ -81,8 +81,12 @@ fn add11_yml() -> anyhow::Result<()> {
let block_metadata = BlockMetadata {
block_beneficiary: Address::from(beneficiary),
block_timestamp: 0x03e8.into(),
block_number: 1.into(),
block_difficulty: 0x020000.into(),
block_gaslimit: 0xff112233445566u64.into(),
block_chain_id: 1.into(),
block_base_fee: 0xa.into(),
..BlockMetadata::default()
};
let mut contract_code = HashMap::new();

View File

@ -37,15 +37,12 @@ fn test_simple_transfer() -> anyhow::Result<()> {
let sender = hex!("2c7536e3605d9c16a7a3d7b1898e529396a65c23");
let to = hex!("a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0");
let beneficiary_state_key = keccak(beneficiary);
let sender_state_key = keccak(sender);
let to_state_key = keccak(to);
let beneficiary_nibbles = Nibbles::from_bytes_be(beneficiary_state_key.as_bytes()).unwrap();
let sender_nibbles = Nibbles::from_bytes_be(sender_state_key.as_bytes()).unwrap();
let to_nibbles = Nibbles::from_bytes_be(to_state_key.as_bytes()).unwrap();
let beneficiary_account_before = AccountRlp::default();
let sender_account_before = AccountRlp {
nonce: 5.into(),
balance: eth_to_wei(100_000.into()),
@ -72,7 +69,12 @@ fn test_simple_transfer() -> anyhow::Result<()> {
let block_metadata = BlockMetadata {
block_beneficiary: Address::from(beneficiary),
..BlockMetadata::default()
block_timestamp: 0x03e8.into(),
block_number: 1.into(),
block_difficulty: 0x020000.into(),
block_gaslimit: 0xff112233445566u64.into(),
block_chain_id: 1.into(),
block_base_fee: 0xa.into(),
};
let mut contract_code = HashMap::new();
@ -94,10 +96,6 @@ fn test_simple_transfer() -> anyhow::Result<()> {
let txdata_gas = 2 * 16;
let gas_used = 21_000 + txdata_gas;
let beneficiary_account_after = AccountRlp {
balance: beneficiary_account_before.balance + gas_used * 10,
..beneficiary_account_before
};
let sender_account_after = AccountRlp {
balance: sender_account_before.balance - value - gas_used * 10,
nonce: sender_account_before.nonce + 1,
@ -109,11 +107,6 @@ fn test_simple_transfer() -> anyhow::Result<()> {
};
let mut children = core::array::from_fn(|_| Node::Empty.into());
children[beneficiary_nibbles.get_nibble(0) as usize] = Node::Leaf {
nibbles: beneficiary_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&beneficiary_account_after).to_vec(),
}
.into();
children[sender_nibbles.get_nibble(0) as usize] = Node::Leaf {
nibbles: sender_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&sender_account_after).to_vec(),