Merge branch 'main' into nondeterministic_ec_ops

# Conflicts:
#   evm/src/cpu/kernel/interpreter.rs

commit bb773e42b3
@@ -40,14 +40,18 @@ pub fn fixed_base_curve_mul_circuit<C: Curve, F: RichField + Extendable<D>, cons
// `s * P = sum s_i * P_i` with `P_i = (16^i) * P` and `s = sum s_i * (16^i)`.
for (limb, point) in limbs.into_iter().zip(scaled_base) {
// `muls_point[t] = t * P_i` for `t=0..16`.
let muls_point = (0..16)
let mut muls_point = (0..16)
.scan(AffinePoint::ZERO, |acc, _| {
let tmp = *acc;
*acc = (point + *acc).to_affine();
Some(tmp)
})
// First element is zero, so we skip it since `constant_affine_point` takes non-zero input.
.skip(1)
.map(|p| builder.constant_affine_point(p))
.collect::<Vec<_>>();
// We add back a point in position 0. `limb == zero` is checked below, so this point can be arbitrary.
muls_point.insert(0, muls_point[0].clone());
let is_zero = builder.is_equal(limb, zero);
let should_add = builder.not(is_zero);
// `r = s_i * P_i`
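As an aside, the decomposition used in this hunk can be checked outside the circuit. The following is a minimal sketch (not part of the diff; plain integers stand in for curve points and for the precomputed `muls_point` table): splitting `s` into base-16 limbs and summing `limb * P_i` with `P_i = 16^i * P` reproduces `s * P`.

fn base16_limbs(mut s: u64) -> Vec<u64> {
    // s = sum_i s_i * 16^i, with each s_i in 0..16.
    let mut limbs = Vec::new();
    for _ in 0..16 {
        limbs.push(s & 0xf);
        s >>= 4;
    }
    limbs
}

fn main() {
    let s: u64 = 0x1234_5678_9abc_def0;
    let p: u128 = 7; // stand-in for the base point P
    // P_i = 16^i * P, the analogue of `scaled_base`.
    let scaled_base: Vec<u128> = (0..16u32).map(|i| 16u128.pow(i) * p).collect();
    // sum_i s_i * P_i; in the circuit, each s_i * P_i is a lookup into `muls_point`.
    let acc: u128 = base16_limbs(s)
        .into_iter()
        .zip(scaled_base)
        .map(|(limb, p_i)| limb as u128 * p_i)
        .sum();
    assert_eq!(acc, s as u128 * p);
}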
@@ -9,6 +9,7 @@ use once_cell::sync::Lazy;

use super::assembler::{assemble, Kernel};
use crate::cpu::kernel::parser::parse;
use crate::cpu::kernel::txn_fields::NormalizedTxnField;
use crate::memory::segments::Segment;

pub static KERNEL: Lazy<Kernel> = Lazy::new(combined_kernel);
@@ -24,6 +25,9 @@ pub fn evm_constants() -> HashMap<String, U256> {
for segment in Segment::all() {
c.insert(segment.var_name().into(), (segment as u32).into());
}
for txn_field in NormalizedTxnField::all() {
c.insert(txn_field.var_name().into(), (txn_field as u32).into());
}
c
}

@@ -43,8 +47,16 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/secp256k1/lift_x.asm"),
include_str!("asm/secp256k1/inverse_scalar.asm"),
include_str!("asm/ecrecover.asm"),
include_str!("asm/storage_read.asm"),
include_str!("asm/storage_write.asm"),
include_str!("asm/rlp/encode.asm"),
include_str!("asm/rlp/decode.asm"),
include_str!("asm/rlp/read_to_memory.asm"),
include_str!("asm/storage/read.asm"),
include_str!("asm/storage/write.asm"),
include_str!("asm/transactions/process_normalized.asm"),
include_str!("asm/transactions/router.asm"),
include_str!("asm/transactions/type_0.asm"),
include_str!("asm/transactions/type_1.asm"),
include_str!("asm/transactions/type_2.asm"),
];

let parsed_files = files.iter().map(|f| parse(f)).collect_vec();
@@ -78,10 +78,22 @@
// stack: c, input, ...
SWAP1
// stack: input, c, ...
SUB
DIV
// stack: input / c, ...
%endmacro

// Slightly inefficient as we need to swap the inputs.
// Consider avoiding this in performance-critical code.
%macro mod_const(c)
// stack: input, ...
PUSH $c
// stack: c, input, ...
SWAP1
// stack: input, c, ...
MOD
// stack: input % c, ...
%endmacro

%macro shl_const(c)
// stack: input, ...
PUSH $c
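For readers unfamiliar with the stack comments in these macros, here is a hedged Rust model (the helper is purely illustrative, not kernel code) of the effect of a divide-by-constant macro such as %div_const(c), and of %mod_const(c): push the constant, SWAP1 so the input is back on top, then apply the native binary op.

// Treat the end of the Vec as the top of the stack.
fn apply_const_op(stack: &mut Vec<u64>, c: u64, op: fn(u64, u64) -> u64) {
    stack.push(c);                // PUSH $c   -> stack: c, input, ...
    let top = stack.len();
    stack.swap(top - 1, top - 2); // SWAP1     -> stack: input, c, ...
    let a = stack.pop().unwrap(); // input (top of stack)
    let b = stack.pop().unwrap(); // c
    stack.push(op(a, b));         // DIV / MOD -> stack: input op c, ...
}

fn main() {
    let mut stack = vec![100u64]; // stack: input, ...
    apply_const_op(&mut stack, 7, |a, b| a / b);
    assert_eq!(stack, vec![14]); // 100 / 7
    let mut stack = vec![100u64];
    apply_const_op(&mut stack, 7, |a, b| a % b);
    assert_eq!(stack, vec![2]); // 100 % 7
}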
153 evm/src/cpu/kernel/asm/rlp/decode.asm Normal file
@@ -0,0 +1,153 @@
// Note: currently, these methods do not check that RLP input is in canonical
// form; for example a single byte could be encoded with the length-of-length
// form. Technically an EVM must perform these checks, but we aren't really
// concerned with it in our setting. An attacker who corrupted consensus could
// prove a non-canonical state, but this would just temporarily stall the bridge
// until a fix was deployed. We are more concerned with preventing any theft of
// assets.

// Parse the length of a bytestring from RLP memory. The next len bytes after
// pos' will contain the string.
//
// Pre stack: pos, retdest
// Post stack: pos', len
global decode_rlp_string_len:
JUMPDEST
// stack: pos, retdest
DUP1
%mload_current(@SEGMENT_RLP_RAW)
// stack: first_byte, pos, retdest
DUP1
%gt_const(0xb7)
// stack: first_byte >= 0xb8, first_byte, pos, retdest
%jumpi(decode_rlp_string_len_large)
// stack: first_byte, pos, retdest
DUP1
%gt_const(0x7f)
// stack: first_byte >= 0x80, first_byte, pos, retdest
%jumpi(decode_rlp_string_len_medium)

// String is a single byte in the range [0x00, 0x7f].
%stack (first_byte, pos, retdest) -> (retdest, pos, 1)
JUMP

decode_rlp_string_len_medium:
// String is 0-55 bytes long. First byte contains the len.
// stack: first_byte, pos, retdest
%sub_const(0x80)
// stack: len, pos, retdest
SWAP1
%add_const(1)
// stack: pos', len, retdest
%stack (pos, len, retdest) -> (retdest, pos, len)
JUMP

decode_rlp_string_len_large:
// String is >55 bytes long. First byte contains the len of the len.
// stack: first_byte, pos, retdest
%sub_const(0xb7)
// stack: len_of_len, pos, retdest
SWAP1
%add_const(1)
// stack: pos', len_of_len, retdest
%jump(decode_int_given_len)

// Parse a scalar from RLP memory.
// Pre stack: pos, retdest
// Post stack: pos', scalar
//
// Scalars are variable-length, but this method assumes a max length of 32
// bytes, so that the result can be returned as a single word on the stack.
// As per the spec, scalars must not have leading zeros.
global decode_rlp_scalar:
JUMPDEST
// stack: pos, retdest
PUSH decode_int_given_len
// stack: decode_int_given_len, pos, retdest
SWAP1
// stack: pos, decode_int_given_len, retdest
// decode_rlp_string_len will return to decode_int_given_len, at which point
// the stack will contain (pos', len, retdest), which are the proper args
// to decode_int_given_len.
%jump(decode_rlp_string_len)

// Parse the length of an RLP list from memory.
// Pre stack: pos, retdest
// Post stack: pos', len
global decode_rlp_list_len:
JUMPDEST
// stack: pos, retdest
DUP1
%mload_current(@SEGMENT_RLP_RAW)
// stack: first_byte, pos, retdest
SWAP1
%add_const(1) // increment pos
SWAP1
// stack: first_byte, pos', retdest
// If first_byte is >= 0xf8, it's a > 55 byte list, and
// first_byte - 0xf7 is the length of the length.
DUP1
%gt_const(0xf7) // GT is native while GE is not, so compare to 0xf7 instead
// stack: first_byte >= 0xf8, first_byte, pos', retdest
%jumpi(decode_rlp_list_len_big)

// This is the "small list" case.
// The list length is first_byte - 0xc0.
// stack: first_byte, pos', retdest
%sub_const(0xc0)
// stack: len, pos', retdest
%stack (len, pos, retdest) -> (retdest, pos, len)
JUMP

decode_rlp_list_len_big:
JUMPDEST
// The length of the length is first_byte - 0xf7.
// stack: first_byte, pos', retdest
%sub_const(0xf7)
// stack: len_of_len, pos', retdest
SWAP1
// stack: pos', len_of_len, retdest
%jump(decode_int_given_len)
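The prefix rules implemented by decode_rlp_string_len and decode_rlp_list_len can be restated in Rust. This is a hedged reference sketch (reading from a byte slice instead of SEGMENT_RLP_RAW, and skipping canonicality checks, as the note at the top of this file allows); it returns the updated position and the payload length, mirroring the Post stack of the routines above.

// Returns (pos', len), where the next `len` bytes starting at pos' are the payload.
fn decode_string_len(rlp: &[u8], pos: usize) -> (usize, usize) {
    let first = rlp[pos];
    match first {
        // Single-byte string; the byte at `pos` is itself the payload.
        0x00..=0x7f => (pos, 1),
        // Short string: the length is in the prefix.
        0x80..=0xb7 => (pos + 1, (first - 0x80) as usize),
        // Long string: first - 0xb7 big-endian length bytes follow the prefix.
        _ => {
            let len_of_len = (first - 0xb7) as usize;
            let len = rlp[pos + 1..pos + 1 + len_of_len]
                .iter()
                .fold(0usize, |acc, &b| (acc << 8) + b as usize);
            (pos + 1 + len_of_len, len)
        }
    }
}

// Assumes first >= 0xc0, i.e. the caller only passes list prefixes.
fn decode_list_len(rlp: &[u8], pos: usize) -> (usize, usize) {
    let first = rlp[pos];
    if first <= 0xf7 {
        (pos + 1, (first - 0xc0) as usize) // short list
    } else {
        let len_of_len = (first - 0xf7) as usize; // long list
        let len = rlp[pos + 1..pos + 1 + len_of_len]
            .iter()
            .fold(0usize, |acc, &b| (acc << 8) + b as usize);
        (pos + 1 + len_of_len, len)
    }
}

fn main() {
    // "dog" encodes as 0x83 'd' 'o' 'g'.
    assert_eq!(decode_string_len(&[0x83, b'd', b'o', b'g'], 0), (1, 3));
    // A list with 3 bytes of payload has prefix 0xc3.
    assert_eq!(decode_list_len(&[0xc3, 0x01, 0x02, 0x03], 0), (1, 3));
}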
// Parse an integer of the given length. It is assumed that the integer will
// fit in a single (256-bit) word on the stack.
// Pre stack: pos, len, retdest
// Post stack: pos', int
decode_int_given_len:
JUMPDEST
%stack (pos, len, retdest) -> (pos, len, pos, retdest)
ADD
// stack: end_pos, pos, retdest
SWAP1
// stack: pos, end_pos, retdest
PUSH 0 // initial accumulator state
// stack: acc, pos, end_pos, retdest

decode_int_given_len_loop:
JUMPDEST
// stack: acc, pos, end_pos, retdest
DUP3
DUP3
EQ
// stack: pos == end_pos, acc, pos, end_pos, retdest
%jumpi(decode_int_given_len_finish)
// stack: acc, pos, end_pos, retdest
%shl_const(8)
// stack: acc << 8, pos, end_pos, retdest
DUP2
// stack: pos, acc << 8, pos, end_pos, retdest
%mload_current(@SEGMENT_RLP_RAW)
// stack: byte, acc << 8, pos, end_pos, retdest
ADD
// stack: acc', pos, end_pos, retdest
// Increment pos.
SWAP1
%add_const(1)
SWAP1
// stack: acc', pos', end_pos, retdest
%jump(decode_int_given_len_loop)

decode_int_given_len_finish:
JUMPDEST
%stack (acc, pos, end_pos, retdest) -> (retdest, pos, acc)
JUMP
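decode_int_given_len itself is a simple big-endian accumulation: shift the accumulator left by one byte and add the next byte until pos reaches end_pos. A hedged Rust sketch, with u128 standing in for the 256-bit stack word:

fn decode_int_given_len(rlp: &[u8], pos: usize, len: usize) -> (usize, u128) {
    let end_pos = pos + len;
    let mut acc: u128 = 0; // initial accumulator state
    let mut pos = pos;
    while pos != end_pos {
        acc = (acc << 8) + rlp[pos] as u128; // acc' = (acc << 8) + byte
        pos += 1;
    }
    (pos, acc)
}

fn main() {
    // 0x0102 encoded as two big-endian bytes.
    assert_eq!(decode_int_given_len(&[0x01, 0x02], 0, 2), (2, 0x0102));
}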
17 evm/src/cpu/kernel/asm/rlp/encode.asm Normal file
@@ -0,0 +1,17 @@
// RLP-encode a scalar, i.e. a variable-length integer.
// Pre stack: pos, scalar
// Post stack: (empty)
global encode_rlp_scalar:
PANIC // TODO: implement

// RLP-encode a fixed-length 160-bit string. Assumes string < 2^160.
// Pre stack: pos, string
// Post stack: (empty)
global encode_rlp_160:
PANIC // TODO: implement

// RLP-encode a fixed-length 256-bit string.
// Pre stack: pos, string
// Post stack: (empty)
global encode_rlp_256:
PANIC // TODO: implement
39 evm/src/cpu/kernel/asm/rlp/read_to_memory.asm Normal file
@@ -0,0 +1,39 @@
// Read RLP data from the prover's tape, and save it to the SEGMENT_RLP_RAW
// segment of memory.

// Pre stack: retdest
// Post stack: (empty)

global read_rlp_to_memory:
JUMPDEST
// stack: retdest
PROVER_INPUT // Read the RLP blob length from the prover tape.
// stack: len, retdest
PUSH 0 // initial position
// stack: pos, len, retdest

read_rlp_to_memory_loop:
JUMPDEST
// stack: pos, len, retdest
DUP2
DUP2
EQ
// stack: pos == len, pos, len, retdest
%jumpi(read_rlp_to_memory_finish)
// stack: pos, len, retdest
PROVER_INPUT
// stack: byte, pos, len, retdest
DUP2
// stack: pos, byte, pos, len, retdest
%mstore_current(@SEGMENT_RLP_RAW)
// stack: pos, len, retdest
%add_const(1)
// stack: pos', len, retdest
%jump(read_rlp_to_memory_loop)

read_rlp_to_memory_finish:
JUMPDEST
// stack: pos, len, retdest
%pop2
// stack: retdest
JUMP
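Conceptually, read_rlp_to_memory just copies a length-prefixed byte stream from the prover's tape into SEGMENT_RLP_RAW. A hedged sketch with the tape modeled as an iterator of words (this helper is illustrative only, not the kernel's PROVER_INPUT machinery):

fn read_rlp_to_memory(mut tape: impl Iterator<Item = u64>) -> Vec<u8> {
    // First PROVER_INPUT word is the blob length.
    let len = tape.next().expect("missing length") as usize;
    // Each subsequent word is one byte, stored at increasing offsets of a
    // buffer standing in for SEGMENT_RLP_RAW.
    let mut rlp_raw = vec![0u8; len];
    for pos in 0..len {
        rlp_raw[pos] = tape.next().expect("tape ended early") as u8;
    }
    rlp_raw
}

fn main() {
    let tape = vec![3u64, 0xc2, 0x01, 0x02].into_iter();
    assert_eq!(read_rlp_to_memory(tape), vec![0xc2, 0x01, 0x02]);
}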
2 evm/src/cpu/kernel/asm/storage/read.asm Normal file
@@ -0,0 +1,2 @@
global storage_read:
// TODO
2 evm/src/cpu/kernel/asm/storage/write.asm Normal file
@@ -0,0 +1,2 @@
global storage_write:
// TODO
@@ -1,10 +0,0 @@
// TODO: Dummy code for now.
global storage_read:
JUMPDEST
PUSH 1234
POP
// An infinite loop:
mylabel:
JUMPDEST
PUSH mylabel
JUMP
@@ -1,6 +0,0 @@
// TODO: Dummy code for now.
global storage_write:
JUMPDEST
PUSH 123 // Whatever.
POP
BYTES 0x1, 0x02, 3
@@ -0,0 +1,5 @@
// After the transaction data has been parsed into a normalized set of fields
// (see TxnField), this routine processes the transaction.

global process_normalized_txn:
// TODO
38 evm/src/cpu/kernel/asm/transactions/router.asm Normal file
@@ -0,0 +1,38 @@
// This is the entry point of transaction processing. We load the transaction
// RLP data into memory, check the transaction type, then based on the type we
// jump to the appropriate transaction parsing method.

global route_txn:
JUMPDEST
// stack: (empty)
// First load transaction data into memory, where it will be parsed.
PUSH read_txn_from_memory
%jump(read_rlp_to_memory)

// At this point, the raw txn data is in memory.
read_txn_from_memory:
JUMPDEST
// stack: (empty)

// We will peek at the first byte to determine what type of transaction this is.
// Note that type 1 and 2 transactions have a first byte of 1 and 2, respectively.
// Type 0 (legacy) transactions have no such prefix, but their RLP will have a
// first byte >= 0xc0, so there is no overlap.

PUSH 0
%mload_current(@SEGMENT_RLP_RAW)
%eq_const(1)
// stack: first_byte == 1
%jumpi(process_type_1_txn)
// stack: (empty)

PUSH 0
%mload_current(@SEGMENT_RLP_RAW)
%eq_const(2)
// stack: first_byte == 2
%jumpi(process_type_2_txn)
// stack: (empty)

// At this point, since it's not a type 1 or 2 transaction,
// it must be a legacy (aka type 0) transaction.
%jump(process_type_0_txn)
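The dispatch in route_txn comes down to: a first byte of 1 or 2 selects the corresponding typed-transaction parser, and anything else (a legacy transaction starts directly with its RLP list prefix, which is >= 0xc0) falls through to the type 0 parser. A hedged Rust restatement, with illustrative names:

#[derive(Debug, PartialEq)]
enum TxnType {
    Type0, // legacy
    Type1, // EIP-2930
    Type2, // EIP-1559
}

fn route_txn(rlp_raw: &[u8]) -> TxnType {
    match rlp_raw[0] {
        1 => TxnType::Type1,
        2 => TxnType::Type2,
        // Legacy transactions have no type prefix; their first byte is an RLP
        // list prefix (>= 0xc0), so the cases cannot collide.
        _ => TxnType::Type0,
    }
}

fn main() {
    assert_eq!(route_txn(&[0x01, 0xaa]), TxnType::Type1);
    assert_eq!(route_txn(&[0x02, 0xbb]), TxnType::Type2);
    assert_eq!(route_txn(&[0xf8, 0x6b]), TxnType::Type0); // legacy RLP list prefix
}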
189 evm/src/cpu/kernel/asm/transactions/type_0.asm Normal file
@@ -0,0 +1,189 @@
// Type 0 transactions, aka legacy transactions, have the format
// rlp([nonce, gas_price, gas_limit, to, value, data, v, r, s])
//
// The field v was originally encoded as
// 27 + y_parity
// but as of EIP 155 it can also be encoded as
// 35 + 2 * chain_id + y_parity
//
// If a chain_id is present in v, the signed data is
// keccak256(rlp([nonce, gas_price, gas_limit, to, value, data, chain_id, 0, 0]))
// otherwise, it is
// keccak256(rlp([nonce, gas_price, gas_limit, to, value, data]))

global process_type_0_txn:
JUMPDEST
// stack: (empty)
PUSH process_txn_with_len
PUSH 0 // initial pos
// stack: pos, process_txn_with_len
%jump(decode_rlp_list_len)

process_txn_with_len:
// We don't actually need the length.
%stack (pos, len) -> (pos)

PUSH store_nonce
SWAP1
// stack: pos, store_nonce
%jump(decode_rlp_scalar)

store_nonce:
%stack (pos, nonce) -> (@TXN_FIELD_NONCE, nonce, pos)
%mstore_current(@SEGMENT_NORMALIZED_TXN)

// stack: pos
PUSH store_gas_price
SWAP1
// stack: pos, store_gas_price
%jump(decode_rlp_scalar)

store_gas_price:
// For legacy transactions, we set both the
// TXN_FIELD_MAX_PRIORITY_FEE_PER_GAS and TXN_FIELD_MAX_FEE_PER_GAS
// fields to gas_price.
%stack (pos, gas_price) -> (@TXN_FIELD_MAX_PRIORITY_FEE_PER_GAS, gas_price,
@TXN_FIELD_MAX_FEE_PER_GAS, gas_price, pos)
%mstore_current(@SEGMENT_NORMALIZED_TXN)
%mstore_current(@SEGMENT_NORMALIZED_TXN)

// stack: pos
PUSH store_gas_limit
SWAP1
// stack: pos, store_gas_limit
%jump(decode_rlp_scalar)

store_gas_limit:
%stack (pos, gas_limit) -> (@TXN_FIELD_GAS_LIMIT, gas_limit, pos)
%mstore_current(@SEGMENT_NORMALIZED_TXN)

// Peek at the RLP to see if the next byte is zero.
// If so, there is no "to" field, so skip the store_to step.
// stack: pos
DUP1
%mload_current(@SEGMENT_RLP_RAW)
ISZERO
// stack: to_empty, pos
%jumpi(parse_value)

// If we got here, there is a "to" field.
PUSH store_to
SWAP1
// stack: pos, store_to
%jump(decode_rlp_scalar)

store_to:
%stack (pos, to) -> (@TXN_FIELD_TO, to, pos)
%mstore_current(@SEGMENT_NORMALIZED_TXN)
// stack: pos

parse_value:
// stack: pos
PUSH store_value
SWAP1
// stack: pos, store_value
%jump(decode_rlp_scalar)

store_value:
%stack (pos, value) -> (@TXN_FIELD_VALUE, value, pos)
%mstore_current(@SEGMENT_NORMALIZED_TXN)

// stack: pos
PUSH store_data_len
SWAP1
// stack: pos, store_data_len
%jump(decode_rlp_string_len)

store_data_len:
%stack (pos, data_len) -> (@TXN_FIELD_DATA_LEN, data_len, pos, data_len, pos, data_len)
%mstore_current(@SEGMENT_NORMALIZED_TXN)
// stack: pos, data_len, pos, data_len
ADD
// stack: new_pos, pos, data_len

// Memcpy the txn data from @SEGMENT_RLP_RAW to @SEGMENT_TXN_DATA.
PUSH parse_v
%stack (parse_v, new_pos, old_pos, data_len) -> (old_pos, data_len, parse_v, new_pos)
PUSH @SEGMENT_RLP_RAW
GET_CONTEXT
PUSH 0
PUSH @SEGMENT_TXN_DATA
GET_CONTEXT
// stack: DST, SRC, data_len, parse_v, new_pos
%jump(memcpy)

parse_v:
// stack: pos
PUSH process_v
SWAP1
// stack: pos, process_v
%jump(decode_rlp_scalar)

process_v:
// stack: pos, v
SWAP1
// stack: v, pos
DUP1
%gt_const(28)
// stack: v > 28, v, pos
%jumpi(process_v_new_style)

// We have an old style v, so y_parity = v - 27.
// No chain ID is present, so we can leave TXN_FIELD_CHAIN_ID_PRESENT and
// TXN_FIELD_CHAIN_ID with their default values of zero.
// stack: v, pos
%sub_const(27)
%stack (y_parity, pos) -> (@TXN_FIELD_Y_PARITY, y_parity, pos)
%mstore_current(@SEGMENT_NORMALIZED_TXN)

// stack: pos
%jump(parse_r)

process_v_new_style:
// stack: v, pos
// We have a new style v, so chain_id_present = 1,
// chain_id = (v - 35) / 2, and y_parity = (v - 35) % 2.
%stack (v, pos) -> (@TXN_FIELD_CHAIN_ID_PRESENT, 1, v, pos)
%mstore_current(@SEGMENT_NORMALIZED_TXN)

// stack: v, pos
%sub_const(35)
DUP1
// stack: v - 35, v - 35, pos
%div_const(2)
// stack: chain_id, v - 35, pos
PUSH @TXN_FIELD_CHAIN_ID
%mstore_current(@SEGMENT_NORMALIZED_TXN)

// stack: v - 35, pos
%mod_const(2)
// stack: y_parity, pos
PUSH @TXN_FIELD_Y_PARITY
%mstore_current(@SEGMENT_NORMALIZED_TXN)

parse_r:
// stack: pos
PUSH store_r
SWAP1
// stack: pos, store_r
%jump(decode_rlp_scalar)

store_r:
%stack (pos, r) -> (@TXN_FIELD_R, r, pos)
%mstore_current(@SEGMENT_NORMALIZED_TXN)

// stack: pos
PUSH store_s
SWAP1
// stack: pos, store_s
%jump(decode_rlp_scalar)

store_s:
%stack (pos, s) -> (@TXN_FIELD_S, s)
%mstore_current(@SEGMENT_NORMALIZED_TXN)
// stack: (empty)

// TODO: Write the signed txn data to memory, where it can be hashed and
// checked against the signature.

%jump(process_normalized_txn)
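The v handling in process_v / process_v_new_style follows EIP 155: v <= 28 is the original 27 + y_parity encoding with no chain ID, while larger values encode 35 + 2 * chain_id + y_parity. A hedged Rust restatement (struct and function names are illustrative only):

struct VFields {
    chain_id_present: bool,
    chain_id: u64,
    y_parity: u64,
}

fn parse_v(v: u64) -> VFields {
    if v <= 28 {
        // Old-style v: no chain ID, y_parity = v - 27.
        VFields { chain_id_present: false, chain_id: 0, y_parity: v - 27 }
    } else {
        // New-style v: chain_id = (v - 35) / 2, y_parity = (v - 35) % 2.
        VFields { chain_id_present: true, chain_id: (v - 35) / 2, y_parity: (v - 35) % 2 }
    }
}

fn main() {
    let f = parse_v(37); // chain_id 1, y_parity 0
    assert!(f.chain_id_present);
    assert_eq!((f.chain_id, f.y_parity), (1, 0));
    let f = parse_v(28); // legacy encoding, y_parity 1
    assert!(!f.chain_id_present);
    assert_eq!(f.y_parity, 1);
}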
12 evm/src/cpu/kernel/asm/transactions/type_1.asm Normal file
@@ -0,0 +1,12 @@
// Type 1 transactions, introduced by EIP 2930, have the format
// 0x01 || rlp([chain_id, nonce, gas_price, gas_limit, to, value, data,
//              access_list, y_parity, r, s])
//
// The signed data is
// keccak256(0x01 || rlp([chain_id, nonce, gas_price, gas_limit, to, value,
//                        data, access_list]))

global process_type_1_txn:
JUMPDEST
// stack: (empty)
PANIC // TODO: Unfinished
13 evm/src/cpu/kernel/asm/transactions/type_2.asm Normal file
@@ -0,0 +1,13 @@
// Type 2 transactions, introduced by EIP 1559, have the format
// 0x02 || rlp([chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas,
//              gas_limit, to, value, data, access_list, y_parity, r, s])
//
// The signed data is
// keccak256(0x02 || rlp([chain_id, nonce, max_priority_fee_per_gas,
//                        max_fee_per_gas, gas_limit, to, value, data,
//                        access_list]))

global process_type_2_txn:
JUMPDEST
// stack: (empty)
PANIC // TODO: Unfinished
@@ -6,60 +6,43 @@ use keccak_hash::keccak;

use crate::cpu::kernel::assembler::Kernel;
use crate::cpu::kernel::prover_input::ProverInputFn;
use crate::generation::memory::MemoryContextState;
use crate::memory::segments::Segment;

/// Halt interpreter execution whenever a jump to this offset is done.
const HALT_OFFSET: usize = 0xdeadbeef;

#[derive(Debug, Default)]
pub(crate) struct EvmMemory {
memory: Vec<u8>,
#[derive(Debug)]
pub(crate) struct InterpreterMemory {
context_memory: Vec<MemoryContextState>,
}

impl EvmMemory {
fn len(&self) -> usize {
self.memory.len()
}

/// Expand memory until `self.len() >= offset`.
fn expand(&mut self, offset: usize) {
while self.len() < offset {
self.memory.extend([0; 32]);
impl Default for InterpreterMemory {
fn default() -> Self {
Self {
context_memory: vec![MemoryContextState::default()],
}
}
}

fn mload(&mut self, offset: usize) -> U256 {
self.expand(offset + 32);
U256::from_big_endian(&self.memory[offset..offset + 32])
impl InterpreterMemory {
fn mload_general(&self, context: usize, segment: Segment, offset: usize) -> U256 {
self.context_memory[context].segments[segment as usize].get(offset)
}

fn mload8(&mut self, offset: usize) -> u8 {
self.expand(offset + 1);
self.memory[offset]
}

fn mstore(&mut self, offset: usize, value: U256) {
self.expand(offset + 32);
let value_be = {
let mut tmp = [0; 32];
value.to_big_endian(&mut tmp);
tmp
};
self.memory[offset..offset + 32].copy_from_slice(&value_be);
}

fn mstore8(&mut self, offset: usize, value: U256) {
self.expand(offset + 1);
let value_byte = value.0[0] as u8;
self.memory[offset] = value_byte;
fn mstore_general(&mut self, context: usize, segment: Segment, offset: usize, value: U256) {
self.context_memory[context].segments[segment as usize].set(offset, value)
}
}

pub struct Interpreter<'a> {
// TODO: Remove `code` and `stack` fields as they are contained in `memory`.
pub(crate) struct Interpreter<'a> {
code: &'a [u8],
jumpdests: Vec<usize>,
offset: usize,
pub(crate) stack: Vec<U256>,
pub(crate) memory: EvmMemory,
context: usize,
memory: InterpreterMemory,
prover_inputs_map: &'a HashMap<usize, ProverInputFn>,
prover_inputs: Vec<U256>,
running: bool,
@@ -89,9 +72,10 @@ pub fn run<'a>(
jumpdests: find_jumpdests(code),
offset: initial_offset,
stack: initial_stack,
memory: EvmMemory::default(),
memory: InterpreterMemory::default(),
prover_inputs_map: prover_inputs,
prover_inputs: Vec::new(),
context: 0,
running: true,
};

@@ -210,13 +194,13 @@ impl<'a> Interpreter<'a> {
0xf3 => todo!(), // "RETURN",
0xf4 => todo!(), // "DELEGATECALL",
0xf5 => todo!(), // "CREATE2",
0xf6 => todo!(), // "GET_CONTEXT",
0xf7 => todo!(), // "SET_CONTEXT",
0xf6 => self.run_get_context(), // "GET_CONTEXT",
0xf7 => self.run_set_context(), // "SET_CONTEXT",
0xf8 => todo!(), // "CONSUME_GAS",
0xf9 => todo!(), // "EXIT_KERNEL",
0xfa => todo!(), // "STATICCALL",
0xfb => todo!(), // "MLOAD_GENERAL",
0xfc => todo!(), // "MSTORE_GENERAL",
0xfb => self.run_mload_general(), // "MLOAD_GENERAL",
0xfc => self.run_mstore_general(), // "MSTORE_GENERAL",
0xfd => todo!(), // "REVERT",
0xfe => bail!("Executed INVALID"), // "INVALID",
0xff => todo!(), // "SELFDESTRUCT",
@@ -337,7 +321,11 @@ impl<'a> Interpreter<'a> {
let offset = self.pop().as_usize();
let size = self.pop().as_usize();
let bytes = (offset..offset + size)
.map(|i| self.memory.mload8(i))
.map(|i| {
self.memory
.mload_general(self.context, Segment::MainMemory, i)
.byte(0)
})
.collect::<Vec<_>>();
let hash = keccak(bytes);
self.push(hash.into_uint());
@@ -359,21 +347,39 @@ impl<'a> Interpreter<'a> {
}

fn run_mload(&mut self) {
let offset = self.pop();
let value = self.memory.mload(offset.as_usize());
let offset = self.pop().as_usize();
let value = U256::from_big_endian(
&(0..32)
.map(|i| {
self.memory
.mload_general(self.context, Segment::MainMemory, offset + i)
.byte(0)
})
.collect::<Vec<_>>(),
);
self.push(value);
}

fn run_mstore(&mut self) {
let offset = self.pop();
let offset = self.pop().as_usize();
let value = self.pop();
self.memory.mstore(offset.as_usize(), value);
let mut bytes = [0; 32];
value.to_big_endian(&mut bytes);
for (i, byte) in (0..32).zip(bytes) {
self.memory
.mstore_general(self.context, Segment::MainMemory, offset + i, byte.into());
}
}

fn run_mstore8(&mut self) {
let offset = self.pop();
let offset = self.pop().as_usize();
let value = self.pop();
self.memory.mstore8(offset.as_usize(), value);
self.memory.mstore_general(
self.context,
Segment::MainMemory,
offset,
value.byte(0).into(),
);
}

fn run_jump(&mut self) {
@@ -413,6 +419,33 @@ impl<'a> Interpreter<'a> {
let len = self.stack.len();
self.stack.swap(len - 1, len - n as usize - 1);
}

fn run_get_context(&mut self) {
self.push(self.context.into());
}

fn run_set_context(&mut self) {
let x = self.pop();
self.context = x.as_usize();
}

fn run_mload_general(&mut self) {
let context = self.pop().as_usize();
let segment = Segment::all()[self.pop().as_usize()];
let offset = self.pop().as_usize();
let value = self.memory.mload_general(context, segment, offset);
assert!(value.bits() <= segment.bit_range());
self.push(value);
}

fn run_mstore_general(&mut self) {
let context = self.pop().as_usize();
let segment = Segment::all()[self.pop().as_usize()];
let offset = self.pop().as_usize();
let value = self.pop();
assert!(value.bits() <= segment.bit_range());
self.memory.mstore_general(context, segment, offset, value);
}
}

/// Return the (ordered) JUMPDEST offsets in the code.
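The run_mload / run_mstore changes above reflect the new memory model: MainMemory holds one byte per cell, so a 256-bit word at `offset` is spread big-endian over cells offset..offset + 32. A hedged sketch of that convention, with a plain Vec<u8> standing in for MemorySegmentState:

fn mstore(cells: &mut Vec<u8>, offset: usize, word: [u8; 32]) {
    // Grow the segment on demand, as MemorySegmentState::set does.
    if cells.len() < offset + 32 {
        cells.resize(offset + 32, 0);
    }
    cells[offset..offset + 32].copy_from_slice(&word);
}

fn mload(cells: &[u8], offset: usize) -> [u8; 32] {
    let mut word = [0u8; 32];
    for i in 0..32 {
        // Missing cells read as zero, like MemorySegmentState::get.
        word[i] = cells.get(offset + i).copied().unwrap_or(0);
    }
    word
}

fn main() {
    let mut cells = Vec::new();
    let mut word = [0u8; 32];
    word[31] = 0x42; // the word 0x42, big-endian
    mstore(&mut cells, 7, word);
    assert_eq!(mload(&cells, 7), word);
    assert_eq!(cells[7 + 31], 0x42); // the last byte cell holds the low byte
}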
@@ -438,6 +471,7 @@ mod tests {
use hex_literal::hex;

use crate::cpu::kernel::interpreter::{run, Interpreter};
use crate::memory::segments::Segment;

#[test]
fn test_run() -> anyhow::Result<()> {
@@ -474,7 +508,14 @@ mod tests {
let run = run(&code, 0, vec![], &pis)?;
let Interpreter { stack, memory, .. } = run;
assert_eq!(stack, vec![0xff.into(), 0xff00.into()]);
assert_eq!(&memory.memory, &hex!("00000000000000000000000000000000000000000000000000000000000000ff0000000000000042000000000000000000000000000000000000000000000000"));
assert_eq!(
memory.context_memory[0].segments[Segment::MainMemory as usize].get(0x27),
0x42.into()
);
assert_eq!(
memory.context_memory[0].segments[Segment::MainMemory as usize].get(0x1f),
0xff.into()
);
Ok(())
}
}

@@ -7,6 +7,7 @@ mod opcodes;
mod parser;
mod prover_input;
mod stack_manipulation;
mod txn_fields;

#[cfg(test)]
mod tests;
59 evm/src/cpu/kernel/txn_fields.rs Normal file
@@ -0,0 +1,59 @@
/// These are normalized transaction fields, i.e. not specific to any transaction type.
#[allow(dead_code)]
#[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)]
pub(crate) enum NormalizedTxnField {
/// Whether a chain ID was present in the txn data. Type 0 transactions with v=27 or v=28 have
/// no chain ID. This affects what fields get signed.
ChainIdPresent = 0,
ChainId = 1,
Nonce = 2,
MaxPriorityFeePerGas = 3,
MaxFeePerGas = 4,
GasLimit = 5,
To = 6,
Value = 7,
/// The length of the data field. The data itself is stored in another segment.
DataLen = 8,
YParity = 9,
R = 10,
S = 11,
}

impl NormalizedTxnField {
pub(crate) const COUNT: usize = 12;

pub(crate) fn all() -> [Self; Self::COUNT] {
[
Self::ChainIdPresent,
Self::ChainId,
Self::Nonce,
Self::MaxPriorityFeePerGas,
Self::MaxFeePerGas,
Self::GasLimit,
Self::To,
Self::Value,
Self::DataLen,
Self::YParity,
Self::R,
Self::S,
]
}

/// The variable name that gets passed into kernel assembly code.
pub(crate) fn var_name(&self) -> &'static str {
match self {
NormalizedTxnField::ChainIdPresent => "TXN_FIELD_CHAIN_ID_PRESENT",
NormalizedTxnField::ChainId => "TXN_FIELD_CHAIN_ID",
NormalizedTxnField::Nonce => "TXN_FIELD_NONCE",
NormalizedTxnField::MaxPriorityFeePerGas => "TXN_FIELD_MAX_PRIORITY_FEE_PER_GAS",
NormalizedTxnField::MaxFeePerGas => "TXN_FIELD_MAX_FEE_PER_GAS",
NormalizedTxnField::GasLimit => "TXN_FIELD_GAS_LIMIT",
NormalizedTxnField::To => "TXN_FIELD_TO",
NormalizedTxnField::Value => "TXN_FIELD_VALUE",
NormalizedTxnField::DataLen => "TXN_FIELD_DATA_LEN",
NormalizedTxnField::YParity => "TXN_FIELD_Y_PARITY",
NormalizedTxnField::R => "TXN_FIELD_R",
NormalizedTxnField::S => "TXN_FIELD_S",
}
}
}
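These discriminants double as offsets into SEGMENT_NORMALIZED_TXN, and var_name is what evm_constants (shown earlier in this diff) exposes to the assembler, so a reference like @TXN_FIELD_NONCE in kernel asm resolves to offset 2. A small illustrative sketch of that mapping, with a plain HashMap standing in for the real evm_constants:

use std::collections::HashMap;

fn main() {
    // Each field's var_name maps to its discriminant.
    let fields = [
        ("TXN_FIELD_CHAIN_ID_PRESENT", 0u32),
        ("TXN_FIELD_CHAIN_ID", 1),
        ("TXN_FIELD_NONCE", 2),
        // ... remaining fields follow the same pattern.
    ];
    let constants: HashMap<&str, u32> = fields.into_iter().collect();
    assert_eq!(constants["TXN_FIELD_NONCE"], 2);
}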
@@ -34,14 +34,14 @@ pub(crate) struct MemorySegmentState {
}

impl MemorySegmentState {
pub(super) fn get(&self, virtual_addr: usize) -> U256 {
pub(crate) fn get(&self, virtual_addr: usize) -> U256 {
self.content
.get(virtual_addr)
.copied()
.unwrap_or(U256::zero())
}

pub(super) fn set(&mut self, virtual_addr: usize, value: U256) {
pub(crate) fn set(&mut self, virtual_addr: usize, value: U256) {
if virtual_addr >= self.content.len() {
self.content.resize(virtual_addr + 1, U256::zero());
}

@@ -10,7 +10,7 @@ use crate::cpu::columns::NUM_CPU_COLUMNS;
use crate::generation::state::GenerationState;
use crate::util::trace_rows_to_poly_values;

mod memory;
pub(crate) mod memory;
pub(crate) mod state;

/// A piece of data which has been encoded using Recursive Length Prefix (RLP) serialization.

@@ -1,4 +1,4 @@
#[allow(dead_code)] // TODO: Not all segments are used yet.
#[allow(dead_code)]
#[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)]
pub(crate) enum Segment {
/// Contains EVM bytecode.
@@ -17,14 +17,16 @@ pub(crate) enum Segment {
/// General purpose kernel memory, used by various kernel functions.
/// In general, calling a helper function can result in this memory being clobbered.
KernelGeneral = 6,
/// Contains transaction data (after it's parsed and converted to a standard format).
TxnData = 7,
/// Contains normalized transaction fields; see `TxnField`.
TxnFields = 7,
/// Contains the data field of a transaction.
TxnData = 8,
/// Raw RLP data.
RlpRaw = 8,
RlpRaw = 9,
}

impl Segment {
pub(crate) const COUNT: usize = 9;
pub(crate) const COUNT: usize = 10;

pub(crate) fn all() -> [Self; Self::COUNT] {
[
@@ -35,6 +37,7 @@ impl Segment {
Self::Returndata,
Self::Metadata,
Self::KernelGeneral,
Self::TxnFields,
Self::TxnData,
Self::RlpRaw,
]
@@ -50,8 +53,25 @@ impl Segment {
Segment::Returndata => "SEGMENT_RETURNDATA",
Segment::Metadata => "SEGMENT_METADATA",
Segment::KernelGeneral => "SEGMENT_KERNEL_GENERAL",
Segment::TxnFields => "SEGMENT_NORMALIZED_TXN",
Segment::TxnData => "SEGMENT_TXN_DATA",
Segment::RlpRaw => "SEGMENT_RLP_RAW",
}
}

#[allow(dead_code)]
pub(crate) fn bit_range(&self) -> usize {
match self {
Segment::Code => 8,
Segment::Stack => 256,
Segment::MainMemory => 8,
Segment::Calldata => 8,
Segment::Returndata => 8,
Segment::Metadata => 256,
Segment::KernelGeneral => 256,
Segment::TxnFields => 256,
Segment::TxnData => 256,
Segment::RlpRaw => 8,
}
}
}
@@ -33,8 +33,8 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
pub fn new() -> Challenger<F, H> {
Challenger {
sponge_state: [F::ZERO; SPONGE_WIDTH],
input_buffer: Vec::new(),
output_buffer: Vec::new(),
input_buffer: Vec::with_capacity(SPONGE_RATE),
output_buffer: Vec::with_capacity(SPONGE_RATE),
_phantom: Default::default(),
}
}
@@ -44,6 +44,10 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
self.output_buffer.clear();

self.input_buffer.push(element);

if self.input_buffer.len() == SPONGE_RATE {
self.duplexing();
}
}

pub fn observe_extension_element<const D: usize>(&mut self, element: &F::Extension)
@@ -79,12 +83,10 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
}

pub fn get_challenge(&mut self) -> F {
self.absorb_buffered_inputs();

if self.output_buffer.is_empty() {
// Evaluate the permutation to produce `r` new outputs.
self.sponge_state = H::Permutation::permute(self.sponge_state);
self.output_buffer = self.sponge_state[0..SPONGE_RATE].to_vec();
// If we have buffered inputs, we must perform a duplexing so that the challenge will
// reflect them. Or if we've run out of outputs, we must perform a duplexing to get more.
if !self.input_buffer.is_empty() || self.output_buffer.is_empty() {
self.duplexing();
}

self.output_buffer
@@ -125,27 +127,24 @@ impl<F: RichField, H: Hasher<F>> Challenger<F, H> {
.collect()
}

/// Absorb any buffered inputs. After calling this, the input buffer will be empty.
fn absorb_buffered_inputs(&mut self) {
if self.input_buffer.is_empty() {
return;
/// Absorb any buffered inputs. After calling this, the input buffer will be empty, and the
/// output buffer will be full.
fn duplexing(&mut self) {
assert!(self.input_buffer.len() <= SPONGE_RATE);

// Overwrite the first r elements with the inputs. This differs from a standard sponge,
// where we would xor or add in the inputs. This is a well-known variant, though,
// sometimes called "overwrite mode".
for (i, input) in self.input_buffer.drain(..).enumerate() {
self.sponge_state[i] = input;
}

for input_chunk in self.input_buffer.chunks(SPONGE_RATE) {
// Overwrite the first r elements with the inputs. This differs from a standard sponge,
// where we would xor or add in the inputs. This is a well-known variant, though,
// sometimes called "overwrite mode".
for (i, &input) in input_chunk.iter().enumerate() {
self.sponge_state[i] = input;
}
// Apply the permutation.
self.sponge_state = H::Permutation::permute(self.sponge_state);

// Apply the permutation.
self.sponge_state = H::Permutation::permute(self.sponge_state);
}

self.output_buffer = self.sponge_state[0..SPONGE_RATE].to_vec();

self.input_buffer.clear();
self.output_buffer.clear();
self.output_buffer
.extend_from_slice(&self.sponge_state[0..SPONGE_RATE]);
}
}
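The new duplexing method implements overwrite-mode duplexing: buffered inputs overwrite the first SPONGE_RATE state elements (rather than being xored or added in), the state is permuted once, and the first SPONGE_RATE elements become the fresh output buffer. A toy sketch with u64 elements and a stand-in permutation (not Poseidon; the constants and names below are illustrative only):

const RATE: usize = 2;
const WIDTH: usize = 3;

fn toy_permutation(state: [u64; WIDTH]) -> [u64; WIDTH] {
    // Placeholder for H::Permutation::permute; any fixed mixing works here.
    let mix = state.iter().fold(0u64, |a, &x| a.wrapping_mul(31).wrapping_add(x));
    [state[1] ^ mix, state[2] ^ mix, state[0].wrapping_add(mix)]
}

struct ToyChallenger {
    sponge_state: [u64; WIDTH],
    input_buffer: Vec<u64>,
    output_buffer: Vec<u64>,
}

impl ToyChallenger {
    fn duplexing(&mut self) {
        assert!(self.input_buffer.len() <= RATE);
        // Overwrite mode: inputs replace the first RATE elements (no xor/add).
        for (i, input) in self.input_buffer.drain(..).enumerate() {
            self.sponge_state[i] = input;
        }
        self.sponge_state = toy_permutation(self.sponge_state);
        self.output_buffer = self.sponge_state[0..RATE].to_vec();
    }
}

fn main() {
    let mut c = ToyChallenger {
        sponge_state: [0; WIDTH],
        input_buffer: vec![7, 9],
        output_buffer: Vec::new(),
    };
    c.duplexing();
    assert!(c.input_buffer.is_empty());
    assert_eq!(c.output_buffer.len(), RATE);
}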
@@ -155,7 +154,9 @@ impl<F: RichField, H: AlgebraicHasher<F>> Default for Challenger<F, H> {
}
}

/// A recursive version of `Challenger`.
/// A recursive version of `Challenger`. The main difference is that `RecursiveChallenger`'s input
/// buffer can grow beyond `SPONGE_RATE`. This is so that `observe_element` etc do not need access
/// to the `CircuitBuilder`.
pub struct RecursiveChallenger<F: RichField + Extendable<D>, H: AlgebraicHasher<F>, const D: usize>
{
sponge_state: [Target; SPONGE_WIDTH],
@@ -248,7 +249,8 @@ impl<F: RichField + Extendable<D>, H: AlgebraicHasher<F>, const D: usize>
self.get_n_challenges(builder, D).try_into().unwrap()
}
/// Absorb any buffered inputs. After calling this, the input buffer will be empty.
/// Absorb any buffered inputs. After calling this, the input buffer will be empty, and the
/// output buffer will be full.
fn absorb_buffered_inputs(&mut self, builder: &mut CircuitBuilder<F, D>) {
if self.input_buffer.is_empty() {
return;