Main function, txn processing loop

Daniel Lubarov 2022-09-29 23:09:32 -07:00
parent 58256ce052
commit c721155e23
15 changed files with 105 additions and 56 deletions


@@ -338,7 +338,7 @@ mod tests {
     row.opcode_bits = bits_from_opcode(0x5b);
     row.is_cpu_cycle = F::ONE;
     row.is_kernel_mode = F::ONE;
-    row.program_counter = F::from_canonical_usize(KERNEL.global_labels["route_txn"]);
+    row.program_counter = F::from_canonical_usize(KERNEL.global_labels["main"]);
     cpu_stark.generate(row.borrow_mut());
     cpu_trace_rows.push(row.into());
 }
@@ -377,8 +377,8 @@ mod tests {
     row.is_cpu_cycle = F::ONE;
     row.is_kernel_mode = F::ONE;
-    // Since these are the first cycle rows, we must start with PC=route_txn then increment.
-    row.program_counter = F::from_canonical_usize(KERNEL.global_labels["route_txn"] + i);
+    // Since these are the first cycle rows, we must start with PC=main then increment.
+    row.program_counter = F::from_canonical_usize(KERNEL.global_labels["main"] + i);
     row.opcode_bits = bits_from_opcode(
         if logic_trace[logic::columns::IS_AND].values[i] != F::ZERO {
             0x16


@@ -69,12 +69,12 @@ pub fn eval_packed_generic<P: PackedField>(
     );
     // If a non-CPU cycle row is followed by a CPU cycle row, then:
-    // - the `program_counter` of the CPU cycle row is `route_txn` (the entry point of our kernel),
+    // - the `program_counter` of the CPU cycle row is `main` (the entry point of our kernel),
     // - execution is in kernel mode, and
     // - the stack is empty.
     let is_last_noncpu_cycle = (lv.is_cpu_cycle - P::ONES) * nv.is_cpu_cycle;
     let pc_diff =
-        nv.program_counter - P::Scalar::from_canonical_usize(KERNEL.global_labels["route_txn"]);
+        nv.program_counter - P::Scalar::from_canonical_usize(KERNEL.global_labels["main"]);
     yield_constr.constraint_transition(is_last_noncpu_cycle * pc_diff);
     yield_constr.constraint_transition(is_last_noncpu_cycle * (nv.is_kernel_mode - P::ONES));
     yield_constr.constraint_transition(is_last_noncpu_cycle * nv.stack_len);
@@ -118,18 +118,18 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
     }
     // If a non-CPU cycle row is followed by a CPU cycle row, then:
-    // - the `program_counter` of the CPU cycle row is `route_txn` (the entry point of our kernel),
+    // - the `program_counter` of the CPU cycle row is `main` (the entry point of our kernel),
     // - execution is in kernel mode, and
     // - the stack is empty.
     {
         let is_last_noncpu_cycle =
             builder.mul_sub_extension(lv.is_cpu_cycle, nv.is_cpu_cycle, nv.is_cpu_cycle);
-        // Start at `route_txn`.
-        let route_txn = builder.constant_extension(F::Extension::from_canonical_usize(
-            KERNEL.global_labels["route_txn"],
+        // Start at `main`.
+        let main = builder.constant_extension(F::Extension::from_canonical_usize(
+            KERNEL.global_labels["main"],
         ));
-        let pc_diff = builder.sub_extension(nv.program_counter, route_txn);
+        let pc_diff = builder.sub_extension(nv.program_counter, main);
         let pc_constr = builder.mul_extension(is_last_noncpu_cycle, pc_diff);
         yield_constr.constraint_transition(builder, pc_constr);
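
A quick spot-check of the `is_last_noncpu_cycle` selector used in both evaluators (illustrative only, over boolean flag values; not part of the commit): `(lv.is_cpu_cycle - 1) * nv.is_cpu_cycle` is nonzero exactly when a non-CPU-cycle row is followed by a CPU-cycle row, which is when the PC, kernel-mode, and stack constraints above must bind.

    // Standalone check that the selector is nonzero only on the non-CPU -> CPU transition row.
    fn main() {
        for lv in [0i64, 1] {
            for nv in [0i64, 1] {
                let selector = (lv - 1) * nv;
                // Selector is nonzero (namely -1) iff lv = 0 (non-CPU cycle) and nv = 1 (CPU cycle).
                assert_eq!(selector != 0, lv == 0 && nv == 1);
            }
        }
        println!("selector behaves as described");
    }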


@@ -33,6 +33,7 @@ pub(crate) fn combined_kernel() -> Kernel {
         include_str!("asm/curve/secp256k1/moddiv.asm"),
         include_str!("asm/exp.asm"),
         include_str!("asm/halt.asm"),
+        include_str!("asm/main.asm"),
         include_str!("asm/memory/core.asm"),
         include_str!("asm/memory/memcpy.asm"),
         include_str!("asm/memory/metadata.asm"),


@@ -1,8 +0,0 @@
-// Computes the Keccak256 hash of some arbitrary bytes in memory.
-// The given memory values should be in the range of a byte.
-//
-// Pre stack: ADDR, len, retdest
-// Post stack: hash
-global keccak_general:
-    // stack: ADDR, len
-    // TODO


@@ -0,0 +1,8 @@
+global main:
+    // If the prover has no more txns for us to process, halt.
+    PROVER_INPUT(end_of_txns)
+    %jumpi(halt)
+
+    // Call route_txn, returning to main to continue the loop.
+    PUSH main
+    %jump(route_txn)
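
For orientation, the loop in asm/main.asm together with the `end_of_txns` prover input behaves roughly like the standalone Rust sketch below. The `Prover` type, its fields, and the `route_txn` stub are hypothetical stand-ins for illustration, not code from this commit; the `end_of_txns` method mirrors the `run_end_of_txns` logic added further down.

    // Rough Rust analogue of asm/main.asm's control flow (illustrative sketch only).
    struct Prover {
        signed_txns: Vec<Vec<u8>>, // hypothetical stand-in for GenerationInputs::signed_txns
        next_txn_index: usize,
    }

    impl Prover {
        // Mirrors PROVER_INPUT(end_of_txns): true once all txns are consumed,
        // otherwise advances to the next txn.
        fn end_of_txns(&mut self) -> bool {
            if self.next_txn_index == self.signed_txns.len() {
                true
            } else {
                self.next_txn_index += 1;
                false
            }
        }
    }

    fn route_txn(_prover: &mut Prover) {
        // Stand-in for the kernel's route_txn: parse and process one transaction.
    }

    fn main() {
        let mut prover = Prover { signed_txns: vec![vec![0x01], vec![0x02]], next_txn_index: 0 };
        // main: loop until PROVER_INPUT(end_of_txns) signals completion, then halt.
        while !prover.end_of_txns() {
            route_txn(&mut prover); // PUSH main; %jump(route_txn) returns here.
        }
    }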


@@ -6,7 +6,7 @@
 global read_rlp_to_memory:
     // stack: retdest
-    PROVER_INPUT // Read the RLP blob length from the prover tape.
+    PROVER_INPUT(rlp) // Read the RLP blob length from the prover tape.
     // stack: len, retdest
     PUSH 0 // initial position
     // stack: pos, len, retdest
@@ -19,7 +19,7 @@ read_rlp_to_memory_loop:
     // stack: pos == len, pos, len, retdest
     %jumpi(read_rlp_to_memory_finish)
     // stack: pos, len, retdest
-    PROVER_INPUT
+    PROVER_INPUT(rlp)
     // stack: byte, pos, len, retdest
     DUP2
     // stack: pos, byte, pos, len, retdest


@@ -3,14 +3,14 @@
 // jump to the appropriate transaction parsing method.
 global route_txn:
-    // stack: (empty)
+    // stack: retdest
     // First load transaction data into memory, where it will be parsed.
     PUSH read_txn_from_memory
     %jump(read_rlp_to_memory)

 // At this point, the raw txn data is in memory.
 read_txn_from_memory:
-    // stack: (empty)
+    // stack: retdest
     // We will peek at the first byte to determine what type of transaction this is.
     // Note that type 1 and 2 transactions have a first byte of 1 and 2, respectively.
@@ -20,17 +20,17 @@ read_txn_from_memory:
     PUSH 0
     %mload_current(@SEGMENT_RLP_RAW)
     %eq_const(1)
-    // stack: first_byte == 1
+    // stack: first_byte == 1, retdest
     %jumpi(process_type_1_txn)
-    // stack: (empty)
+    // stack: retdest
     PUSH 0
     %mload_current(@SEGMENT_RLP_RAW)
     %eq_const(2)
-    // stack: first_byte == 2
+    // stack: first_byte == 2, retdest
     %jumpi(process_type_2_txn)
-    // stack: (empty)
+    // stack: retdest
     // At this point, since it's not a type 1 or 2 transaction,
     // it must be a legacy (aka type 0) transaction.
-    %jump(process_type_2_txn)
+    %jump(process_type_0_txn)


@@ -12,15 +12,15 @@
 // keccak256(rlp([nonce, gas_price, gas_limit, to, value, data]))
 global process_type_0_txn:
-    // stack: (empty)
+    // stack: retdest
     PUSH 0 // initial pos
-    // stack: pos
+    // stack: pos, retdest
     %decode_rlp_list_len
     // We don't actually need the length.
     %stack (pos, len) -> (pos)

     // Decode the nonce and store it.
-    // stack: pos
+    // stack: pos, retdest
     %decode_rlp_scalar
     %stack (pos, nonce) -> (nonce, pos)
     %mstore_txn_field(@TXN_FIELD_NONCE)
@@ -29,38 +29,38 @@ global process_type_0_txn:
     // For legacy transactions, we set both the
     // TXN_FIELD_MAX_PRIORITY_FEE_PER_GAS and TXN_FIELD_MAX_FEE_PER_GAS
     // fields to gas_price.
-    // stack: pos
+    // stack: pos, retdest
     %decode_rlp_scalar
     %stack (pos, gas_price) -> (gas_price, gas_price, pos)
     %mstore_txn_field(@TXN_FIELD_MAX_PRIORITY_FEE_PER_GAS)
     %mstore_txn_field(@TXN_FIELD_MAX_FEE_PER_GAS)

     // Decode the gas limit and store it.
-    // stack: pos
+    // stack: pos, retdest
     %decode_rlp_scalar
     %stack (pos, gas_limit) -> (gas_limit, pos)
     %mstore_txn_field(@TXN_FIELD_GAS_LIMIT)

     // Decode the "to" field and store it.
-    // stack: pos
+    // stack: pos, retdest
     %decode_rlp_scalar
     %stack (pos, to) -> (to, pos)
     %mstore_txn_field(@TXN_FIELD_TO)

     // Decode the value field and store it.
-    // stack: pos
+    // stack: pos, retdest
     %decode_rlp_scalar
     %stack (pos, value) -> (value, pos)
     %mstore_txn_field(@TXN_FIELD_VALUE)

     // Decode the data length, store it, and compute new_pos after any data.
-    // stack: pos
+    // stack: pos, retdest
     %decode_rlp_string_len
     %stack (pos, data_len) -> (data_len, pos, data_len, pos, data_len)
     %mstore_txn_field(@TXN_FIELD_DATA_LEN)
-    // stack: pos, data_len, pos, data_len
+    // stack: pos, data_len, pos, data_len, retdest
     ADD
-    // stack: new_pos, pos, data_len
+    // stack: new_pos, pos, data_len, retdest

     // Memcpy the txn data from @SEGMENT_RLP_RAW to @SEGMENT_TXN_DATA.
     PUSH parse_v
@@ -70,62 +70,62 @@ global process_type_0_txn:
     PUSH 0
     PUSH @SEGMENT_TXN_DATA
     GET_CONTEXT
-    // stack: DST, SRC, data_len, parse_v, new_pos
+    // stack: DST, SRC, data_len, parse_v, new_pos, retdest
     %jump(memcpy)

parse_v:
-    // stack: pos
+    // stack: pos, retdest
     %decode_rlp_scalar
-    // stack: pos, v
+    // stack: pos, v, retdest
     SWAP1
-    // stack: v, pos
+    // stack: v, pos, retdest
     DUP1
     %gt_const(28)
-    // stack: v > 28, v, pos
+    // stack: v > 28, v, pos, retdest
     %jumpi(process_v_new_style)

     // We have an old style v, so y_parity = v - 27.
     // No chain ID is present, so we can leave TXN_FIELD_CHAIN_ID_PRESENT and
     // TXN_FIELD_CHAIN_ID with their default values of zero.
-    // stack: v, pos
+    // stack: v, pos, retdest
     %sub_const(27)
     %stack (y_parity, pos) -> (y_parity, pos)
     %mstore_txn_field(@TXN_FIELD_Y_PARITY)
-    // stack: pos
+    // stack: pos, retdest
     %jump(parse_r)

process_v_new_style:
-    // stack: v, pos
+    // stack: v, pos, retdest
     // We have a new style v, so chain_id_present = 1,
     // chain_id = (v - 35) / 2, and y_parity = (v - 35) % 2.
     %stack (v, pos) -> (1, v, pos)
     %mstore_txn_field(@TXN_FIELD_CHAIN_ID_PRESENT)
-    // stack: v, pos
+    // stack: v, pos, retdest
     %sub_const(35)
     DUP1
-    // stack: v - 35, v - 35, pos
+    // stack: v - 35, v - 35, pos, retdest
     %div_const(2)
-    // stack: chain_id, v - 35, pos
+    // stack: chain_id, v - 35, pos, retdest
     %mstore_txn_field(@TXN_FIELD_CHAIN_ID)
-    // stack: v - 35, pos
+    // stack: v - 35, pos, retdest
     %mod_const(2)
-    // stack: y_parity, pos
+    // stack: y_parity, pos, retdest
     %mstore_txn_field(@TXN_FIELD_Y_PARITY)

parse_r:
-    // stack: pos
+    // stack: pos, retdest
     %decode_rlp_scalar
     %stack (pos, r) -> (r, pos)
     %mstore_txn_field(@TXN_FIELD_R)
-    // stack: pos
+    // stack: pos, retdest
     %decode_rlp_scalar
     %stack (pos, s) -> (s)
     %mstore_txn_field(@TXN_FIELD_S)
-    // stack: (empty)
+    // stack: retdest

     // TODO: Write the signed txn data to memory, where it can be hashed and
     // checked against the signature.
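
As a quick sanity check on the new-style `v` decoding in `process_v_new_style` (illustrative only, not part of the commit): for an EIP-155 signature on chain_id = 1, `v` is 37 or 38, and `chain_id = (v - 35) / 2`, `y_parity = (v - 35) % 2` recover (1, 0) and (1, 1) respectively. A standalone Rust check of the arithmetic:

    // Check of the v-decoding formulas used above: chain_id = (v - 35) / 2, y_parity = (v - 35) % 2.
    fn decode_v_new_style(v: u64) -> (u64, u64) {
        let t = v - 35;
        (t / 2, t % 2)
    }

    fn main() {
        assert_eq!(decode_v_new_style(37), (1, 0)); // chain_id 1, even y
        assert_eq!(decode_v_new_style(38), (1, 1)); // chain_id 1, odd y
        println!("v-decoding checks passed");
    }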


@@ -7,5 +7,5 @@
 // data, access_list]))
 global process_type_1_txn:
-    // stack: (empty)
+    // stack: retdest
     PANIC // TODO: Unfinished


@@ -8,5 +8,5 @@
 // access_list]))
 global process_type_2_txn:
-    // stack: (empty)
+    // stack: retdest
     PANIC // TODO: Unfinished


@@ -12,7 +12,8 @@ fn process_type_0_txn() -> Result<()> {
     let process_type_0_txn = KERNEL.global_labels["process_type_0_txn"];
     let process_normalized_txn = KERNEL.global_labels["process_normalized_txn"];
-    let mut interpreter = Interpreter::new_with_kernel(process_type_0_txn, vec![]);
+    let retaddr = 0xDEADBEEFu32.into();
+    let mut interpreter = Interpreter::new_with_kernel(process_type_0_txn, vec![retaddr]);

     // When we reach process_normalized_txn, we're done with parsing and normalizing.
     // Processing normalized transactions is outside the scope of this test.


@@ -23,6 +23,7 @@ use crate::util::trace_rows_to_poly_values;
 pub(crate) mod memory;
 pub(crate) mod mpt;
 pub(crate) mod prover_input;
+pub(crate) mod rlp;
 pub(crate) mod state;

 #[derive(Clone, Debug, Deserialize, Serialize, Default)]


@@ -24,12 +24,24 @@ impl<F: Field> GenerationState<F> {
     #[allow(unused)] // TODO: Should be used soon.
     pub(crate) fn prover_input(&mut self, stack: &[U256], input_fn: &ProverInputFn) -> U256 {
         match input_fn.0[0].as_str() {
+            "end_of_txns" => self.run_end_of_txns(),
             "ff" => self.run_ff(stack, input_fn),
             "mpt" => self.run_mpt(),
+            "rlp" => self.run_rlp(),
             _ => panic!("Unrecognized prover input function."),
         }
     }

+    fn run_end_of_txns(&mut self) -> U256 {
+        let end = self.next_txn_index == self.inputs.signed_txns.len();
+        if end {
+            U256::one()
+        } else {
+            self.next_txn_index += 1;
+            U256::zero()
+        }
+    }
+
     /// Finite field operations.
     fn run_ff(&self, stack: &[U256], input_fn: &ProverInputFn) -> U256 {
         let field = EvmField::from_str(input_fn.0[1].as_str()).unwrap();
@@ -44,6 +56,13 @@ impl<F: Field> GenerationState<F> {
             .pop()
             .unwrap_or_else(|| panic!("Out of MPT data"))
     }
+
+    /// RLP data.
+    fn run_rlp(&mut self) -> U256 {
+        self.rlp_prover_inputs
+            .pop()
+            .unwrap_or_else(|| panic!("Out of RLP data"))
+    }
 }

 enum EvmField {

evm/src/generation/rlp.rs (new file)

@@ -0,0 +1,18 @@
+use ethereum_types::U256;
+
+pub(crate) fn all_rlp_prover_inputs_reversed(signed_txns: &[Vec<u8>]) -> Vec<U256> {
+    let mut inputs = all_rlp_prover_inputs(signed_txns);
+    inputs.reverse();
+    inputs
+}
+
+fn all_rlp_prover_inputs(signed_txns: &[Vec<u8>]) -> Vec<U256> {
+    let mut prover_inputs = vec![];
+    for txn in signed_txns {
+        prover_inputs.push(txn.len().into());
+        for &byte in txn {
+            prover_inputs.push(byte.into());
+        }
+    }
+    prover_inputs
+}
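
To illustrate the ordering this produces (a standalone sketch, using u64 in place of ethereum_types::U256 so it runs without dependencies): for two signed txns [0xAA] and [0xBB, 0xCC], the reversed stream pops as 1, 0xAA, 2, 0xBB, 0xCC, i.e. each transaction's length followed by its bytes, which is what read_rlp_to_memory consumes via PROVER_INPUT(rlp).

    // Illustrative re-implementation of the RLP prover-input ordering over u64.
    fn all_rlp_prover_inputs_reversed(signed_txns: &[Vec<u8>]) -> Vec<u64> {
        let mut inputs: Vec<u64> = vec![];
        for txn in signed_txns {
            inputs.push(txn.len() as u64); // length first, as read_rlp_to_memory expects
            inputs.extend(txn.iter().map(|&b| b as u64)); // then the raw bytes
        }
        inputs.reverse(); // reversed so that pop() yields the inputs in order
        inputs
    }

    fn main() {
        let mut inputs = all_rlp_prover_inputs_reversed(&[vec![0xAA], vec![0xBB, 0xCC]]);
        assert_eq!(inputs.pop(), Some(1));    // len of first txn
        assert_eq!(inputs.pop(), Some(0xAA)); // its single byte
        assert_eq!(inputs.pop(), Some(2));    // len of second txn
        assert_eq!(inputs.pop(), Some(0xBB));
        assert_eq!(inputs.pop(), Some(0xCC));
        assert!(inputs.is_empty());
    }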


@@ -7,6 +7,7 @@ use tiny_keccak::keccakf;
 use crate::cpu::columns::{CpuColumnsView, NUM_CPU_COLUMNS};
 use crate::generation::memory::MemoryState;
 use crate::generation::mpt::all_mpt_prover_inputs_reversed;
+use crate::generation::rlp::all_rlp_prover_inputs_reversed;
 use crate::generation::GenerationInputs;
 use crate::keccak_memory::keccak_memory_stark::KeccakMemoryOp;
 use crate::memory::memory_stark::MemoryOp;
@@ -19,6 +20,7 @@ use crate::{keccak, logic};
 pub(crate) struct GenerationState<F: Field> {
     #[allow(unused)] // TODO: Should be used soon.
     pub(crate) inputs: GenerationInputs,
+    pub(crate) next_txn_index: usize,
     pub(crate) cpu_rows: Vec<[F; NUM_CPU_COLUMNS]>,
     pub(crate) current_cpu_row: CpuColumnsView<F>,
@@ -32,14 +34,20 @@ pub(crate) struct GenerationState<F: Field> {
     /// Prover inputs containing MPT data, in reverse order so that the next input can be obtained
     /// via `pop()`.
     pub(crate) mpt_prover_inputs: Vec<U256>,
+    /// Prover inputs containing RLP data, in reverse order so that the next input can be obtained
+    /// via `pop()`.
+    pub(crate) rlp_prover_inputs: Vec<U256>,
 }

 impl<F: Field> GenerationState<F> {
     pub(crate) fn new(inputs: GenerationInputs) -> Self {
         let mpt_prover_inputs = all_mpt_prover_inputs_reversed(&inputs.tries);
+        let rlp_prover_inputs = all_rlp_prover_inputs_reversed(&inputs.signed_txns);
         Self {
             inputs,
+            next_txn_index: 0,
             cpu_rows: vec![],
             current_cpu_row: [F::ZERO; NUM_CPU_COLUMNS].into(),
             current_context: 0,
@@ -48,6 +56,7 @@ impl<F: Field> GenerationState<F> {
             keccak_memory_inputs: vec![],
             logic_ops: vec![],
             mpt_prover_inputs,
+            rlp_prover_inputs,
         }
     }