Implement MPT preinitialization (#1406)

* Implement MPT preinitialization

* Apply comments

* Replace GlobalMetadata reads with stores in the kernel

* Change memory specs

* Remove trie data length as a prover input
Linda Guiga 2023-12-07 12:08:47 -05:00 committed by GitHub
parent 4ba7718e66
commit a90aa40b7a
25 changed files with 651 additions and 811 deletions

View File

@ -76,4 +76,5 @@ By default, all memory is zero-initialized. However, to save numerous writes, we
\begin{itemize}
\item The read-only kernel code (in segment 0, context 0) is initialized with its correct values. It's checked by hashing the segment and verifying
that the hash value matches a verifier-provided one.
\item The ``TrieData'' segment is initialized with the input tries. The stored tries are hashed and checked against the provided initial hash. Note that the length of the segment and the pointers -- within the ``TrieData'' segment -- for the three tries are provided as prover inputs. The length is then checked against a value computed when hashing the tries.
\end{itemize}
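For intuition, here is a minimal Rust sketch of the preinitialization idea: the prover fills a segment, and the verifier only needs the segment's digest to be convinced the contents are correct. The hasher, the u64 word type, and all names below are illustrative stand-ins, not the kernel's actual Keccak-based check over 256-bit memory cells.

// Illustrative sketch only: the prover fills a segment, and checking it
// amounts to recomputing a digest and comparing against a known value.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn digest(segment: &[u64]) -> u64 {
    let mut h = DefaultHasher::new();
    segment.hash(&mut h);
    h.finish()
}

fn check_preinitialized_segment(segment: &[u64], expected: u64) -> bool {
    digest(segment) == expected
}

fn main() {
    let trie_data = vec![0u64, 3, 5, 7]; // offset 0 is reserved as the null pointer
    let expected = digest(&trie_data); // in practice, provided to the verifier
    assert!(check_preinitialized_segment(&trie_data, expected));
}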

Binary file not shown.

View File

@ -122,8 +122,6 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/mpt/insert/insert_extension.asm"),
include_str!("asm/mpt/insert/insert_leaf.asm"),
include_str!("asm/mpt/insert/insert_trie_specific.asm"),
include_str!("asm/mpt/load/load.asm"),
include_str!("asm/mpt/load/load_trie_specific.asm"),
include_str!("asm/mpt/read.asm"),
include_str!("asm/mpt/storage/storage_read.asm"),
include_str!("asm/mpt/storage/storage_write.asm"),

View File

@ -13,15 +13,28 @@ global main:
// Initialise the shift table
%shift_table_init
// Second, load all MPT data from the prover.
PUSH hash_initial_tries
%jump(load_all_mpts)
// Initialize the state, transaction and receipt trie root pointers.
PROVER_INPUT(trie_ptr::state)
%mstore_global_metadata(@GLOBAL_METADATA_STATE_TRIE_ROOT)
PROVER_INPUT(trie_ptr::txn)
%mstore_global_metadata(@GLOBAL_METADATA_TXN_TRIE_ROOT)
PROVER_INPUT(trie_ptr::receipt)
%mstore_global_metadata(@GLOBAL_METADATA_RECEIPT_TRIE_ROOT)
global hash_initial_tries:
%mpt_hash_state_trie %mload_global_metadata(@GLOBAL_METADATA_STATE_TRIE_DIGEST_BEFORE) %assert_eq
// We compute the length of the trie data segment in `mpt_hash` so that we
// can check the value provided by the prover.
// We initialize the segment length with 1 because the segment contains
// the null pointer `0` when the tries are empty.
PUSH 1
%mpt_hash_state_trie %mload_global_metadata(@GLOBAL_METADATA_STATE_TRIE_DIGEST_BEFORE) %assert_eq
// stack: trie_data_len
%mpt_hash_txn_trie %mload_global_metadata(@GLOBAL_METADATA_TXN_TRIE_DIGEST_BEFORE) %assert_eq
// stack: trie_data_len
%mpt_hash_receipt_trie %mload_global_metadata(@GLOBAL_METADATA_RECEIPT_TRIE_DIGEST_BEFORE) %assert_eq
// stack: trie_data_full_len
%mstore_global_metadata(@GLOBAL_METADATA_TRIE_DATA_SIZE)
global start_txn:
// stack: (empty)
@ -64,7 +77,10 @@ global hash_final_tries:
%mload_global_metadata(@GLOBAL_METADATA_BLOCK_GAS_USED_AFTER) %assert_eq
DUP3 %mload_global_metadata(@GLOBAL_METADATA_TXN_NUMBER_AFTER) %assert_eq
%pop3
PUSH 1 // initial trie data length
%mpt_hash_state_trie %mload_global_metadata(@GLOBAL_METADATA_STATE_TRIE_DIGEST_AFTER) %assert_eq
%mpt_hash_txn_trie %mload_global_metadata(@GLOBAL_METADATA_TXN_TRIE_DIGEST_AFTER) %assert_eq
%mpt_hash_receipt_trie %mload_global_metadata(@GLOBAL_METADATA_RECEIPT_TRIE_DIGEST_AFTER) %assert_eq
// We don't need the trie data length here.
POP
%jump(halt)
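The new boot flow above can be summarized as: read the three trie root pointers from prover inputs, start the trie-data length at 1 (offset 0 holds the null pointer), and store the length computed while hashing into GLOBAL_METADATA_TRIE_DATA_SIZE. Below is a rough Rust model of that control flow; the struct, the stubbed mpt_hash, and all names are placeholders, not the crate's actual API.

// Hypothetical model of the boot sequence.
struct GlobalMetadata {
    state_trie_root: usize,
    txn_trie_root: usize,
    receipt_trie_root: usize,
    trie_data_size: usize,
}

// Stand-in for %mpt_hash_*_trie: returns (digest, updated trie-data length).
fn mpt_hash(_root_ptr: usize, cur_len: usize) -> (u64, usize) {
    (0xdead_beef, cur_len) // a real implementation also grows cur_len
}

fn boot(prover_root_ptrs: [usize; 3], meta: &mut GlobalMetadata) {
    meta.state_trie_root = prover_root_ptrs[0];
    meta.txn_trie_root = prover_root_ptrs[1];
    meta.receipt_trie_root = prover_root_ptrs[2];

    let mut len = 1; // TrieData[0] is the null pointer
    for root in [meta.state_trie_root, meta.txn_trie_root, meta.receipt_trie_root] {
        let (_digest, new_len) = mpt_hash(root, len);
        // The kernel asserts _digest against the *_DIGEST_BEFORE metadata here.
        len = new_len;
    }
    // The computed length replaces the preinitialized TRIE_DATA_SIZE.
    meta.trie_data_size = len;
}

fn main() {
    let mut meta = GlobalMetadata {
        state_trie_root: 0,
        txn_trie_root: 0,
        receipt_trie_root: 0,
        trie_data_size: 0,
    };
    boot([0, 0, 0], &mut meta); // three empty tries (null root pointers)
    assert_eq!(meta.trie_data_size, 1); // only the null pointer remains
}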

View File

@ -2,40 +2,45 @@
//
// encode_value is a function which should take as input
// - the position within @SEGMENT_RLP_RAW to write to,
// - the offset of a value within @SEGMENT_TRIE_DATA, and
// - a return address.
// - the offset of a value within @SEGMENT_TRIE_DATA,
// - a return address, and
// - the current length of @SEGMENT_TRIE_DATA
// It should serialize the value, write it to @SEGMENT_RLP_RAW starting at the
// given position, and return an updated position (the next unused offset).
// given position, and return an updated position (the next unused offset) as well
// as an updated length for @SEGMENT_TRIE_DATA.
//
// Pre stack: node_ptr, encode_value, retdest
// Post stack: hash
// Given the initial length of the `TrieData` segment, it also updates the length
// for the current trie.
//
// Pre stack: node_ptr, encode_value, cur_len, retdest
// Post stack: hash, new_len
global mpt_hash:
// stack: node_ptr, encode_value, retdest
%stack (node_ptr, encode_value) -> (node_ptr, encode_value, mpt_hash_hash_if_rlp)
// stack: node_ptr, encode_value, cur_len, retdest
%stack (node_ptr, encode_value, cur_len) -> (node_ptr, encode_value, cur_len, mpt_hash_hash_if_rlp)
%jump(encode_or_hash_node)
mpt_hash_hash_if_rlp:
// stack: result, result_len, retdest
// stack: result, result_len, new_len, retdest
// If result_len < 32, then we have an RLP blob, and we need to hash it.
DUP2 %lt_const(32) %jumpi(mpt_hash_hash_rlp)
// Otherwise, we already have a hash, so just return it.
// stack: result, result_len, retdest
%stack (result, result_len, retdest) -> (retdest, result)
// stack: result, result_len, new_len, retdest
%stack (result, result_len, new_len, retdest) -> (retdest, result, new_len)
JUMP
mpt_hash_hash_rlp:
// stack: result, result_len, retdest
%stack (result, result_len)
// context, segment, offset, value, len, retdest
-> (0, @SEGMENT_RLP_RAW, 0, result, result_len, mpt_hash_hash_rlp_after_unpacking)
// stack: result, result_len, new_len, retdest
%stack (result, result_len, new_len)
// context, segment, offset, value, len, trie_len, retdest
-> (0, @SEGMENT_RLP_RAW, 0, result, result_len, mpt_hash_hash_rlp_after_unpacking, new_len)
%jump(mstore_unpacking)
mpt_hash_hash_rlp_after_unpacking:
// stack: result_len, retdest
// stack: result_len, new_len, retdest
PUSH 0 // offset
PUSH @SEGMENT_RLP_RAW // segment
PUSH 0 // context
// stack: result_addr: 3, result_len, retdest
// stack: result_addr: 3, result_len, new_len, retdest
KECCAK_GENERAL
// stack: hash, retdest
SWAP1
// stack: hash, new_len, retdest
%stack(hash, new_len, retdest) -> (retdest, hash, new_len)
JUMP
// Given a trie node, return its RLP encoding if it is less than 32 bytes,
@ -47,11 +52,11 @@ mpt_hash_hash_rlp_after_unpacking:
// Pre stack: node_ptr, encode_value, retdest
// Post stack: result, result_len
global encode_or_hash_node:
// stack: node_ptr, encode_value, retdest
// stack: node_ptr, encode_value, cur_len, retdest
DUP1 %mload_trie_data
// Check if we're dealing with a concrete node, i.e. not a hash node.
// stack: node_type, node_ptr, encode_value, retdest
// stack: node_type, node_ptr, encode_value, cur_len, retdest
DUP1
PUSH @MPT_NODE_HASH
SUB
@ -59,51 +64,54 @@ global encode_or_hash_node:
// If we got here, node_type == @MPT_NODE_HASH.
// Load the hash and return (hash, 32).
// stack: node_type, node_ptr, encode_value, retdest
// stack: node_type, node_ptr, encode_value, cur_len, retdest
POP
// stack: node_ptr, encode_value, retdest
// Update the length of the `TrieData` segment: there are only two
// elements in a hash node.
SWAP2 %add_const(2) SWAP2
// stack: node_ptr, encode_value, cur_len, retdest
%increment // Skip over node type prefix
// stack: hash_ptr, encode_value, retdest
// stack: hash_ptr, encode_value, cur_len, retdest
%mload_trie_data
// stack: hash, encode_value, retdest
%stack (hash, encode_value, retdest) -> (retdest, hash, 32)
// stack: hash, encode_value, cur_len, retdest
%stack (hash, encode_value, cur_len, retdest) -> (retdest, hash, 32, cur_len)
JUMP
encode_or_hash_concrete_node:
%stack (node_type, node_ptr, encode_value) -> (node_type, node_ptr, encode_value, maybe_hash_node)
%stack (node_type, node_ptr, encode_value, cur_len) -> (node_type, node_ptr, encode_value, cur_len, maybe_hash_node)
%jump(encode_node)
maybe_hash_node:
// stack: result_ptr, result_len, retdest
// stack: result_ptr, result_len, cur_len, retdest
DUP2 %lt_const(32)
%jumpi(pack_small_rlp)
// result_len >= 32, so we hash the result.
// stack: result_ptr, result_len, retdest
// stack: result_ptr, result_len, cur_len, retdest
PUSH @SEGMENT_RLP_RAW // segment
PUSH 0 // context
// stack: result_addr: 3, result_len, retdest
// stack: result_addr: 3, result_len, cur_len, retdest
KECCAK_GENERAL
%stack (hash, retdest) -> (retdest, hash, 32)
%stack (hash, cur_len, retdest) -> (retdest, hash, 32, cur_len)
JUMP
pack_small_rlp:
// stack: result_ptr, result_len, retdest
%stack (result_ptr, result_len)
// stack: result_ptr, result_len, cur_len, retdest
%stack (result_ptr, result_len, cur_len)
-> (0, @SEGMENT_RLP_RAW, result_ptr, result_len,
after_packed_small_rlp, result_len)
after_packed_small_rlp, result_len, cur_len)
%jump(mload_packing)
after_packed_small_rlp:
%stack (result, result_len, retdest) -> (retdest, result, result_len)
%stack (result, result_len, cur_len, retdest) -> (retdest, result, result_len, cur_len)
JUMP
// RLP encode the given trie node, and return a (pointer, length) pair
// indicating where the data lives within @SEGMENT_RLP_RAW.
//
// Pre stack: node_type, node_ptr, encode_value, retdest
// Post stack: result_ptr, result_len
// Pre stack: node_type, node_ptr, encode_value, cur_len, retdest
// Post stack: result_ptr, result_len, cur_len
encode_node:
// stack: node_type, node_ptr, encode_value, retdest
// stack: node_type, node_ptr, encode_value, cur_len, retdest
// Increment node_ptr, so it points to the node payload instead of its type.
SWAP1 %increment SWAP1
// stack: node_type, node_payload_ptr, encode_value, retdest
// stack: node_type, node_payload_ptr, encode_value, cur_len, retdest
DUP1 %eq_const(@MPT_NODE_EMPTY) %jumpi(encode_node_empty)
DUP1 %eq_const(@MPT_NODE_BRANCH) %jumpi(encode_node_branch)
@ -115,25 +123,29 @@ encode_node:
PANIC
global encode_node_empty:
// stack: node_type, node_payload_ptr, encode_value, retdest
// stack: node_type, node_payload_ptr, encode_value, cur_len, retdest
// The length of `TrieData` is unchanged here.
%pop3
// stack: retdest
// stack: cur_len, retdest
// An empty node is encoded as a single byte, 0x80, which is the RLP encoding of the empty string.
// TODO: Write this byte just once to RLP memory, then we can always return (0, 1).
%alloc_rlp_block
// stack: rlp_pos, retdest
// stack: rlp_pos, cur_len, retdest
PUSH 0x80
// stack: 0x80, rlp_pos, retdest
// stack: 0x80, rlp_pos, cur_len, retdest
DUP2
// stack: rlp_pos, 0x80, rlp_pos, retdest
// stack: rlp_pos, 0x80, rlp_pos, cur_len, retdest
%mstore_rlp
%stack (rlp_pos, retdest) -> (retdest, rlp_pos, 1)
%stack (rlp_pos, cur_len, retdest) -> (retdest, rlp_pos, 1, cur_len)
JUMP
global encode_node_branch:
// stack: node_type, node_payload_ptr, encode_value, retdest
// stack: node_type, node_payload_ptr, encode_value, cur_len, retdest
POP
// stack: node_payload_ptr, encode_value, retdest
// `TrieData` stores the node type, 16 children pointers, and a value pointer.
SWAP2 %add_const(18) SWAP2
// stack: node_payload_ptr, encode_value, cur_len, retdest
// Get the next unused offset within the encoded child buffers.
// Then immediately increment the next unused offset by 16, so any
@ -142,8 +154,7 @@ global encode_node_branch:
%mload_global_metadata(@GLOBAL_METADATA_TRIE_ENCODED_CHILD_SIZE)
DUP1 %add_const(16)
%mstore_global_metadata(@GLOBAL_METADATA_TRIE_ENCODED_CHILD_SIZE)
// stack: base_offset, node_payload_ptr, encode_value, retdest
// stack: base_offset, node_payload_ptr, encode_value, cur_len, retdest
// We will call encode_or_hash_node on each child. For the i'th child, we
// will store the result in SEGMENT_TRIE_ENCODED_CHILD[base + i], and its length in
// SEGMENT_TRIE_ENCODED_CHILD_LEN[base + i].
@ -151,111 +162,118 @@ global encode_node_branch:
%encode_child(4) %encode_child(5) %encode_child(6) %encode_child(7)
%encode_child(8) %encode_child(9) %encode_child(10) %encode_child(11)
%encode_child(12) %encode_child(13) %encode_child(14) %encode_child(15)
// stack: base_offset, node_payload_ptr, encode_value, retdest
// stack: base_offset, node_payload_ptr, encode_value, cur_len, retdest
// Now, append each child to our RLP tape.
%alloc_rlp_block DUP1
// stack: rlp_pos, rlp_start, base_offset, node_payload_ptr, encode_value, retdest
// stack: rlp_pos, rlp_start, base_offset, node_payload_ptr, encode_value, cur_len, retdest
%append_child(0) %append_child(1) %append_child(2) %append_child(3)
%append_child(4) %append_child(5) %append_child(6) %append_child(7)
%append_child(8) %append_child(9) %append_child(10) %append_child(11)
%append_child(12) %append_child(13) %append_child(14) %append_child(15)
// stack: rlp_pos', rlp_start, base_offset, node_payload_ptr, encode_value, retdest
// stack: rlp_pos', rlp_start, base_offset, node_payload_ptr, encode_value, cur_len, retdest
%stack (rlp_pos, rlp_start, base_offset, node_payload_ptr)
-> (node_payload_ptr, rlp_pos, rlp_start)
%add_const(16)
// stack: value_ptr_ptr, rlp_pos', rlp_start, encode_value, retdest
// stack: value_ptr_ptr, rlp_pos', rlp_start, encode_value, cur_len, retdest
%mload_trie_data
// stack: value_ptr, rlp_pos', rlp_start, encode_value, retdest
// stack: value_ptr, rlp_pos', rlp_start, encode_value, cur_len, retdest
DUP1 %jumpi(encode_node_branch_with_value)
// No value; append the empty string (0x80).
// stack: value_ptr, rlp_pos', rlp_start, encode_value, retdest
// stack: value_ptr, rlp_pos', rlp_start, encode_value, cur_len, retdest
%stack (value_ptr, rlp_pos, rlp_start, encode_value) -> (rlp_pos, 0x80, rlp_pos, rlp_start)
%mstore_rlp
// stack: rlp_pos', rlp_start, retdest
// stack: rlp_pos', rlp_start, cur_len, retdest
%increment
// stack: rlp_pos'', rlp_start, retdest
// stack: rlp_pos'', rlp_start, cur_len, retdest
%jump(encode_node_branch_prepend_prefix)
encode_node_branch_with_value:
// stack: value_ptr, rlp_pos', rlp_start, encode_value, retdest
%stack (value_ptr, rlp_pos, rlp_start, encode_value)
-> (encode_value, rlp_pos, value_ptr, encode_node_branch_prepend_prefix, rlp_start)
// stack: value_ptr, rlp_pos', rlp_start, encode_value, cur_len, retdest
%stack (value_ptr, rlp_pos, rlp_start, encode_value, cur_len)
-> (encode_value, rlp_pos, value_ptr, cur_len, encode_node_branch_after_value, rlp_start)
JUMP // call encode_value
encode_node_branch_after_value:
// stack: rlp_pos'', cur_len, rlp_start, retdest
%stack(rlp_pos, cur_len, rlp_start, retdest) -> (rlp_pos, rlp_start, cur_len, retdest)
encode_node_branch_prepend_prefix:
// stack: rlp_pos'', rlp_start, retdest
// stack: rlp_pos'', rlp_start, cur_len, retdest
%prepend_rlp_list_prefix
// stack: rlp_prefix_start, rlp_len, retdest
%stack (rlp_prefix_start, rlp_len, retdest)
-> (retdest, rlp_prefix_start, rlp_len)
// stack: rlp_prefix_start, rlp_len, cur_len, retdest
%stack (rlp_prefix_start, rlp_len, cur_len, retdest)
-> (retdest, rlp_prefix_start, rlp_len, cur_len)
JUMP
// Part of the encode_node_branch function. Encodes the i'th child.
// Stores the result in SEGMENT_TRIE_ENCODED_CHILD[base + i], and its length in
// SEGMENT_TRIE_ENCODED_CHILD_LEN[base + i].
%macro encode_child(i)
// stack: base_offset, node_payload_ptr, encode_value, retdest
// stack: base_offset, node_payload_ptr, encode_value, cur_len, retdest
PUSH %%after_encode
DUP4 DUP4
// stack: node_payload_ptr, encode_value, %%after_encode, base_offset, node_payload_ptr, encode_value, retdest
// stack: node_payload_ptr, encode_value, %%after_encode, base_offset, node_payload_ptr, encode_value, cur_len, retdest
%add_const($i) %mload_trie_data
// stack: child_i_ptr, encode_value, %%after_encode, base_offset, node_payload_ptr, encode_value, retdest
// stack: child_i_ptr, encode_value, %%after_encode, base_offset, node_payload_ptr, encode_value, cur_len, retdest
%stack(child_i_ptr, encode_value, after_encode, base_offset, node_payload_ptr, encode_value, cur_len) -> (child_i_ptr, encode_value, cur_len, after_encode, base_offset, node_payload_ptr, encode_value)
%jump(encode_or_hash_node)
%%after_encode:
// stack: result, result_len, base_offset, node_payload_ptr, encode_value, retdest
// stack: result, result_len, cur_len, base_offset, node_payload_ptr, encode_value, retdest
%stack(result, result_len, cur_len, base_offset, node_payload_ptr, encode_value) -> (result, result_len, base_offset, node_payload_ptr, encode_value, cur_len)
DUP3 %add_const($i) %mstore_kernel(@SEGMENT_TRIE_ENCODED_CHILD)
// stack: result_len, base_offset, node_payload_ptr, encode_value, retdest
// stack: result_len, base_offset, node_payload_ptr, encode_value, cur_len, retdest
DUP2 %add_const($i) %mstore_kernel(@SEGMENT_TRIE_ENCODED_CHILD_LEN)
// stack: base_offset, node_payload_ptr, encode_value, retdest
// stack: base_offset, node_payload_ptr, encode_value, cur_len, retdest
%endmacro
// Part of the encode_node_branch function. Appends the i'th child's RLP.
%macro append_child(i)
// stack: rlp_pos, rlp_start, base_offset, node_payload_ptr, encode_value, retdest
// stack: rlp_pos, rlp_start, base_offset, node_payload_ptr, encode_value, cur_len, retdest
DUP3 %add_const($i) %mload_kernel(@SEGMENT_TRIE_ENCODED_CHILD) // load result
DUP4 %add_const($i) %mload_kernel(@SEGMENT_TRIE_ENCODED_CHILD_LEN) // load result_len
// stack: result_len, result, rlp_pos, rlp_start, base_offset, node_payload_ptr, encode_value, retdest
// stack: result_len, result, rlp_pos, rlp_start, base_offset, node_payload_ptr, encode_value, cur_len, retdest
// If result_len != 32, result is raw RLP, with an appropriate RLP prefix already.
DUP1 %sub_const(32) %jumpi(%%unpack)
// Otherwise, result is a hash, and we need to add the prefix 0x80 + 32 = 160.
// stack: result_len, result, rlp_pos, rlp_start, base_offset, node_payload_ptr, encode_value, retdest
// stack: result_len, result, rlp_pos, rlp_start, base_offset, node_payload_ptr, encode_value, cur_len, retdest
PUSH 160
DUP4 // rlp_pos
%mstore_rlp
SWAP2 %increment SWAP2 // rlp_pos += 1
%%unpack:
%stack (result_len, result, rlp_pos, rlp_start, base_offset, node_payload_ptr, encode_value, retdest)
%stack (result_len, result, rlp_pos, rlp_start, base_offset, node_payload_ptr, encode_value, cur_len, retdest)
-> (rlp_pos, result, result_len, %%after_unpacking,
rlp_start, base_offset, node_payload_ptr, encode_value, retdest)
rlp_start, base_offset, node_payload_ptr, encode_value, cur_len, retdest)
%jump(mstore_unpacking_rlp)
%%after_unpacking:
// stack: rlp_pos', rlp_start, base_offset, node_payload_ptr, encode_value, retdest
// stack: rlp_pos', rlp_start, base_offset, node_payload_ptr, encode_value, cur_len, retdest
%endmacro
global encode_node_extension:
// stack: node_type, node_payload_ptr, encode_value, retdest
%stack (node_type, node_payload_ptr, encode_value)
-> (node_payload_ptr, encode_value, encode_node_extension_after_encode_child, node_payload_ptr)
// stack: node_type, node_payload_ptr, encode_value, cur_len, retdest
SWAP3 %add_const(4) SWAP3
%stack (node_type, node_payload_ptr, encode_value, cur_len)
-> (node_payload_ptr, encode_value, cur_len, encode_node_extension_after_encode_child, node_payload_ptr)
%add_const(2) %mload_trie_data
// stack: child_ptr, encode_value, encode_node_extension_after_encode_child, node_payload_ptr, retdest
// stack: child_ptr, encode_value, cur_len, encode_node_extension_after_encode_child, node_payload_ptr, retdest
%jump(encode_or_hash_node)
encode_node_extension_after_encode_child:
// stack: result, result_len, node_payload_ptr, retdest
// stack: result, result_len, cur_len, node_payload_ptr, retdest
%stack (result, result_len, cur_len, node_payload_ptr) -> (result, result_len, node_payload_ptr, cur_len)
%alloc_rlp_block
// stack: rlp_start, result, result_len, node_payload_ptr, retdest
// stack: rlp_start, result, result_len, node_payload_ptr, cur_len, retdest
PUSH encode_node_extension_after_hex_prefix // retdest
PUSH 0 // terminated
// stack: terminated, encode_node_extension_after_hex_prefix, rlp_start, result, result_len, node_payload_ptr, retdest
// stack: terminated, encode_node_extension_after_hex_prefix, rlp_start, result, result_len, node_payload_ptr, cur_len, retdest
DUP6 %increment %mload_trie_data // Load the packed_nibbles field, which is at index 1.
// stack: packed_nibbles, terminated, encode_node_extension_after_hex_prefix, rlp_start, result, result_len, node_payload_ptr, retdest
// stack: packed_nibbles, terminated, encode_node_extension_after_hex_prefix, rlp_start, result, result_len, node_payload_ptr, cur_len, retdest
DUP7 %mload_trie_data // Load the num_nibbles field, which is at index 0.
// stack: num_nibbles, packed_nibbles, terminated, encode_node_extension_after_hex_prefix, rlp_start, result, result_len, node_payload_ptr, retdest
// stack: num_nibbles, packed_nibbles, terminated, encode_node_extension_after_hex_prefix, rlp_start, result, result_len, node_payload_ptr, cur_len, retdest
DUP5
// stack: rlp_start, num_nibbles, packed_nibbles, terminated, encode_node_extension_after_hex_prefix, rlp_start, result, result_len, node_payload_ptr, retdest
// stack: rlp_start, num_nibbles, packed_nibbles, terminated, encode_node_extension_after_hex_prefix, rlp_start, result, result_len, node_payload_ptr, cur_len, retdest
%jump(hex_prefix_rlp)
encode_node_extension_after_hex_prefix:
// stack: rlp_pos, rlp_start, result, result_len, node_payload_ptr, retdest
// stack: rlp_pos, rlp_start, result, result_len, node_payload_ptr, cur_len, retdest
// If result_len != 32, result is raw RLP, with an appropriate RLP prefix already.
DUP4 %sub_const(32) %jumpi(encode_node_extension_unpack)
// Otherwise, result is a hash, and we need to add the prefix 0x80 + 32 = 160.
@ -264,44 +282,50 @@ encode_node_extension_after_hex_prefix:
%mstore_rlp
%increment // rlp_pos += 1
encode_node_extension_unpack:
%stack (rlp_pos, rlp_start, result, result_len, node_payload_ptr)
-> (rlp_pos, result, result_len, encode_node_extension_after_unpacking, rlp_start)
%stack (rlp_pos, rlp_start, result, result_len, node_payload_ptr, cur_len)
-> (rlp_pos, result, result_len, encode_node_extension_after_unpacking, rlp_start, cur_len)
%jump(mstore_unpacking_rlp)
encode_node_extension_after_unpacking:
// stack: rlp_pos, rlp_start, retdest
// stack: rlp_pos, rlp_start, cur_len, retdest
%prepend_rlp_list_prefix
%stack (rlp_prefix_start_pos, rlp_len, retdest)
-> (retdest, rlp_prefix_start_pos, rlp_len)
%stack (rlp_prefix_start_pos, rlp_len, cur_len, retdest)
-> (retdest, rlp_prefix_start_pos, rlp_len, cur_len)
JUMP
global encode_node_leaf:
// stack: node_type, node_payload_ptr, encode_value, retdest
// stack: node_type, node_payload_ptr, encode_value, cur_len, retdest
// `TrieData` holds the node type, the number of nibbles, the nibbles,
// the pointer to the value and the value.
// First, we add 4 for the node type, the number of nibbles, the nibbles
// and the pointer to the value.
SWAP3 %add_const(4) SWAP3
POP
// stack: node_payload_ptr, encode_value, retdest
// stack: node_payload_ptr, encode_value, cur_len, retdest
%alloc_rlp_block
PUSH encode_node_leaf_after_hex_prefix // retdest
PUSH 1 // terminated
// stack: terminated, encode_node_leaf_after_hex_prefix, rlp_start, node_payload_ptr, encode_value, retdest
// stack: terminated, encode_node_leaf_after_hex_prefix, rlp_start, node_payload_ptr, encode_value, cur_len, retdest
DUP4 %increment %mload_trie_data // Load the packed_nibbles field, which is at index 1.
// stack: packed_nibbles, terminated, encode_node_leaf_after_hex_prefix, rlp_start, node_payload_ptr, encode_value, retdest
// stack: packed_nibbles, terminated, encode_node_leaf_after_hex_prefix, rlp_start, node_payload_ptr, encode_value, cur_len, retdest
DUP5 %mload_trie_data // Load the num_nibbles field, which is at index 0.
// stack: num_nibbles, packed_nibbles, terminated, encode_node_leaf_after_hex_prefix, rlp_start, node_payload_ptr, encode_value, retdest
// stack: num_nibbles, packed_nibbles, terminated, encode_node_leaf_after_hex_prefix, rlp_start, node_payload_ptr, encode_value, cur_len, retdest
DUP5
// stack: rlp_start, num_nibbles, packed_nibbles, terminated, encode_node_leaf_after_hex_prefix, rlp_start, node_payload_ptr, encode_value, retdest
// stack: rlp_start, num_nibbles, packed_nibbles, terminated, encode_node_leaf_after_hex_prefix, rlp_start, node_payload_ptr, encode_value, cur_len, retdest
%jump(hex_prefix_rlp)
encode_node_leaf_after_hex_prefix:
// stack: rlp_pos, rlp_start, node_payload_ptr, encode_value, retdest
// stack: rlp_pos, rlp_start, node_payload_ptr, encode_value, cur_len, retdest
SWAP2
%add_const(2) // The value pointer starts at index 3, after num_nibbles and packed_nibbles.
// stack: value_ptr_ptr, rlp_start, rlp_pos, encode_value, retdest
// stack: value_ptr_ptr, rlp_start, rlp_pos, encode_value, cur_len, retdest
%mload_trie_data
// stack: value_ptr, rlp_start, rlp_pos, encode_value, retdest
%stack (value_ptr, rlp_start, rlp_pos, encode_value, retdest)
-> (encode_value, rlp_pos, value_ptr, encode_node_leaf_after_encode_value, rlp_start, retdest)
// stack: value_ptr, rlp_start, rlp_pos, encode_value, cur_len, retdest
%stack (value_ptr, rlp_start, rlp_pos, encode_value, cur_len, retdest)
-> (encode_value, rlp_pos, value_ptr, cur_len, encode_node_leaf_after_encode_value, rlp_start, retdest)
JUMP
encode_node_leaf_after_encode_value:
// stack: rlp_end_pos, rlp_start, retdest
// stack: rlp_end_pos, cur_len, rlp_start, retdest
%stack(rlp_end_pos, cur_len, rlp_start, retdest) -> (rlp_end_pos, rlp_start, cur_len, retdest)
%prepend_rlp_list_prefix
%stack (rlp_prefix_start_pos, rlp_len, retdest)
-> (retdest, rlp_prefix_start_pos, rlp_len)
%stack (rlp_prefix_start_pos, rlp_len, cur_len, retdest)
-> (retdest, rlp_prefix_start_pos, rlp_len, cur_len)
JUMP
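Throughout this file, cur_len is threaded through the encoders and bumped by the number of TrieData cells each node occupies (2 for a hash node, 18 for a branch, 4 for an extension or leaf, plus whatever the value encoder adds). Below is a compact Rust model of that accounting; the enum and the value_len parameterization are illustrative and not the kernel's actual node representation.

// Rough model of how cur_len grows during hashing, mirroring the %add_const
// bumps in the assembly above.
enum Node {
    Empty,
    Hash,
    Branch { children: Vec<Node>, value_len: usize },
    Extension { child: Box<Node> },
    Leaf { value_len: usize },
}

fn trie_data_len(node: &Node, cur_len: usize) -> usize {
    match node {
        Node::Empty => cur_len,    // empty nodes reuse TrieData[0]
        Node::Hash => cur_len + 2, // node type + digest
        Node::Branch { children, value_len } => {
            // node type + 16 child pointers + value pointer, plus the value (0 if absent)
            let mut len = cur_len + 18 + value_len;
            for child in children {
                len = trie_data_len(child, len);
            }
            len
        }
        Node::Extension { child } => trie_data_len(child, cur_len + 4),
        Node::Leaf { value_len } => cur_len + 4 + value_len, // type, nibble count, nibbles, value ptr
    }
}

fn main() {
    let trie = Node::Extension {
        child: Box::new(Node::Leaf { value_len: 4 }), // e.g. an account value
    };
    // The length starts at 1 because TrieData[0] is the null pointer.
    assert_eq!(trie_data_len(&trie, 1), 1 + 4 + 4 + 4);
}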

View File

@ -1,116 +1,138 @@
// Hashing logic specific to a particular trie.
global mpt_hash_state_trie:
// stack: retdest
// stack: cur_len, retdest
PUSH encode_account
%mload_global_metadata(@GLOBAL_METADATA_STATE_TRIE_ROOT)
// stack: node_ptr, encode_account, retdest
// stack: node_ptr, encode_account, cur_len, retdest
%jump(mpt_hash)
%macro mpt_hash_state_trie
// stack: cur_len
PUSH %%after
SWAP1
%jump(mpt_hash_state_trie)
%%after:
%endmacro
global mpt_hash_storage_trie:
// stack: node_ptr, retdest
%stack (node_ptr) -> (node_ptr, encode_storage_value)
// stack: node_ptr, cur_len, retdest
%stack (node_ptr, cur_len) -> (node_ptr, encode_storage_value, cur_len)
%jump(mpt_hash)
%macro mpt_hash_storage_trie
%stack (node_ptr) -> (node_ptr, %%after)
%stack (node_ptr, cur_len) -> (node_ptr, cur_len, %%after)
%jump(mpt_hash_storage_trie)
%%after:
%endmacro
global mpt_hash_txn_trie:
// stack: retdest
// stack: cur_len, retdest
PUSH encode_txn
%mload_global_metadata(@GLOBAL_METADATA_TXN_TRIE_ROOT)
// stack: node_ptr, encode_txn, retdest
// stack: node_ptr, encode_txn, cur_len, retdest
%jump(mpt_hash)
%macro mpt_hash_txn_trie
// stack: cur_len
PUSH %%after
SWAP1
%jump(mpt_hash_txn_trie)
%%after:
%endmacro
global mpt_hash_receipt_trie:
// stack: retdest
// stack: cur_len, retdest
PUSH encode_receipt
%mload_global_metadata(@GLOBAL_METADATA_RECEIPT_TRIE_ROOT)
// stack: node_ptr, encode_receipt, retdest
// stack: node_ptr, encode_receipt, cur_len, retdest
%jump(mpt_hash)
%macro mpt_hash_receipt_trie
// stack: cur_len
PUSH %%after
SWAP1
%jump(mpt_hash_receipt_trie)
%%after:
%endmacro
global encode_account:
// stack: rlp_pos, value_ptr, retdest
// stack: rlp_pos, value_ptr, cur_len, retdest
// First, we compute the length of the RLP data we're about to write.
// We also update the length of the trie data segment.
// The nonce and balance fields are variable-length, so we need to load them
// to determine their contribution, while the other two fields are fixed
// 32-bytes integers.
// First, we add 4 to the trie data length, for the nonce,
// the balance, the storage pointer and the code hash.
SWAP2 %add_const(4) SWAP2
// Now, we start the encoding.
// stack: rlp_pos, value_ptr, cur_len, retdest
DUP2 %mload_trie_data // nonce = value[0]
%rlp_scalar_len
// stack: nonce_rlp_len, rlp_pos, value_ptr, retdest
// stack: nonce_rlp_len, rlp_pos, value_ptr, cur_len, retdest
DUP3 %increment %mload_trie_data // balance = value[1]
%rlp_scalar_len
// stack: balance_rlp_len, nonce_rlp_len, rlp_pos, value_ptr, retdest
// stack: balance_rlp_len, nonce_rlp_len, rlp_pos, value_ptr, cur_len, retdest
PUSH 66 // storage_root and code_hash fields each take 1 + 32 bytes
ADD ADD
// stack: payload_len, rlp_pos, value_ptr, retdest
// stack: payload_len, rlp_pos, value_ptr, cur_len, retdest
SWAP1
// stack: rlp_pos, payload_len, value_ptr, retdest
// stack: rlp_pos, payload_len, value_ptr, cur_len, retdest
DUP2 %rlp_list_len
// stack: list_len, rlp_pos, payload_len, value_ptr, retdest
// stack: list_len, rlp_pos, payload_len, value_ptr, cur_len, retdest
SWAP1
// stack: rlp_pos, list_len, payload_len, value_ptr, retdest
// stack: rlp_pos, list_len, payload_len, value_ptr, cur_len, retdest
%encode_rlp_multi_byte_string_prefix
// stack: rlp_pos_2, payload_len, value_ptr, retdest
// stack: rlp_pos_2, payload_len, value_ptr, cur_len, retdest
%encode_rlp_list_prefix
// stack: rlp_pos_3, value_ptr, retdest
// stack: rlp_pos_3, value_ptr, cur_len, retdest
DUP2 %mload_trie_data // nonce = value[0]
// stack: nonce, rlp_pos_3, value_ptr, retdest
// stack: nonce, rlp_pos_3, value_ptr, cur_len, retdest
SWAP1 %encode_rlp_scalar
// stack: rlp_pos_4, value_ptr, retdest
// stack: rlp_pos_4, value_ptr, cur_len, retdest
DUP2 %increment %mload_trie_data // balance = value[1]
// stack: balance, rlp_pos_4, value_ptr, retdest
// stack: balance, rlp_pos_4, value_ptr, cur_len, retdest
SWAP1 %encode_rlp_scalar
// stack: rlp_pos_5, value_ptr, retdest
DUP2 %add_const(2) %mload_trie_data // storage_root_ptr = value[2]
// stack: storage_root_ptr, rlp_pos_5, value_ptr, retdest
// stack: rlp_pos_5, value_ptr, cur_len, retdest
DUP3
DUP3 %add_const(2) %mload_trie_data // storage_root_ptr = value[2]
// stack: storage_root_ptr, cur_len, rlp_pos_5, value_ptr, cur_len, retdest
// Hash storage trie.
%mpt_hash_storage_trie
// stack: storage_root_digest, rlp_pos_5, value_ptr, retdest
SWAP1 %encode_rlp_256
// stack: rlp_pos_6, value_ptr, retdest
// stack: storage_root_digest, new_len, rlp_pos_5, value_ptr, cur_len, retdest
%stack(storage_root_digest, new_len, rlp_pos_five, value_ptr, cur_len) -> (rlp_pos_five, storage_root_digest, value_ptr, new_len)
%encode_rlp_256
// stack: rlp_pos_6, value_ptr, new_len, retdest
SWAP1 %add_const(3) %mload_trie_data // code_hash = value[3]
// stack: code_hash, rlp_pos_6, retdest
// stack: code_hash, rlp_pos_6, new_len, retdest
SWAP1 %encode_rlp_256
// stack: rlp_pos_7, retdest
SWAP1
// stack: rlp_pos_7, new_len, retdest
%stack(rlp_pos_7, new_len, retdest) -> (retdest, rlp_pos_7, new_len)
JUMP
global encode_txn:
// stack: rlp_pos, value_ptr, retdest
// stack: rlp_pos, value_ptr, cur_len, retdest
// Load the txn_rlp_len which is at the beginning of value_ptr
DUP2 %mload_trie_data
// stack: txn_rlp_len, rlp_pos, value_ptr, retdest
// stack: txn_rlp_len, rlp_pos, value_ptr, cur_len, retdest
// We need to add 1+txn_rlp_len to the length of the trie data.
SWAP3 DUP4 %increment ADD
// stack: new_len, rlp_pos, value_ptr, txn_rlp_len, retdest
SWAP3
SWAP2 %increment
// stack: txn_rlp_ptr=value_ptr+1, rlp_pos, txn_rlp_len, retdest
// stack: txn_rlp_ptr=value_ptr+1, rlp_pos, txn_rlp_len, new_len, retdest
%stack (txn_rlp_ptr, rlp_pos, txn_rlp_len) -> (rlp_pos, txn_rlp_len, txn_rlp_len, txn_rlp_ptr)
// Encode the txn rlp prefix
// stack: rlp_pos, txn_rlp_len, txn_rlp_len, txn_rlp_ptr, retdest
// stack: rlp_pos, txn_rlp_len, txn_rlp_len, txn_rlp_ptr, cur_len, retdest
%encode_rlp_multi_byte_string_prefix
// copy txn_rlp to the new block
// stack: rlp_pos, txn_rlp_len, txn_rlp_ptr, retdest
// stack: rlp_pos, txn_rlp_len, txn_rlp_ptr, new_len, retdest
%stack (rlp_pos, txn_rlp_len, txn_rlp_ptr) -> (
0, @SEGMENT_RLP_RAW, rlp_pos, // dest addr
0, @SEGMENT_TRIE_DATA, txn_rlp_ptr, // src addr. Kernel has context 0
@ -118,155 +140,176 @@ global encode_txn:
txn_rlp_len, rlp_pos)
%memcpy_bytes
ADD
// stack: new_rlp_pos, retdest
SWAP1
// stack: new_rlp_pos, new_len, retdest
%stack(new_rlp_pos, new_len, retdest) -> (retdest, new_rlp_pos, new_len)
JUMP
// We assume a receipt in memory is stored as:
// [payload_len, status, cum_gas_used, bloom, logs_payload_len, num_logs, [logs]].
// A log is [payload_len, address, num_topics, [topics], data_len, [data]].
global encode_receipt:
// stack: rlp_pos, value_ptr, retdest
// stack: rlp_pos, value_ptr, cur_len, retdest
// First, we add 261 to the trie data length for all values before the logs besides the type.
// These are: the payload length, the status, cum_gas_used, the bloom filter (256 elements),
// the length of the logs payload and the number of logs.
SWAP2 %add_const(261) SWAP2
// There is a double encoding! What we compute is:
// either RLP(RLP(receipt)) for Legacy transactions or RLP(txn_type||RLP(receipt)) for transactions of type 1 or 2.
// First encode the wrapper prefix.
DUP2 %mload_trie_data
// stack: first_value, rlp_pos, value_ptr, retdest
// stack: first_value, rlp_pos, value_ptr, cur_len, retdest
// The first value is either the transaction type or the payload length.
// Since the receipt contains at least the 256-byte bloom filter, payload_len > 3.
DUP1 %lt_const(3) %jumpi(encode_nonzero_receipt_type)
// If we are here, then the first byte is the payload length.
%rlp_list_len
// stack: rlp_receipt_len, rlp_pos, value_ptr, retdest
// stack: rlp_receipt_len, rlp_pos, value_ptr, cur_len, retdest
SWAP1 %encode_rlp_multi_byte_string_prefix
// stack: rlp_pos, value_ptr, retdest
// stack: rlp_pos, value_ptr, cur_len, retdest
encode_receipt_after_type:
// stack: rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, payload_len_ptr, cur_len, retdest
// Then encode the receipt prefix.
// `payload_ptr` is either `value_ptr` or `value_ptr+1`, depending on the transaction type.
DUP2 %mload_trie_data
// stack: payload_len, rlp_pos, payload_len_ptr, retdest
// stack: payload_len, rlp_pos, payload_len_ptr, cur_len, retdest
SWAP1 %encode_rlp_list_prefix
// stack: rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, payload_len_ptr, cur_len, retdest
// Encode status.
DUP2 %increment %mload_trie_data
// stack: status, rlp_pos, payload_len_ptr, retdest
// stack: status, rlp_pos, payload_len_ptr, cur_len, retdest
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, payload_len_ptr, cur_len, retdest
// Encode cum_gas_used.
DUP2 %add_const(2) %mload_trie_data
// stack: cum_gas_used, rlp_pos, payload_len_ptr, retdest
// stack: cum_gas_used, rlp_pos, payload_len_ptr, cur_len, retdest
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, payload_len_ptr, cur_len, retdest
// Encode bloom.
PUSH 256 // Bloom length.
DUP3 %add_const(3) PUSH @SEGMENT_TRIE_DATA PUSH 0 // MPT src address.
DUP5
// stack: rlp_pos, SRC, 256, rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, SRC, 256, rlp_pos, payload_len_ptr, cur_len, retdest
%encode_rlp_string
// stack: rlp_pos, old_rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, old_rlp_pos, payload_len_ptr, cur_len, retdest
SWAP1 POP
// stack: rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, payload_len_ptr, cur_len, retdest
// Encode logs prefix.
DUP2 %add_const(259) %mload_trie_data
// stack: logs_payload_len, rlp_pos, payload_len_ptr, retdest
// stack: logs_payload_len, rlp_pos, payload_len_ptr, cur_len, retdest
SWAP1 %encode_rlp_list_prefix
// stack: rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, payload_len_ptr, cur_len, retdest
DUP2 %add_const(261)
// stack: logs_ptr, rlp_pos, payload_len_ptr, retdest
// stack: logs_ptr, rlp_pos, payload_len_ptr, cur_len, retdest
DUP3 %add_const(260) %mload_trie_data
// stack: num_logs, logs_ptr, rlp_pos, payload_len_ptr, retdest
// stack: num_logs, logs_ptr, rlp_pos, payload_len_ptr, cur_len, retdest
PUSH 0
encode_receipt_logs_loop:
// stack: i, num_logs, current_log_ptr, rlp_pos, payload_len_ptr, retdest
// stack: i, num_logs, current_log_ptr, rlp_pos, payload_len_ptr, cur_len, retdest
DUP2 DUP2 EQ
// stack: i == num_logs, i, num_logs, current_log_ptr, rlp_pos, payload_len_ptr, retdest
// stack: i == num_logs, i, num_logs, current_log_ptr, rlp_pos, payload_len_ptr, cur_len, retdest
%jumpi(encode_receipt_end)
// stack: i, num_logs, current_log_ptr, rlp_pos, payload_len_ptr, retdest
// We add 4 to the trie data length for the fixed size elements in the current log.
SWAP5 %add_const(4) SWAP5
// stack: i, num_logs, current_log_ptr, rlp_pos, payload_len_ptr, cur_len, retdest
DUP3 DUP5
// stack: rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len, retdest
// Encode log prefix.
DUP2 %mload_trie_data
// stack: payload_len, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: payload_len, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len, retdest
SWAP1 %encode_rlp_list_prefix
// stack: rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len, retdest
// Encode address.
DUP2 %increment %mload_trie_data
// stack: address, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: address, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len, retdest
SWAP1 %encode_rlp_160
// stack: rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len, retdest
DUP2 %add_const(2) %mload_trie_data
// stack: num_topics, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: num_topics, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len, retdest
// Encode topics prefix.
DUP1 %mul_const(33)
// stack: topics_payload_len, num_topics, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: topics_payload_len, num_topics, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len, retdest
DUP3 %encode_rlp_list_prefix
// stack: new_rlp_pos, num_topics, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: new_rlp_pos, num_topics, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len, retdest
SWAP2 POP
// stack: num_topics, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: num_topics, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len, retdest
// Add `num_topics` to the length of the trie data segment.
DUP1 SWAP9
// stack: cur_len, num_topics, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, num_topics, retdest
ADD SWAP8
// stack: num_topics, rlp_pos, current_log_ptr, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
SWAP2 %add_const(3)
// stack: topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
PUSH 0
encode_receipt_topics_loop:
// stack: j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
DUP4 DUP2 EQ
// stack: j == num_topics, j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: j == num_topics, j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
%jumpi(encode_receipt_topics_end)
// stack: j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
DUP2 DUP2 ADD
%mload_trie_data
// stack: current_topic, j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: current_topic, j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
DUP4
// stack: rlp_pos, current_topic, j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, current_topic, j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
%encode_rlp_256
// stack: new_rlp_pos, j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: new_rlp_pos, j, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
SWAP3 POP
// stack: j, topics_ptr, new_rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: j, topics_ptr, new_rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
%increment
%jump(encode_receipt_topics_loop)
encode_receipt_topics_end:
// stack: num_topics, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: num_topics, topics_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
ADD
// stack: data_len_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: data_len_ptr, rlp_pos, num_topics, i, num_logs, current_log_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
SWAP5 POP
// stack: rlp_pos, num_topics, i, num_logs, data_len_ptr, old_rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, num_topics, i, num_logs, data_len_ptr, old_rlp_pos, payload_len_ptr, cur_len', retdest
SWAP5 POP
// stack: num_topics, i, num_logs, data_len_ptr, rlp_pos, payload_len_ptr, retdest
// stack: num_topics, i, num_logs, data_len_ptr, rlp_pos, payload_len_ptr, cur_len', retdest
POP
// stack: i, num_logs, data_len_ptr, rlp_pos, payload_len_ptr, retdest
// stack: i, num_logs, data_len_ptr, rlp_pos, payload_len_ptr, cur_len', retdest
// Encode data prefix.
DUP3 %mload_trie_data
// stack: data_len, i, num_logs, data_len_ptr, rlp_pos, payload_len_ptr, retdest
// stack: data_len, i, num_logs, data_len_ptr, rlp_pos, payload_len_ptr, cur_len', retdest
// Add `data_len` to the length of the trie data.
DUP1 SWAP7 ADD SWAP6
// stack: data_len, i, num_logs, data_len_ptr, rlp_pos, payload_len_ptr, cur_len'', retdest
DUP4 %increment DUP2 ADD
// stack: next_log_ptr, data_len, i, num_logs, data_len_ptr, rlp_pos, payload_len_ptr, retdest
// stack: next_log_ptr, data_len, i, num_logs, data_len_ptr, rlp_pos, payload_len_ptr, cur_len'', retdest
SWAP4 %increment
// stack: data_ptr, data_len, i, num_logs, next_log_ptr, rlp_pos, payload_len_ptr, retdest
// stack: data_ptr, data_len, i, num_logs, next_log_ptr, rlp_pos, payload_len_ptr, cur_len'', retdest
PUSH @SEGMENT_TRIE_DATA PUSH 0
// stack: SRC, data_len, i, num_logs, next_log_ptr, rlp_pos, payload_len_ptr, retdest
// stack: SRC, data_len, i, num_logs, next_log_ptr, rlp_pos, payload_len_ptr, cur_len'', retdest
DUP8
// stack: rlp_pos, SRC, data_len, i, num_logs, next_log_ptr, rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, SRC, data_len, i, num_logs, next_log_ptr, rlp_pos, payload_len_ptr, cur_len'', retdest
%encode_rlp_string
// stack: new_rlp_pos, i, num_logs, next_log_ptr, rlp_pos, payload_len_ptr, retdest
// stack: new_rlp_pos, i, num_logs, next_log_ptr, rlp_pos, payload_len_ptr, cur_len'', retdest
SWAP4 POP
// stack: i, num_logs, next_log_ptr, new_rlp_pos, payload_len_ptr, retdest
// stack: i, num_logs, next_log_ptr, new_rlp_pos, payload_len_ptr, cur_len'', retdest
%increment
%jump(encode_receipt_logs_loop)
encode_receipt_end:
// stack: num_logs, num_logs, current_log_ptr, rlp_pos, payload_len_ptr, retdest
// stack: num_logs, num_logs, current_log_ptr, rlp_pos, payload_len_ptr, cur_len'', retdest
%pop3
// stack: rlp_pos, payload_len_ptr, retdest
// stack: rlp_pos, payload_len_ptr, cur_len'', retdest
SWAP1 POP
// stack: rlp_pos, retdest
SWAP1
// stack: rlp_pos, cur_len'', retdest
%stack(rlp_pos, new_len, retdest) -> (retdest, rlp_pos, new_len)
JUMP
encode_nonzero_receipt_type:
// stack: txn_type, rlp_pos, value_ptr, retdest
// stack: txn_type, rlp_pos, value_ptr, cur_len, retdest
// We have a nonlegacy receipt, so the type is also stored in the trie data segment.
SWAP3 %increment SWAP3
// stack: txn_type, rlp_pos, value_ptr, cur_len, retdest
DUP3 %increment %mload_trie_data
// stack: payload_len, txn_type, rlp_pos, value_ptr, retdest
// The transaction type is encoded in 1 byte
@ -285,15 +328,19 @@ encode_nonzero_receipt_type:
%jump(encode_receipt_after_type)
global encode_storage_value:
// stack: rlp_pos, value_ptr, retdest
// stack: rlp_pos, value_ptr, cur_len, retdest
SWAP1 %mload_trie_data SWAP1
// stack: rlp_pos, value, retdest
// A storage value is a scalar, so we only need to add 1 to the trie data length.
SWAP2 %increment SWAP2
// stack: rlp_pos, value, cur_len, retdest
// The YP says storage trie is a map "... to the RLP-encoded 256-bit integer values"
// which seems to imply that this should be %encode_rlp_256. But %encode_rlp_scalar
// causes the tests to pass, so it seems storage values should be treated as variable-
// length after all.
%doubly_encode_rlp_scalar
// stack: rlp_pos', retdest
SWAP1
// stack: rlp_pos', cur_len, retdest
%stack (rlp_pos, cur_len, retdest) -> (retdest, rlp_pos, cur_len)
JUMP
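The length computation at the top of encode_account works because the nonce and balance are variable-length RLP scalars while storage_root and code_hash always occupy 1 + 32 bytes each (hence the constant 66). Here is a small Rust sketch of that arithmetic, with u128 standing in for the kernel's 256-bit words and rlp_scalar_len mimicking %rlp_scalar_len.

// Sketch of the account payload-length arithmetic.
fn rlp_scalar_len(x: u128) -> usize {
    if x < 0x80 {
        1 // encoded as a single byte (0x80 for zero, or the byte itself)
    } else {
        // one prefix byte plus the minimal big-endian representation
        1 + 16 - (x.leading_zeros() / 8) as usize
    }
}

fn account_payload_len(nonce: u128, balance: u128) -> usize {
    // storage_root and code_hash each contribute 1 + 32 bytes.
    rlp_scalar_len(nonce) + rlp_scalar_len(balance) + 66
}

fn main() {
    // Nonce 0 encodes in 1 byte, a one-byte balance in 2 bytes: 1 + 2 + 66 = 69.
    assert_eq!(account_payload_len(0, 0xff), 69);
}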

View File

@ -1,173 +0,0 @@
// Load all partial trie data from prover inputs.
global load_all_mpts:
// stack: retdest
// First set @GLOBAL_METADATA_TRIE_DATA_SIZE = 1.
// We don't want it to start at 0, as we use 0 as a null pointer.
PUSH 1
%set_trie_data_size
%load_mpt(mpt_load_state_trie_value) %mstore_global_metadata(@GLOBAL_METADATA_STATE_TRIE_ROOT)
%load_mpt(mpt_load_txn_trie_value) %mstore_global_metadata(@GLOBAL_METADATA_TXN_TRIE_ROOT)
%load_mpt(mpt_load_receipt_trie_value) %mstore_global_metadata(@GLOBAL_METADATA_RECEIPT_TRIE_ROOT)
// stack: retdest
JUMP
// Load an MPT from prover inputs.
// Pre stack: load_value, retdest
// Post stack: node_ptr
global load_mpt:
// stack: load_value, retdest
PROVER_INPUT(mpt)
// stack: node_type, load_value, retdest
DUP1 %eq_const(@MPT_NODE_EMPTY) %jumpi(load_mpt_empty)
DUP1 %eq_const(@MPT_NODE_BRANCH) %jumpi(load_mpt_branch)
DUP1 %eq_const(@MPT_NODE_EXTENSION) %jumpi(load_mpt_extension)
DUP1 %eq_const(@MPT_NODE_LEAF) %jumpi(load_mpt_leaf)
DUP1 %eq_const(@MPT_NODE_HASH) %jumpi(load_mpt_digest)
PANIC // Invalid node type
load_mpt_empty:
// TRIE_DATA[0] = 0, and an empty node has type 0, so we can simply return the null pointer.
%stack (node_type, load_value, retdest) -> (retdest, 0)
JUMP
load_mpt_branch:
// stack: node_type, load_value, retdest
%get_trie_data_size
// stack: node_ptr, node_type, load_value, retdest
SWAP1 %append_to_trie_data
// stack: node_ptr, load_value, retdest
// Save the offset of our 16 child pointers so we can write them later.
// Then advance our current trie pointer beyond them, so we can load the
// value and have it placed after our child pointers.
%get_trie_data_size
// stack: children_ptr, node_ptr, load_value, retdest
DUP1 %add_const(17) // Skip over 16 children plus the value pointer
// stack: end_of_branch_ptr, children_ptr, node_ptr, load_value, retdest
DUP1 %set_trie_data_size
// Now the top of the stack points to where the branch node will end and the
// value will begin, if there is a value. But we need to ask the prover if a
// value is present, and point to null if not.
// stack: end_of_branch_ptr, children_ptr, node_ptr, load_value, retdest
PROVER_INPUT(mpt)
// stack: is_value_present, end_of_branch_ptr, children_ptr, node_ptr, load_value, retdest
%jumpi(load_mpt_branch_value_present)
// There is no value present, so value_ptr = null.
%stack (end_of_branch_ptr) -> (0)
// stack: value_ptr, children_ptr, node_ptr, load_value, retdest
%jump(load_mpt_branch_after_load_value)
load_mpt_branch_value_present:
// stack: value_ptr, children_ptr, node_ptr, load_value, retdest
PUSH load_mpt_branch_after_load_value
DUP5 // load_value
JUMP
load_mpt_branch_after_load_value:
// stack: value_ptr, children_ptr, node_ptr, load_value, retdest
SWAP1
// stack: children_ptr, value_ptr, node_ptr, load_value, retdest
// Load the 16 children.
%rep 16
DUP4 // load_value
%load_mpt
// stack: child_ptr, next_child_ptr_ptr, value_ptr, node_ptr, load_value, retdest
DUP2
// stack: next_child_ptr_ptr, child_ptr, next_child_ptr_ptr, value_ptr, node_ptr, load_value, retdest
%mstore_trie_data
// stack: next_child_ptr_ptr, value_ptr, node_ptr, load_value, retdest
%increment
// stack: next_child_ptr_ptr, value_ptr, node_ptr, load_value, retdest
%endrep
// stack: value_ptr_ptr, value_ptr, node_ptr, load_value, retdest
%mstore_trie_data
%stack (node_ptr, load_value, retdest) -> (retdest, node_ptr)
JUMP
load_mpt_extension:
// stack: node_type, load_value, retdest
%get_trie_data_size
// stack: node_ptr, node_type, load_value, retdest
SWAP1 %append_to_trie_data
// stack: node_ptr, load_value, retdest
PROVER_INPUT(mpt) // read num_nibbles
%append_to_trie_data
PROVER_INPUT(mpt) // read packed_nibbles
%append_to_trie_data
// stack: node_ptr, load_value, retdest
%get_trie_data_size
// stack: child_ptr_ptr, node_ptr, load_value, retdest
// Increment trie_data_size, to leave room for child_ptr_ptr, before we load our child.
DUP1 %increment %set_trie_data_size
%stack (child_ptr_ptr, node_ptr, load_value, retdest)
-> (load_value, load_mpt_extension_after_load_mpt,
child_ptr_ptr, retdest, node_ptr)
%jump(load_mpt)
load_mpt_extension_after_load_mpt:
// stack: child_ptr, child_ptr_ptr, retdest, node_ptr
SWAP1 %mstore_trie_data
// stack: retdest, node_ptr
JUMP
load_mpt_leaf:
// stack: node_type, load_value, retdest
%get_trie_data_size
// stack: node_ptr, node_type, load_value, retdest
SWAP1 %append_to_trie_data
// stack: node_ptr, load_value, retdest
PROVER_INPUT(mpt) // read num_nibbles
%append_to_trie_data
PROVER_INPUT(mpt) // read packed_nibbles
%append_to_trie_data
// stack: node_ptr, load_value, retdest
// We save value_ptr_ptr = get_trie_data_size, then increment trie_data_size
// to skip over the slot for value_ptr_ptr. We will write to value_ptr_ptr
// after the load_value call.
%get_trie_data_size
// stack: value_ptr_ptr, node_ptr, load_value, retdest
DUP1 %increment
// stack: value_ptr, value_ptr_ptr, node_ptr, load_value, retdest
DUP1 %set_trie_data_size
// stack: value_ptr, value_ptr_ptr, node_ptr, load_value, retdest
%stack (value_ptr, value_ptr_ptr, node_ptr, load_value, retdest)
-> (load_value, load_mpt_leaf_after_load_value,
value_ptr_ptr, value_ptr, retdest, node_ptr)
JUMP
load_mpt_leaf_after_load_value:
// stack: value_ptr_ptr, value_ptr, retdest, node_ptr
%mstore_trie_data
// stack: retdest, node_ptr
JUMP
load_mpt_digest:
// stack: node_type, load_value, retdest
%get_trie_data_size
// stack: node_ptr, node_type, load_value, retdest
SWAP1 %append_to_trie_data
// stack: node_ptr, load_value, retdest
PROVER_INPUT(mpt) // read digest
%append_to_trie_data
%stack (node_ptr, load_value, retdest) -> (retdest, node_ptr)
JUMP
// Convenience macro to call load_mpt and return where we left off.
// Pre stack: load_value
// Post stack: node_ptr
%macro load_mpt
%stack (load_value) -> (load_value, %%after)
%jump(load_mpt)
%%after:
%endmacro
// Convenience macro to call load_mpt and return where we left off.
// Pre stack: (empty)
// Post stack: node_ptr
%macro load_mpt(load_value)
PUSH %%after
PUSH $load_value
%jump(load_mpt)
%%after:
%endmacro
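The removed load_mpt routines built the flat TrieData layout in-kernel; after this change the prover constructs the same layout natively and preinitializes the segment. Below is a hypothetical Rust sketch of that arena layout, with offset 0 reserved as the null pointer; u64 stands in for U256, and the numeric type tags are placeholders rather than the kernel's actual @MPT_NODE_* constants.

const NODE_EMPTY: u64 = 0; // TrieData[0] = 0 doubles as the empty node
const NODE_BRANCH: u64 = 2; // placeholder tag
const NODE_HASH: u64 = 4; // placeholder tag

struct TrieData(Vec<u64>);

impl TrieData {
    fn new() -> Self {
        TrieData(vec![NODE_EMPTY])
    }

    // A hash node is [type, digest]; returns the node's pointer.
    fn push_hash_node(&mut self, digest: u64) -> usize {
        let ptr = self.0.len();
        self.0.push(NODE_HASH);
        self.0.push(digest);
        ptr
    }

    // A branch node is [type, 16 child pointers, value pointer].
    fn push_branch(&mut self, children: [usize; 16], value_ptr: usize) -> usize {
        let ptr = self.0.len();
        self.0.push(NODE_BRANCH);
        for child in children {
            self.0.push(child as u64);
        }
        self.0.push(value_ptr as u64);
        ptr
    }
}

fn main() {
    let mut data = TrieData::new();
    let child = data.push_hash_node(0xabc);
    let mut children = [0usize; 16];
    children[3] = child;
    let root = data.push_branch(children, 0); // null value pointer: no value
    assert_eq!(root, 3); // null pointer cell + 2-cell hash node precede the branch
}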

View File

@ -1,150 +0,0 @@
global mpt_load_state_trie_value:
// stack: retdest
// Load and append the nonce and balance.
PROVER_INPUT(mpt) %append_to_trie_data
PROVER_INPUT(mpt) %append_to_trie_data
// Now increment the trie data size by 2, to leave room for our storage trie
// pointer and code hash fields, before calling load_mpt which will append
// our storage trie data.
%get_trie_data_size
// stack: storage_trie_ptr_ptr, retdest
DUP1 %add_const(2)
// stack: storage_trie_ptr, storage_trie_ptr_ptr, retdest
%set_trie_data_size
// stack: storage_trie_ptr_ptr, retdest
%load_mpt(mpt_load_storage_trie_value)
// stack: storage_trie_ptr, storage_trie_ptr_ptr, retdest
DUP2 %mstore_trie_data
// stack: storage_trie_ptr_ptr, retdest
%increment
// stack: code_hash_ptr, retdest
PROVER_INPUT(mpt)
// stack: code_hash, code_hash_ptr, retdest
SWAP1 %mstore_trie_data
// stack: retdest
JUMP
global mpt_load_txn_trie_value:
// stack: retdest
PROVER_INPUT(mpt)
// stack: rlp_len, retdest
// The first element is the rlp length
DUP1 %append_to_trie_data
PUSH 0
mpt_load_loop:
// stack: i, rlp_len, retdest
DUP2 DUP2 EQ %jumpi(mpt_load_end)
PROVER_INPUT(mpt) %append_to_trie_data
%increment
%jump(mpt_load_loop)
mpt_load_end:
// stack: i, rlp_len, retdest
%pop2
JUMP
global mpt_load_receipt_trie_value:
// stack: retdest
// Load first byte. It is either `payload_len` or the transaction type.
PROVER_INPUT(mpt) DUP1 %append_to_trie_data
// If the first byte is less than 3, then it is the transaction type, equal to either 1 or 2.
// In that case, we still need to load the payload length.
%lt_const(3) %jumpi(mpt_load_payload_len)
mpt_load_after_type:
// Load status.
PROVER_INPUT(mpt) %append_to_trie_data
// Load cum_gas_used.
PROVER_INPUT(mpt) %append_to_trie_data
// Load bloom.
%rep 256
PROVER_INPUT(mpt) %append_to_trie_data
%endrep
// Load logs_payload_len.
PROVER_INPUT(mpt) %append_to_trie_data
// Load num_logs.
PROVER_INPUT(mpt)
DUP1
%append_to_trie_data
// stack: num_logs, retdest
// Load logs.
PUSH 0
mpt_load_receipt_trie_value_logs_loop:
// stack: i, num_logs, retdest
DUP2 DUP2 EQ
// stack: i == num_logs, i, num_logs, retdest
%jumpi(mpt_load_receipt_trie_value_end)
// stack: i, num_logs, retdest
// Load log_payload_len.
PROVER_INPUT(mpt) %append_to_trie_data
// Load address.
PROVER_INPUT(mpt) %append_to_trie_data
// Load num_topics.
PROVER_INPUT(mpt)
DUP1
%append_to_trie_data
// stack: num_topics, i, num_logs, retdest
// Load topics.
PUSH 0
mpt_load_receipt_trie_value_topics_loop:
// stack: j, num_topics, i, num_logs, retdest
DUP2 DUP2 EQ
// stack: j == num_topics, j, num_topics, i, num_logs, retdest
%jumpi(mpt_load_receipt_trie_value_topics_end)
// stack: j, num_topics, i, num_logs, retdest
// Load topic.
PROVER_INPUT(mpt) %append_to_trie_data
%increment
%jump(mpt_load_receipt_trie_value_topics_loop)
mpt_load_receipt_trie_value_topics_end:
// stack: num_topics, num_topics, i, num_logs, retdest
%pop2
// stack: i, num_logs, retdest
// Load data_len.
PROVER_INPUT(mpt)
DUP1
%append_to_trie_data
// stack: data_len, i, num_logs, retdest
// Load data.
PUSH 0
mpt_load_receipt_trie_value_data_loop:
// stack: j, data_len, i, num_logs, retdest
DUP2 DUP2 EQ
// stack: j == data_len, j, data_len, i, num_logs, retdest
%jumpi(mpt_load_receipt_trie_value_data_end)
// stack: j, data_len, i, num_logs, retdest
// Load data byte.
PROVER_INPUT(mpt) %append_to_trie_data
%increment
%jump(mpt_load_receipt_trie_value_data_loop)
mpt_load_receipt_trie_value_data_end:
// stack: data_len, data_len, i, num_logs, retdest
%pop2
%increment
%jump(mpt_load_receipt_trie_value_logs_loop)
mpt_load_receipt_trie_value_end:
// stack: num_logs, num_logs, retdest
%pop2
JUMP
mpt_load_payload_len:
// stack: retdest
PROVER_INPUT(mpt) %append_to_trie_data
%jump(mpt_load_after_type)
global mpt_load_storage_trie_value:
// stack: retdest
PROVER_INPUT(mpt)
%append_to_trie_data
// stack: retdest
JUMP
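For reference, the receipt values that used to be loaded here follow the flat layout described in the hashing code: [payload_len, status, cum_gas_used, bloom, logs_payload_len, num_logs, logs...], with each log stored as [payload_len, address, num_topics, topics..., data_len, data...]. The Rust sketch below only illustrates that cell ordering; the payload lengths in the real layout are RLP byte counts, and all field types here are simplified stand-ins for 256-bit words.

struct Log {
    payload_len: u64,
    address: u64,
    topics: Vec<u64>,
    data: Vec<u64>,
}

fn flatten_receipt(
    payload_len: u64,
    status: u64,
    cum_gas_used: u64,
    bloom: &[u64; 256],
    logs: &[Log],
) -> Vec<u64> {
    let mut out = vec![payload_len, status, cum_gas_used];
    out.extend_from_slice(bloom);
    // Simplification: the real logs_payload_len is the RLP payload length of the logs list.
    let logs_payload_len: u64 = logs.iter().map(|l| l.payload_len).sum();
    out.push(logs_payload_len);
    out.push(logs.len() as u64);
    for log in logs {
        out.push(log.payload_len);
        out.push(log.address);
        out.push(log.topics.len() as u64);
        out.extend_from_slice(&log.topics);
        out.push(log.data.len() as u64);
        out.extend_from_slice(&log.data);
    }
    out
}

fn main() {
    let bloom = [0u64; 256];
    let log = Log { payload_len: 6, address: 0xaaaa, topics: vec![1, 2], data: vec![0xff] };
    let flat = flatten_receipt(265, 1, 21_000, &bloom, &[log]);
    assert_eq!(flat.len(), 261 + 7); // 261 cells before the logs, 7 for the single log
}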

View File

@ -275,6 +275,12 @@ impl<'a> Interpreter<'a> {
code.into_iter().map(U256::from).collect();
}
pub(crate) fn set_memory_multi_addresses(&mut self, addrs: &[(MemoryAddress, U256)]) {
for &(addr, val) in addrs {
self.generation_state.memory.set(addr, val);
}
}
pub(crate) fn get_jumpdest_bits(&self, context: usize) -> Vec<bool> {
self.generation_state.memory.contexts[context].segments[Segment::JumpdestBits as usize]
.content

View File

@ -13,12 +13,57 @@ use crate::cpu::kernel::constants::context_metadata::ContextMetadata::{self, Gas
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::mpt::nibbles_64;
use crate::generation::mpt::{all_mpt_prover_inputs_reversed, AccountRlp};
use crate::generation::mpt::{load_all_mpts, AccountRlp};
use crate::generation::TrieInputs;
use crate::memory::segments::Segment;
use crate::witness::memory::MemoryAddress;
use crate::Node;
pub(crate) fn initialize_mpts(interpreter: &mut Interpreter, trie_inputs: &TrieInputs) {
// Load all MPTs.
let (trie_root_ptrs, trie_data) =
load_all_mpts(trie_inputs).expect("Invalid MPT data for preinitialization");
let state_addr = MemoryAddress::new(
0,
Segment::GlobalMetadata,
GlobalMetadata::StateTrieRoot as usize,
);
let txn_addr = MemoryAddress::new(
0,
Segment::GlobalMetadata,
GlobalMetadata::TransactionTrieRoot as usize,
);
let receipts_addr = MemoryAddress::new(
0,
Segment::GlobalMetadata,
GlobalMetadata::ReceiptTrieRoot as usize,
);
let len_addr = MemoryAddress::new(
0,
Segment::GlobalMetadata,
GlobalMetadata::TrieDataSize as usize,
);
let to_set = [
(state_addr, trie_root_ptrs.state_root_ptr.into()),
(txn_addr, trie_root_ptrs.txn_root_ptr.into()),
(receipts_addr, trie_root_ptrs.receipt_root_ptr.into()),
(len_addr, trie_data.len().into()),
];
interpreter.set_memory_multi_addresses(&to_set);
for (i, data) in trie_data.iter().enumerate() {
let trie_addr = MemoryAddress::new(0, Segment::TrieData, i);
interpreter
.generation_state
.memory
.set(trie_addr, data.into());
}
}
// Test account with a given code hash.
fn test_account(code: &[u8]) -> AccountRlp {
AccountRlp {
@ -42,20 +87,12 @@ fn prepare_interpreter(
address: Address,
account: &AccountRlp,
) -> Result<()> {
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let mpt_insert_state_trie = KERNEL.global_labels["mpt_insert_state_trie"];
let mpt_hash_state_trie = KERNEL.global_labels["mpt_hash_state_trie"];
let mut state_trie: HashedPartialTrie = Default::default();
let trie_inputs = Default::default();
interpreter.generation_state.registers.program_counter = load_all_mpts;
interpreter.push(0xDEADBEEFu32.into());
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs)
.map_err(|err| anyhow!("Invalid MPT data: {:?}", err))?;
interpreter.run()?;
assert_eq!(interpreter.stack(), vec![]);
initialize_mpts(interpreter, &trie_inputs);
let k = nibbles_64(U256::from_big_endian(
keccak(address.to_fixed_bytes()).as_bytes(),
@ -93,15 +130,16 @@ fn prepare_interpreter(
// Now, execute mpt_hash_state_trie.
interpreter.generation_state.registers.program_counter = mpt_hash_state_trie;
interpreter.push(0xDEADBEEFu32.into());
interpreter.push(1.into()); // Initial length of the trie data segment, unused.
interpreter.run()?;
assert_eq!(
interpreter.stack().len(),
1,
"Expected 1 item on stack after hashing, found {:?}",
2,
"Expected 2 items on stack after hashing, found {:?}",
interpreter.stack()
);
let hash = H256::from_uint(&interpreter.stack()[0]);
let hash = H256::from_uint(&interpreter.stack()[1]);
state_trie.insert(k, rlp::encode(account).to_vec());
let expected_state_trie_hash = state_trie.hash();
@ -125,6 +163,7 @@ fn test_extcodesize() -> Result<()> {
// Test `extcodesize`
interpreter.generation_state.registers.program_counter = extcodesize;
interpreter.pop();
interpreter.pop();
assert!(interpreter.stack().is_empty());
interpreter.push(0xDEADBEEFu32.into());
interpreter.push(U256::from_big_endian(address.as_bytes()));
@ -173,6 +212,7 @@ fn test_extcodecopy() -> Result<()> {
// Test `extcodecopy`
interpreter.generation_state.registers.program_counter = extcodecopy;
interpreter.pop();
interpreter.pop();
assert!(interpreter.stack().is_empty());
interpreter.push(size.into());
interpreter.push(offset.into());
@ -207,15 +247,7 @@ fn prepare_interpreter_all_accounts(
code: &[u8],
) -> Result<()> {
// Load all MPTs.
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
interpreter.generation_state.registers.program_counter = load_all_mpts;
interpreter.push(0xDEADBEEFu32.into());
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs)
.map_err(|err| anyhow!("Invalid MPT data: {:?}", err))?;
interpreter.run()?;
initialize_mpts(interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
// Switch context and initialize memory with the data we need for the tests.
@ -311,16 +343,17 @@ fn sstore() -> Result<()> {
interpreter.set_is_kernel(true);
interpreter.set_context(0);
interpreter.push(0xDEADBEEFu32.into());
interpreter.push(1.into()); // Initial length of the trie data segment, unused.
interpreter.run()?;
assert_eq!(
interpreter.stack().len(),
1,
"Expected 1 item on stack after hashing, found {:?}",
2,
"Expected 2 items on stack after hashing, found {:?}",
interpreter.stack()
);
let hash = H256::from_uint(&interpreter.stack()[0]);
let hash = H256::from_uint(&interpreter.stack()[1]);
let mut expected_state_trie_after = HashedPartialTrie::from(Node::Empty);
expected_state_trie_after.insert(addr_nibbles, rlp::encode(&account_after).to_vec());
@ -389,16 +422,26 @@ fn sload() -> Result<()> {
interpreter.set_is_kernel(true);
interpreter.set_context(0);
interpreter.push(0xDEADBEEFu32.into());
interpreter.push(1.into()); // Initial length of the trie data segment, unused.
interpreter.run()?;
assert_eq!(
interpreter.stack().len(),
1,
"Expected 1 item on stack after hashing, found {:?}",
2,
"Expected 2 items on stack after hashing, found {:?}",
interpreter.stack()
);
let hash = H256::from_uint(&interpreter.stack()[0]);
let trie_data_segment_len = interpreter.stack()[0];
assert_eq!(
trie_data_segment_len,
interpreter
.get_memory_segment(Segment::TrieData)
.len()
.into()
);
let hash = H256::from_uint(&interpreter.stack()[1]);
let expected_state_trie_hash = state_trie_before.hash();
assert_eq!(hash, expected_state_trie_hash);

View File

@ -12,7 +12,8 @@ use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::generation::mpt::{all_mpt_prover_inputs_reversed, AccountRlp, LegacyReceiptRlp};
use crate::cpu::kernel::tests::account_code::initialize_mpts;
use crate::generation::mpt::{AccountRlp, LegacyReceiptRlp};
use crate::generation::rlp::all_rlp_prover_inputs_reversed;
use crate::generation::TrieInputs;
use crate::memory::segments::Segment;
@ -28,14 +29,7 @@ fn prepare_interpreter(
transaction: &[u8],
contract_code: HashMap<H256, Vec<u8>>,
) {
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
interpreter.generation_state.registers.program_counter = load_all_mpts;
interpreter.push(0xDEADBEEFu32.into());
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs).expect("Invalid MPT data.");
interpreter.run().expect("MPT loading failed.");
initialize_mpts(interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
// Set necessary `GlobalMetadata`.

View File

@ -7,8 +7,9 @@ use rand::{thread_rng, Rng};
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::account_code::initialize_mpts;
use crate::cpu::kernel::tests::mpt::nibbles_64;
use crate::generation::mpt::{all_mpt_prover_inputs_reversed, AccountRlp};
use crate::generation::mpt::AccountRlp;
use crate::Node;
// Test account with a given code hash.
@ -28,19 +29,12 @@ fn prepare_interpreter(
address: Address,
account: &AccountRlp,
) -> Result<()> {
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let mpt_insert_state_trie = KERNEL.global_labels["mpt_insert_state_trie"];
let mpt_hash_state_trie = KERNEL.global_labels["mpt_hash_state_trie"];
let mut state_trie: HashedPartialTrie = Default::default();
let trie_inputs = Default::default();
interpreter.generation_state.registers.program_counter = load_all_mpts;
interpreter.push(0xDEADBEEFu32.into());
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs)
.map_err(|err| anyhow!("Invalid MPT data: {:?}", err))?;
interpreter.run()?;
initialize_mpts(interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
let k = nibbles_64(U256::from_big_endian(
@ -79,15 +73,16 @@ fn prepare_interpreter(
// Now, execute mpt_hash_state_trie.
interpreter.generation_state.registers.program_counter = mpt_hash_state_trie;
interpreter.push(0xDEADBEEFu32.into());
interpreter.push(1.into()); // Initial trie data segment size, unused.
interpreter.run()?;
assert_eq!(
interpreter.stack().len(),
1,
"Expected 1 item on stack after hashing, found {:?}",
2,
"Expected 2 items on stack after hashing, found {:?}",
interpreter.stack()
);
let hash = H256::from_uint(&interpreter.stack()[0]);
let hash = H256::from_uint(&interpreter.stack()[1]);
state_trie.insert(k, rlp::encode(account).to_vec());
let expected_state_trie_hash = state_trie.hash();
@ -110,6 +105,7 @@ fn test_balance() -> Result<()> {
// Test `balance`
interpreter.generation_state.registers.program_counter = KERNEL.global_labels["balance"];
interpreter.pop();
interpreter.pop();
assert!(interpreter.stack().is_empty());
interpreter.push(0xDEADBEEFu32.into());
interpreter.push(U256::from_big_endian(address.as_bytes()));

View File

@ -6,8 +6,9 @@ use ethereum_types::{BigEndianHash, H256};
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::account_code::initialize_mpts;
use crate::cpu::kernel::tests::mpt::{nibbles_64, test_account_1_rlp, test_account_2};
use crate::generation::mpt::{all_mpt_prover_inputs_reversed, AccountRlp};
use crate::generation::mpt::AccountRlp;
use crate::generation::TrieInputs;
use crate::Node;
@ -65,16 +66,14 @@ fn test_state_trie(
receipts_trie: Default::default(),
storage_tries: vec![],
};
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let mpt_insert_state_trie = KERNEL.global_labels["mpt_insert_state_trie"];
let mpt_delete = KERNEL.global_labels["mpt_delete"];
let mpt_hash_state_trie = KERNEL.global_labels["mpt_hash_state_trie"];
let initial_stack = vec![0xDEADBEEFu32.into()];
let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack);
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs).map_err(|_| anyhow!("Invalid MPT data"))?;
interpreter.run()?;
let initial_stack = vec![];
let mut interpreter = Interpreter::new_with_kernel(0, initial_stack);
initialize_mpts(&mut interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
// Next, execute mpt_insert_state_trie.
@ -120,6 +119,7 @@ fn test_state_trie(
// Now, execute mpt_hash_state_trie.
interpreter.generation_state.registers.program_counter = mpt_hash_state_trie;
interpreter.push(0xDEADBEEFu32.into());
interpreter.push(1.into()); // Initial length of the trie data segment, unused.
interpreter.run()?;
let state_trie_hash = H256::from_uint(&interpreter.pop());

View File

@ -4,8 +4,8 @@ use ethereum_types::{BigEndianHash, H256};
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::account_code::initialize_mpts;
use crate::cpu::kernel::tests::mpt::{extension_to_leaf, test_account_1_rlp, test_account_2_rlp};
use crate::generation::mpt::all_mpt_prover_inputs_reversed;
use crate::generation::TrieInputs;
use crate::Node;
@ -108,28 +108,27 @@ fn mpt_hash_branch_to_leaf() -> Result<()> {
}
fn test_state_trie(trie_inputs: TrieInputs) -> Result<()> {
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let mpt_hash_state_trie = KERNEL.global_labels["mpt_hash_state_trie"];
let initial_stack = vec![0xDEADBEEFu32.into()];
let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack);
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs).map_err(|_| anyhow!("Invalid MPT data"))?;
interpreter.run()?;
let initial_stack = vec![];
let mut interpreter = Interpreter::new_with_kernel(0, initial_stack);
initialize_mpts(&mut interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
// Now, execute mpt_hash_state_trie.
interpreter.generation_state.registers.program_counter = mpt_hash_state_trie;
interpreter.push(0xDEADBEEFu32.into());
interpreter.push(1.into()); // Initial length of the trie data segment, unused.
interpreter.run()?;
assert_eq!(
interpreter.stack().len(),
1,
"Expected 1 item on stack, found {:?}",
2,
"Expected 2 items on stack, found {:?}",
interpreter.stack()
);
let hash = H256::from_uint(&interpreter.stack()[0]);
let hash = H256::from_uint(&interpreter.stack()[1]);
let expected_state_trie_hash = trie_inputs.state_trie.hash();
assert_eq!(hash, expected_state_trie_hash);

View File

@ -6,10 +6,11 @@ use ethereum_types::{BigEndianHash, H256};
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::account_code::initialize_mpts;
use crate::cpu::kernel::tests::mpt::{
nibbles_64, nibbles_count, test_account_1_rlp, test_account_2,
};
use crate::generation::mpt::{all_mpt_prover_inputs_reversed, AccountRlp};
use crate::generation::mpt::AccountRlp;
use crate::generation::TrieInputs;
use crate::Node;
@ -168,15 +169,13 @@ fn test_state_trie(
receipts_trie: Default::default(),
storage_tries: vec![],
};
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let mpt_insert_state_trie = KERNEL.global_labels["mpt_insert_state_trie"];
let mpt_hash_state_trie = KERNEL.global_labels["mpt_hash_state_trie"];
let initial_stack = vec![0xDEADBEEFu32.into()];
let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack);
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs).map_err(|_| anyhow!("Invalid MPT data"))?;
interpreter.run()?;
let initial_stack = vec![];
let mut interpreter = Interpreter::new_with_kernel(0, initial_stack);
initialize_mpts(&mut interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
// Next, execute mpt_insert_state_trie.
@ -212,15 +211,16 @@ fn test_state_trie(
// Now, execute mpt_hash_state_trie.
interpreter.generation_state.registers.program_counter = mpt_hash_state_trie;
interpreter.push(0xDEADBEEFu32.into());
interpreter.push(1.into()); // Initial length of the trie data segment, unused.
interpreter.run()?;
assert_eq!(
interpreter.stack().len(),
1,
"Expected 1 item on stack after hashing, found {:?}",
2,
"Expected 2 items on stack after hashing, found {:?}",
interpreter.stack()
);
let hash = H256::from_uint(&interpreter.stack()[0]);
let hash = H256::from_uint(&interpreter.stack()[1]);
state_trie.insert(k, rlp::encode(&account).to_vec());
let expected_state_trie_hash = state_trie.hash();

View File

@ -10,8 +10,8 @@ use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::constants::trie_type::PartialTrieType;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::account_code::initialize_mpts;
use crate::cpu::kernel::tests::mpt::{extension_to_leaf, test_account_1, test_account_1_rlp};
use crate::generation::mpt::all_mpt_prover_inputs_reversed;
use crate::generation::TrieInputs;
use crate::Node;
@ -24,17 +24,13 @@ fn load_all_mpts_empty() -> Result<()> {
storage_tries: vec![],
};
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let initial_stack = vec![0xDEADBEEFu32.into()];
let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack);
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs)
.map_err(|err| anyhow!("Invalid MPT data: {:?}", err))?;
interpreter.run()?;
let initial_stack = vec![];
let mut interpreter = Interpreter::new_with_kernel(0, initial_stack);
initialize_mpts(&mut interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
assert_eq!(interpreter.get_trie_data(), vec![]);
// We need to have the first element in `TrieData` be 0.
assert_eq!(interpreter.get_trie_data(), vec![0.into()]);
assert_eq!(
interpreter.get_global_metadata_field(GlobalMetadata::StateTrieRoot),
@ -65,14 +61,9 @@ fn load_all_mpts_leaf() -> Result<()> {
storage_tries: vec![],
};
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let initial_stack = vec![0xDEADBEEFu32.into()];
let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack);
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs)
.map_err(|err| anyhow!("Invalid MPT data: {:?}", err))?;
interpreter.run()?;
let initial_stack = vec![];
let mut interpreter = Interpreter::new_with_kernel(0, initial_stack);
initialize_mpts(&mut interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
let type_leaf = U256::from(PartialTrieType::Leaf as u32);
@ -116,14 +107,9 @@ fn load_all_mpts_hash() -> Result<()> {
storage_tries: vec![],
};
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let initial_stack = vec![0xDEADBEEFu32.into()];
let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack);
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs)
.map_err(|err| anyhow!("Invalid MPT data: {:?}", err))?;
interpreter.run()?;
let initial_stack = vec![];
let mut interpreter = Interpreter::new_with_kernel(0, initial_stack);
initialize_mpts(&mut interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
let type_hash = U256::from(PartialTrieType::Hash as u32);
@ -159,14 +145,9 @@ fn load_all_mpts_empty_branch() -> Result<()> {
storage_tries: vec![],
};
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let initial_stack = vec![0xDEADBEEFu32.into()];
let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack);
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs)
.map_err(|err| anyhow!("Invalid MPT data: {:?}", err))?;
interpreter.run()?;
let initial_stack = vec![];
let mut interpreter = Interpreter::new_with_kernel(0, initial_stack);
initialize_mpts(&mut interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
let type_branch = U256::from(PartialTrieType::Branch as u32);
@ -216,14 +197,9 @@ fn load_all_mpts_ext_to_leaf() -> Result<()> {
storage_tries: vec![],
};
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let initial_stack = vec![0xDEADBEEFu32.into()];
let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack);
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs)
.map_err(|err| anyhow!("Invalid MPT data: {:?}", err))?;
interpreter.run()?;
let initial_stack = vec![];
let mut interpreter = Interpreter::new_with_kernel(0, initial_stack);
initialize_mpts(&mut interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
let type_extension = U256::from(PartialTrieType::Extension as u32);
@ -255,8 +231,6 @@ fn load_all_mpts_ext_to_leaf() -> Result<()> {
#[test]
fn load_mpt_txn_trie() -> Result<()> {
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let txn = hex!("f860010a830186a094095e7baea6a6c7c4c2dfeb977efac326af552e89808025a04a223955b0bd3827e3740a9a427d0ea43beb5bafa44a0204bf0a3306c8219f7ba0502c32d78f233e9e7ce9f5df3b576556d5d49731e0678fd5a068cdf359557b5b").to_vec();
let trie_inputs = TrieInputs {
@ -269,12 +243,9 @@ fn load_mpt_txn_trie() -> Result<()> {
storage_tries: vec![],
};
let initial_stack = vec![0xDEADBEEFu32.into()];
let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack);
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs)
.map_err(|err| anyhow!("Invalid MPT data: {:?}", err))?;
interpreter.run()?;
let initial_stack = vec![];
let mut interpreter = Interpreter::new_with_kernel(0, initial_stack);
initialize_mpts(&mut interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
let mut expected_trie_data = vec![

View File

@ -4,8 +4,8 @@ use ethereum_types::BigEndianHash;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::tests::account_code::initialize_mpts;
use crate::cpu::kernel::tests::mpt::{extension_to_leaf, test_account_1, test_account_1_rlp};
use crate::generation::mpt::all_mpt_prover_inputs_reversed;
use crate::generation::TrieInputs;
#[test]
@ -17,15 +17,11 @@ fn mpt_read() -> Result<()> {
storage_tries: vec![],
};
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let mpt_read = KERNEL.global_labels["mpt_read"];
let initial_stack = vec![0xdeadbeefu32.into()];
let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack);
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs)
.map_err(|err| anyhow!("Invalid MPT data: {:?}", err))?;
interpreter.run()?;
let initial_stack = vec![];
let mut interpreter = Interpreter::new_with_kernel(0, initial_stack);
initialize_mpts(&mut interpreter, &trie_inputs);
assert_eq!(interpreter.stack(), vec![]);
// Now, execute mpt_read on the state trie.

View File

@ -8,7 +8,8 @@ use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::constants::txn_fields::NormalizedTxnField;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::generation::mpt::{all_mpt_prover_inputs_reversed, LegacyReceiptRlp, LogRlp};
use crate::cpu::kernel::tests::account_code::initialize_mpts;
use crate::generation::mpt::{LegacyReceiptRlp, LogRlp};
use crate::memory::segments::Segment;
#[test]
@ -126,7 +127,7 @@ fn test_receipt_encoding() -> Result<()> {
// Get the expected RLP encoding.
let expected_rlp = rlp::encode(&rlp::encode(&receipt_1));
let initial_stack: Vec<U256> = vec![retdest, 0.into(), 0.into()];
let initial_stack: Vec<U256> = vec![retdest, 0.into(), 0.into(), 0.into()];
let mut interpreter = Interpreter::new_with_kernel(encode_receipt, initial_stack);
// Write data to memory.
@ -338,7 +339,6 @@ fn test_mpt_insert_receipt() -> Result<()> {
let retdest = 0xDEADBEEFu32.into();
let trie_inputs = Default::default();
let load_all_mpts = KERNEL.global_labels["load_all_mpts"];
let mpt_insert = KERNEL.global_labels["mpt_insert_receipt_trie"];
let num_topics = 3; // Both transactions have the same number of topics.
let payload_len = 423; // Total payload length for each receipt.
@ -409,11 +409,8 @@ fn test_mpt_insert_receipt() -> Result<()> {
// First, we load all mpts.
let initial_stack: Vec<U256> = vec![retdest];
let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack);
interpreter.generation_state.mpt_prover_inputs =
all_mpt_prover_inputs_reversed(&trie_inputs)
.map_err(|err| anyhow!("Invalid MPT data: {:?}", err))?;
interpreter.run()?;
let mut interpreter = Interpreter::new_with_kernel(0, vec![]);
initialize_mpts(&mut interpreter, &trie_inputs);
// If TrieData is empty, we need to push 0 because the first value is always 0.
let mut cur_trie_data = interpreter.get_memory_segment(Segment::TrieData);
@ -514,9 +511,10 @@ fn test_mpt_insert_receipt() -> Result<()> {
let mpt_hash_receipt = KERNEL.global_labels["mpt_hash_receipt_trie"];
interpreter.generation_state.registers.program_counter = mpt_hash_receipt;
interpreter.push(retdest);
interpreter.push(1.into()); // Initial length of the trie data segment, unused.
interpreter.run()?;
assert_eq!(
interpreter.stack()[0],
interpreter.stack()[1],
U256::from(hex!(
"da46cdd329bfedace32da95f2b344d314bc6f55f027d65f9f4ac04ee425e1f98"
))

View File

@ -33,6 +33,7 @@ pub(crate) mod rlp;
pub(crate) mod state;
mod trie_extractor;
use self::mpt::{load_all_mpts, TrieRootPtrs};
use crate::witness::util::mem_write_log;
/// Inputs needed for trace generation.
@ -195,11 +196,6 @@ pub fn generate_traces<F: RichField + Extendable<D>, const D: usize>(
timed!(timing, "simulate CPU", simulate_cpu(&mut state)?);
assert!(
state.mpt_prover_inputs.is_empty(),
"All MPT data should have been consumed"
);
log::info!(
"Trace lengths (before padding): {:?}",
state.traces.get_lengths()
@ -220,6 +216,7 @@ pub fn generate_traces<F: RichField + Extendable<D>, const D: usize>(
let gas_used_after = read_metadata(GlobalMetadata::BlockGasUsedAfter);
let txn_number_after = read_metadata(GlobalMetadata::TxnNumberAfter);
let trie_root_ptrs = state.trie_root_ptrs;
let extra_block_data = ExtraBlockData {
genesis_state_trie_root: inputs.genesis_state_trie_root,
txn_number_before: inputs.txn_number_before,

View File

@ -9,9 +9,13 @@ use keccak_hash::keccak;
use rlp::{Decodable, DecoderError, Encodable, PayloadInfo, Rlp, RlpStream};
use rlp_derive::{RlpDecodable, RlpEncodable};
use crate::cpu::kernel::constants::global_metadata::GlobalMetadata;
use crate::cpu::kernel::constants::trie_type::PartialTrieType;
use crate::generation::TrieInputs;
use crate::memory::segments::Segment;
use crate::util::h2u;
use crate::witness::errors::{ProgramError, ProverInputError};
use crate::witness::memory::MemoryAddress;
use crate::Node;
#[derive(RlpEncodable, RlpDecodable, Debug)]
@ -22,6 +26,13 @@ pub struct AccountRlp {
pub code_hash: H256,
}
#[derive(Clone, Debug)]
pub struct TrieRootPtrs {
pub state_root_ptr: usize,
pub txn_root_ptr: usize,
pub receipt_root_ptr: usize,
}
impl Default for AccountRlp {
fn default() -> Self {
Self {
@ -59,14 +70,6 @@ impl LegacyReceiptRlp {
}
}
pub(crate) fn all_mpt_prover_inputs_reversed(
trie_inputs: &TrieInputs,
) -> Result<Vec<U256>, ProgramError> {
let mut inputs = all_mpt_prover_inputs(trie_inputs)?;
inputs.reverse();
Ok(inputs)
}
pub(crate) fn parse_receipts(rlp: &[u8]) -> Result<Vec<U256>, ProgramError> {
let txn_type = match rlp.first().ok_or(ProgramError::InvalidRlp)? {
1 => 1,
@ -111,114 +114,114 @@ pub(crate) fn parse_receipts(rlp: &[u8]) -> Result<Vec<U256>, ProgramError> {
Ok(parsed_receipt)
}
/// Generate prover inputs for the initial MPT data, in the format expected by `mpt/load.asm`.
pub(crate) fn all_mpt_prover_inputs(trie_inputs: &TrieInputs) -> Result<Vec<U256>, ProgramError> {
let mut prover_inputs = vec![];
let storage_tries_by_state_key = trie_inputs
.storage_tries
.iter()
.map(|(hashed_address, storage_trie)| {
let key = Nibbles::from_bytes_be(hashed_address.as_bytes())
.expect("An H256 is 32 bytes long");
(key, storage_trie)
})
.collect();
mpt_prover_inputs_state_trie(
&trie_inputs.state_trie,
empty_nibbles(),
&mut prover_inputs,
&storage_tries_by_state_key,
)?;
mpt_prover_inputs(&trie_inputs.transactions_trie, &mut prover_inputs, &|rlp| {
let mut parsed_txn = vec![U256::from(rlp.len())];
parsed_txn.extend(rlp.iter().copied().map(U256::from));
Ok(parsed_txn)
})?;
mpt_prover_inputs(
&trie_inputs.receipts_trie,
&mut prover_inputs,
&parse_receipts,
)?;
Ok(prover_inputs)
fn parse_storage_value(value_rlp: &[u8]) -> Result<Vec<U256>, ProgramError> {
let value: U256 = rlp::decode(value_rlp).map_err(|_| ProgramError::InvalidRlp)?;
Ok(vec![value])
}
/// Given a trie, generate the prover input data for that trie. In essence, this serializes a trie
/// into a `U256` array, in a simple format which the kernel understands. For example, a leaf node
/// is serialized as `(TYPE_LEAF, key, value)`, where key is a `(nibbles, depth)` pair and `value`
/// is a variable-length structure which depends on which trie we're dealing with.
pub(crate) fn mpt_prover_inputs<F>(
const fn empty_nibbles() -> Nibbles {
Nibbles {
count: 0,
packed: U512::zero(),
}
}
fn load_mpt<F>(
trie: &HashedPartialTrie,
prover_inputs: &mut Vec<U256>,
trie_data: &mut Vec<U256>,
parse_value: &F,
) -> Result<(), ProgramError>
) -> Result<usize, ProgramError>
where
F: Fn(&[u8]) -> Result<Vec<U256>, ProgramError>,
{
prover_inputs.push((PartialTrieType::of(trie) as u32).into());
let node_ptr = trie_data.len();
let type_of_trie = PartialTrieType::of(trie) as u32;
if type_of_trie > 0 {
trie_data.push(type_of_trie.into());
}
match trie.deref() {
Node::Empty => Ok(()),
Node::Empty => Ok(0),
Node::Hash(h) => {
prover_inputs.push(U256::from_big_endian(h.as_bytes()));
Ok(())
trie_data.push(h2u(*h));
Ok(node_ptr)
}
Node::Branch { children, value } => {
// First, set children pointers to 0.
let first_child_ptr = trie_data.len();
trie_data.extend(vec![U256::zero(); 16]);
// Then, set value.
if value.is_empty() {
prover_inputs.push(U256::zero()); // value_present = 0
trie_data.push(U256::zero());
} else {
let parsed_value = parse_value(value)?;
prover_inputs.push(U256::one()); // value_present = 1
prover_inputs.extend(parsed_value);
}
for child in children {
mpt_prover_inputs(child, prover_inputs, parse_value)?;
trie_data.push((trie_data.len() + 1).into());
trie_data.extend(parsed_value);
}
Ok(())
// Now, load all children and update their pointers.
for (i, child) in children.iter().enumerate() {
let child_ptr = load_mpt(child, trie_data, parse_value)?;
trie_data[first_child_ptr + i] = child_ptr.into();
}
Ok(node_ptr)
}
Node::Extension { nibbles, child } => {
prover_inputs.push(nibbles.count.into());
prover_inputs.push(
trie_data.push(nibbles.count.into());
trie_data.push(
nibbles
.try_into_u256()
.map_err(|_| ProgramError::IntegerTooLarge)?,
);
mpt_prover_inputs(child, prover_inputs, parse_value)
trie_data.push((trie_data.len() + 1).into());
let child_ptr = load_mpt(child, trie_data, parse_value)?;
if child_ptr == 0 {
trie_data.push(0.into());
}
Ok(node_ptr)
}
Node::Leaf { nibbles, value } => {
prover_inputs.push(nibbles.count.into());
prover_inputs.push(
trie_data.push(nibbles.count.into());
trie_data.push(
nibbles
.try_into_u256()
.map_err(|_| ProgramError::IntegerTooLarge)?,
);
let leaf = parse_value(value)?;
prover_inputs.extend(leaf);
Ok(())
// Set `value_ptr_ptr`.
trie_data.push((trie_data.len() + 1).into());
let leaf = parse_value(value)?;
trie_data.extend(leaf);
Ok(node_ptr)
}
}
}
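For orientation, here is a minimal sketch of the flat `TrieData` layout that `load_mpt` builds for a trie consisting of a single leaf with a one-word parsed value. The helper name `single_leaf_trie_data` and its parameters are illustrative only; the sketch assumes it sits inside this crate so that `PartialTrieType` and `U256` are in scope.

use ethereum_types::U256;

use crate::cpu::kernel::constants::trie_type::PartialTrieType;

// Illustrative only: the `TrieData` words that `load_mpt` appends for a single
// leaf node, including the leading null word at index 0.
fn single_leaf_trie_data(key_nibble_count: usize, packed_key: U256, value: U256) -> Vec<U256> {
    vec![
        U256::zero(),                          // index 0: null entry, never a valid node pointer
        (PartialTrieType::Leaf as u32).into(), // index 1: node type; this is `node_ptr` for the leaf
        key_nibble_count.into(),               // index 2: number of key nibbles
        packed_key,                            // index 3: packed key nibbles
        5.into(),                              // index 4: value pointer, `trie_data.len() + 1` at push time
        value,                                 // index 5: parsed leaf value
    ]
}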
/// Like `mpt_prover_inputs`, but for the state trie, which is a bit unique since each value
/// leads to a storage trie which we recursively traverse.
pub(crate) fn mpt_prover_inputs_state_trie(
fn load_state_trie(
trie: &HashedPartialTrie,
key: Nibbles,
prover_inputs: &mut Vec<U256>,
trie_data: &mut Vec<U256>,
storage_tries_by_state_key: &HashMap<Nibbles, &HashedPartialTrie>,
) -> Result<(), ProgramError> {
prover_inputs.push((PartialTrieType::of(trie) as u32).into());
) -> Result<usize, ProgramError> {
let node_ptr = trie_data.len();
let type_of_trie = PartialTrieType::of(trie) as u32;
if type_of_trie > 0 {
trie_data.push(type_of_trie.into());
}
match trie.deref() {
Node::Empty => Ok(()),
Node::Empty => Ok(0),
Node::Hash(h) => {
prover_inputs.push(U256::from_big_endian(h.as_bytes()));
Ok(())
trie_data.push(h2u(*h));
Ok(node_ptr)
}
Node::Branch { children, value } => {
if !value.is_empty() {
@ -226,37 +229,43 @@ pub(crate) fn mpt_prover_inputs_state_trie(
ProverInputError::InvalidMptInput,
));
}
prover_inputs.push(U256::zero()); // value_present = 0
// First, set children pointers to 0.
let first_child_ptr = trie_data.len();
trie_data.extend(vec![U256::zero(); 16]);
// Then, set value pointer to 0.
trie_data.push(U256::zero());
// Now, load all children and update their pointers.
for (i, child) in children.iter().enumerate() {
let extended_key = key.merge_nibbles(&Nibbles {
count: 1,
packed: i.into(),
});
mpt_prover_inputs_state_trie(
child,
extended_key,
prover_inputs,
storage_tries_by_state_key,
)?;
let child_ptr =
load_state_trie(child, extended_key, trie_data, storage_tries_by_state_key)?;
trie_data[first_child_ptr + i] = child_ptr.into();
}
Ok(())
Ok(node_ptr)
}
Node::Extension { nibbles, child } => {
prover_inputs.push(nibbles.count.into());
prover_inputs.push(
trie_data.push(nibbles.count.into());
trie_data.push(
nibbles
.try_into_u256()
.map_err(|_| ProgramError::IntegerTooLarge)?,
);
// Set `value_ptr_ptr`.
trie_data.push((trie_data.len() + 1).into());
let extended_key = key.merge_nibbles(nibbles);
mpt_prover_inputs_state_trie(
child,
extended_key,
prover_inputs,
storage_tries_by_state_key,
)
let child_ptr =
load_state_trie(child, extended_key, trie_data, storage_tries_by_state_key)?;
if child_ptr == 0 {
trie_data.push(0.into());
}
Ok(node_ptr)
}
Node::Leaf { nibbles, value } => {
let account: AccountRlp = rlp::decode(value).map_err(|_| ProgramError::InvalidRlp)?;
@ -275,34 +284,69 @@ pub(crate) fn mpt_prover_inputs_state_trie(
.unwrap_or(&storage_hash_only);
assert_eq!(storage_trie.hash(), storage_root,
"In TrieInputs, an account's storage_root didn't match the associated storage trie hash");
"In TrieInputs, an account's storage_root didn't match the associated storage trie hash");
prover_inputs.push(nibbles.count.into());
prover_inputs.push(
trie_data.push(nibbles.count.into());
trie_data.push(
nibbles
.try_into_u256()
.map_err(|_| ProgramError::IntegerTooLarge)?,
);
prover_inputs.push(nonce);
prover_inputs.push(balance);
mpt_prover_inputs(storage_trie, prover_inputs, &parse_storage_value)?;
prover_inputs.push(code_hash.into_uint());
// Set `value_ptr_ptr`.
trie_data.push((trie_data.len() + 1).into());
Ok(())
trie_data.push(nonce);
trie_data.push(balance);
// Storage trie ptr.
let storage_ptr_ptr = trie_data.len();
trie_data.push((trie_data.len() + 2).into());
trie_data.push(code_hash.into_uint());
let storage_ptr = load_mpt(storage_trie, trie_data, &parse_storage_value)?;
if storage_ptr == 0 {
trie_data[storage_ptr_ptr] = 0.into();
}
Ok(node_ptr)
}
}
}
fn parse_storage_value(value_rlp: &[u8]) -> Result<Vec<U256>, ProgramError> {
let value: U256 = rlp::decode(value_rlp).map_err(|_| ProgramError::InvalidRlp)?;
Ok(vec![value])
}
pub(crate) fn load_all_mpts(
trie_inputs: &TrieInputs,
) -> Result<(TrieRootPtrs, Vec<U256>), ProgramError> {
let mut trie_data = vec![U256::zero()];
let storage_tries_by_state_key = trie_inputs
.storage_tries
.iter()
.map(|(hashed_address, storage_trie)| {
let key = Nibbles::from_bytes_be(hashed_address.as_bytes())
.expect("An H256 is 32 bytes long");
(key, storage_trie)
})
.collect();
const fn empty_nibbles() -> Nibbles {
Nibbles {
count: 0,
packed: U512::zero(),
}
let state_root_ptr = load_state_trie(
&trie_inputs.state_trie,
empty_nibbles(),
&mut trie_data,
&storage_tries_by_state_key,
)?;
let txn_root_ptr = load_mpt(&trie_inputs.transactions_trie, &mut trie_data, &|rlp| {
let mut parsed_txn = vec![U256::from(rlp.len())];
parsed_txn.extend(rlp.iter().copied().map(U256::from));
Ok(parsed_txn)
})?;
let receipt_root_ptr = load_mpt(&trie_inputs.receipts_trie, &mut trie_data, &parse_receipts)?;
let trie_root_ptrs = TrieRootPtrs {
state_root_ptr,
txn_root_ptr,
receipt_root_ptr,
};
Ok((trie_root_ptrs, trie_data))
}
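And a hedged sketch of the degenerate case, mirroring the `load_all_mpts_empty` interpreter test above: with all input tries empty, the segment keeps only its leading null word and every root pointer is the null pointer 0. The function name `empty_tries_layout` is illustrative, and the call is assumed to run inside this crate.

use ethereum_types::U256;

use crate::generation::mpt::load_all_mpts;
use crate::generation::TrieInputs;

// Illustrative, in-crate sketch; assumes `TrieInputs` keeps its `Default` impl.
fn empty_tries_layout() {
    let trie_inputs = TrieInputs::default();
    let (ptrs, trie_data) = load_all_mpts(&trie_inputs).expect("empty tries are valid MPT data");
    // Only the null word is stored, and all three roots point to it.
    assert_eq!(trie_data, vec![U256::zero()]);
    assert_eq!(ptrs.state_root_ptr, 0);
    assert_eq!(ptrs.txn_root_ptr, 0);
    assert_eq!(ptrs.receipt_root_ptr, 0);
}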
pub mod transaction_testing {

View File

@ -36,10 +36,10 @@ impl<F: Field> GenerationState<F> {
pub(crate) fn prover_input(&mut self, input_fn: &ProverInputFn) -> Result<U256, ProgramError> {
match input_fn.0[0].as_str() {
"no_txn" => self.no_txn(),
"trie_ptr" => self.run_trie_ptr(input_fn),
"ff" => self.run_ff(input_fn),
"sf" => self.run_sf(input_fn),
"ffe" => self.run_ffe(input_fn),
"mpt" => self.run_mpt(),
"rlp" => self.run_rlp(),
"current_hash" => self.run_current_hash(),
"account_code" => self.run_account_code(input_fn),
@ -54,6 +54,16 @@ impl<F: Field> GenerationState<F> {
Ok(U256::from(self.inputs.signed_txn.is_none() as u8))
}
fn run_trie_ptr(&mut self, input_fn: &ProverInputFn) -> Result<U256, ProgramError> {
let trie = input_fn.0[1].as_str();
match trie {
"state" => Ok(U256::from(self.trie_root_ptrs.state_root_ptr)),
"txn" => Ok(U256::from(self.trie_root_ptrs.txn_root_ptr)),
"receipt" => Ok(U256::from(self.trie_root_ptrs.receipt_root_ptr)),
_ => Err(ProgramError::ProverInputError(InvalidInput)),
}
}
/// Finite field operations.
fn run_ff(&self, input_fn: &ProverInputFn) -> Result<U256, ProgramError> {
let field = EvmField::from_str(input_fn.0[1].as_str())
@ -109,13 +119,6 @@ impl<F: Field> GenerationState<F> {
Ok(field.field_extension_inverse(n, f))
}
/// MPT data.
fn run_mpt(&mut self) -> Result<U256, ProgramError> {
self.mpt_prover_inputs
.pop()
.ok_or(ProgramError::ProverInputError(OutOfMptData))
}
/// RLP data.
fn run_rlp(&mut self) -> Result<U256, ProgramError> {
self.rlp_prover_inputs

View File

@ -2,11 +2,14 @@ use std::collections::HashMap;
use ethereum_types::{Address, BigEndianHash, H160, H256, U256};
use keccak_hash::keccak;
use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use super::mpt::{load_all_mpts, TrieRootPtrs};
use super::TrieInputs;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
use crate::generation::mpt::all_mpt_prover_inputs_reversed;
use crate::generation::rlp::all_rlp_prover_inputs_reversed;
use crate::generation::GenerationInputs;
use crate::memory::segments::Segment;
@ -29,10 +32,6 @@ pub(crate) struct GenerationState<F: Field> {
pub(crate) memory: MemoryState,
pub(crate) traces: Traces<F>,
/// Prover inputs containing MPT data, in reverse order so that the next input can be obtained
/// via `pop()`.
pub(crate) mpt_prover_inputs: Vec<U256>,
/// Prover inputs containing RLP data, in reverse order so that the next input can be obtained
/// via `pop()`.
pub(crate) rlp_prover_inputs: Vec<U256>,
@ -48,9 +47,20 @@ pub(crate) struct GenerationState<F: Field> {
/// inputs are obtained in big-endian order via `pop()`). Contains both the remainder and the
/// quotient, in that order.
pub(crate) bignum_modmul_result_limbs: Vec<U256>,
/// Pointers, within the `TrieData` segment, of the three MPTs.
pub(crate) trie_root_ptrs: TrieRootPtrs,
}
impl<F: Field> GenerationState<F> {
fn preinitialize_mpts(&mut self, trie_inputs: &TrieInputs) -> TrieRootPtrs {
let (trie_roots_ptrs, trie_data) =
load_all_mpts(trie_inputs).expect("Invalid MPT data for preinitialization");
self.memory.contexts[0].segments[Segment::TrieData as usize].content = trie_data;
trie_roots_ptrs
}
pub(crate) fn new(inputs: GenerationInputs, kernel_code: &[u8]) -> Result<Self, ProgramError> {
log::debug!("Input signed_txn: {:?}", &inputs.signed_txn);
log::debug!("Input state_trie: {:?}", &inputs.tries.state_trie);
@ -61,23 +71,31 @@ impl<F: Field> GenerationState<F> {
log::debug!("Input receipts_trie: {:?}", &inputs.tries.receipts_trie);
log::debug!("Input storage_tries: {:?}", &inputs.tries.storage_tries);
log::debug!("Input contract_code: {:?}", &inputs.contract_code);
let mpt_prover_inputs = all_mpt_prover_inputs_reversed(&inputs.tries)?;
let rlp_prover_inputs =
all_rlp_prover_inputs_reversed(inputs.signed_txn.as_ref().unwrap_or(&vec![]));
all_rlp_prover_inputs_reversed(inputs.clone().signed_txn.as_ref().unwrap_or(&vec![]));
let withdrawal_prover_inputs = all_withdrawals_prover_inputs_reversed(&inputs.withdrawals);
let bignum_modmul_result_limbs = Vec::new();
Ok(Self {
inputs,
let mut state = Self {
inputs: inputs.clone(),
registers: Default::default(),
memory: MemoryState::new(kernel_code),
traces: Traces::default(),
mpt_prover_inputs,
rlp_prover_inputs,
withdrawal_prover_inputs,
state_key_to_address: HashMap::new(),
bignum_modmul_result_limbs,
})
trie_root_ptrs: TrieRootPtrs {
state_root_ptr: 0,
txn_root_ptr: 0,
receipt_root_ptr: 0,
},
};
let trie_root_ptrs = state.preinitialize_mpts(&inputs.tries);
state.trie_root_ptrs = trie_root_ptrs;
Ok(state)
}
/// Updates `program_counter`, and potentially adds some extra handling if we're jumping to a

View File

@ -13,6 +13,7 @@ use plonky2::util::timing::TimingTree;
use plonky2::util::transpose;
use plonky2_maybe_rayon::*;
use super::segments::Segment;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
@ -349,7 +350,11 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for MemoryStark<F
// We don't want to exclude the entirety of context 0. This constraint zero-initializes all segments except the
// specified ones (segment 0 is already included in initialize_aux).
// There is overlap with the previous constraint, but this is not a problem.
yield_constr.constraint_transition(initialize_aux * next_values_limbs[i]);
yield_constr.constraint_transition(
(next_addr_segment - P::Scalar::from_canonical_usize(Segment::TrieData as usize))
* initialize_aux
* next_values_limbs[i],
);
}
// Check the range column: First value must be 0,
@ -500,7 +505,13 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for MemoryStark<F
// We don't want to exclude the entirety of context 0. This constraint zero-initializes all segments except the
// specified ones (segment 0 is already included in initialize_aux).
// There is overlap with the previous constraint, but this is not a problem.
yield_constr.constraint_transition(builder, context_zero_initializing_constraint);
let segment_trie_data = builder.add_const_extension(
next_addr_segment,
F::NEG_ONE * F::from_canonical_u32(Segment::TrieData as u32),
);
let zero_init_constraint =
builder.mul_extension(segment_trie_data, context_zero_initializing_constraint);
yield_constr.constraint_transition(builder, zero_init_constraint);
}
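Read as an equation (a hedged paraphrase of the constraint just above, writing s' for the next row's segment and v'_i for its i-th value limb):

    (s' - TrieData) * initialize_aux * v'_i = 0

that is, whenever `initialize_aux` selects a freshly initialized context-0 address, each value limb of the next row must be zero unless that address lies in the `TrieData` segment, which may now be preinitialized with the loaded tries.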
// Check the range column: First value must be 0,

View File

@ -19,6 +19,7 @@ use serde::{Deserialize, Serialize};
use crate::all_stark::NUM_TABLES;
use crate::config::StarkConfig;
use crate::cross_table_lookup::GrandProductChallengeSet;
use crate::generation::mpt::TrieRootPtrs;
use crate::util::{get_h160, get_h256, h2u};
/// A STARK proof for each table, plus some metadata used to create recursive wrapper proofs.
@ -818,7 +819,7 @@ impl ExtraBlockDataTarget {
builder.connect(ed0.txn_number_before, ed1.txn_number_before);
builder.connect(ed0.txn_number_after, ed1.txn_number_after);
builder.connect(ed0.gas_used_before, ed1.gas_used_before);
builder.connect(ed1.gas_used_after, ed1.gas_used_after);
builder.connect(ed0.gas_used_after, ed1.gas_used_after);
}
}

View File

@ -440,7 +440,7 @@ fn test_log_with_aggreg() -> anyhow::Result<()> {
// Preprocess all circuits.
let all_circuits = AllRecursiveCircuits::<F, C, D>::new(
&all_stark,
&[16..17, 14..16, 16..18, 14..15, 10..11, 12..13, 19..20],
&[16..17, 14..16, 16..18, 14..15, 9..10, 12..13, 19..20],
&config,
);