Merge branch 'main' into optimizer

Daniel Lubarov 2022-08-03 09:58:54 -07:00
commit 90be4749ef
13 changed files with 428 additions and 81 deletions

View File

@ -1,49 +1,14 @@
//! Loads each kernel assembly file and concatenates them.
use std::collections::HashMap;
use ethereum_types::U256;
use hex_literal::hex;
use itertools::Itertools;
use once_cell::sync::Lazy;
use super::assembler::{assemble, Kernel};
use crate::cpu::kernel::constants::evm_constants;
use crate::cpu::kernel::parser::parse;
use crate::cpu::kernel::txn_fields::NormalizedTxnField;
use crate::memory::segments::Segment;
pub static KERNEL: Lazy<Kernel> = Lazy::new(combined_kernel);
const EC_CONSTANTS: [(&str, [u8; 32]); 3] = [
(
"BN_BASE",
hex!("30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47"),
),
(
"SECP_BASE",
hex!("fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"),
),
(
"SECP_SCALAR",
hex!("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"),
),
];
pub fn evm_constants() -> HashMap<String, U256> {
let mut c = HashMap::new();
for (name, value) in EC_CONSTANTS {
c.insert(name.into(), U256::from_big_endian(&value));
}
for segment in Segment::all() {
c.insert(segment.var_name().into(), (segment as u32).into());
}
for txn_field in NormalizedTxnField::all() {
c.insert(txn_field.var_name().into(), (txn_field as u32).into());
}
c
}
#[allow(dead_code)] // TODO: Should be used once witness generation is done.
pub(crate) fn combined_kernel() -> Kernel {
let files = vec![
include_str!("asm/curve/bn254/curve_add.asm"),

View File

@ -32,6 +32,7 @@ global decode_rlp_string_len:
JUMP
decode_rlp_string_len_medium:
JUMPDEST
// String is 0-55 bytes long. First byte contains the len.
// stack: first_byte, pos, retdest
%sub_const(0x80)
@ -43,6 +44,7 @@ decode_rlp_string_len_medium:
JUMP
decode_rlp_string_len_large:
JUMPDEST
// String is >55 bytes long. First byte contains the len of the len.
// stack: first_byte, pos, retdest
%sub_const(0xb7)
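The branches above implement the standard RLP string rules: a byte below 0x80 is its own one-byte string, a 0x80-0xb7 prefix encodes the length directly, and a larger prefix encodes the length of the length. A minimal Rust sketch of the same computation, written against the byte layout used in the rlp tests further down (the helper name is made up; this is an illustration, not kernel code):

fn rlp_string_len(rlp: &[u8], pos: usize) -> (usize, usize) {
    let first_byte = rlp[pos] as usize;
    if first_byte < 0x80 {
        // The byte is its own encoding: a 1-byte string starting at `pos`.
        (1, pos)
    } else if first_byte <= 0xb7 {
        // 0-55 bytes: the prefix encodes the length directly.
        (first_byte - 0x80, pos + 1)
    } else {
        // >55 bytes: the prefix encodes the length of the length.
        let len_of_len = first_byte - 0xb7;
        let len = rlp[pos + 1..pos + 1 + len_of_len]
            .iter()
            .fold(0usize, |acc, &b| (acc << 8) | b as usize);
        (len, pos + 1 + len_of_len)
    }
}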

View File

@ -0,0 +1,87 @@
use std::collections::HashMap;
use ethereum_types::U256;
use hex_literal::hex;
use crate::cpu::kernel::context_metadata::ContextMetadata;
use crate::cpu::kernel::global_metadata::GlobalMetadata;
use crate::cpu::kernel::txn_fields::NormalizedTxnField;
use crate::memory::segments::Segment;
/// Constants that are accessible to our kernel assembly code.
pub fn evm_constants() -> HashMap<String, U256> {
let mut c = HashMap::new();
for (name, value) in EC_CONSTANTS {
c.insert(name.into(), U256::from_big_endian(&value));
}
for (name, value) in GAS_CONSTANTS {
c.insert(name.into(), U256::from(value));
}
for segment in Segment::all() {
c.insert(segment.var_name().into(), (segment as u32).into());
}
for txn_field in NormalizedTxnField::all() {
c.insert(txn_field.var_name().into(), (txn_field as u32).into());
}
for global_metadata in GlobalMetadata::all() {
c.insert(global_metadata.var_name().into(), (global_metadata as u32).into());
}
for context_metadata in ContextMetadata::all() {
c.insert(context_metadata.var_name().into(), (context_metadata as u32).into());
}
c
}
const EC_CONSTANTS: [(&str, [u8; 32]); 3] = [
(
"BN_BASE",
hex!("30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47"),
),
(
"SECP_BASE",
hex!("fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"),
),
(
"SECP_SCALAR",
hex!("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141"),
),
];
const GAS_CONSTANTS: [(&str, u16); 36] = [
("GAS_ZERO", 0),
("GAS_JUMPDEST", 1),
("GAS_BASE", 2),
("GAS_VERYLOW", 3),
("GAS_LOW", 5),
("GAS_MID", 8),
("GAS_HIGH", 10),
("GAS_WARMACCESS", 100),
("GAS_ACCESSLISTADDRESS", 2_400),
("GAS_ACCESSLISTSTORAGE", 1_900),
("GAS_COLDACCOUNTACCESS", 2_600),
("GAS_COLDSLOAD", 2_100),
("GAS_SSET", 20_000),
("GAS_SRESET", 2_900),
("REFUND_SCLEAR", 15_000),
("REFUND_SELFDESTRUCT", 24_000),
("GAS_SELFDESTRUCT", 5_000),
("GAS_CREATE", 32_000),
("GAS_CODEDEPOSIT", 200),
("GAS_CALLVALUE", 9_000),
("GAS_CALLSTIPEND", 2_300),
("GAS_NEWACCOUNT", 25_000),
("GAS_EXP", 10),
("GAS_EXPBYTE", 50),
("GAS_MEMORY", 3),
("GAS_TXCREATE", 32_000),
("GAS_TXDATAZERO", 4),
("GAS_TXDATANONZERO", 16),
("GAS_TRANSACTION", 21_000),
("GAS_LOG", 375),
("GAS_LOGDATA", 8),
("GAS_LOGTOPIC", 375),
("GAS_KECCAK256", 30),
("GAS_KECCAK256WORD", 6),
("GAS_COPY", 3),
("GAS_BLOCKHASH", 20),
];
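A rough usage sketch of the map built above (illustrative only; the assembler wiring that consumes it is outside this hunk, and the asserted values simply restate the tables in this file and in segments.rs):

let constants = evm_constants();
assert_eq!(constants["GAS_TRANSACTION"], U256::from(21_000u64));
assert_eq!(constants["GAS_TXDATANONZERO"], U256::from(16u64));
// Segment names resolve to their numeric IDs, e.g. Segment::TxnData = 9 after the renumbering.
assert_eq!(constants["SEGMENT_TXN_DATA"], U256::from(Segment::TxnData as u32));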

View File

@ -0,0 +1,33 @@
/// These metadata fields contain VM state specific to a particular context.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)]
pub(crate) enum ContextMetadata {
/// The ID of the context which created this one.
ParentContext = 0,
/// The program counter to return to when we return to the parent context.
ParentProgramCounter = 1,
CalldataSize = 2,
ReturndataSize = 3,
}
impl ContextMetadata {
pub(crate) const COUNT: usize = 4;
pub(crate) fn all() -> [Self; Self::COUNT] {
[
Self::ParentContext,
Self::ParentProgramCounter,
Self::CalldataSize,
Self::ReturndataSize,
]
}
/// The variable name that gets passed into kernel assembly code.
pub(crate) fn var_name(&self) -> &'static str {
match self {
ContextMetadata::ParentContext => "CTX_METADATA_PARENT_CONTEXT",
ContextMetadata::ParentProgramCounter => "CTX_METADATA_PARENT_PC",
ContextMetadata::CalldataSize => "CTX_METADATA_CALLDATA_SIZE",
ContextMetadata::ReturndataSize => "CTX_METADATA_RETURNDATA_SIZE",
}
}
}
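These fields live at fixed offsets within `Segment::ContextMetadata` of a context's memory, mirroring how `GlobalMetadata` lives in the kernel's context. A minimal sketch of reading one, following the same indexing pattern as the interpreter's `get_txn_field` below (the helper itself is hypothetical and assumes crate-visible memory fields):

fn get_context_metadata(interp: &Interpreter, ctx: usize, field: ContextMetadata) -> U256 {
    // context_memory[context].segments[segment].content[offset]
    interp.memory.context_memory[ctx].segments[Segment::ContextMetadata as usize].content
        [field as usize]
}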

View File

@ -0,0 +1,23 @@
/// These metadata fields contain global VM state, stored in the `Segment::GlobalMetadata` segment
/// of the kernel's context (which is zero).
#[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)]
pub(crate) enum GlobalMetadata {
/// The largest context ID that has been used so far in this execution. Tracking this allows us
/// to give each new context a unique ID, so that its memory will be zero-initialized.
LargestContext = 0,
}
impl GlobalMetadata {
pub(crate) const COUNT: usize = 1;
pub(crate) fn all() -> [Self; Self::COUNT] {
[Self::LargestContext]
}
/// The variable name that gets passed into kernel assembly code.
pub(crate) fn var_name(&self) -> &'static str {
match self {
GlobalMetadata::LargestContext => "GLOBAL_METADATA_LARGEST_CONTEXT",
}
}
}

View File

@ -4,17 +4,19 @@ use anyhow::{anyhow, bail};
use ethereum_types::{BigEndianHash, U256, U512};
use keccak_hash::keccak;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::assembler::Kernel;
use crate::cpu::kernel::prover_input::ProverInputFn;
use crate::cpu::kernel::txn_fields::NormalizedTxnField;
use crate::generation::memory::{MemoryContextState, MemorySegmentState};
use crate::memory::segments::Segment;
/// Halt interpreter execution whenever a jump to this offset is done.
const HALT_OFFSET: usize = 0xdeadbeef;
const DEFAULT_HALT_OFFSET: usize = 0xdeadbeef;
#[derive(Debug)]
pub(crate) struct InterpreterMemory {
context_memory: Vec<MemoryContextState>,
pub(crate) context_memory: Vec<MemoryContextState>,
}
impl Default for InterpreterMemory {
@ -48,16 +50,19 @@ impl InterpreterMemory {
}
pub struct Interpreter<'a> {
kernel_mode: bool,
jumpdests: Vec<usize>,
offset: usize,
context: usize,
memory: InterpreterMemory,
pub(crate) memory: InterpreterMemory,
prover_inputs_map: &'a HashMap<usize, ProverInputFn>,
prover_inputs: Vec<U256>,
pub(crate) halt_offsets: Vec<usize>,
running: bool,
}
pub fn run_with_kernel(
// TODO: Remove param and just use KERNEL.
kernel: &Kernel,
initial_offset: usize,
initial_stack: Vec<U256>,
@ -76,24 +81,47 @@ pub fn run<'a>(
initial_stack: Vec<U256>,
prover_inputs: &'a HashMap<usize, ProverInputFn>,
) -> anyhow::Result<Interpreter<'a>> {
let mut interpreter = Interpreter {
jumpdests: find_jumpdests(code),
offset: initial_offset,
memory: InterpreterMemory::with_code_and_stack(code, initial_stack),
prover_inputs_map: prover_inputs,
prover_inputs: Vec::new(),
context: 0,
running: true,
};
while interpreter.running {
interpreter.run_opcode()?;
}
let mut interpreter = Interpreter::new(code, initial_offset, initial_stack, prover_inputs);
interpreter.run()?;
Ok(interpreter)
}
impl<'a> Interpreter<'a> {
pub(crate) fn new_with_kernel(initial_offset: usize, initial_stack: Vec<U256>) -> Self {
Self::new(
&KERNEL.code,
initial_offset,
initial_stack,
&KERNEL.prover_inputs,
)
}
pub(crate) fn new(
code: &'a [u8],
initial_offset: usize,
initial_stack: Vec<U256>,
prover_inputs: &'a HashMap<usize, ProverInputFn>,
) -> Self {
Self {
kernel_mode: true,
jumpdests: find_jumpdests(code),
offset: initial_offset,
memory: InterpreterMemory::with_code_and_stack(code, initial_stack),
prover_inputs_map: prover_inputs,
prover_inputs: Vec::new(),
context: 0,
halt_offsets: vec![DEFAULT_HALT_OFFSET],
running: true,
}
}
pub(crate) fn run(&mut self) -> anyhow::Result<()> {
while self.running {
self.run_opcode()?;
}
Ok(())
}
fn code(&self) -> &MemorySegmentState {
&self.memory.context_memory[self.context].segments[Segment::Code as usize]
}
@ -105,6 +133,19 @@ impl<'a> Interpreter<'a> {
.collect::<Vec<_>>()
}
pub(crate) fn get_txn_field(&self, field: NormalizedTxnField) -> U256 {
self.memory.context_memory[0].segments[Segment::TxnFields as usize].content[field as usize]
}
pub(crate) fn get_txn_data(&self) -> &[U256] {
&self.memory.context_memory[0].segments[Segment::TxnData as usize].content
}
pub(crate) fn set_rlp_memory(&mut self, rlp: Vec<u8>) {
self.memory.context_memory[0].segments[Segment::RlpRaw as usize].content =
rlp.into_iter().map(U256::from).collect();
}
fn incr(&mut self, n: usize) {
self.offset += n;
}
@ -156,7 +197,7 @@ impl<'a> Interpreter<'a> {
0x18 => self.run_xor(), // "XOR",
0x19 => self.run_not(), // "NOT",
0x1a => todo!(), // "BYTE",
0x1b => todo!(), // "SHL",
0x1b => self.run_shl(), // "SHL",
0x1c => todo!(), // "SHR",
0x1d => todo!(), // "SAR",
0x20 => self.run_keccak256(), // "KECCAK256",
@ -339,6 +380,12 @@ impl<'a> Interpreter<'a> {
self.push(!x);
}
fn run_shl(&mut self) {
let shift = self.pop();
let x = self.pop();
self.push(x << shift);
}
fn run_keccak256(&mut self) {
let offset = self.pop().as_usize();
let size = self.pop().as_usize();
@ -406,24 +453,27 @@ impl<'a> Interpreter<'a> {
fn run_jump(&mut self) {
let x = self.pop().as_usize();
self.offset = x;
if self.offset == HALT_OFFSET {
self.running = false;
} else if self.jumpdests.binary_search(&self.offset).is_err() {
panic!("Destination is not a JUMPDEST.");
}
self.jump_to(x);
}
fn run_jumpi(&mut self) {
let x = self.pop().as_usize();
let b = self.pop();
if !b.is_zero() {
self.offset = x;
if self.offset == HALT_OFFSET {
self.running = false;
} else if self.jumpdests.binary_search(&self.offset).is_err() {
panic!("Destination is not a JUMPDEST.");
}
self.jump_to(x);
}
}
fn jump_to(&mut self, offset: usize) {
// The JUMPDEST rule is not enforced in kernel mode.
if !self.kernel_mode && self.jumpdests.binary_search(&offset).is_err() {
panic!("Destination is not a JUMPDEST.");
}
self.offset = offset;
if self.halt_offsets.contains(&offset) {
self.running = false;
}
}
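This also explains why the tests below seed the stack with `0xdeadbeef` as the return destination: it equals `DEFAULT_HALT_OFFSET`, so the routine's final `JUMP` to `retdest` stops the interpreter cleanly. Condensed from the rlp tests (a sketch, using a label the kernel defines):

let label = KERNEL.global_labels["decode_rlp_scalar"];
// retdest = DEFAULT_HALT_OFFSET, initial RLP position = 0.
let mut interpreter = Interpreter::new_with_kernel(label, vec![0xDEADBEEFu32.into(), 0.into()]);
interpreter.run()?;

To stop at an intermediate label instead, push it onto `halt_offsets`, as the transaction-parsing test does with `process_normalized_txn`.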

View File

@ -1,7 +1,10 @@
pub mod aggregator;
pub mod assembler;
mod ast;
mod constants;
mod context_metadata;
mod cost_estimator;
mod global_metadata;
pub(crate) mod keccak_util;
mod opcodes;
mod optimizer;
@ -19,7 +22,7 @@ mod tests;
use assembler::assemble;
use parser::parse;
use crate::cpu::kernel::aggregator::evm_constants;
use crate::cpu::kernel::constants::evm_constants;
/// Assemble files, outputting bytes.
/// This is for debugging the kernel only.

View File

@ -1,5 +1,3 @@
use std::str::FromStr;
use anyhow::Result;
use ethereum_types::U256;
use rand::{thread_rng, Rng};
@ -17,7 +15,7 @@ fn test_exp() -> Result<()> {
let b = U256([0; 4].map(|_| rng.gen()));
// Random input
let initial_stack = vec![U256::from_str("0xdeadbeef")?, b, a];
let initial_stack = vec![0xDEADBEEFu32.into(), b, a];
let stack_with_kernel = run_with_kernel(&kernel, exp, initial_stack)?
.stack()
.to_vec();
@ -29,7 +27,7 @@ fn test_exp() -> Result<()> {
assert_eq!(stack_with_kernel, stack_with_opcode);
// 0 base
let initial_stack = vec![U256::from_str("0xdeadbeef")?, b, U256::zero()];
let initial_stack = vec![0xDEADBEEFu32.into(), b, U256::zero()];
let stack_with_kernel = run_with_kernel(&kernel, exp, initial_stack)?
.stack()
.to_vec();
@ -41,7 +39,7 @@ fn test_exp() -> Result<()> {
assert_eq!(stack_with_kernel, stack_with_opcode);
// 0 exponent
let initial_stack = vec![U256::from_str("0xdeadbeef")?, U256::zero(), a];
let initial_stack = vec![0xDEADBEEFu32.into(), U256::zero(), a];
let stack_with_kernel = run_with_kernel(&kernel, exp, initial_stack)?
.stack()
.to_vec();

View File

@ -1,6 +1,8 @@
mod curve_ops;
mod ecrecover;
mod exp;
mod rlp;
mod transaction_parsing;
use std::str::FromStr;

View File

@ -0,0 +1,114 @@
use anyhow::Result;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::interpreter::Interpreter;
#[test]
fn test_decode_rlp_string_len_short() -> Result<()> {
let decode_rlp_string_len = KERNEL.global_labels["decode_rlp_string_len"];
let initial_stack = vec![0xDEADBEEFu32.into(), 2.into()];
let mut interpreter = Interpreter::new_with_kernel(decode_rlp_string_len, initial_stack);
// A couple dummy bytes, followed by "0x70" which is its own encoding.
interpreter.set_rlp_memory(vec![123, 234, 0x70]);
interpreter.run()?;
let expected_stack = vec![1.into(), 2.into()]; // len, pos
assert_eq!(interpreter.stack(), expected_stack);
Ok(())
}
#[test]
fn test_decode_rlp_string_len_medium() -> Result<()> {
let decode_rlp_string_len = KERNEL.global_labels["decode_rlp_string_len"];
let initial_stack = vec![0xDEADBEEFu32.into(), 2.into()];
let mut interpreter = Interpreter::new_with_kernel(decode_rlp_string_len, initial_stack);
// A couple dummy bytes, followed by the RLP encoding of "1 2 3 4 5".
interpreter.set_rlp_memory(vec![123, 234, 0x85, 1, 2, 3, 4, 5]);
interpreter.run()?;
let expected_stack = vec![5.into(), 3.into()]; // len, pos
assert_eq!(interpreter.stack(), expected_stack);
Ok(())
}
#[test]
fn test_decode_rlp_string_len_long() -> Result<()> {
let decode_rlp_string_len = KERNEL.global_labels["decode_rlp_string_len"];
let initial_stack = vec![0xDEADBEEFu32.into(), 2.into()];
let mut interpreter = Interpreter::new_with_kernel(decode_rlp_string_len, initial_stack);
// The RLP encoding of the string "1 2 3 ... 56".
interpreter.set_rlp_memory(vec![
123, 234, 0xb8, 56, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
]);
interpreter.run()?;
let expected_stack = vec![56.into(), 4.into()]; // len, pos
assert_eq!(interpreter.stack(), expected_stack);
Ok(())
}
#[test]
fn test_decode_rlp_list_len_short() -> Result<()> {
let decode_rlp_list_len = KERNEL.global_labels["decode_rlp_list_len"];
let initial_stack = vec![0xDEADBEEFu32.into(), 0.into()];
let mut interpreter = Interpreter::new_with_kernel(decode_rlp_list_len, initial_stack);
// The RLP encoding of [1, 2, [3, 4]].
interpreter.set_rlp_memory(vec![0xc5, 1, 2, 0xc2, 3, 4]);
interpreter.run()?;
let expected_stack = vec![5.into(), 1.into()]; // len, pos
assert_eq!(interpreter.stack(), expected_stack);
Ok(())
}
#[test]
fn test_decode_rlp_list_len_long() -> Result<()> {
let decode_rlp_list_len = KERNEL.global_labels["decode_rlp_list_len"];
let initial_stack = vec![0xDEADBEEFu32.into(), 0.into()];
let mut interpreter = Interpreter::new_with_kernel(decode_rlp_list_len, initial_stack);
// The RLP encoding of [1, ..., 56].
interpreter.set_rlp_memory(vec![
0xf8, 56, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
]);
interpreter.run()?;
let expected_stack = vec![56.into(), 2.into()]; // len, pos
assert_eq!(interpreter.stack(), expected_stack);
Ok(())
}
#[test]
fn test_decode_rlp_scalar() -> Result<()> {
let decode_rlp_scalar = KERNEL.global_labels["decode_rlp_scalar"];
let initial_stack = vec![0xDEADBEEFu32.into(), 0.into()];
let mut interpreter = Interpreter::new_with_kernel(decode_rlp_scalar, initial_stack);
// The RLP encoding of "12 34 56".
interpreter.set_rlp_memory(vec![0x83, 0x12, 0x34, 0x56]);
interpreter.run()?;
let expected_stack = vec![0x123456.into(), 4.into()]; // scalar, pos
assert_eq!(interpreter.stack(), expected_stack);
Ok(())
}

View File

@ -0,0 +1 @@
mod parse_type_0_txn;

View File

@ -0,0 +1,65 @@
use anyhow::Result;
use ethereum_types::U256;
use hex_literal::hex;
use NormalizedTxnField::*;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::txn_fields::NormalizedTxnField;
#[test]
fn process_type_0_txn() -> Result<()> {
let process_type_0_txn = KERNEL.global_labels["process_type_0_txn"];
let process_normalized_txn = KERNEL.global_labels["process_normalized_txn"];
let mut interpreter = Interpreter::new_with_kernel(process_type_0_txn, vec![]);
// When we reach process_normalized_txn, we're done with parsing and normalizing.
// Processing normalized transactions is outside the scope of this test.
interpreter.halt_offsets.push(process_normalized_txn);
// Generated with py-evm:
// import eth, eth_keys, eth_utils, rlp
// genesis_params = { 'difficulty': eth.constants.GENESIS_DIFFICULTY }
// chain = eth.chains.mainnet.MainnetChain.from_genesis(eth.db.atomic.AtomicDB(), genesis_params, {})
// unsigned_txn = chain.create_unsigned_transaction(
// nonce=5,
// gas_price=10,
// gas=22_000,
// to=eth.constants.ZERO_ADDRESS,
// value=100,
// data=b'\x42\x42',
// )
// sk = eth_keys.keys.PrivateKey(eth_utils.decode_hex('4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'))
// signed_txn = unsigned_txn.as_signed_transaction(sk)
// rlp.encode(signed_txn).hex()
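// A field-by-field breakdown of that encoding (the assertions below check the
// same values):
//   f8 61         list prefix, payload length 0x61 = 97 bytes
//   05            nonce = 5
//   0a            gas price = 10
//   82 55f0       gas limit = 0x55f0 = 22_000
//   94 00..00     to = the zero address (20 bytes)
//   64            value = 100
//   82 4242       data = [0x42, 0x42]
//   1c            v = 28, i.e. y_parity = 1
//   a0 7c5c..c6   r (32 bytes)
//   a0 1e7b..a2   s (32 bytes)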
interpreter.set_rlp_memory(hex!("f861050a8255f0940000000000000000000000000000000000000000648242421ca07c5c61ed975ebd286f6b027b8c504842e50a47d318e1e801719dd744fe93e6c6a01e7b5119b57dd54e175ff2f055c91f3ab1b53eba0b2c184f347cdff0e745aca2").to_vec());
interpreter.run()?;
assert_eq!(interpreter.get_txn_field(ChainIdPresent), 0.into());
assert_eq!(interpreter.get_txn_field(ChainId), 0.into());
assert_eq!(interpreter.get_txn_field(Nonce), 5.into());
assert_eq!(interpreter.get_txn_field(MaxPriorityFeePerGas), 10.into());
assert_eq!(interpreter.get_txn_field(MaxFeePerGas), 10.into());
assert_eq!(interpreter.get_txn_field(To), 0.into());
assert_eq!(interpreter.get_txn_field(Value), 100.into());
assert_eq!(interpreter.get_txn_field(DataLen), 2.into());
assert_eq!(interpreter.get_txn_data(), &[0x42.into(), 0x42.into()]);
assert_eq!(interpreter.get_txn_field(YParity), 1.into());
assert_eq!(
interpreter.get_txn_field(R),
U256::from_big_endian(&hex!(
"7c5c61ed975ebd286f6b027b8c504842e50a47d318e1e801719dd744fe93e6c6"
))
);
assert_eq!(
interpreter.get_txn_field(S),
U256::from_big_endian(&hex!(
"1e7b5119b57dd54e175ff2f055c91f3ab1b53eba0b2c184f347cdff0e745aca2"
))
);
Ok(())
}

View File

@ -13,20 +13,21 @@ pub(crate) enum Segment {
Returndata = 4,
/// A segment which contains a few fixed-size metadata fields, such as the caller's context, or the
/// size of `CALLDATA` and `RETURNDATA`.
Metadata = 5,
GlobalMetadata = 5,
ContextMetadata = 6,
/// General purpose kernel memory, used by various kernel functions.
/// In general, calling a helper function can result in this memory being clobbered.
KernelGeneral = 6,
KernelGeneral = 7,
/// Contains normalized transaction fields; see `TxnField`.
TxnFields = 7,
TxnFields = 8,
/// Contains the data field of a transaction.
TxnData = 8,
TxnData = 9,
/// Raw RLP data.
RlpRaw = 9,
RlpRaw = 10,
}
impl Segment {
pub(crate) const COUNT: usize = 10;
pub(crate) const COUNT: usize = 11;
pub(crate) fn all() -> [Self; Self::COUNT] {
[
@ -35,7 +36,8 @@ impl Segment {
Self::MainMemory,
Self::Calldata,
Self::Returndata,
Self::Metadata,
Self::GlobalMetadata,
Self::ContextMetadata,
Self::KernelGeneral,
Self::TxnFields,
Self::TxnData,
@ -51,7 +53,8 @@ impl Segment {
Segment::MainMemory => "SEGMENT_MAIN_MEMORY",
Segment::Calldata => "SEGMENT_CALLDATA",
Segment::Returndata => "SEGMENT_RETURNDATA",
Segment::Metadata => "SEGMENT_METADATA",
Segment::GlobalMetadata => "SEGMENT_GLOBAL_METADATA",
Segment::ContextMetadata => "SEGMENT_CONTEXT_METADATA",
Segment::KernelGeneral => "SEGMENT_KERNEL_GENERAL",
Segment::TxnFields => "SEGMENT_NORMALIZED_TXN",
Segment::TxnData => "SEGMENT_TXN_DATA",
@ -67,7 +70,8 @@ impl Segment {
Segment::MainMemory => 8,
Segment::Calldata => 8,
Segment::Returndata => 8,
Segment::Metadata => 256,
Segment::GlobalMetadata => 256,
Segment::ContextMetadata => 256,
Segment::KernelGeneral => 256,
Segment::TxnFields => 256,
Segment::TxnData => 256,
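Because the split inserts `ContextMetadata` at slot 6, every later segment ID shifts up by one, and anything reading segments by name through `evm_constants()` picks up the new IDs automatically. A quick illustrative check of the new numbering:

assert_eq!(Segment::GlobalMetadata as u32, 5);
assert_eq!(Segment::ContextMetadata as u32, 6);
assert_eq!(Segment::TxnData as u32, 9);
assert_eq!(Segment::ContextMetadata.var_name(), "SEGMENT_CONTEXT_METADATA");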