From 0424fe680d8ed1d8bbddf01450d557938d8e8346 Mon Sep 17 00:00:00 2001 From: Daniel Lubarov Date: Thu, 6 Oct 2022 14:27:36 -0700 Subject: [PATCH] mload_packing --- evm/src/cpu/kernel/asm/memory/packing.asm | 43 ++++++++++++++-- evm/src/cpu/kernel/tests/packing.rs | 61 +++++++++++++++++++++++ 2 files changed, 101 insertions(+), 3 deletions(-) diff --git a/evm/src/cpu/kernel/asm/memory/packing.asm b/evm/src/cpu/kernel/asm/memory/packing.asm index 3021c640..c8b4c468 100644 --- a/evm/src/cpu/kernel/asm/memory/packing.asm +++ b/evm/src/cpu/kernel/asm/memory/packing.asm @@ -1,10 +1,47 @@ // Methods for encoding integers as bytes in memory, as well as the reverse, // decoding bytes as integers. All big-endian. +// Given a pointer to some bytes in memory, pack them into a word. Assumes 0 < len <= 32. +// Pre stack: addr: 3, len, retdest +// Post stack: packed_value +// NOTE: addr: 3 denotes a (context, segment, virtual) tuple global mload_packing: - // stack: context, segment, offset, len, retdest - PANIC // TODO - // stack: value + // stack: addr: 3, len, retdest + DUP3 DUP3 DUP3 MLOAD_GENERAL DUP5 %eq_const(1) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(1) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(2) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(2) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(3) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(3) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(4) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(4) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(5) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(5) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(6) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(6) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(7) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(7) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(8) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(8) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 
%eq_const(9) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(9) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(10) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(10) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(11) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(11) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(12) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(12) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(13) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(13) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(14) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(14) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(15) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(15) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(16) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(16) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(17) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(17) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(18) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(18) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(19) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(19) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(20) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(20) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(21) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(21) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(22) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(22) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(23) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(23) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(24) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(24) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(25) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(25) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(26) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(26) DUP4 DUP4 
MLOAD_GENERAL ADD DUP5 %eq_const(27) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(27) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(28) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(28) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(29) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(29) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(30) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(30) DUP4 DUP4 MLOAD_GENERAL ADD DUP5 %eq_const(31) %jumpi(mload_packing_return) %shl_const(8) + DUP4 %add_const(31) DUP4 DUP4 MLOAD_GENERAL ADD +mload_packing_return: + %stack (packed_value, addr: 3, len, retdest) -> (retdest, packed_value) + JUMP // Pre stack: context, segment, offset, value, len, retdest // Post stack: offset' diff --git a/evm/src/cpu/kernel/tests/packing.rs b/evm/src/cpu/kernel/tests/packing.rs index dcfdd69b..71f66e6d 100644 --- a/evm/src/cpu/kernel/tests/packing.rs +++ b/evm/src/cpu/kernel/tests/packing.rs @@ -1,9 +1,70 @@ use anyhow::Result; +use ethereum_types::U256; use crate::cpu::kernel::aggregator::KERNEL; use crate::cpu::kernel::interpreter::Interpreter; use crate::memory::segments::Segment; +#[test] +fn test_mload_packing_1_byte() -> Result<()> { + let mload_packing = KERNEL.global_labels["mload_packing"]; + + let retdest = 0xDEADBEEFu32.into(); + let len = 1.into(); + let offset = 2.into(); + let segment = (Segment::RlpRaw as u32).into(); + let context = 0.into(); + let initial_stack = vec![retdest, len, offset, segment, context]; + + let mut interpreter = Interpreter::new_with_kernel(mload_packing, initial_stack); + interpreter.set_rlp_memory(vec![0, 0, 0xAB]); + + interpreter.run()?; + assert_eq!(interpreter.stack(), vec![0xAB.into()]); + + Ok(()) +} + +#[test] +fn test_mload_packing_3_bytes() -> Result<()> { + let mload_packing = KERNEL.global_labels["mload_packing"]; + + let retdest = 0xDEADBEEFu32.into(); + let len = 3.into(); + let offset = 2.into(); + let segment = (Segment::RlpRaw as 
u32).into(); + let context = 0.into(); + let initial_stack = vec![retdest, len, offset, segment, context]; + + let mut interpreter = Interpreter::new_with_kernel(mload_packing, initial_stack); + interpreter.set_rlp_memory(vec![0, 0, 0xAB, 0xCD, 0xEF]); + + interpreter.run()?; + assert_eq!(interpreter.stack(), vec![0xABCDEF.into()]); + + Ok(()) +} + +#[test] +fn test_mload_packing_32_bytes() -> Result<()> { + let mload_packing = KERNEL.global_labels["mload_packing"]; + + let retdest = 0xDEADBEEFu32.into(); + let len = 32.into(); + let offset = 0.into(); + let segment = (Segment::RlpRaw as u32).into(); + let context = 0.into(); + let initial_stack = vec![retdest, len, offset, segment, context]; + + let mut interpreter = Interpreter::new_with_kernel(mload_packing, initial_stack); + interpreter.set_rlp_memory(vec![0xFF; 32]); + + interpreter.run()?; + assert_eq!(interpreter.stack(), vec![U256::MAX]); + + Ok(()) +} + #[test] fn test_mstore_unpacking() -> Result<()> { let mstore_unpacking = KERNEL.global_labels["mstore_unpacking"];