Remove aborts for invalid jumps

4l0n50 2023-12-20 14:13:36 +01:00
parent 829ae64fc4
commit 6ababc96ec
7 changed files with 72 additions and 51 deletions
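
At a high level, the commit replaces kernel aborts on bad jumpdest guesses with early exits: an invalid guess now simply leaves the corresponding bit in @SEGMENT_JUMPDEST_BITS unset instead of panicking. A rough Rust sketch of the behavioural difference (names and shapes are illustrative, not the kernel code):

fn write_table_old(code: &[u8], jumpdest: usize, proof_valid: bool, bits: &mut [bool]) {
    // Old behaviour: %assert_eq_const / %assert_lt_const aborted the whole run on a bad guess.
    assert_eq!(code[jumpdest], 0x5b, "not a JUMPDEST");
    assert!(proof_valid, "invalid jumpdest proof");
    bits[jumpdest] = true;
}

fn write_table_new(code: &[u8], jumpdest: usize, proof_valid: bool, bits: &mut [bool]) {
    // New behaviour: the bit is written only when the guess checks out; otherwise nothing happens.
    if code.get(jumpdest) == Some(&0x5b) && proof_valid {
        bits[jumpdest] = true;
    }
}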


@ -369,7 +369,7 @@ call_too_deep:
// Perform jumpdest analysis
GET_CONTEXT
// stack: ctx, code_size, retdest
%validate_jumpdest_table
%jumpdest_analisys
PUSH 0 // jump dest
EXIT_KERNEL
// (Old context) stack: new_ctx


@ -1,17 +1,14 @@
// Set @SEGMENT_JUMPDEST_BITS to one between positions [init_pos, final_pos],
// for the given context's code. Panics if we never hit final_pos
// for the given context's code.
// Pre stack: init_pos, ctx, final_pos, retdest
// Post stack: (empty)
global verify_path:
global verify_path_and_write_table:
loop:
// stack: i, ctx, final_pos, retdest
// Ideally we would break if i >= final_pos, but checking i > final_pos is
// cheaper. It doesn't hurt to over-read by 1, since we'll read 0 which is
// a no-op.
DUP3 DUP2 EQ // i == final_pos
%jumpi(return)
%jumpi(proof_ok)
DUP3 DUP2 GT // i > final_pos
%jumpi(panic)
%jumpi(proof_not_ok)
// stack: i, ctx, final_pos, retdest
%stack (i, ctx) -> (ctx, @SEGMENT_CODE, i, i, ctx)
@ -22,24 +19,29 @@ loop:
// Slightly more efficient than `%eq_const(0x5b) ISZERO`
PUSH 0x5b
SUB
// stack: opcode != JUMPDEST, opcode, i, ctx, code_len, retdest
// stack: opcode != JUMPDEST, opcode, i, ctx, final_pos, retdest
%jumpi(continue)
// stack: JUMPDEST, i, ctx, code_len, retdest
// stack: JUMPDEST, i, ctx, final_pos, retdest
%stack (JUMPDEST, i, ctx) -> (1, ctx, @SEGMENT_JUMPDEST_BITS, i, JUMPDEST, i, ctx)
MSTORE_GENERAL
continue:
// stack: opcode, i, ctx, code_len, retdest
// stack: opcode, i, ctx, final_pos, retdest
%add_const(code_bytes_to_skip)
%mload_kernel_code
// stack: bytes_to_skip, i, ctx, code_len, retdest
// stack: bytes_to_skip, i, ctx, final_pos, retdest
ADD
// stack: i, ctx, code_len, retdest
// stack: i, ctx, final_pos, retdest
%jump(loop)
return:
// stack: i, ctx, code_len, retdest
proof_ok:
// stack: i, ctx, final_pos, retdest
// We already know final pos is a jumpdest
%stack (i, ctx, final_pos) -> (1, ctx, @SEGMENT_JUMPDEST_BITS, i)
MSTORE_GENERAL
JUMP
proof_not_ok:
%pop3
JUMP
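
For intuition, a minimal Rust model of verify_path_and_write_table (assumed semantics, not the kernel itself): walk the code from init_pos, skipping PUSH immediates the way the code_bytes_to_skip table does, mark every JUMPDEST seen on the way, and mark final_pos only if the walk lands on it exactly; overshooting means the guessed path was bad, and nothing more is written instead of aborting.

fn verify_path_and_write_table(code: &[u8], init_pos: usize, final_pos: usize, bits: &mut [bool]) {
    let mut i = init_pos;
    loop {
        if i == final_pos {
            bits[final_pos] = true; // proof_ok: final_pos is already known to hold 0x5b
            return;
        }
        if i > final_pos {
            return; // proof_not_ok: the walk missed final_pos; write nothing, don't abort
        }
        // Over-reading by one byte is harmless: a 0 (STOP) is read, as the loop comment notes.
        let opcode = *code.get(i).unwrap_or(&0);
        if opcode == 0x5b {
            bits[i] = true; // mark JUMPDESTs encountered along the path
        }
        // PUSH1..PUSH32 (0x60..=0x7f) carry 1..=32 immediate bytes; other opcodes carry none.
        let immediates = if (0x60..=0x7f).contains(&opcode) { (opcode - 0x5f) as usize } else { 0 };
        i += 1 + immediates;
    }
}
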
@ -101,26 +103,21 @@ code_bytes_to_skip:
// - code[jumpdest] = 0x5b.
// stack: proof_prefix_addr, jumpdest, ctx, retdest
// stack: (empty); nothing is written if jumpdest is not a valid destination
global is_jumpdest:
global write_table_if_jumpdest:
// stack: proof_prefix_addr, jumpdest, ctx, retdest
//%stack
// (proof_prefix_addr, jumpdest, ctx) ->
// (ctx, @SEGMENT_JUMPDEST_BITS, jumpdest, proof_prefix_addr, jumpdest, ctx)
//MLOAD_GENERAL
//%jumpi(return_is_jumpdest)
%stack
(proof_prefix_addr, jumpdest, ctx) ->
(ctx, @SEGMENT_CODE, jumpdest, jumpdest, ctx, proof_prefix_addr)
MLOAD_GENERAL
// stack: opcode, jumpdest, ctx, proof_prefix_addr, retdest
%assert_eq_const(0x5b)
%jump_eq_const(0x5b, return)
//stack: jumpdest, ctx, proof_prefix_addr, retdest
SWAP2 DUP1
// stack: proof_prefix_addr, proof_prefix_addr, ctx, jumpdest
ISZERO
%jumpi(verify_path)
%jumpi(verify_path_and_write_table)
// stack: proof_prefix_addr, ctx, jumpdest, retdest
// If we are here we need to check that the next 32 bytes are not
// PUSHXX for XX >= 32 - i <=> opcode < 0x7f - i = 127 - i, 0 <= i < 32,
@ -135,9 +132,8 @@ global is_jumpdest:
%check_and_step(99) %check_and_step(98) %check_and_step(97) %check_and_step(96)
// check the remaining path
%jump(verify_path)
return_is_jumpdest:
%jump(verify_path_and_write_table)
return:
// stack: proof_prefix_addr, jumpdest, ctx, retdest
%pop3
JUMP
@ -154,7 +150,7 @@ return_is_jumpdest:
DUP1
%gt_const(127)
%jumpi(%%ok)
%assert_lt_const($max)
%jumpi_lt_const($max, return)
// stack: proof_prefix_addr, ctx, jumpdest
PUSH 0 // We need something to pop
%%ok:
@ -162,13 +158,13 @@ return_is_jumpdest:
%increment
%endmacro
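
Each %check_and_step($max) expansion examines one byte of the 32-byte window that starts at proof_prefix_addr and then advances the pointer; the decreasing $max arguments (..., 99, 98, 97, 96) encode the opcode < 0x7f - i bound, so no PUSH inside the window can reach beyond it. A hedged Rust model of a single step (it assumes the byte at the current address has just been loaded, which the elided part of the macro does):

fn check_and_step(code: &[u8], addr: usize, max: u8) -> Option<usize> {
    let opcode = code[addr];
    // Opcodes above 127 (0x7f) are not PUSHes, so they can never hide a jumpdest in push data.
    if opcode > 127 || opcode < max {
        Some(addr + 1) // %increment: move on to the next byte of the window
    } else {
        None // the old %assert_lt_const aborted here; the new %jumpi_lt_const bails out quietly
    }
}
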
%macro is_jumpdest
%macro write_table_if_jumpdest
%stack (proof, addr, ctx) -> (proof, addr, ctx, %%after)
%jump(is_jumpdest)
%jump(write_table_if_jumpdest)
%%after:
%endmacro
// Check if the jumpdest table is correct. This is done by
// Write the jumpdest table. This is done by
// non-deterministically guessing the sequence of jumpdest
// addresses used during program execution within the current context.
// For each jumpdest address we also non-deterministically guess
@ -179,7 +175,7 @@ return_is_jumpdest:
//
// stack: ctx, retdest
// stack: (empty)
global validate_jumpdest_table:
global jumpdest_analisys:
// If address > 0 then address is interpreted as address' + 1
// and the next prover input should contain a proof for address'.
PROVER_INPUT(jumpdest_table::next_address)
@ -189,24 +185,22 @@ global validate_jumpdest_table:
// This is just a hook used to avoid verifying the jumpdest
// table in other contexts. It is useful during proof generation,
// allowing table verification to be skipped when simulating user code.
global validate_jumpdest_table_end:
global jumpdest_analisys_end:
POP
JUMP
check_proof:
%decrement
DUP2 DUP2
// stack: address, ctx, address, ctx
DUP2 SWAP1
// stack: address, ctx, ctx
// We read the proof
PROVER_INPUT(jumpdest_table::next_proof)
// stack: proof, address, ctx, address, ctx
%is_jumpdest
%stack (address, ctx) -> (1, ctx, @SEGMENT_JUMPDEST_BITS, address, ctx)
MSTORE_GENERAL
// stack: proof, address, ctx, ctx
%write_table_if_jumpdest
%jump(validate_jumpdest_table)
%jump(jumpdest_analisys)
%macro validate_jumpdest_table
%macro jumpdest_analisys
%stack (ctx) -> (ctx, %%after)
%jump(validate_jumpdest_table)
%jump(jumpdest_analisys)
%%after:
%endmacro
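
A compact Rust model of the driver loop (names and signatures here are assumptions for illustration): the prover feeds each guessed address as address + 1, a 0 terminates the possibly-empty table, and every address is followed by its proof.

fn jumpdest_analysis_model(
    mut next_address: impl FnMut() -> usize,               // PROVER_INPUT(jumpdest_table::next_address)
    mut next_proof: impl FnMut() -> usize,                  // PROVER_INPUT(jumpdest_table::next_proof)
    mut write_table_if_jumpdest: impl FnMut(usize, usize),  // takes (proof, address), as sketched earlier
) {
    loop {
        let shifted = next_address();
        if shifted == 0 {
            return; // no more guessed jumpdests for this context
        }
        let address = shifted - 1; // check_proof starts with %decrement
        let proof = next_proof();
        // An invalid (proof, address) pair no longer aborts; it just leaves the bit unset.
        write_table_if_jumpdest(proof, address);
    }
}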


@ -8,6 +8,19 @@
jumpi
%endmacro
%macro jump_eq_const(c, jumpdest)
PUSH $c
SUB
%jumpi($jumpdest)
%endmacro
%macro jumpi_lt_const(c, jumpdest)
// Instead of asserting x < c, jump to $jumpdest when the check fails,
// leveraging the fact that (x < c) == !(x >= c).
%ge_const($c)
%jumpi($jumpdest)
%endmacro
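
These two helpers replace the aborting %assert_eq_const / %assert_lt_const call sites in the jumpdest analysis code above. As written, %jump_eq_const leaves $c - x on the stack, so its branch fires when the top of the stack differs from $c (the same PUSH 0x5b / SUB trick used in the analysis loop), while %jumpi_lt_const branches exactly when the old < assertion would have failed. A small Rust check of the branch conditions (illustrative only):

fn jump_eq_const_taken(x: u8, c: u8) -> bool {
    // PUSH $c; SUB leaves c - x (wrapping), and %jumpi fires on any non-zero value.
    c.wrapping_sub(x) != 0
}

fn jumpi_lt_const_taken(x: u8, c: u8) -> bool {
    // %ge_const($c); %jumpi: taken when x >= c, i.e. exactly when (x < c) fails.
    x >= c
}

fn main() {
    assert!(!jump_eq_const_taken(0x5b, 0x5b)); // JUMPDEST byte: fall through and verify the path
    assert!(jump_eq_const_taken(0x60, 0x5b));  // anything else: branch away instead of aborting
    assert!(jumpi_lt_const_taken(0x7f, 99));   // a PUSH32 where opcode < 99 is required: branch out
}
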
%macro pop2
%rep 2
POP


@ -5,8 +5,8 @@ use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::opcodes::{get_opcode, get_push_opcode};
#[test]
fn test_validate_jumpdest_table() -> Result<()> {
let validate_jumpdest_table = KERNEL.global_labels["validate_jumpdest_table"];
fn test_jumpdest_analisys() -> Result<()> {
let jumpdest_analisys = KERNEL.global_labels["jumpdest_analisys"];
const CONTEXT: usize = 3; // arbitrary
let add = get_opcode("ADD");
@ -29,7 +29,7 @@ fn test_validate_jumpdest_table() -> Result<()> {
// Contract creation transaction.
let initial_stack = vec![0xDEADBEEFu32.into(), CONTEXT.into()];
let mut interpreter = Interpreter::new_with_kernel(validate_jumpdest_table, initial_stack);
let mut interpreter = Interpreter::new_with_kernel(jumpdest_analisys, initial_stack);
interpreter.set_code(CONTEXT, code);
interpreter.set_jumpdest_bits(CONTEXT, jumpdest_bits);


@ -299,15 +299,15 @@ fn simulate_cpu_between_labels_and_get_user_jumps<F: Field>(
let mut jumpdest_addresses: HashMap<_, BTreeSet<usize>> = HashMap::new();
state.registers.program_counter = KERNEL.global_labels[initial_label];
let initial_clock = state.traces.clock();
let initial_context = state.registers.context;
log::debug!("Simulating CPU for jumpdest analysis.");
loop {
// Skip jumpdest table validation in simulations.
if state.registers.program_counter == KERNEL.global_labels["validate_jumpdest_table"] {
state.registers.program_counter =
KERNEL.global_labels["validate_jumpdest_table_end"]
if state.registers.program_counter == KERNEL.global_labels["jumpdest_analisys"] {
state.registers.program_counter = KERNEL.global_labels["jumpdest_analisys_end"]
}
let pc = state.registers.program_counter;
let context = state.registers.context;
@ -337,6 +337,11 @@ fn simulate_cpu_between_labels_and_get_user_jumps<F: Field>(
},
U256::one(),
);
let jumpdest_opcode = state.memory.get(MemoryAddress {
context,
segment: Segment::Code as usize,
virt: jumpdest,
});
if let Some(ctx_addresses) = jumpdest_addresses.get_mut(&context) {
ctx_addresses.insert(jumpdest);
} else {
@ -344,7 +349,10 @@ fn simulate_cpu_between_labels_and_get_user_jumps<F: Field>(
}
}
if halt {
log::debug!("Simulated CPU halted after {} cycles", state.traces.clock());
log::debug!(
"Simulated CPU halted after {} cycles",
state.traces.clock() - initial_clock
);
return Ok(Some(jumpdest_addresses));
}
transition(state).map_err(|_| {

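A side note on the per-context bookkeeping above: the get_mut/else insertion into jumpdest_addresses could be collapsed with the entry API, which is the more idiomatic Rust for a map of sets (a suggestion, not part of this change):

use std::collections::{BTreeSet, HashMap};

fn record_jumpdest(map: &mut HashMap<usize, BTreeSet<usize>>, context: usize, jumpdest: usize) {
    // Equivalent to the if/else above: create the context's set on first use, then insert.
    map.entry(context).or_default().insert(jumpdest);
}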

@ -306,7 +306,7 @@ impl<F: Field> GenerationState<F> {
// Simulate the user's code and (unnecessarily) part of the kernel code, skipping the validate table call
let Some(jumpdest_table) = simulate_cpu_between_labels_and_get_user_jumps(
"validate_jumpdest_table_end",
"jumpdest_analisys_end",
"terminate_common",
self,
)?
@ -385,7 +385,8 @@ impl<F: Field> GenerationState<F> {
}
}
/// For each address in `jumpdest_table` it searches for a proof, that is, the closest address
/// For each address in `jumpdest_table`, each bounded by largest_address,
/// this function searches for a proof. A proof is the closest address
/// for which none of the previous 32 bytes in the code (including opcodes
/// and pushed bytes) is a PUSHXX with the address in its range. It returns
/// a vector of even size containing proofs followed by their addresses.
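
One way to picture the search (an illustrative reading with assumed names, not necessarily the function's exact algorithm): choose the closest start p at or below jumpdest - 32 such that no byte in the 32-byte window starting at p is a PUSH long enough to reach past the window; the kernel then only has to walk from p + 32 to the jumpdest, and a proof of 0 always remains available, making the kernel walk from the start of the code.

fn find_proof(code: &[u8], jumpdest: usize) -> usize {
    if jumpdest < 32 {
        return 0; // short prefixes are simply walked from the beginning of the code
    }
    (0..=jumpdest - 32)
        .rev()
        .find(|&p| {
            (0..32).all(|i| {
                let op = code[p + i] as usize;
                // Harmless if it is not a PUSH, or a PUSH whose immediates stop inside the window.
                !(0x60..=0x7f).contains(&op) || op - 0x5f < 32 - i
            })
        })
        .unwrap_or(0) // falling back to 0 makes the kernel walk the whole code prefix
}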


@ -395,7 +395,12 @@ fn try_perform_instruction<F: Field>(
if state.registers.is_kernel {
log_kernel_instruction(state, op);
} else {
log::debug!("User instruction: {:?}", op);
log::debug!(
"User instruction: {:?}, ctx = {:?}, stack = {:?}",
op,
state.registers.context,
state.stack()
);
}
fill_op_flag(op, &mut row);